column             dtype     min   max
content            string    1     1.05M   (string length)
input_ids          list      1     883k    (list length)
ratio_char_token   float64   1     22.9
token_count        int64     1     883k
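Each row pairs a raw source string (content) with its token IDs (input_ids), the number of tokens (token_count), and a characters-per-token ratio (ratio_char_token), which matches len(content) / token_count for the rows shown (e.g. 54 / 14 ≈ 3.857143 for the ink2canvas import line). Within the rows themselves, rows beginning with "from" start with ID 6738, rows beginning with "import" start with 11748, and 198 appears where line breaks fall, which is consistent with a GPT-2 BPE vocabulary; the actual tokenizer and build script are not included here. The sketch below assumes a GPT-2 tokenizer from Hugging Face transformers and a hypothetical make_row helper, purely to illustrate how one row's fields relate:

```python
# Minimal sketch (not the dataset's actual build script): reproduce one row's
# fields under the assumption that input_ids come from a GPT-2 BPE tokenizer.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def make_row(content: str) -> dict:
    # Hypothetical helper: tokenize the source string and derive the
    # token_count and ratio_char_token columns from it.
    input_ids = tokenizer(content)["input_ids"]
    return {
        "content": content,
        "input_ids": input_ids,
        "token_count": len(input_ids),
        "ratio_char_token": len(content) / len(input_ids),
    }

row = make_row("from ink2canvas.svg.AbstractShape import AbstractShape")
# If the tokenizer assumption holds, this prints 14 tokens and a ratio of
# roughly 3.857, matching the corresponding row shown below.
print(row["token_count"], round(row["ratio_char_token"], 6))
```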
from unsync import unsync import asyncio import datetime import math import aiohttp import requests if __name__ == '__main__': main()
[ 6738, 555, 27261, 1330, 555, 27261, 198, 11748, 30351, 952, 198, 11748, 4818, 8079, 198, 11748, 10688, 198, 11748, 257, 952, 4023, 198, 11748, 7007, 628, 628, 628, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198,...
3.085106
47
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['WorkspaceArgs', 'Workspace'] class Workspace(pulumi.CustomResource): def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__["description"] = description __props__.__dict__["friendly_name"] = friendly_name __props__.__dict__["location"] = location __props__.__dict__["name"] = name if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["tags"] = tags super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__, opts)
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 17202, 39410, 25, 428, 2393, 373, 7560, 416, 262, 21624, 12994, 24118, 687, 10290, 357, 27110, 5235, 8, 16984, 13, 17202, 198, 2, 17202, 2141, 407, 4370, 416, 1021, 4556, 345, 821, 1728, 345, 760...
2.316254
1,132
"""Code for reading and writing results to google sheets""" from bs4 import BeautifulSoup import requests import warnings import json import pandas as pd from six.moves.urllib.parse import urlparse, parse_qs from six.moves.urllib.request import urlopen _CELLSET_ID = "AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4" def get_sheet_as_df(base_url, kk, columns="A:AG"): """ Gets the sheet as a list of Dicts (directly importable to Pandas) :return: """ try: # TODO: we should probably get the whole sheet all_vals = "{base_url}/{cols}?key={kk}".format(base_url=base_url, cols=columns, kk=kk) t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values'] frow = t_data.pop(0) return pd.DataFrame([ dict([(key, '' if idx >= len(irow) else irow[idx]) for idx, key in enumerate(frow)]) for irow in t_data]) except IOError as e: warnings.warn( 'Sheet could not be accessed, check internet connectivity, \ proxies and permissions: {}'.format( e)) return pd.DataFrame([{}])
[ 37811, 10669, 329, 3555, 290, 3597, 2482, 284, 23645, 15747, 37811, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, 11748, 7007, 198, 11748, 14601, 198, 11748, 33918, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 2237, 13, 76, ...
1.987402
635
# -*- coding: utf-8 -* ''' 98 / \ 96 84 / \ / \ 92 82 78 47 / \ / \ / \ / \ 33 26 51 85 50 15 44 60 / \ / \ / \ / \ / \ / \ / \ / \ 40 51 98 51 7 17 94 82 32 21 64 60 7 44 63 63 ''' import random if __name__ == '__main__': N = 31 M = 100 heap = Maxheap(N) for i in range(0,N): k = random.randint(1, M) heap.insert(k) # arr = [random.randint(1,M) for i in range(N)] # heap = Maxheap(len(arr),arr) print(heap.size()) print(heap.data) print(heap.extractMax())
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 198, 198, 7061, 6, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 198, 220, 220, 220, 220, 220...
1.511278
532
from ink2canvas.svg.AbstractShape import AbstractShape
[ 6738, 16882, 17, 5171, 11017, 13, 21370, 70, 13, 23839, 33383, 1330, 27741, 33383 ]
3.857143
14
import pandas datas = pandas.read_csv("../../Sample/example_dataset.csv", index_col=0) print(datas)
[ 11748, 19798, 292, 198, 198, 19608, 292, 796, 19798, 292, 13, 961, 62, 40664, 7203, 40720, 40720, 36674, 14, 20688, 62, 19608, 292, 316, 13, 40664, 1600, 6376, 62, 4033, 28, 15, 8, 198, 4798, 7, 19608, 292, 8, 198 ]
2.525
40
from flask import * app = Flask(__name__) import botty # ---------------------------------- # ----------------------------------- # ----------------------------------- # ----------------------------------- if __name__ == "__main__": app.debug = True app.run(host="0.0.0.0")
[ 6738, 42903, 1330, 1635, 198, 1324, 796, 46947, 7, 834, 3672, 834, 8, 198, 198, 11748, 10214, 774, 198, 198, 2, 20368, 438, 198, 2, 20368, 6329, 628, 198, 2, 20368, 6329, 198, 2, 20368, 6329, 198, 198, 361, 11593, 3672, 834, 6624, ...
3.776316
76
#!/usr/bin/env python3 import os DATABASE="/home/tomate/Warehouse/syte/meta.db" XLSDIR = "/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/" temp = [i for i in next(os.walk(XLSDIR))[2] if i.endswith("xlsx") or i.endswith("xls")] flist = {} for i in temp: name = i.split(" ")[0].split("-")[0].split(".")[0] if name.startswith("~") or name.startswith("PR") or name.startswith("FAB"): continue else: flist[name] = i
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 28686, 198, 198, 35, 1404, 6242, 11159, 35922, 11195, 14, 39532, 378, 14, 38824, 4803, 14, 1837, 660, 14, 28961, 13, 9945, 1, 198, 55, 6561, 34720, 796, 12813, 76, 42...
2.147619
210
#!/usr/bin/env python import sys from setuptools import setup from setuptools.command.test import test as TestCommand setup( name='range-requests-proxy', version='0.1', description='Asynchronous HTTP proxy for HTTP Range Requests', author='Marko Trajkov', author_email='markostrajkov@gmail.com', cmdclass={'test': PyTest}, tests_require=['pytest>=2.8.0', 'mock==2.0.0'], install_requires=['tornado==4.4.1', 'pycurl==7.43.0'], packages=['rangerequestsproxy'], license='BSD', url='https://github.com/markostrajkov/range-requests-proxy', )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 25064, 198, 6738, 900, 37623, 10141, 1330, 9058, 198, 6738, 900, 37623, 10141, 13, 21812, 13, 9288, 1330, 1332, 355, 6208, 21575, 628, 198, 198, 40406, 7, 198, 220, 220, 22...
2.554585
229
import os import pytest import torch import pytorch_pfn_extras.onnx as tou from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net
[ 11748, 28686, 198, 198, 11748, 12972, 9288, 198, 11748, 28034, 198, 198, 11748, 12972, 13165, 354, 62, 79, 22184, 62, 2302, 8847, 13, 261, 77, 87, 355, 2819, 198, 6738, 5254, 13, 9078, 13165, 354, 62, 79, 22184, 62, 2302, 8847, 62, ...
2.62069
58
#!/usr/bin/env python # -*- coding:UTF-8 -*- ''' @Description: @Author: Zpp @Date: 2020-05-28 13:44:29 @LastEditors: Zpp @LastEditTime: 2020-05-28 14:02:02 ''' params = { # 'fields': { 'type': { 'name': '', 'type': 'int', 'between': [1, 2, 3], 'required': True }, 'document': { 'name': '', 'type': 'file', 'required': True, 'msg': '' }, 'admin_id': { 'name': '', 'type': 'str', 'required': True }, 'time': { 'name': '', 'type': 'str', 'required': True } }, # 'Export': ['type'], # 'Import': ['document'], # 'Login': ['admin_id', 'time'] }
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 48504, 12, 23, 532, 9, 12, 198, 7061, 6, 198, 31, 11828, 25, 220, 198, 31, 13838, 25, 1168, 381, 198, 31, 10430, 25, 12131, 12, 2713, 12, 2078, 1511, ...
1.669405
487
import mxnet as mx
[ 11748, 285, 87, 3262, 355, 285, 87, 628 ]
2.5
8
import datetime import json from calendar import timegm from urllib.parse import parse_qsl import requests from allauth.socialaccount import models as aamodels from requests_oauthlib import OAuth1 from rest_framework import parsers, renderers from rest_framework import status from rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.views import APIView from rest_framework_jwt.authentication import JSONWebTokenAuthentication from rest_framework_jwt.settings import api_settings from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler from dss import settings from spa.models import UserProfile from spa.models.socialaccountlink import SocialAccountLink def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload): """ Do some magic here to find user account and deprecate psa 1. Look for account in """ user = None try: sa = SocialAccountLink.objects.get(social_id=uid) sa.type = provider sa.social_id = uid sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() user = UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist: # try allauth try: aa = aamodels.SocialAccount.objects.get(uid=uid) try: user = UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist: print('Need to create UserProfile') # we got an allauth, create the SocialAccountLink sa = SocialAccountLink() sa.user = user sa.social_id = aa.uid sa.type = aa.provider sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() except aamodels.SocialAccount.DoesNotExist: print('Need to create social model') return user if user else None
[ 11748, 4818, 8079, 198, 11748, 33918, 198, 6738, 11845, 1330, 640, 39870, 198, 6738, 2956, 297, 571, 13, 29572, 1330, 21136, 62, 80, 6649, 198, 198, 11748, 7007, 198, 6738, 477, 18439, 13, 14557, 23317, 1330, 4981, 355, 257, 321, 375, ...
2.6121
843
import requests import aiohttp from constants import API_KEY
[ 11748, 7007, 198, 198, 11748, 257, 952, 4023, 198, 6738, 38491, 1330, 7824, 62, 20373, 198 ]
3.875
16
""" En samling hjelpefunksjoner som bruker nvdbapiv3-funksjonene til gjre nyttige ting, f.eks. lagre geografiske datasett Disse hjelpefunksjonene forutsetter fungerende installasjon av geopandas, shapely og en del andre ting som m installeres separat. Noen av disse bibliotekene kunne historisk av og til vre plundrete installere, evt ha versjonskonflikter seg i mellom, spesielt p windows. Slikt plunder hrer historien til (stort sett) Anbefalingen er like fullt bruke (ana)conda installasjon i et eget "environment". Dette er god kodehygiene og sikrer minimalt med kluss, samt ikke minst: Eventuelt kluss lar seg greit reparere ved lage nytt "enviroment", uten at det pvirker hele python-installasjonen din. """ import re import pdb from shapely import wkt # from shapely.ops import unary_union import pandas as pd import geopandas as gpd from datetime import datetime import nvdbapiv3 from apiforbindelse import apiforbindelse def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True): """ Lagrer NVDB vegnett og angitte objekttyper til geopackage ARGUMENTS objekttyper: Liste med objekttyper du vil lagre KEYWORDS mittfilter=None : Dictionary med filter til skeobjekt i nvdbapiv3.py, for eksempel { 'kommune' : 5001 } Samme filter brukes p bde vegnett og fagdata vegnett=True : Bool, default=True. Angir om vi skal ta med data om vegnett eller ikke vegsegmenter=False : Bool, default=False. Angir om vi skal repetere objektet delt inn etter vegsegementer geometri=True : Bool, default=True. Angir om vi skal hente geometri fra egengeometri (hvis det finnes) Hvis du nsker presentere vegobjekt ut fra objektets stedfesting langs veg s bruker du kombinasjonen vegsegmenter=True, geometri=False RETURNS None """ if not '.gpkg' in filnavn: filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg' if not isinstance(objekttyper, list ): objekttyper = [ objekttyper ] for enObjTypeId in objekttyper: enObjTypeId = int( enObjTypeId ) sok = nvdbapiv3.nvdbFagdata( enObjTypeId ) if mittfilter: sok.filter( mittfilter ) stat = sok.statistikk() objtypenavn = sok.objektTypeDef['navn'] print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn ) lagnavn = 'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() ) rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri ) if len( rec ) > 0: mindf = pd.DataFrame( rec ) # M trickse litt for unng navnekollisjon kolonner = list( mindf.columns ) lowerkolonner = [ x.lower() for x in kolonner ] # Duplicate element indices in list # Using list comprehension + list slicing # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res = [idx for idx, val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]] for ii, dublett in enumerate( res): mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' + str( ii+1 ) }, inplace=True ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) # m droppe kolonne vegsegmenter hvis du har vegsegmenter=False if 'vegsegmenter' in minGdf.columns: minGdf.drop( 'vegsegmenter', 1, inplace=True) minGdf.drop( 'geometri', 1, inplace=True) minGdf.to_file( filnavn, layer=lagnavn, driver="GPKG") else: print( 'Ingen forekomster av', objtypenavn, 'for filter', mittfilter) if vegnett: veg = nvdbapiv3.nvdbVegnett() if mittfilter: junk = mittfilter.pop( 'egenskap', None) junk = mittfilter.pop( 'overlapp', None) veg.filter( mittfilter ) 
print( 'Henter vegnett') rec = veg.to_records() mindf = pd.DataFrame( rec) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) minGdf.to_file( filnavn, layer='vegnett', driver="GPKG") def dumpkontraktsomr( komr = [] ): """ Dumper et har (hardkodede) kontraktsomrder """ if not komr: komr = [ '9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ] komr = [ '9253 Agder elektro og veglys 2021-2024'] objliste = [ 540, # Trafikkmengde 105, # Fartsgrense 810, # Vinterdriftsklasse 482, # trafikkregistreringsstasjon 153, # Vrstasjon 64, # Ferjeleie 39, # Rasteplass 48, # Fortau 199, # Trr 15, # Grasdekker 274, # Blomsterbeplanting 511, # Busker 300 , # Naturomrde (ingen treff i Haugesund kontrakt) 517, # Artsrik vegkant 800, # Fremmede arter 67, # Tunnellp 846, # Skredsikring, bremsekjegler 850 # Skredsikring, forbygning ] objliste = [] for enkontrakt in komr: filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt ) nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt }) def firefeltrapport( mittfilter={}): """ Finner alle firefeltsveger i Norge, evt innafor angitt skekriterie Bruker skeobjektet nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None KEYWORDS: mittfilter: Dictionary med skefilter RETURNS geodataframe med resultatet """ v = nvdbapiv3.nvdbVegnett() # Legger til filter p kun fase = V (eksistende veg), sfremt det ikke kommer i konflikt med anna filter if not 'vegsystemreferanse' in mittfilter.keys(): mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv' if not 'kryssystem' in mittfilter.keys(): mittfilter['kryssystem'] = 'false' if not 'sideanlegg' in mittfilter.keys(): mittfilter['sideanlegg'] = 'false' v.filter( mittfilter ) # Kun kjrende, og kun verste topologiniv, og ikke adskiltelop=MOT v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' } ) data = [] vegsegment = v.nesteForekomst() while vegsegment: if sjekkfelt( vegsegment, felttype='firefelt'): vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] ) vegsegment['geometri'] = vegsegment['geometri']['wkt'] vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] = vegsegment['vref'].split()[0] vegsegment['vegkategori'] = vegsegment['vref'][0] vegsegment['adskilte lp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_lp'] data.append( vegsegment ) vegsegment = v.nesteForekomst() if len( data ) > 1: mindf = pd.DataFrame( data ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) mindf.drop( 'kontraktsomrder', 1, inplace=True) mindf.drop( 'riksvegruter', 1, inplace=True) mindf.drop( 'href', 1, inplace=True) mindf.drop( 'metadata', 1, inplace=True) mindf.drop( 'kortform', 1, inplace=True) mindf.drop( 'veglenkenummer', 1, inplace=True) mindf.drop( 'segmentnummer', 1, inplace=True) mindf.drop( 'startnode', 1, inplace=True) mindf.drop( 'sluttnode', 1, inplace=True) mindf.drop( 'referanse', 1, inplace=True) mindf.drop( 'mlemetode', 1, inplace=True) mindf.drop( 'mledato', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) return minGdf else: return None def sjekkfelt( vegsegment, felttype='firefelt' ): """ Sjekker hva slags felt som finnes p et vegsegment ARGUMENTS: vegsegment - dicionary med data om en bit av vegnettet hentet fra 
https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype - hva slags felttype som skal sjekkes. Mulige verdier: firefelt (default). Antar at firefeltsveg betyr at kjrefeltnummer 1-4 er brukt og er enten vanlig kj.felt, kollektivfelt eller reversibelt felt (flere varianter kommer nr de trengs) RETURNS boolean - True hvis kjrefeltene er av riktig type """ svar = False vr = 'vegsystemreferanse' sr = 'strekning' if felttype == 'firefelt': if 'feltoversikt' in vegsegment.keys() and 'detaljniv' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljniv']: kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) ) if vr in vegsegment.keys(): if sr in vegsegment[vr] and 'adskilte_lp' in vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_lp'] == 'Nei' and kjfelt.issuperset( { 1, 2, 3, 4}): svar = True # Siste klausul her har f.eks. forekommet p Fv5724, envegskjrt tunnel ved Oldenvatnet. elif vegsegment[vr][sr]['adskilte_lp'] == 'Med' and len( kjfelt ) >= 2 and not kjfelt.issuperset( {1, 2} ): svar = True return svar else: raise NotImplementedError('Sjekkfelt: Sjekk for felt av type: ' + felttype + 'er ikke implementert (enn)' ) def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]): """ Returnerer liste med kjrefeltnummer filtrert p hva slags feltkode vi evt har ARGUMENTS feltoversikt - Liste med feltkoder for et vegsegment. KEYWORDS mittfilter=['vanlig', 'K', 'R' ] - Liste med koder for hva slags felt vi skal telle med. Sjekk hndbok v830 Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige verdier, kortversjon: 'vanlig' - Helt vanlig kjrefelt, kjrefeltnumemr er angitt som heltall uten noen bokstaver. 'K' - kollektivfelt 'R' - reversibelt felt 'S' - Sykkelfelt 'H' - Svingefelt mot hyre 'V' - Svingefelt mot venstre 'B' - Ekstra felt for bompengeinnkreving RETURNS Liste med kjrefeltnummer hvor kun kjrefelt som angitt med mittfilter-nkkelord er inkludert """ data = [ ] for felt in feltoversikt: feltbokstav = re.findall( '[A-Za-z]', felt) if feltbokstav: feltbokstav = feltbokstav[0] else: feltbokstav = 'vanlig' if feltbokstav in mittfilter: feltnummer = int( re.split( '[A-Z]', felt)[0] ) data.append( feltnummer ) return data
[ 37811, 198, 4834, 6072, 1359, 289, 73, 417, 431, 12543, 591, 73, 14491, 3870, 18145, 6122, 299, 85, 9945, 499, 452, 18, 12, 12543, 591, 46286, 1734, 21502, 220, 308, 73, 260, 299, 88, 926, 10045, 256, 278, 11, 277, 13, 2573, 13, 1...
1.986653
5,769
""" You are given the following information, but you may prefer to do some research for yourself. 1 Jan 1900 was a Monday. Thirty days has September, April, June and November. All the rest have thirty-one, Saving February alone, Which has twenty-eight, rain or shine. And on leap years, twenty-nine. A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400. How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)? ans: 171 """ # set to day of week for 1 Jan 1901 (Tuesday) dow = 2 sum = 0 for y in range(1901, 2001): for m in range(0, 12): if dow == 0: sum += 1 dow = (dow + no_days(m, y)) % 7 print(sum)
[ 37811, 198, 1639, 389, 1813, 262, 1708, 1321, 11, 475, 345, 743, 4702, 284, 466, 617, 2267, 329, 3511, 13, 628, 220, 220, 220, 352, 2365, 21489, 373, 257, 3321, 13, 198, 220, 220, 220, 29948, 1528, 468, 2693, 11, 198, 220, 220, 22...
3.02834
247
#!/usr/bin/env python3 # -*- encoding: utf-8 -*- """ Setup for the dbservice """ from setuptools import setup, find_packages setup( name='dbservice', version='0.9', description="Database service for storing meter data", author="Søren Aagaard Mikkelsen", author_email='smik@eng.au.dk', url='https://github.com/dbservice/dbservice', packages=find_packages(), package_data={'': ['static/*.*', 'templates/*.*']}, scripts=['manage.py'], )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 40786, 329, 262, 288, 1443, 712, 501, 198, 37811, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, ...
2.510638
188
# # -*- coding: utf-8 -*- # Copyright 2019 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ The arg spec for the junos facts module. """ from __future__ import absolute_import, division, print_function __metaclass__ = type
[ 2, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 13130, 2297, 10983, 198, 2, 22961, 3611, 5094, 13789, 410, 18, 13, 15, 10, 198, 2, 357, 3826, 27975, 45761, 393, 3740, 1378, 2503, 13, 41791, 13, 2...
2.858586
99
from flask_restful import Resource, reqparse from firebase_admin import auth as firebase_auth from dbcls.models import User parser = reqparse.RequestParser() parser.add_argument('token', type=str, required=True, nullable=False)
[ 6738, 42903, 62, 2118, 913, 1330, 20857, 11, 43089, 29572, 198, 6738, 2046, 8692, 62, 28482, 1330, 6284, 355, 2046, 8692, 62, 18439, 198, 198, 6738, 20613, 565, 82, 13, 27530, 1330, 11787, 628, 198, 48610, 796, 43089, 29572, 13, 18453, ...
3.515152
66
#Import required modules import requests import json # Get json results for the required input InputString = "kobe is a basketball player" headers = { 'Content-type': 'application/json', } data = '{"text":InputString = '+ InputString + '}' response = requests.post('http://66.76.242.198:9888/', data=data).json() #Adding a test comment to check if the automatic git pull is working or not #print(json.dumps(response, indent=4, sort_keys=True))
[ 2, 20939, 2672, 13103, 201, 198, 11748, 7007, 201, 198, 11748, 33918, 201, 198, 201, 198, 2, 3497, 33918, 2482, 329, 262, 2672, 5128, 220, 201, 198, 201, 198, 20560, 10100, 796, 366, 74, 5910, 318, 257, 9669, 2137, 1, 201, 198, 201,...
2.932099
162
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Sentencize the raw wikitext103.""" import tensorflow.compat.v1 as tf app = tf.app flags = tf.flags gfile = tf.gfile logging = tf.logging flags.DEFINE_string("wiki103_raw", None, "Path to raw wikitext103 train corpus.") flags.DEFINE_string("output_path", None, "Path to output the processed dataset.") FLAGS = flags.FLAGS if __name__ == "__main__": app.run(main)
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 2864, 383, 3012, 9552, 15417, 4816, 46665, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393,...
3.055394
343
default_bot = 'example_bots.any_to_any.bot.AnyToAny'
[ 12286, 62, 13645, 796, 705, 20688, 62, 42478, 13, 1092, 62, 1462, 62, 1092, 13, 13645, 13, 7149, 2514, 7149, 6, 198 ]
2.409091
22
from server import roles
[ 6738, 4382, 1330, 9176, 628, 628, 628, 628, 198 ]
3.666667
9
import csv import logging import os import shutil from csv import DictWriter # noqa: F401 from pyhocon import ConfigTree, ConfigFactory # noqa: F401 from typing import Dict, Any # noqa: F401 from databuilder.job.base_job import Job from databuilder.loader.base_loader import Loader from databuilder.models.neo4j_csv_serde import NODE_LABEL, \ RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401 from databuilder.utils.closer import Closer LOGGER = logging.getLogger(__name__)
[ 11748, 269, 21370, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 4423, 346, 198, 6738, 269, 21370, 1330, 360, 713, 34379, 220, 1303, 645, 20402, 25, 376, 21844, 198, 198, 6738, 12972, 71, 36221, 1330, 17056, 27660, 11, 17056, 22810, ...
2.781553
206
# -*- coding: utf-8 -*- """ @author: Hiromasa Kaneko """ import pandas as pd from sklearn.neighbors import NearestNeighbors # k-NN k_in_knn = 5 # k-NN k rate_of_training_samples_inside_ad = 0.96 # AD AD dataset = pd.read_csv('resin.csv', index_col=0, header=0) x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0) # y = dataset.iloc[:, 0] # x = dataset.iloc[:, 1:] # # 0 deleting_variables = x.columns[x.std() == 0] x = x.drop(deleting_variables, axis=1) x_prediction = x_prediction.drop(deleting_variables, axis=1) # autoscaled_x = (x - x.mean()) / x.std() autoscaled_x_prediction = (x_prediction - x.mean()) / x.std() # k-NN AD ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD ad_model.fit(autoscaled_x) # k-NN AD x model_ad # k k 2 # k 0 k_in_knn + 1 knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1) knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # DataFrame mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance']) # k_in_knn mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv # rate_of_training_samples_inside_ad * 100 % sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # ad_threshold = sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1] # AD inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold # AD TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv # k-NN knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # DataFrame mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) # k_in_knn mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv # AD inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold # AD TRUE inside_ad_flag_prediction.columns=['inside_ad_flag'] inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv') # csv
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 37811, 201, 198, 31, 9800, 25, 29379, 296, 15462, 14248, 988, 78, 201, 198, 37811, 201, 198, 201, 198, 11748, 19798, 292, 355, 279, 67, 201, 198, 6738, 1341, 3572...
2.144928
1,173
#!/usr/bin/python """ """ from mininet.net import Mininet from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch from mininet.cli import CLI from mininet.log import setLogLevel from mininet.link import Link, TCLink #conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def topology(): "Create a network." net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch ) print "*** Creating nodes" h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' ) h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' ) h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' ) h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' ) h5 = net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' ) s1 = net.addSwitch( 's1', listenPort=6671 ) s2 = net.addSwitch( 's2', listenPort=6672 ) s3 = net.addSwitch( 's3', listenPort=6673 ) s4 = net.addSwitch( 's4', listenPort=6674 ) s5 = net.addSwitch( 's5', listenPort=6675 ) c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 ) print "*** Creating links" net.addLink(s1, h1) net.addLink(s2, h2) net.addLink(s3, h3) net.addLink(s4, h4) net.addLink(s5, h5) net.addLink(s1, s2) net.addLink(s2, s3) net.addLink(s3, s4) net.addLink(s4, s5) print "*** Starting network" net.build() h1.cmd('ip route add '+conf_ip_1+'/32 dev h1-eth0') h1.cmd('sudo arp -i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1) h1.cmd('sysctl -w net.ipv4.ip_forward=1') h1.cmd('python3 listen.py &') h2.cmd('ip route add '+conf_ip_1+'/32 dev h2-eth0') h2.cmd('sudo arp -i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl -w net.ipv4.ip_forward=1') h2.cmd('python3 listen.py &') h3.cmd('ip route add '+conf_ip_1+'/32 dev h3-eth0') h3.cmd('sudo arp -i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl -w net.ipv4.ip_forward=1') h3.cmd('python3 listen.py &') h4.cmd('ip route add '+conf_ip_1+'/32 dev h4-eth0') h4.cmd('sudo arp -i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl -w net.ipv4.ip_forward=1') h4.cmd('python3 listen.py &') h5.cmd('ip route add '+conf_ip_1+'/32 dev h5-eth0') h5.cmd('sudo arp -i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl -w net.ipv4.ip_forward=1') h5.cmd('python3 listen.py &') c1.start() s1.start( [c1] ) s2.start( [c1] ) s3.start( [c1] ) s4.start( [c1] ) s5.start( [c1] ) print "*** Running CLI" CLI( net ) print "*** Stopping network" net.stop() if __name__ == '__main__': setLogLevel( 'info' ) topology()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 220, 198, 37811, 198, 37811, 198, 6738, 949, 42504, 13, 3262, 1330, 1855, 42504, 198, 6738, 949, 42504, 13, 17440, 1330, 22741, 11, 21520, 22130, 11, 440, 53, 18831, 7948, 38978, 11, 12982, 3...
1.995
1,400
#Definicion de la clase #antes de empezar una clase se declara de la siguiente manera
[ 198, 2, 7469, 47277, 295, 390, 8591, 537, 589, 198, 2, 39781, 390, 795, 431, 41046, 555, 64, 537, 589, 384, 2377, 3301, 390, 8591, 43237, 84, 1153, 68, 582, 8607, 628, 198 ]
2.69697
33
""" Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association table. """ from __future__ import print_function import logging from sqlalchemy import ( Column, ForeignKey, Integer, MetaData ) from galaxy.model.migrate.versions.util import ( add_column, drop_column ) log = logging.getLogger(__name__) metadata = MetaData()
[ 37811, 198, 44, 4254, 4226, 284, 751, 705, 335, 6814, 62, 312, 6, 5721, 284, 262, 31821, 62, 1102, 13658, 62, 19608, 292, 316, 62, 562, 41003, 3084, 13, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748...
2.879699
133
votes_t_shape = [3, 0, 1, 2] for i in range(6 - 4): votes_t_shape += [i + 4] print(votes_t_shape)
[ 29307, 62, 83, 62, 43358, 796, 685, 18, 11, 657, 11, 352, 11, 362, 60, 198, 1640, 1312, 287, 2837, 7, 21, 532, 604, 2599, 198, 220, 220, 220, 5690, 62, 83, 62, 43358, 15853, 685, 72, 1343, 604, 60, 198, 4798, 7, 29307, 62, 83,...
2.04
50
import numpy as np import itertools import gpuscheduler import argparse import os import uuid import hashlib import glob import math from itertools import product from torch.optim.lr_scheduler import OneCycleLR from os.path import join parser = argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose', action='store_true') parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a configuration.') args = parser.parse_args() gpus = 128 cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2 = {} name = 'blockwise5' constraint = 'volta32gb' # 1024 tokens * 8 update_freq * 56250 steps = 0.4608e9 tokens -> optimal batch size 3460 # model sizes: 1.92bn, 2.43bn, 1.41bn logfolder = 'adam/cc100/{0}'.format(name) ckp_name = logfolder #time_hours = 24*2 cores_per_job = 5 mem = 56*(8 if gpus > 8 else gpus) num_seeds = 1 seed_offset = 5 time_hours = 72 time_minutes = 0 #partition = 'learnlab,learnfair,scavenge' partition = 'learnfair,learnlab' #partition = 'learnfair' #partition = 'uninterruptible' change_dir = 'fairseq_private' repo = 'fairseq_private' exclude = '' s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False) fp16 = True args3 = {} args2['lr-scheduler'] = 'polynomial_decay' args2['warmup-updates'] = 2000 args2['max-update'] = 56250 args2['total-num-update'] = 56250 #args2['lr-scheduler'] = 'cosine' #args2['warmup-updates'] = 3000 #args2['max-update'] = 56250*4 args2['fp16-scale-window'] = 250 args2['clip-norm'] = 0.4 #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] #args3['adam8bits-offset'] = [1/512] #args3['prob-quant'] = [False] #args3['dist-scale'] = [1.0] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3['decoder-embed-dim'] = [2048+256] #args3['decoder-ffn-embed-dim'] = [8192+2048] #args3['max-tokens'] = [3072] #args3['update-freq'] = [2] key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr') #key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq') args3[key] = [] #lrkey = ('lr', 'warmup-init-lr') #args3[lrkey] = [] # 32-bit baseline #args3['optimizer'] = ['adam'] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] 
##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # adafactor #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)] #args2['optimizer'] = 'adafactor' #args2['beta1'] = 0.9 #args2['decay-rate'] = 0.999 ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # 8-bit #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)] args3['optimizer'] = ['adam'] args3[('use-bnb', 'optim-bits')] = [(True, 8)] args3[('stable-emb', 'no-scale-embedding')] = [(True, True)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)] args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] = [True] #lr = 0.003239 + (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0)) #args2['train-subset'] = 'train11' args4 = [] args5 = {} args6 = {} rdm = np.random.RandomState(5345) for key, value in args2.items(): cmd = cmd + ' --{0} {1}'.format(key, value) args_prod = [] for key, values in args3.items(): if isinstance(key, tuple): keyvalues = [] for tups in values: arg = '' for i, v in enumerate(tups): if v is True: v = '' if v is False: continue if len(key[i]) == 0: arg += '{0} '.format(v) else: arg += '--{0} {1} '.format(key[i], v) keyvalues.append(arg) elif isinstance(key, str): keyvalues = [] for v in values: if v is True: v = '' if v is False: keyvalues.append('') else: keyvalues.append(' --{0} {1}'.format(key, v)) args_prod.append(keyvalues) if len(args_prod) >= 2: args_prod = list(product(*args_prod)) else: new_args = [] if len(args_prod) > 0: for arg in args_prod[0]: new_args.append([arg]) args_prod = new_args jobs = [] if len(args4) == 0: args4.append('') for seed in range(num_seeds): seed = seed + seed_offset for arg4 in args4: if len(args_prod) == 0: args_prod.append(('', '')) for i, values in enumerate(args_prod): job_cmd = cmd + arg4 for val in values: job_cmd += ' {0}' .format(val) #job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ') job_cmd = job_cmd + ' --seed {0}'.format(seed) checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir = ' --save-dir {0}'.format(checkpoint_dir) job_cmd = job_cmd + save_dir cmds = [job_cmd] if rdm.rand(1) <= args.p: jobs.append(job_cmd) 
s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus) if args.dry: for i, job in enumerate(jobs): print(i, job) print('') print('Total jobs', len(jobs)) print('Time hours: {0}'.format(time_hours)) print('GPUs: {0}'.format(gpus)) print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs will be run on: {0}'.format(partition)) print('Run in folder: {0}'.format(change_dir)) if not args.dry: s.run_jobs()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 340, 861, 10141, 198, 11748, 27809, 385, 1740, 18173, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 334, 27112, 198, 11748, 12234, 8019, 198, 11748, 15095, 198, 11748, 10688, 198, 6738, ...
2.207294
3,729
from boa3.builtin import public from boa3.builtin.contract import Nep5TransferEvent transfer = Nep5TransferEvent
[ 6738, 1489, 64, 18, 13, 18780, 259, 1330, 1171, 198, 198, 6738, 1489, 64, 18, 13, 18780, 259, 13, 28484, 1330, 15310, 20, 43260, 9237, 628, 198, 39437, 796, 15310, 20, 43260, 9237, 628 ]
3.441176
34
# The Topical Guide # Copyright 2010-2011 Brigham Young University # # This file is part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>. # # The Topical Guide is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # The Topical Guide is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License # for more details. # # You should have received a copy of the GNU Affero General Public License # along with the Topical Guide. If not, see <http://www.gnu.org/licenses/>. # # If you have inquiries regarding any further use of the Topical Guide, please # contact the Copyright Licensing Office, Brigham Young University, 3760 HBLL, # Provo, UT 84602, (801) 422-9339 or 422-3821, e-mail copyright@byu.edu. from __future__ import print_function from django.shortcuts import render, redirect from django.http import HttpResponse import abtest from abtest.settings import TEST_LIST from visualize import root # Create your views here. # This view is called when the given url does not match anything
[ 2, 383, 5849, 605, 10005, 198, 2, 15069, 3050, 12, 9804, 37434, 6960, 2059, 198, 2, 198, 2, 770, 2393, 318, 636, 286, 262, 5849, 605, 10005, 1279, 4023, 1378, 21283, 79, 13, 6359, 13, 1525, 84, 13, 15532, 14, 26652, 62, 40259, 284...
3.70137
365
"""Tests for merge.py.""" from __future__ import absolute_import, division, print_function from glob import glob import os import tarfile import tempfile from neurodocker.docker import client from neurodocker.reprozip.trace import ReproZipMinimizer from neurodocker.reprozip.merge import merge_pack_files def _create_packfile(commands, dir): """Create packfile from list `commands` in debian:stretch container.""" container = client.containers.run('debian:stretch', detach=True, tty=True, security_opt=['seccomp:unconfined']) try: minimizer = ReproZipMinimizer(container.id, commands, packfile_save_dir=dir) packfile_path = minimizer.run() except: raise finally: container.stop() container.remove() return packfile_path
[ 37811, 51, 3558, 329, 20121, 13, 9078, 526, 15931, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 198, 198, 6738, 15095, 1330, 15095, 198, 11748, 28686, 198, 11748, 13422, 7753, 198, 11748, 20218...
2.474286
350
""" HTML5 contexts. :author: Dominik Lang :license: MIT """ import contextlib import io import sys __all__ = ['create_document', 'tag', 'as_link']
[ 37811, 201, 198, 28656, 20, 26307, 13, 201, 198, 201, 198, 25, 9800, 25, 11817, 1134, 16332, 201, 198, 25, 43085, 25, 17168, 201, 198, 37811, 201, 198, 201, 198, 11748, 4732, 8019, 201, 198, 11748, 33245, 201, 198, 11748, 25064, 201, ...
2.324324
74
""" Jonathan Zacsh's solution to homework #3, Nov 14., Part I """ # Per homework instructions, following lead from matlab example by professor: # http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import sys import tensorflow as tf import tempfile import os import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # not really doing intersting things in this lab, so just ignore optimization # g(x) = x^4+2x-7 ; per matlab example # g'(x) = 4x^3+2 fExFourth = Differentiable("fExFourth", lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]), lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2])) tFofTwo = fExFourth.func(2) tFofDerivTwo = fExFourth.deriv(2) log_dir = tempfile.mkdtemp(prefix="hw3-nov14-parti") print(log_dir) with tf.Session() as sess: writer = tf.summary.FileWriter(log_dir, sess.graph) fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo]) sys.stderr.write("results:\n\tf(2)=%s\n\tf'(2)=%s\n" % (fOfTwo, fDerivOfTwo)) # note: only needed when doing a *loop* of sess.run() calls, and want to see # intermediary results per-loop. #writer.add_summary(results) writer.flush() writer.close()
[ 37811, 198, 30365, 44922, 1477, 338, 4610, 284, 26131, 1303, 18, 11, 5267, 1478, 1539, 2142, 314, 198, 37811, 198, 2, 2448, 26131, 7729, 11, 1708, 1085, 422, 2603, 23912, 1672, 416, 6240, 25, 198, 2, 220, 220, 2638, 1378, 785, 316, ...
2.415507
503
import torch import torch.nn as nn import numpy as np import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt import pandas as pd from sklearn.metrics import * from sklearn.metrics import precision_recall_fscore_support as prfs device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 198, 6759, 29487, 8019, 13, 1904, 7203, 46384, 4943, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, ...
3.009434
106
""" Celery queued tasks for Helios 2010-08-01 ben@adida.net """ import copy from celery import shared_task from celery.utils.log import get_logger import signals from models import CastVote, Election, Voter, VoterFile from view_utils import render_template_raw
[ 37811, 198, 34, 417, 1924, 8358, 1739, 8861, 329, 5053, 4267, 198, 198, 10333, 12, 2919, 12, 486, 198, 11722, 31, 324, 3755, 13, 3262, 198, 37811, 198, 11748, 4866, 198, 6738, 18725, 1924, 1330, 4888, 62, 35943, 198, 6738, 18725, 1924...
3.238095
84
""" pytest fixtures """ import unittest.mock as mock import pytest import virtual_dealer.api
[ 37811, 198, 9078, 9288, 34609, 198, 37811, 198, 11748, 555, 715, 395, 13, 76, 735, 355, 15290, 198, 11748, 12972, 9288, 198, 11748, 7166, 62, 31769, 263, 13, 15042, 628, 628, 628 ]
3.0625
32
from xml.etree import ElementTree from casexml.apps.case.tests.util import check_xml_line_by_line from casexml.apps.case.xml import V2 from corehq.apps.fixtures import fixturegenerators from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \ FixtureItemField, FieldList from corehq.apps.fixtures.views import update_tables from corehq.apps.fixtures.exceptions import FixtureVersionError from corehq.apps.users.models import CommCareUser from django.test import TestCase
[ 6738, 35555, 13, 316, 631, 1330, 11703, 27660, 198, 6738, 1339, 19875, 13, 18211, 13, 7442, 13, 41989, 13, 22602, 1330, 2198, 62, 19875, 62, 1370, 62, 1525, 62, 1370, 198, 6738, 1339, 19875, 13, 18211, 13, 7442, 13, 19875, 1330, 569, ...
3.277778
162
# -*- coding: utf-8 -*- """We define custom Django signals to trigger before executing searches.""" from django.db.models.signals import post_save, pre_delete from django.dispatch import receiver from django_elasticsearch_dsl.apps import DEDConfig from readthedocs.projects.models import HTMLFile, Project from readthedocs.projects.signals import bulk_post_create, bulk_post_delete from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 1135, 8160, 2183, 37770, 10425, 284, 7616, 878, 23710, 15455, 526, 15931, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 13, 12683, 874, 1330, 1281, 62, 21928, ...
3.22069
145
"""Internal API endpoint constant library. _______ __ _______ __ __ __ | _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----. |. 1___| _| _ | | | | _ | 1___| _| _| | <| -__| |. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____| |: 1 | |: 1 | |::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy `-------' `-------' OAuth2 API - Customer SDK This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <https://unlicense.org> """ _filevantage_endpoints = [ [ "getChanges", "GET", "/filevantage/entities/changes/v2", "Retrieve information on changes", "filevantage", [ { "type": "array", "items": { "type": "string" }, "collectionFormat": "multi", "description": "Comma separated values of change ids", "name": "ids", "in": "query", "required": True } ] ], [ "queryChanges", "GET", "/filevantage/queries/changes/v2", "Returns one or more change IDs", "filevantage", [ { "minimum": 0, "type": "integer", "description": "The first change index to return in the response. " "If not provided it will default to '0'. " "Use with the `limit` parameter to manage pagination of results.", "name": "offset", "in": "query" }, { "type": "integer", "description": "The maximum number of changes to return in the response " "(default: 100; max: 500). " "Use with the `offset` parameter to manage pagination of results", "name": "limit", "in": "query" }, { "type": "string", "description": "Sort changes using options like:\n\n" "- `action_timestamp` (timestamp of the change occurrence) \n\n " "Sort either `asc` (ascending) or `desc` (descending). " "For example: `action_timestamp|asc`.\n" "The full list of allowed sorting options can be reviewed in our API documentation.", "name": "sort", "in": "query" }, { "type": "string", "description": "Filter changes using a query in Falcon Query Language (FQL). \n\n" "Common filter options include:\n\n - `host.host_name`\n - `action_timestamp`\n\n " "The full list of allowed filter parameters can be reviewed in our API documentation.", "name": "filter", "in": "query" } ] ] ]
[ 37811, 37693, 7824, 36123, 6937, 5888, 13, 628, 220, 37405, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 11593, 220, 37405, 11593, 220, 220, 220, 220, 220, 220, ...
2.54583
1,451
import itertools import sys from signal import SIGINT, default_int_handler, signal from typing import Any, Dict, List from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar from pip._vendor.progress.spinner import Spinner from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import get_indentation from pip._internal.utils.misc import format_size try: from pip._vendor import colorama # Lots of different errors can come from this, including SystemError and # ImportError. except Exception: colorama = None _BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any class WindowsMixin: def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any, Any]) -> None # The Windows terminal does not support the hide/show cursor ANSI codes # even with colorama. So we'll ensure that hide_cursor is False on # Windows. # This call needs to go before the super() call, so that hide_cursor # is set in time. The base progress bar class writes the "hide cursor" # code to the terminal in its init, so if we don't set this soon # enough, we get a "hide" with no corresponding "show"... if WINDOWS and self.hide_cursor: # type: ignore self.hide_cursor = False # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore # Check if we are running on Windows and we have the colorama module, # if we do then wrap our file with it. if WINDOWS and colorama: self.file = colorama.AnsiToWin32(self.file) # type: ignore # The progress code expects to be able to call self.file.isatty() # but the colorama.AnsiToWin32() object doesn't have that, so we'll # add it. self.file.isatty = lambda: self.file.wrapped.isatty() # The progress code expects to be able to call self.file.flush() # but the colorama.AnsiToWin32() object doesn't have that, so we'll # add it. self.file.flush = lambda: self.file.wrapped.flush() BAR_TYPES = { "off": (DownloadSilentBar, DownloadSilentBar), "on": (DefaultDownloadProgressBar, DownloadProgressSpinner), "ascii": (DownloadBar, DownloadProgressSpinner), "pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner), "emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), }
[ 11748, 340, 861, 10141, 201, 198, 11748, 25064, 201, 198, 6738, 6737, 1330, 33993, 12394, 11, 4277, 62, 600, 62, 30281, 11, 6737, 201, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 11, 7343, 201, 198, 201, 198, 6738, 7347, 13557, 85, ...
2.557
1,000
import typing import numpy as np import scripts.study_case.ID_5.matchzoo as mz from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric from .tuner import Tuner def tune( params: 'mz.ParamTable', optimizer: str = 'adam', trainloader: mz.dataloader.DataLoader = None, validloader: mz.dataloader.DataLoader = None, embedding: np.ndarray = None, fit_kwargs: dict = None, metric: typing.Union[str, BaseMetric] = None, mode: str = 'maximize', num_runs: int = 10, verbose=1 ): """ Tune model hyper-parameters. A simple shorthand for using :class:`matchzoo.auto.Tuner`. `model.params.hyper_space` reprensents the model's hyper-parameters search space, which is the cross-product of individual hyper parameter's hyper space. When a `Tuner` builds a model, for each hyper parameter in `model.params`, if the hyper-parameter has a hyper-space, then a sample will be taken in the space. However, if the hyper-parameter does not have a hyper-space, then the default value of the hyper-parameter will be used. See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage. :param params: A completed parameter table to tune. Usually `model.params` of the desired model to tune. `params.completed()` should be `True`. :param optimizer: Str or `Optimizer` class. Optimizer for optimizing model. :param trainloader: Training data to use. Should be a `DataLoader`. :param validloader: Testing data to use. Should be a `DataLoader`. :param embedding: Embedding used by model. :param fit_kwargs: Extra keyword arguments to pass to `fit`. (default: `dict(epochs=10, verbose=0)`) :param metric: Metric to tune upon. Must be one of the metrics in `model.params['task'].metrics`. (default: the first metric in `params.['task'].metrics`. :param mode: Either `maximize` the metric or `minimize` the metric. (default: 'maximize') :param num_runs: Number of runs. Each run takes a sample in `params.hyper_space` and build a model based on the sample. (default: 10) :param callbacks: A list of callbacks to handle. Handled sequentially at every callback point. :param verbose: Verbosity. (default: 1) Example: >>> import scripts.study_case.ID_5.matchzoo as mz >>> import numpy as np >>> train = mz.datasets.toy.load_data('train') >>> valid = mz.datasets.toy.load_data('dev') >>> prpr = mz.models.DenseBaseline.get_default_preprocessor() >>> train = prpr.fit_transform(train, verbose=0) >>> valid = prpr.transform(valid, verbose=0) >>> trainset = mz.dataloader.Dataset(train) >>> validset = mz.dataloader.Dataset(valid) >>> padding = mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding) >>> validloader = mz.dataloader.DataLoader(validset, callback=padding) >>> model = mz.models.DenseBaseline() >>> model.params['task'] = mz.tasks.Ranking() >>> optimizer = 'adam' >>> embedding = np.random.uniform(-0.2, 0.2, ... (prpr.context['vocab_size'], 100)) >>> tuner = mz.auto.Tuner( ... params=model.params, ... optimizer=optimizer, ... trainloader=trainloader, ... validloader=validloader, ... embedding=embedding, ... num_runs=1, ... verbose=0 ... ) >>> results = tuner.tune() >>> sorted(results['best'].keys()) ['#', 'params', 'sample', 'score'] """ tuner = Tuner( params=params, optimizer=optimizer, trainloader=trainloader, validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs, metric=metric, mode=mode, num_runs=num_runs, verbose=verbose ) return tuner.tune()
[ 11748, 19720, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11748, 14750, 13, 44517, 62, 7442, 13, 2389, 62, 20, 13, 15699, 89, 2238, 355, 285, 89, 198, 6738, 14750, 13, 44517, 62, 7442, 13, 2389, 62, 20, 13, 15699, 89, 2238, ...
2.464373
1,628
"""Tests for the pixel observation wrapper.""" from typing import Optional import pytest import numpy as np import gym from gym import spaces from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY
[ 37811, 51, 3558, 329, 262, 17465, 13432, 29908, 526, 15931, 198, 6738, 19720, 1330, 32233, 198, 198, 11748, 12972, 9288, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11748, 11550, 198, 6738, 11550, 1330, 9029, 198, 6738, 11550, 13, 29988...
3.766667
60
import pyaudio import wave from scipy.fftpack import fft, ifft import numpy as np import matplotlib.pyplot as plt import cv2 from scipy import signal from swan import pycwt CHUNK = 1024 FORMAT = pyaudio.paInt16 # int16 CHANNELS = 1 # 1;monoral 2;- RATE = 22100 # 22.1kHz 44.1kHz RECORD_SECONDS = 5 # 5 WAVE_OUTPUT_FILENAME = "output2.wav" p = pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) s=1 # figure fig = plt.figure(figsize=(12, 10)) ax1 = fig.add_subplot(311) ax2 = fig.add_subplot(312) ax3 = fig.add_subplot(313) ax2.axis([0, 5, 200,20000]) ax2.set_yscale('log') while True: fig.delaxes(ax1) fig.delaxes(ax3) ax1 = fig.add_subplot(311) ax3 = fig.add_subplot(313) print("* recording") frames = [] for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) print("* done recording") wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() wavfile = WAVE_OUTPUT_FILENAME wr = wave.open(wavfile, "rb") ch = CHANNELS #wr.getnchannels() width = p.get_sample_size(FORMAT) #wr.getsampwidth() fr = RATE #wr.getframerate() fn = wr.getnframes() fs = fn / fr origin = wr.readframes(wr.getnframes()) data = origin[:fn] wr.close() sig = np.frombuffer(data, dtype="int16") /32768.0 t = np.linspace(0,fs, fn/2, endpoint=False) ax1.axis([0, 5, -0.0075,0.0075]) ax1.plot(t, sig) nperseg = 256 f, t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg) ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv') freq =fft(sig,int(fn/2)) Pyy = np.sqrt(freq*freq.conj())*2/fn f = np.arange(int(fn/2)) ax3.axis([200, 20000, 0,0.000075]) ax3.set_xscale('log') ax3.plot(f,Pyy) plt.pause(1) plt.savefig('figure'+str(s)+'.png') s += 1
[ 11748, 12972, 24051, 198, 11748, 6769, 198, 6738, 629, 541, 88, 13, 487, 83, 8002, 1330, 277, 701, 11, 611, 701, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 269, 85, ...
1.957759
1,089
#/ <reference path="./testBlocks/mb.ts" />
basic.forever(function_0)
[ 2, 14, 1279, 35790, 3108, 28, 1911, 14, 9288, 45356, 14, 2022, 13, 912, 1, 11037, 198, 35487, 13, 754, 332, 7, 8818, 62, 15, 8 ]
2.615385
26
#!/usr/bin/env python
# -*- coding:utf-8 -*-

import random

import numpy as np

import matplotlib.pyplot as plt

robot = Robot()
robot.set(0, 1, 0)
robot.set_noise(0.1, 0.05)
x_trajectory, y_trajectory = run(robot, 0.1, 1.0)
n = len(x_trajectory)

fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))
ax1.plot(x_trajectory, y_trajectory, 'g', label='PDcontroller')
ax1.plot(x_trajectory, np.zeros(n), 'r', label='reference')
plt.show()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 220, 198, 198, 11748, 4738, 198, 220, 198, 11748, 299, 32152, 355, 45941, 198, 220, 198, 11748, 2603, 29487, 8019, 13, 9078, 29...
2.00885
226
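The row above calls a Robot class and a run() helper that never appear in the snippet, so it cannot execute as shown. A minimal sketch of the missing pieces follows; every name and detail here is assumed for illustration (a simple kinematic robot plus a PD steering loop matching the run(robot, 0.1, 1.0) call), not recovered from the dataset.

import random
import numpy as np

class Robot:
    """Very small kinematic bicycle-model robot (illustrative only)."""

    def __init__(self, length=20.0):
        self.x = 0.0
        self.y = 0.0
        self.orientation = 0.0
        self.length = length
        self.steering_noise = 0.0
        self.distance_noise = 0.0

    def set(self, x, y, orientation):
        self.x, self.y, self.orientation = float(x), float(y), float(orientation)

    def set_noise(self, steering_noise, distance_noise):
        self.steering_noise = float(steering_noise)
        self.distance_noise = float(distance_noise)

    def move(self, steering, distance):
        # Add actuation noise, then advance the bicycle model.
        steering += random.gauss(0.0, self.steering_noise)
        distance += random.gauss(0.0, self.distance_noise)
        turn = np.tan(steering) * distance / self.length
        self.orientation = (self.orientation + turn) % (2.0 * np.pi)
        self.x += distance * np.cos(self.orientation)
        self.y += distance * np.sin(self.orientation)

def run(robot, tau_p, tau_d, n=100, speed=1.0):
    """Steer the robot toward the reference line y == 0 with a PD controller."""
    x_trajectory, y_trajectory = [], []
    prev_cte = robot.y
    for _ in range(n):
        cte = robot.y                                   # cross-track error
        steering = -tau_p * cte - tau_d * (cte - prev_cte)
        prev_cte = cte
        robot.move(steering, speed)
        x_trajectory.append(robot.x)
        y_trajectory.append(robot.y)
    return x_trajectory, y_trajectory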
from .clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler

__all__ = ("DistributedSampler", "UniformClipSampler", "RandomClipSampler")
[ 6738, 764, 15036, 62, 37687, 20053, 1330, 4307, 6169, 16305, 20053, 11, 35712, 2601, 541, 16305, 20053, 11, 14534, 2601, 541, 16305, 20053, 198, 198, 834, 439, 834, 796, 5855, 20344, 6169, 16305, 20053, 1600, 366, 3118, 6933, 2601, 541, ...
3.156863
51
# # PySNMP MIB module HH3C-PPPOE-SERVER-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-PPPOE-SERVER-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 19:16:17 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion") hh3cCommon, = mibBuilder.importSymbols("HH3C-OID-MIB", "hh3cCommon") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") ObjectIdentity, Integer32, IpAddress, NotificationType, Unsigned32, iso, MibIdentifier, Counter64, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity, Bits, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "IpAddress", "NotificationType", "Unsigned32", "iso", "MibIdentifier", "Counter64", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity", "Bits", "TimeTicks") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") hh3cPPPoEServer = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 2, 102)) hh3cPPPoEServer.setRevisions(('2009-05-06 00:00',)) if mibBuilder.loadTexts: hh3cPPPoEServer.setLastUpdated('200905060000Z') if mibBuilder.loadTexts: hh3cPPPoEServer.setOrganization('Hangzhou H3C Technologies Co., Ltd.') hh3cPPPoEServerObject = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1)) hh3cPPPoEServerMaxSessions = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: hh3cPPPoEServerMaxSessions.setStatus('current') hh3cPPPoEServerCurrSessions = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: hh3cPPPoEServerCurrSessions.setStatus('current') hh3cPPPoEServerAuthRequests = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: hh3cPPPoEServerAuthRequests.setStatus('current') hh3cPPPoEServerAuthSuccesses = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: hh3cPPPoEServerAuthSuccesses.setStatus('current') hh3cPPPoEServerAuthFailures = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: hh3cPPPoEServerAuthFailures.setStatus('current') hh3cPPPoESAbnormOffsThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite") if mibBuilder.loadTexts: hh3cPPPoESAbnormOffsThreshold.setStatus('current') hh3cPPPoESAbnormOffPerThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite") if mibBuilder.loadTexts: hh3cPPPoESAbnormOffPerThreshold.setStatus('current') hh3cPPPoESNormOffPerThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 8), 
Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite") if mibBuilder.loadTexts: hh3cPPPoESNormOffPerThreshold.setStatus('current') hh3cPPPoEServerTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2)) hh3cPPPoeServerTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0)) hh3cPPPoESAbnormOffsAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 1)) if mibBuilder.loadTexts: hh3cPPPoESAbnormOffsAlarm.setStatus('current') hh3cPPPoESAbnormOffPerAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 2)) if mibBuilder.loadTexts: hh3cPPPoESAbnormOffPerAlarm.setStatus('current') hh3cPPPoESNormOffPerAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 3)) if mibBuilder.loadTexts: hh3cPPPoESNormOffPerAlarm.setStatus('current') mibBuilder.exportSymbols("HH3C-PPPOE-SERVER-MIB", hh3cPPPoEServerMaxSessions=hh3cPPPoEServerMaxSessions, hh3cPPPoEServerObject=hh3cPPPoEServerObject, hh3cPPPoeServerTrapPrefix=hh3cPPPoeServerTrapPrefix, hh3cPPPoEServerAuthFailures=hh3cPPPoEServerAuthFailures, hh3cPPPoEServer=hh3cPPPoEServer, PYSNMP_MODULE_ID=hh3cPPPoEServer, hh3cPPPoESAbnormOffsAlarm=hh3cPPPoESAbnormOffsAlarm, hh3cPPPoEServerAuthRequests=hh3cPPPoEServerAuthRequests, hh3cPPPoEServerAuthSuccesses=hh3cPPPoEServerAuthSuccesses, hh3cPPPoESNormOffPerThreshold=hh3cPPPoESNormOffPerThreshold, hh3cPPPoEServerCurrSessions=hh3cPPPoEServerCurrSessions, hh3cPPPoEServerTraps=hh3cPPPoEServerTraps, hh3cPPPoESAbnormOffPerThreshold=hh3cPPPoESAbnormOffPerThreshold, hh3cPPPoESAbnormOffPerAlarm=hh3cPPPoESAbnormOffPerAlarm, hh3cPPPoESAbnormOffsThreshold=hh3cPPPoESAbnormOffsThreshold, hh3cPPPoESNormOffPerAlarm=hh3cPPPoESNormOffPerAlarm)
[ 2, 198, 2, 9485, 15571, 7378, 337, 9865, 8265, 47138, 18, 34, 12, 10246, 16402, 36, 12, 35009, 5959, 12, 8895, 33, 357, 4023, 1378, 16184, 76, 489, 8937, 13, 785, 14, 79, 893, 11632, 8, 198, 2, 7054, 45, 13, 16, 2723, 2393, 1378...
2.462229
2,118
name = input("masukkan nama pembeli = ") alamat= input("Alamat = ") NoTelp = input("No Telp = ") print("\n") print("=================INFORMASI HARGA MOBIL DEALER JAYA ABADI===============") print("Pilih Jenis Mobil :") print("\t 1.Daihatsu ") print("\t 2.Honda ") print("\t 3.Toyota ") print("") pilihan = int(input("Pilih jenis mobil yang ingin dibeli : ")) print("") if (pilihan==1): print("<<<<<<<< Macam macam mobil pada Daihatsu >>>>>>>>>") print("\ta.Grand New Xenia") print("\tb.All New Terios") print("\tc.New Ayla") Pilih1 = input("Mana yang ingin anda pilih ?? = ") if(Pilih1 == "a"): print("Harga mobil Grand New Xenia adalah 183 juta ") elif(Pilih1== "b"): print("Harga mobil All New Terios adalah 215 juta") elif(Pilih1== "c"): print("Harga mobil New Ayla adalah 110 juta") else: print("Tidak terdefinisi") elif (pilihan==2): print("<<<<<<<< Macam macam mobil pada Honda >>>>>>>>>") print("\ta.Honda Brio Satya S") print("\tb.Honda Jazz ") print("\tb.Honda Mobilio ") pilih2 = input("Mana yang ingin anda pilih??") if(pilih2=="a"): print("Harga mobil HOnda Brio Satya S adalah 131 juta") elif(pilih2=="b"): print("Harga mobil Honda Jazz adalah 232 juta") elif(pilih2=="c"): print("Harga mobil Honda mobilio adalah 189 juta") else: print("Tidak terdefinisi") elif (pilihan==3): print("<<<<<<<< Macam macam mobil pada Toyota>>>>>>>>?") print("\ta.Alphard") print("\tb.Camry") print("\tc.Fortuner") pilih3 = input("Mana yang ingin anda pilih??") if (pilih3=="a"): print("Harga mobil Alphard adalah 870 juta") elif (pilih3=="b"): print("Harga mobil Camry adalah 560 Juta") elif (pilih3=="c"): print("Harga mobil Fortuner adalah 492 Juta")
[ 3672, 796, 5128, 7203, 5356, 2724, 27541, 299, 1689, 279, 368, 6667, 72, 796, 366, 8, 198, 44949, 265, 28, 5128, 7203, 2348, 321, 265, 796, 366, 8, 198, 2949, 33317, 79, 796, 5128, 7203, 2949, 12088, 79, 796, 366, 8, 198, 4798, 72...
1.916432
1,065
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow import numpy as np import oneflow.typing as tp from test_util import GenArgList import unittest from collections import OrderedDict from typing import Dict import os if __name__ == "__main__": unittest.main()
[ 37811, 198, 15269, 12131, 383, 1881, 37535, 46665, 13, 1439, 2489, 10395, 13, 198, 198, 26656, 15385, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 5832, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.681416
226
import unittest
import datetime

from dida import schemas, triggers
from marshmallow import ValidationError
[ 11748, 555, 715, 395, 198, 11748, 4818, 8079, 198, 198, 6738, 750, 64, 1330, 3897, 5356, 11, 20022, 198, 6738, 22397, 42725, 1330, 3254, 24765, 12331, 628 ]
4.037037
27
from django.shortcuts import render from rest_framework import status from rest_framework.generics import ( ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import permission_classes from apps.configuration.models import Book from apps.hardspot.models import HardSpot from .models import Content,ContentContributors from .serializers import ( ContentListSerializer, BookNestedSerializer, BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, ) from django.utils.decorators import method_decorator from django.contrib.auth.decorators import permission_required from rest_framework.parsers import MultiPartParser from apps.dataupload.models import (Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword, ) import json import pandas as pd from evolve import settings from evolve import settings from azure.storage.blob import ( BlockBlobService, ContainerPermissions ) from datetime import datetime, timedelta import os import itertools from django.db.models import Q import threading account_name = settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key)
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 1334, 62, 30604, 1330, 3722, 198, 6738, 1334, 62, 30604, 13, 8612, 873, 1330, 357, 198, 220, 220, 220, 7343, 2969, 3824, 769, 11, 198, 220, 220, 220, 7343, 16447, 2969, 3824...
3.117175
623
# -*- coding: utf-8 -*-

"""
Example to train and evaluate a model with given data

@author: Quoc-Tuan Truong <tuantq.vnu@gmail.com>
"""

from cornac.data import Reader
from cornac.eval_methods import BaseMethod
from cornac.models import MF
from cornac.metrics import MAE, RMSE
from cornac.utils import cache

# Download MovieLens 100K provided training and test splits
reader = Reader()
train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base'))
test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test'))

eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data,
                                     exclude_unknowns=False, verbose=True)

mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True,
        early_stop=True, verbose=True)

# Evaluation
result = eval_method.evaluate(model=mf, metrics=[MAE(), RMSE()], user_based=True)
print(result)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 16281, 284, 4512, 290, 13446, 257, 2746, 351, 1813, 1366, 198, 198, 31, 9800, 25, 2264, 420, 12, 51, 7258, 13616, 506, 1279, 28047, 415, 80, 13, 85, ...
2.525907
386
import os
import numpy as np
import cv2
import albumentations
from PIL import Image
from torch.utils.data import Dataset

from taming.data.sflckr import SegmentationBase # for examples included in repo

# With semantic map and scene label

if __name__ == "__main__":
    dset = ADE20kValidation()
    ex = dset[0]
    for k in ["image", "scene_category", "segmentation"]:
        print(type(ex[k]))
        try:
            print(ex[k].shape)
        except:
            print(ex[k])
[ 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 269, 85, 17, 198, 11748, 435, 65, 1713, 602, 198, 6738, 350, 4146, 1330, 7412, 198, 6738, 28034, 13, 26791, 13, 7890, 1330, 16092, 292, 316, 198, 198, 6738, 256, 3723, 13, ...
2.397059
204
#!/usr/bin/env python3 """ fr_distributed_cleanup.py - a script to remove hosts which have migrated to other feeder hubs. To be run on Federated Reporting superhub after each import of feeder data. First, to setup, enable fr_distributed_cleanup by setting a class in augments (def.json). This enables policy in cfe_internal/enterprise/federation/federation.cf ```json { "classes": { "cfengine_mp_enable_fr_distributed_cleanup": [ "any::" ] } } ``` After the policy has run on superhub and feeders, run this script to setup fr_distributed_cleanup role and account on all feeders and superhubs with proper RBAC settings for normal operation. You will be prompted for superhub admin credentials and then admin credentials on each feeder. """ import argparse import logging import os import platform import string import random import subprocess import sys from getpass import getpass from nova_api import NovaApi from cfsecret import read_secret, write_secret WORKDIR = None CFE_FR_TABLES = None # get WORKDIR and CFE_FR_TABLES from config.sh config_sh_path = os.path.join(os.path.dirname(__file__), "config.sh") cmd = "source {}; echo $WORKDIR; echo $CFE_FR_TABLES".format(config_sh_path) with subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True, executable="/bin/bash" ) as proc: lines = proc.stdout.readlines() WORKDIR = lines[0].decode().strip() CFE_FR_TABLES = [table.strip() for table in lines[1].decode().split()] if not WORKDIR or not CFE_FR_TABLES: print("Unable to get WORKDIR and CFE_FR_TABLES values from config.sh") sys.exit(1) # Primary dir in which to place various needed files DISTRIBUTED_CLEANUP_DIR = "/opt/cfengine/federation/cftransport/distributed_cleanup" # collect cert files from /var/cfengine/httpd/ssl/certs on # superhub and feeders and cat all together into hubs.cert CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, "hubs.cert") # Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything. # api calls will overwrite fr_distributed_cleanup user and role on superhub and all feeders. DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, "state/fr_distributed_cleanup.cfsecret") if __name__ == "__main__": main() else: raise ImportError("fr_distributed_cleanup.py must only be used as a script!")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 37811, 198, 8310, 62, 17080, 6169, 62, 27773, 929, 13, 9078, 532, 257, 4226, 284, 4781, 11453, 543, 423, 40227, 284, 198, 847, 3745, 263, 38459, 13, 1675, 307, 1057, 319, 35089, ...
2.976744
774
#Doesn't work.
import time

fibonacci = [1, 1]
n = int(input())
while len(fibonacci) < n:
    fibonacci.append(fibonacci[-1] + fibonacci[-2])
for i in range(n):
    print(fibonacci[i], end=' ')
[ 2, 13921, 77, 470, 670, 13, 201, 198, 11748, 640, 201, 198, 201, 198, 69, 571, 261, 44456, 796, 685, 16, 11, 352, 60, 201, 198, 77, 796, 493, 7, 15414, 28955, 201, 198, 4514, 18896, 7, 69, 571, 261, 44456, 8, 1279, 299, 25, 20...
2.009901
101
from setuptools import setup

setup(name="csgoinvshuffle")
[ 6738, 900, 37623, 10141, 1330, 9058, 198, 198, 40406, 7, 3672, 2625, 6359, 2188, 16340, 1477, 18137, 4943, 198 ]
3.105263
19
""" basic logging functionality based on a producer/consumer scheme. XXX implement this API: (maybe put it into slogger.py?) log = Logger( info=py.log.STDOUT, debug=py.log.STDOUT, command=None) log.info("hello", "world") log.command("hello", "world") log = Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None) """ import py, sys def default_consumer(msg): """ the default consumer, prints the message to stdout (using 'print') """ sys.stderr.write(str(msg)+"\n") default_keywordmapper = KeywordMapper() # # Consumers # def STDOUT(msg): """ consumer that writes to sys.stdout """ sys.stdout.write(str(msg)+"\n") def STDERR(msg): """ consumer that writes to sys.stderr """ sys.stderr.write(str(msg)+"\n") for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split(): _prio = "LOG_" + _prio try: setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) except AttributeError: pass
[ 37811, 201, 198, 35487, 18931, 11244, 1912, 319, 257, 9920, 14, 49827, 7791, 13, 201, 198, 201, 198, 43145, 3494, 428, 7824, 25, 357, 25991, 1234, 340, 656, 25801, 1362, 13, 9078, 10091, 201, 198, 201, 198, 220, 220, 220, 220, 220, ...
2.118959
538
import re
from model.contact import Contact
[ 11748, 302, 198, 6738, 2746, 13, 32057, 1330, 14039, 628, 628, 628 ]
4.083333
12
""" ABP analyzer and graphics tests """ cases = [ ('Run Pymodel Graphics to generate dot file from FSM model, no need use pma', 'pmg ABP'), ('Generate SVG file from dot', 'dotsvg ABP'), # Now display ABP.dot in browser ('Run PyModel Analyzer to generate FSM from original FSM, should be the same', 'pma ABP'), ('Run PyModel Graphics to generate a file of graphics commands from new FSM', 'pmg ABPFSM'), ('Generate an svg file from the graphics commands', 'dotsvg ABPFSM'), # Now display ABPFSM.svg in browser, should look the same as ABP.svg ]
[ 37811, 198, 6242, 47, 4284, 9107, 290, 9382, 5254, 198, 37811, 198, 198, 33964, 796, 685, 198, 220, 220, 220, 19203, 10987, 9485, 19849, 19840, 284, 7716, 16605, 2393, 422, 376, 12310, 2746, 11, 645, 761, 779, 279, 2611, 3256, 198, 22...
2.78341
217
# Generated by Django 3.1.2 on 2020-10-26 12:21

from django.db import migrations, models
import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 17, 319, 12131, 12, 940, 12, 2075, 1105, 25, 2481, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, ...
2.818182
44
""" PackageTools - A set of tools to aid working with packages. Copyright (c) 1998-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com Copyright (c) 2000-2015, eGenix.com Software GmbH; mailto:info@egenix.com See the documentation for further information on copyrights, or contact the author. All Rights Reserved. """ __version__ = '0.4.0' import os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins # RE to identify Python modules suffixes = projection(imp.get_suffixes(),0) module_name = re.compile('(.*)(' + '|'.join(suffixes) + ')$') initmodule_name = re.compile('__init__(' + '|'.join(suffixes) + ')$') initmodule_names = [] for suffix in suffixes: initmodule_names.append('__init__' + suffix) def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0, pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): """ Return a list of package names found in dir. Packages are Python modules and subdirectories that provide an __init__ module. The .py extension is removed from the files. The __init__ modules are not considered being seperate packages. If files_only is true, only Python files are included in the search (subdirectories are *not* taken into account). If ignore_modules is true (default is false), modules are ignored. If recursive is true the search recurses into package directories. pkgbasename and pkgdict are only used during recursion. """ l = listdir(dir) if pkgdict is None: pkgdict = {} if files_only: for filename in l: m = module_name.match(filename) if m is not None and \ m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] = 1 else: for filename in l: path = join(dir, filename) if isdir(path): # Check for __init__ module(s) for name in initmodule_names: if isfile(join(path, name)): pkgname = pkgbasename + filename pkgdict[pkgname] = 1 if recursive: find_packages(path, recursive=1, pkgbasename=pkgname + '.', pkgdict=pkgdict) break elif not ignore_modules: m = module_name.match(filename) if m is not None and \ m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] = 1 return pkgdict.keys() def find_subpackages(package, recursive=0, splitpath=os.path.split): """ Assuming that package points to a loaded package module, this function tries to identify all subpackages of that package. Subpackages are all Python files included in the same directory as the module plus all subdirectories having an __init__.py file. The modules name is prepended to all subpackage names. The module location is found by looking at the __file__ attribute that non-builtin modules define. The function uses the __all__ attribute from the package __init__ module if available. If recursive is true (default is false), then subpackages of subpackages are recursively also included in the search. """ if not recursive: # Try the __all__ attribute... try: subpackages = list(package.__all__) except (ImportError, AttributeError): # Did not work, then let's try to find the subpackages by looking # at the directory where package lives... subpackages = find_packages(package.__path__[0], recursive=recursive) else: # XXX Recursive search does not support the __all__ attribute subpackages = find_packages(package.__path__[0], recursive=recursive) basename = package.__name__ + '.' 
for i,name in irange(subpackages): subpackages[i] = basename + name return subpackages def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): """ Returns the module object that the callee is calling from. upcount can be given to indicate how far up the execution stack the function is supposed to look (1 == direct callee, 2 == callee of callee, etc.). """ try: 1/0 except: frame = exc_info()[2].tb_frame for i in trange(upcount): frame = frame.f_back name = frame.f_globals['__name__'] del frame return sys.modules[name] def _module_loader(name, locals, globals, sysmods, errors='strict', importer=__import__, reloader=reload, from_list=['*']): """ Internal API for loading a module """ if not sysmods.has_key(name): is_new = 1 else: is_new = 0 try: mod = importer(name, locals, globals, from_list) if reload and not is_new: mod = reloader(mod) except KeyboardInterrupt: # Pass through; SystemExit will be handled by the error handler raise except Exception, why: if errors == 'ignore': pass elif errors == 'strict': raise elif callable(errors): errors(name, sys.exc_info()[0], sys.exc_info()[1]) else: raise ValueError,'unknown errors value' else: return mod return None def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): """ Import all modules given in modnames into module. module defaults to the caller's module. modnames may contain dotted package names. If errors is 'strict' (default), then ImportErrors and SyntaxErrors are raised. If set to 'ignore', they are silently ignored. If errors is a callable object, then it is called with arguments (modname, errorclass, errorvalue). If the handler returns, processing continues. If reload is true (default is false), all already modules among the list will be forced to reload. """ if module is None: module = _thismodule(2) locals = module.__dict__ sysmods = sys.modules for name in modnames: mod = _module_loader(name, locals, locals, sysmods, errors=errors) if mod is not None: locals[name] = mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): """ Imports all modules in modnames using the given namespaces and returns list of corresponding module objects. If errors is 'strict' (default), then ImportErrors and SyntaxErrors are raised. If set to 'ignore', they are silently ignored. If errors is a callable object, then it is called with arguments (modname, errorclass, errorvalue). If the handler returns, processing continues. If reload is true (default is false), all already modules among the list will be forced to reload. """ modules = [] append = modules.append sysmods = sys.modules for name in modnames: mod = _module_loader(name, locals, globals, sysmods, errors=errors) if mod is not None: append(mod) return modules def import_subpackages(module, reload=0, recursive=0, import_modules=import_modules, find_subpackages=find_subpackages): """ Does a subpackages scan using find_subpackages(module) and then imports all submodules found into module. The module location is found by looking at the __file__ attribute that non-builtin modules define. The function uses the __all__ attribute from the package __init__ module if available. If reload is true (default is false), all already modules among the list will be forced to reload. 
""" import_modules(find_subpackages(module, recursive=recursive), module, reload=reload) def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0, recursive=0, load_modules=load_modules, find_subpackages=find_subpackages): """ Same as import_subpackages but with load_modules functionality, i.e. imports the modules and also returns a list of module objects. If errors is 'strict' (default), then ImportErrors are raised. If set to 'ignore', they are silently ignored. If reload is true (default is false), all already modules among the list will be forced to reload. """ return load_modules(find_subpackages(module, recursive=recursive), locals, globals, errors=errors, reload=reload) def modules(names, extract=extract): """ Converts a list of module names into a list of module objects. The modules must already be loaded. """ return extract(sys.modules, names) def package_modules(pkgname): """ Returns a list of all modules belonging to the package with the given name. The package must already be loaded. Only the currently registered modules are included in the list. """ match = pkgname + '.' match_len = len(match) mods = [sys.modules[pkgname]] for k,v in sys.modules.items(): if k[:match_len] == match and v is not None: mods.append(v) return mods def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): """ Find all subclasses of baseclass or simply all classes (if baseclass is None) defined by the module objects in list mods. If annotated is true the returned list will contain tuples (module_object,name,class_object) for each class found where module_object is the module where the class is defined. """ classes = [] for mod in mods: for name,obj in mod.__dict__.items(): if type(obj) is ClassType: if baseclass and not issubclass(obj,baseclass): continue if annotated: classes.append((mod, name, obj)) else: classes.append(obj) return classes def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): """ Find all instances of baseclass defined by the module objects in list mods. If annotated is true the returned list will contain tuples (module_object,name,instances_object) for each instances found where module_object is the module where the instances is defined. """ instances = [] for mod in mods: for name,obj in mod.__dict__.items(): if isinstance(obj,baseclass): if annotated: instances.append((mod,name,obj)) else: instances.append(obj) return instances
[ 37811, 15717, 33637, 532, 317, 900, 286, 4899, 284, 6133, 1762, 351, 10392, 13, 628, 220, 220, 220, 15069, 357, 66, 8, 7795, 12, 11024, 11, 13067, 12, 31258, 20607, 7423, 26, 6920, 1462, 25, 7617, 31, 293, 2022, 3686, 13, 785, 198, ...
2.360252
4,916
"""Regression tests for what was in Python 2's "urllib" module""" import urllib.parse import urllib.request import urllib.error import http.client import email.message import io import unittest from unittest.mock import patch from test import support import os try: import ssl except ImportError: ssl = None import sys import tempfile from nturl2path import url2pathname, pathname2url from base64 import b64encode import collections def hexescape(char): """Escape char as RFC 2396 specifies""" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr = "0%s" % hex_repr return "%" + hex_repr # Shortcut for testing FancyURLopener _urlopener = None def urlopen(url, data=None, proxies=None): """urlopen(url [, data]) -> open file-like object""" global _urlopener if proxies is not None: opener = urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: opener = FancyURLopener() _urlopener = opener else: opener = _urlopener if data is None: return opener.open(url) else: return opener.open(url, data) def test_read_1_0(self): self.check_read(b"1.0") def test_read_1_1(self): self.check_read(b"1.1") def test_read_bogus(self): # urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: self.assertRaises(OSError, urlopen, "http://python.org/") finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: msg = "Redirection to url 'file:" with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen("http://python.org/") finally: self.unfakehttp() def test_redirect_limit_independent(self): # Ticket #12923: make sure independent requests each use their # own retry limit. for i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found Location: file://guidocomputer.athome.com:/python/license Connection: close ''', mock_close=True) try: self.assertRaises(urllib.error.HTTPError, urlopen, "http://something") finally: self.unfakehttp() # Just commented them out. # Can't really tell why keep failing in windows and sparc. # Everywhere else they work ok, but on those machines, sometimes # fail in one of the tests, sometimes in other. I have a linux, and # the tests go ok. # If anybody has one of the problematic environments, please help! # . 
Facundo # # def server(evt): # import socket, time # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind(("", 9093)) # serv.listen() # try: # conn, addr = serv.accept() # conn.send("1 Hola mundo\n") # cantdata = 0 # while cantdata < 13: # data = conn.recv(13-cantdata) # cantdata += len(data) # time.sleep(.3) # conn.send("2 No more lines\n") # conn.close() # except socket.timeout: # pass # finally: # serv.close() # evt.set() # # class FTPWrapperTests(unittest.TestCase): # # def setUp(self): # import ftplib, time, threading # ftplib.FTP.port = 9093 # self.evt = threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) # # def tearDown(self): # self.evt.wait() # # def testBasic(self): # # connects # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # ftp.close() # # def testTimeoutNone(self): # # global default timeout is ignored # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutDefault(self): # # global default timeout is used # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutValue(self): # ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() if __name__ == '__main__': unittest.main()
[ 37811, 8081, 2234, 5254, 329, 644, 373, 287, 11361, 362, 338, 366, 333, 297, 571, 1, 8265, 37811, 198, 198, 11748, 2956, 297, 571, 13, 29572, 198, 11748, 2956, 297, 571, 13, 25927, 198, 11748, 2956, 297, 571, 13, 18224, 198, 11748, ...
2.1875
2,528
# Python 2 and 3
from __future__ import unicode_literals

from ...models import Address, SeasonalPriceBand

from ..base import Product
[ 2, 11361, 362, 290, 513, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 2644, 27530, 1330, 17917, 11, 7369, 282, 18124, 31407, 198, 198, 6738, 11485, 8692, 1330, 8721, 628 ]
3.777778
36
from discord.ext import commands
import json
import random


with open("assets/json/questions.json") as data:
    data = json.load(data)
    dares = data["dares"]
[ 6738, 36446, 13, 2302, 1330, 9729, 198, 11748, 33918, 198, 11748, 4738, 628, 198, 4480, 1280, 7203, 19668, 14, 17752, 14, 6138, 507, 13, 17752, 4943, 355, 1366, 25, 198, 220, 220, 220, 1366, 796, 33918, 13, 2220, 7, 7890, 8, 198, 22...
3
55
APIC_IP="sandboxapic.cisco.com" APIC_PORT="443" GROUP='group-xx'
[ 2969, 2149, 62, 4061, 2625, 38142, 3524, 499, 291, 13, 66, 4861, 13, 785, 1, 198, 2969, 2149, 62, 15490, 2625, 34938, 1, 198, 46846, 11639, 8094, 12, 5324, 6, 198 ]
2.096774
31
# Copyright 2013-2015 David Mohr
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from random import randint

import mtpy

from . import *  # noqa
[ 2, 15069, 2211, 12, 4626, 3271, 9719, 81, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, ...
3.714286
175
# encoding: utf-8 import sys from market_maker import OrderManager from settings import * import os from pymongo import MongoClient, ASCENDING from pymongo.errors import ConnectionFailure from datetime import datetime , timedelta import numpy as np ######################################################################################################################## # constants EXCHANGE_BITMEX = "BITMEX" EMPTY_STRING = "" EMPTY_FLOAT = 0.0 EMPTY_INT = 0 #---------------------------------------------------------------------- ''' tick ''' ######################################################################## ''' engine ''' ''' Engine ''' ######################################################################## ########################################################################
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 198, 11748, 25064, 198, 198, 6738, 1910, 62, 10297, 1330, 8284, 13511, 198, 6738, 6460, 1330, 1635, 198, 11748, 28686, 198, 198, 6738, 279, 4948, 25162, 1330, 42591, 11792, 11, 25400, 10619, 2751, ...
4.668571
175
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Utilities for creating VCG and Dot diagrams"""

from logilab.common.vcgutils import VCGPrinter
from logilab.common.graph import DotBackend

from pylint.pyreverse.utils import is_exception
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 3648, 12, 6390, 41605, 4146, 6242, 311, 13, 32, 13, 357, 40313, 11, 8782, 19240, 737, 198, 2, 2638, 1378, 2503, 13, 6404, 346, 397, 13, 8310, ...
3.522184
293
import logging
from ariadne import MutationType, convert_kwargs_to_snake_case
from config import clients, messages, queue

mutation = MutationType()
[ 11748, 18931, 198, 6738, 257, 21244, 710, 1330, 337, 7094, 6030, 11, 10385, 62, 46265, 22046, 62, 1462, 62, 16184, 539, 62, 7442, 198, 6738, 4566, 1330, 7534, 11, 6218, 11, 16834, 198, 198, 76, 7094, 796, 337, 7094, 6030, 3419, 628, ...
3.511628
43
import os from django.conf import settings from django.template.loader import render_to_string from django.utils import timezone import json import requests from urllib.parse import quote, urlencode from hc.accounts.models import Profile from hc.lib import emails from hc.lib.string import replace try: import apprise except ImportError: # Enforce settings.APPRISE_ENABLED = False class Sms(HttpTransport): URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" class WhatsApp(HttpTransport): URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" class Trello(HttpTransport): URL = "https://api.trello.com/1/cards" class Apprise(HttpTransport): class MsTeams(HttpTransport): class Zulip(HttpTransport):
[ 11748, 28686, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 28243, 13, 29356, 1330, 8543, 62, 1462, 62, 8841, 198, 6738, 42625, 14208, 13, 26791, 1330, 640, 11340, 198, 11748, 33918, 198, 11748, 7007, ...
2.800712
281
#!/usr/bin/env python3 # # Copyright 2022 Graviti. Licensed under MIT License. # """Portex type builder related classes.""" from hashlib import md5 from pathlib import Path from shutil import rmtree from subprocess import PIPE, CalledProcessError, run from tempfile import gettempdir from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar import yaml import graviti.portex.ptype as PTYPE from graviti.exception import GitCommandError, GitNotFoundError from graviti.portex.base import PortexRecordBase from graviti.portex.external import PortexExternalType from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory from graviti.portex.package import ExternalPackage, Imports, packages from graviti.portex.param import Param, Params from graviti.portex.register import ExternalContainerRegister if TYPE_CHECKING: from subprocess import CompletedProcess from graviti.portex.base import PortexType EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I = TypeVar("_I", bound="BuilderImports") def build_package(url: str, revision: str) -> ExternalPackage: """Build an external package. Arguments: url: The git repo url of the external package. revision: The git repo revision (tag/commit) of the external package. Returns: The :class:`ExternalPackage` instance. """ builder = PackageBuilder(url, revision) package = builder.build() packages.externals[url, revision] = package return package
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 198, 2, 15069, 33160, 32599, 8846, 13, 49962, 739, 17168, 13789, 13, 198, 2, 198, 37811, 13924, 1069, 2099, 27098, 3519, 6097, 526, 15931, 628, 198, 6738, 12234, 8019, 1330, 452...
3.295259
464
from typing import Dict, List, Any

from ..df.types import Definition
from ..df.base import op
from ..util.data import traverse_get

MAPPING = Definition(name="mapping", primitive="map")
MAPPING_TRAVERSE = Definition(name="mapping_traverse", primitive="List[str]")
MAPPING_KEY = Definition(name="key", primitive="str")
MAPPING_VALUE = Definition(name="value", primitive="generic")
[ 6738, 19720, 1330, 360, 713, 11, 7343, 11, 4377, 198, 198, 6738, 11485, 7568, 13, 19199, 1330, 30396, 198, 6738, 11485, 7568, 13, 8692, 1330, 1034, 198, 6738, 11485, 22602, 13, 7890, 1330, 38138, 62, 1136, 198, 198, 44, 24805, 2751, 7...
3.301724
116
import time import sys import pkg_resources import os import retrying from sqlalchemy.exc import IntegrityError # anchore modules import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from anchore_engine.subsys import logger from anchore_engine.configuration import localconfig from anchore_engine.clients.services import simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue import SimpleQueueClient from anchore_engine.service import ApiService, LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds import ( VulnerabilityFeed, NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed, feed_registry, NvdFeed, ) # from anchore_engine.subsys.logger import enable_bootstrap_logging # enable_bootstrap_logging() from anchore_engine.utils import timer feed_sync_queuename = "feed_sync_tasks" system_user_auth = None feed_sync_msg = {"task_type": "feed_sync", "enabled": True} # These are user-configurable but mostly for debugging and testing purposes try: FEED_SYNC_RETRIES = int(os.getenv("ANCHORE_FEED_SYNC_CHECK_RETRIES", 5)) except ValueError: logger.exception( "Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5" ) FEED_SYNC_RETRIES = 5 try: FEED_SYNC_RETRY_BACKOFF = int( os.getenv("ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF", 5) ) except ValueError: logger.exception( "Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5" ) FEED_SYNC_RETRY_BACKOFF = 5 try: feed_config_check_retries = int(os.getenv("FEED_CLIENT_CHECK_RETRIES", 3)) except ValueError: logger.exception( "Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3" ) feed_config_check_retries = 3 try: feed_config_check_backoff = int(os.getenv("FEED_CLIENT_CHECK_BACKOFF", 5)) except ValueError: logger.exception( "Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5" ) feed_config_check_backoff = 5 # service funcs (must be here) def process_preflight(): """ Execute the preflight functions, aborting service startup if any throw uncaught exceptions or return False return value :return: """ preflight_check_functions = [init_db_content, init_feed_registry] for fn in preflight_check_functions: try: fn() except Exception as e: logger.exception( "Preflight checks failed with error: {}. Aborting service startup".format( e ) ) sys.exit(1) def init_db_content(): """ Initialize the policy engine db with any data necessary at startup. :return: """ return _init_distro_mappings() def handle_feed_sync(*args, **kwargs): """ Initiates a feed sync in the system in response to a message from the queue :param args: :param kwargs: :return: """ system_user = _system_creds() logger.info("init args: {}".format(kwargs)) cycle_time = kwargs["mythread"]["cycle_timer"] while True: config = localconfig.get_config() feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True) if feed_sync_enabled: logger.info("Feed sync task executor activated") try: run_feed_sync(system_user) except Exception as e: logger.error("Caught escaped error in feed sync handler: {}".format(e)) finally: logger.info("Feed sync task executor complete") else: logger.info("sync_enabled is set to false in config - skipping feed sync") time.sleep(cycle_time) return True def handle_feed_sync_trigger(*args, **kwargs): """ Checks to see if there is a task for a feed sync in the queue and if not, adds one. 
Interval for firing this should be longer than the expected feed sync duration. :param args: :param kwargs: :return: """ system_user = _system_creds() logger.info("init args: {}".format(kwargs)) cycle_time = kwargs["mythread"]["cycle_timer"] while True: config = localconfig.get_config() feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True) if feed_sync_enabled: logger.info("Feed Sync task creator activated") try: push_sync_task(system_user) logger.info("Feed Sync Trigger done, waiting for next cycle.") except Exception as e: logger.error( "Error caught in feed sync trigger handler after all retries. Will wait for next cycle" ) finally: logger.info("Feed Sync task creator complete") else: logger.info( "sync_enabled is set to false in config - skipping feed sync trigger" ) time.sleep(cycle_time) return True
[ 11748, 640, 198, 11748, 25064, 198, 11748, 279, 10025, 62, 37540, 198, 11748, 28686, 198, 11748, 1005, 14992, 198, 198, 6738, 44161, 282, 26599, 13, 41194, 1330, 39348, 12331, 198, 198, 2, 12619, 382, 13103, 198, 11748, 12619, 382, 62, ...
2.486239
2,071
"""Scraper for Supreme Court of U.S. CourtID: scotus Court Short Name: scotus History: - 2014-07-20 - Created by Andrei Chelaru, reviewed by MLR - 2017-10-09 - Updated by MLR. """ from datetime import datetime from juriscraper.OralArgumentSite import OralArgumentSite
[ 37811, 3351, 38545, 329, 5617, 3078, 286, 471, 13, 50, 13, 198, 36699, 2389, 25, 629, 313, 385, 198, 36699, 10073, 6530, 25, 629, 313, 385, 198, 18122, 25, 198, 532, 1946, 12, 2998, 12, 1238, 532, 15622, 416, 10948, 72, 11425, 11493...
3.067416
89
# -*- coding: utf-8 -*- import argparse import os import shutil import time import numpy as np import random from collections import OrderedDict import torch import torch.backends.cudnn as cudnn from callbacks import AverageMeter from data_utils.causal_data_loader_frames import VideoFolder from utils import save_results from tqdm import tqdm parser = argparse.ArgumentParser(description='Counterfactual CAR') # Path, dataset and log related arguments parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to the folder with frames') parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json', help='path to the json file with train video meta data') parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json', help='path to the json file with validation video meta data') parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json', help='path to the json file with ground truth labels') parser.add_argument('--dataset', default='smth_smth', help='which dataset to train') parser.add_argument('--logname', default='my_method', help='name of the experiment for checkpoints and logs') parser.add_argument('--print_freq', '-p', default=20, type=int, metavar='N', help='print frequency (default: 20)') parser.add_argument('--ckpt', default='./ckpt', help='folder to output checkpoints') parser.add_argument('--resume_vision', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_coord', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') # model, image&feature dim and training related arguments parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord', default='interaction') parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum', type=str, help='function for fusing activations from each branch') parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for image-based features') parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for coord-based features') parser.add_argument('--size', default=224, type=int, metavar='N', help='primary image input size') parser.add_argument('--num_boxes', default=4, type=int, help='num of boxes for each image') parser.add_argument('--num_frames', default=16, type=int, help='num of frames for the model') parser.add_argument('--num_classes', default=174, type=int, help='num of class in the model') parser.add_argument('--epochs', default=30, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start_epoch', default=None, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--batch_size', '-b', default=16, type=int, metavar='N', help='mini-batch size') parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs="+", metavar='LRSteps', help='epochs to decay learning rate by 10') parser.add_argument('--momentum', default=0.9, type=float, 
metavar='M', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--clip_gradient', '-cg', default=5, type=float, metavar='W', help='gradient norm clipping (default: 5)') parser.add_argument('--search_stride', type=int, default=5, help='test performance every n strides') # train mode, hardware setting and others related arguments parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on validation set') parser.add_argument('--parallel', default=True, type=bool, help='whether or not train with multi GPUs') parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the index of gpu you want to use') best_loss = 1000000 def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None): """Sets the learning rate to the initial LR decayed by 10""" decay = 0.1 ** (sum(epoch >= np.array(lr_steps))) lr = args.lr * decay if branch_name == 'vision': for param_group in optimizer.param_groups: param_group['lr'] = lr * 0.8 elif branch_name == 'coord': for param_group in optimizer.param_groups: param_group['lr'] = lr elif branch_name == 'fusion': for param_group in optimizer.param_groups: param_group['lr'] = lr else: for param_group in optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res if __name__ == '__main__': main()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 201, 198, 11748, 1822, 29572, 201, 198, 11748, 28686, 201, 198, 11748, 4423, 346, 201, 198, 11748, 640, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 47...
2.423331
2,726
# encoding: utf-8

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS

import logging

app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}})
app.config.from_object('config.current')

db = SQLAlchemy(app)

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

'''

'''
import application.jwt
import application.routes.config
import application.routes.user
import application.routes.permission
import application.routes.role
import application.routes.access

# after Model defined
db.create_all()
[ 2, 21004, 25, 3384, 69, 12, 23, 201, 198, 201, 198, 6738, 42903, 1330, 46947, 201, 198, 6738, 42903, 62, 25410, 282, 26599, 1330, 16363, 2348, 26599, 201, 198, 6738, 42903, 62, 66, 669, 1330, 327, 20673, 201, 198, 201, 198, 201, 198...
2.600858
233
from Module import AbstractModule
[ 6738, 19937, 1330, 27741, 26796, 628, 628 ]
5.285714
7
from string import Template
import re


if __name__ == '__main__':
    template_str = '${0} is Aug.'
    t = PositionalArgumentTemplate(template_str)
    print(template_str)
    print(dir(t))
    print(t.delimiter)
    print(t.idpattern)
    print(type(t.idpattern))
    print(t.flags)
    print(t.pattern)
    print(t.substitute(**{'0':'V'}))
    t.find_place_holders(template_str)
[ 6738, 4731, 1330, 37350, 198, 11748, 302, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 11055, 62, 2536, 796, 705, 38892, 15, 92, 318, 2447, 2637, 198, 220, 220, 220, 256, 796, 18574, 1859, ...
2.321212
165
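The PositionalArgumentTemplate class that the snippet above exercises is not included in the record. A minimal sketch of what such a subclass could look like, inferred only from the calls shown (the idpattern override and the find_place_holders body are assumptions, not the original implementation):

from string import Template

class PositionalArgumentTemplate(Template):
    # accept purely numeric placeholder names such as ${0}, ${1}, ...
    idpattern = r'[0-9]+'

    def find_place_holders(self, template_str):
        # report each positional placeholder found in the template string
        for match in self.pattern.finditer(template_str):
            name = match.group('braced') or match.group('named')
            if name is not None:
                print(name, match.span())

With this in place, t.substitute(**{'0': 'V'}) on '${0} is Aug.' renders 'V is Aug.'.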
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT

import json
import os
import requests
import uuid

import hug
import pytest
from falcon import HTTP_200, HTTP_409

import cla
from cla import routes

ID_TOKEN = os.environ.get('ID_TOKEN')
API_URL = os.environ.get('API_URL')


def test_create_company_duplicate():
    """
    Test creating duplicate company names
    """
    url = f'{API_URL}/v1/company'
    company_name = 'test_company_name'
    data = {
        'company_id': uuid.uuid4(),
        'company_name': company_name,
    }
    headers = {
        'Authorization': f'Bearer {ID_TOKEN}'
    }
    response = requests.post(url, data=data, headers=headers)
    assert response.status == HTTP_200

    # add duplicate company
    data = {
        'company_id': uuid.uuid4(),
        'company_name': company_name
    }
    req = hug.test.post(routes, url, data=data, headers=headers)
    assert req.status == HTTP_409
[ 2, 15069, 383, 7020, 5693, 290, 1123, 18920, 284, 8108, 37385, 13, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 17168, 198, 198, 11748, 33918, 198, 11748, 28686, 198, 11748, 7007, 198, 11748, 334, 27112, 198, 198, 11748, 16225, ...
2.55
400
# Copyright(c) 2017, Dimitar Venkov # @5devene, dimitar.ven@gmail.com # www.badmonkeys.net import clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from System.Drawing import Point, Color, Font from System.Windows.Forms import * from cStringIO import StringIO str_file = StringIO() size1 = [30, 23] #height, width l1 = [] if IN[0] is None else tolist(IN[0]) list2str(l1, IN[1]) str_content = str_file.getvalue() str_file.close() width1 = 100 form = WatchBox(str_content) form.adjust_controls(*size1) Application.Run(form) OUT = form.text1 Application.Exit() form.Dispose()
[ 2, 15069, 7, 66, 8, 2177, 11, 14048, 7940, 9932, 21862, 198, 2, 2488, 20, 2934, 574, 68, 11, 5391, 7940, 13, 574, 31, 14816, 13, 785, 198, 2, 7324, 13, 14774, 2144, 13083, 13, 3262, 198, 198, 11748, 537, 81, 198, 198, 565, 81, ...
2.702222
225
""" Question: Nim Game My Submissions Question You are playing the following Nim Game with your friend: There is a heap of stones on the table, each time one of you take turns to remove 1 to 3 stones. The one who removes the last stone will be the winner. You will take the first turn to remove the stones. Both of you are very clever and have optimal strategies for the game. Write a function to determine whether you can win the game given the number of stones in the heap. For example, if there are 4 stones in the heap, then you will never win the game: no matter 1, 2, or 3 stones you remove, the last stone will always be removed by your friend. Hint: If there are 5 stones in the heap, could you figure out a way to remove the stones such that you will always be the winner? Credits: Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases. Performance: 1. Total Accepted: 31755 Total Submissions: 63076 Difficulty: Easy 2. Your runtime beats 43.52% of python submissions. """ assert Solution().canWinNim(0) is True assert Solution().canWinNim(1) is True assert Solution().canWinNim(2) is True assert Solution().canWinNim(3) is True assert Solution().canWinNim(4) is False assert Solution().canWinNim(5) is True assert Solution().canWinNim(6) is True assert Solution().canWinNim(7) is True assert Solution().canWinNim(8) is False
[ 37811, 198, 24361, 25, 198, 220, 220, 220, 27168, 3776, 2011, 3834, 8481, 18233, 628, 220, 220, 220, 921, 389, 2712, 262, 1708, 27168, 3776, 351, 534, 1545, 25, 1318, 318, 257, 24575, 286, 14966, 319, 262, 3084, 11, 1123, 640, 530, ...
3.603581
391
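The Solution class these asserts exercise is not part of the record. A sketch that satisfies every assert above, assuming the usual modulo-4 analysis and treating a heap of 0 as a win because the first assert demands it:

class Solution(object):
    def canWinNim(self, n):
        # with optimal play the first player loses exactly when the heap
        # is a positive multiple of 4
        return n % 4 != 0 or n == 0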
import unittest import base
[ 11748, 555, 715, 395, 198, 198, 11748, 2779, 628 ]
3.333333
9
''' (c) 2011, 2012 Georgia Tech Research Corporation This source code is released under the New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created on Feb 20, 2011 @author: John Cornwell @organization: Georgia Institute of Technology @contact: JohnWCornwellV@gmail.com @summary: This is an implementation of the 1-KNN algorithm for ranking features quickly. It uses the knn implementation. @status: oneKNN functions correctly, optimized to use n^2/2 algorithm. ''' import matplotlib.pyplot as plt from pylab import gca import itertools import string import numpy as np import math import knn from time import clock ''' @summary: Query function for 1KNN, return value is a double between 0 and 1. @param naData: A 2D numpy array. Each row is a data point with the final column containing the classification. ''' ''' Test function to plot results ''' ''' Function to plot 2 distributions ''' ''' Function to test KNN performance ''' def _knnResult( naData ): ''' Split up data into training/testing ''' lSplit = naData.shape[0] * .7 naTrain = naData[:lSplit, :] naTest = naData[lSplit:, :] knn.addEvidence( naTrain.astype(float), 1 ); ''' Query with last column omitted and 5 nearest neighbors ''' naResults = knn.query( naTest[:,:-1], 5, 'mode') ''' Count returns which are correct ''' lCount = 0 for i, dVal in enumerate(naResults): if dVal == naTest[i,-1]: lCount = lCount + 1 dResult = float(lCount) / naResults.size return dResult ''' Tests performance of 1-KNN ''' def _test1(): ''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance ''' for i in range(3): ''' Select one of three distributions ''' if i == 0: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) elif i == 1: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) else: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] ) naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) ) naOrig = np.vstack( (naTest1, naTest2) ) naBoth = np.vstack( (naTest1, naTest2) ) ''' Keep track of runtimes ''' t = clock() cOneRuntime = t-t; cKnnRuntime = t-t; lfResults = [] lfKnnResults = [] for i in range( 15 ): #_plotDist( naTest1, naBoth[100:,:], i ) t = clock() lfResults.append( oneKnn( naBoth ) ) cOneRuntime = cOneRuntime + (clock() - t) t = clock() lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) ) cKnnRuntime = cKnnRuntime + (clock() - t) naBoth[500:,0] = naBoth[500:,0] - .1 print 'Runtime OneKnn:', cOneRuntime print 'Runtime 5-KNN:', cKnnRuntime _plotResults( naTest1, naTest2, lfResults, lfKnnResults ) ''' Tests performance of 1-KNN ''' def _test2(): ''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance ''' np.random.seed( 12345 ) ''' Create 5 distributions for each of the 5 attributes ''' dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist4 = 
np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) lDists = [ dist1, dist2, dist3, dist4, dist5 ] ''' All features used except for distribution 4 ''' distY = np.sin( dist1 ) + np.sin( dist2 ) + np.sin( dist3 ) + np.sin( dist5 ) distY = distY.reshape( -1, 1 ) for i, fVal in enumerate( distY ): if fVal >= 0: distY[i] = 1 else: distY[i] = 0 for i in range( 1, 6 ): lsNames = [] lf1Vals = [] lfVals = [] for perm in itertools.combinations( '12345', i ): ''' set test distribution to first element ''' naTest = lDists[ int(perm[0]) - 1 ] sPerm = perm[0] ''' stack other distributions on ''' for j in range( 1, len(perm) ): sPerm = sPerm + str(perm[j]) naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] ) ) ''' finally stack y values ''' naTest = np.hstack( (naTest, distY) ) lf1Vals.append( oneKnn( naTest ) ) lfVals.append( _knnResult( np.random.permutation(naTest) ) ) lsNames.append( sPerm ) ''' Plot results ''' plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' ) plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' ) plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') ) plt.ylabel('1-KNN Value/KNN Classification') plt.xlabel('Feature Set') plt.title('Combinations of ' + str(i) + ' Features') plt.ylim( (0,1) ) if len(lf1Vals) < 2: plt.xlim( (-1,1) ) gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 ) gca().xaxis.set_ticklabels( lsNames ) plt.show() if __name__ == '__main__': _test1() #_test2()
[ 7061, 6, 201, 198, 7, 66, 8, 2813, 11, 2321, 7859, 9634, 4992, 10501, 201, 198, 1212, 2723, 2438, 318, 2716, 739, 262, 968, 347, 10305, 5964, 13, 220, 4222, 766, 201, 198, 4023, 1378, 15466, 13, 40972, 43776, 13, 2398, 14, 9630, 1...
1.955148
3,322
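Both test routines in the snippet above call oneKnn, which was not captured in this record; its docstring only promises a value between 0 and 1 for a 2D array whose last column holds the class label. One plausible reading, offered purely as an assumption and not as the original QSTK implementation, is a nearest-neighbour agreement score:

import numpy as np

def one_knn_score(na_data):
    # fraction of points whose single nearest neighbour (excluding the
    # point itself) carries the same class label; always in [0, 1]
    features = na_data[:, :-1].astype(float)
    labels = na_data[:, -1]
    dists = np.linalg.norm(features[:, None, :] - features[None, :, :], axis=-1)
    np.fill_diagonal(dists, np.inf)   # a point must not pick itself
    nearest = np.argmin(dists, axis=1)
    return float(np.mean(labels[nearest] == labels))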
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function, division from pyscf.nao.m_coulomb_am import coulomb_am import numpy as np try: import numba as nb from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril use_numba = True except: use_numba = False # # # def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs): """ Computes the matrix elements given by funct, for instance coulomb interaction Args: sv : (System Variables), this must have arrays of coordinates and species, etc ao_log : description of functions (either orbitals or product basis functions) Returns: matrix elements for the whole system in packed form (lower triangular part) """ from pyscf.nao.m_ao_matelem import ao_matelem_c from pyscf.nao.m_pack2den import ij2pack_l aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me = ao_matelem_c(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log) atom2s = np.zeros((sv.natm+1), dtype=np.int64) for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs = atom2s[-1] res = np.zeros(norbs*(norbs+1)//2, dtype=dtype) for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print("atom1 = {0}, rv1 = {1}".format(atom1, rv1)) for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1: continue # skip oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs) if use_numba: fill_triu_v2(oo2f, res, s1, f1, s2, f2, norbs) else: for i1 in range(s1,f1): for i2 in range(s2, min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2] #print("number call = ", count) #print("sum kernel: {0:.6f}".format(np.sum(abs(res)))) #np.savetxt("kernel_pyscf.txt", res) #import sys #sys.exit() return res, norbs
[ 2, 15069, 1946, 12, 7908, 383, 9485, 6173, 37, 34152, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287...
2.357143
1,064
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from keystoneauth1 import service_token import mock import nova.conf from nova import context from nova import service_auth from nova import test CONF = nova.conf.CONF
[ 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, 198, 2, 220, 220, ...
3.388889
234
import svhn2mnist import usps import syn2gtrsb import syndig2svhn
[ 11748, 38487, 21116, 17, 10295, 396, 198, 11748, 514, 862, 198, 11748, 6171, 17, 70, 2213, 36299, 198, 11748, 11150, 328, 17, 21370, 21116, 628, 198 ]
2.615385
26
import torch from torch import Tensor from torch.nn.modules.loss import _Loss
[ 11748, 28034, 198, 6738, 28034, 1330, 309, 22854, 198, 6738, 28034, 13, 20471, 13, 18170, 13, 22462, 1330, 4808, 43, 793, 628, 198 ]
3.478261
23
import json import logging import os import shutil import tempfile import yaml from collections import defaultdict from copy import deepcopy from patroni import PATRONI_ENV_PREFIX from patroni.exceptions import ConfigParseError from patroni.dcs import ClusterConfig from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler from patroni.utils import deep_compare, parse_bool, parse_int, patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username', 'password', 'sslmode', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding' ) def _load_config_path(self, path): """ If path is a file, loads the yml file pointed to by path. If path is a directory, loads all yml files in that directory in alphabetical order """ if os.path.isfile(path): files = [path] elif os.path.isdir(path): files = [os.path.join(path, f) for f in sorted(os.listdir(path)) if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))] else: logger.error('config path %s is neither directory nor file', path) raise ConfigParseError('invalid config path') overall_config = {} for fname in files: with open(fname) as f: config = yaml.safe_load(f) patch_config(overall_config, config) return overall_config def _load_config_file(self): """Loads config.yaml from filesystem and applies some values which were set via ENV""" config = self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration) return config # configuration could be either ClusterConfig or dict
[ 11748, 33918, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 4423, 346, 198, 11748, 20218, 7753, 198, 11748, 331, 43695, 198, 198, 6738, 17268, 1330, 4277, 11600, 198, 6738, 4866, 1330, 2769, 30073, 198, 6738, 19686, 72, 1330, 28748, 4...
2.451697
766
############################################################################## # # Copyright (c) 2002 Zope Foundation and Contributors. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """ Unit tests for DirectoryView module. """ import sys import unittest import warnings from os import mkdir from os import remove from os.path import join from tempfile import mktemp from App.config import getConfiguration from . import _globals from .base.dummy import DummyFolder from .base.testcase import FSDVTest from .base.testcase import WritableFSDVTest def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests)) suite.addTest(unittest.makeSuite(DirectoryViewTests)) suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests)) suite.addTest(unittest.makeSuite(DirectoryViewFolderTests)) suite.addTest(unittest.makeSuite(DebugModeTests)) return suite
[ 29113, 29113, 7804, 4242, 2235, 198, 2, 198, 2, 15069, 357, 66, 8, 6244, 1168, 3008, 5693, 290, 25767, 669, 13, 198, 2, 198, 2, 770, 3788, 318, 2426, 284, 262, 8617, 286, 262, 1168, 3008, 5094, 13789, 11, 198, 2, 10628, 362, 13, ...
3.450382
393
import openmdao.api as om from pycycle.thermo.cea import species_data from pycycle.constants import AIR_ELEMENTS from pycycle.elements.ambient import Ambient from pycycle.elements.flow_start import FlowStart if __name__ == "__main__": p1 = om.Problem() p1.model = om.Group() des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W', 0.0, units='lbm/s') des_vars.add_output('alt', 1., units='ft') des_vars.add_output('MN', 0.5) des_vars.add_output('dTs', 0.0, units='degR') fc = p1.model.add_subsystem("fc", FlightConditions()) p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup() # p1.root.list_connections() p1['des_vars.alt'] = 17868.79060515557 p1['des_vars.MN'] = 2.101070288213628 p1['des_vars.dTs'] = 0.0 p1['des_vars.W'] = 1.0 p1.run_model() print('Ts_atm: ', p1['fc.ambient.Ts']) print('Ts_set: ', p1['fc.Fl_O:stat:T']) print('Ps_atm: ', p1['fc.ambient.Ps']) print('Ps_set: ', p1['fc.Fl_O:stat:P']) print('rhos_atm: ', p1['fc.ambient.rhos']*32.175) print('rhos_set: ', p1['fc.Fl_O:stat:rho']) print('W', p1['fc.Fl_O:stat:W']) print('Pt: ', p1['fc.Fl_O:tot:P'])
[ 11748, 1280, 9132, 5488, 13, 15042, 355, 39030, 198, 198, 6738, 12972, 13696, 13, 490, 5908, 13, 344, 64, 1330, 4693, 62, 7890, 198, 6738, 12972, 13696, 13, 9979, 1187, 1330, 31600, 62, 36, 2538, 28957, 198, 6738, 12972, 13696, 13, 68...
2.05175
657
from django.shortcuts import render from django.contrib.auth.decorators import login_required from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm from django.contrib.auth import update_session_auth_hash, login, authenticate from django.contrib import messages from django.shortcuts import render, redirect from social_django.models import UserSocialAuth from django.http import HttpResponse from django.shortcuts import get_object_or_404, redirect from rest_framework.authtoken.models import Token from app.methods import prepare_user
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 12501, 273, 2024, 1330, 17594, 62, 35827, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 23914, 1330, 32053, 35215, 19...
3.738854
157
# Lint as: python3 # coding=utf-8 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Split data into train, validation and test dataset according to person. That is, use some people's data as train, some other people's data as validation, and the rest ones' data as test. These data would be saved separately under "/person_split". It will generate new files with the following structure: person_split test train valid """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random from data_split import read_data from data_split import write_data def person_split(whole_data, train_names, valid_names, test_names): """Split data by person.""" random.seed(30) random.shuffle(whole_data) train_data = [] valid_data = [] test_data = [] for idx, data in enumerate(whole_data): # pylint: disable=unused-variable if data["name"] in train_names: train_data.append(data) elif data["name"] in valid_names: valid_data.append(data) elif data["name"] in test_names: test_data.append(data) print("train_length:" + str(len(train_data))) print("valid_length:" + str(len(valid_data))) print("test_length:" + str(len(test_data))) return train_data, valid_data, test_data if __name__ == "__main__": data = read_data("./data/complete_data") train_names = [ "hyw", "shiyun", "tangsy", "dengyl", "jiangyh", "xunkai", "negative3", "negative4", "negative5", "negative6" ] valid_names = ["lsj", "pengxl", "negative2", "negative7"] test_names = ["liucx", "zhangxy", "negative1", "negative8"] train_data, valid_data, test_data = person_split(data, train_names, valid_names, test_names) if not os.path.exists("./person_split"): os.makedirs("./person_split") write_data(train_data, "./person_split/train") write_data(valid_data, "./person_split/valid") write_data(test_data, "./person_split/test")
[ 2, 406, 600, 355, 25, 21015, 18, 198, 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 13130, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, ...
2.714425
1,026
from datetime import datetime from kubernetes import client from kubernetes.client.rest import ApiException import os import time import yaml from tests import config as conf import tests.utils as ut
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 479, 18478, 3262, 274, 1330, 5456, 198, 6738, 479, 18478, 3262, 274, 13, 16366, 13, 2118, 1330, 5949, 72, 16922, 198, 11748, 28686, 198, 11748, 640, 198, 11748, 331, 43695, 198, 198, 6738, ...
3.540984
61
import os from dotenv import load_dotenv
[ 11748, 28686, 198, 6738, 16605, 24330, 1330, 3440, 62, 26518, 24330, 628 ]
3.5
12
import os import sys import click import pickle import sncosmo import numpy as np from astropy.table import Table DATA_PATH = '/home/samdixon/jla_light_curves/' def modify_error(lc, error_floor=0.): """Add an error floor of `error_floor` times the maximum flux of the band to each observation """ data = sncosmo.photdata.photometric_data(lc).normalized(zp=25., zpsys='ab') new_lc = {'time': data.time, 'band': data.band, 'flux': data.flux, 'fluxerr': data.fluxerr, 'zp': data.zp, 'zpsys': data.zpsys} for band in set(data.band): band_cut = data.band==band max_flux_in_band = np.max(data.flux[band_cut]) new_lc['fluxerr'][band_cut] = np.sqrt((error_floor*max_flux_in_band)**2+data.fluxerr[band_cut]**2) new_lc = Table(new_lc, meta=lc.meta) return new_lc if __name__=='__main__': main()
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 3904, 198, 11748, 2298, 293, 198, 11748, 3013, 6966, 5908, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 6468, 28338, 13, 11487, 1330, 8655, 628, 198, 26947, 62, 34219, 796, 31051, 11195, 14, ...
2.049462
465
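To make the effect of modify_error concrete, here is a toy call against the function defined above; the numbers are made up (not JLA data) and the 'sdssg' band name assumes one of sncosmo's built-in SDSS bandpasses:

from astropy.table import Table

lc = Table({'time':    [0.0, 5.0, 10.0],
            'band':    ['sdssg', 'sdssg', 'sdssg'],
            'flux':    [10.0, 50.0, 20.0],
            'fluxerr': [1.0, 1.0, 1.0],
            'zp':      [25.0, 25.0, 25.0],
            'zpsys':   ['ab', 'ab', 'ab']})

padded = modify_error(lc, error_floor=0.05)
# the peak flux in the band is 50, so the floor is 0.05 * 50 = 2.5 and each
# fluxerr becomes sqrt(1**2 + 2.5**2) ~= 2.69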
""" Unit tests for the system interface.""" import unittest from six import assertRaisesRegex from six.moves import cStringIO import numpy as np from openmdao.api import Problem, Group, IndepVarComp, ExecComp from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp from openmdao.utils.assert_utils import assert_rel_error, assert_warning if __name__ == "__main__": unittest.main()
[ 37811, 11801, 5254, 329, 262, 1080, 7071, 526, 15931, 198, 198, 11748, 555, 715, 395, 198, 6738, 2237, 1330, 6818, 21762, 2696, 3041, 25636, 198, 6738, 2237, 13, 76, 5241, 1330, 269, 10100, 9399, 198, 198, 11748, 299, 32152, 355, 45941,...
3.248062
129
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import argparse import os import sqlite3 import sys import pandas as pd from src import config def db_tables(connection): """List tables in database.""" res = pd.read_sql("select name from sqlite_master", connection) return res.name.values def create_database(sample): """Create database with tables for targets, outcomes, and predictions.""" db_name = f'{sample}.db' db_path = os.path.join(config.DATADIR, db_name) conn = sqlite3.connect(db_path) usr_name = f'users_{sample}.csv' usr_path = os.path.join(config.DATADIR, usr_name) users = pd.read_csv(usr_path) db_tbls = db_tables(conn) for tbl in ['decisions', 'outcomes', 'predictions']: if tbl not in db_tbls: users.to_sql(tbl, conn, index=False) conn.execute(f"create index idx_{tbl}_user_id on {tbl}(user_id)") if __name__ == '__main__': sys.exit(main())
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 44161, 578, 18, 198, 11748, 25064, 198, 11748, 19798, 292, ...
2.380952
399
# coding: utf-8 import pytest import app as service import yaml import responder from starlette.responses import PlainTextResponse
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 11748, 12972, 9288, 198, 11748, 598, 355, 2139, 198, 11748, 331, 43695, 198, 11748, 3031, 263, 198, 6738, 3491, 21348, 13, 16733, 274, 1330, 28847, 8206, 31077, 628, 628, 628, 628, 628 ]
3.439024
41