content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!_PYTHONLOC
#
# (C) COPYRIGHT 2005-2014 Al von Ruff and Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
import cgi
import sys
import MySQLdb
from isfdb import *
from isfdblib import *
from titleClass import *
from SQLparsing import *
from login import *
from library import *
from viewers import DisplayUnmergeTitle
if __name__ == '__main__':
submission = Submission()
submission.header = 'Title Unmerge Submission'
submission.cgi_script = 'tv_unmerge'
submission.type = MOD_TITLE_UNMERGE
submission.viewer = DisplayUnmergeTitle
form = cgi.FieldStorage()
try:
record = int(form['record'].value)
except:
submission.error("Integer title number required")
titlename = SQLgetTitle(record)
if not titlename:
submission.error("Specified title number doesn't exist")
if not submission.user.id:
submission.error("", record)
update_string = '<?xml version="1.0" encoding="' +UNICODE+ '" ?>\n'
update_string += "<IsfdbSubmission>\n"
update_string += " <TitleUnmerge>\n"
update_string += " <Submitter>%s</Submitter>\n" % (db.escape_string(XMLescape(submission.user.name)))
update_string += " <Subject>%s</Subject>\n" % (db.escape_string(XMLescape(titlename)))
update_string += " <Record>%d</Record>\n" % (record)
entry = 1
pub_count = 0
while entry < 2000:
name = 'pub%d' % entry
if form.has_key(name):
try:
val = int(form[name].value)
except:
submission.error("Invalid publication number")
update_string += " <PubRecord>%d</PubRecord>\n" % (val)
pub_count += 1
else:
pass
entry += 1
if not pub_count:
submission.error("No publications selected to be unmerged")
if form.has_key('mod_note'):
update_string += " <ModNote>%s</ModNote>\n" % (db.escape_string(XMLescape(form['mod_note'].value)))
update_string += " </TitleUnmerge>\n"
update_string += "</IsfdbSubmission>\n"
submission.file(update_string)
| [
2,
0,
62,
47,
56,
4221,
1340,
29701,
198,
2,
198,
2,
220,
220,
220,
220,
357,
34,
8,
27975,
38162,
9947,
5075,
12,
4967,
220,
220,
978,
18042,
42409,
290,
7900,
292,
15573,
385,
198,
2,
197,
11096,
371,
34874,
15731,
1137,
53,
1... | 2.340292 | 958 |
""" Activity manager configuration
- config-file schema
- prometheus endpoint information
"""
import trafaret as T
CONFIG_SECTION_NAME = "activity"
schema = T.Dict(
{
T.Key("enabled", default=True, optional=True): T.Bool(),
T.Key(
"prometheus_host", default="http://prometheus", optional=False
): T.String(),
T.Key("prometheus_port", default=9090, optional=False): T.ToInt(),
T.Key("prometheus_api_version", default="v1", optional=False): T.String(),
}
)
| [
37811,
24641,
4706,
8398,
198,
220,
220,
220,
532,
4566,
12,
7753,
32815,
198,
220,
220,
220,
532,
1552,
36916,
36123,
1321,
198,
37811,
198,
11748,
1291,
69,
8984,
355,
309,
198,
198,
10943,
16254,
62,
50,
24565,
62,
20608,
796,
366,... | 2.5 | 210 |
from rubin_sim.scheduler.utils import int_rounded
__all__ = ['filter_swap_scheduler', 'simple_filter_sched']
class filter_swap_scheduler(object):
"""A simple way to schedule what filter to load
"""
def __call__(self, conditions):
"""
Returns
-------
list of strings for the filters that should be loaded
"""
pass
| [
6738,
6437,
259,
62,
14323,
13,
1416,
704,
18173,
13,
26791,
1330,
493,
62,
39262,
198,
198,
834,
439,
834,
796,
37250,
24455,
62,
2032,
499,
62,
1416,
704,
18173,
3256,
705,
36439,
62,
24455,
62,
1416,
704,
20520,
628,
198,
4871,
8... | 2.536913 | 149 |
from collections import OrderedDict
import csv
import django_filters
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError
from django.db import transaction
from django.http import Http404, HttpResponse, HttpResponseRedirect
from mptt.exceptions import InvalidMove
from rest_framework import status
from rest_framework.exceptions import PermissionDenied, ValidationError
from rest_framework.filters import SearchFilter, OrderingFilter
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from perma.utils import run_task, stream_warc, stream_warc_if_permissible, clear_wr_session
from perma.tasks import run_next_capture
from perma.models import Folder, CaptureJob, Link, Capture, Organization, LinkBatch
from .utils import TastypiePagination, load_parent, raise_general_validation_error, \
raise_invalid_capture_job, dispatch_multiple_requests, reverse_api_view_relative
from .serializers import FolderSerializer, CaptureJobSerializer, LinkSerializer, AuthenticatedLinkSerializer, \
LinkUserSerializer, OrganizationSerializer, LinkBatchSerializer, DetailedLinkBatchSerializer
### BASE VIEW ###
### ORGANIZATION VIEWS ###
# /organizations
# /organizations/:id
### FOLDER VIEWS ###
# /folders
# /folders/:parent_id/folders
# /folders/:id
# /folders/:parent_id/folders/:id
### CAPTUREJOB VIEWS ###
# /capture_jobs
# /capture_jobs/:id
# /capture_jobs/:guid
### LINK VIEWS ###
class LinkFilter(django_filters.rest_framework.FilterSet):
"""
Custom filter for searching links by query string.
"""
date = django_filters.IsoDateTimeFilter(field_name="creation_timestamp", lookup_expr='date') # ?date=
min_date = django_filters.IsoDateTimeFilter(field_name="creation_timestamp", lookup_expr='gte') # ?min_date=
max_date = django_filters.IsoDateTimeFilter(field_name="creation_timestamp", lookup_expr='lte') # ?max_date=
url = django_filters.CharFilter(field_name="submitted_url", lookup_expr='icontains') # ?url=
# /public/archives
# /public/archives/:guid
#/public/archives/:guid/download
# /archives
# /folders/:parent_id/archives
# /archives/export
# /folders/:parent_id/archives/export
# /archives/:guid
#/archives/:guid/download
# /folders/:parent_id/archives/:guid
### LINKUSER ###
# /user
### LINKBATCH ###
# /batches
# /batches/:id
# /batches/:id/export
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
269,
21370,
198,
11748,
42625,
14208,
62,
10379,
1010,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,... | 2.992883 | 843 |
from .verbs import Create, Match, Merge
from .operations import (All, Any, Avg, Collect, Count, Distinct, Exists,
Max, Min, None_, Single, Sum, Unwind)
| [
6738,
764,
46211,
1330,
13610,
11,
13225,
11,
39407,
198,
6738,
764,
3575,
602,
1330,
357,
3237,
11,
4377,
11,
33455,
11,
9745,
11,
2764,
11,
4307,
4612,
11,
1475,
1023,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.458333 | 72 |
size(200, 200)
linearGradient((25, 25), (175, 175), [(1, 0, 0), (0, 1, 0), (0, 0, 1)], [0, 0.25, 1])
rect(0, 0, 200, 200)
stroke(0)
strokeWidth(4)
fill(1, 0.5)
rect(50, 50, 100, 100)
| [
7857,
7,
2167,
11,
939,
8,
198,
198,
29127,
42731,
1153,
19510,
1495,
11,
1679,
828,
357,
17430,
11,
19038,
828,
47527,
16,
11,
657,
11,
657,
828,
357,
15,
11,
352,
11,
657,
828,
357,
15,
11,
657,
11,
352,
8,
4357,
685,
15,
11... | 1.936842 | 95 |
import tweepy
import socket
import json
import os
if __name__ == "__main__":
keywords = ['crypto', 'cryptocurrency']
credentials = get_credentials()
test_authentication(
credentials['consumer_key'],
credentials['consumer_secret'],
credentials['access_token'],
credentials['access_token_secret']
)
api = create_api(
credentials['consumer_key'],
credentials['consumer_secret'],
credentials['access_token'],
credentials['access_token_secret']
)
# Start server
c_socket, address = create_server_connection()
# Stream tweets
tweets_listener = TweetListener(api, c_socket)
stream = tweepy.Stream(api.auth, tweets_listener)
stream.filter(track=keywords, languages=["en"])
| [
11748,
4184,
538,
88,
198,
11748,
17802,
198,
11748,
33918,
198,
11748,
28686,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
26286,
796,
37250,
29609,
78,
3256,
705,
29609,
420,
13382,
20520,
198,
220... | 2.99187 | 246 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 3 17:30:52 2018
@author: Javier Alejandro Acevedo Barroso
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import rcParams
fsize = 16
rcParams.update({'figure.autolayout': True})
#plt.rcParams['image.cmap'] = 'PuBu'
#plt.rcParams['image.cmap'] = 'YlGnBu'
rcParams.update({'font.size': 11})
plt.rcParams['image.cmap'] = 'plasma'
dat = np.loadtxt("./datFiles/grid0.dat").T
#density = np.loadtxt("density.dat")
grid0 = np.loadtxt("./datFiles/grid{:d}.dat".format(0)).T
dpII = 300
figure = plt.figure(figsize=(7,5))
########################################### 1D ##############################
constantes = np.loadtxt("constants.dat", usecols = 1)
TAU = int(constantes[8])
x = np.linspace(constantes[0], constantes[1], int(constantes[4]))
velUnit = 621 #m/s
estUnit = 35 #kpc
potUnit = 385962691092 #J/kg
acceUnit = 3.5737451e-13 #km/s²
plt.imshow(dat, extent=[constantes[0],constantes[1],constantes[2],constantes[3]], interpolation='nearest', aspect='auto') #Es mucho más rápido imshow
plt.yticks(plt.yticks()[0], [str(np.round(t*velUnit)) for t in plt.yticks()[0]])
plt.ylabel("Velocity [km/s]",fontsize=fsize)
plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
plt.xlabel("Position [kpc]", fontsize=fsize)
plt.title("Phase Space Initialization",fontsize=fsize)
plt.clim(0,20e5)
plt.ylim(constantes[2]/4,constantes[3]/4)
plt.xlim(constantes[2]/4,constantes[3]/4)
cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))
cbar.set_label("Mass density [$M_{\odot}$ / kpc $\\frac{km}{s}$]",fontsize=fsize+1)
plt.savefig("1dInitPS.png", dpi = dpII)
plt.clf()
dens = np.loadtxt("./datFiles/density{:d}.dat".format(0))
plt.plot(x,dens)
plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
plt.xlabel("Position [kpc]",fontsize=fsize)
plt.ylabel("Linear Density [$M_{\odot}$ / kpc]",fontsize=fsize)
plt.title("Density Initialization",fontsize=fsize)
plt.ylim(-0.75e9,20e10)
plt.xlim(-1.05,1.05)
plt.savefig("1dInitDens.png", dpi = dpII)
plt.clf()
potential = np.loadtxt("./datFiles/potential{:d}.dat".format(0))
plt.plot(x,potential)
plt.ylabel("Potential [J /kg]",fontsize=fsize)
plt.title("Potential at t=0".format(TAU),fontsize=fsize)
#plt.ylim(-6.6e10,-5.8e10)
plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
#plt.yticks(plt.yticks()[0], [fmt(np.round(t*potUnit),1) for t in plt.yticks()[0]])
plt.xlabel("Position [kpc]",fontsize=fsize)
plt.xlim(-1.05,1.05)
plt.savefig("1dInitPot.png", dpi = dpII)
plt.clf()
acce = np.loadtxt("./datFiles/acce{:d}.dat".format(0))
plt.plot(x,acce)
plt.ylabel("Acceleration [kpc / Gy$^2$]",fontsize=fsize)
plt.title("Acceleration at t=0".format(TAU),fontsize=fsize)
#plt.yticks(plt.yticks()[0], [str(t*2754463327) for t in plt.yticks()[0]])
plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
#plt.yticks(plt.yticks()[0], [fmt(t*acceUnit,1) for t in plt.yticks()[0]])
plt.ylim(np.min(acce)*1.1,np.max(acce)*1.1)
plt.xlabel("Position [kpc]",fontsize=fsize)
plt.xlim(-1.05,1.05)
plt.savefig("1dInitAcce.png", dpi = dpII)
plt.clf()
####################################################################
################################################################### 2D ########################
#velUnit = 1183 #m/s
#estUnit = 50 #kpc
#potUnit = 1400318153625 #J/kg
#acceUnit = 9.0761782e-13 #km/s²
#
##
##dens = np.loadtxt("./miniCluster/2D/density0.dat").T
#dens = np.loadtxt("./nocol/density0.dat").T
#plt.imshow(dens,extent=[-1,1,-1,1])
#cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))
#plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
#plt.xlabel("Position [kpc]",fontsize=fsize)
#plt.yticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
#plt.ylabel("Position [kpc]",fontsize=fsize)
#cbar.set_label("Density [$M_{\odot}$ / kpc$^2$]",fontsize=fsize)
##plt.title("2D Density Initialization",fontsize=fsize)
##plt.savefig("2dInitDens.png", dpi = dpII)
##plt.clf()
#
#x, y= np.meshgrid(np.arange(-1, 1, 2.0/128),
# np.arange(-1, 1, 2.0/128))
#accex = np.loadtxt('./miniCluster/2D/accex0.dat').T
#accey = np.loadtxt('./miniCluster/2D/accey0.dat').T
#
#everyN = 6
##plt.quiver(x[::everyN, ::everyN], y[::everyN, ::everyN], accex[::everyN, ::everyN],accey[::everyN, ::everyN], color = 'xkcd:green')
##plt.quiver(x[::everyN, ::everyN], y[::everyN, ::everyN], accex[::everyN, ::everyN],accey[::everyN, ::everyN], color = 'xkcd:pink')
#plt.quiver(x[::everyN, ::everyN], y[::everyN, ::everyN], accex[::everyN, ::everyN],accey[::everyN, ::everyN], color = 'xkcd:white',pivot='mid')
#plt.title("Density and acceleretion at t=0",fontsize=fsize)
#plt.savefig("2dInitAcceDens", dpi=dpII)
#plt.clf()
##
##
##phasex = np.loadtxt('./miniCluster/2D/gridx0.dat').T
#phasex = np.loadtxt('./nocol/gridx0.dat').T
#plt.imshow(phasex,extent=[-1,1,-1,1])
#cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))
#plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
#plt.xlabel("Position [kpc]",fontsize=fsize)
#plt.yticks(plt.xticks()[0], [str(t*velUnit) for t in plt.xticks()[0]])
#plt.ylabel("Velocity [km/s]",fontsize=fsize)
##plt.title("Phase Space initialization cut at y=0, Vy = 0",fontsize=fsize)
#plt.title("$f$ $(x,y=0,vx,vy=0,t=0$)",fontsize=fsize)
#cbar.set_label("Phase Space Density [$M_{\odot}$ / (kpc $\\frac{km}{s}$)$^2$]",fontsize=fsize)
#plt.savefig('2dInitPhase.png',dpi=dpII)
#plt.clf()
#
#
#potential = np.loadtxt('./miniCluster/2D/potential0.dat').T
#plt.imshow(potential,extent=[-1,1,-1,1])
#cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))
#plt.title("Potential at t=0",fontsize=fsize)
#plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
#plt.xlabel("Position [kpc]",fontsize=fsize)
#plt.yticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
#plt.ylabel("Position [kpc]",fontsize=fsize)
#cbar.set_label("Potential [J /kg]",fontsize=fsize)
#plt.savefig('2dInitPot.png',dpi=dpII)
#plt.clf() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
5267,
220,
513,
1596,
25,
1270,
25,
4309,
2864,
198,
198,
31,
9800,
25,
44492,
9300... | 2.116793 | 2,894 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021/3/2 16:53
# @Author : Gavin
from .osDriver import osSystem
from .url import Url | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
198,
2,
2488,
7575,
220,
220,
220,
1058,
33448,
14,
18,
14,
17,
1467,
25,
4310,
198,
2,
2488,
13838,
220,
1058,... | 2.333333 | 63 |
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data as Data
import numpy as np
class FocalLoss(nn.Module):
""" Focal loss for better handling imbalanced class distribution. """
class SupConLoss(nn.Module):
"""Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
It also supports the unsupervised contrastive loss in SimCLR""" | [
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
26791,
13,
7890,
355,
6060,
198,
11748,
299,
32152,
355,
45941,
220,
198,
198,
4871,
376,
4374,
43,
79... | 3.091603 | 131 |
from flow import FlowProject, cmd, directives
# MPI-parallelized operation using mpi4py:
# Execute directly with:
# $ mpiexec -n 2 python project.py exec mpi_hello_world
# or
# $ mpiexec -n 2 python project.py run -o mpi_hello_world
# To generate scripts you would need to take one of the two
# approaches shown below.
@FlowProject.operation
# This cmd-operaiton calls another MPI program, which may
# be our own script or any other program.
# Execute this operation with:
# $ python project.py exec mpi_hello_world_cmd
# or
# $ python project.py run -o mpi_hello_world
#
# Providing the number of processors (np) with the @directives
# decorator is not strictly necessary, but might be used by some
# script templates to either directly prepend the command with
# mpiexec or equivalent, and/or to calculate required resources.
@FlowProject.operation
@directives(np=2)
@cmd
# The np argument to the directives operator can be a function of job:
@FlowProject.operation
@directives(np=lambda job: job.sp.foo+1)
if __name__ == '__main__':
FlowProject().main()
| [
6738,
5202,
1330,
27782,
16775,
11,
23991,
11,
34819,
628,
198,
2,
4904,
40,
12,
1845,
29363,
1143,
4905,
1262,
285,
14415,
19,
9078,
25,
198,
2,
8393,
1133,
3264,
351,
25,
198,
2,
720,
285,
21749,
87,
721,
532,
77,
362,
21015,
16... | 3.351097 | 319 |
import numpy as np
import torch
eps = 2.2204e-16
##Tensor operation
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
198,
25386,
796,
362,
13,
17572,
19,
68,
12,
1433,
628,
198,
2235,
51,
22854,
4905,
628,
628,
628,
628
] | 2.689655 | 29 |
A_e = 4.478 # m**2 | [
32,
62,
68,
796,
604,
13,
29059,
220,
1303,
285,
1174,
17
] | 1.583333 | 12 |
"""
LC 713
Given an array with positive numbers and a positive target number, find all of its contiguous subarrays whose product is less than the target number.
Example 1:
Input: [2, 5, 3, 10], target=30
Output: [2], [5], [2, 5], [3], [5, 3], [10]
Explanation: There are six contiguous subarrays whose product is less than the target.
Example 2:
Input: [8, 2, 6, 5], target=50
Output: [8], [2], [8, 2], [6], [2, 6], [5], [6, 5]
Explanation: There are seven contiguous subarrays whose product is less than the target.
"""
main()
"""
Time O(N^3): create space
Space O(N^3): N^2 arrays * N length
"""
| [
37811,
198,
5639,
767,
1485,
198,
15056,
281,
7177,
351,
3967,
3146,
290,
257,
3967,
2496,
1271,
11,
1064,
477,
286,
663,
48627,
850,
3258,
592,
3025,
1720,
318,
1342,
621,
262,
2496,
1271,
13,
198,
198,
16281,
352,
25,
198,
198,
20... | 2.882075 | 212 |
import math
from typing import Callable, Union, Tuple
class Newton:
"""
This class models the Newton-Raphson Approximation algorithm.
See https://en.wikipedia.org/wiki/Newton%27s_method
It is an example of a non-deterministic algorithm inasmuch as the convergence (or lack thereof) is very dependent
on the value of the initial guess x0 to the solve method.
However, if you run it with identical starting conditions, it will always come out the same:
it does not use any random elements.
"""
def __init__(self, equation: str, f: Callable, dfbydx: Callable):
"""
Constructor to create a problem to be solved by Newton's method.
In particular, the problem is to find a root of the equation f(x) = 0.
:param equation: a String rendition of the function in the form f(x).
There is no need to add the "= 0" part of the equation.
:param f: the function f(x)
:param dfbydx: the first derivative of f(x) with respect to the variable x
"""
self.equation = equation
self.f = f
self.dfbydx = dfbydx
def solve(self, x0: float, max_tries: int, tolerance: float) -> Tuple[bool, Union[str, float]]:
"""
Method to solve this Newton problem.
:param x0: the initial estimate of x.
If this is too far from any root, the solution may not converge.
:param max_tries: the maximum number of tries before admitting defeat due to non-convergence.
:param tolerance: the required precision for the value of f(x) to be considered equal to zero.
:return: a tuple of (bool, Union[str, float]), either (True, result) or (False, reason)
"""
x = x0
for tries in range(max_tries):
try:
y = self.f(x)
if abs(y) < tolerance:
return True, x
x = x - y / self.dfbydx(x)
except Exception as e:
return False, f"Exception thrown solving {self.equation}=0, given x0={x0}, max_tries={max_tries}, " \
f"and tolerance={tolerance} because {e}"
return False, f"{self.equation}=0 did not converge given x0={x0}, max_tries={max_tries}, " \
f"and tolerance={tolerance}"
if __name__ == "__main__":
main()
| [
11748,
10688,
201,
198,
6738,
19720,
1330,
4889,
540,
11,
4479,
11,
309,
29291,
201,
198,
201,
198,
201,
198,
4871,
17321,
25,
201,
198,
220,
220,
220,
37227,
201,
198,
220,
220,
220,
770,
1398,
4981,
262,
17321,
12,
49,
6570,
1559,... | 2.278237 | 1,089 |
import turtle
turtle.up()
turtle.shape('turtle')
turtle.goto(0,0)
turtle.color('black')
turtle.down()
turtle.begin_fill()
turtle.forward(100)
turtle.right(120)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(120)
turtle.forward(100)
turtle.end_fill()
turtle.up()
turtle.shape('turtle')
turtle.goto(100,0)
turtle.color('blue')
turtle.down()
turtle.begin_fill()
turtle.left(60)
turtle.forward(100)
turtle.left(60)
turtle.forward(100)
turtle.left(120)
turtle.forward(100)
turtle.end_fill()
turtle.up()
turtle.shape('turtle')
turtle.color('red')
turtle.down()
turtle.begin_fill()
turtle.left(180)
turtle.forward(100)
turtle.left(120)
turtle.forward(100)
turtle.left(60)
turtle.forward(100)
turtle.end_fill()
turtle.hideturtle()
turtle.done() | [
11748,
28699,
198,
83,
17964,
13,
929,
3419,
198,
83,
17964,
13,
43358,
10786,
83,
17964,
11537,
198,
83,
17964,
13,
70,
2069,
7,
15,
11,
15,
8,
198,
83,
17964,
13,
8043,
10786,
13424,
11537,
198,
83,
17964,
13,
2902,
3419,
198,
8... | 2.323171 | 328 |
"""Defines the visualization server."""
from mesa.visualization.ModularVisualization import (ModularServer,
VisualizationElement)
from mesa.visualization.modules import ChartModule
from mesa.visualization.UserParam import UserSettableParameter
from .agent import Candy, Creature
from .model import Evolution
class SimpleCanvas(VisualizationElement):
"""Continuous canvas."""
HEIGHT = 500
WIDTH = 500
local_includes = ["candied/simple_continuous_canvas.js"]
@staticmethod
def portrayal_method(agent):
"""Defines how agents are portrayed in the visualization."""
portrayal = {'Shape': 'circle'}
if isinstance(agent, Creature):
r = agent.view_range
portrayal['r'] = r * SimpleCanvas.HEIGHT / Evolution.HEIGHT
eaten_candies = agent.eaten_candies
if eaten_candies == 0:
portrayal['Color'] = 'Red'
portrayal['Layer'] = 2
elif eaten_candies == 1:
portrayal['Color'] = 'Orange'
portrayal['Layer'] = 1
elif eaten_candies == 2:
portrayal['Color'] = 'Green'
portrayal['Layer'] = 0
portrayal['Filled'] = 'true'
if isinstance(agent, Candy):
portrayal['Layer'] = 10
portrayal['Color'] = "Blue"
portrayal['Filled'] = "True"
portrayal['r'] = 2
if agent.eaten:
portrayal['r'] = 0
return portrayal
canvas_element = SimpleCanvas()
creatures_slider = UserSettableParameter(
'slider',
"Creatures",
value=20,
min_value=1,
max_value=100,
step=1,
)
candies_slider = UserSettableParameter(
'slider',
"Candies",
value=100,
min_value=0,
max_value=1000,
step=1,
)
energy_graph = ChartModule(
series=[{"Label": "Energy", "Color": "Yellow"}],
data_collector_name='datacollector',
)
eaters_graph = ChartModule(
series=[
{"Label": "Zero eaters", "Color":
"Red"}, {"Label": "One eaters", "Color": "Yellow"},
{"Label": "Two eaters", "Color": "Green"}
],
data_collector_name='datacollector',
)
display_params = {
"height": Evolution.HEIGHT,
"width": Evolution.WIDTH,
"n_creatures": creatures_slider,
"n_candies": candies_slider,
"max_days": 3000,
}
server = ModularServer(
model_cls=Evolution,
visualization_elements=[canvas_element, energy_graph, eaters_graph],
name="Evolution model",
model_params=display_params,
)
| [
37811,
7469,
1127,
262,
32704,
4382,
526,
15931,
198,
6738,
18842,
64,
13,
41464,
1634,
13,
5841,
934,
36259,
1634,
1330,
357,
5841,
934,
10697,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 2.27535 | 1,144 |
# Generated by Django 4.0.1 on 2022-01-19 15:53
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
604,
13,
15,
13,
16,
319,
33160,
12,
486,
12,
1129,
1315,
25,
4310,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Copyright (c) 2016 by University of Kassel and Fraunhofer Institute for Wind Energy and Energy
# System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Runs a DC power flow.
"""
from os.path import dirname, join
from pypower.ppoption import ppoption
from pandapower.runpf import runpf
def rundcpf(casedata=None, ppopt=None, fname='', solvedcase=''):
"""Runs a DC power flow.
@see: L{runpf}
@author: Ray Zimmerman (PSERC Cornell)
@author: Richard Lincoln
Changes by University of Kassel: Different runpf is imported
"""
## default arguments
if casedata is None:
casedata = join(dirname(__file__), 'case9')
ppopt = ppoption(ppopt, PF_DC=True)
return runpf(casedata, ppopt, fname, solvedcase)
| [
2,
15069,
357,
66,
8,
1584,
416,
2059,
286,
15035,
741,
290,
39313,
403,
71,
30288,
5136,
329,
3086,
6682,
290,
6682,
198,
2,
4482,
8987,
357,
40,
54,
1546,
828,
15035,
741,
13,
1439,
2489,
10395,
13,
5765,
286,
428,
2723,
2438,
3... | 2.891525 | 295 |
import json
from django.http import Http404, HttpResponse
from models import Fish, FishSchedule, Bug, BugSchedule
| [
11748,
33918,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
11,
367,
29281,
31077,
198,
198,
6738,
4981,
1330,
13388,
11,
13388,
27054,
5950,
11,
15217,
11,
15217,
27054,
5950,
628
] | 3.441176 | 34 |
# https://www.algoexpert.io/questions/BST%20Traversal
# Average: O(log(n)) time | O(1) space
# Worst: O(n) time | O(1) space
# O(n) time | O(n) space
# O(n) time | O(n) space
# O(n) time | O(n) space
# driver/test code
test_tree = BST(100).insert(5).insert(15).insert(5).insert(2).insert(1).insert(22) \
.insert(1).insert(1).insert(3).insert(1).insert(1).insert(502).insert(55000) \
.insert(204).insert(205).insert(207).insert(206).insert(208).insert(203) \
.insert(-51).insert(-403).insert(1001).insert(57).insert(60).insert(4500)
| [
2,
3740,
1378,
2503,
13,
282,
2188,
1069,
11766,
13,
952,
14,
6138,
507,
14,
33,
2257,
4,
1238,
15721,
690,
282,
628,
1303,
13475,
25,
440,
7,
6404,
7,
77,
4008,
640,
930,
440,
7,
16,
8,
2272,
198,
1303,
33443,
25,
440,
7,
77,... | 2.381974 | 233 |
"""
Test cases for ldaptor.inmemory module.
"""
from io import BytesIO
from twisted.trial import unittest
from ldaptor import inmemory, delta, testutil
from ldaptor.protocols.ldap import distinguishedname, ldaperrors
| [
37811,
198,
14402,
2663,
329,
300,
67,
2373,
273,
13,
259,
31673,
8265,
13,
198,
37811,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
6738,
19074,
13,
45994,
1330,
555,
715,
395,
198,
198,
6738,
300,
67,
2373,
273,
1330,
287,
... | 3.097222 | 72 |
from collections import defaultdict
import tensorflow as tf
from tensorboardX import SummaryWriter
# (N, T, C, H, W) | [
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
3526,
55,
1330,
21293,
34379,
628,
220,
220,
220,
1303,
357,
45,
11,
309,
11,
327,
11,
367,
11,
370,
8
] | 3.210526 | 38 |
import datetime
from werkzeug.security import generate_password_hash
from flask import request, jsonify
from ..models.user_model import UserModel
from ..schemas.user_serealize import user_schema, users_schema
from .base_controller import get_all, get_one, delete, post, update
from ..notify.base_notification import is_required
| [
11748,
4818,
8079,
198,
6738,
266,
9587,
2736,
1018,
13,
12961,
1330,
7716,
62,
28712,
62,
17831,
198,
6738,
42903,
1330,
2581,
11,
33918,
1958,
198,
6738,
11485,
27530,
13,
7220,
62,
19849,
1330,
11787,
17633,
198,
6738,
11485,
1416,
4... | 3.438776 | 98 |
import re
from django.db.models import Q
from django.shortcuts import Http404
REGEX_FOR_ANY_TEXT_FIELD = re.compile(r'[^\w]', re.I | re.U)
class ApartmentFilterMixin(object):
"""ApartmentFilterMixin is an mixin for searching in CBV
it filter self.apartment_list using data from self.form
"""
| [
11748,
302,
198,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
367,
29281,
26429,
198,
198,
31553,
6369,
62,
13775,
62,
31827,
62,
32541,
62,
44603,
796,
302,
13,
5589,
576,
... | 2.75 | 112 |
# -*- coding: utf-8 -*-
"""
:mod:`orion.core.worker.consumer` -- Evaluate objective on a set of parameters
==============================================================================
.. module:: consumer
:platform: Unix
:synopsis: Call user's script as a black box process to evaluate a trial.
"""
import logging
import os
import signal
import subprocess
import tempfile
import orion.core
from orion.core.io.orion_cmdline_parser import OrionCmdlineParser
from orion.core.utils.working_dir import WorkingDir
from orion.core.worker.trial_pacemaker import TrialPacemaker
log = logging.getLogger(__name__)
# pylint: disable = unused-argument
class ExecutionError(Exception):
"""Error raised when Orion is unable to execute the user's script without errors."""
pass
class Consumer(object):
"""Consume a trial by using it to initialize a black-box box to evaluate it.
It uses an `Experiment` object to push an evaluated trial, if results are
delivered to the worker process successfully.
It forks another process which executes user's script with the suggested
options. It expects results to be written in a **JSON** file, whose path
has been defined in a special orion environmental variable which is set
into the child process' environment.
"""
def __init__(self, experiment):
"""Initialize a consumer.
:param experiment: Manager of this experiment, provides convenient
interface for interacting with the database.
"""
log.debug("Creating Consumer object.")
self.experiment = experiment
self.space = experiment.space
if self.space is None:
raise RuntimeError("Experiment object provided to Consumer has not yet completed"
" initialization.")
# Fetch space builder
self.template_builder = OrionCmdlineParser(orion.core.config.user_script_config)
self.template_builder.set_state_dict(experiment.metadata['parser'])
# Get path to user's script and infer trial configuration directory
if experiment.working_dir:
self.working_dir = os.path.abspath(experiment.working_dir)
else:
self.working_dir = os.path.join(tempfile.gettempdir(), 'orion')
self.script_path = experiment.metadata['user_script']
self.pacemaker = None
def consume(self, trial):
"""Execute user's script as a block box using the options contained
within `trial`.
:type trial: `orion.core.worker.trial.Trial`
"""
log.debug("### Create new directory at '%s':", self.working_dir)
temp_dir = self.experiment.working_dir is None
prefix = self.experiment.name + "_"
suffix = trial.id
try:
with WorkingDir(self.working_dir, temp_dir,
prefix=prefix, suffix=suffix) as workdirname:
log.debug("## New consumer context: %s", workdirname)
trial.working_dir = workdirname
results_file = self._consume(trial, workdirname)
log.debug("## Parse results from file and fill corresponding Trial object.")
self.experiment.update_completed_trial(trial, results_file)
except KeyboardInterrupt:
log.debug("### Save %s as interrupted.", trial)
self.experiment.set_trial_status(trial, status='interrupted')
raise
except ExecutionError:
log.debug("### Save %s as broken.", trial)
self.experiment.set_trial_status(trial, status='broken')
def get_execution_environment(self, trial, results_file='results.log'):
"""Set a few environment variables to allow users and
underlying processes to know if they are running under orion.
Parameters
----------
results_file: str
file used to store results, this is only used by the legacy protocol
trial: Trial
reference to the trial object that is going to be run
Notes
-----
This function defines the environment variables described below
.. envvar:: ORION_EXPERIMENT_ID
Current experiment that is being ran.
.. envvar:: ORION_EXPERIMENT_NAME
Name of the experiment the worker is currently working on.
.. envvar:: ORION_EXPERIMENT_VERSION
Version of the experiment the worker is currently working on.
.. envvar:: ORION_TRIAL_ID
Current trial id that is currently being executed in this process.
.. envvar:: ORION_WORKING_DIRECTORY
Trial's current working directory.
.. envvar:: ORION_RESULTS_PATH
Trial's results file that is read by the legacy protocol to get the results of the trial
after a successful run.
"""
env = dict(os.environ)
env['ORION_EXPERIMENT_ID'] = str(self.experiment.id)
env['ORION_EXPERIMENT_NAME'] = str(self.experiment.name)
env['ORION_EXPERIMENT_VERSION'] = str(self.experiment.version)
env['ORION_TRIAL_ID'] = str(trial.id)
env['ORION_WORKING_DIR'] = str(trial.working_dir)
env['ORION_RESULTS_PATH'] = str(results_file)
return env
def execute_process(self, cmd_args, environ):
"""Facilitate launching a black-box trial."""
command = [self.script_path] + cmd_args
signal.signal(signal.SIGTERM, _handler)
process = subprocess.Popen(command, env=environ)
return_code = process.wait()
if return_code != 0:
raise ExecutionError("Something went wrong. Check logs. Process "
"returned with code {} !".format(return_code))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
25,
4666,
25,
63,
273,
295,
13,
7295,
13,
28816,
13,
49827,
63,
1377,
26439,
4985,
9432,
319,
257,
900,
286,
10007,
198,
23926,
25609,
855,
198,
198,
492,
... | 2.657459 | 2,172 |
import re
from win32com.client import Dispatch
# Acquire a COM handle to Excel; fall back to None so the module can still be
# imported on machines without Excel/pywin32.
try:
    app = Dispatch('Excel.Application')
except Exception:
    app = None
# wb = app.Workbooks.Open(filename)
# Rows whose text does not contain this pattern are deleted by
# DeleteUnmatchedRows (via NotHasPattern -- presumably; confirm against that helper).
default_pattern = 'Total'
def DeleteUnmatchedRows():
    """Delete every populated row that does not match the pattern.

    Excel renumbers rows as they are deleted, so consecutive matching rows
    may survive a single pass; run the function repeatedly until nothing is
    left to delete.
    """
    # Excel's Rows collection does not support Python iteration, hence the
    # explicit GetPopulatedRows() helper instead of itertools-style filtering.
    unmatched = (row for row in GetPopulatedRows() if NotHasPattern(row))
    for doomed in unmatched:
        print('deleting row', CleanRow(doomed))
        doomed.Delete()
| [
11748,
302,
198,
198,
6738,
1592,
2624,
785,
13,
16366,
1330,
35934,
628,
198,
28311,
25,
198,
220,
220,
220,
598,
796,
35934,
10786,
3109,
5276,
13,
23416,
11537,
198,
16341,
35528,
25,
198,
220,
220,
220,
598,
796,
6045,
198,
198,
... | 2.729167 | 240 |
import os
import signal
import time
import threading
import BaseHTTPServer
import SimpleHTTPServer
# Python 2 script (BaseHTTPServer): serve the current directory on port 8000
# and watch for requests involving the files listed below.
# Names are lower-cased here -- presumably for case-insensitive comparison in
# SimpleHTTPRequestHandlerFileWatcher (defined elsewhere); confirm.
watched_files = ['SmartVA-Analyze.exe'.lower()]
start = time.time()
# NOTE(review): run_time appears unused in this fragment -- confirm.
run_time = 60
httpd = BaseHTTPServer.HTTPServer(('0.0.0.0', 8000), SimpleHTTPRequestHandlerFileWatcher)
print('{address[0]}:{address[1]} - - Listening...'.format(address=httpd.server_address))
# Serve in a daemon thread so the main thread stays free for signal handling.
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
# `shutdown` (defined elsewhere in this file) stops the server on Ctrl-C/SIGTERM.
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
# Idle loop; the signal handlers do the actual shutdown work.
while True:
    time.sleep(1)
| [
11748,
28686,
198,
11748,
6737,
198,
11748,
640,
198,
11748,
4704,
278,
198,
198,
11748,
7308,
6535,
28820,
18497,
198,
11748,
17427,
6535,
28820,
18497,
198,
198,
86,
14265,
62,
16624,
796,
37250,
25610,
11731,
12,
37702,
2736,
13,
13499... | 2.720588 | 204 |
# -*- coding: utf-8 -*-
"""
flask_security
~~~~~~~~~~~~~~
Flask-Security is a Flask extension that aims to add quick and simple
security via Flask-Login, Flask-Principal, Flask-WTF, and passlib.
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
# Monkey patch Werkzeug 2.1
# Flask-Login uses the safe_str_cmp method which has been removed in Werkzeug
# 2.1. Flask-Login v0.6.0 (yet to be released at the time of writing) fixes the
# issue. Once we depend on Flask-Login v0.6.0 as the minimal version in
# Flask-Security-Invenio/Invenio-Accounts we can remove this patch again.
try:
    # Werkzeug <2.1
    from werkzeug import security
    # Attribute probe: raises AttributeError on Werkzeug >=2.1, where
    # safe_str_cmp was removed.
    security.safe_str_cmp
except AttributeError:
    # Werkzeug >=2.1: restore safe_str_cmp using the stdlib constant-time
    # comparison, which is what Flask-Login <0.6.0 expects to find.
    import hmac
    from werkzeug import security
    security.safe_str_cmp = hmac.compare_digest
from .core import AnonymousUser, RoleMixin, Security, UserMixin, current_user
from .datastore import SQLAlchemySessionUserDatastore, SQLAlchemyUserDatastore
from .decorators import auth_required, login_required, roles_accepted, \
roles_required
from .forms import ConfirmRegisterForm, ForgotPasswordForm, LoginForm, \
RegisterForm, ResetPasswordForm
from .signals import confirm_instructions_sent, password_reset, \
reset_password_instructions_sent, user_confirmed, user_registered
from .utils import login_user, logout_user, url_for_security
__version__ = '3.1.3'
__all__ = (
'AnonymousUser',
'auth_required',
'confirm_instructions_sent',
'ConfirmRegisterForm',
'current_user',
'ForgotPasswordForm',
'login_required',
'login_user',
'LoginForm',
'logout_user',
'password_reset',
'RegisterForm',
'reset_password_instructions_sent',
'ResetPasswordForm',
'RoleMixin',
'roles_accepted',
'roles_required',
'Security',
'SQLAlchemySessionUserDatastore',
'SQLAlchemyUserDatastore',
'url_for_security',
'user_confirmed',
'user_registered',
'UserMixin',
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
42903,
62,
12961,
198,
220,
220,
220,
220,
15116,
8728,
4907,
628,
220,
220,
220,
46947,
12,
24074,
318,
257,
46947,
7552,
326,
12031,
284,
... | 2.76881 | 731 |
import torch
import numpy as np
# --- Tensor construction -------------------------------------------------
x = torch.empty(5, 3)  # uninitialized memory; values are garbage
print(x)
x = torch.rand(5, 3)  # uniform random in [0, 1)
print(x)
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
x = torch.tensor([5.5, 3])  # build directly from data
print(x)
x = x.new_ones(5, 3, dtype=torch.double)      # new_* methods take in sizes
print(x)
x = torch.randn_like(x, dtype=torch.float)    # same shape as x; override dtype!
print(x)                                      # result has the same size
print(x.size())
# --- Operations: several equivalent syntaxes for addition ---------------
y = torch.rand(5, 3)
print(x + y)
print(torch.add(x, y))
result = torch.empty(5, 3)
torch.add(x, y, out=result)  # write into a preallocated output tensor
print(result)
# In-place variant (trailing underscore): adds x to y, mutating y
y.add_(x)
print(y)
# NumPy-like indexing with all bells and whistles!
print(x[:, 1])
# --- Reshaping with view ------------------------------------------------
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # the size -1 is inferred from other dimensions
print(x.size(), y.size(), z.size())
# If you have a one-element tensor, use .item() to get the value as a Python number
x = torch.randn(1)
print(x)
print(x.item())
# --- Torch Tensor -> NumPy array ----------------------------------------
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
# --- NumPy array -> Torch Tensor ----------------------------------------
# torch.from_numpy shares the underlying buffer with the NumPy array, so the
# in-place np.add below is visible through b as well.
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)
print(a)
print(b)
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
40280,
669,
198,
198,
87,
796,
28034,
13,
28920,
7,
20,
11,
513,
8,
198,
4798,
7,
87,
8,
198,
198,
87,
796,
28034,
13,
25192,
7,
20,
11,
513,
8,
198,
4798,
7,
87,
... | 2.335953 | 509 |
import logging
from django.http import Http404
from django.utils.encoding import smart_unicode
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)
# Public API; LoggingMiddleware is defined further down in this module.
__all__ = ('LoggingMiddleware',)
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
198,
6738,
42625,
14208,
13,
26791,
13,
12685,
7656,
1330,
4451,
62,
46903,
1098,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,... | 3 | 56 |
import schedule
import time
from up_fan_rank import stat
# NOTE: schedule.every().minutes fires once per minute. (The original note
# said "every 10 minutes"; that would be schedule.every(10).minutes.)
schedule.every().minutes.do(stat,'fans') # collect follower ('fans') stats every minute
schedule.every().minutes.do(stat,'playNum') # collect play-count stats every minute
# Alternative schedules kept for reference:
#   schedule.every(10).minutes.do(...)          -- every 10 minutes
#   schedule.every().hour.do(...)               -- hourly
#   schedule.every().day.at("22:30").do(...)    -- daily at 22:30
#   schedule.every().monday.do(...)             -- weekly on Monday
#   schedule.every().wednesday.at("13:15").do(...)  -- Wednesdays at 13:15
#   schedule.every().minute.at(":30").do(...)   -- at second 30 of every minute
while True:
    schedule.run_pending() # run every job that is due
    time.sleep(1)
11748,
7269,
198,
11748,
640,
198,
6738,
510,
62,
24408,
62,
43027,
1330,
1185,
198,
198,
15952,
5950,
13,
16833,
22446,
1084,
1769,
13,
4598,
7,
14269,
4032,
69,
504,
11537,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 1.523524 | 999 |
import requests
import json
import models.request as xacml_request
from utils import ClassEncoder
| [
11748,
7007,
198,
11748,
33918,
198,
198,
11748,
4981,
13,
25927,
355,
2124,
330,
4029,
62,
25927,
198,
6738,
3384,
4487,
1330,
5016,
27195,
12342,
198
] | 3.807692 | 26 |
from objects.modulebase import ModuleBase
from utils.funcs import find_image
API_URL = 'https://api.tsu.sh/google/ocr'
| [
6738,
5563,
13,
21412,
8692,
1330,
19937,
14881,
198,
198,
6738,
3384,
4487,
13,
12543,
6359,
1330,
1064,
62,
9060,
628,
198,
17614,
62,
21886,
796,
705,
5450,
1378,
15042,
13,
912,
84,
13,
1477,
14,
13297,
14,
1696,
6,
198
] | 2.97561 | 41 |
### $Id$
### $URL$
import os
from sfa.util.storage import *
| [
21017,
720,
7390,
3,
198,
21017,
720,
21886,
3,
198,
198,
11748,
28686,
198,
198,
6738,
264,
13331,
13,
22602,
13,
35350,
1330,
1635,
198,
220,
198
] | 2.37037 | 27 |
from __future__ import annotations
class VaultFlags(str):
    """Class that represents flags that may be used for a vaulted
    function to tweak the behavior of the vaulted function.

    Each flag is a tagged string; the static factory methods below return the
    canonical instance for each flag.

    Fix: the factory methods previously used f-strings as "docstrings". An
    f-string is a runtime expression, not a string literal, so it was
    re-evaluated on every call and was never stored in ``__doc__`` (and
    interpolations such as ``{VaultFlags.silent}`` rendered as a function
    repr). They are plain docstrings now; flag values are unchanged.
    """

    _RETURN_VALUES_CANNOT_BE_NONE = "return_values_cannot_be_none"
    _PERMIT_MODIFICATIONS = "permit_modifications"
    _INPUT_KEY_CAN_BE_MISSING = "input_key_can_be_missing"
    _CLEAN_RETURN_KEYS = "clean_return_keys"
    _DEBUG = "debug"
    _SILENT = "silent"
    _LIVE_UPDATE = "live_update"
    _RETURN_TUPLE_IS_SINGLE_ITEM = "return_tuple_is_single_item"
    _SPLIT_RETURN_KEYS = "split_return_keys"
    _FILE_IS_READ_ONLY = "file_is_read_only"
    _DISABLE_LOGGER = "disable_logger"
    _IGNORE_KEYS_NOT_IN_KEYRING = "ignore_keys_not_in_keyring"
    _REMOVE_EXISTING_LOG_FILE = "remove_existing_log_file"
    _RETURN_KEY_CAN_BE_MISSING = "return_key_can_be_missing"
    _NO_ERROR_LOGGING = "no_error_logging"

    @staticmethod
    def flag_is_set(flag: VaultFlags, *flags: VaultFlags):
        """This is not a flag. This function checks if a flag exists among a bunch of flags."""
        return flag in flags

    @staticmethod
    def return_values_cannot_be_none():
        """Flag to set if return values must be something other than None.
        By default, None is fine, but you can enforce return variables to be
        something other than None."""
        return VaultFlags(VaultFlags._RETURN_VALUES_CANNOT_BE_NONE)

    @staticmethod
    def permit_modifications():
        """Flag to set if variables may be modified either in the vault itself
        or for a specific decorated function.
        By default, varvault doesn't permit modifications to existing keys as
        this can cause unintended behavior."""
        return VaultFlags(VaultFlags._PERMIT_MODIFICATIONS)

    @staticmethod
    def input_key_can_be_missing():
        """Flag to set if an input variable may be missing in a vault when it
        is accessed. In this case, the key will be sent to kwargs but it will
        be mapped to None."""
        return VaultFlags(VaultFlags._INPUT_KEY_CAN_BE_MISSING)

    @staticmethod
    def clean_return_keys():
        """Flag to clean return keys in a vault defined for a decorated
        function. This can be used during a cleanup stage.
        Varvault will try to map the key to a default value for the valid
        type, like for example str(), or list(). If it doesn't work, the key
        will be mapped to None."""
        return VaultFlags(VaultFlags._CLEAN_RETURN_KEYS)

    @staticmethod
    def debug():
        """Flag to enable debug mode for logger output to the console to help
        you with debugging. By default, varvault will write debug logs to the
        logfile, but not the console.
        By setting this, you'll have a much easier time debugging unintended
        behavior. Using this and ``VaultFlags.silent`` in conjunction will
        cancel each other out and make logging the default."""
        return VaultFlags(VaultFlags._DEBUG)

    @staticmethod
    def silent():
        """Flag to enable silent mode for a vault. This will completely remove
        debug logs being written to the logfile. This can be used to reduce
        unnecessary bloat and make debugging much more easy to do. Using this
        and ``VaultFlags.debug`` in conjunction will cancel each other out and
        make logging the default."""
        return VaultFlags(VaultFlags._SILENT)

    @staticmethod
    def live_update():
        """Flag to enable live-update of a vault file. If this is set, the
        vault will try to update its contents from an existing vault file if
        the contents of the file has changed since last time (this is
        determined by getting an md5 hash of the contents of the file). The
        live-update is only performed when the vault is accessed via the
        decorator."""
        return VaultFlags(VaultFlags._LIVE_UPDATE)

    @staticmethod
    def return_tuple_is_single_item():
        """Flag to tell varvault that the return value is a tuple that should
        be mapped to a single return-key. Varvault cannot tell if a tuple is
        multiple return values or a single item meant for a single key as this
        is how Python handles multiple return values."""
        return VaultFlags(VaultFlags._RETURN_TUPLE_IS_SINGLE_ITEM)

    @staticmethod
    def split_return_keys():
        """Flag to tell varvault that the return keys provided in a MiniVault
        being returned are split between multiple vaults decorating the same
        function.
        By default, any return values from a decorated function must be able
        to be mapped to the keys defined as return keys. If two vaults are
        taking return values separately, this wouldn't be possible. Usage of
        this flag REQUIRES that the return value is a MiniVault-object."""
        return VaultFlags(VaultFlags._SPLIT_RETURN_KEYS)

    @staticmethod
    def file_is_read_only():
        """Flag to tell varvault that a vault-file used to create a vault from
        is read-only."""
        return VaultFlags(VaultFlags._FILE_IS_READ_ONLY)

    @staticmethod
    def disable_logger():
        """Flag to tell varvault to disable logger completely and not log
        anything to a log-file."""
        return VaultFlags(VaultFlags._DISABLE_LOGGER)

    @staticmethod
    def ignore_keys_not_in_keyring():
        """Flag to ignore keys not in keyring when creating a vault from an
        existing vault-file. If ``VaultFlags.file_is_read_only`` is enabled,
        this will be enabled by default."""
        return VaultFlags(VaultFlags._IGNORE_KEYS_NOT_IN_KEYRING)

    @staticmethod
    def remove_existing_log_file():
        """Flag to tell varvault to delete an existing log file when creating
        a vault from an existing vault-file."""
        return VaultFlags(VaultFlags._REMOVE_EXISTING_LOG_FILE)

    @staticmethod
    def return_key_can_be_missing():
        """Flag to tell varvault when using a vaulter-decorated function and
        not returning objects for all keys to not fail and just set the keys
        defined.
        If this is set, the return variables MUST be inside a MiniVault
        object, otherwise varvault cannot determine what variable belongs to
        what key."""
        return VaultFlags(VaultFlags._RETURN_KEY_CAN_BE_MISSING)

    @staticmethod
    def no_error_logging():
        """Flag to tell a vaulter-decorated function to not log exceptions.
        Exceptions can sometimes be expected, and sometimes it might be
        preferable to not log errors using varvault and just log them
        normally."""
        return VaultFlags(VaultFlags._NO_ERROR_LOGGING)
| [
6738,
11593,
37443,
834,
1330,
37647,
628,
198,
4871,
23450,
40053,
7,
2536,
2599,
198,
220,
220,
220,
37227,
9487,
326,
6870,
9701,
326,
743,
307,
973,
329,
257,
22563,
276,
198,
220,
220,
220,
2163,
284,
25393,
262,
4069,
286,
262,
... | 3.021147 | 2,128 |
# Copyright (c) 2010-2020 openpyxlzip
import pytest
from openpyxlzip.xml.functions import fromstring, tostring
from openpyxlzip.tests.helper import compare_xml
@pytest.mark.parametrize("value, expected",
[
("&9", [('', '', '9')]),
('&"Lucida Grande,Standard"', [("Lucida Grande,Standard", '', '')]),
('&K000000', [('', '000000', '')])
]
)
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
2,
15069,
357,
66,
8,
3050,
12,
42334,
1280,
9078,
87,
75,
13344,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
1280,
9078,
87,
75,
13344,
13,
19875,
13,
12543,
2733,
1330,
422,
8841,
11,
284,
8841,
198,
6738,
1280,
9078,
87,
75,
... | 1.805643 | 319 |
# topic prefix
NX_PREFIX = ""
from thing import Thing
from channel import MaleChannel, FemaleChannel
| [
2,
7243,
21231,
198,
45,
55,
62,
47,
31688,
10426,
796,
13538,
198,
198,
6738,
1517,
1330,
21561,
198,
6738,
6518,
1330,
12674,
29239,
11,
15396,
29239,
628
] | 3.678571 | 28 |
__author__ = "David"
import json
from find_account_playlists import find_account_playlists
from youtube_playlist import load_config
# Load the YouTube playlist configuration once at import time.
config = load_config()
if __name__ == "__main__":
    # NOTE(review): update_existing_uploaders is neither defined nor imported
    # in this module, so running the script raises NameError -- confirm the
    # intended entry point.
    update_existing_uploaders()
834,
9800,
834,
796,
366,
11006,
1,
198,
198,
11748,
33918,
198,
6738,
1064,
62,
23317,
62,
1759,
20713,
1330,
1064,
62,
23317,
62,
1759,
20713,
198,
6738,
35116,
62,
1759,
4868,
1330,
3440,
62,
11250,
198,
198,
11250,
796,
3440,
62,
... | 3.205882 | 68 |
import matplotlib.pyplot as plt
import csv
# Read the whole CSV into a list of rows (one list of strings per row).
file = open("tricky.csv")
reader = csv.reader(file)
data = []
for row in reader:
    data.append(row)
# NOTE(review): make_int is not defined or imported in this fragment;
# presumably it converts the string cells to integers -- confirm.
data = make_int(data)
# Keep every 3rd value starting from the second one (index 1, step 3).
data10 = []
for x in range(1, len(data), 3):
    data10.append(data[x])
# Histogram of the sampled values in 6 bins, with 90%-wide bars.
plt.hist(data10, 6, rwidth=0.9)
plt.grid(True)
plt.xlabel("values ")
plt.ylabel("frequency")
plt.title("tricky every 3. number(from second)")
plt.show()
# Show the first ten sampled values for a quick sanity check.
print(data10[:10])
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
269,
21370,
628,
198,
7753,
796,
1280,
7203,
2213,
17479,
13,
40664,
4943,
198,
46862,
796,
269,
21370,
13,
46862,
7,
7753,
8,
198,
7890,
796,
17635,
198,
1640,... | 2.348315 | 178 |
# -*- coding: utf-8 -*- | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12
] | 1.769231 | 13 |
import os
import re
from .execution.denonExecuter import DenonExecuter
from .execution.foobarExecuter import FoobarExecuter
from .execution.kodiExecuter import KodiExecuter
from .execution.tvExecuter import TVExecuter
from .execution.profileExecuter import ProfileExecuter
from .client.denonClient import DenonClient
from .client.foobarClient import FoobarClient
from .client.kodiClient import KodiClient
from .client.samsungtv import SamsungTVClient
# run execution
# e.g. profile.nintendoSwitch()
# instantiate executor and belonging client
| [
11748,
28686,
198,
11748,
302,
198,
6738,
764,
18558,
1009,
13,
6559,
261,
23002,
11894,
1330,
5601,
261,
23002,
11894,
198,
6738,
764,
18558,
1009,
13,
6513,
30973,
23002,
11894,
1330,
19434,
30973,
23002,
11894,
198,
6738,
764,
18558,
1... | 3.515723 | 159 |
import logging
import sys
from . import utils
from .networks import *
| [
11748,
18931,
198,
11748,
25064,
198,
198,
6738,
764,
1330,
3384,
4487,
198,
6738,
764,
3262,
5225,
1330,
1635,
628
] | 3.6 | 20 |
import sys
# Fast-input boilerplate for competitive programming.
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from collections import Counter
s = readline().rstrip().decode()
# The answer is YES exactly when each of the digits 1, 4, 7 and 9 occurs
# at least once in the input string.
required = {'1', '4', '7', '9'}
missing = required - set(s)
print('NO' if missing else 'YES')
| [
11748,
25064,
198,
961,
796,
25064,
13,
19282,
259,
13,
22252,
13,
961,
198,
961,
1370,
796,
25064,
13,
19282,
259,
13,
22252,
13,
961,
1370,
198,
961,
6615,
796,
25064,
13,
19282,
259,
13,
22252,
13,
961,
6615,
198,
17597,
13,
2617... | 2.588235 | 153 |
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os
import sys
import torch
from torch.utils.data import DataLoader
# PROJ ROOT DIR
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
ROOT_PATH = os.path.join(DIR_PATH, os.path.pardir)
sys.path.append(ROOT_PATH)
# PROJ LIBRARY
import pipeline.constants as const
from model.centerNet import Resnet18FeatureExtractor
from model.utils import Criterion
from dataLoader.dataLoader import ProjDataLoader
from dataLoader.fusedDataLoader import FusedProjDataLoader
class ModelSetup(object):
"""
Model setup class to configure model and dataloader
"""
model_data_loader_switcher = {
"depth": ProjDataLoader,
"fused": FusedProjDataLoader,
} | [
7061,
6,
198,
15269,
357,
66,
8,
12131,
11,
15127,
23929,
44680,
6234,
13,
1439,
2489,
10395,
13,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
428,
3788,
290,
3917,
10314,
3... | 3.526839 | 503 |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import requests
import time
import os
import csv
root_url = "https://seekingalpha.com"
query = "stock repurchase program"
# Build the search URL; spaces in the query become '+' signs.
# Fix: the original called query.replcae(...), a typo that raised
# AttributeError before the scrape even started.
url = "https://seekingalpha.com/search?q=" + query.replace(" ", "+")

chrome_driver_path = "/usr/lib/chromium-browser/chromedriver"  # add your own driver path
opts = Options()
opts.add_argument("--headless")
opts.add_argument("--no-sandbox")
opts.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36")

driver = webdriver.Chrome(chrome_driver_path, options=opts)
driver.get(url)
time.sleep(5)  # crude wait for the search results to render

soup = BeautifulSoup(driver.page_source, 'lxml')
result_list = soup.find("div", {"id": "result_list"})
result_page = result_list.find("div", {"class": "result-pages"})

fields = ['Title', 'Link', 'MetaData', 'Summary']
csv_rows = []
# Walk every results page linked from the pager and scrape each list item.
for a in result_page.find_all("a"):
    link = a['href']
    new_url = url + link
    driver.get(new_url)
    time.sleep(5)
    new_soup = BeautifulSoup(driver.page_source, 'lxml')
    new_result_list = new_soup.find("div", {"id": "result_list"})
    items = new_result_list.find_all("li")
    for item in items:
        item_link = item.find("div", {"class": "item-link"})
        item_link_a = item_link.find("a")
        item_meta = item.find("div", {"class": "item-metadata"})
        item_summary = item.find("div", {"class": "item-summary"})
        name = item_link_a.text.replace(" ", "").replace("\n", "")
        link = root_url + item_link_a['href']
        metadata = item_meta.text.replace(" ", "")
        summary = item_summary.text
        csv_rows.append([str(name), str(link), str(metadata), str(summary)])

# newline='' is required for csv.writer: without it the csv module emits an
# extra blank row between records on Windows.
with open("SeekingAlpha.csv", 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(fields)
    csvwriter.writerows(csv_rows)
print("Done")
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
25811,
1330,
18634,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
7007,
198,
11748,
640,
198,
11748,
28686,
198,
1174... | 2.531088 | 772 |
# -*- coding: utf-8 -*-
"""
Practical Algorthns
Problem set: 5.1 - Working with Data Structures
2f) Write a function join which, given two lists, it returns a list in which each element
is a list of two elements, one from each of the given lists. For example:
join( [1,2,3] , ["a","b","c"] )
returns: [ [1, "a"], [2, "b"], [3, "c"] ]
We assume that the given lists both have the same length.
2g) Write split, reverse of the above
"""
"""
merge_lists(list1, list2)
"""
"""
split_list (ilist)
"""
"""
main
"""
# create input lists
list1 = [1,2,3,5]
list2 = ["a","b","c","falanafalana"]
print("Initial lists:")
print(list1)
print(list2)
# Merge the two lists into one list of [x, y] pairs (problem 2f).
# NOTE(review): merge_lists and split_list are only declared above as
# docstring placeholders; their implementations are missing from this file.
olist = merge_lists(list1, list2)
print("Merged list:")
print(olist)
# Split the merged list back into two lists again (problem 2g).
print("List split up again:")
newlist1, newlist2 = split_list(olist)
print(newlist1)
print(newlist2)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
47,
36112,
978,
70,
1506,
5907,
198,
40781,
900,
25,
642,
13,
16,
532,
14594,
351,
6060,
32112,
942,
198,
198,
17,
69,
8,
220,
19430,
257,
2163,
4654,
54... | 2.559524 | 336 |
#!/usr/bin/env python3
# --------------------------------------------------------------------------- #
# The MIT License (MIT) #
# #
# Copyright (c) 2021 Eliud Cabrera Castillo <e.cabrera-castillo@tum.de> #
# #
# Permission is hereby granted, free of charge, to any person obtaining #
# a copy of this software and associated documentation files #
# (the "Software"), to deal in the Software without restriction, including #
# without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit #
# persons to whom the Software is furnished to do so, subject to the #
# following conditions: #
# #
# The above copyright notice and this permission notice shall be included #
# in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL #
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# --------------------------------------------------------------------------- #
"""Auxiliary functions for handling supports."""
import requests
import lbrytools.funcs as funcs
import lbrytools.search as srch
def get_all_supports(server="http://localhost:5279"):
    """Get all supports in a dictionary; all, valid, and invalid.

    Returns
    -------
    dict
        A dictionary with information on the supports.
        The keys are the following:
        - 'all_supports': list with dictionaries of all supports.
        - 'all_resolved': list with dictionaries of all resolved claims
          corresponding to all supports.
          Invalid claims will simply be `False`.
        - 'valid_supports': list with dictionaries of supports
          for valid claims only.
        - 'valid_resolved': list with dictionaries of resolved claims
          corresponding to `'valid_supports'` only.
        - 'invalid_supports': list with dictionaries of supports
          for invalid claims. The claim IDs in these dictionaries
          cannot be resolved anymore.
    False
        If there is a problem or no list of supports, it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False
    payload = {"method": "support_list",
               "params": {"page_size": 99000}}
    response = requests.post(server, json=payload).json()
    if "error" in response:
        return False
    items = response["result"]["items"]
    n_items = len(items)
    if n_items < 1:
        print(f"Supports found: {n_items}")
        return False
    valid = []
    valid_resolved = []
    invalid = []
    all_supports = []
    all_resolved = []
    # Try to resolve every supported claim; claims deleted by their authors
    # can no longer be resolved and end up in the "invalid" bucket.
    for support in items:
        claim = srch.search_item(cid=support["claim_id"])
        if claim:
            valid.append(support)
            valid_resolved.append(claim)
        else:
            invalid.append(support)
        all_supports.append(support)
        all_resolved.append(claim)
    return {"all_supports": all_supports,
            "all_resolved": all_resolved,
            "valid_supports": valid,
            "valid_resolved": valid_resolved,
            "invalid_supports": invalid}
def list_supports(claim_id=False, invalid=False,
                  combine=True, claims=True, channels=True,
                  file=None, fdate=False, sep=";",
                  server="http://localhost:5279"):
    """Print supported claims, the amount, and the trending score.
    Parameters
    ----------
    claim_id: bool, optional
        It defaults to `False`, in which case only the name of the claim
        is shown.
        If it is `True` the `'claim_id'` will be shown as well.
    invalid: bool, optional
        It defaults to `False`, in which case it will show all supported
        claims, even those that are invalid.
        If it is `True` it will only show invalid claims. Invalid are those
        which were deleted by their authors, so the claim (channel
        or content) is no longer available in the blockchain.
    combine: bool, optional
        It defaults to `True`, in which case the `global`, `group`, `local`,
        and `mixed` trending scores are added into one combined score.
        If it is `False` it will show the four values separately.
    claims: bool, optional
        It defaults to `True`, in which case supported claims will be shown.
        If it is `False` simple claims won't be shown.
    channels: bool, optional
        It defaults to `True`, in which case supported channels will be shown.
        If it is `False` channel claims (which start with the `@` symbol)
        won't be shown.
    file: str, optional
        It defaults to `None`.
        It must be a user writable path to which the summary will be written.
        Otherwise the summary will be printed to the terminal.
    fdate: bool, optional
        It defaults to `False`.
        If it is `True` it will add the date to the name of the summary file.
    sep: str, optional
        It defaults to `;`. It is the separator character between
        the data fields in the printed summary. Since the claim name
        can have commas, a semicolon `;` is used by default.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.
        Normally, there is no need to change this parameter from its default
        value.
    Returns
    -------
    list
        The list of resolved claims, as returned by `lbrynet resolve`.
        Each item is a dictionary with information from the supported claim
        which may be a stream (video, music, document) or a channel.
    False
        If there is a problem or no list of supports, it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False
    supports = get_all_supports(server=server)
    if not supports:
        return False
    items = supports["all_supports"]
    resolved = supports["all_resolved"]
    n_items = len(items)
    out_list = []
    for num, pair in enumerate(zip(items, resolved), start=1):
        item = pair[0]
        # `s` is the resolved claim, or False when the claim can no longer
        # be resolved (deleted by its author).
        s = pair[1]
        name = item["name"]
        cid = item["claim_id"]
        # Channel claims start with '@'; streams (videos, music, ...) do not.
        is_channel = True if name.startswith("@") else False
        if is_channel and not channels:
            continue
        if not is_channel and not claims:
            continue
        obj = ""
        if claim_id:
            obj += f'"{cid}"' + f"{sep} "
        _name = f'"{name}"'
        # Invalid claims are displayed wrapped in square brackets.
        if not s:
            _name = "[" + _name + "]"
        obj += f'{_name:58s}'
        _amount = float(item["amount"])
        amount = f"{_amount:14.8f}"
        if not s:
            # Invalid claim: fabricate minimal metadata so the arithmetic
            # below still works.
            m = {"support_amount": "0.0"}
            s = {"amount": item["amount"]}
        else:
            # `invalid=True` means show only invalid claims; skip valid ones.
            if invalid:
                continue
            m = s["meta"]
        existing_support = float(s["amount"]) + float(m["support_amount"])
        trend_gl = m.get("trending_global", 0)
        trend_gr = m.get("trending_group", 0)
        trend_loc = m.get("trending_local", 0)
        trend_mix = m.get("trending_mixed", 0)
        combined = (trend_gl
                    + trend_gr
                    + trend_loc
                    + trend_mix)
        tr_gl = f'{trend_gl:7.2f}'
        tr_gr = f'{trend_gr:7.2f}'
        tr_loc = f'{trend_loc:7.2f}'
        tr_mix = f'{trend_mix:7.2f}'
        tr_combined = f'{combined:7.2f}'
        is_spent = item["is_spent"]
        out = f"{num:3d}/{n_items:3d}" + f"{sep} "
        out += f"{obj}" + f"{sep} " + f"{amount}" + f"{sep} "
        out += f"{existing_support:15.8f}" + f"{sep} "
        if not is_spent:
            if combine:
                out += f"combined: {tr_combined}"
            else:
                out += f"mix: {tr_mix}" + f"{sep} "
                out += f"glob: {tr_gl}" + f"{sep} "
                out += f"grp: {tr_gr}" + f"{sep} "
                out += f"loc: {tr_loc}"
        else:
            # Spent supports are skipped entirely.
            continue
        out_list.append(out)
    funcs.print_content(out_list, file=file, fdate=fdate)
    return resolved
def get_base_support(uri=None, cid=None, name=None,
                     server="http://localhost:5279"):
    """Get the existing, base, and our support from a claim.

    Returns
    -------
    dict
        A dictionary with information on the support on a claim.
        The keys are the following:
        - 'canonical_url'
        - 'claim_id'
        - 'existing_support': total support that the claim has;
          this is `'base_support'` + `'old_support'`.
        - 'base_support': support that the claim has without our support.
        - 'old_support': support that we have added to this claim;
          it may be zero if this claim does not have any support from us.
    False
        If there is a problem or no list of supports, it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False
    claim = srch.search_item(uri=uri, cid=cid, name=name, offline=False,
                             server=server)
    if not claim:
        return False
    canonical = claim["canonical_url"]
    claim_cid = claim["claim_id"]
    # Total support visible on the blockchain: the claim's own deposit plus
    # every support attached to it (ours and everyone else's).
    existing = float(claim["amount"]) + float(claim["meta"]["support_amount"])
    payload = {"method": "support_list",
               "params": {"claim_id": claim["claim_id"]}}
    reply = requests.post(server, json=payload).json()
    if "error" in reply:
        return False
    # Sum every support transaction that belongs to our wallet; an empty
    # list simply yields 0.
    our_supports = reply["result"]["items"]
    old_support = sum(float(entry["amount"]) for entry in our_supports)
    return {"canonical_url": canonical,
            "claim_id": claim_cid,
            "existing_support": existing,
            "base_support": existing - old_support,
            "old_support": old_support}
def create_support(uri=None, cid=None, name=None,
                   amount=0.0,
                   server="http://localhost:5279"):
    """Create a new support on the claim.

    Parameters
    ----------
    uri: str
        A unified resource identifier (URI) to a claim on the LBRY network.
        It can be full or partial.
        ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'

        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI.
    amount: float, optional
        It defaults to `0.0`.
        It is the amount of LBC support that will be deposited,
        whether there is a previous support or not.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.

    Returns
    -------
    dict
        A dictionary with information on the result of the support.
        The keys are the following:
        - 'canonical_url': canonical URI of the claim.
        - 'claim_id': unique 40 character alphanumeric string.
        - 'existing_support': existing support before we add ours;
          this is the sum of `base_support` and `old_support`.
        - 'base_support': existing minimum support that we do not control;
          all published claims must have a positive `base_support`.
        - 'old_support': support that we have added to this claim in the past;
          it may be zero.
        - 'new_support': new support that was successfully deposited
          in the claim, equal to `amount`.
        - 'txid': transaction ID in the blockchain that records the operation.
    False
        If there is a problem or non existing claim, or lack of funds,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    # Pass `server` through so the claim is inspected on the same daemon
    # that will create the support (previously the default server was
    # always used regardless of the `server` argument).
    supports = get_base_support(uri=uri, cid=cid, name=name, server=server)

    if not supports:
        return False

    uri = supports["canonical_url"]
    claim_id = supports["claim_id"]
    existing = supports["existing_support"]
    base_support = supports["base_support"]
    old_support = supports["old_support"]

    # A negative input is treated as its magnitude; supports are positive.
    amount = abs(amount)

    # The SDK expects the amount as a string with 8 decimal places.
    msg = {"method": "support_create",
           "params": {"claim_id": claim_id,
                      "amount": f"{amount:.8f}"}}
    output = requests.post(server, json=msg).json()

    if "error" in output:
        error = output["error"]
        if "data" in error:
            print(">>> Error: {}, {}".format(error["data"]["name"],
                                             error["message"]))
        else:
            print(f">>> Error: {error}")
        print(f">>> Requested amount: {amount:.8f}")
        return False

    new_support = amount
    t_input = float(output["result"]["total_input"])
    t_output = float(output["result"]["total_output"])
    t_fee = float(output["result"]["total_fee"])
    txid = output["result"]["txid"]

    out = [f"canonical_url: {uri}",
           f"claim_id: {claim_id}",
           f"Existing support: {existing:14.8f}",
           f"Base support: {base_support:14.8f}",
           f"Old support: {old_support:14.8f}",
           f"New support: {new_support:14.8f}",
           "",
           f"Applied: {new_support:14.8f}",
           f"total_input: {t_input:14.8f}",
           f"total_output: {t_output:14.8f}",
           f"total_fee: {t_fee:14.8f}",
           f"txid: {txid}"]
    print("\n".join(out))

    return {"canonical_url": uri,
            "claim_id": claim_id,
            "existing_support": existing,
            "base_support": base_support,
            "old_support": old_support,
            "new_support": new_support,
            "txid": txid}
def calculate_abandon(claim_id=None, keep=0.0,
                      server="http://localhost:5279"):
    """Send the `support_abandon` RPC and gather the transaction data."""
    payload = {"method": "support_abandon",
               "params": {"claim_id": claim_id}}
    # Only include `keep` when a nonzero remainder was requested.
    if keep:
        payload["params"]["keep"] = f"{keep:.8f}"

    reply = requests.post(server, json=payload).json()

    if "error" in reply:
        err = reply["error"]
        if "data" in err:
            print(">>> Error: {}, {}".format(err["data"]["name"],
                                             err["message"]))
        else:
            print(f">>> Error: {err}")
        print(f">>> Requested amount: {keep:.8f}")
        return False, False

    result = reply["result"]
    applied = keep
    in_total = float(result["total_input"])
    out_total = float(result["total_output"])
    fee_total = float(result["total_fee"])
    tx_hash = result["txid"]

    calc = {"new_support": applied,
            "t_input": in_total,
            "t_output": out_total,
            "t_fee": fee_total,
            "txid": tx_hash}

    text = [f"Applied: {applied:14.8f}",
            f"total_input: {in_total:14.8f}",
            f"total_output: {out_total:14.8f}",
            f"total_fee: {fee_total:14.8f}",
            f"txid: {tx_hash}"]

    return calc, text
def abandon_support(uri=None, cid=None, name=None,
                    keep=0.0,
                    server="http://localhost:5279"):
    """Abandon a support, or change it to a different amount.

    Parameters
    ----------
    uri: str
        A unified resource identifier (URI) to a claim on the LBRY network.
        It can be full or partial.
        ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'

        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI.
    keep: float, optional
        It defaults to `0.0`.
        It is the amount of LBC support that should remain in the claim
        after we remove our previous support; effectively a new support
        value. If it is `0.0` all support is removed.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.

    Returns
    -------
    dict
        A dictionary with information on the result of the support.
        The keys are the following:
        - 'canonical_url': canonical URI of the claim.
        - 'claim_id': unique 40 character alphanumeric string.
        - 'existing_support': existing support before we add or remove ours;
          this is the sum of `base_support` and `old_support`.
        - 'base_support': existing minimum support that we do not control;
          all published claims must have a positive `base_support`.
        - 'old_support': support that we have added to this claim in the past;
          it may be zero.
        - 'new_support': new support that was successfully deposited
          in the claim, equal to `keep`.
        - 'txid': transaction ID in the blockchain that records the operation.
    False
        If there is a problem or non existing claim, or lack of funds,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    # Pass `server` through so we inspect the same daemon we will modify
    # (previously the default server was always used regardless of the
    # `server` argument).
    supports = get_base_support(uri=uri, cid=cid, name=name, server=server)

    if not supports:
        return False

    uri = supports["canonical_url"]
    claim_id = supports["claim_id"]
    existing = supports["existing_support"]
    base_support = supports["base_support"]
    old_support = supports["old_support"]

    calc, text = calculate_abandon(claim_id=claim_id, keep=keep,
                                   server=server)
    if not calc:
        return False

    new_support = calc["new_support"]
    txid = calc["txid"]

    out = [f"canonical_url: {uri}",
           f"claim_id: {claim_id}",
           f"Existing support: {existing:14.8f}",
           f"Base support: {base_support:14.8f}",
           f"Old support: {old_support:14.8f}",
           f"New support: {keep:14.8f}",
           ""]
    out += text
    print("\n".join(out))

    return {"canonical_url": uri,
            "claim_id": claim_id,
            "existing_support": existing,
            "base_support": base_support,
            "old_support": old_support,
            "new_support": new_support,
            "txid": txid}
def abandon_support_inv(invalids=None, cid=None, name=None,
                        keep=0.0,
                        server="http://localhost:5279"):
    """Abandon or change our support on an 'invalid' (unresolvable) claim.

    Parameters
    ----------
    invalids: list of dict, optional
        Support entries for invalid claims, i.e. claims whose output has
        been spent so they no longer resolve online, although they may
        still carry our support. When `None`, the list is obtained
        from `get_all_supports()['invalid_supports']`.
    cid: str, optional
        A `'claim_id'` (40 character alphanumeric string) to search for.
    name: str, optional
        A claim name to search for; normally the last part of a full URI.
    keep: float, optional
        It defaults to `0.0`.
        LBC support that should remain on the claim after removing ours;
        `0.0` removes the support entirely.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        Address of the running `lbrynet` daemon.

    Returns
    -------
    dict
        Keys: 'claim_name' (the canonical URI is unavailable because the
        claim no longer resolves), 'claim_id', 'existing_support' (same as
        'old_support' here), 'base_support' (zero for invalid claims),
        'old_support' (our past support; nonzero by construction since we
        only operate on previously supported claims), 'new_support'
        (equal to `keep`), and 'txid' (transaction ID of the operation).
    False
        If there is a problem, no matching claim, or lack of funds.
    """
    if not funcs.server_exists(server=server):
        return False

    if not cid and not name:
        print(80 * "-")
        print(f'cid={cid}\n'
              f'name="{name}"')
        return False

    if not invalids:
        all_supports = get_all_supports(server=server)
        if not all_supports:
            return False
        invalids = all_supports["invalid_supports"]

    existing = 0
    base_support = 0
    old_support = 0
    found = False

    # Scan every entry; when several match, the last match wins.
    for entry in invalids:
        if ((cid and cid in entry["claim_id"])
                or (name and name in entry["name"])):
            existing = float(entry["amount"])
            old_support = float(entry["amount"])
            claim_id = entry["claim_id"]
            c_name = entry["name"]
            found = True

    if not found:
        print(80 * "-")
        print("Claim not found among the invalid claims")
        print(f'cid={cid}\n'
              f'name="{name}"')
        return False

    calc, text = calculate_abandon(claim_id=claim_id, keep=keep,
                                   server=server)
    if not calc:
        return False

    summary = [f"claim_name: {c_name}",
               f"claim_id: {claim_id}",
               f"Existing support: {existing:14.8f}",
               f"Base support: {base_support:14.8f}",
               f"Old support: {old_support:14.8f}",
               f"New support: {keep:14.8f}",
               ""]
    summary += text
    print("\n".join(summary))

    return {"claim_name": c_name,
            "claim_id": claim_id,
            "existing_support": existing,
            "base_support": base_support,
            "old_support": old_support,
            "new_support": calc["new_support"],
            "txid": calc["txid"]}
def target_support(uri=None, cid=None, name=None,
                   target=0.0,
                   server="http://localhost:5279"):
    """Add an appropriate amount of LBC to reach a target support.

    Parameters
    ----------
    uri: str
        A unified resource identifier (URI) to a claim on the LBRY network.
        It can be full or partial.
        ::
            uri = 'lbry://@MyChannel#3/some-video-name#2'
            uri = '@MyChannel#3/some-video-name#2'
            uri = 'some-video-name'

        The URI is also called the `'canonical_url'` of the claim.
    cid: str, optional
        A `'claim_id'` for a claim on the LBRY network.
        It is a 40 character alphanumeric string.
    name: str, optional
        A name of a claim on the LBRY network.
        It is normally the last part of a full URI.
    target: float, optional
        It defaults to `0.0`.
        It is the amount of LBC support that we want the claim to have
        at the end of our support.
        For example, if the current support is `100`, and we specify a target
        of `500`, we will be supporting the claim with `400`
        in order to reach the target.
    server: str, optional
        It defaults to `'http://localhost:5279'`.
        This is the address of the `lbrynet` daemon, which should be running
        in your computer before using any `lbrynet` command.

    Returns
    -------
    dict
        A dictionary with information on the result of the support.
        The keys are the following:
        - 'canonical_url': canonical URI of the claim.
        - 'claim_id': unique 40 character alphanumeric string.
        - 'existing_support': existing support before we add or remove ours;
          this is the sum of `base_support` and `old_support`.
        - 'base_support': existing minimum support that we do not control;
          all published claims must have a positive `base_support`.
        - 'old_support': support that we have added to this claim in the past;
          it may be zero.
        - 'target': target support that we want after running this method.
          It must be a positive number.
        - 'must_add': amount of support that we must add or remove (negative)
          to reach the `target`; it may be zero if `target`
          is already below the `base_support`.
        - 'new_support': new support that was successfully deposited
          in the claim; it may be zero if `target` is already below
          the `base_support`, or if `old_support` already satisfies
          our `target`.
        - 'txid': transaction ID in the blockchain that records the operation;
          it may be `None` if the transaction was not made because the
          `target` was already achieved before applying additional support.
    False
        If there is a problem or non existing claim, or lack of funds,
        it will return `False`.
    """
    if not funcs.server_exists(server=server):
        return False

    # Pass `server` through so we inspect the same daemon we will modify
    # (previously the default server was always used regardless of the
    # `server` argument).
    supports = get_base_support(uri=uri, cid=cid, name=name, server=server)

    if not supports:
        return False

    uri = supports["canonical_url"]
    claim_id = supports["claim_id"]
    existing = supports["existing_support"]
    base_support = supports["base_support"]
    old_support = supports["old_support"]

    # A negative input is treated as its magnitude; targets are positive.
    target = abs(target)

    out = [f"canonical_url: {uri}",
           f"claim_id: {claim_id}",
           f"Existing support: {existing:14.8f}",
           f"Base support: {base_support:14.8f}",
           f"Old support: {old_support:14.8f}",
           "",
           f"Target: {target:14.8f}"]

    new_support = 0.0
    must_add = 0.0

    if target > base_support:
        # Target above base, calculate addition
        must_add = target - existing
        new_support = old_support + must_add
    elif target < base_support:
        if not old_support:
            # Target below base support, and no old support, nothing to add,
            # reset support to 0
            pass
        else:
            # Target below base support, and old support, remove it
            must_add = -old_support
    else:
        # Same target as base support, nothing to add, reset support to 0
        pass

    out.append(f"Must add: {must_add:14.8f}")
    out.append(f"New support: {new_support:14.8f}")

    applied = 0.0
    t_input = 0.0
    t_output = 0.0
    t_fee = 0.0
    txid = None

    # The SDK accepts the amount as a string, not directly as a number.
    # The minimum amount is 0.00000001, so we convert all quantities
    # to have 8 decimal significant numbers.
    #
    # Only perform the transaction if the new support is different
    # from the old support
    if new_support != old_support:
        if not old_support and new_support > 0:
            # No existing support, so we create it
            msg = {"method": "support_create",
                   "params": {"claim_id": claim_id,
                              "amount": f"{new_support:.8f}"}}
        else:
            # Existing support, so we update it with the new value
            msg = {"method": "support_abandon",
                   "params": {"claim_id": claim_id,
                              "keep": f"{new_support:.8f}"}}
        output = requests.post(server, json=msg).json()

        if "error" in output:
            error = output["error"]
            if "data" in error:
                print(">>> Error: {}, {}".format(error["data"]["name"],
                                                 error["message"]))
            else:
                print(f">>> Error: {error}")
            print(f">>> Requested amount: {new_support:.8f}")
            return False

        applied = new_support
        t_input = float(output["result"]["total_input"])
        t_output = float(output["result"]["total_output"])
        t_fee = float(output["result"]["total_fee"])
        txid = output["result"]["txid"]

    out += ["",
            f"Applied: {applied:14.8f}",
            f"total_input: {t_input:14.8f}",
            f"total_output: {t_output:14.8f}",
            f"total_fee: {t_fee:14.8f}",
            f"txid: {txid}"]
    print("\n".join(out))

    return {"canonical_url": uri,
            # Return the resolved claim ID, not the raw `cid` argument,
            # which may be None or partial when `uri` or `name` was used.
            "claim_id": claim_id,
            "existing_support": existing,
            "base_support": base_support,
            "old_support": old_support,
            "target": target,
            "must_add": must_add,
            "new_support": new_support,
            "txid": txid}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
16529,
32284,
1303,
198,
2,
383,
17168,
13789,
357,
36393,
8,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.261122 | 13,959 |
import os
import shutil
import send2trash

# Collect every non-.py file in the working directory into a folder named
# after this script, then move that folder to the trash (recoverable).
fname = __file__ + "_folder"

# os.makedirs is portable and injection-safe, unlike shelling out
# to `mkdir` with a concatenated string.
if not os.path.isdir(fname):
    os.makedirs(fname)

try:
    for dosya in os.listdir():
        # Skip Python sources, and skip the destination folder itself:
        # it appears in os.listdir() too, and moving it into itself
        # would raise shutil.Error.
        if not dosya.endswith(".py") and dosya != os.path.basename(fname):
            shutil.move(dosya, fname)
    send2trash.send2trash(fname)
except shutil.Error:
    # A same-named file already exists in the destination, e.g. when
    # the script is run more than once ("you seem to have run this
    # script more than once" in Turkish).
    print("Bu Script'i birden fazla çalıştırmışsınız gibi...")
11748,
28686,
11,
1477,
22602,
11,
21280,
17,
2213,
1077,
201,
198,
201,
198,
69,
3672,
796,
11593,
7753,
834,
10,
1,
62,
43551,
1,
201,
198,
361,
7,
1662,
28686,
13,
6978,
13,
9409,
343,
7,
69,
3672,
8,
2599,
201,
198,
220,
220... | 1.905759 | 191 |
from flask import Flask, render_template, request, redirect, url_for, send_from_directory
import pypyodbc
#import sqlite3 as sql
# import pyodbc
from datetime import datetime
from datetime import timedelta
import csv
import os
app = Flask(__name__)
import sqlite3
##################
# NOTE(review): database credentials are hard-coded and committed to
# source control; move them to environment variables or a secrets file
# and rotate this password.
server = 'ilwin.database.windows.net'
database = 'ilwin'
username = 'ilwin'
password = 'esxi@S5n'
driver = '{SQL Server}'
# Module-level connection and cursor shared by all requests.
# NOTE(review): presumably not thread-safe under a multi-threaded WSGI
# server -- confirm before deploying.
cnxn = pypyodbc.connect("Driver={ODBC Driver 13 for SQL Server};"
                        "Server=tcp:ilwin.database.windows.net;Database=ilwin;Uid=ilwin;Pwd=esxi@S5n;")
# cnxn = pyodbc.connect(
#     'DRIVER=' + driver + ';PORT=1433;SERVER=' + server + ';PORT=1443;DATABASE=' + database + ';UID=' + username + ';PWD=' + password)
cursor = cnxn.cursor()
# NOTE(review): the route decorators below are not followed by any
# function definition in this file -- the view handlers appear to have
# been removed. As written this is invalid; restore the handlers or
# delete the decorators.
@app.route('/')
@app.route('/uploadCSV',methods=['POST'])
@app.route('/UI')
@app.route('/test1', methods=['GET', 'POST'])
@app.route('/addrec', methods=['POST', 'GET'])
# @app.route('/search', methods=['POST', 'GET'])
# def search():
# # print("here")
# # Search for 2.0 to 2.5, 2.5 to 3.0… for a week a day or the whole 30 days.
# if request.method == 'POST':
# # print("inside")
# rangeOne = request.form['range1']
# rangeTwo = request.form['range2']
# length = request.form['length']
# print(length)
# if(length=='week'):
# today = datetime.today()
# criteria=today-datetime.timedelta(days=7)
# if(length=='day'):
# today = datetime.date.today()
# criteria = today - datetime.timedelta(days=1)
# if(length=='month'):
# today = datetime.date.today()
# criteria = today - datetime.timedelta(days=30)
#
# print(today)
# print (criteria)
#
#
# # stringToday = today.dateutcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
# # stringCriteria = criteria.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
#
# # query = "select * from EarthquakeTwo where (timee between "+today +" and "+criteria+") and (mag between '"+rangeTwo+"' and '"+rangeTwo+"')"
# query = "select * from EarthquakeTwo where timee between "+today+" and "+criteria
# print(query)
# # cursor.execute(query,(today,criteria))
# result=cursor.fetchone()
# print("code here")
# print(result)
# value=result[0]
# print(value)
#
# return render_template("view.html", msg=value)
# Run the Flask development server when executed directly.
# NOTE(review): debug=True must not be used in production.
if __name__ == '__main__':
    app.run(debug = True)
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
11,
18941,
11,
19016,
62,
1640,
11,
3758,
62,
6738,
62,
34945,
198,
11748,
279,
4464,
88,
375,
15630,
198,
2,
11748,
44161,
578,
18,
355,
44161,
198,
2,
1330,
12972,
375,
1563... | 2.207895 | 1,140 |
# Copyright (C) 2020 Alibaba Group Holding Limited
# IDA Pro plugin bootstrap. This file uses Python 2 syntax (bare `print`
# statement below); `idaapi` is provided by the IDA runtime, and
# idaapi.require re-imports Arm64Utils so edits are picked up on reload.
from Arm64Utils import *
idaapi.require("Arm64Utils")
import tagged_pointers
print "[+] Arm64Preprocessor loaded"
| [
2,
15069,
357,
34,
8,
12131,
41992,
4912,
31703,
15302,
198,
198,
6738,
7057,
2414,
18274,
4487,
1330,
1635,
198,
3755,
15042,
13,
46115,
7203,
26560,
2414,
18274,
4487,
4943,
198,
11748,
30509,
62,
47809,
628,
628,
198,
220,
220,
220,
... | 3.033333 | 60 |
# Interactive registry of people and weights (prompts are in Portuguese).
# cads[0]: names; cads[1]: weights; cads[2]: names that tied the running
# maximum weight *at the time they were entered* (running maxima, not
# necessarily the final heaviest set).
cads = [[], [], []]
soma = 0
while True:
    print('-=-' * 16)
    nome = str(input('Nome: ')).strip().title()
    cads[0].append(nome)
    peso = float(input('Peso: '))
    cads[1].append(peso)
    soma += peso
    # Record the name whenever the new weight equals the maximum so far.
    m = max(cads[1])
    if peso == m:
        cads[2].append(nome)
    resp = str(input('Continuar? (S/N) ')).upper()[0]
    if resp in 'N':
        break
# Indexes of the heaviest and lightest entries.
num = cads[1].index(max(cads[1]))
pum = cads[1].index(min(cads[1]))
print(cads)
print('-=-' * 16)
print(f'São {len(cads[0])} pessoas cadastradas.')
print(f'O maior peso foi {max(cads[1])}Kg de ', end='')
print(f'{cads[0][num]}', end = '')
if len(cads[2]) > 0:
    # NOTE(review): the slice [-1:1] is empty whenever len(cads[2]) > 1,
    # so this usually prints '[]' -- confirm the intended slice.
    print(f'{cads[2][-1:1]}')
else:
    print()
print(f'\nO menor peso foi de {min(cads[1])} de ', end='')
print(f'{cads[0][pum]}')
print('Calculando a média aritmética... fica...')
print(f'{soma} / {len(cads[0])} = {soma / len(cads[0]):.3f} \n ')
| [
66,
5643,
796,
16410,
4357,
685,
4357,
685,
11907,
198,
82,
6086,
796,
657,
198,
198,
4514,
6407,
25,
198,
220,
220,
220,
3601,
10786,
12,
10779,
6,
1635,
1467,
8,
198,
220,
220,
220,
299,
462,
796,
965,
7,
15414,
10786,
45,
462,
... | 1.850103 | 487 |
import time

import apytl

# Demo script for the apytl progress bar: draws NTOTAL steps with a short
# pause between updates, for several fill characters.
NTOTAL = 25
WAIT = 0.01

TEST_BAR = apytl.Bar()

# Demo 1: default fill character.
tstring = 'Are these octothorpes?'
vs = 'V'*len(tstring)
print(tstring)
print(vs)
for step in range(NTOTAL):
    TEST_BAR.drawbar(step, NTOTAL)
    time.sleep(WAIT)
print('')

# Demo 2: every named emoji fill the bar knows about.
for emojikey, emojicode in TEST_BAR._EMOJI.items():
    tstring = 'Are these {} emojis?'.format(emojikey)
    vs = 'V'*len(tstring)
    print(tstring)
    print(vs)
    for step in range(NTOTAL):
        TEST_BAR.drawbar(step, NTOTAL, fill=emojicode)
        time.sleep(WAIT)
    print('')

# Demo 3: a randomly selected fill.
tstring = 'Are these randomly selected?'
vs = 'V'*len(tstring)
print(tstring)
print(vs)
for step in range(NTOTAL):
    TEST_BAR.drawbar(step, NTOTAL, fill='random')
    time.sleep(WAIT)
print('')
| [
11748,
640,
198,
11748,
2471,
20760,
75,
198,
198,
11251,
27510,
796,
1679,
198,
15543,
2043,
796,
657,
13,
486,
198,
198,
51,
6465,
62,
33,
1503,
796,
2471,
20760,
75,
13,
10374,
3419,
198,
198,
83,
8841,
796,
705,
8491,
777,
19318... | 2.20339 | 354 |
import dataclasses
from datetime import timedelta
from enum import Enum
from typing_extensions import Protocol
from typing import Optional, Sequence
import zarr
import xarray as xr
import numpy as np
import fsspec
from vcm.fv3.metadata import gfdl_to_standard
from loaders._config import mapper_functions
from loaders.mappers._base import GeoMapper
from loaders.mappers._xarray import XarrayMapper
from loaders.mappers._fine_res_budget import (
compute_fine_res_sources,
column_integrated_fine_res_nudging_heating,
FineResBudget,
FINE_RES_STATE_NAMES,
FINE_RES_FLUX_NAMES,
)
@dataclasses.dataclass
class DynamicsDifferenceApparentSource:
    """Apparent source computed from the dynamics difference.

    Q = (high_res dyn - coarse dyn) + high_res physics
      = high res (storage - nudge - physics) + high_res physics - coarse dyn
      = high-res storage - high res nudging - coarse dyn tendency
    """
    # Whether to include the fine-res temperature nudging term in Q1.
    include_temperature_nudging: bool
@mapper_functions.register
def open_fine_resolution(
    approach: str,
    fine_url: str,
    include_temperature_nudging: bool = False,
    additional_dataset_urls: Sequence[str] = None,
    use_fine_res_state: bool = True,
    use_fine_res_fluxes: bool = False,
) -> GeoMapper:
    """Build the fine-resolution training mapper.

    Args:
        approach: one of 'apparent_sources_only',
            'apparent_sources_plus_nudging_tendencies',
            'apparent_sources_plus_dynamics_differences', or
            'apparent_sources_extend_lower'.
        fine_url: url where coarsened fine resolution data is stored.
        include_temperature_nudging: whether to include fine-res nudging
            in Q1.
        additional_dataset_urls: urls of zarrs with extra data (e.g. ML
            input features, dynamics nudging tendencies, dynamics
            differences) merged into the mapper dataset, as required by
            the chosen approach.
        use_fine_res_state: point standard-name state variables at the
            fine-res data, e.g. to use fine-res state as ML training
            inputs.
        use_fine_res_fluxes: point standard-name surface and TOA flux
            diagnostics at the fine-res data, e.g. to use fine-res fluxes
            as ML training inputs.

    Returns:
        a mapper
    """
    chosen_approach = Approach[approach]

    dataset: FineResBudget = _open_merged_dataset(
        fine_url=fine_url,
        additional_dataset_urls=additional_dataset_urls,
        use_fine_res_state=use_fine_res_state,
        use_fine_res_fluxes=use_fine_res_fluxes,
    )

    tendencies: MLTendencies = compute_budget(
        dataset,
        chosen_approach,
        include_temperature_nudging=include_temperature_nudging,
    )

    return XarrayMapper(tendencies)
| [
11748,
4818,
330,
28958,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
62,
2302,
5736,
1330,
20497,
198,
6738,
19720,
1330,
32233,
11,
45835,
198,
11748,
1976,
3258,
198,
11748,
2124,
1874... | 2.675726 | 1,067 |
import pytest
import io
from http import client
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import metadata
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.dropbox import DropboxProvider
from waterbutler.providers.dropbox.metadata import DropboxFileMetadata
# NOTE(review): nine bare @pytest.fixture decorators follow with no
# function definitions attached -- the fixture bodies appear to have
# been stripped from this file. As written this is a SyntaxError;
# restore the fixture functions or remove the decorators.
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
198,
11748,
33245,
198,
6738,
2638,
1330,
5456,
198,
198,
11748,
257,
952,
4023,
16100,
198,
198,
6738,
1660,
4360,
1754,
13,
7295,
1330,
15190,
198,
6738,
1660,
4360,
1754,
13,
7295,
1330,
20150,
198,
6738,
166... | 3.141176 | 170 |
import os
import json
from django.core.exceptions import ImproperlyConfigured
path = os.path.dirname(__file__) + '/secrets.json'
with open(path) as f:
secrets = json.loads(f.read())
def get_secret(var_name):
    """Return the value stored for *var_name* in the loaded secrets file.

    Raises ImproperlyConfigured when the key is missing.
    """
    try:
        value = secrets[var_name]
    except KeyError:
        raise ImproperlyConfigured(
            "Set the {} environment variable".format(var_name))
    return value
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_secret('SECRET_KEY')
ALLOWED_HOSTS = get_secret('ALLOWED_HOSTS')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
'users',
'cms',
'social_django',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'
]
ROOT_URLCONF = 'bala7.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'users/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# Social auth
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
# User nav topics
'users.context_processors.add_nav_topics'
],
},
},
]
WSGI_APPLICATION = 'bala7.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/home/najiba/static'
# Media files
ENV_PATH = os.path.abspath(os.path.dirname(__file__))
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = "media/"
# Auth backend settings - Social auth
AUTHENTICATION_BACKENDS = [
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
]
# URL that redirected to after logout, unauthorized page.
LOGIN_REDIRECT_URl = '/users/profile'
LOGIN_URL = '/users/login'
# Social media auth pipline settings
SOCIAL_AUTH_PIPELINE = [
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
'social_core.pipeline.social_auth.associate_by_email',
'users.models.make_social_new_profile',
]
# Storing user choises when completing with social.
SOCIAL_AUTH_FIELDS_STORED_IN_SESSION = ['first_form_data']
# Facebook auth settings
SOCIAL_AUTH_FACEBOOK_KEY = '1907562042790610'
SOCIAL_AUTH_FACEBOOK_SECRET = get_secret('FACEBOOK_SECRET')
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email, age_range'
}
# Twiiter auth settings
SOCIAL_AUTH_TWITTER_KEY = 'lHt8gjwWyvYWSkEdxkSc5C2C8'
SOCIAL_AUTH_TWITTER_SECRET = get_secret('TWITTER_SECRET')
# Celery configuration
CELERY_BROKER_URL = 'amqp://localhost'
# DRF Authentication configuration.
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
SITE_ID = 1 | [
11748,
28686,
198,
11748,
33918,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522,
198,
198,
6978,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
8,
1343,
31051,
2363,
8004,
13,
17752,
... | 2.411294 | 2,249 |
from collections import OrderedDict, defaultdict
from typing import List, Iterable, Optional, Dict
from flask_restplus import fields, Api, Model
from dedoc.config import get_config
from dedoc.data_structures.annotation import Annotation
from dedoc.data_structures.paragraph_metadata import ParagraphMetadata
from dedoc.data_structures.serializable import Serializable
from dedoc.structure_parser.heirarchy_level import HierarchyLevel
from dedoc.data_structures.line_with_meta import LineWithMeta
| [
6738,
17268,
1330,
14230,
1068,
35,
713,
11,
4277,
11600,
198,
6738,
19720,
1330,
7343,
11,
40806,
540,
11,
32233,
11,
360,
713,
198,
6738,
42903,
62,
2118,
9541,
1330,
7032,
11,
5949,
72,
11,
9104,
198,
198,
6738,
4648,
420,
13,
11... | 3.716418 | 134 |
"""
AES-specific mechanism implementations.
"""
import logging
from ctypes import c_void_p, cast, pointer, sizeof
from . import Mechanism
from ..attributes import to_byte_array
from ..cryptoki import (
CK_ULONG,
CK_BYTE,
CK_BYTE_PTR,
CK_AES_XTS_PARAMS,
CK_AES_GCM_PARAMS,
CK_KEY_DERIVATION_STRING_DATA,
CK_AES_CBC_ENCRYPT_DATA_PARAMS,
CK_AES_CTR_PARAMS,
c_ubyte,
)
LOG = logging.getLogger(__name__)
class IvMechanism(Mechanism):
    """
    Mech class for flavors that require an IV set in the mechanism.

    Will default to `[0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38]` if no IV is passed in
    """

    OPTIONAL_PARAMS = ["iv"]

    def to_c_mech(self):
        """
        Convert extra parameters to ctypes, then build out the mechanism.

        :return: :class:`~pycryptoki.cryptoki.CK_MECHANISM`
        """
        super(IvMechanism, self).to_c_mech()
        # Bug fix: the original assigned into ``self.params`` even when it was
        # None, which raises ``TypeError: 'NoneType' object does not support
        # item assignment``. Normalize to a dict before touching it.
        if self.params is None:
            self.params = {}
        if "iv" not in self.params:
            self.params["iv"] = [0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38]
            LOG.warning("Using static IVs can be insecure! ")
        if len(self.params["iv"]) == 0:
            # Empty IV list means "let the token use its internal IV".
            LOG.debug("Setting IV to NULL (using internal)")
            iv_ba = None
            iv_len = 0
        else:
            iv_ba, iv_len = to_byte_array(self.params["iv"])
        self.mech.pParameter = iv_ba
        self.mech.usParameterLen = iv_len
        return self.mech
class Iv16Mechanism(Mechanism):
    """
    Mech class for flavors that require an IV set in the mechanism.

    Will default to `[1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]` if no IV is passed in
    """

    OPTIONAL_PARAMS = ["iv"]

    def to_c_mech(self):
        """
        Convert extra parameters to ctypes, then build out the mechanism.

        :return: :class:`~pycryptoki.cryptoki.CK_MECHANISM`
        """
        super(Iv16Mechanism, self).to_c_mech()
        # Bug fix: the original assigned into ``self.params`` even when it was
        # None, which raises ``TypeError``. Normalize to a dict first.
        if self.params is None:
            self.params = {}
        if "iv" not in self.params:
            self.params["iv"] = [1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]
            LOG.warning("Using static IVs can be insecure! ")
        if len(self.params["iv"]) == 0:
            # Empty IV list means "let the token use its internal IV".
            LOG.debug("Setting IV to NULL (using internal)")
            iv_ba = None
            iv_len = 0
        else:
            iv_ba, iv_len = to_byte_array(self.params["iv"])
        self.mech.pParameter = iv_ba
        self.mech.usParameterLen = iv_len
        return self.mech
class AESXTSMechanism(Mechanism):
    """
    Creates the AES-XTS specific param structure & converts python types to C types.
    """

    REQUIRED_PARAMS = ["cb", "hTweakKey"]

    def to_c_mech(self):
        """
        Convert extra parameters to ctypes, then build out the mechanism.

        :return: :class:`~pycryptoki.cryptoki.CK_MECHANISM`
        """
        super(AESXTSMechanism, self).to_c_mech()
        c_params = CK_AES_XTS_PARAMS()
        # Tweak key handle plus the 16-byte tweak value from the caller.
        c_params.hTweakKey = CK_ULONG(self.params["hTweakKey"])
        c_params.cb = (CK_BYTE * 16)(*self.params["cb"])
        self.mech.usParameterLen = CK_ULONG(sizeof(c_params))
        self.mech.pParameter = cast(pointer(c_params), c_void_p)
        return self.mech
class AESGCMMechanism(Mechanism):
    """
    Creates the AES-GCM specific param structure & converts python types to C types.
    """

    REQUIRED_PARAMS = ["iv", "AAD", "ulTagBits"]

    def to_c_mech(self):
        """
        Convert extra parameters to ctypes, then build out the mechanism.

        :return: :class:`~pycryptoki.cryptoki.CK_MECHANISM`
        """
        super(AESGCMMechanism, self).to_c_mech()
        c_params = CK_AES_GCM_PARAMS()

        iv = self.params["iv"]
        if iv:
            iv_ba, iv_len = to_byte_array(iv)
        else:
            # An empty IV means "use the token's internal IV".
            LOG.debug("Setting IV to NULL (using internal)")
            iv_ba, iv_len = None, 0
        c_params.pIv = cast(iv_ba, CK_BYTE_PTR)
        c_params.ulIvLen = iv_len
        # Assuming 8 bits per entry in IV.
        c_params.ulIvBits = CK_ULONG(len(iv) * 8)

        # Additional authenticated data and the requested tag size.
        aad_ba, aad_len = to_byte_array(self.params["AAD"])
        c_params.pAAD = cast(aad_ba, CK_BYTE_PTR)
        c_params.ulAADLen = aad_len
        c_params.ulTagBits = CK_ULONG(self.params["ulTagBits"])

        self.mech.pParameter = cast(pointer(c_params), c_void_p)
        self.mech.usParameterLen = CK_ULONG(sizeof(c_params))
        return self.mech
class AESECBEncryptDataMechanism(Mechanism):
    """
    AES mechanism for deriving keys from encrypted data.
    """

    REQUIRED_PARAMS = ["data"]

    def to_c_mech(self):
        """
        Convert extra parameters to ctypes, then build out the mechanism.

        :return: :class:`~pycryptoki.cryptoki.CK_MECHANISM`
        """
        super(AESECBEncryptDataMechanism, self).to_c_mech()
        # from https://www.cryptsoft.com/pkcs11doc/v220
        # /group__SEC__12__14__2__MECHANISM__PARAMETERS.html
        # Note: data should be a multiple of 16 long.
        c_params = CK_KEY_DERIVATION_STRING_DATA()
        data_ba, data_len = to_byte_array(self.params["data"])
        c_params.pData = cast(data_ba, CK_BYTE_PTR)
        c_params.ulLen = data_len
        self.mech.pParameter = cast(pointer(c_params), c_void_p)
        self.mech.usParameterLen = CK_ULONG(sizeof(c_params))
        return self.mech
class AESCBCEncryptDataMechanism(Mechanism):
    """
    AES CBC mechanism for deriving keys from encrypted data.
    """

    REQUIRED_PARAMS = ["iv", "data"]

    def to_c_mech(self):
        """
        Convert extra parameters to ctypes, then build out the mechanism.

        :return: :class:`~pycryptoki.cryptoki.CK_MECHANISM`
        """
        super(AESCBCEncryptDataMechanism, self).to_c_mech()
        # https://www.cryptsoft.com/pkcs11doc/v220
        # /group__SEC__12__14__KEY__DERIVATION__BY__DATA__ENCRYPTION______DES______AES.html
        # #CKM_AES_CBC_ENCRYPT_DATA
        # Note: data should be a multiple of 16 long.
        # Note: IV should always be a length of 8.
        c_params = CK_AES_CBC_ENCRYPT_DATA_PARAMS()
        c_params.iv = (c_ubyte * 16)(*self.params["iv"])
        data_ba, data_len = to_byte_array(self.params["data"])
        c_params.pData = cast(data_ba, CK_BYTE_PTR)
        c_params.length = data_len
        self.mech.pParameter = cast(pointer(c_params), c_void_p)
        self.mech.usParameterLen = CK_ULONG(sizeof(c_params))
        return self.mech
class AESCTRMechanism(Mechanism):
    """
    AES CTR Mechanism param conversion.
    """

    REQUIRED_PARAMS = ["cb", "ulCounterBits"]

    def to_c_mech(self):
        """
        Convert extra parameters to ctypes, then build out the mechanism.

        :return: :class:`~pycryptoki.cryptoki.CK_MECHANISM`
        """
        super(AESCTRMechanism, self).to_c_mech()
        c_params = CK_AES_CTR_PARAMS()
        # Counter width in bits, plus the 16-byte initial counter block.
        c_params.ulCounterBits = CK_ULONG(self.params["ulCounterBits"])
        c_params.cb = (CK_BYTE * 16)(*self.params["cb"])
        self.mech.usParameterLen = CK_ULONG(sizeof(c_params))
        self.mech.pParameter = cast(pointer(c_params), c_void_p)
        return self.mech
| [
37811,
198,
32,
1546,
12,
11423,
9030,
25504,
13,
198,
37811,
198,
11748,
18931,
198,
6738,
269,
19199,
1330,
269,
62,
19382,
62,
79,
11,
3350,
11,
17562,
11,
39364,
198,
198,
6738,
764,
1330,
13438,
1042,
198,
6738,
11485,
1078,
7657... | 2.131815 | 3,338 |
#!/usr/bin/python
"""
XOR decryption
https://projecteuler.net/problem=59
"""
import itertools
import re
import string
if __name__ == "__main__":
    # main() is expected to be defined elsewhere in this file (not visible in
    # this chunk) -- TODO confirm.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
198,
55,
1581,
875,
13168,
198,
5450,
1378,
16302,
68,
18173,
13,
3262,
14,
45573,
28,
3270,
198,
37811,
198,
198,
11748,
340,
861,
10141,
198,
11748,
302,
198,
11748,
4731,
628,
198,
... | 2.666667 | 60 |
import tensorflow as tf
import pandas as pd
import os

from timeit import default_timer as timer

# Time how long loading the GloVe embedding file takes.
start = timer()

# The dataset folder lives one level above the current working directory.
dirname = os.getcwd()
dirname = os.path.dirname(dirname)
dataset_path = os.path.join(dirname, 'datasets/')
print(dataset_path)

# Each row: a token followed by its embedding components, space separated.
# NOTE(review): the filename claims 10-dimensional vectors from the 42B
# corpus -- confirm this matches the actual file on disk.
gloveVectors = pd.read_csv(dataset_path+'glove.42B.10d.txt', sep=' ', header=None )
print(gloveVectors.shape)

# First column = words, remaining columns = the embedding vectors.
words = gloveVectors.iloc[:,0:1]
vectors = gloveVectors.iloc[:,1:]

end = timer()
print('Time taken to load word embeddings (seconds): ', end-start)

# TF1-style eager mode; must be enabled before any other TF ops run.
tf.enable_eager_execution()
embeddings = tf.get_variable(name='embeddings', shape = vectors.shape, dtype=tf.float32, trainable=False) | [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
198,
198,
6738,
640,
270,
1330,
4277,
62,
45016,
355,
19781,
198,
198,
9688,
796,
19781,
3419,
198,
198,
15908,
3672,
796,
28686,
13,
1136,
... | 2.587045 | 247 |
import uuid
import logging
from rastervision.v2.core import _rv_config
log = logging.getLogger(__name__)
AWS_BATCH = 'aws_batch'
| [
11748,
334,
27112,
198,
11748,
18931,
198,
198,
6738,
374,
1603,
10178,
13,
85,
17,
13,
7295,
1330,
4808,
81,
85,
62,
11250,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
12298,
50,
62,
33,
11417,
7... | 2.607843 | 51 |
from flask_babelex import lazy_gettext
from flask_security.forms import ConfirmRegisterForm, EqualTo, get_form_field_label
from flask_wtf import FlaskForm
from wtforms import BooleanField, PasswordField, SubmitField
from wtforms.validators import DataRequired
from project.forms.common import get_accept_tos_markup
| [
6738,
42903,
62,
65,
11231,
2588,
1330,
16931,
62,
1136,
5239,
198,
6738,
42903,
62,
12961,
13,
23914,
1330,
7326,
2533,
38804,
8479,
11,
28701,
2514,
11,
651,
62,
687,
62,
3245,
62,
18242,
198,
6738,
42903,
62,
86,
27110,
1330,
46947... | 3.613636 | 88 |
# Read the two integers A and B from standard input.
A, B = map(int, input().split())

# 'Q' wins exactly when the smaller value is odd and the two values differ
# by at most one; otherwise 'P' wins.
print('Q' if min(A, B) % 2 == 1 and abs(A - B) <= 1 else 'P')
| [
32,
11,
347,
796,
3975,
7,
600,
11,
5128,
22446,
35312,
28955,
198,
198,
361,
949,
7,
32,
11,
347,
8,
4064,
362,
6624,
352,
290,
2352,
7,
32,
532,
347,
8,
19841,
352,
25,
198,
220,
220,
220,
3601,
10786,
48,
11537,
198,
17772,
... | 2.054545 | 55 |
# Unit tests for the Juiceboard app using the python unittest framework
import sys
# Change working dir to juiceboard src folder
sys.path.append("..\\juiceboard\\")
import unittest
import subprocess
import dash
# Source modules
from database_helper import *
from visualizer_helper import *
from juiceboard import *

# Known record ids used as test fixtures.
TEST_ID_INT = 170841655
TEST_ID_REG = 168754280
TEST_FEEDBACK_NONE = 171948923

# Database endpoints: a reachable local server plus deliberately-bad
# address/port values for negative tests.
TEST_DATABASE_IP = '127.0.0.1'
BAD_IP = '0.0.0.1'
PORT = '5432'
BAD_PORT = '11111'

if __name__ == '__main__':
    # Log the current commit (git describe) so test output identifies the build.
    print('Starting juiceboard unit tests... Commit prefix: ' +
          str(subprocess.check_output(['git', 'describe','--always']).strip().decode('utf-8')))
    unittest.main()
| [
2,
11801,
5254,
329,
262,
35161,
3526,
598,
1262,
262,
21015,
555,
715,
395,
9355,
201,
198,
201,
198,
11748,
25064,
201,
198,
201,
198,
2,
9794,
1762,
26672,
284,
13135,
3526,
12351,
9483,
201,
198,
17597,
13,
6978,
13,
33295,
7203,
... | 2.482877 | 292 |
from gurobipy import *
import numpy as np
import pandas as pd
import os

# Model selector: 1 -> RDD lower bound (relaxed assignment variables),
# 0 -> original A-FY-QAP (binary assignment variables).
R_A_FY_QAP=0 # RDD lower bound or original FY model (1) 1: RDD lowerbound, 0: A-FY-QAP

# Input folder holding the link / agent / node csv files.
cwd = r'..\NEOS_6'
link_df=pd.read_csv(os.path.join(cwd,'input_link.csv'))
agent_df=pd.read_csv(os.path.join(cwd,'input_agent.csv'))
node_df=pd.read_csv(os.path.join(cwd,'input_node.csv'))

# Flow between building pairs, keyed by (origin, destination) node id.
agent_df['od_pair']=agent_df.apply(lambda x: (x.origin_node_id,x.destination_node_id),axis=1)
flow=agent_df[['od_pair','customized_cost_link_value']].set_index('od_pair').to_dict()['customized_cost_link_value']

# Transport and construction costs, keyed by (from, to) node id.
link_df['od_pair']=link_df.apply(lambda x: (x.from_node_id,x.to_node_id),axis=1)
distance=link_df[['od_pair','trans_cost']].set_index('od_pair').to_dict()['trans_cost']
built_cost=link_df[['od_pair','built_cost']].set_index('od_pair').to_dict()['built_cost']

# Partition the nodes into two building groups and two location groups.
building_set_1=[]
building_set_2=[]
location_set_1=[]
location_set_2=[]
building_set=[]
location_set=[]
building_set_map=[]
location_set_map=[]
for i in range(len(node_df)):
    if node_df.iloc[i].node_name == 'building node1':
        building_set_1.append(node_df.iloc[i].node_id)
    if node_df.iloc[i].node_name == 'building node2':
        building_set_2.append(node_df.iloc[i].node_id)
    if node_df.iloc[i].node_name == 'location node1':
        location_set_1.append(node_df.iloc[i].node_id)
    if node_df.iloc[i].node_name == 'location node2':
        location_set_2.append(node_df.iloc[i].node_id)

# Combined sets; the *_map lists hold the same ids with group order swapped.
location_set.extend(location_set_1)
location_set.extend(location_set_2)
location_set_map.extend(location_set_2)
location_set_map.extend(location_set_1)
building_set.extend(building_set_1)
building_set.extend(building_set_2)
building_set_map.extend(building_set_2)
building_set_map.extend(building_set_1)

# NOTE(review): `from gurobipy import *` does not bind the name `gurobipy`,
# so `gurobipy.Env()` would raise NameError unless the module is imported
# elsewhere -- confirm (a plain `Env()` would work).
enviroment = gurobipy.Env()
enviroment.setParam('TimeLimit', 360)  # solver time limit (seconds)
model=Model("quadratic_assignment",env=enviroment)

# Create variables
if R_A_FY_QAP==1:
    # Relaxed (continuous in [0,1]) assignment for the RDD lower bound.
    assignment_1=model.addVars(building_set_1,location_set_1,name='assignment_1',lb=0,ub=1)
    assignment_2=model.addVars(building_set_2,location_set_2,name='assignment_2',lb=0,ub=1)
    path=model.addVars(building_set_1,location_set_1,location_set_2,building_set_2,name='path',lb=0,ub=1)
elif R_A_FY_QAP==0:
    # Binary assignment for the exact A-FY-QAP model.
    assignment_1=model.addVars(building_set_1,location_set_1,name='assignment_1',vtype=GRB.BINARY)
    assignment_2=model.addVars(building_set_2,location_set_2,name='assignment_2',vtype=GRB.BINARY)
    path=model.addVars(building_set_1,location_set_1,location_set_2,building_set_2,name='path',lb=0,ub=1)

# Assignment constraints
# NOTE(review): the constraint-name strings below (e.g.
# "building assignment constraint[%s]%k") are literals -- the % operator is
# inside the quotes, so no interpolation takes place. Confirm this is intended.
for k in location_set_1:
    model.addConstr(quicksum(assignment_1[i,k] for i in building_set_1)==1,
                    "building assignment constraint[%s]%k")
for i in building_set_1:
    model.addConstr(quicksum(assignment_1[i,k] for k in location_set_1)==1,
                    "location assignment constraint[%s]%i")
# Assignment constraints
for l in location_set_2:
    model.addConstr(quicksum(assignment_2[j,l] for j in building_set_2)==1,
                    "building assignment constraint[%s]%k")
for j in building_set_2:
    model.addConstr(quicksum(assignment_2[j,l] for l in location_set_2)==1,
                    "location assignment constraint[%s]%i")

# capacity constraints (link path variables to the assignment variables)
##Relax the following two constraints when calculate GLB
for k in location_set_1:
    for l in location_set_2:
        for j in building_set_2:
            model.addConstr(quicksum(path[i,k,l,j] for i in building_set_1)==assignment_2[j,l],
                            "cap[%s,%s,%s]%(k,l,j)")
for i in building_set_1:
    for l in location_set_2:
        for j in building_set_2:
            model.addConstr(quicksum(path[i,k,l,j] for k in location_set_1)==assignment_2[j,l],
                            "cap[%s,%s,%s]%(i,l,j)")
for i in building_set_1:
    for k in location_set_1:
        for l in location_set_2:
            model.addConstr(quicksum(path[i,k,l,j] for j in building_set_2)==assignment_1[i,k],
                            "cap[%s,%s,%s]%(i,k,l)")
for i in building_set_1:
    for k in location_set_1:
        for j in building_set_2:
            model.addConstr(quicksum(path[i,k,l,j] for l in location_set_2)==assignment_1[i,k],
                            "cap[%s,%s,%s]%(i,k,j)")

# Objective: transport cost (flow x distance over chosen paths) plus the
# construction cost of both assignments.
#model.addConstr(quicksum(path[i,k,l,j]*distance[k,l]*flow[i,j] for i in building_set_1 for j in building_set_2 for k in location_set_1 for l in location_set_2)>=394)
model.setObjective(quicksum(path[i,k,l,j]*distance[k,l]*flow[i,j]
                            for i in building_set_1 for j in building_set_2 for k in location_set_1 for l in location_set_2)+\
                   quicksum(assignment_1[i,k]*built_cost[i,k] for i in building_set_1 for k in location_set_1)+\
                   quicksum(assignment_2[j,l]*built_cost[l,j] for j in building_set_2 for l in location_set_2))
model.optimize()
print(model.getAttr('x',assignment_1))
print(model.getAttr('x',assignment_2)) | [
6738,
915,
22609,
541,
88,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686,
220,
198,
49,
62,
32,
62,
43833,
62,
48,
2969,
28,
15,
1303,
371,
16458,
2793,
5421,
393,
2656,
2363... | 2.122458 | 2,311 |
import numpy as np
from sklearn.cluster import KMeans
from tqdm import tqdm
import matplotlib.pyplot as plt

from preliminaries.embedding import aggregateApiSequences
from utils.file import loadJson, dumpIterable, dumpJson
from utils.manager import PathManager
from baselines.alignment import apiCluster
from utils.timer import StepTimer
from utils.magic import sample, magicSeed, nRandom
from utils.stat import calBeliefeInterval

# Few-shot episode parameters: k support samples, n classes, qk query samples
# per class, N samples per class in total.
k = 10
n = 10
qk = 5
N = 20

#####################################################
# Replace the API sequences in the original json files
# with cluster-index sequences according to the clustering
# result, truncate to a maximum length, and save them as
# an npy file.
#####################################################

#####################################################
# Given all the sequences of one class, generate that
# class's transition matrix.
#####################################################

##################################################
# Using the generated group of transition matrices,
# convert a sequence into a sequence of in-group class
# indices according to the largest transition value.
##################################################

#############################################
# For an input sequence, accumulate scores over the
# transition matrices of several classes and return the
# sequence's total score over all class transition
# matrices.
#############################################

# NOTE(review): the functions described by the banners above (e.g.
# makeClusteredData, scoreMarkovEpisode, findOptK, gridSearch,
# extractBestParam) are not visible in this chunk -- they are expected to be
# defined elsewhere in this file.

if __name__ == "__main__":
    epoch = 5000
    seq_len = 50        # truncation length for cluster-index sequences
    n_cluster = 30      # number of API clusters (Markov states)
    n_range = (15,30)

    mng = PathManager("HKS-api")
    # # # findOptK(mng.WordEmbedMatrix(), k_range=(2,100))
    # apiCluster(mng.WordEmbedMatrix(), mng.DataRoot()+"MarkovClusterMapping.json", cluster_num=n_cluster)
    # makeClusteredData(json_path=mng.Folder(),
    #                   cluster_path=mng.DataRoot()+"MarkovClusterMapping.json",
    #                   word_map_path=mng.WordIndexMap(),
    #                   dump_path=mng.DataRozot()+"MarkovClusteredData.npy",
    #                   max_len=seq_len)
    # scoreMarkovEpisode(clustered_data_path=mng.DataRoot()+"MarkovClusteredData.npy",
    #                    epoch=2000,
    #                    n_cluster=n_cluster,
    #                    maxlen=seq_len)
    # re = gridSearch(c_values=list(range(*n_range)),
    #                 k_values=[i*50 for i in range(1,11)],
    #                 per_epoch=1000)
    # dumpJson(re, mng.DataRoot()+"GSs/GridSearchResult-%dshot-%dway-virushare20.json"%(k,n))
    # re = loadJson(mng.DataRoot()+"GSs/GridSearchResult-%dshot-%dway-virushare20.json"%(k,n))
    # n_cluster, seq_len = extractBestParam(re)
    # n_cluster = int(n_cluster)
    # seq_len = int(seq_len)

    # Cluster the API embeddings, rewrite the sequences as cluster indices,
    # then evaluate the Markov-transition-matrix classifier over episodes.
    apiCluster(mng.WordEmbedMatrix(), mng.DataRoot()+"MarkovClusterMapping.json", cluster_num=n_cluster)
    makeClusteredData(json_path=mng.Folder(),
                      cluster_path=mng.DataRoot()+"MarkovClusterMapping.json",
                      word_map_path=mng.WordIndexMap(),
                      dump_path=mng.DataRoot()+"MarkovClusteredData.npy",
                      max_len=seq_len)
    scoreMarkovEpisode(clustered_data_path=mng.DataRoot()+"MarkovClusteredData.npy",
                       epoch=epoch,
                       n_cluster=n_cluster,
                       maxlen=seq_len)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
565,
5819,
1330,
509,
5308,
504,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
14541,
259,
316... | 1.902995 | 1,536 |
from distutils.core import setup

# NOTE(review): distutils is deprecated and removed in Python 3.12; consider
# migrating to setuptools.

# Read the long description up front with a context manager; the original
# called open() inline inside setup() and leaked the file handle.
with open('README.txt') as readme:
    long_description = readme.read()

setup(
    name='MarkdownLinkTarget',
    version='0.1.0',
    packages=['MarkdownLinkTarget',],
    license='Apache License 2.0',
    long_description=long_description,
    url='https://github.com/ribalba/markdown.linktarget',
    # Fix: the attribute added is target="_blank" (was misspelled "taget").
    description='Adds a target="_blank" attribute to HTML links in Markdown',
    author='Didi Hoffmann',
    author_email='didi@ribalba.de',
    install_requires=[
        "Markdown >= 2.3.1",
    ],
)
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
9704,
2902,
11280,
21745,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,
3256,
198,
220,
220,
220,
10392,
28,
17816,
9704,
290... | 2.593407 | 182 |
import matplotlib.pyplot as plt
import scipy.linalg as lin
import numpy as np
import cv2

# For each of two images: collect a clicked pixel, then back-project it
# through the pseudo-inverse of the camera projection matrix.
for i in range(0,2):
    #camera intrinsic parameters (focal lengths in pixels; principal point)
    Fx=Fy=2469
    Cx=1228.876620888020
    Cy=1012.976060035710
    K = [[ Fx, 0., Cx],
         [ 0., Fy, Cy],
         [ 0., 0., 1. ]]
    K = np.array(K)
    #camera extrinsic parameters (identity rotation, zero translation)
    R = np.eye(3)
    t = np.array([[0],[0],[0]])
    #projection matrix  P = K [R | t]
    P = K.dot(np.hstack((R,t)))
    # NOTE(review): mouse_right_click() is not defined in this chunk --
    # presumably a helper returning clicked pixel coordinates, defined
    # elsewhere in the file; confirm.
    list1=mouse_right_click()
    print("pixel values u and v for "+"image "+str(i+1)+" "+str(list1))
    print()
    # Homogeneous pixel coordinate [u, v, 1].
    x = np.array([list1[0][0],list1[0][1],1])
    #calculating the 3D world coordinates
    # NOTE(review): pinv(P) yields only a least-squares point on the viewing
    # ray -- depth is not recoverable from a single view; confirm intent.
    X = np.dot(lin.pinv(P),x)
    print("X and Y 3D coordinates for " +"image"+str(i+1))
    print("X= " +str(X[0]))
    print("Y= " +str(X[1]))
    print("-"*55)
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
629,
541,
88,
13,
75,
1292,
70,
355,
9493,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
269,
85,
17,
201,
198,
201,
198,
201,
198,
1640,
1312,
... | 1.820565 | 496 |
#references: http://stackoverflow.com/questions/3430372/how-to-get-full-path-of-current-files-directory-in-python
#            http://www.tutorialspoint.com/python/python_reg_expressions.htm
# Rewrites local "notebooks" links in OfflineFFSSN.ipynb to their GitHub blob
# URLs and writes the result to FFSSN.ipynb.
# NOTE(review): this is Python 2 code (a `print` statement follows this block).

import IPython.nbformat.current as nbf
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
import os
import re

currentDirectory = os.getcwd()
fileName='OfflineFFSSN.ipynb'
outputFile='FFSSN.ipynb'
outputText = '';

f = open(currentDirectory+'/'+fileName,"r")
lines = f.readlines()
f.close()

for line in lines:
    # Split each matching line into (prefix)(notebooks)(suffix) and replace
    # the "notebooks" segment with the GitHub blob path.
    match = re.match(r'(.*)(notebooks)(.*)', line)
    if match != None:
        outputText += match.group(1)+'github/tartavull/snn-rl/blob/master'+match.group(3)
    else:
        outputText += line

#nb = nbf.reads(outputText, 'ipynb')
#nbf.write(nb, open(outputFile, 'w'), 'ipynb')
f = open(currentDirectory+'/'+outputFile,"w")
f.write(outputText)
f.close()
print 'done' | [
2,
5420,
4972,
25,
2638,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
2682,
1270,
36720,
14,
4919,
12,
1462,
12,
1136,
12,
12853,
12,
6978,
12,
1659,
12,
14421,
12,
16624,
12,
34945,
12,
259,
12,
29412,
198,
2,
2638,
1378... | 2.650289 | 346 |
from django.db.models.query_utils import Q
from django.shortcuts import render
from ProjectHeart.models import Passwords
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
22766,
62,
26791,
1330,
1195,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
4935,
28541,
13,
27530,
1330,
6251,
10879,
628
] | 3.8125 | 32 |
from os import listdir
from os.path import isfile, join
import os
import json
import re
from pprint import pprint
import pandas as pd
import os
import errno
if __name__ == '__main__':
    # execute only if run as the entry point into the program
    # NOTE(review): glassnode_files_organizer() is not defined in this chunk --
    # presumably defined earlier in the file; confirm.
    glassnode_files_organizer()
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
302,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28686... | 3.267442 | 86 |
# coding=UTF-8
#!/usr/bin/python
# Parses an IEEE-style LaTeX .bbl bibliography and writes refer.html with one
# formatted reference per line.

from latin import replace_utf8
import codecs
bbl = codecs.open('ieee.bbl','r','utf-8')

# Example of the \newblock lines being parsed:
# \newblock \emph{IEEE Trans. on Pattern Analysis and Machine Intelligence},
# 35\penalty0 (8):\penalty0 1798--1828, 2013.
# vol. 36, iss.8, pp. 1798-1828

# Parser states: outside an entry, inside a \bibitem header, accumulating
# authors, title, or publication info.
M_NONE = 0
M_BIBITEM = 1
M_AUTHOR = 2
M_TITLE = 3
M_PUBLISH = 4

bibitem_block = False
read_author = False
mode = M_NONE
refer_ary = []

lines = bbl.readlines()
print(len(lines))
for idx, line in enumerate(lines):
    # Strip natbib disambiguation labels before parsing.
    line = line.replace('{\\natexlab{a}}','')
    line = line.replace('{\\natexlab{b}}','')
    # print("(%2d)[%3d]:%s " %(idx+1, len(line), line))
    if line.startswith('\\bibitem'):
        # print('in bibitem')
        mode = M_BIBITEM
        # NOTE(review): Refer is not defined in this chunk -- presumably a
        # record class provided elsewhere in the file/module; confirm.
        now_refer = Refer()
    if mode == M_BIBITEM:
        # The \bibitem header ends at the first line that closes its braces.
        if '}' in line and not '{\\' in line:
            mode = M_AUTHOR
    elif mode == M_AUTHOR:
        # print('in author')
        # Author lines accumulate until the first \newblock (the title).
        if not line.startswith('\\newblock'):
            now_refer.author += line
        else:
            now_refer.parse_author()
            # print('author: ' + now_refer.author)
            now_refer.title = line
            mode = M_TITLE
    elif mode == M_TITLE:
        # Title lines accumulate until the next \newblock (publication info).
        if not line.startswith('\\newblock'):
            now_refer.title += line
        else:
            # print('title: ' + now_refer.title)
            now_refer.parse_title()
            now_refer.publish = line
            mode = M_PUBLISH
    elif mode == M_PUBLISH:
        # Publication info runs until a blank line ends the entry.
        if len(line)>1:
            now_refer.publish += line
        else:
            now_refer.parse_publish()
            refer_ary.append(now_refer)
            mode = M_NONE

f = codecs.open('refer.html','w', "utf-8")
f.write('<head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"></head><body>\r\n')
for i, r in enumerate(refer_ary):
    # IEEE-ish line: [n] authors, "title," <i>venue</i>, vol., iss., pp., year.
    s = '[{}] {} ,“{},” <i>{}</i>'.format(i+1, r.author, r.title, r.publish)
    s = (s + ', vol.' + r.vol) if r.vol and len(r.vol) > 0 else s
    s = (s + ', iss.' + r.iss) if r.iss else s
    s = (s + ', pp.{}-{}'.format(r.pp_s, r.pp_e)) if r.pp_s else s
    s = (s + ',' + str(r.year)) if r.year else s
    s += '.<BR>\r\n'
    f.write(s)
f.write('</body>\r\n')
| [
2,
19617,
28,
48504,
12,
23,
198,
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
6738,
3042,
259,
1330,
6330,
62,
40477,
23,
198,
11748,
40481,
82,
198,
65,
2436,
796,
40481,
82,
13,
9654,
10786,
494,
1453,
13,
65,
2436,
41707,
8... | 1.912088 | 1,183 |
"""Custom form widgets."""
from json import dumps
from typing import Any, Dict
from django.forms import Textarea
class TinyMCE(Textarea):
    """
    A textarea :class:`~django.forms.Widget`
    for `TinyMCE <https://www.tiny.cloud/>`_.

    The widget renders as a plain textarea; the TinyMCE editor is presumably
    attached client-side -- TODO confirm against the template/JS setup.

    :param attrs: A dictionary of the widget's attributes.
    """


__all__ = ['TinyMCE']
| [
37811,
15022,
1296,
40803,
526,
15931,
198,
198,
6738,
33918,
1330,
45514,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
198,
198,
6738,
42625,
14208,
13,
23914,
1330,
8255,
20337,
628,
198,
4871,
20443,
44,
5222,
7,
8206,
20337,
2599,
19... | 2.775 | 120 |
from __future__ import annotations
import pytest
from ufoLib2.objects import Glyph
from ufoLib2.objects.contour import Contour
@pytest.fixture
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
334,
6513,
25835,
17,
13,
48205,
1330,
27949,
746,
198,
6738,
334,
6513,
25835,
17,
13,
48205,
13,
3642,
454,
1330,
2345,
454,
628,
198,
31,
9078,
928... | 3.170213 | 47 |
from numpy import arcsin, exp
def _comp_point_coordinate(self):
    """Compute the point coordinates needed to plot the Slot.

    Parameters
    ----------
    self : SlotW27
        A SlotW27 object

    Returns
    -------
    point_dict: dict
        A dict of the slot point coordinates
    """
    Rbo = self.get_Rbo()

    # Rotate Z0 (on the bore radius) by -alpha so that ||Z1, Z10|| = W0.
    alpha = float(arcsin(self.W0 / (2 * Rbo)))
    Z1 = Rbo * exp(1j * 0) * exp(-1j * alpha)

    # Walk corner by corner along the upper half of the slot; the radial
    # steps (H0, H1, H2) point outwards or inwards depending on the slot.
    direction = 1 if self.is_outwards() else -1
    Z2 = Z1 + direction * self.H0
    Z3 = Z2 - (self.W1 - self.W0) * 1j / 2.0
    Z4 = Z3 + direction * self.H1 - (self.W2 - self.W1) / 2.0 * 1j
    Z5 = Z4 + direction * self.H2 - (self.W3 - self.W2) / 2.0 * 1j

    point_dict = {"Z1": Z1, "Z2": Z2, "Z3": Z3, "Z4": Z4, "Z5": Z5}
    # The slot is symmetric about the x axis: Z6..Z10 mirror Z5..Z1.
    mirror_pairs = (
        ("Z6", "Z5"),
        ("Z7", "Z4"),
        ("Z8", "Z3"),
        ("Z9", "Z2"),
        ("Z10", "Z1"),
    )
    for mirror_key, source_key in mirror_pairs:
        point_dict[mirror_key] = point_dict[source_key].conjugate()
    return point_dict
| [
6738,
299,
32152,
1330,
44606,
259,
11,
1033,
628,
198,
4299,
4808,
5589,
62,
4122,
62,
37652,
4559,
7,
944,
2599,
198,
220,
220,
220,
37227,
7293,
1133,
262,
966,
22715,
2622,
284,
7110,
262,
32026,
13,
628,
220,
220,
220,
40117,
1... | 2.001479 | 676 |
# 1. Initial condition: x, y.
# 2. Euclid's algorithm relies on gcd(a, b) == gcd(b, r), which shrinks the
#    problem size.
#    Note: if a < b then a % b = a, i.e. a = a + 0 * b.
# 3. Termination: stop when the remainder is 0; any x % 0 = x is the result.

# 1. Initial condition: n; factorial(n) yields the factorial.
# 2. n! = n * (n - 1)! shrinks the problem size.
# 3. Termination: return 1 when n == 1 or n == 0.

# 1. Initial conditions: shorter, longer, k. divingBoard yields a list of
#    distinct lengths.
# 2. Shrinking the problem: the set of lengths buildable from k planks equals
#    the set buildable from k - 1 planks, each extended by a long plank or a
#    short plank.
# 3. Termination: 0 planks build length 0.
# Note: this answer does not pass on LeetCode -- the recursion exceeds the
# maximum recursion depth. Rewriting the recursive function iteratively
# usually saves memory.

if __name__ == '__main__':
    # main() is expected to be defined earlier in this file (not visible in
    # this chunk) -- TODO confirm.
    main()
| [
2,
220,
220,
352,
13,
220,
10263,
230,
251,
34650,
233,
30266,
94,
20015,
114,
42468,
2124,
11,
331,
198,
2,
220,
220,
362,
13,
220,
5525,
122,
245,
164,
121,
105,
33566,
116,
165,
247,
97,
37345,
243,
26344,
102,
18796,
101,
2141... | 0.765605 | 785 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 27 16:09:19 2021
@author: alejandrobertolet
"""
from os import listdir
import numpy as np
import pydicom
from rt_utils import RTStructBuilder
import matplotlib.pylab as plt
from datetime import datetime
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
1737,
2681,
1467,
25,
2931,
25,
1129,
33448,
198,
198,
31,
9800,
25,
31341,
47983,
... | 2.162338 | 154 |
import json
from django.conf import settings
| [
11748,
33918,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198
] | 4.090909 | 11 |
import pickle

# Input: whitespace-separated morphology file, expected to hold one
# "<surface form> <lemma> <tag>" triple per line.
verbs_file = "morphs.txt"

with open(verbs_file, "r") as ip_file:
    ip_lines = ip_file.readlines()

# Map each lemma to the set of surface forms observed for it.
words = {}
for line in ip_lines:
    line = line.strip().split()
    if len(line) != 3:
        # Malformed line: report it but keep processing, as the original did.
        # NOTE(review): a line with fewer than two fields would raise
        # IndexError below -- confirm the input never contains such lines.
        print(line)
    word = line[1]
    word_form = line[0]
    if word in words:
        words[word].add(word_form)
    else:
        words[word] = {word_form}

# expand_dict is expected to be defined elsewhere in this file/module.
result = expand_dict(words)

# Fix: use a context manager so the output handle is flushed and closed;
# the original leaked the file object returned by the inline open().
with open("verbs.p", "wb") as op_file:
    pickle.dump(result, op_file)
11748,
2298,
293,
198,
46211,
62,
7753,
796,
366,
24503,
82,
13,
14116,
1,
628,
198,
4480,
1280,
7,
46211,
62,
7753,
553,
81,
4943,
355,
20966,
62,
7753,
25,
198,
197,
541,
62,
6615,
796,
20966,
62,
7753,
13,
961,
6615,
3419,
198,... | 2.352601 | 173 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: bookstore.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='bookstore.proto',
package='endpoints.examples.bookstore',
syntax='proto3',
serialized_pb=_b('\n\x0f\x62ookstore.proto\x12\x1c\x65ndpoints.examples.bookstore\x1a\x1cgoogle/api/annotations.proto\x1a\x1bgoogle/protobuf/empty.proto\"\"\n\x05Shelf\x12\n\n\x02id\x18\x01 \x01(\x03\x12\r\n\x05theme\x18\x02 \x01(\t\"1\n\x04\x42ook\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0e\n\x06\x61uthor\x18\x02 \x01(\t\x12\r\n\x05title\x18\x03 \x01(\t\"K\n\x13ListShelvesResponse\x12\x34\n\x07shelves\x18\x01 \x03(\x0b\x32#.endpoints.examples.bookstore.Shelf\"H\n\x12\x43reateShelfRequest\x12\x32\n\x05shelf\x18\x01 \x01(\x0b\x32#.endpoints.examples.bookstore.Shelf\" \n\x0fGetShelfRequest\x12\r\n\x05shelf\x18\x01 \x01(\x03\"#\n\x12\x44\x65leteShelfRequest\x12\r\n\x05shelf\x18\x01 \x01(\x03\"!\n\x10ListBooksRequest\x12\r\n\x05shelf\x18\x01 \x01(\x03\"F\n\x11ListBooksResponse\x12\x31\n\x05\x62ooks\x18\x01 \x03(\x0b\x32\".endpoints.examples.bookstore.Book\"T\n\x11\x43reateBookRequest\x12\r\n\x05shelf\x18\x01 \x01(\x03\x12\x30\n\x04\x62ook\x18\x02 \x01(\x0b\x32\".endpoints.examples.bookstore.Book\"-\n\x0eGetBookRequest\x12\r\n\x05shelf\x18\x01 \x01(\x03\x12\x0c\n\x04\x62ook\x18\x02 \x01(\x03\"0\n\x11\x44\x65leteBookRequest\x12\r\n\x05shelf\x18\x01 \x01(\x03\x12\x0c\n\x04\x62ook\x18\x02 
\x01(\x03\x32\x98\x08\n\tBookstore\x12m\n\x0bListShelves\x12\x16.google.protobuf.Empty\x1a\x31.endpoints.examples.bookstore.ListShelvesResponse\"\x13\x82\xd3\xe4\x93\x02\r\x12\x0b/v1/shelves\x12\x80\x01\n\x0b\x43reateShelf\x12\x30.endpoints.examples.bookstore.CreateShelfRequest\x1a#.endpoints.examples.bookstore.Shelf\"\x1a\x82\xd3\xe4\x93\x02\x14\"\x0b/v1/shelves:\x05shelf\x12{\n\x08GetShelf\x12-.endpoints.examples.bookstore.GetShelfRequest\x1a#.endpoints.examples.bookstore.Shelf\"\x1b\x82\xd3\xe4\x93\x02\x15\x12\x13/v1/shelves/{shelf}\x12t\n\x0b\x44\x65leteShelf\x12\x30.endpoints.examples.bookstore.DeleteShelfRequest\x1a\x16.google.protobuf.Empty\"\x1b\x82\xd3\xe4\x93\x02\x15*\x13/v1/shelves/{shelf}\x12\x8f\x01\n\tListBooks\x12..endpoints.examples.bookstore.ListBooksRequest\x1a/.endpoints.examples.bookstore.ListBooksResponse\"!\x82\xd3\xe4\x93\x02\x1b\x12\x19/v1/shelves/{shelf}/books\x12\x8a\x01\n\nCreateBook\x12/.endpoints.examples.bookstore.CreateBookRequest\x1a\".endpoints.examples.bookstore.Book\"\'\x82\xd3\xe4\x93\x02!\"\x19/v1/shelves/{shelf}/books:\x04\x62ook\x12\x85\x01\n\x07GetBook\x12,.endpoints.examples.bookstore.GetBookRequest\x1a\".endpoints.examples.bookstore.Book\"(\x82\xd3\xe4\x93\x02\"\x12 /v1/shelves/{shelf}/books/{book}\x12\x7f\n\nDeleteBook\x12/.endpoints.examples.bookstore.DeleteBookRequest\x1a\x16.google.protobuf.Empty\"(\x82\xd3\xe4\x93\x02\"* /v1/shelves/{shelf}/books/{book}B;\n\'com.google.endpoints.examples.bookstoreB\x0e\x42ookstoreProtoP\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_SHELF = _descriptor.Descriptor(
name='Shelf',
full_name='endpoints.examples.bookstore.Shelf',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='endpoints.examples.bookstore.Shelf.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='theme', full_name='endpoints.examples.bookstore.Shelf.theme', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=142,
)
_BOOK = _descriptor.Descriptor(
name='Book',
full_name='endpoints.examples.bookstore.Book',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='endpoints.examples.bookstore.Book.id', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='author', full_name='endpoints.examples.bookstore.Book.author', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='title', full_name='endpoints.examples.bookstore.Book.title', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=144,
serialized_end=193,
)
_LISTSHELVESRESPONSE = _descriptor.Descriptor(
name='ListShelvesResponse',
full_name='endpoints.examples.bookstore.ListShelvesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shelves', full_name='endpoints.examples.bookstore.ListShelvesResponse.shelves', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=195,
serialized_end=270,
)
_CREATESHELFREQUEST = _descriptor.Descriptor(
name='CreateShelfRequest',
full_name='endpoints.examples.bookstore.CreateShelfRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shelf', full_name='endpoints.examples.bookstore.CreateShelfRequest.shelf', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=272,
serialized_end=344,
)
_GETSHELFREQUEST = _descriptor.Descriptor(
name='GetShelfRequest',
full_name='endpoints.examples.bookstore.GetShelfRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shelf', full_name='endpoints.examples.bookstore.GetShelfRequest.shelf', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=346,
serialized_end=378,
)
_DELETESHELFREQUEST = _descriptor.Descriptor(
name='DeleteShelfRequest',
full_name='endpoints.examples.bookstore.DeleteShelfRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shelf', full_name='endpoints.examples.bookstore.DeleteShelfRequest.shelf', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=380,
serialized_end=415,
)
_LISTBOOKSREQUEST = _descriptor.Descriptor(
name='ListBooksRequest',
full_name='endpoints.examples.bookstore.ListBooksRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shelf', full_name='endpoints.examples.bookstore.ListBooksRequest.shelf', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=417,
serialized_end=450,
)
_LISTBOOKSRESPONSE = _descriptor.Descriptor(
name='ListBooksResponse',
full_name='endpoints.examples.bookstore.ListBooksResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='books', full_name='endpoints.examples.bookstore.ListBooksResponse.books', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=452,
serialized_end=522,
)
_CREATEBOOKREQUEST = _descriptor.Descriptor(
name='CreateBookRequest',
full_name='endpoints.examples.bookstore.CreateBookRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shelf', full_name='endpoints.examples.bookstore.CreateBookRequest.shelf', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='book', full_name='endpoints.examples.bookstore.CreateBookRequest.book', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=524,
serialized_end=608,
)
_GETBOOKREQUEST = _descriptor.Descriptor(
name='GetBookRequest',
full_name='endpoints.examples.bookstore.GetBookRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shelf', full_name='endpoints.examples.bookstore.GetBookRequest.shelf', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='book', full_name='endpoints.examples.bookstore.GetBookRequest.book', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=610,
serialized_end=655,
)
_DELETEBOOKREQUEST = _descriptor.Descriptor(
name='DeleteBookRequest',
full_name='endpoints.examples.bookstore.DeleteBookRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shelf', full_name='endpoints.examples.bookstore.DeleteBookRequest.shelf', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='book', full_name='endpoints.examples.bookstore.DeleteBookRequest.book', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=657,
serialized_end=705,
)
_LISTSHELVESRESPONSE.fields_by_name['shelves'].message_type = _SHELF
_CREATESHELFREQUEST.fields_by_name['shelf'].message_type = _SHELF
_LISTBOOKSRESPONSE.fields_by_name['books'].message_type = _BOOK
_CREATEBOOKREQUEST.fields_by_name['book'].message_type = _BOOK
DESCRIPTOR.message_types_by_name['Shelf'] = _SHELF
DESCRIPTOR.message_types_by_name['Book'] = _BOOK
DESCRIPTOR.message_types_by_name['ListShelvesResponse'] = _LISTSHELVESRESPONSE
DESCRIPTOR.message_types_by_name['CreateShelfRequest'] = _CREATESHELFREQUEST
DESCRIPTOR.message_types_by_name['GetShelfRequest'] = _GETSHELFREQUEST
DESCRIPTOR.message_types_by_name['DeleteShelfRequest'] = _DELETESHELFREQUEST
DESCRIPTOR.message_types_by_name['ListBooksRequest'] = _LISTBOOKSREQUEST
DESCRIPTOR.message_types_by_name['ListBooksResponse'] = _LISTBOOKSRESPONSE
DESCRIPTOR.message_types_by_name['CreateBookRequest'] = _CREATEBOOKREQUEST
DESCRIPTOR.message_types_by_name['GetBookRequest'] = _GETBOOKREQUEST
DESCRIPTOR.message_types_by_name['DeleteBookRequest'] = _DELETEBOOKREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Shelf = _reflection.GeneratedProtocolMessageType('Shelf', (_message.Message,), dict(
DESCRIPTOR = _SHELF,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.Shelf)
))
_sym_db.RegisterMessage(Shelf)
Book = _reflection.GeneratedProtocolMessageType('Book', (_message.Message,), dict(
DESCRIPTOR = _BOOK,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.Book)
))
_sym_db.RegisterMessage(Book)
ListShelvesResponse = _reflection.GeneratedProtocolMessageType('ListShelvesResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTSHELVESRESPONSE,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.ListShelvesResponse)
))
_sym_db.RegisterMessage(ListShelvesResponse)
CreateShelfRequest = _reflection.GeneratedProtocolMessageType('CreateShelfRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATESHELFREQUEST,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.CreateShelfRequest)
))
_sym_db.RegisterMessage(CreateShelfRequest)
GetShelfRequest = _reflection.GeneratedProtocolMessageType('GetShelfRequest', (_message.Message,), dict(
DESCRIPTOR = _GETSHELFREQUEST,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.GetShelfRequest)
))
_sym_db.RegisterMessage(GetShelfRequest)
DeleteShelfRequest = _reflection.GeneratedProtocolMessageType('DeleteShelfRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETESHELFREQUEST,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.DeleteShelfRequest)
))
_sym_db.RegisterMessage(DeleteShelfRequest)
ListBooksRequest = _reflection.GeneratedProtocolMessageType('ListBooksRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTBOOKSREQUEST,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.ListBooksRequest)
))
_sym_db.RegisterMessage(ListBooksRequest)
ListBooksResponse = _reflection.GeneratedProtocolMessageType('ListBooksResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTBOOKSRESPONSE,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.ListBooksResponse)
))
_sym_db.RegisterMessage(ListBooksResponse)
CreateBookRequest = _reflection.GeneratedProtocolMessageType('CreateBookRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATEBOOKREQUEST,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.CreateBookRequest)
))
_sym_db.RegisterMessage(CreateBookRequest)
GetBookRequest = _reflection.GeneratedProtocolMessageType('GetBookRequest', (_message.Message,), dict(
DESCRIPTOR = _GETBOOKREQUEST,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.GetBookRequest)
))
_sym_db.RegisterMessage(GetBookRequest)
DeleteBookRequest = _reflection.GeneratedProtocolMessageType('DeleteBookRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETEBOOKREQUEST,
__module__ = 'bookstore_pb2'
# @@protoc_insertion_point(class_scope:endpoints.examples.bookstore.DeleteBookRequest)
))
_sym_db.RegisterMessage(DeleteBookRequest)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\'com.google.endpoints.examples.bookstoreB\016BookstoreProtoP\001'))
_BOOKSTORE = _descriptor.ServiceDescriptor(
name='Bookstore',
full_name='endpoints.examples.bookstore.Bookstore',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=708,
serialized_end=1756,
methods=[
_descriptor.MethodDescriptor(
name='ListShelves',
full_name='endpoints.examples.bookstore.Bookstore.ListShelves',
index=0,
containing_service=None,
input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
output_type=_LISTSHELVESRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\r\022\013/v1/shelves')),
),
_descriptor.MethodDescriptor(
name='CreateShelf',
full_name='endpoints.examples.bookstore.Bookstore.CreateShelf',
index=1,
containing_service=None,
input_type=_CREATESHELFREQUEST,
output_type=_SHELF,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\024\"\013/v1/shelves:\005shelf')),
),
_descriptor.MethodDescriptor(
name='GetShelf',
full_name='endpoints.examples.bookstore.Bookstore.GetShelf',
index=2,
containing_service=None,
input_type=_GETSHELFREQUEST,
output_type=_SHELF,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\025\022\023/v1/shelves/{shelf}')),
),
_descriptor.MethodDescriptor(
name='DeleteShelf',
full_name='endpoints.examples.bookstore.Bookstore.DeleteShelf',
index=3,
containing_service=None,
input_type=_DELETESHELFREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\025*\023/v1/shelves/{shelf}')),
),
_descriptor.MethodDescriptor(
name='ListBooks',
full_name='endpoints.examples.bookstore.Bookstore.ListBooks',
index=4,
containing_service=None,
input_type=_LISTBOOKSREQUEST,
output_type=_LISTBOOKSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\033\022\031/v1/shelves/{shelf}/books')),
),
_descriptor.MethodDescriptor(
name='CreateBook',
full_name='endpoints.examples.bookstore.Bookstore.CreateBook',
index=5,
containing_service=None,
input_type=_CREATEBOOKREQUEST,
output_type=_BOOK,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002!\"\031/v1/shelves/{shelf}/books:\004book')),
),
_descriptor.MethodDescriptor(
name='GetBook',
full_name='endpoints.examples.bookstore.Bookstore.GetBook',
index=6,
containing_service=None,
input_type=_GETBOOKREQUEST,
output_type=_BOOK,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\"\022 /v1/shelves/{shelf}/books/{book}')),
),
_descriptor.MethodDescriptor(
name='DeleteBook',
full_name='endpoints.examples.bookstore.Bookstore.DeleteBook',
index=7,
containing_service=None,
input_type=_DELETEBOOKREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\"* /v1/shelves/{shelf}/books/{book}')),
),
])
_sym_db.RegisterServiceDescriptor(_BOOKSTORE)
DESCRIPTOR.services_by_name['Bookstore'] = _BOOKSTORE
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
44346,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
62,
65,
28,
17597,
13,
9641,
62,
10951,
58,
15,
60,
27,
18,
290,
357,
50033,
2124,
... | 2.456194 | 9,097 |
from logging import debug, info, warn
import boto3
from lgw.api_gateway import lookup_api_gateway
from lgw.route53 import update_dns_a_record
| [
6738,
18931,
1330,
14257,
11,
7508,
11,
9828,
198,
11748,
275,
2069,
18,
198,
6738,
300,
70,
86,
13,
15042,
62,
10494,
1014,
1330,
35847,
62,
15042,
62,
10494,
1014,
198,
6738,
300,
70,
86,
13,
38629,
4310,
1330,
4296,
62,
67,
5907,... | 2.92 | 50 |
from pie.json_loader import JsonLoader
print(JsonLoader(__file__, __name__).load())
| [
6738,
2508,
13,
17752,
62,
29356,
1330,
449,
1559,
17401,
198,
198,
4798,
7,
41,
1559,
17401,
7,
834,
7753,
834,
11,
11593,
3672,
834,
737,
2220,
28955,
198
] | 2.931034 | 29 |
#El método "isupper()" es la versión en mayúscula de "islower()"
# se concentra solo en letras mayúsculas
print("Moooo".isupper())
print('moooo'.isupper())
print('MOOOO'.isupper()) | [
2,
9527,
285,
25125,
24313,
366,
271,
45828,
3419,
1,
1658,
8591,
1646,
72,
18840,
551,
743,
21356,
1416,
4712,
390,
366,
3044,
789,
3419,
1,
198,
2,
384,
5280,
430,
12199,
551,
1309,
8847,
743,
21356,
1416,
25283,
198,
4798,
7203,
... | 2.727273 | 66 |
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file
# Code starts here
path
df = pd.read_csv(path)
X = df.drop(['Churn','customerID'],1)
y = df['Churn']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3, random_state = 0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
X_train['TotalCharges'].replace(' ',np.NaN, inplace=True)
X_test['TotalCharges'].replace(' ',np.NaN, inplace=True)
X_train['TotalCharges'] = X_train['TotalCharges'].astype(float)
X_test['TotalCharges'] = X_test['TotalCharges'].astype(float)
X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean(),inplace=True)
X_test['TotalCharges'].fillna(X_train['TotalCharges'].mean(), inplace=True)
print(X_train.isnull().sum())
cat_cols = X_train.select_dtypes(include='O').columns.tolist()
#print(cat_cols)
for x in cat_cols:
le = LabelEncoder()
X_train[x] = le.fit_transform(X_train[x])
for x in cat_cols:
le = LabelEncoder()
X_test[x] = le.fit_transform(X_test[x])
#performing label encoding on train and test data
#encoding train data
y_train = y_train.replace({'No':0, 'Yes':1})
#encoding test data
y_test = y_test.replace({'No':0, 'Yes':1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
ada_model = AdaBoostClassifier(random_state=0)
ada_model.fit(X_train,y_train)
y_pred = ada_model.predict(X_test)
ada_score = accuracy_score(y_test,y_pred)
print("Accuracy: ",ada_score)
ada_cm=confusion_matrix(y_test,y_pred)
print('Confusion matrix: \n', ada_cm)
ada_cr=classification_report(y_test,y_pred)
print('Classification report: \n', ada_cr)
# Code ends here
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
#Parameter list
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
'max_depth':range(1,3)}
# Code starts here
xgb_clf = XGBClassifier(random_state=0)
xgb_clf.fit(X_train, y_train)
y_pred = xgb_clf.predict(X_test)
print(y_pred)
xgb_score = accuracy_score(y_test, y_pred)
xgb_cm = confusion_matrix(y_test, y_pred)
xgb_cr = classification_report(y_test, y_pred)
clf_model = GridSearchCV(estimator=xgb_clf, param_grid=parameters)
clf_model.fit(X_train, y_train)
y_pred = clf_model.predict(X_test)
print(y_pred)
clf_score = accuracy_score(y_test, y_pred)
clf_cm = confusion_matrix(y_test, y_pred)
clf_cr = classification_report(y_test, y_pred)
print(xgb_score, clf_score)
# Code ends here
| [
2,
220,
26171,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
2,
6978,
532,
10644,
286,
2393,
220,
198,
198,
2,
6127,
4940,
994,
198,
6978,
198,
7568,
796,
... | 2.528046 | 1,034 |
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import JointState
from tf.msg import tfMessage
from std_srvs.srv import Trigger, TriggerResponse
from urdf_parser_py.urdf import URDF
from recordit.recorder import Recorder, sm
from recordit.track import Track, LinJTrack, RotJTrack
class ROSRecorder(Recorder):
"""
Uses the Record lib to record a robot's movement and maps it to ROS1.
It can process joint states and transformations(intended for mobile robots).
Recorder can be controlled via ROS services.
"""
@sm(requ=["UNCONF"], trans="CONF")
@sm(requ=["RUNNING"])
def js_callback(self, data):
"""
Joint states yield the robot's joint movements
"""
self.get_time(data)
for i, key in enumerate(data.name):
if not self.tracks.has_key(key):
joint = self.j_map[key]
if joint.type == "prismatic":
self.new_track(key, LinJTrack(key, joint))
elif joint.type in ["revolute", "continuous"]:
self.new_track(key, RotJTrack(key, joint))
else:
rospy.loginfo("Joint of type %s not supported!", joint.type)
continue
self.append_to_track(key, data.position[i])
@staticmethod
@sm(requ=["RUNNING"])
def tf_callback(self, data):
"""
TF yields the movement of the (mobile) robot in relation to the world.
Default is one tf: map <--> base_link
"""
self.get_time(data.transforms[0])
for tf in data.transforms:
for item in self.tf_items(tf):
name = tf.child_frame_id[1:]
key = name + len(item)
if not self.tracks.has_key(key):
self.new_track(key, Track(name, is_rot=len(item) == 4))
rospy.loginfo("Created track %s.", key)
self.append_to_track(key, item)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
686,
2777,
88,
198,
6738,
12694,
62,
907,
14542,
13,
19662,
1330,
16798,
9012,
198,
6738,
48700,
13,
19662,
1330,
48700,
12837,
198,
6738,
14367,
62,
27891,
14259,
13,
27891,
85,
... | 2.165198 | 908 |
# Generated by Django 3.2.8 on 2021-11-21 11:38
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
23,
319,
33448,
12,
1157,
12,
2481,
1367,
25,
2548,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import time
import zmq
HOST = '0.0.0.0'
PORT = '4444'
_context = zmq.Context()
_publisher = _context.socket(zmq.PUB)
url = 'tcp://{}:{}'.format(HOST, PORT)
from flask import Flask
import random
app = Flask(__name__)
@app.route('/')
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0',port=5000)
| [
11748,
640,
198,
11748,
1976,
76,
80,
198,
198,
39,
10892,
796,
705,
15,
13,
15,
13,
15,
13,
15,
6,
198,
15490,
796,
705,
2598,
2598,
6,
198,
198,
62,
22866,
796,
1976,
76,
80,
13,
21947,
3419,
198,
62,
12984,
8191,
796,
4808,
... | 2.204225 | 142 |
from distutils.core import setup, Extension
execfile("oid_translate/version.py")
_oid_translate = Extension("oid_translate._oid_translate",
libraries = ["netsnmp"],
sources = ["oid_translate/_oid_translate.c"])
kwargs = {
"name": "oid_translate",
"version": str(__version__),
"packages": ["oid_translate"],
"ext_modules": [_oid_translate],
"description": "Python OID/MIB Name Translator",
# PyPi, despite not parsing markdown, will prefer the README.md to the
# standard README. Explicitly read it here.
"long_description": open("README").read(),
"author": "Gary M. Josack",
"maintainer": "Gary M. Josack",
"author_email": "gary@dropbox.com",
"maintainer_email": "gary@dropbox.com",
"license": "Apache",
"url": "https://github.com/dropbox/oid_translate",
"download_url": "https://github.com/dropbox/oid_translate/archive/master.tar.gz",
"classifiers": [
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
]
}
setup(**kwargs)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
11,
27995,
198,
198,
18558,
7753,
7203,
1868,
62,
7645,
17660,
14,
9641,
13,
9078,
4943,
198,
198,
62,
1868,
62,
7645,
17660,
796,
27995,
7203,
1868,
62,
7645,
17660,
13557,
1868,
62,
7645,
176... | 2.597826 | 460 |
""" single source of truth for ipyml version
"""
__version__ = "0.1.0"
| [
37811,
2060,
2723,
286,
3872,
329,
20966,
88,
4029,
2196,
198,
37811,
198,
198,
834,
9641,
834,
796,
366,
15,
13,
16,
13,
15,
1,
198
] | 2.769231 | 26 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
from subprocess import call
import sys
sys.path.insert(0, 'python')
#
import SpectrumSteering
from SpectrumSteering import Gen
from SpectrumSteering import Plot
from SpectrumSteering import Graph
from SpectrumSteering import EtaLoop
#from optparse import OptionParser
if __name__ == "__main__":
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
if len(sys.argv) < 3:
print 'launch with atlas_incljet2012_syst_classes ieta ir '
exit()
eta=int(sys.argv[1])
rad=int((sys.argv[2]))
print 'eta= ',eta,' rad= ',rad
name='main'
gen=Gen()
graph=Graph()
# print name,' \n Set attributes '
gen.debug=False
graph.plot_band=False
graph.plot_marker=True
graph.plot_staggered=True
graph.match_binning=True
graph.apply_grid_corr=True
# graph.show_systematics_as_lines=5.
graph.show_individual_systematics=0
graph.show_total_systematics=1
graph.order_systematic_colorbyalphabeth=True
# graph.calculate_chi2 = 1
# graph.label_chi2 = True
# graph.calculate_chi2 = 0
graph.label_chi2 = False
graph.grid_parameter_scan=False
graph.alternative_grid_divided_by_doublediff_bin_width=False
graph.label_date=False
graph.label_sqrt_s=True
graph.label_scaleform=True
graph.x_legend=0.35
graph.y_legend=0.92
graph.x_info_legend=0.45
graph.y_info_legend=0.27
graph.band_with_pdf= False
graph.band_with_alphas= False
graph.band_with_scale = False
graph.band_with_gridcorrection = False
graph.band_total = False
graph.label_informationlegend="ATLAS internal"
graph.grid_parameter_scan = False
graph.label_chi2 = False
year=2012
griddir=''
dataset=''
if year==2010:
dataset=''
if year==2011:
dataset=''
if year==2012:
dataset='_highmu'
gen.CreateSteering()
graph.CreateSteering()
#loop over eta
# rlist=['4','6']
rlist=['4']
iplot=0
# Attention Spectrum software does not work in loop need to pot one by one
# for ieta in range(1,7):
for ieta in range(eta,eta+1):
# for ieta in range(1,2):
for ir in range(rad,rad+1):
# for ir in rlist:
print 'in plot ', ieta ,' r= ',ir,' iplot= ',iplot
plot=Plot()
plot.name='[Plot_'+str(iplot)+']'
iplot=iplot+1
#plot.data_cut_xmax=1500
#plot.data_cut_xmin=100
plot.remove_systematic_group = 'lumi'
plot.display_systematic_group = 'JES,JER,other'
plot.display_systematic_group_fill_color = '623,400,1'
plot.display_systematic_group_edge_color = '629,419,-1'
plot.display_systematic_group_edge_style = '1,1,2'
plot.display_systematic_group_edge_width = '4,4,4'
# if ieta==1:
# plot.y_ratio_max=2.0
# plot.y_ratio_min=0.5
# plot.x_info_legend=0.8
# if ieta==1:
# plot.y_ratio_max=1.8
# plot.y_ratio_min=0.4
# if ieta==2:
# plot.y_ratio_max=2.0
# plot.y_ratio_min=0.3
# if ieta==3:
# plot.y_ratio_max=4.1
# plot.y_ratio_min=-0.2
# if ieta==4:
# plot.y_ratio_max=2.0
# plot.y_ratio_min=0.41
# if ieta==5:
# plot.y_ratio_max=3.0
# plot.y_ratio_min=0.2
# if ieta==6:
# plot.y_ratio_max=3.0
# plot.y_ratio_min=0.2
# plot.x_legend = 0.3
# plot.x_info_legend = 0.8
plot.data_directory = 'Data/jet/atlas/incljets'+str(year)
plot.data_steering_files = 'atlas_'+str(year)+'_jet_antiktr0'+str(ir)+'_incljetpt_eta'+str(ieta)+dataset+'.txt'
plot.plot_type = 'data'
plot.desc = 'atlas_inclusive_jet'+str(year)+'_data_syst_groups'+dataset+'_r0'+str(ir)+'_eta'+str(ieta)
plot.x_log=True
plot.y_log=True
plot.display_style = 'ratio'
plot.overlay_style = 'data'
plot.ratio_title = 'Systematic uncertainties'
plot.ratio_style_0 = 'data/ !data'
plot.ratio_0 = 'data_0 / data_0'
plot.CreateSteering()
# graph.ShowAll()
# plot.ShowAll()
# graph.launch()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
10688,
198,
6738,
850,
14681,
1330,
869,
198,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
7... | 2.295141 | 1,667 |
# -*- coding: utf8 -*-
# test encoding: à-é-è-ô-ï-€
# Copyright 2021 Adrien Crovato
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Mesh data structure
# Adrien Crovato
import numpy as np
from msh.node import EqNodes
from msh.cell import CTYPE
from msh.interface import Vertex
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
2,
1332,
21004,
25,
28141,
12,
2634,
12,
14064,
12,
27083,
12,
26884,
12,
26391,
198,
198,
2,
15069,
33448,
1215,
15355,
9325,
85,
5549,
198,
2,
198,
2,
49962,
739,
262,
... | 3.395652 | 230 |
#!/bin/python3
import math
import os
# Complete the countingValleys function below.
if __name__ == '__main__':
n = 9
s = ['U', 'D', 'D', 'D', 'U', 'D', 'U', 'U']
print(countingValleys(n, s))
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
10688,
198,
11748,
28686,
628,
198,
2,
13248,
262,
14143,
53,
6765,
893,
2163,
2174,
13,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
299,
79... | 2.285714 | 91 |
from netCDF4 import Dataset, num2date
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import argparse
import ast
import gc
import logging
import math
import sys
import time
import numpy as np
import pandas as pd
import psutil
import tensorflow as tf
from tensorflow.keras.mixed_precision import experimental as mixed_precision
try:
import tensorflow_addons as tfa
except Exception as e:
tfa = None
import data_generators
import custom_losses as cl
import hparameters
import models
import utility
tf.keras.backend.set_floatx('float16')
tf.keras.backend.set_epsilon(1e-3)
try:
gpu_devices = tf.config.list_physical_devices('GPU')
except Exception as e:
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
def is_compatible_with(self, other):
"""Returns True if the `other` DType will be converted to this DType.
Monkey patch: incompatibility issues between tfa.optimizers and mixed precision training
The conversion rules are as follows:
```python
DType(T) .is_compatible_with(DType(T)) == True
```
Args:
other: A `DType` (or object that may be converted to a `DType`).
Returns:
True if a Tensor of the `other` `DType` will be implicitly converted to
this `DType`.
"""
other = tf.dtypes.as_dtype(other)
if self._type_enum==19 and other.as_datatype_enum==1:
return True
return self._type_enum in (other.as_datatype_enum,
other.base_dtype.as_datatype_enum)
tf.DType.is_compatible_with = is_compatible_with
class WeatherModel():
"""Handles the Training of the Deep Learning Weather model
Example of how to use:
WeatherModel = WeatherModel( t_params, m_params)
WeatherModel.initialize_scheme_era5Eobs() #Initializes datasets for ERA5 and Eobs
WeatherModel.train_model() #Trains and saves model
"""
def __init__(self, t_params, m_params):
"""Train the TRU_NET Model
"""
self.t_params = t_params
self.m_params = m_params
def initialize_scheme_era5Eobs(self):
"""Initialization scheme for the ERA5 and E-OBS datasets.
This method creates the datasets
"""
# region ---- Parameters related to training length and training reporting frequency
era5_eobs = data_generators.Era5_Eobs( self.t_params, self.m_params)
# hparameters files calculates train_batches assuing we are only evaluating one location,
# therefore we must adjust got multiple locations (loc_count)
self.t_params['train_batches'] = int(self.t_params['train_batches'] * era5_eobs.loc_count)
self.t_params['val_batches'] = int(self.t_params['val_batches'] * era5_eobs.loc_count)
# The fequency at which we report during training and validation i.e every 10% of minibatches report training loss and training mse
self.train_batch_report_freq = max( int(self.t_params['train_batches']*self.t_params['reporting_freq']), 3)
self.val_batch_report_freq = max( int(self.t_params['val_batches']*self.t_params['reporting_freq']), 3)
#endregion
# region ---- Restoring/Creating New Training Records and Restoring training progress
#This training records keeps track of the losses on each epoch
try:
self.df_training_info = pd.read_csv( "checkpoints/{}/checkpoint_scores.csv".format(utility.model_name_mkr(m_params,t_params=self.t_params,htuning=m_params.get('htuning',False))), header=0, index_col=False)
self.df_training_info = self.df_training_info[['Epoch','Train_loss','Train_mse','Val_loss','Val_mse','Checkpoint_Path','Last_Trained_Batch']]
self.start_epoch = int(max([self.df_training_info['Epoch'][0]], default=0))
last_batch = int( self.df_training_info.loc[self.df_training_info['Epoch']==self.start_epoch,'Last_Trained_Batch'].iloc[0] )
if(last_batch in [-1, self.t_params['train_batches']] ):
self.start_epoch = self.start_epoch + 1
self.batches_to_skip = 0
else:
self.batches_to_skip = last_batch
print("Recovered training records")
except FileNotFoundError as e:
#If no file found, then make new training records file
self.df_training_info = pd.DataFrame(columns=['Epoch','Train_loss','Train_mse','Val_loss','Val_mse','Checkpoint_Path','Last_Trained_Batch'] )
self.batches_to_skip = 0
self.start_epoch = 0
print("Did not recover training records. Starting from scratch")
# endregion
# region ---- Defining Model / Optimizer / Losses / Metrics / Records / Checkpoints / Tensorboard
devices = tf.config.get_visible_devices() #tf.config.experimental.list_physical_devices('GPU')
#gpus_names = [ device.name for device in devices if device.device_type == "GPU" ]
#self.strategy = tf.distribute.MirroredStrategy( devices=gpus_names ) #OneDeviceStrategy(device="/GPU:0") #
self.strategy = tf.distribute.MirroredStrategy( )
assert self.t_params['batch_size'] % self.strategy.num_replicas_in_sync == 0
print("Number of Devices used in MirroredStrategy: {}".format(self.strategy.num_replicas_in_sync))
with self.strategy.scope():
#Model
self.strategy_gpu_count = self.strategy.num_replicas_in_sync
self.t_params['gpu_count'] = self.strategy.num_replicas_in_sync
self.model = models.model_loader( self.t_params, self.m_params )
#Optimizer
optimizer = tfa.optimizers.RectifiedAdam( **self.m_params['rec_adam_params'], total_steps=self.t_params['train_batches']*20)
self.optimizer = mixed_precision.LossScaleOptimizer( optimizer, loss_scale=tf.mixed_precision.experimental.DynamicLossScale() )
# These objects will aggregate losses and metrics across batches and epochs
self.loss_agg_batch = tf.keras.metrics.Mean(name='loss_agg_batch' )
self.loss_agg_epoch = tf.keras.metrics.Mean(name="loss_agg_epoch")
self.mse_agg_epoch = tf.keras.metrics.Mean(name='mse_agg_epoch')
self.loss_agg_val = tf.keras.metrics.Mean(name='loss_agg_val')
self.mse_agg_val = tf.keras.metrics.Mean(name='mse_agg_val')
#checkpoints (For Epochs)
#The CheckpointManagers can be called to serializae the weights within TRUNET
checkpoint_path_epoch = "./checkpoints/{}/epoch".format(utility.model_name_mkr(m_params,t_params=self.t_params, htuning=m_params.get('htuning',False) ))
os.makedirs(checkpoint_path_epoch,exist_ok=True)
with self.strategy.scope():
ckpt_epoch = tf.train.Checkpoint(model=self.model, optimizer=self.optimizer)
self.ckpt_mngr_epoch = tf.train.CheckpointManager(ckpt_epoch, checkpoint_path_epoch, max_to_keep=self.t_params['checkpoints_to_keep'], keep_checkpoint_every_n_hours=None)
#restoring last checkpoint if it exists
if self.ckpt_mngr_epoch.latest_checkpoint:
# compat: Initializing model and optimizer before restoring from checkpoint
try:
ckpt_epoch.restore(self.ckpt_mngr_epoch.latest_checkpoint).assert_consumed()
except AssertionError as e:
ckpt_epoch.restore(self.ckpt_mngr_epoch.latest_checkpoint)
print (' Restoring model from best checkpoint')
else:
print (' Initializing model from scratch')
#Tensorboard
os.makedirs("log_tensboard/{}".format(utility.model_name_mkr(m_params, t_params=self.t_params, htuning=self.m_params.get('htuning',False) )), exist_ok=True )
#self.writer = tf.summary.create_file_writer( "log_tensboard/{}".format(utility.model_name_mkr(m_params,t_params=self.t_params, htuning=self.m_params.get('htuning',False) ) ) )
# endregion
# region ---- Making Datasets
#caching dataset to file post pre-processing steps have been completed
cache_suffix = utility.cache_suffix_mkr( m_params, self.t_params )
os.makedirs( './Data/data_cache/', exist_ok=True )
_ds_train_val, _ = era5_eobs.load_data_era5eobs( self.t_params['train_batches'] + self.t_params['val_batches'] , self.t_params['start_date'], self.t_params['parallel_calls'] )
ds_train = _ds_train_val.take(self.t_params['train_batches'] )
ds_val = _ds_train_val.skip(self.t_params['train_batches'] ).take(self.t_params['val_batches'])
#TODO: undo cache
ds_train = ds_train.cache('Data/data_cache/train'+cache_suffix )
ds_val = ds_val.cache('Data/data_cache/val'+cache_suffix )
ds_train = ds_train.unbatch().shuffle( self.t_params['batch_size']*int(self.t_params['train_batches']/5), reshuffle_each_iteration=True).batch(self.t_params['batch_size']) #.repeat(self.t_params['epochs']-self.start_epoch)
ds_train_val = ds_train.concatenate(ds_val)
ds_train_val = ds_train_val.repeat(self.t_params.get('epochs',100)-self.start_epoch)
self.ds_train_val = self.strategy.experimental_distribute_dataset(dataset=ds_train_val)
self.iter_train_val = enumerate(self.ds_train_val)
bc_ds_in_train = int( self.t_params['train_batches']/era5_eobs.loc_count ) #batch_count
bc_ds_in_val = int( self.t_params['val_batches']/era5_eobs.loc_count )
self.reset_idxs_training = np.cumsum( [bc_ds_in_train]*era5_eobs.loc_count )
self.reset_idxs_validation = np.cumsum( [bc_ds_in_val]*era5_eobs.loc_count )
# endregion
def train_model(self):
"""During training we produce a prediction for a (n by n) square patch.
But we caculate losses on a central (h, w) region within the (n by n) patch
This central region is defined by "bounds" below
"""
bounds = cl.central_region_bounds(self.m_params['region_grid_params']) #list [ lower_h_bound[0], upper_h_bound[0], lower_w_bound[1], upper_w_bound[1] ]
#Training for n epochs
#self.t_params['train_batches'] = self.t_params['train_batches'] if self.m_params['time_sequential'] else int(self.t_params['train_batches']*self.t_params['lookback_target'] )
#self.t_params['val_batches'] = self.t_params['val_batches'] if self.m_params['time_sequential'] else int(self.t_params['val_batches']*self.t_params['lookback_target'] )
for epoch in range(self.start_epoch, int(self.t_params['epochs']) ):
#region resetting metrics, losses, records, timers
self.loss_agg_batch.reset_states()
self.loss_agg_epoch.reset_states()
self.mse_agg_epoch.reset_states()
self.loss_agg_val.reset_states()
self.mse_agg_val.reset_states()
self.df_training_info = self.df_training_info.append( { 'Epoch':epoch, 'Last_Trained_Batch':0 }, ignore_index=True )
start_epoch_train = time.time()
start_batch_group_time = time.time()
batch=0
print("\n\nStarting EPOCH {}".format(epoch ))
#endregion
# --- Training Loops
for batch in range(self.batches_to_skip+1,self.t_params['train_batches'] +1):
# get next set of training datums
idx, (feature, target, mask) = next(self.iter_train_val)
gradients = self.distributed_train_step( feature, target, mask, bounds, 0.0 )
#print(gradients)
# reporting
if( batch % self.train_batch_report_freq==0 or batch == self.t_params['train_batches']):
batch_group_time = time.time() - start_batch_group_time
est_completion_time_seconds = (batch_group_time/self.t_params['reporting_freq']) * (1 - batch/self.t_params['train_batches'])
est_completion_time_mins = est_completion_time_seconds/60
print("\t\tBatch:{}/{}\tTrain Loss: {:.8f} \t Batch Time:{:.4f}\tEpoch mins left:{:.1f}".format(batch, self.t_params['train_batches'], self.loss_agg_batch.result(), batch_group_time, est_completion_time_mins ) )
# resetting time and losses
start_batch_group_time = time.time()
# Updating record of the last batch to be operated on in training epoch
self.df_training_info.loc[ ( self.df_training_info['Epoch']==epoch) , ['Last_Trained_Batch'] ] = batch
self.df_training_info.to_csv( path_or_buf="checkpoints/{}/checkpoint_scores.csv".format(utility.model_name_mkr(self.m_params,t_params=self.t_params, htuning=m_params.get('htuning',False) )), header=True, index=False )
li_losses = [self.loss_agg_batch.result()]
li_names = ['train_loss_batch']
step = batch + (epoch)*self.t_params['train_batches']
#utility.tensorboard_record( self.writer.as_default(), li_losses, li_names, step, gradients, self.model.trainable_variables )
#utility.tensorboard_record( self.writer.as_default(), li_losses, li_names, step, None, None )
self.loss_agg_batch.reset_states()
if batch in self.reset_idxs_training:
self.model.reset_states()
# --- Tensorboard record
li_losses = [self.loss_agg_epoch.result(), self.mse_agg_epoch.result()]
li_names = ['train_loss_epoch','train_mse_epoch']
#utility.tensorboard_record( self.writer.as_default(), li_losses, li_names, epoch)
print("\tStarting Validation")
start_batch_group_time = time.time()
# --- Validation Loops
for batch in range(1, self.t_params['val_batches']+1):
# next datum
idx, (feature, target, mask) = next(self.iter_train_val)
bool_cmpltd = self.distributed_val_step(feature, target, mask, bounds)
# Reporting for validation
if batch % self.val_batch_report_freq == 0 or batch==self.t_params['val_batches'] :
batch_group_time = time.time() - start_batch_group_time
est_completion_time_seconds = (batch_group_time/self.t_params['reporting_freq']) * (1 - batch/self.t_params['val_batches'])
est_completion_time_mins = est_completion_time_seconds/60
print("\t\tCompleted Validation Batch:{}/{} \t Time:{:.4f} \tEst Time Left:{:.1f}".format( batch, self.t_params['val_batches'], batch_group_time, est_completion_time_mins))
start_batch_group_time = time.time()
if batch in self.reset_idxs_validation:
self.model.reset_states()
# region - End of Epoch Reporting and Early iteration Callback
print("\tEpoch:{}\t Train Loss:{:.8f}\t Train MSE:{:.5f}\t Val Loss:{:.5f}\t Val MSE:{:.5f}\t Time:{:.5f}".format(epoch, self.loss_agg_epoch.result(), self.mse_agg_epoch.result(),
self.loss_agg_val.result(), self.mse_agg_val.result() ,time.time()-start_epoch_train ) )
#utility.tensorboard_record( self.writer.as_default(), [self.loss_agg_val.result(), self.mse_agg_val.result()], ['Validation Loss', 'Validation MSE' ], epoch )
self.df_training_info = utility.update_checkpoints_epoch(self.df_training_info, epoch, self.loss_agg_epoch, self.loss_agg_val, self.ckpt_mngr_epoch, self.t_params,
self.m_params, self.mse_agg_epoch ,self.mse_agg_val, self.t_params['objective'] )
# Early Stop Callback
if epoch > ( max( self.df_training_info.loc[:, 'Epoch'], default=0 ) + self.t_params['early_stopping_period']) :
print("Model Stopping Early at EPOCH {}".format(epoch))
print(self.df_training_info)
break
# endregion
print("Model Training Finished")
@tf.function
@tf.function
if __name__ == "__main__":
s_dir = utility.get_script_directory(sys.argv[0])
args_dict = utility.parse_arguments(s_dir)
# get training and model params
t_params, m_params = utility.load_params(args_dict)
# Initialize and train model
weather_model = WeatherModel(t_params, m_params)
weather_model.initialize_scheme_era5Eobs()
weather_model.train_model()
| [
6738,
2010,
34,
8068,
19,
1330,
16092,
292,
316,
11,
997,
17,
4475,
201,
198,
11748,
28686,
201,
198,
418,
13,
268,
2268,
14692,
10234,
62,
8697,
47,
62,
23678,
62,
25294,
62,
2538,
18697,
8973,
796,
366,
17,
1,
201,
198,
418,
13,... | 2.10869 | 8,354 |
# Generated by Django 2.1.7 on 2019-05-12 00:19
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
2713,
12,
1065,
3571,
25,
1129,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
"""
This module contains `jsonclass`, the decorator for JSON Classes.
"""
from __future__ import annotations
from jsonclasses.keypath import identical_key
from typing import (
Optional, Union, Callable, TypeVar, overload, cast, TYPE_CHECKING
)
from dataclasses import dataclass
from .jconf import (
JConf, OnCreate, CanCreate, OnDelete, CanUpdate, CanDelete, CanRead,
OnUpdate
)
from .jfield import JField
from .cdef import Cdef
from .jsonclassify import jsonclassify
from .jobject import JObject
if TYPE_CHECKING:
from .types import Types
T = TypeVar('T', bound=type)
@overload
@overload
@overload
def jsonclass(
cls: Optional[T] = None,
class_graph: Optional[str] = 'default',
key_encoding_strategy: Optional[Callable[[str], str]] = None,
key_decoding_strategy: Optional[Callable[[str], str]] = None,
camelize_json_keys: Optional[bool] = None,
strict_input: Optional[bool] = None,
ref_key_encoding_strategy: Optional[Callable[[JField], str]] = None,
validate_all_fields: Optional[bool] = None,
abstract: Optional[bool] = None,
reset_all_fields: Optional[bool] = None,
output_null: Optional[bool] = None,
on_create: OnCreate | list[OnCreate] | Types | None = None,
on_update: OnUpdate | list[OnUpdate] | Types | None = None,
on_delete: OnDelete | list[OnDelete] | Types | None = None,
can_create: CanCreate | list[CanCreate] | Types | None = None,
can_update: CanUpdate | list[CanUpdate] | Types | None = None,
can_delete: CanDelete | list[CanDelete] | Types | None = None,
can_read: CanRead | list[CanRead] | Types | None = None,
) -> Union[Callable[[T], T | type[JObject]], T | type[JObject]]:
"""The jsonclass object class decorator. To declare a jsonclass class, use
this syntax:
@jsonclass
class MyObject:
my_field_one: str
my_field_two: bool
"""
if cls is not None:
if not isinstance(cls, type):
raise ValueError('@jsonclass should be used to decorate a class.')
if camelize_json_keys is False:
key_encoding_strategy = identical_key
key_decoding_strategy = identical_key
jconf = JConf(
cgraph=cast(str, class_graph),
key_encoding_strategy=key_encoding_strategy,
key_decoding_strategy=key_decoding_strategy,
strict_input=strict_input,
ref_key_encoding_strategy=ref_key_encoding_strategy,
validate_all_fields=validate_all_fields,
abstract=abstract,
reset_all_fields=reset_all_fields,
output_null=output_null,
on_create=on_create,
on_update=on_update,
on_delete=on_delete,
can_create=can_create,
can_update=can_update,
can_delete=can_delete,
can_read=can_read)
dcls: type = dataclass(init=False)(cls)
jcls = jsonclassify(dcls)
cdef = Cdef(jcls, jconf)
jcls.cdef = cdef
jconf.cgraph.put(cdef)
return jcls
else:
return parametered_jsonclass
| [
37811,
198,
1212,
8265,
4909,
4600,
17752,
4871,
47671,
262,
11705,
1352,
329,
19449,
38884,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
6738,
33918,
37724,
13,
2539,
6978,
1330,
10411,
62,
2539,
198,
6738,
19720,
1330... | 2.388589 | 1,297 |
# @Time : 2021/03/20
# @Author : Yushuo Chen
# @Email : chenyushuo@ruc.edu.cn
"""
save and load example
========================
Here is the sample code for the save and load in RecBole.
The path to saved data or model can be found in the output of RecBole.
"""
import pickle
from logging import getLogger
import torch
from recbole.config import Config
from recbole.data import create_dataset, data_preparation, save_split_dataloaders, load_split_dataloaders
from recbole.utils import init_seed, init_logger, get_model, get_trainer
if __name__ == '__main__':
save_example()
| [
2,
2488,
7575,
220,
220,
1058,
33448,
14,
3070,
14,
1238,
198,
2,
2488,
13838,
1058,
575,
1530,
20895,
12555,
198,
2,
2488,
15333,
220,
1058,
269,
47413,
1530,
20895,
31,
622,
66,
13,
15532,
13,
31522,
198,
198,
37811,
198,
21928,
2... | 3.056995 | 193 |
import cv2 as cv
# Lendo Imagens/Reading Images
img = cv.imread('Exemplos Python OpenCV/Resources/Photos/cats.jpg') #cv.comando('NOME_JANELA')
cv.imshow('Cats', img) #cv.comando('NOME_JANELA',VARIAVEL)
cv.waitKey(0) #espera o usuário apertar o teclado
# Lendo Videos/Reading Videos
capture = cv.VideoCapture('Exemplos Python OpenCV/Resources/Videos/dog.mp4')
while True:
isTrue, frame = capture.read()
cv.imshow('Video', frame)
if cv.waitKey(20) & 0xFF==ord('d'):
break
capture.release() #desassocia a variavel 'capture' do video
cv.destroyAllWindows()
| [
11748,
269,
85,
17,
355,
269,
85,
198,
198,
2,
406,
31110,
39440,
641,
14,
36120,
5382,
198,
9600,
796,
269,
85,
13,
320,
961,
10786,
3109,
18856,
418,
11361,
4946,
33538,
14,
33236,
14,
21197,
14,
24619,
13,
9479,
11537,
220,
220,
... | 2.293893 | 262 |
import os, json
from auxshaker import CONFIG
from .serial import send_serial
#TODO: Expose advanced features if needed.
| [
11748,
28686,
11,
33918,
198,
6738,
27506,
1477,
3110,
1330,
25626,
198,
6738,
764,
46911,
1330,
3758,
62,
46911,
198,
220,
220,
220,
1303,
51,
3727,
46,
25,
1475,
3455,
6190,
3033,
611,
2622,
13,
628
] | 3.472222 | 36 |
import argparse
import random
import os
import json
import string
from tqdm import tqdm
import re
from multiprocessing import Pool, Manager
import requests
import time
import numpy as np
# from newspaper import Article, Config
# manager = Manager()
# articles = manager.dict()
# config = Config()
# config.fetch_images = False
transl_table = dict([(ord(x), ord(y)) for x, y in zip(u"‘’´“”–-", u"'''\"\"--")])
url_pattern = re.compile(r'(https:\/\/t\.co\/[\w]*\b)( QT)?')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', required=True)
parser.add_argument('-o', '--output_path', required=True)
# parser.add_argument('-a', '--article_cache', required=True)
parser.add_argument('-s', '--seed', default=0, type=int)
args = parser.parse_args()
np.random.seed(args.seed)
random.seed(args.seed)
print(f'reading {args.input_path}')
tweets = {}
tweet_list = read_jsonl(args.input_path)
for tweet in tweet_list:
tweet_id = tweet['data']['id']
tweets[tweet_id] = tweet
print(f'Total tweets read: {len(tweets)}')
print('Adding tweet references...')
with open(args.output_path, 'w') as f:
with Pool(processes=8) as p:
for tweet_id, tweet in tqdm(p.imap_unordered(parse_tweet, tweets.items()), total=len(tweets)):
f.write(json.dumps(tweet) + '\n', ensure_ascii=False)
print('Done!')
| [
198,
11748,
1822,
29572,
198,
11748,
4738,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
4731,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
302,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
11,
9142,
198,
11748,
700... | 2.567669 | 532 |
# Generated by Django 3.2.5 on 2021-08-20 18:46
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
20,
319,
33448,
12,
2919,
12,
1238,
1248,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |