content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#
#------------------------------------------------------------------------------
# Copyright (c) 2013-2014, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# inval.py - This file is part of the PySptools package.
#
"""
"""
import pysptools.util as util
# PPI
# NFINDR
# ATGP
# FIPPI
| [
2,
201,
198,
2,
10097,
26171,
201,
198,
2,
15069,
357,
66,
8,
2211,
12,
4967,
11,
4302,
12634,
2013,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345... | 3.492481 | 266 |
#!/usr/bin/env python
# -*- coding: ASCII -*-
"""
Merge/Adapter trim reads stored in BAM
:Author: Martin Kircher
:Contact: mkircher@uw.edu
"""
import sys
import os
import math
import pysam
from optparse import OptionParser,OptionGroup
import string
table = string.maketrans('TGCAMRWSYKVHDBtgcamrwsykvhdb','ACGTKYWSRMBDHVacgtkywsrmbdhv') # COMPLEMENT DNA
quality_offset = 33
parser = OptionParser("%prog [options] BAMfile")
parser.add_option("-p","--PIPE",dest="pipe",help="Read BAM from and write it to PIPE",default=False,action="store_true")
parser.add_option("-o", "--outdir", dest="outdir", help="Create output files in another directory.")
parser.add_option("-c", "--consensus", dest="consensus", help="Report PCR duplicate consensus instead of sequence with highest sum of base qualities.",default=False,action="store_true")
parser.add_option("", "--outprefix", dest="outprefix", help="Prefix for output files (default 'PCRconsensus').",default="PCRconsensus")
parser.add_option("", "--outnewconsensus", dest="outnewconsensus", help="Save reads with new consensus sequence for realignment to separate FastQ output files with this prefix (default OFF).",default=None)
parser.add_option("", "--SAM", dest="SAM", help="Input/Output SAM not BAM.",default=False,action="store_true")
parser.add_option("", "--ignore_RG", dest="ignore_RG", help="Ignore the RG when looking for PCR duplicates. The consensus reads gets the RG of the template used.",default=False,action="store_true")
parser.add_option("", "--fixID", dest="fixID", help="Fix read ID, take only first part up to first / character",default=False,action="store_true")
parser.add_option("--library", dest="library", help="Use library name from RG read header rather than the readgroup ID",default=False,action="store_true")
parser.add_option("--UMI", dest="UMI", help="Use unique molecule identifier (UMI, in second index field) for grouping ",default=False,action="store_true")
parser.add_option("-v", "--verbose", dest="verbose", help="Turn all messages on",default=False,action="store_true")
group = OptionGroup(parser, "Filter options")
group.add_option("--include_qcfail",dest="include_qcfail",help="Consider reads that have the QC fail flag",default=False,action='store_true')
group.add_option("--rescue_qcfail",dest="rescue_qcfail",help="Remove fail quality flag of reads if PCR duplicates are observed",default=False,action='store_true')
group.add_option("--frequency_cutoff",dest="frequency",help="Keep only sequences with at least X PCR duplicates (default X = None)",default=None,type="int")
group.add_option("-f","--5p",dest="fivePrime",help="Cluster reads on five prime coordinate",default=False,action='store_true')
group.add_option("--max_length",dest="MaxLength",help="Longest possible read length stored in the input BAM (only relevant for SR 5' clustering and PE reads spanning multiple contigs, def 800)",default=800,type="int")
group.add_option("-k","--keep",dest="keep",help="Keep unmapped sequences",default=False,action='store_true')
group.add_option("--buffer",dest="rbuffer",help="Lowest number of PE reads buffered before write (def 5000)",default=5000,type="int")
group.add_option("-m","--merged",dest="merged",help="Keep only SR reads that are merged",default=False,action='store_true')
parser.add_option_group(group)
(options, args) = parser.parse_args()
#------------------------------
# PE data
#------------------------------
#The --5p parameter does not trigger any special code in the PE handling. The outer coordinates of PE reads are defined by chromosome, coordinate forward read and coordinate reverse read.
#(1) chromosome is a string or can be a tuple of strings for PEs mapped across contigs/chromosomes
#(2) Coordinate of a read aligned without reversal: reported five prime position in the BAM
#(3) Coordinate of a read aligned as reverse complement: five prime position + alignment length
#When PE reads are collected, they are first collected incomplete pairs and then as complete pairs. Complete pairs are stored until a buffer limit is reached, incomplete pairs until the end of the script -- where they are essentially forgotten, ehh, removed from the BAM file ;-) . So if the buffer is full, a PE read cluster is processed if:
#(1) The chromosome is a string and we are already on a different chromosome
#(2) or, we are on the same chromosome, but more than molecule length away from the largest 5' coordinate of the cluster
#(3) or, chromosome is a tuple and none of the strings in the tuple matches the current chromosome
#(4) or, one of the strings is the current chromosome (implicating that we have finished the other chromosome since we have complete PE read clusters at hand) and we are more than 5p_max_length bases away from the 5' position of that read.
#------------------------------
#Merged parameter
#------------------------------
#Removes reads that are not flagged "paired in sequencing" and where the read ID does not start in "M_". If your incomplete PE reads do not have the paired in sequencing flag, this would remove them. If they are marked as paired in sequencing, but the second read is just missing from the file that would of course not help -- but those are by default removed (see above).
if options.outprefix == "":
sys.stderr.write("Outprefix can not be empty!\n")
sys.exit()
if options.outdir != None and not os.path.isdir(options.outdir):
sys.stderr.write("Output folder does not exist!\n")
sys.exit()
elif options.outdir == None:
options.outdir = ""
else:
options.outdir = options.outdir.rstrip('/')+'/'
cfastq_SR, cfastq_PE = 0,0
if options.outnewconsensus != None:
outfilename = options.outdir+options.outnewconsensus+"_SR.fastq"
if options.verbose: sys.stderr.write("Creating: %s\n"%outfilename)
fastq_SR = open(outfilename,'w')
outfilename = options.outdir+options.outnewconsensus+"_r1.fastq"
if options.verbose: sys.stderr.write("Creating: %s\n"%outfilename)
fastq_r1 = open(outfilename,'w')
outfilename = options.outdir+options.outnewconsensus+"_r2.fastq"
if options.verbose: sys.stderr.write("Creating: %s\n"%outfilename)
fastq_r2 = open(outfilename,'w')
## CREATE OUTPUT FILE(S)/STREAM
fileflags = 'wb'
if options.SAM: fileflags = 'w'
files = args
if options.pipe: files = [None]
if len(files) > 1:
files=files[:1]
sys.stderr.write("This script supports only one input file! Limiting processing to first filename.\n")
outfile = None
sys.stderr.write("WARNING: This script will 'cluster' reads based on both outer coordinates and pick a representative sequence of the cluster as the one with the dominant CIGAR string. Other sequences are lost and will not be considered in consensus calling.\n")
for filename in files:
if filename == None and not options.SAM:
infile = pysam.Samfile( "-", 'rb' )
elif filename == None and options.SAM:
infile = pysam.Samfile( "-", 'r' )
else:
infile = pysam.Samfile( filename, 'rb' )
id2lib = {}
if options.library and 'RG' in infile.header:
for rgroup in infile.header['RG']:
if 'LB' in rgroup and 'ID' in rgroup:
id2lib[rgroup['ID']] = rgroup['LB']
if outfile == None:
if options.verbose: sys.stderr.write("Creating output files/streams...\n")
if options.pipe:
outfile = pysam.Samfile( "-", fileflags, template = infile)
if options.verbose: sys.stderr.write("BAM/SAM output on stdout...\n")
else:
outfilename = options.outdir+options.outprefix+".bam"
if options.verbose: sys.stderr.write("Creating: %s\n"%outfilename)
outfile = pysam.Samfile( outfilename , fileflags, template = infile)
if ('HD' in outfile.header) and ('SO' in outfile.header['HD']):
outfile.header['HD']['SO'] = 'unsorted'
else:
outfile.header['HD'] = {'VN': '1.4','SO':'unsorted'}
lcheck = None,None
variants = {}
incomplete_variants = {}
curpos = None
curvariants = {}
total_reads = 0
out_reads = 0
out_reads_SR = 0
out_reads_kept = 0
for read in infile:
if read.qual == None: continue
if options.fixID: read.qname = read.qname.split('/')[0]
total_reads += 1
if options.verbose and total_reads % 100000 == 0:
sys.stderr.write("Reads in %d / PCR dups out %d PE | %d SR / Unmapped out %d / FastQ realignment %d PE | %d SR ( %.2f%% ; current pos: %s)\n"%(total_reads,out_reads,out_reads_SR,out_reads_kept,cfastq_PE,cfastq_SR,(out_reads*2+out_reads_SR+out_reads_kept+cfastq_SR+cfastq_PE*2)/float(total_reads)*100,str(curpos)))
if read.is_qcfail and not options.include_qcfail and not options.rescue_qcfail:
#if options.verbose: sys.stderr.write("QC FAIL\n")
continue
if read.is_unmapped and not options.keep:
#if options.verbose: sys.stderr.write("UNMAPPED\n")
continue
elif read.is_unmapped and options.keep:
if not read.is_qcfail:
outfile.write(read)
out_reads_kept += 1
continue
if not read.is_paired and options.merged and not read.qname.startswith("M_"):
#if options.verbose: sys.stderr.write("MERGED\n")
continue
RG = None
if not options.ignore_RG:
if read.tags != None:
for key,value in read.tags:
if key == "RG":
if value in id2lib: RG = id2lib[value]
else: RG = value
break
if options.UMI:
if read.tags != None:
for key,value in read.tags:
if key == "XJ":
RG = value
break
if RG not in variants: variants[RG] = {}
if RG not in incomplete_variants: incomplete_variants[RG] = {}
if RG not in curvariants: curvariants[RG] = {}
if sum(map(len,variants.itervalues())) > options.rbuffer and ((lcheck[0] != curpos[0]) or (lcheck[1]+options.MaxLength < curpos[1])):
lcheck = (curpos[0],curpos[1])
if options.verbose: sys.stderr.write("Full buffer (%d)"%sum(map(len,variants.itervalues()))+str(curpos)+" \n")
for cRG in variants.keys():
hvariants = {}
for (hchr,outpos,outpos_r2),reads in variants[cRG].iteritems():
if ((type(hchr) != type(()) and # SINGLE CHROM MAPPING PE
((hchr != curpos[0]) or
((hchr == curpos[0]) and (max(outpos[1],outpos_r2[1])+options.MaxLength < curpos[1])))) or
(type(hchr) == type(()) and # CROSS CONTIG MAPPING PE
(((hchr[0] != curpos[0]) and (hchr[1] != curpos[0])) or
((hchr[0] == curpos[0]) and (outpos[1]+options.MaxLength < curpos[1])) or
((hchr[1] == curpos[0]) and (outpos_r2[1]+options.MaxLength < curpos[1]))))):
forward,reverse = get_consensus(reads)
if forward == None or reverse == None:
#if options.verbose: sys.stderr.write("FAILED CONSENSUS\n")
continue
elif (forward.is_unmapped or reverse.is_unmapped) and options.outnewconsensus != None:
cfastq_PE+=1
seq = forward.seq
qual = forward.qual
if forward.is_reverse:
seq = seq.translate(table)[::-1]
qual = qual[::-1]
fastq_r1.write("@%s/1\n%s\n+\n%s\n"%(forward.qname,seq,qual))
seq = reverse.seq
qual = reverse.qual
if reverse.is_reverse:
seq = seq.translate(table)[::-1]
qual = qual[::-1]
fastq_r2.write("@%s/2\n%s\n+\n%s\n"%(reverse.qname,seq,qual))
else:
outfile.write(forward)
outfile.write(reverse)
out_reads += 1
else:
hvariants[(hchr,outpos,outpos_r2)]=reads
if (len(hvariants) > 0) or (RG == cRG): variants[cRG] = hvariants
else: del variants[cRG]
if options.verbose: sys.stderr.write("- Full buffer (%d)"%sum(map(len,variants.itervalues()))+str(curpos)+" \n")
if read.is_paired: # PE DATA
if read.mate_is_unmapped and options.keep:
outfile.write(read)
out_reads_kept += 1
continue
#else:
#if options.verbose: sys.stderr.write("UNMAPPED\n")
curpos = (read.tid,read.pos)
hchr = read.tid
outpos = (read.tid,read.pos)
if read.is_reverse: outpos = (read.tid,read.pos+aln_length(read.cigar))
if read.is_read1: #FORWARD READ
if read.qname not in incomplete_variants[RG]:
incomplete_variants[RG][read.qname] = [read,outpos]
else:
read_r2,outpos_r2 = incomplete_variants[RG][read.qname]
del incomplete_variants[RG][read.qname]
if outpos_r2[0] != hchr: hchr = hchr,outpos_r2[0]
if (hchr,outpos,outpos_r2) not in variants[RG]:
variants[RG][(hchr,outpos,outpos_r2)] = [(read,read_r2)]
else:
variants[RG][(hchr,outpos,outpos_r2)].append((read,read_r2))
elif read.is_read2: #REVERSE READ
if read.qname not in incomplete_variants[RG]:
incomplete_variants[RG][read.qname] = [read,outpos]
else:
read_r1,outpos_r1 = incomplete_variants[RG][read.qname]
del incomplete_variants[RG][read.qname]
if outpos_r1[0] != hchr: hchr = outpos_r1[0],hchr
if (hchr,outpos_r1,outpos) not in variants[RG]:
variants[RG][(hchr,outpos_r1,outpos)] = [(read_r1,read)]
else:
variants[RG][(hchr,outpos_r1,outpos)].append((read_r1,read))
else:
sys.stderr.write("Should not happen!")
else: # SR DATA
if (curpos != None) and ((read.tid,read.pos) != curpos):
if options.fivePrime and (read.tid == curpos[0]):
hpos = read.pos-options.MaxLength
hvariants = {}
for key,value in curvariants[RG].iteritems():
if (key[1] < hpos):
cread = get_consensus_SR(value[0])
if cread == None:
#if options.verbose: sys.stderr.write("FAILED CONSENSUS\n")
continue
elif cread.is_unmpapped and options.outnewconsensus != None:
cfastq_SR+=1
seq = cread.seq
qual = cread.qual
if cread.is_reverse:
seq = seq.translate(table)[::-1]
qual = qual[::-1]
fastq_SR.write("@%s\n%s\n+\n%s\n"%(cread.qname,seq,qual))
else:
outfile.write(cread)
out_reads_SR += 1
else:
hvariants[key]=value
curvariants[RG] = hvariants
else:
for key,value in curvariants[RG].iteritems():
cread = get_consensus_SR(value[0])
if cread == None:
#if options.verbose: sys.stderr.write("FAILED CONSENSUS\n")
continue
elif cread.is_unmapped and options.outnewconsensus != None:
cfastq_SR+=1
seq = cread.seq
qual = cread.qual
if cread.is_reverse:
seq = seq.translate(table)[::-1]
qual = qual[::-1]
fastq_SR.write("@%s\n%s\n+\n%s\n"%(cread.qname,seq,qual))
else:
outfile.write(cread)
out_reads_SR += 1
curvariants[RG] = {}
curpos = (read.tid,read.pos)
strand = read.is_reverse
outpos = curpos[1]
if strand and options.fivePrime: outpos+=aln_length(read.cigar)
nkey = (strand,outpos)
if not options.fivePrime: nkey = (strand,outpos,aln_length(read.cigar))
if nkey in curvariants[RG]:
curvariants[RG][nkey][0].append(read)
curvariants[RG][nkey][1]+=1
else:
curvariants[RG][nkey] = [[read],1]
for RG in curvariants.keys():
for key,value in curvariants[RG].iteritems():
read = get_consensus_SR(value[0])
if read == None:
#if options.verbose: sys.stderr.write("FAILED CONSENSUS\n")
continue
elif read.is_unmapped and options.outnewconsensus != None:
cfastq_SR+=1
seq = read.seq
qual = read.qual
if read.is_reverse:
seq = seq.translate(table)[::-1]
qual = qual[::-1]
fastq_SR.write("@%s\n%s\n+\n%s\n"%(read.qname,seq,qual))
else:
outfile.write(read)
out_reads_SR += 1
del curvariants[RG]
for RG in variants.keys():
for key,value in variants[RG].iteritems():
read1,read2 = get_consensus(value)
if read1 == None or read2 == None:
#if options.verbose: sys.stderr.write("FAILED CONSENSUS\n")
continue
elif (read1.is_unmapped or read2.is_unmapped) and options.outnewconsensus != None:
cfastq_PE+=1
seq = read1.seq
qual = read1.qual
if read1.is_reverse:
seq = seq.translate(table)[::-1]
qual = qual[::-1]
fastq_r1.write("@%s/1\n%s\n+\n%s\n"%(read1.qname,seq,qual))
seq = read2.seq
qual = read2.qual
if read2.is_reverse:
seq = seq.translate(table)[::-1]
qual = qual[::-1]
fastq_r2.write("@%s/2\n%s\n+\n%s\n"%(read2.qname,seq,qual))
else:
outfile.write(read1)
outfile.write(read2)
out_reads += 1
del variants[RG]
if options.outnewconsensus != None:
fastq_SR.close()
fastq_r1.close()
fastq_r2.close()
if cfastq_PE == 0:
outfilename = options.outdir+options.outnewconsensus+"_r1.fastq"
if options.verbose: sys.stderr.write("Removing empty file: %s\n"%outfilename)
os.remove(outfilename)
outfilename = options.outdir+options.outnewconsensus+"_r2.fastq"
if options.verbose: sys.stderr.write("Removing empty file: %s\n"%outfilename)
os.remove(outfilename)
if cfastq_SR == 0:
outfilename = options.outdir+options.outnewconsensus+"_SR.fastq"
if options.verbose: sys.stderr.write("Removing empty file: %s\n"%outfilename)
os.remove(outfilename)
if options.verbose:
sys.stderr.write("Total reads in %d / PCR dups out %d PE | %d SR / Unmapped out %d / FastQ realignment %d PE | %d SR ( %.2f%%)\n"%(total_reads,out_reads,out_reads_SR,out_reads_kept,cfastq_PE,cfastq_SR,(out_reads*2+out_reads_SR+out_reads_kept+cfastq_SR+cfastq_PE*2)/float(total_reads)*100))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
37101,
532,
9,
12,
198,
198,
37811,
198,
198,
13102,
469,
14,
47307,
15797,
9743,
8574,
287,
347,
2390,
198,
198,
25,
13838,
25,
5780,
509,
1980,
372,
198... | 2.358127 | 7,690 |
# 295. Find Median from Data Stream
# Runtime: 2710 ms, faster than 5.93% of Python3 online submissions for Find Median from Data Stream.
# Memory Usage: 35.4 MB, less than 94.61% of Python3 online submissions for Find Median from Data Stream.
import bisect
# Insertion Sort
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian() | [
2,
34772,
13,
9938,
26178,
422,
6060,
13860,
198,
198,
2,
43160,
25,
2681,
940,
13845,
11,
5443,
621,
642,
13,
6052,
4,
286,
11361,
18,
2691,
22129,
329,
9938,
26178,
422,
6060,
13860,
13,
198,
198,
2,
14059,
29566,
25,
3439,
13,
... | 3.430894 | 123 |
import base64
from io import StringIO
with open('f:/a/a.txt','r') as fi:
fo = StringIO()
base64.encode(fi,fo)
print(fo.getvalue()) | [
11748,
2779,
2414,
198,
6738,
33245,
1330,
10903,
9399,
198,
4480,
1280,
10786,
69,
14079,
64,
14,
64,
13,
14116,
41707,
81,
11537,
355,
25912,
25,
198,
220,
220,
220,
220,
220,
11511,
796,
10903,
9399,
3419,
198,
220,
220,
220,
220,
... | 2.242424 | 66 |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import get_language, to_locale
from livesettings import config_value
from satchmo_utils.numbers import trunc_decimal
import locale
import logging
log = logging.getLogger('l10n.utils')
#backport from python2.5
### Number formatting APIs
# Author: Martin von Loewis
# improved by Georg Brandl
#perform the grouping from right to left
#backport from python2.5
def format(percent, value, loc_conv, grouping=False, monetary=False, *additional):
"""Returns the locale-aware substitution of a %? specifier
(percent).
additional is for format strings which contain one or more
'*' modifiers."""
# this is only for one-percent-specifier strings and this should be checked
if percent[0] != '%':
raise ValueError("format() must be given exactly one %char "
"format specifier")
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
# floats and decimal ints need special action!
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(parts[0], loc_conv, monetary=monetary)
decimal_point = loc_conv[monetary and 'mon_decimal_point'
or 'decimal_point']
formatted = decimal_point.join(parts)
while seps:
sp = formatted.find(' ')
if sp == -1: break
formatted = formatted[:sp] + formatted[sp+1:]
seps -= 1
elif percent[-1] in 'diu':
if grouping:
formatted = _group(formatted, monetary=monetary)[0]
return formatted
def moneyfmt(val, curr=None, places=-1, grouping=True, wrapcents='', current_locale=None):
"""Formats val according to the currency settings in the current locale.
Ported-and-modified from Python 2.5
"""
conv = get_locale_conv(current_locale)
if places < 0:
places = conv['int_frac_digits']
val = trunc_decimal(val, places)
try: # Required because Python < 2.5 does not have monetary arg
s = format('%%.%if' % places, abs(val), conv, grouping, monetary=True)
except TypeError:
s = format('%%.%if' % places, abs(val), conv, grouping)
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = '<' + s + '>'
if curr is None:
curr = config_value('LANGUAGE','CURRENCY')
curr = curr.replace("_", " ")
precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = curr + (separated and ' ' or '') + s
else:
s = s + (separated and ' ' or '') + curr
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val<0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
val = s.replace('<', '').replace('>', '')
if wrapcents:
pos = s.rfind(conv['decimal_point'])
if pos>-1:
pos +=1
val = u"%s<%s>%s</%s>" % val[:pos], wrapcents, val[pos:], wrapcents
return val
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
62,
16129,
11,
284,
62,
17946,
1000,
198,
6738,... | 2.35511 | 1,546 |
#
#1.3: Design an algorithm and write code to remove the duplicate characters in a string without using any additional buffer.
#NOTE: one or two additional variables are fine. An extra copy of the array is not.
#
myString = "hubba bubba bubble tape"
noDuplicates = []
for letter in myString:
if letter not in noDuplicates:
noDuplicates.append(letter)
myString = ''.join(noDuplicates)
print myString
| [
2,
198,
2,
16,
13,
18,
25,
8495,
281,
11862,
290,
3551,
2438,
284,
4781,
262,
23418,
3435,
287,
257,
4731,
1231,
1262,
597,
3224,
11876,
13,
198,
2,
16580,
25,
530,
393,
734,
3224,
9633,
389,
3734,
13,
1052,
3131,
4866,
286,
262,
... | 3.277778 | 126 |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 10:56:23 2021
@author: linca
"""
import time
import os
import numpy as np
import pandas as pd
if __name__=='__main__':
beacon_list = [1,5,6,7,10,11,15,16,17,19,21,22,23,24,25,26,28,29,30,32,34,36,38,40,41,44,46,48]
bpeace = BPeace(beacon_list)
start = time.perf_counter()
bpeace.process_beacon()
end = time.perf_counter()
print(f'{end-start} seconds')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
3158,
2608,
838,
25,
3980,
25,
1954,
33448,
198,
198,
31,
9800,
25,
300,
1939,
64,
198,
37811,
198,
11748,
640,
198,
11748,
28686,
198,
... | 2.022624 | 221 |
'''
Parts from https://en.wikipedia.org/wiki/User:Ritchie333/afcbios.py, licensed CC-BY-SA-3.0
'''
import re
from botbase import *
titles = []
page_to_update = "Wikipedia:WikiProject Women in Red/Drafts"
reMarker = re.compile("<ref.*\/ref>|{{.*}}|<!--.*-->|\'\'\'|----")
reTitle = re.compile( '\(.*\)' )
header_new = "New Additions"
header_old = "Existing Pages"
wikitext = "{{/Header}}\n"
wikitext_header_2 = "== {} ==\n"
wikitext_header_3 = "=== {} - {} ===\n"
wikitext_entry = "* [[{}]]\n::<small><nowiki>{}</nowiki></small>\n:::<small><nowiki>{} - {}</nowiki></small>\n"
search_query = 'incategory:"{}" "{}"'
categories = [ "AfC submissions declined as a non-notable biography", "AfC submissions declined as a non-notable academic topic" ]
keywords = [ "she was", "she is", "her book", "her work" ]
for category in categories:
for keyword in keywords:
titles += run_search(category, keyword)
titles = set(titles)
with open('last_titles.txt', 'r') as last_titles_file:
last_titles = set(last_titles_file.read().split("|"))
with open('last_titles.txt', 'w') as last_titles_file:
last_titles_file.write("|".join(titles))
new_titles = titles - last_titles
old_titles = titles & last_titles
wikitext += (generate_entries(new_titles, header_new) + generate_entries(old_titles, header_old))
page = p.Page(site, page_to_update)
page.text = wikitext
page.savewithshutoff(summary = 'Update "Women in Red drafts" report', minor = False)
| [
7061,
6,
198,
42670,
422,
3740,
1378,
268,
13,
31266,
13,
2398,
14,
15466,
14,
12982,
25,
49,
48423,
20370,
14,
1878,
21101,
4267,
13,
9078,
11,
11971,
12624,
12,
17513,
12,
4090,
12,
18,
13,
15,
198,
7061,
6,
198,
198,
11748,
302... | 2.636861 | 548 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
33918,
198,
198,
6738,
435,
541,
323,
13,
64,
404,
13,
15042,
13,
9979,
415,
13,
22973,
34184,
1187,
1330,
163... | 2.446809 | 47 |
"""``runway gen-sample k8s-flux-repo`` command."""
import logging
import shutil
import sys
from typing import Any # pylint: disable=W
import click
from ... import options
from .utils import TEMPLATES, convert_gitignore, copy_sample, write_tfstate_template
if sys.version_info.major > 2:
from pathlib import Path # pylint: disable=E
else:
from pathlib2 import Path # pylint: disable=E
LOGGER = logging.getLogger(__name__.replace("._", "."))
@click.command("k8s-flux-repo", short_help="k8s + flux + tf (k8s-tf-infrastructure)")
@options.debug
@options.no_color
@options.verbose
@click.pass_context
def k8s_flux_repo(ctx, **_):
# type: (click.Context, Any) -> None
"""Generate a sample Kubernetes cluster with Flux CD managed via Terraform."""
src = TEMPLATES / "k8s-flux-repo"
dest = Path.cwd() / "k8s-tf-infrastructure"
src_awscli = TEMPLATES / "k8s-cfn-repo/k8s-master.cfn/k8s_hooks/awscli.py"
dest_awscli = dest / "gen-kubeconfig.cfn/k8s_hooks/awscli.py"
copy_sample(ctx, src, dest)
tf_eks_base = TEMPLATES / "k8s-tf-repo" / "eks-base.tf"
copy_sample(ctx, tf_eks_base, dest / tf_eks_base.parts[-1])
convert_gitignore(dest / "_gitignore")
gen_kubeconfig_src_dir = TEMPLATES / "k8s-tf-repo" / "gen-kubeconfig.cfn"
copy_sample(ctx, gen_kubeconfig_src_dir, dest / gen_kubeconfig_src_dir.parts[-1])
LOGGER.debug('copying "%s" to "%s"', src_awscli, dest_awscli)
shutil.copyfile(str(src_awscli), str(dest_awscli))
tfstate_src_dir = TEMPLATES / "k8s-tf-repo" / "tfstate.cfn"
copy_sample(ctx, tfstate_src_dir, dest / tfstate_src_dir.parts[-1])
tfstate_templates_dir = dest / "tfstate.cfn/templates"
tfstate_templates_dir.mkdir()
write_tfstate_template(tfstate_templates_dir / "tf_state.yml")
LOGGER.success("Sample k8s infrastructure repo created at %s", dest)
LOGGER.notice("See the README for setup and deployment instructions.")
| [
37811,
15506,
5143,
1014,
2429,
12,
39873,
479,
23,
82,
12,
69,
22564,
12,
260,
7501,
15506,
3141,
526,
15931,
198,
11748,
18931,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
6738,
19720,
1330,
4377,
220,
1303,
279,
2645,
600,
25,
1... | 2.402242 | 803 |
from Werewolf.agents.AgentPlayer import AgentPlayer;
from Werewolf.game.actions.Vote import Vote;
from Shared.enums.AgentTypeEnum import AgentTypeEnum;
import random;
| [
6738,
40261,
13,
49638,
13,
36772,
14140,
1330,
15906,
14140,
26,
198,
6738,
40261,
13,
6057,
13,
4658,
13,
37394,
1330,
19175,
26,
198,
198,
6738,
39403,
13,
268,
5700,
13,
36772,
6030,
4834,
388,
1330,
15906,
6030,
4834,
388,
26,
19... | 3.595745 | 47 |
from ..internal.utils.importlib import func_name # noqa
from ..internal.utils.importlib import module_name # noqa
from ..internal.utils.importlib import require_modules # noqa
| [
6738,
11485,
32538,
13,
26791,
13,
11748,
8019,
1330,
25439,
62,
3672,
220,
1303,
645,
20402,
198,
6738,
11485,
32538,
13,
26791,
13,
11748,
8019,
1330,
8265,
62,
3672,
220,
1303,
645,
20402,
198,
6738,
11485,
32538,
13,
26791,
13,
1174... | 3.509804 | 51 |
from datetime import datetime
from getCryptocurrencyRate import CryptoCurrencyRate
product = "BTC_JPY"
# ["BTC_JPY", "XRP_JPY", "ETH_JPY", "XTZ_JPY", "XLM_JPY", "XEM_JPY", "BAT_JPY", "ETC_JPY", "LTC_JPY", "BCH_JPY", "MONA_JPY", "LSK_JPY"]
scale = "hour"
# ["hour","day","week","month","year"]
res = CryptoCurrencyRate(product, scale).get()
print("\n***情報***")
print("リクエストステータス " + str(res.status))
print("現在 " + res.price_info_list[-1].price_str + "JPY")
print("推移 " + res.change_str + "%")
print("\n***一覧***")
for price_info in res.price_info_list:
print(datetime.fromtimestamp(price_info.timestamp))
print(price_info.price_str + "JPY")
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
651,
23919,
420,
13382,
32184,
1330,
36579,
34,
13382,
32184,
198,
198,
11167,
796,
366,
35964,
62,
12889,
56,
1,
198,
2,
14631,
35964,
62,
12889,
56,
1600,
366,
55,
20031,
62,
12889... | 2.229452 | 292 |
nombres_magos = ['Luis', 'Pedro', 'Antonio']
show_magicians(nombres_magos)
print("\nGrandes Magos:")
great_magicians = make_great(nombres_magos[:])
show_magicians(great_magicians)
print("\nMagos Originales:")
show_magicians(nombres_magos)
| [
198,
77,
2381,
411,
62,
19726,
418,
796,
37250,
25596,
271,
3256,
705,
43468,
305,
3256,
705,
13217,
261,
952,
20520,
198,
12860,
62,
19726,
5106,
7,
77,
2381,
411,
62,
19726,
418,
8,
198,
198,
4798,
7203,
59,
77,
23581,
274,
2944,
... | 2.372549 | 102 |
from decimal import Decimal
from typing import (
Any,
Dict,
Optional,
)
import asyncio
from hummingbot.core.event.events import (
OrderType,
TradeType
)
from hummingbot.connector.in_flight_order_base import InFlightOrderBase
from .altmarkets_constants import Constants
s_decimal_0 = Decimal(0)
| [
6738,
32465,
1330,
4280,
4402,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
4377,
11,
198,
220,
220,
220,
360,
713,
11,
198,
220,
220,
220,
32233,
11,
198,
8,
198,
11748,
30351,
952,
198,
6738,
41465,
13645,
13,
7295,
13,
15596,... | 2.925926 | 108 |
from subprocess import call
import shutil
import os
from Converter import ( UrlConverter, LinkConverter )
# import config
import json
import logging
| [
6738,
850,
14681,
1330,
869,
198,
11748,
4423,
346,
198,
11748,
28686,
198,
6738,
35602,
353,
1330,
357,
8799,
75,
3103,
332,
353,
11,
7502,
3103,
332,
353,
1267,
198,
2,
1330,
4566,
198,
11748,
33918,
198,
11748,
18931,
198
] | 3.725 | 40 |
"""oai.py module."""
import json
import logging
from typing import Iterator, Optional
import smart_open
from sickle import Sickle
from sickle.models import Record
logger = logging.getLogger(__name__)
| [
37811,
78,
1872,
13,
9078,
8265,
526,
15931,
198,
11748,
33918,
198,
11748,
18931,
198,
6738,
19720,
1330,
40806,
1352,
11,
32233,
198,
198,
11748,
4451,
62,
9654,
198,
6738,
6639,
293,
1330,
32181,
293,
198,
6738,
6639,
293,
13,
27530,... | 3.416667 | 60 |
f=open("./level_def.txt","r")
lines = f.readlines()
idx=0
level_id=1
for line in lines:
if idx % 16 == 0:
fo = open("level/level-%i.txt" % level_id, "w")
fo.write("%s" %(line))
if idx % 16 == 15:
fo.close()
level_id+=1
idx+=1
| [
69,
28,
9654,
7,
1911,
14,
5715,
62,
4299,
13,
14116,
2430,
81,
4943,
198,
6615,
796,
277,
13,
961,
6615,
3419,
198,
312,
87,
28,
15,
198,
5715,
62,
312,
28,
16,
198,
1640,
1627,
287,
3951,
25,
198,
220,
220,
220,
611,
4686,
8... | 1.886525 | 141 |
from dagster_graphql.test.utils import execute_dagster_graphql
from dagster.core.instance import DagsterInstance
from .execution_queries import START_PIPELINE_EXECUTION_QUERY
from .setup import define_test_context, get_retry_multi_execution_params, retry_config
NON_PERSISTENT_INTERMEDIATES_ERROR = (
'Cannot perform reexecution with non persistent intermediates manager'
)
| [
6738,
48924,
1706,
62,
34960,
13976,
13,
9288,
13,
26791,
1330,
12260,
62,
67,
363,
1706,
62,
34960,
13976,
198,
198,
6738,
48924,
1706,
13,
7295,
13,
39098,
1330,
32167,
1706,
33384,
198,
198,
6738,
764,
18558,
1009,
62,
421,
10640,
... | 3.112 | 125 |
import db_operations
from user import User
# from event import Event
# def add_test_events():
# for i in range(5):
# tmp_event = Event()
# db_operations.add_event(tmp_event._date, tmp_event.getAuthInBit())
# def list_test_events():
# events = db_operations.get_events()
# for event in events:
# print(event)
if __name__ == "__main__":
main() | [
11748,
20613,
62,
3575,
602,
198,
6738,
2836,
1330,
11787,
198,
2,
422,
1785,
1330,
8558,
198,
198,
2,
825,
751,
62,
9288,
62,
31534,
33529,
198,
2,
220,
220,
220,
220,
329,
1312,
287,
2837,
7,
20,
2599,
198,
2,
220,
220,
220,
2... | 2.421384 | 159 |
from parcels import FieldSet, ParticleSet, ScipyParticle, JITParticle, Variable, AdvectionRK4, AdvectionRK4_3D, RectilinearZGrid, ErrorCode
from parcels.field import Field, VectorField
from parcels.tools.converters import TimeConverter
from datetime import timedelta as delta
import datetime
import numpy as np
import math
import pytest
from os import path
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
@pytest.mark.parametrize('xdim', [100, 200])
@pytest.mark.parametrize('ydim', [100, 200])
def test_fieldset_from_data(xdim, ydim):
""" Simple test for fieldset initialisation from data. """
data, dimensions = generate_fieldset(xdim, ydim)
fieldset = FieldSet.from_data(data, dimensions)
assert len(fieldset.U.data.shape) == 3
assert len(fieldset.V.data.shape) == 3
assert np.allclose(fieldset.U.data[0, :], data['U'], rtol=1e-12)
assert np.allclose(fieldset.V.data[0, :], data['V'], rtol=1e-12)
@pytest.mark.parametrize('xdim', [100, 200])
@pytest.mark.parametrize('ydim', [100, 50])
def test_fieldset_from_data_different_dimensions(xdim, ydim, zdim=4, tdim=2):
""" Test for fieldset initialisation from data using
dict-of-dict for dimensions. """
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
depth = np.zeros(zdim, dtype=np.float32)
time = np.zeros(tdim, dtype=np.float64)
U = np.zeros((xdim, ydim), dtype=np.float32)
V = np.ones((xdim, ydim), dtype=np.float32)
P = 2 * np.ones((int(xdim/2), int(ydim/2), zdim, tdim), dtype=np.float32)
data = {'U': U, 'V': V, 'P': P}
dimensions = {'U': {'lat': lat, 'lon': lon},
'V': {'lat': lat, 'lon': lon},
'P': {'lat': lat[0::2], 'lon': lon[0::2], 'depth': depth, 'time': time}}
fieldset = FieldSet.from_data(data, dimensions, transpose=True)
assert len(fieldset.U.data.shape) == 3
assert len(fieldset.V.data.shape) == 3
assert len(fieldset.P.data.shape) == 4
assert fieldset.P.data.shape == (tdim, zdim, ydim/2, xdim/2)
assert np.allclose(fieldset.U.data, 0., rtol=1e-12)
assert np.allclose(fieldset.V.data, 1., rtol=1e-12)
assert np.allclose(fieldset.P.data, 2., rtol=1e-12)
@pytest.mark.parametrize('xdim', [100, 200])
@pytest.mark.parametrize('ydim', [100, 200])
def test_fieldset_from_parcels(xdim, ydim, tmpdir, filename='test_parcels'):
""" Simple test for fieldset initialisation from Parcels FieldSet file format. """
filepath = tmpdir.join(filename)
data, dimensions = generate_fieldset(xdim, ydim)
fieldset_out = FieldSet.from_data(data, dimensions)
fieldset_out.write(filepath)
fieldset = FieldSet.from_parcels(filepath)
assert len(fieldset.U.data.shape) == 3 # Will be 4 once we use depth
assert len(fieldset.V.data.shape) == 3
assert np.allclose(fieldset.U.data[0, :], data['U'], rtol=1e-12)
assert np.allclose(fieldset.V.data[0, :], data['V'], rtol=1e-12)
@pytest.mark.parametrize('indslon', [range(10, 20), [1]])
@pytest.mark.parametrize('indslat', [range(30, 60), [22]])
def test_fieldset_from_file_subsets(indslon, indslat, tmpdir, filename='test_subsets'):
""" Test for subsetting fieldset from file using indices dict. """
data, dimensions = generate_fieldset(100, 100)
filepath = tmpdir.join(filename)
fieldsetfull = FieldSet.from_data(data, dimensions)
fieldsetfull.write(filepath)
indices = {'lon': indslon, 'lat': indslat}
indices_back = indices.copy()
fieldsetsub = FieldSet.from_parcels(filepath, indices=indices)
assert indices == indices_back
assert np.allclose(fieldsetsub.U.lon, fieldsetfull.U.grid.lon[indices['lon']])
assert np.allclose(fieldsetsub.U.lat, fieldsetfull.U.grid.lat[indices['lat']])
assert np.allclose(fieldsetsub.V.lon, fieldsetfull.V.grid.lon[indices['lon']])
assert np.allclose(fieldsetsub.V.lat, fieldsetfull.V.grid.lat[indices['lat']])
ixgrid = np.ix_([0], indices['lat'], indices['lon'])
assert np.allclose(fieldsetsub.U.data, fieldsetfull.U.data[ixgrid])
assert np.allclose(fieldsetsub.V.data, fieldsetfull.V.data[ixgrid])
@pytest.mark.parametrize('xdim', [100, 200])
@pytest.mark.parametrize('ydim', [100, 200])
@pytest.mark.parametrize('mesh', ['flat', 'spherical'])
@pytest.mark.parametrize('dx, dy', [('e1u', 'e2u'), ('e1v', 'e2v')])
@pytest.mark.parametrize('mesh', ['flat', 'spherical'])
@pytest.mark.parametrize('mesh', ['flat', 'spherical'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('swapUV', [False, True])
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('time_periodic', [True, False])
@pytest.mark.parametrize('dt_sign', [-1, 1])
@pytest.mark.parametrize('fail', [False, pytest.param(True, marks=pytest.mark.xfail(strict=True))])
@pytest.mark.parametrize('zdim', [2, 8])
@pytest.mark.parametrize('scale_fac', [0.2, 4, 1])
| [
6738,
49796,
1330,
7663,
7248,
11,
2142,
1548,
7248,
11,
1446,
541,
88,
7841,
1548,
11,
449,
2043,
7841,
1548,
11,
35748,
11,
1215,
303,
596,
49,
42,
19,
11,
1215,
303,
596,
49,
42,
19,
62,
18,
35,
11,
48599,
346,
259,
451,
57,
... | 2.389741 | 2,086 |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
'''
#-- @testpoint:opengauss关键字deref(非保留),作为目录对象名
'''
import unittest
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
logger = Logger()
commonsh = CommonSH('dbuser')
constant = Constant()
# 关键字作为目录对象名不带双引号 - 成功
# 关键字作为目录对象名带双引号—成功
# 关键字作为目录对象名带单引号 - 合理报错
#关键字作为目录对象名带反引号 - 合理报错 | [
37811,
198,
15269,
357,
66,
8,
33160,
43208,
21852,
1766,
1539,
43,
8671,
13,
198,
198,
9654,
35389,
1046,
318,
11971,
739,
17996,
272,
6599,
43,
410,
17,
13,
198,
1639,
460,
779,
428,
3788,
1864,
284,
262,
2846,
290,
3403,
286,
262... | 1.927505 | 469 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tb_paddle/proto/plugin_pr_curve.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tb_paddle/proto/plugin_pr_curve.proto',
package='tb_paddle',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n%tb_paddle/proto/plugin_pr_curve.proto\x12\ttb_paddle\"<\n\x11PrCurvePluginData\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x16\n\x0enum_thresholds\x18\x02 \x01(\rb\x06proto3'
)
_PRCURVEPLUGINDATA = _descriptor.Descriptor(
name='PrCurvePluginData',
full_name='tb_paddle.PrCurvePluginData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='tb_paddle.PrCurvePluginData.version', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_thresholds', full_name='tb_paddle.PrCurvePluginData.num_thresholds', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=52,
serialized_end=112,
)
DESCRIPTOR.message_types_by_name['PrCurvePluginData'] = _PRCURVEPLUGINDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PrCurvePluginData = _reflection.GeneratedProtocolMessageType('PrCurvePluginData', (_message.Message,), {
'DESCRIPTOR' : _PRCURVEPLUGINDATA,
'__module__' : 'tb_paddle.proto.plugin_pr_curve_pb2'
# @@protoc_insertion_point(class_scope:tb_paddle.PrCurvePluginData)
})
_sym_db.RegisterMessage(PrCurvePluginData)
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
256,
65,
62,
79,
37382,
14,
1676,
1462,
14,
33803,
62,
1050,
62,
22019,
... | 2.464358 | 982 |
from test.database import SurrogatePK, Model, db
from sqlalchemy import Column, String,DateTime, Integer
| [
6738,
1332,
13,
48806,
1330,
4198,
3828,
378,
40492,
11,
9104,
11,
20613,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
10903,
11,
10430,
7575,
11,
34142,
198
] | 3.75 | 28 |
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deletes the backend binding.
This removes the binding between the Compute
Engine backend service and your KubeRun service.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.kuberun import flags
from googlecloudsdk.command_lib.kuberun import kuberun_command
from googlecloudsdk.core import log
_DETAILED_HELP = {
'EXAMPLES':
"""
To delete a backend binding ``BACKEND_BINDING'' in the default
namespace, run:
$ {command} BACKEND_BINDING
To delete a backend binding ``BACKEND_BINDING'' in a specific namespace
``NAMESPACE'', run:
$ {command} BACKEND_BINDING --namespace=NAMESPACE
""",
}
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Delete(kuberun_command.KubeRunCommand, base.DeleteCommand):
"""Deletes a backend binding."""
detailed_help = _DETAILED_HELP
flags = [flags.NamespaceFlag(), flags.ClusterConnectionFlags()]
@classmethod
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
1303,
198,
2,
15069,
33448,
3012,
11419,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198... | 3.094891 | 548 |
import pytest
from yadage.wflownode import YadageNode
from yadage.tasks import packtivity_task
from yadage.controllers import YadageController
| [
11748,
12972,
9288,
198,
6738,
331,
324,
496,
13,
86,
2704,
593,
1098,
1330,
41449,
496,
19667,
198,
6738,
331,
324,
496,
13,
83,
6791,
1330,
2353,
83,
3458,
62,
35943,
198,
6738,
331,
324,
496,
13,
3642,
36667,
1330,
41449,
496,
22... | 3.272727 | 44 |
import numpy as np
from elmo import load_elmo_context_embeddings
from word_embeddings import get_w2v_word_embeddings, get_elmo_word_embeddings, get_w2v_multiword_embeddings
from word2vec import get_w2v_context_embeddings_Default
from utils import task_to_df
import pathlib
# ---------------------------------------
# ---------------------------------------
import ud2csv
from utils import df_to_csv, csv_to_df
# ---------------------------------------
# dump elmo default layer for contexts, google word2vec, tfidf matrix
from elmo import dump_elmo_context_embeddings
from word2vec import load_w2v_model, load_tfidf
# ------------------------------------------------------
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-w2v', '--w2v_file', help="path to GoogleNews-vectors-negative300.bin?",
default='./input/models/GoogleNews-vectors-negative300.bin')
args = parser.parse_args()
write_input_CSVs()
write_vectors()
dump_models_resources(args.w2v_file) | [
11748,
299,
32152,
355,
45941,
198,
6738,
1288,
5908,
1330,
3440,
62,
417,
5908,
62,
22866,
62,
20521,
67,
654,
198,
6738,
1573,
62,
20521,
67,
654,
1330,
651,
62,
86,
17,
85,
62,
4775,
62,
20521,
67,
654,
11,
651,
62,
417,
5908,
... | 3.141066 | 319 |
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey
| [
198,
6738,
45898,
13,
71,
1031,
6759,
13,
1891,
2412,
1330,
4277,
62,
1891,
437,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
19795,
20288,
1330,
11389,
1634,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
19795,
20288,
13,
4107,
3020,
19482,... | 3.565217 | 69 |
import sys,os,time
import argparse
sys.path.append(os.getenv("MARRTINO_APPS_HOME")+"/program")
import robot_cmd_ros
from robot_cmd_ros import *
robot_cmd_ros.use_audio = False
robot_cmd_ros.tv_good = 0.5
import move
# main
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='navigator')
parser.add_argument('path', type=str, help='File with path to run')
args = parser.parse_args()
begin(nodename='navigator')
enableObstacleAvoidance(True)
r = do_path(args.path)
print("Path completed: %r" %r)
end()
| [
11748,
25064,
11,
418,
11,
2435,
198,
11748,
1822,
29572,
198,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
1136,
24330,
7203,
40569,
14181,
46016,
62,
2969,
3705,
62,
39069,
4943,
10,
1,
14,
23065,
4943,
198,
198,
11748,
9379,
62,
... | 2.581818 | 220 |
import logging
from typing import Set
import falcon
from common.consts import HTTP_WRITE_METHODS
from common.falcon_utils import auth_token
from common.util import is_public
from ui import BackendController
| [
11748,
18931,
198,
6738,
19720,
1330,
5345,
198,
198,
11748,
24215,
1102,
198,
198,
6738,
2219,
13,
1102,
6448,
1330,
14626,
62,
18564,
12709,
62,
49273,
50,
198,
6738,
2219,
13,
42932,
1102,
62,
26791,
1330,
6284,
62,
30001,
198,
6738,... | 3.637931 | 58 |
'''
Copyright 2015 Stefan Andrei Chelariu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import smbus
import math
from time import sleep
| [
7061,
6,
198,
15269,
1853,
28842,
10948,
72,
11425,
2743,
84,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789... | 3.892405 | 158 |
import IceRayCpp
| [
11748,
6663,
19591,
34,
381,
201,
198
] | 2.571429 | 7 |
import pandas as pd
import pyttsx3
from sklearn import preprocessing
from sklearn.tree import DecisionTreeClassifier,_tree
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
import csv
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
training = pd.read_csv('Training.csv')
testing= pd.read_csv('Testing.csv')
cols= training.columns
cols= cols[:-1]
x = training[cols]
y = training['prognosis']
y1= y
reduced_data = training.groupby(training['prognosis']).max()
#mapping strings to numbers
le = preprocessing.LabelEncoder()
le.fit(y)
y = le.transform(y)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
testx = testing[cols]
testy = testing['prognosis']
testy = le.transform(testy)
clf1 = DecisionTreeClassifier()
clf = clf1.fit(x_train,y_train)
# print(clf.score(x_train,y_train))
# print ("cross result========")
scores = cross_val_score(clf, x_test, y_test, cv=3)
# print (scores)
print (scores.mean())
model=SVC()
model.fit(x_train,y_train)
print("for svm: ")
print(model.score(x_test,y_test))
importances = clf.feature_importances_
indices = np.argsort(importances)[::-1]
features = cols
severityDictionary=dict()
description_list = dict()
precautionDictionary=dict()
symptoms_dict = {}
for index, symptom in enumerate(x):
symptoms_dict[symptom] = index
getSeverityDict()
getDescription()
getprecautionDict()
getInfo()
tree_to_code(clf,cols)
| [
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
83,
912,
87,
18,
198,
6738,
1341,
35720,
1330,
662,
36948,
198,
6738,
1341,
35720,
13,
21048,
1330,
26423,
27660,
9487,
7483,
11,
62,
21048,
198,
11748,
299,
32152,
355,
45... | 2.65365 | 589 |
import os
import urllib.request
PATH_OCCA = os.path.expanduser('~/work/data/OCCA/')
os.makedirs(PATH_OCCA, exist_ok=True)
varnames = "theta salt phihyd etan".split()
ftp = "ftp://mit.ecco-group.org/ecco_for_las/OCCA_1x1_v2/2004-6/annual/"
for name in varnames:
fname = f"DD{name}.0406annclim.nc"
url = f"{ftp}{fname}"
dest = f"{PATH_OCCA}{fname}"
print("retrieving:", url, dest)
ret = urllib.request.urlretrieve(url, dest)
print("returned:", ret)
| [
11748,
28686,
198,
11748,
2956,
297,
571,
13,
25927,
628,
198,
34219,
62,
46,
4093,
32,
796,
28686,
13,
6978,
13,
11201,
392,
7220,
10786,
93,
14,
1818,
14,
7890,
14,
46,
4093,
32,
14,
11537,
198,
418,
13,
76,
4335,
17062,
7,
3421... | 2.10177 | 226 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
cec20_extension = Extension(
name="cec20",
sources=["cec20.pyx"],
libraries=["cec20"],
library_dirs=["lib"],
include_dirs=["lib"],
)
setup(name="cec20", ext_modules=cythonize([cec20_extension]))
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
6738,
1233,
26791,
13,
2302,
3004,
1330,
27995,
198,
6738,
327,
7535,
13,
15580,
1330,
3075,
400,
261,
1096,
198,
198,
344,
66,
1238,
62,
2302,
3004,
796,
27995,
7,
198,
220,
220,
220,
1... | 2.587302 | 126 |
import importlib.resources as pkg_resources
import os
OS_FEDORA = "fedora"
OS_UBUNTU = "ubuntu"
OS_MAC = "macintosh"
HOME = os.environ["HOME"]
RESOURCES_PATH = str(pkg_resources.path("workstation", "resources"))
| [
11748,
1330,
8019,
13,
37540,
355,
279,
10025,
62,
37540,
198,
11748,
28686,
198,
198,
2640,
62,
37,
1961,
1581,
32,
796,
366,
19082,
5799,
1,
198,
2640,
62,
10526,
4944,
51,
52,
796,
366,
32230,
1,
198,
2640,
62,
44721,
796,
366,
... | 2.675 | 80 |
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import json
from cloudify.workflows import ctx
from cloudify import broker_config
from cloudify.manager import get_rest_client
from cloudify.utils import get_broker_ssl_cert_path
from .utils import is_compute, get_tenants_list
from .constants import BROKER_DEFAULT_VHOST, V_4_1_0
| [
7804,
198,
2,
15069,
357,
66,
8,
1853,
402,
13827,
4561,
2114,
21852,
12052,
13,
1439,
2489,
10395,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
4... | 3.444853 | 272 |
#
# Copyright 2014-2016 Vinay Vasista, Ravi Teja Mullapudi, Uday Bondhugula,
# and others from Multicore Computing Lab, Department of Computer Science
# and Automation, Indian Institute of Science
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# poly.py : Polyhedral representation of pipeline functions.
#
from __future__ import absolute_import, division, print_function
import math
import time
import islpy as isl
from constructs import *
from expression import *
from utils import *
import pipe
import align_scale as aln_scl
# Static method 'alloc' for isl Id does not allow the user to be
# not None, as of now. We need an exclusive dictionary to map the
# users of an Id to that Id object.
isl_id_user_map = {}
class PolyRep(object):
""" The PolyRep class is the polyhedral representation of a
group. It gives piece-wise domain and schedule for each compute
object in the group. Polyhedral transformations modify the
piece-wise domains as well as the schedules.
"""
| [
2,
198,
2,
15069,
1946,
12,
5304,
11820,
323,
23663,
12523,
11,
371,
15820,
1665,
6592,
19574,
499,
47928,
11,
471,
820,
12812,
71,
1018,
4712,
11,
198,
2,
290,
1854,
422,
7854,
291,
382,
38589,
3498,
11,
2732,
286,
13851,
5800,
198... | 3.636145 | 415 |
import unittest
from simtk.openmm import *
from simtk.openmm.app import *
from simtk.unit import *
class TestMetadynamics(unittest.TestCase):
"""Test the Metadynamics class"""
def testHarmonicOscillator(self):
"""Test running metadynamics on a harmonic oscillator."""
system = System()
system.addParticle(1.0)
system.addParticle(1.0)
force = HarmonicBondForce()
force.addBond(0, 1, 1.0, 100000.0)
system.addForce(force)
cv = CustomBondForce('r')
cv.addBond(0, 1)
bias = BiasVariable(cv, 0.94, 1.06, 0.02)
meta = Metadynamics(system, [bias], 300*kelvin, 2.0, 5.0, 10)
integrator = LangevinIntegrator(300*kelvin, 10/picosecond, 0.001*picosecond)
topology = Topology()
chain = topology.addChain()
residue = topology.addResidue('H2', chain)
topology.addAtom('H1', element.hydrogen, residue)
topology.addAtom('H2', element.hydrogen, residue)
simulation = Simulation(topology, system, integrator, Platform.getPlatformByName('Reference'))
simulation.context.setPositions([Vec3(0, 0, 0), Vec3(1, 0, 0)])
meta.step(simulation, 200000)
fe = meta.getFreeEnergy()
center = bias.gridWidth//2
fe -= fe[center]
# Energies should be reasonably well converged over the central part of the range.
for i in range(center-3, center+4):
r = bias.minValue + i*(bias.maxValue-bias.minValue)/(bias.gridWidth-1)
e = 0.5*100000.0*(r-1.0)**2*kilojoules_per_mole
assert abs(fe[i]-e) < 1.0*kilojoules_per_mole | [
11748,
555,
715,
395,
198,
6738,
985,
30488,
13,
9654,
3020,
1330,
1635,
198,
6738,
985,
30488,
13,
9654,
3020,
13,
1324,
1330,
1635,
198,
6738,
985,
30488,
13,
20850,
1330,
1635,
198,
198,
4871,
6208,
9171,
324,
4989,
873,
7,
403,
... | 2.192722 | 742 |
import pytest
from os import path
from licensify.apply_license import apply_license_header
from licensify.errors import LicensesOutOfDateError
@pytest.fixture
| [
11748,
12972,
9288,
198,
198,
6738,
28686,
1330,
3108,
198,
198,
6738,
8240,
1958,
13,
39014,
62,
43085,
1330,
4174,
62,
43085,
62,
25677,
198,
6738,
8240,
1958,
13,
48277,
1330,
10483,
4541,
7975,
5189,
10430,
12331,
198,
198,
31,
9078... | 3.521739 | 46 |
import xml.etree.ElementTree as ET
import sys
import os
import re
newLine = re.compile(r"^\n\s+")
NS = {'mods': 'http://www.loc.gov/mods/v3'}
fileName = os.path.splitext(sys.argv[1])[0]
pull(fileName)
| [
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
302,
198,
198,
3605,
13949,
796,
302,
13,
5589,
576,
7,
81,
1,
61,
59,
77,
59,
82,
10,
4943,
198,
8035,
796,
1391,
6,
2412... | 2.255556 | 90 |
#!/usr/bin/env python3
import socket
import sys
import threading
import mmonitor
#print('esqueci de fazer o exercicio 2A')
t = threading.Thread(target=mmonitor.Console)
t.start()
porta = int(input('Porta para ouvir sensores: '))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', porta))
except:
print('# erro de bind')
sys.exit()
s.listen(5)
print('aguardando sensores em ', porta)
while True:
conn, addr = s.accept()
print('recebi uma conexao de ', addr)
t = threading.Thread( target=mmonitor.TrataSensor, args=(conn,addr,))
t.start()
print('o servidor encerrou!')
s.close() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
17802,
198,
11748,
25064,
198,
11748,
4704,
278,
198,
11748,
285,
41143,
198,
198,
2,
4798,
10786,
274,
421,
721,
72,
390,
277,
19178,
267,
4208,
46441,
362,
32,
11537,
198,... | 2.494071 | 253 |
import pandas as pd
import numpy as np
import os
from os.path import isfile, join
import math
from utils import log, list_all_files, abs_path
if __name__ == "__main__":
main() | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
198,
11748,
10688,
198,
198,
6738,
3384,
4487,
1330,
2604,
11,
1351,
62,
439,
62,
1662... | 2.873016 | 63 |
"""
给你一个长度为n的数组,其中只有一个数字出现了1次,其他均出现2次,问如何快速的找到这个数字。
"""
nums = [6, 4, 7, 6, 7, 4, 5]
print(solution(nums))
nums.sort()
print(nums)
| [
37811,
198,
220,
220,
220,
13328,
119,
247,
19526,
254,
31660,
10310,
103,
165,
243,
123,
41753,
99,
10310,
118,
77,
21410,
46763,
108,
163,
119,
226,
171,
120,
234,
17739,
114,
40792,
20998,
103,
17312,
231,
31660,
10310,
103,
46763,
... | 0.945205 | 146 |
from scipy import sparse
import numpy as np
import math
import implicit
import logging
logger = logging.getLogger(__name__)
def predict_most_similar(visits, num_users, num_jobs, UserJobs, factors=50, cut_off=300, log_discrete=True):
"""
Matrix Factorization based
Still Collaborative filtering but this time based on alternating
least squares with an efficient implementation from implicit.
Still not very fast as some of the list to matrix stuff still applies.
But it should scale better. Maybe it is worth storing this in memory
and requesting values when a user needs some
args:
visits: a list of objects with a user_id, job_id and duration value
num_users: integer, number of users = max user_id
num_jobs: integer, number of jobs = max job_id
UserJobs: django or SQLAlechmy model where the similarities are saved
cut_off: integer, top cut off time in seconds
log_discrete: boolean, if true converts to log discrete values
"""
tic = datetime.now()
#we only operate on the user vectors
#this expects integer ids as users if this isn't the case you might want
# to have a dict for row & col keys
M_t = sparse.csr_matrix((num_jobs, num_users), dtype=np.uint8)
#TODO can you vectorize this?
for visit in visits:
M_t[visit.job_id, visit.user_id] = calc_time(visit.duration)
logger.debug("M_t took {} ms".format((datetime.now() - tic).microseconds))
tic = datetime.now()
# initialize a model
model = implicit.als.AlternatingLeastSquares(factors=factors)
logger.debug("Loading model took {} ms".format((datetime.now() - tic).microseconds))
tic = datetime.now()
# train the model on a sparse matrix of item/user/confidence weights
model.fit(M_t)
logger.debug("Fitting model took {} ms".format((datetime.now() - tic).microseconds))
tic = datetime.now()
# recommend items for a user
for user_id in range(num_users):
preds = model.recommend(user_id, M_t.T)
only saves the non-zero ones
for pred in preds:
userjob = UserJobs.objects.filter(user_id=user_id, job_id=pred[0]).first()
if userjob is None:
UserJobs.create(user_id=user_id, job_id=pred[0], similarity_Skill=None, similarity_CF=pred[1])
else:
userjob.similarity_CF = pred[1]
logger.debug("Predicting took {} ms".format((datetime.now() - tic).microseconds))
| [
6738,
629,
541,
88,
1330,
29877,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
11748,
16992,
198,
11748,
18931,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
4299,
4331,
62,
1712,
62... | 2.632568 | 958 |
from numpy.distutils.core import setup, Extension
import subprocess
import os
ext_call_collect = Extension(name="mlmicrophysics.call_collect",
sources=["mlmicrophysics/call_collect.f90"],
extra_objects=["mlmicrophysics/stochastic_collect_tau_cam.o"])
with open("README.md", "r") as fh:
long_description = fh.read()
if __name__ == "__main__":
#fortran_compiler = "gfortran"
#os.chdir("mlmicrophysics")
#subprocess.call([fortran_compiler, "-c", "stochastic_collect_tau_cam.f90"])
#os.chdir("../")
setup(name="mlmicrophysics",
version="0.1",
description="Machine learning emulator testbed for microphysics.",
long_description=long_description,
long_description_content_type="text/markdown",
author="David John Gagne and Gabrielle Gantos",
author_email="dgagne@ucar.edu",
license="MIT",
url="https://github.com/NCAR/mlmicrophysics",
packages=["mlmicrophysics"],
#data_files=[("mlmicrophysics", ["mlmicrophysics/KBARF"])],
install_requires=["numpy",
"scipy",
"pandas",
"matplotlib",
"xarray",
"tensorflow",
"netcdf4",
"scikit-learn",
"pyyaml",
"pyarrow"],
#ext_modules=[ext_call_collect]
)
| [
6738,
299,
32152,
13,
17080,
26791,
13,
7295,
1330,
9058,
11,
27995,
198,
11748,
850,
14681,
198,
11748,
28686,
198,
198,
2302,
62,
13345,
62,
33327,
796,
27995,
7,
3672,
2625,
4029,
24055,
746,
23154,
13,
13345,
62,
33327,
1600,
198,
... | 1.86211 | 834 |
#!/usr/bin/python3
from botbase import *
_heinsberg_c = re.compile(r"([0-9.]+) bestätigte")
_heinsberg_d = re.compile(r"Verstorbenen liegt im Kreis Heinsberg bei ([0-9.]+)\.")
_heinsberg_a = re.compile(r"([0-9.]+) Personen als noch nicht genesen")
schedule.append(Task(9, 15, 15, 35, 600, heinsberg, 5370))
if __name__ == '__main__': heinsberg(googlesheets())
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
6738,
10214,
8692,
1330,
1635,
198,
198,
62,
258,
1040,
3900,
62,
66,
796,
302,
13,
5589,
576,
7,
81,
18109,
58,
15,
12,
24,
8183,
28988,
1266,
11033,
83,
328,
660,
4943,
198,
62,
2... | 2.2625 | 160 |
# THIS PROGRAM IS IN EARLY DEVELOPMENT
# AND DOES **NOT** WORK YET.
# I2C sensor reading works, but control of the tank does not.
# I think the problem may be in the poor accuracy of Python's
# time.sleep() as opposed to C's usleep().
# This may mean that a pure Python version will never work :(
# Raspberry Tank Control Script
# v2 - Python via WebSockets
# Ian Renton, April 2014
# http://raspberrytank.ianrenton.com
# NOTE(review): This is Python 2 code (print statement, `Queue` module).
import smbus
import time
import threading
import Queue
import RPi.GPIO as GPIO
#################################
##            SETUP            ##
#################################
# Tank control codes (32-bit command words sent to the tank's receiver)
IDLE = 0xFE40121C
IGNITION = 0xFE401294
TURRET_LEFT = 0xFE408F0C
TURRET_ELEV = 0xFE404F3C
# I2C Setup: bus 0 with compass and rangefinder modules at fixed addresses
i2cBus = smbus.SMBus(0)
i2cCompassAddress = 0x60
i2cRangefinderAddress = 0x70
# GPIO Setup: single output pin driving the transistor circuit;
# True here corresponds to 0v at the tank (see inverted-encoding note below)
GPIO_PIN = 7
GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_PIN, GPIO.OUT)
GPIO.output(GPIO_PIN, True)
# Inter-thread communication queues (size 1: only the latest reading is kept)
bearingQueue = Queue.Queue(1)
rangeQueue = Queue.Queue(1)
#################################
##          FUNCTIONS          ##
#################################
# Send a complete command code
# Send a single bit using the tank's Manchester encoding scheme. The high-low and
# low-high transitions are inverted compared to normal because the transistor circuit
# I use sends 0v to the tank when the GPIO pin is high, and 4v when the GPIO pin is
# low.
# Get the bearing of the tank from the Compass module, in degrees
# Get the range to target from the Rangefinder module, in metres
#################################
##           THREADS           ##
#################################
# Control thread. Passes on the requested control signal from the GUI or autonomy
# to the tank.
# Sensor thread. Acquires bearing and range data as fast as it can, and puts the
# values in the bearing and range queues.
# Autonomy thread. Checks range values and drives accordingly
#################################
##         MAIN PROGRAM        ##
#################################
# Start threads
# NOTE(review): ControlThread/SensorThread/AutonomyThread class definitions are
# not visible in this excerpt — presumably defined above; confirm.
threadLock = threading.Lock()
threads = []
controlThread = ControlThread()
threads.append(controlThread)
controlThread.start()
sensorThread = SensorThread()
threads.append(sensorThread)
sensorThread.start()
autonomyThread = AutonomyThread()
threads.append(autonomyThread)
autonomyThread.start()
# Wait for threads to complete
for t in threads:
	t.join()
print "All threads finished, exiting"
2,
12680,
46805,
3180,
3268,
31834,
11319,
5550,
18697,
3185,
10979,
198,
2,
5357,
38359,
12429,
11929,
1174,
30936,
575,
2767,
13,
198,
198,
2,
314,
17,
34,
12694,
3555,
2499,
11,
475,
1630,
286,
262,
6873,
857,
407,
13,
198,
2,
31... | 3.231579 | 760 |
# no stream support
# reserved names: callMethod, onCompletion
| [
2,
645,
4269,
1104,
628,
198,
198,
2,
10395,
3891,
25,
869,
17410,
11,
319,
5377,
24547,
628
] | 3.722222 | 18 |
import random
import pandas as pd
# Script entry point: `generate_new_data` is presumably defined above in the
# full file (not visible in this excerpt) — TODO confirm.
if __name__ == "__main__":
    generate_new_data()
| [
11748,
4738,
198,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
7716,
62,
3605,
62,
7890,
3419,
198
] | 2.617647 | 34 |
from django import forms
from django.contrib.auth.models import User
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198
] | 3.631579 | 19 |
#!/usr/bin/env python
# DISABLE SELECT PYLINT TESTS
# pylint: disable=import-error, line-too-long, too-few-public-methods
# pylint: disable=bad-continuation
r"""
╔════════════════════════════════════════════════════╗
║ ╔═╗╦═╗╔═╗╔═╗╦ ╦╔═╗╔╗╔╔═╗ ╔╦╗╔═╗╔╦╗╔═╗╔╗╔╔═╗╔╦╗╔═╗ ║
║ ║ ╦╠╦╝╠═╣╠═╝╠═╣║╣ ║║║║╣ ║║║║╣ ║ ╠═╣║║║║ ║ ║║║╣ ║
║ ╚═╝╩╚═╩ ╩╩ ╩ ╩╚═╝╝╚╝╚═╝ ╩ ╩╚═╝ ╩ ╩ ╩╝╚╝╚═╝═╩╝╚═╝ ║
╚════════════════════════════════════════════════════╝
~
GLOBAL CONSTANTS AND USER CONFIGURATION FOR DEX CONNECTIVITY
"""
# STANDARD MODULES
from decimal import Decimal
from random import randint
# GRAPHENE MODULES
# ~ *soon* from hummingbot.connector.exchange.graphene.
from graphene_utils import assets_from_pairs, invert_pairs, sls, it
class GrapheneConstants:
    """
    the base class contains constants relevant to all graphene chains
    and chain specific constants for <your chain>
    the aim here is to have a single object
    which can be instantiated either as:
        # chain agnostic constants, eg.
        # constants = GrapheneConstants()
        # constants.core.BASE58
        # constants.metanode.TIMEOUT
        # constants.signing.TIMEOUT
        # chain specific constants, eg.
        # constants = GrapheneConstants(chain_name)
        # constants.chain.NODES
        # constants.chain.PAIRS
        # constants.chain.ACCOUNT
    and then passed through instantiated class objects as self.constants
    """
    def __init__(self, chain_name=None):
        """
        this requires no user configuration,
        advanced might configure a testnet or additional graphene based blockchain here

        :param chain_name: optional chain key, e.g. "bitshares" or
            "peerplays testnet" (case-insensitive); when None only the
            chain-agnostic namespaces (core/metanode/signing) are attached.
        """
        # Registry of supported chains: core token symbol, the user config
        # class for that chain, and the chain id used to verify node identity.
        chains = {
            "peerplays": {
                "core": "PPY",
                "config": PeerplaysConfig,
                "id": (
                    "6b6b5f0ce7a36d323768e534f3edb41c6d6332a541a95725b98e28d140850134"
                ),
            },
            "peerplays testnet": {
                "core": "TEST",
                "config": PeerplaysTestnetConfig,
                "id": (
                    "7c1c72eb738b3ff1870350f85daca27e2d0f5dd25af27df7475fbd92815e421e"
                ),
            },
            "bitshares": {
                "core": "BTS",
                "config": BitsharesConfig,
                "id": (
                    "4018d7844c78f6a6c41c6a552b898022310fc5dec06da467ee7905a8dad512c8"
                ),
            },
            "bitshares testnet": {
                "core": "TEST",
                "config": BitsharesTestnetConfig,
                "id": (
                    "39f5e2ede1f8bc1a3a54a7914414e3779e33193f1f5693510e73cb7a87617447"
                ),
            },
            # ~ "rudex": {
            # ~ "core": "GPH",
            # ~ "config": RudexConfig,
            # ~ "id": (
            # ~ "7fcf452d6bb058949cdc875b13c8908c8f54b0f264c39faf8152b682af0740ee"
            # ~ ),
            # ~ },
            # ~ "hive": {
            # ~ "core": "HIVE",
            # ~ "config": HiveConfig,
            # ~ "id": (
            # ~ "18dcf0a285365fc58b71f18b3d3fec954aa0c141c44e4e5cb4cf777b9eab274e"
            # ~ ),
            # ~ },
        }
        # instantiate hummingbot and graphene core constants
        # NOTE: `self.core` etc. are the CLASSES themselves, not instances;
        # attributes set below are therefore class-level and shared.
        self.core = CoreConstants
        self.core.CHAINS = list(chains.keys())
        # instantiate user configuration for public and private api connectivity
        self.metanode = MetanodeConfig
        self.signing = SigningConfig
        # instantiate user configuration specific to one blockchain
        # normalize user inputs derive some constants that will prove useful later
        # constants derived at instantiation still formatted upper `constants.chain.XXX`
        if chain_name is not None:
            self.chain = chains[chain_name.lower()]["config"]
            self.chain.NAME = chain_name.lower()
            self.chain.CORE = chains[self.chain.NAME]["core"].upper()
            self.chain.ID = chains[self.chain.NAME]["id"]
            # normalize node urls and trading pairs (sls() from graphene_utils)
            self.chain.NODES = [node.lower() for node in sls(self.chain.NODES)]
            self.chain.PAIRS = [pair.upper() for pair in sls(self.chain.PAIRS)]
            # filter out duplicate inverted pairs
            self.chain.PAIRS = [
                i for i in self.chain.PAIRS if i not in invert_pairs(self.chain.PAIRS)
            ]
            self.chain.INVERT_PAIRS = invert_pairs(self.chain.PAIRS)
            self.chain.ASSETS = assets_from_pairs(self.chain.PAIRS)
            # per-chain sqlite database path, e.g. "database/bitshares_testnet.db"
            self.chain.DATABASE = (
                "database/" + self.chain.NAME.replace(" ", "_") + ".db"
            )
            self.chain.TITLE = self.chain.NAME.title()
            # address prefix defaults to the core token symbol unless the
            # config class declares its own PREFIX
            if not hasattr(self.chain, "PREFIX"):
                self.chain.PREFIX = self.chain.CORE
class CoreConstants:
    """
    ╔═╗╔═╗╦═╗╔═╗
    ║ ║ ║╠╦╝║╣
    ╚═╝╚═╝╩╚═╚═╝
    these constants require no user configuration

    Chain-agnostic protocol constants: limit-order expiration sentinel,
    serialization alphabets, operation/object-type ids, and shared Decimal
    helpers used throughout the connector.
    """
    # about 75 years in future; used for expiration date of limit orders
    END_OF_TIME = 4 * 10 ** 9
    # membership_expiration_date is set to this date if lifetime member
    LTM = "2106-02-07T06:28:15"
    # ISO8601 time format; 'graphene time'
    ISO8601 = "%Y-%m-%dT%H:%M:%S%Z"
    # bitsharesbase/operationids.py
    OP_IDS = {
        "LimitOrderCreate": 1,
        "LimitOrderCancel": 2,
    }
    # swap keys/values to index names by number
    OP_NAMES = {v: k for k, v in OP_IDS.items()}
    # bitsharesbase/objecttypes.py used by ObjectId() to confirm a.b.c
    TYPES = {
        "account": 2,  # 1.2.x
        "asset": 3,  # 1.3.x
        "limit_order": 7,  # 1.7.x
    }
    # base58 encoding and decoding; this is alphabet defined:
    BASE58 = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    # hex encoding and decoding
    HEXDIGITS = "0123456789abcdefABCDEF"
    # numerical constants; `int()` wrapper removed — 10 ** 15 is already an int
    GRAPHENE_MAX = 10 ** 15
    DECIMAL_NIL = Decimal(1) / GRAPHENE_MAX
    DECIMAL_NAN = Decimal("nan")
    DECIMAL_0 = Decimal(0)
    # BUGFIX: build Decimal constants from strings, not float literals.
    # Decimal(0.00000001) captures the binary-float approximation
    # (1.0000000000000000209...E-8) and defeats the purpose of Decimal.
    DECIMAL_SATOSHI = Decimal("0.00000001")
    DECIMAL_SIXSIG = Decimal("0.999999")
class MetanodeConfig:
    """
    ╔╦╗╔═╗╔╦╗╔═╗╔╗╔╔═╗╔╦╗╔═╗
    ║║║║╣  ║ ╠═╣║║║║ ║ ║║║╣
    ╩ ╩╚═╝ ╩ ╩ ╩╝╚╝╚═╝═╩╝╚═╝
    these constants relate to the timing of the metanode server and trustless client
    metanode can run with a single node, a few nodes, or a large selection of nodes
    depending on the size of the public api network you've whitelisted,
    some configuration may be required
    its suggested that you familiarize yourself with the codebase
    prior to adjusting anything here
    """
    # ==================================================================================
    # SECURITY hard coded list prevents SQL injection in _get_table()
    # ==================================================================================
    VALID_TABLES = [
        "chain",
        "account",
        "objects",
        "pairs",
        "assets",
        "nodes",
        "timing",
    ]
    # ==================================================================================
    # SECURITY this hard coded list prevents SQL injection in maven and oracle updates
    # ==================================================================================
    # Maps each tracked metric name to the database table that stores it.
    TRACKER_TABLE = {
        # account table
        "fees_account": "account",
        "ltm": "account",
        "cancels": "account",
        # assets table
        "supply": "assets",
        "fees_asset": "assets",
        "balance": "assets",
        # pairs table
        "ops": "pairs",
        "last": "pairs",
        "book": "pairs",
        "history": "pairs",
        "opens": "pairs",
        "fills": "pairs",
        # timing table
        "ping": "timing",
        "handshake": "timing",
        "blocktime": "timing",
        "server": "timing",
        "blocknum": "timing",
        "read": "timing",
    }
    STATUS_CODES = { # used by latency testing
        200: "CONNECTED",
        1001: "NO HISTORY",
        1002: "WRONG CHAIN ID",
        1003: "FORKED FROM MAINNET",
        1004: "STALE BLOCKTIME",
        1005: "SLOW HANDSHAKE",
        1006: "SLOW PING",
        1007: "CONNECTION FAILED",
        1008: "CONNECTION TIMEOUT",
    }
    DEV = True # additional printing in terminal
    # NOTE(review): despite the name this is an int, not a tuple; randomized
    # once at import time (presumably seconds between regenerations — confirm)
    REGENERATION_TUPLE = randint(120, 240)
    MAVENS = 7 # number of processes collecting data
    MAVEN_WINDOW = 7 # window depth for mode(sooths)
    LATENCY_THRESHER_TIMEOUT = 10 # if status 1008 on all nodes, increase
    LATENCY_TASK_PAUSE = 60 # time between testing same node twice
    # join timeouts and pacing ratios/pauses (presumably seconds — confirm)
    MAVEN_CACHE_HARVEST_JOIN = 8
    CACHE_RESTART_JOIN = 10
    MAVEN_RPC_RATIO = 3
    MAVEN_HIGH_LOW_RATIO = 20
    MAVEN_PAUSE = 0.1
    ORACLE_PAUSE = 0.5
    MAX_PING = 1
    SQL_EXECUTE_PAUSE = (0.2, True)
class SigningConfig:
    """
    ╔═╗╦╔═╗╔╗╔╦╔╗╔╔═╗
    ╚═╗║║ ╦║║║║║║║║ ╦
    ╚═╝╩╚═╝╝╚╝╩╝╚╝╚═╝
    these constants relate to the client side graphene scripting of
    transcription, serialization, signing, and broadcast
    of authenticate, buy, sell, and cancel operations
    """
    # timeout during websocket handshake; default 4 seconds
    HANDSHAKE_TIMEOUT = 4
    # multiprocessing handler lifespan, default 20 seconds
    PROCESS_TIMEOUT = 20
    # default False for persistent limit orders
    KILL_OR_FILL = False
    # default True scales elements of oversize gross order to means
    AUTOSCALE = True
    # default True to never spend last 2 core tokens (for fees)
    CORE_FEES = True
    # multiprocessing incarnations, default 3 attempts
    ATTEMPTS = 3
    # prevent extreme number of AI generated edicts; default 20
    # NOTE batch transactions are currently disable
    # so this parameter is moot at the hummingbot level
    LIMIT = 20
    # default True to execute order in primary script process
    JOIN = True
    # ignore orders value less than ~DUST core in value; 0 to disable
    DUST = 0
    # True = heavy print output
    DEV = True
class PeerplaysConfig:
    """
    ╔═════════════════════════════╗
    ║ HUMMINGBOT GRAPHENE         ║
    ║ ╔═╗╔═╗╔═╗╦═╗╔═╗╦  ╔═╗╦ ╦╔═╗ ║
    ║ ╠═╝║╣ ║╣ ╠╦╝╠═╝║  ╠═╣╚╦╝╚═╗ ║
    ║ ╩  ╚═╝╚═╝╩╚═╩  ╩═╝╩ ╩ ╩ ╚═╝ ║
    ║ DEX MARKET MAKING CONNECTOR ║
    ╚═════════════════════════════╝
    configuration details specific to peerplays mainnet
    """
    # Trading account name; must be filled in by the user before use.
    ACCOUNT = ""
    # Whitelisted public API websocket endpoints.
    NODES = ["wss://peerplaysblockchain.net/mainnet/api"]
    # Market pairs quoted as BASE-QUOTE in core/gateway token symbols.
    PAIRS = ["BTC-PPY", "HIVE-PPY", "HBD-PPY"]
class PeerplaysTestnetConfig:
    """
    configuration details specific to peerplays testnet
    """
    # Testnet account preconfigured for development runs.
    ACCOUNT = "litepresence1"
    NODES = ["wss://ymir.peerplays.download/api"]
    PAIRS = ["TEST-ABC", "TEST-XYZ"]
class BitsharesConfig:
    """
    ╔═════════════════════════════╗
    ║ HUMMINGBOT GRAPHENE         ║
    ║ ╔╗ ╦╔╦╗╔═╗╦ ╦╔═╗╦═╗╔═╗╔═╗   ║
    ║ ╠╩╗║ ║ ╚═╗╠═╣╠═╣╠╦╝║╣ ╚═╗   ║
    ║ ╚═╝╩ ╩ ╚═╝╩ ╩╩ ╩╩╚═╚═╝╚═╝   ║
    ║ DEX MARKET MAKING CONNECTOR ║
    ╚═════════════════════════════╝
    configuration details specific to bitshares mainnet
    """
    # Trading account name; must be filled in by the user before use.
    ACCOUNT = ""
    # Large whitelist of community-run public API nodes; the metanode latency
    # tester selects the healthy subset at runtime.
    NODES = [
        "wss://api.bts.mobi/wss",
        "wss://api-us.61bts.com/wss",
        "wss://cloud.xbts.io/ws",
        "wss://api.dex.trading/wss",
        "wss://eu.nodes.bitshares.ws/ws",
        "wss://api.pindd.club/ws",
        "wss://dex.iobanker.com/ws",
        "wss://public.xbts.io/ws",
        "wss://node.xbts.io/ws",
        "wss://node.market.rudex.org/ws",
        "wss://nexus01.co.uk/ws",
        "wss://api-bts.liondani.com/ws",
        "wss://api.bitshares.bhuz.info/wss",
        "wss://btsws.roelandp.nl/ws",
        "wss://hongkong.bitshares.im/ws",
        "wss://node1.deex.exchange/wss",
        "wss://api.cnvote.vip:888/wss",
        "wss://bts.open.icowallet.net/ws",
        "wss://api.weaccount.cn/ws",
        "wss://api.61bts.com",
        "wss://api.btsgo.net/ws",
        "wss://bitshares.bts123.cc:15138/wss",
        "wss://singapore.bitshares.im/wss",
    ]
    PAIRS = ["BTS-HONEST", "BTS-HONEST.USD", "HONEST.XAU-CNY"]
class BitsharesTestnetConfig:
    """
    configuration details specific to bitshares testnet
    """
    # Trading account name; must be filled in by the user before use.
    ACCOUNT = ""
    NODES = [
        "wss://testnet.bitshares.im/ws",
        "wss://testnet.dex.trading/",
        "wss://testnet.xbts.io/ws",
        "wss://api-testnet.61bts.com/ws",
    ]
    PAIRS = ["TEST-USD", "TEST-CNY"]
# NOTE these are not yet tested... may require some dev; pull requests welcome
# ~ class RudexConfig:
# ~ """
# ~ ╔═════════════════════════════╗
# ~ ║ HUMMINGBOT GRAPHENE ║
# ~ ║ ╦═╗╦ ╦╔╦╗╔═╗╔╗╔═ ║
# ~ ║ ╠╦╝║ ║ ║║║╣ ╠╣ ║
# ~ ║ ╩╚═╚═╝═╩╝╚═╝═╝╚╝ ║
# ~ ║ DEX MARKET MAKING CONNECTOR ║
# ~ ╚═════════════════════════════╝
# ~ configuration details specific to rudex mainnet
# ~ """
# ~ FIXME needs to be debugged / unit tested, may be some rpc differences
# ~ /testnet?
# ~ ACCOUNT = "litepresence1"
# ~ NODES = ["wss://node.gph.ai"]
# ~ PAIRS = ["GPH-BTS", "PPY-BTS"]
# ~ class HiveConfig:
# ~ """
# ~ ╔═════════════════════════════╗
# ~ ║ HUMMINGBOT GRAPHENE ║
# ~ ║ ╦ ╦╦╦ ╦╔═╗ ║
# ~ ║ ╠═╣║╚╗╔╝║╣ ║
# ~ ║ ╩ ╩╩ ╚╝ ╚═╝ ║
# ~ ║ DEX MARKET MAKING CONNECTOR ║
# ~ ╚═════════════════════════════╝
# ~ configuration details specific to hive mainnet
# ~ """
# ~ raise NotImplementedError
# ~ FIXME needs to be debugged / unit tested, may be some rpc differences
# ~ /testnet?
# ~ https://developers.hive.io/quickstart/hive_full_nodes.html
# ~ https://steemit.com/full-nodes/@fullnodeupdate/full-api-node-update---762018
# ~ # https://github.com/openhive-network/hive
# ~ # https://api.hive.blog
# ~ # https://testnet.openhive.network
# ~ ACCOUNT = "rolandp"
# ~ NODES = ["ws://testnet.openhive.network:8090"]
# ~ NODES = [
# ~ "wss://rpc.steemviz.com/wss",
# ~ "wss://steemd.minnowsupportproject.org/wss",
# ~ "wss://steemd.pevo.science/wss",
# ~ "wss://steemd.privex.io/wss",
# ~ "wss://rpc.buildteam.io/wss",
# ~ "wss://gtg.steem.house:8090/wss",
# ~ ]
# ~ PAIRS = ["HBD-HIVE"]
def unit_test():
    """
    test class inheritance

    Interactively lists the supported chains, prompts the user to pick one,
    then prints a sample of chain-agnostic and chain-specific constants to
    demonstrate both instantiation styles of GrapheneConstants.
    """
    # FIXME state what is being printed
    # chain agnostic constants, eg.
    constants = GrapheneConstants()
    dispatch = {}
    for index, known_chain in enumerate(constants.core.CHAINS):
        dispatch[str(index)] = known_chain
    for menu_key, menu_chain in dispatch.items():
        # mainnets in blue, testnets in purple
        color = "purple" if "testnet" in menu_chain else "blue"
        print(menu_key + ": " + it(color, menu_chain))
    chain = dispatch[input("Enter choice: ")]
    CONSTANTS = GrapheneConstants()  # pylint: disable=invalid-name
    print(CONSTANTS.core.BASE58)
    print(CONSTANTS.metanode.STATUS_CODES)
    print(CONSTANTS.signing.ATTEMPTS)
    # chain specific constants, eg.
    constants = GrapheneConstants(chain)
    for sample in (
        constants.chain.NODES,
        constants.chain.PAIRS,
        constants.chain.INVERT_PAIRS,
        constants.chain.ASSETS,
        constants.chain.CORE,
        constants.chain.PREFIX,
    ):
        print(sample)
    # note core / metanode / etc. constants still work this way
    print(constants.metanode.STATUS_CODES)
if __name__ == "__main__":
    unit_test()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
13954,
17534,
33493,
350,
45448,
12394,
309,
1546,
4694,
198,
2,
279,
2645,
600,
25,
15560,
28,
11748,
12,
18224,
11,
1627,
12,
18820,
12,
6511,
11,
1165,
12,
32146,
12,
11377,
12... | 1.951287 | 7,924 |
# coding: utf8
import json
import os
import urllib.request
from pathlib import Path
from spacy.language import Language
from spacy.util import get_lang_class
# Root URL of the LINDAT repository hosting the pretrained UDPipe models.
BASE_URL = "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-2998/"
# Local directory (next to this module) where downloaded models are stored.
MODELS_DIR = os.path.join(Path(__file__).parent, "models")
langs_path = os.path.join(Path(__file__).parent, "languages.json")
# Mapping loaded at import time: language code -> UDPipe model file name.
with open(langs_path, "r") as f:
    LANGUAGES = json.load(f)
def download(lang):
    """Download the UDPipe pretrained model.

    lang (unicode): ISO 639-1 language code or shorthand UDPipe model name.

    Validates the language, ensures the models directory exists, and fetches
    the model file from BASE_URL unless it is already present locally.
    """
    _check_language(lang)
    try:
        _check_models_dir(lang)
    except Exception:
        # Models directory is missing: create it and proceed with download.
        # BUGFIX: was a bare `except:`, which would also swallow
        # SystemExit / KeyboardInterrupt.
        os.makedirs(MODELS_DIR)
    if LANGUAGES[lang] in os.listdir(MODELS_DIR):
        msg = "Already downloaded a model for the" \
              " '{}' language".format(lang)
        print(msg)
        return
    url = BASE_URL + LANGUAGES[lang]
    fname = os.path.join(MODELS_DIR, LANGUAGES[lang])
    # Network I/O: fetch the model binary into the models directory.
    urllib.request.urlretrieve(url=url, filename=fname)
    msg = "Successfully downloaded the pretrained UDPipe" \
          " model for the '{}' language".format(lang)
    print(msg)
def get_path(lang):
    """Get the path to the UDPipe pretrained model if it was downloaded.

    lang (unicode): ISO 639-1 language code or shorthand UDPipe model name.
    RETURNS (unicode): The path to the UDPipe pretrained model.

    Raises Exception if the model file has not been downloaded yet.
    """
    _check_language(lang)
    _check_models_dir(lang)
    # Idiom fix: `X not in Y` instead of `not X in Y`.
    if LANGUAGES[lang] not in os.listdir(MODELS_DIR):
        msg = "Use spacy_udpipe.download to download the pretrained" \
              " UDPipe model for the '{}' language".format(lang)
        raise Exception(msg)
    path = os.path.join(MODELS_DIR, LANGUAGES[lang])
    return path
def get_defaults(lang):
    """Get the language-specific defaults, if available in spaCy. This allows
    using lexical attribute getters that depend on static language data, e.g.
    Token.like_num, Token.is_stop, Doc.noun_chunks etc.

    lang (unicode): ISO 639-1 language code.
    RETURNS (Language.Defaults): The language defaults.
    """
    try:
        # Known language: use its dedicated defaults class.
        return get_lang_class(lang).Defaults
    except ImportError:
        # spaCy has no class for this language: fall back to the base defaults.
        return Language.Defaults
| [
2,
19617,
25,
3384,
69,
23,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
2956,
297,
571,
13,
25927,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
599,
1590,
13,
16129,
1330,
15417,
198,
6738,
599,
1590,
13,
22602,
1330,
651... | 2.523756 | 884 |
# NOTE(review): `typing` is imported but unused in the visible code.
import typing
# `main` is presumably defined above in the full file (not visible here).
if __name__ == "__main__":
    main()
| [
11748,
19720,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
1388,
3419,
201,
198
] | 2.103448 | 29 |
# Read an integer from stdin, advance it by 3 within a 7-value cycle
# (e.g. a weekday shifted three days ahead), and print the result.
value = int(input())
print((value + 3) % 7)
| [
4798,
19510,
600,
7,
15414,
28955,
1343,
513,
8,
4064,
767,
8,
198
] | 2.307692 | 13 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Semantic Machines\N{TRADE MARK SIGN} software.
Creates text data (source-target pairs) to be used for training OpenNMT models.
"""
import argparse
import dataclasses
from typing import Dict, Iterator
import jsons
from tqdm import tqdm
from dataflow.core.dialogue import AgentUtterance, Turn
from dataflow.core.turn_prediction import UtteranceWithContext
from dataflow.onmt_helpers.create_onmt_text_data import (
OnmtTextDatum,
create_context_turns,
create_onmt_text_datum_for_turn,
)
# We assume all dialogues start from turn 0.
# This is true for MultiWoZ and CalFlow datasets.
# Lower bound passed as `min_turn_index` when collecting context turns.
_MIN_TURN_INDEX = 0
def create_onmt_text_data_for_contextualized_turn(
    contextualized_turn: UtteranceWithContext,
    num_context_turns: int,
    min_turn_index: int,
    include_program: bool,
    include_agent_utterance: bool,
    include_described_entities: bool,
) -> Iterator[OnmtTextDatum]:
    """Yields OnmtTextDatum for a dialogue.

    Builds a placeholder current turn (empty agent utterance, empty program)
    for the utterance being predicted, gathers its preceding context turns,
    and yields the corresponding source-target datum.
    """
    datum_id = contextualized_turn.datum_id
    # Index the dialogue's turns by their turn index for fast lookup.
    turn_lookup: Dict[int, Turn] = {
        context_turn.turn_index: context_turn
        for context_turn in contextualized_turn.context.turns
    }
    context_turns = create_context_turns(
        turn_lookup=turn_lookup,
        curr_turn_index=datum_id.turn_index,
        num_context_turns=num_context_turns,
        min_turn_index=min_turn_index,
    )
    # The agent has not responded yet, so the current turn carries an empty
    # agent utterance and the empty program "()".
    placeholder_agent_utterance = AgentUtterance(
        original_text="", tokens=[], described_entities=[]
    )
    current_turn = Turn(
        turn_index=datum_id.turn_index,
        user_utterance=contextualized_turn.user_utterance,
        agent_utterance=placeholder_agent_utterance,
        lispress="()",
        skip=False,
    )
    yield create_onmt_text_datum_for_turn(
        dialogue_id=datum_id.dialogue_id,
        curr_turn=current_turn,
        context_turns=context_turns,
        include_program=include_program,
        include_agent_utterance=include_agent_utterance,
        include_described_entities=include_described_entities,
    )
# CLI entry point. `add_arguments` and `main` are presumably defined above in
# the full file (not visible in this excerpt) — TODO confirm.
if __name__ == "__main__":
    cmdline_parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter
    )
    add_arguments(cmdline_parser)
    args = cmdline_parser.parse_args()
    print("Semantic Machines\N{TRADE MARK SIGN} software.")
    main(
        dataflow_dialogues_jsonl=args.dialogues_jsonl,
        num_context_turns=args.num_context_turns,
        min_turn_index=_MIN_TURN_INDEX,
        include_program=args.include_program,
        include_agent_utterance=args.include_agent_utterance,
        include_described_entities=args.include_described_entities,
        onmt_text_data_outbase=args.onmt_text_data_outbase,
    )
| [
2,
220,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
220,
49962,
739,
262,
17168,
5964,
13,
198,
37811,
198,
13900,
5109,
31182,
59,
45,
90,
5446,
19266,
39641,
36771,
92,
3788,
13,
198,
198,
16719,
274,
2420,
1366,
357,
10459,
12,
... | 2.425419 | 1,133 |
#!/usr/bin/python3
#
# Corona Circles, codevscovid19 hackathon Zurich
# by Christopher Rehm 29-30 mar 2020, christopherrehm@web.de
import sys
import MySQLdb
if __name__ == "__main__":
    # Exactly one command-line argument (the circle name) is expected.
    if len(sys.argv) == 2:
        findCircleIDbyName(sys.argv[1])
    else:
        print("wrong number of arguments")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
198,
2,
44574,
21239,
5427,
11,
2438,
85,
1416,
709,
312,
1129,
8156,
12938,
43412,
198,
2,
416,
12803,
797,
23940,
2808,
12,
1270,
1667,
12131,
11,
33826,
8803,
260,
23940,
31,
1238... | 2.460317 | 126 |
#!/usr/bin/python
import time
import os
import argparse
from subprocess import call
from joblib import Parallel, delayed
import multiprocessing
# Driver: runs `bispatch` (presumably defined above in the full file) on every
# entry of the original directory, in parallel across all CPU cores.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--orig',
        type=str,
        help="Original directory")
    parser.add_argument(
        '--new',
        type=str,
        help="Latest directory")
    parser.add_argument(
        '--patch',
        type=str,
        help="Patched directory")
    parser.add_argument('--verbose', action='store_true')
    # NOTE(review): `global` at module scope is a no-op; `args` is already a
    # module-level name here.
    global args
    args = parser.parse_args()
    num_cores = multiprocessing.cpu_count()
    Parallel(n_jobs=num_cores)(delayed(bispatch)(i) for i in os.listdir(args.orig))
    # Dead code kept for reference: earlier multiprocessing.Pool approach.
    """
    pool = multiprocessing.Pool(num_cores)
    results = []
    for filename in os.listdir(args.orig):
        results.append(pool.apply_async(bisdiff, filename))
    """
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
6738,
850,
14681,
1330,
869,
198,
198,
6738,
1693,
8019,
1330,
42945,
11,
11038,
198,
11748,
18540,
305,
919,
278,
198,
220,
198,
... | 2.403141 | 382 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from os import path
import binascii
'''
Export file with h2o.export_file compressed with 'gzip'
'''
# Standard h2o pyunit harness: run standalone under __main__, otherwise run the
# test directly at import time. `export_gzip` is presumably defined above in
# the full file (not visible in this excerpt) — TODO confirm.
if __name__ == "__main__":
    pyunit_utils.standalone_test(export_gzip)
else:
    export_gzip()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
16,
553,
40720,
40720,
40720,
4943,
198,
11748,
289,
17,
78,
198,
6738,
5254,
1330,
12972,
20850,
62,
26791,
198,
6738,
28686,
1330,
... | 2.771186 | 118 |
# -*- coding: utf-8 -*-
# Copyright 2014 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os.path
import dateutil.parser
from dateutil import tz
from oslo_config import cfg
import caso.extract.manager
import caso.messenger
from caso import utils
# oslo.config option definitions for the cAso extractor. The messenger help
# text embeds the list of available messenger classes, discovered at import
# time via caso.messenger.all_managers().
opts = [
    cfg.ListOpt('messengers',
                default=['caso.messenger.noop.NoopMessenger'],
                help='List of messenger that will dispatch records. '
                     'valid values are %s' %
                     ["%s.%s" % (i.__module__, i.__name__)
                      for i in caso.messenger.all_managers()]),
    cfg.StrOpt('spooldir',
               default='/var/spool/caso',
               help='Spool directory.'),
]
# Options additionally exposed on the command line.
cli_opts = [
    cfg.BoolOpt('dry_run',
                default=False,
                help='Extract records but do not push records to SSM. This '
                     'will not update the last run date.'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
CONF.register_cli_opts(cli_opts)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
1946,
7897,
2351,
4992,
4281,
357,
7902,
2149,
8,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
... | 2.525535 | 607 |
from compas.geometry import Translation, Vector, Transformation, Frame
from integral_timber_joints.assembly import BeamAssemblyMethod
from integral_timber_joints.process.dependency import ComputationalResult
# Best-effort import of typing-only names (unavailable in some embedded
# Python environments such as IronPython/Rhino).
# NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
# consider narrowing to `except ImportError:` after confirming no other
# exception type is expected here.
try:
    from typing import Dict, List, Optional, Tuple
    from integral_timber_joints.process import RobotClampAssemblyProcess
    from integral_timber_joints.tools import Gripper, Tool, Screwdriver
except:
    pass
# ---------------------------------------------------------------
# This file contains functions to be imported into Process class.
# They are separated here to keep individual files smaller.
# ---------------------------------------------------------------
# Computing Gripper Related Attributes
# ------------------------------------
# Automatically Invoked Functions
# -------------------------------------
def assign_gripper_to_beam(process, beam_id, verbose=False):
    # type: (RobotClampAssemblyProcess, str, bool) -> ComputationalResult
    """Assign a gripper type using available grippers based on the beam's length.
    Beam must fit within gripper `beam_length_limits`, if multiple options allow,
    the gripper with the closest `target_beam_length` will be chosen.
    If the attribute `gripper_type` is already assigned, this function will not change it.
    For beam that is SCREWED_WITHOUT_GRIPPER. It will find the grasping joint,
    and copy the `tool_type` and `tool_id` to 'gripper_type' and 'gripper_id'
    State Change
    ------------
    This functions sets the following beam_attribute
    - 'gripper_type'
    - 'gripper_id'
    Return
    ------
    `ComputationalResult.ValidCannotContinue` if no suitable gripper can be found
    `ComputationalResult.ValidCanContinue` if a suitable gripper can be found
    """
    beam_length = process.assembly.beam(beam_id).length
    chosen_gripper_type = None
    chosen_gripper_ideal = None
    assembly_method = process.assembly.get_assembly_method(beam_id)
    # * Skip MANUAL_ASSEMBLY
    if assembly_method == BeamAssemblyMethod.MANUAL_ASSEMBLY:
        if verbose:
            print("Skipping assign_gripper_to_beam for MANUAL_ASSEMBLY")
        return ComputationalResult.ValidCanContinue
    # * Handle the copy and paste for SCREWED_WITHOUT_GRIPPER
    # (the grasping screwdriver doubles as the gripper)
    if assembly_method == BeamAssemblyMethod.SCREWED_WITHOUT_GRIPPER:
        grasping_joint_id = process.assembly.get_grasping_joint_id(beam_id)
        tool_type = process.assembly.get_joint_attribute(grasping_joint_id, "tool_type")
        tool_id = process.assembly.get_joint_attribute(grasping_joint_id, "tool_id")
        process.assembly.set_beam_attribute(beam_id, "gripper_type", tool_type)
        process.assembly.set_beam_attribute(beam_id, "gripper_id", tool_id)
        return ComputationalResult.ValidCanContinue
    # Do not change anything if gripper_type is already set
    # (and the stored gripper_id refers to an existing, type-matching tool)
    already_set = False
    gripper_type = process.assembly.get_beam_attribute(beam_id, "gripper_type")
    if gripper_type is not None:
        if verbose:
            print("assign_gripper_to_beam: gripper_type set")
        gripper_id = process.assembly.get_beam_attribute(beam_id, "gripper_id")
        if gripper_id is not None:
            if verbose:
                print("assign_gripper_to_beam: gripper_id set")
            # Check that the gripper_id is sensible
            if assembly_method == BeamAssemblyMethod.SCREWED_WITHOUT_GRIPPER:
                if verbose:
                    print("assign_gripper_to_beam: assembly method = %s " % assembly_method)
                if gripper_id in [tool.name for tool in process.screwdrivers]:
                    if verbose:
                        print("assign_gripper_to_beam: gripper_id %s is valid and will not be changed." % process.assembly.get_beam_attribute(beam_id, "gripper_id"))
                    already_set = True
            else:
                if gripper_id in [tool.name for tool in process.grippers]:
                    if process.tool(gripper_id).type_name == gripper_type:
                        if verbose:
                            print("assign_gripper_to_beam: gripper_id %s is valid and will not be changed." % process.assembly.get_beam_attribute(beam_id, "gripper_id"))
                        already_set = True
    if already_set:
        if verbose:
            print("Beam (%s) gripper_type (%s) has already been set. No change made by assign_gripper_to_beam()." %
                  (beam_id, gripper_type))
        return ComputationalResult.ValidNoChange
    if assembly_method == BeamAssemblyMethod.SCREWED_WITHOUT_GRIPPER:
        # First screwdriver joint supplies both the type and the tool id.
        joint_ids = process.assembly.get_joint_ids_with_tools_for_beam(beam_id)
        first_screwdriver = process.get_tool_of_joint(joint_ids[0])
        chosen_gripper_type = first_screwdriver.type_name
        gripper_id = first_screwdriver.name
        if verbose:
            print("chosen_gripper_type = %s" % chosen_gripper_type)
            print("gripper_id = %s" % gripper_id)
    else:
        # Compute Gripper Type
        for gripper_type in process.available_gripper_types:
            gripper = process.get_one_gripper_by_type(gripper_type)
            # Check if beam length is within limits
            if beam_length >= gripper.beam_length_limits[0] and beam_length <= gripper.beam_length_limits[1]:
                # Compute beam length vs ideal length and make decision
                length_to_ideal = abs(beam_length - gripper.target_beam_length)
                if chosen_gripper_type is None or length_to_ideal < chosen_gripper_ideal:
                    chosen_gripper_type = gripper_type
                    chosen_gripper_ideal = length_to_ideal
        # In cases no suitable gripper is available
        if chosen_gripper_type is None:
            if verbose:
                print("No suitable gripper can be assigned to %s" % (beam_id))
            print("WARNING: No suitable gripper can be assigned to %s" % (beam_id))
            return ComputationalResult.ValidCannotContinue
        gripper_id = process.get_one_tool_by_type(chosen_gripper_type).name
    # Set gripper_type and gripper_id and return
    process.assembly.set_beam_attribute(beam_id, "gripper_type", chosen_gripper_type)
    process.assembly.set_beam_attribute(beam_id, "gripper_id", gripper_id)
    if verbose:
        print("Gripper Type: %s assigned to %s" % (chosen_gripper_type, beam_id))
    return ComputationalResult.ValidCanContinue
def compute_gripper_grasp_pose(process, beam_id, verbose=False):
    # type: (RobotClampAssemblyProcess, str, bool) -> ComputationalResult
    """ Compute grasp pose for the beam and gripper.
    Gripper should be assigned before.
    For Beams with Gripper Gripper
    ------------------------------
    Default values will be applied if 'gripper_grasp_dist_from_start' and 'gripper_grasp_face'
    are not set. Otherwise previous values will be preserved to calculate 'gripper_tcp_in_ocf'.
    For Beams with Screwdriver as gripper
    -------------------------------------
    - `tool_id`s and `gripper_id` should be assigned before.
    - `grasping_joint_id` should be assigned before.
    - `gripper_tcp_in_ocf` will be based on the
        - beam attribute `grasping_joint_id` (Set Manually)
        - joint_attribute `tool_orientation_frame_index` (Set Manually)
    State Change
    ------------
    This functions sets the following beam_attribute
    - 'gripper_grasp_dist_from_start' (if default)
    - 'gripper_grasp_face' (if default)
    - 'gripper_tcp_in_ocf'
    Return
    ------
    `ComputationalResult.ValidCannotContinue` if prerequisite not satisfied
    `ComputationalResult.ValidCanContinue` otherwise (this function should not fail)
    """
    assembly_method = process.assembly.get_assembly_method(beam_id)
    # * Skip MANUAL_ASSEMBLY
    if assembly_method == BeamAssemblyMethod.MANUAL_ASSEMBLY:
        if verbose:
            print("Skipping compute_gripper_grasp_pose for MANUAL_ASSEMBLY")
        return ComputationalResult.ValidCanContinue
    # Check to ensure prerequisite
    if process.assembly.get_beam_attribute(beam_id, 'gripper_type') is None:
        return ComputationalResult.ValidCannotContinue
    beam = process.assembly.beam(beam_id)
    if assembly_method == BeamAssemblyMethod.SCREWED_WITHOUT_GRIPPER:
        # Retrive which joint is the gripper screwdriver and the tool_orientation_frame index
        joint_id = process.assembly.get_grasping_joint_id(beam_id) # grasping_joint_id
        tool_orientation_frame_index = process.assembly.get_joint_attribute(joint_id, 'tool_orientation_frame_index')
        # Transform the tool orientation frame to beam ocf
        # (joint is looked up from the neighbour's side: reversed joint_id)
        joint = process.assembly.joint((joint_id[1], joint_id[0]))
        screwdriver_tcp_frame_in_wcf = joint.get_clamp_frames(beam)[tool_orientation_frame_index]
        t_world_from_screwdriver_tcp = Transformation.from_frame(screwdriver_tcp_frame_in_wcf)
        t_world_from_beam = Transformation.from_frame(beam.frame)
        t_beam_from_screwdriver_tcp = t_world_from_beam.inverse() * t_world_from_screwdriver_tcp
        process.assembly.set_beam_attribute(beam_id, "gripper_tcp_in_ocf", Frame.from_transformation(t_beam_from_screwdriver_tcp))
        return ComputationalResult.ValidCanContinue
    else:
        # * Computing `gripper_grasp_face` if it is None
        # Apply default values if None
        # NOTE(review): `grasp_face` is not defined in this file's visible
        # scope — confirm it is imported or provided on the Process class.
        # NOTE(review): the `gripper_grasp_face = ...` assignments below are
        # never read afterwards; the setters presumably store the value as a
        # beam attribute that `grasp_face(beam_id)` re-reads — confirm.
        if grasp_face(beam_id) not in [1, 2, 3, 4]: # Default method
            gripper_grasp_face = process.set_grasp_face_following_assembly_direction(beam_id)
        if grasp_face(beam_id) not in [1, 2, 3, 4]: # Backup plan
            gripper_grasp_face = process.set_grasp_face_following_guide_vector(beam_id)
        if grasp_face(beam_id) not in [1, 2, 3, 4]: # Picking face 1 and deal with it
            process.assembly.set_beam_attribute(beam_id, "gripper_grasp_face", 1)
            print("Someting wrong, gripper_grasp_face is not in [1,2,3,4] after search. Grasp face defaulted to ", 1)
        # * Computing `gripper_grasp_dist_from_start` if it is None
        gripper_grasp_dist_from_start = process.assembly.get_beam_attribute(beam_id, "gripper_grasp_dist_from_start")
        if gripper_grasp_dist_from_start is None:
            # Default: grasp at the midpoint of the beam.
            gripper_grasp_dist_from_start = beam.length / 2.0
            process.assembly.set_beam_attribute(beam_id, "gripper_grasp_dist_from_start", gripper_grasp_dist_from_start)
        # * Compute Gripper Grasp Pose, aka. gripper_tcp_in_ocf
        gripper_tcp_in_ocf = beam.grasp_frame_ocf(grasp_face(beam_id), gripper_grasp_dist_from_start)
        process.assembly.set_beam_attribute(beam_id, "gripper_tcp_in_ocf", gripper_tcp_in_ocf)
        return ComputationalResult.ValidCanContinue
def set_grasp_face_following_assembly_direction(process, beam_id):
    # type: (RobotClampAssemblyProcess, str) -> int
    """Pick the grasp face (1-4) whose normal opposes the beam's assembly direction.

    Only the first joint connecting `beam_id` to an already-built neighbour is
    examined; its `face_id` determines the grasp face.  If no such joint
    exists, no attribute is set and None is returned.

    State Change
    ------------
    Sets beam_attribute 'gripper_grasp_face'.

    Dependency Trigger
    ------------------
    Invalidate: 'compute_gripper_grasp_pose' and downstream
    """
    connected_joint_ids = process.assembly.get_joints_of_beam_connected_to_already_built(beam_id)
    first_joint_id = next(iter(connected_joint_ids), None)
    if first_joint_id is None:
        # No built neighbour: fall through without touching any attribute
        # (same outcome as the loop never executing).
        return None
    neighbour_joint = process.assembly.joint(first_joint_id)
    # Face opposite the joint face: face_id -> face_id + 2, wrapped into 1..4.
    chosen_face = (neighbour_joint.face_id + 1) % 4 + 1
    process.assembly.set_beam_attribute(beam_id, 'gripper_grasp_face', chosen_face)
    # Grasp pose depends on the face, so downstream results are now stale.
    process.dependency.invalidate(beam_id, process.compute_gripper_grasp_pose)
    return chosen_face
def set_grasp_face_following_guide_vector(process, beam_id):
    # type: (RobotClampAssemblyProcess, str) -> int
    """Return the best face number (1-4) for creating `gripper_tcp_in_ocf`
    where the Z-Axis of the tcp_in_WCF, when beam is at 'assembly_wcf_final',
    follows the direction of guide vector `design_guide_vector_grasp`.

    The face whose TCP Z-axis (in WCF) has the largest dot product with the
    unitized guide vector wins; ties go to the lower face number.

    Side Effect
    -----------
    beam_attribute 'gripper_grasp_face' will be set.
    """
    # Unitize so the dot product is a pure cosine and faces compare fairly.
    design_guide_vector_grasp = process.assembly.get_beam_attribute(beam_id, 'design_guide_vector_grasp').unitized()
    assert design_guide_vector_grasp is not None
    beam = process.assembly.beam(beam_id)
    # Hoisted: the beam's world transform is the same for all four candidate faces.
    t_world_from_beam = Transformation.from_frame(beam.frame)

    def _alignment(gripper_grasp_face):
        # Cosine between this face's TCP Z-axis (in WCF) and the guide vector.
        gripper_tcp_in_ocf = beam.grasp_frame_ocf(gripper_grasp_face, 0)
        gripper_tcp_in_wcf = gripper_tcp_in_ocf.transformed(t_world_from_beam)
        return gripper_tcp_in_wcf.zaxis.dot(design_guide_vector_grasp)

    # max() always returns a valid face in 1..4.  (The previous implementation
    # seeded best_score = -1, so a face scoring exactly -1.0 could never be
    # selected and best_face could remain the invalid value 0.)
    best_face = max(range(1, 5), key=_alignment)
    process.assembly.set_beam_attribute(beam_id, 'gripper_grasp_face', best_face)
    return best_face
# -------------------------------------
# Manually Invoked Functions
# -------------------------------------
def adjust_gripper_pos(process, beam_id, amount):
    # type: (RobotClampAssemblyProcess, str, float) -> bool
    """Slide the gripper grasp position along the beam by `amount`.

    Updates beam_attributes 'gripper_grasp_dist_from_start' and
    'gripper_tcp_in_ocf'.  A gripper must already be assigned to the beam.

    Return
    ------
    False if the prerequisite (an assigned gripper) is not satisfied.
    True otherwise (this function should not fail).

    Dependency Trigger
    ------------------
    Invalidate: 'compute_gripper_grasp_pose' and downstream
    """
    assembly = process.assembly
    beam = assembly.beam(beam_id)
    # Prerequisite: a gripper must have been assigned to this beam.
    if assembly.get_beam_attribute(beam_id, 'gripper_type') is None:
        return False
    grasp_face = assembly.get_beam_attribute(beam_id, "gripper_grasp_face")
    new_dist = assembly.get_beam_attribute(beam_id, "gripper_grasp_dist_from_start") + amount
    assembly.set_beam_attribute(beam_id, "gripper_grasp_dist_from_start", new_dist)
    # Re-derive the TCP frame on the beam from the shifted grasp position.
    new_tcp_in_ocf = beam.grasp_frame_ocf(grasp_face, new_dist)
    assembly.set_beam_attribute(beam_id, "gripper_tcp_in_ocf", new_tcp_in_ocf)
    # Everything computed from the grasp pose is now stale.
    process.dependency.invalidate(beam_id, process.compute_gripper_grasp_pose)
    return True
def override_grasp_face(process, beam_id, grasp_face):
    # type: (RobotClampAssemblyProcess, str, float) -> bool
    """Manually force `gripper_grasp_face` of the specified beam.

    Values outside 1 - 4 are wrapped back into that range (e.g. 5 -> 1, 0 -> 4).

    State Change
    ------------
    Sets beam_attribute 'gripper_grasp_face'.

    Dependency Trigger
    ------------------
    Invalidate: 'compute_gripper_grasp_pose' and downstream
    """
    wrapped_face = (grasp_face - 1) % 4 + 1  # map any integer onto 1..4
    process.assembly.set_beam_attribute(beam_id, 'gripper_grasp_face', wrapped_face)
    # The grasp pose must be recomputed for the newly chosen face.
    process.dependency.invalidate(beam_id, process.compute_gripper_grasp_pose)
    return True
| [
6738,
552,
292,
13,
469,
15748,
1330,
33322,
11,
20650,
11,
49127,
11,
25184,
198,
6738,
19287,
62,
16514,
527,
62,
73,
1563,
82,
13,
41873,
1330,
25855,
49670,
17410,
198,
6738,
19287,
62,
16514,
527,
62,
73,
1563,
82,
13,
14681,
1... | 2.570192 | 6,005 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: graph_description.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='graph_description.proto',
package='graph_description',
syntax='proto3',
serialized_pb=_b('\n\x17graph_description.proto\x12\x11graph_description\x1a\x1egoogle/protobuf/wrappers.proto\"G\n\x04Host\x12\x12\n\x08hostname\x18\x01 \x01(\tH\x00\x12\x0c\n\x02ip\x18\x02 \x01(\tH\x00\x12\x12\n\x08\x61sset_id\x18\x03 \x01(\tH\x00\x42\t\n\x07host_id\"\xfb\x02\n\x10\x41ssetDescription\x12\x10\n\x08node_key\x18\x01 \x01(\t\x12\x11\n\ttimestamp\x18\x02 \x01(\x04\x12.\n\x08\x61sset_id\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\thost_name\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x31\n\x0bhost_domain\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12/\n\thost_fqdn\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x34\n\x0ehost_local_mac\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07host_ip\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x18\n\x10operating_system\x18\t \x01(\t\"\xe2\x03\n\x0fNodeDescription\x12\x39\n\nasset_node\x18\x01 \x01(\x0b\x32#.graph_description.AssetDescriptionH\x00\x12=\n\x0cprocess_node\x18\x02 \x01(\x0b\x32%.graph_description.ProcessDescriptionH\x00\x12\x37\n\tfile_node\x18\x03 \x01(\x0b\x32\".graph_description.FileDescriptionH\x00\x12\x42\n\x0fip_address_node\x18\x04 \x01(\x0b\x32\'.graph_description.IpAddressDescriptionH\x00\x12I\n\x18outbound_connection_node\x18\x05 \x01(\x0b\x32%.graph_description.OutboundConnectionH\x00\x12G\n\x17inbound_connection_node\x18\x06 \x01(\x0b\x32$.graph_description.InboundConnectionH\x00\x12\x36\n\x0c\x64ynamic_node\x18\x07 \x01(\x0b\x32\x1e.graph_description.DynamicNodeH\x00\x42\x0c\n\nwhich_node\"\xa8\x02\n\x12OutboundConnection\x12\x10\n\x08node_key\x18\x01 \x01(\t\x12.\n\x08\x61sset_id\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08hostname\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07host_ip\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\r\n\x05state\x18\x05 \x01(\r\x12\x19\n\x11\x63reated_timestamp\x18\x06 \x01(\x04\x12\x1c\n\x14terminated_timestamp\x18\x07 
\x01(\x04\x12\x1b\n\x13last_seen_timestamp\x18\x08 \x01(\x04\x12\x0c\n\x04port\x18\t \x01(\r\"\xa7\x02\n\x11InboundConnection\x12\x10\n\x08node_key\x18\x01 \x01(\t\x12.\n\x08\x61sset_id\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08hostname\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07host_ip\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\r\n\x05state\x18\x05 \x01(\r\x12\x19\n\x11\x63reated_timestamp\x18\x06 \x01(\x04\x12\x1c\n\x14terminated_timestamp\x18\x07 \x01(\x04\x12\x1b\n\x13last_seen_timestamp\x18\x08 \x01(\x04\x12\x0c\n\x04port\x18\t \x01(\r\"\xb3\x03\n\x12ProcessDescription\x12\x10\n\x08node_key\x18\x01 \x01(\t\x12.\n\x08\x61sset_id\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08hostname\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07host_ip\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\r\n\x05state\x18\x05 \x01(\r\x12\x12\n\nprocess_id\x18\x06 \x01(\x04\x12\x14\n\x0cprocess_guid\x18\x07 \x01(\t\x12\x19\n\x11\x63reated_timestamp\x18\x08 \x01(\x04\x12\x1c\n\x14terminated_timestamp\x18\t \x01(\x04\x12\x1b\n\x13last_seen_timestamp\x18\n \x01(\x04\x12\x14\n\x0cprocess_name\x18\x0b \x01(\t\x12\x1c\n\x14process_command_line\x18\x0c \x01(\t\x12\x1f\n\x17process_integrity_level\x18\r \x01(\t\x12\x18\n\x10operating_system\x18\x0e \x01(\t\"\xd8\x04\n\x0f\x46ileDescription\x12\x10\n\x08node_key\x18\x01 \x01(\t\x12.\n\x08\x61sset_id\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08hostname\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07host_ip\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\r\n\x05state\x18\x05 \x01(\r\x12\x19\n\x11\x63reated_timestamp\x18\x06 \x01(\x04\x12\x19\n\x11\x64\x65leted_timestamp\x18\x07 \x01(\x04\x12\x1b\n\x13last_seen_timestamp\x18\x08 \x01(\x04\x12\x11\n\tfile_name\x18\t \x01(\t\x12\x11\n\tfile_path\x18\n \x01(\t\x12\x16\n\x0e\x66ile_extension\x18\x0b \x01(\t\x12\x16\n\x0e\x66ile_mime_type\x18\x0c 
\x01(\t\x12\x11\n\tfile_size\x18\r \x01(\x04\x12\x14\n\x0c\x66ile_version\x18\x0e \x01(\t\x12\x18\n\x10\x66ile_description\x18\x0f \x01(\t\x12\x14\n\x0c\x66ile_product\x18\x10 \x01(\t\x12\x14\n\x0c\x66ile_company\x18\x11 \x01(\t\x12\x16\n\x0e\x66ile_directory\x18\x12 \x01(\t\x12\x12\n\nfile_inode\x18\x13 \x01(\x04\x12\x17\n\x0f\x66ile_hard_links\x18\x14 \x01(\x04\x12\x10\n\x08md5_hash\x18\x15 \x01(\t\x12\x11\n\tsha1_hash\x18\x16 \x01(\t\x12\x13\n\x0bsha256_hash\x18\x17 \x01(\t\"a\n\x14IpAddressDescription\x12\x10\n\x08node_key\x18\x01 \x01(\t\x12\x11\n\ttimestamp\x18\x02 \x01(\x04\x12\x12\n\nip_address\x18\x03 \x01(\t\x12\x10\n\x08ip_proto\x18\x04 \x01(\t\"\x97\x01\n\x07Session\x12\x1e\n\x16primary_key_properties\x18\x01 \x03(\t\x12%\n\x1dprimary_key_requires_asset_id\x18\x02 \x01(\x08\x12\x14\n\x0c\x63reated_time\x18\x03 \x01(\x04\x12\x16\n\x0elast_seen_time\x18\x04 \x01(\x04\x12\x17\n\x0fterminated_time\x18\x05 \x01(\x04\"O\n\x06Static\x12\x1e\n\x16primary_key_properties\x18\x01 \x03(\t\x12%\n\x1dprimary_key_requires_asset_id\x18\x02 \x01(\x08\"t\n\nIdStrategy\x12-\n\x07session\x18\x01 \x01(\x0b\x32\x1a.graph_description.SessionH\x00\x12+\n\x06static\x18\x02 \x01(\x0b\x32\x19.graph_description.StaticH\x00\x42\n\n\x08strategy\"T\n\x0cNodeProperty\x12\x11\n\x07intprop\x18\x01 \x01(\x03H\x00\x12\x12\n\x08uintprop\x18\x02 \x01(\x04H\x00\x12\x11\n\x07strprop\x18\x03 \x01(\tH\x00\x42\n\n\x08property\"\x9e\x03\n\x0b\x44ynamicNode\x12\x42\n\nproperties\x18\x01 \x03(\x0b\x32..graph_description.DynamicNode.PropertiesEntry\x12\x10\n\x08node_key\x18\x02 \x01(\t\x12\x11\n\tnode_type\x18\x03 \x01(\t\x12\x0f\n\x07seen_at\x18\x04 \x01(\x04\x12.\n\x08\x61sset_id\x18\x05 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\x08hostname\x18\x06 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12-\n\x07host_ip\x18\x07 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12\x32\n\x0bid_strategy\x18\x08 
\x03(\x0b\x32\x1d.graph_description.IdStrategy\x1aR\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.graph_description.NodeProperty:\x02\x38\x01\"=\n\x0f\x45\x64geDescription\x12\x0c\n\x04\x66rom\x18\x01 \x01(\t\x12\n\n\x02to\x18\x02 \x01(\t\x12\x10\n\x08\x65\x64geName\x18\x03 \x01(\t\"=\n\x08\x45\x64geList\x12\x31\n\x05\x65\x64ges\x18\x01 \x03(\x0b\x32\".graph_description.EdgeDescription\"\xc0\x02\n\x10GraphDescription\x12=\n\x05nodes\x18\x01 \x03(\x0b\x32..graph_description.GraphDescription.NodesEntry\x12=\n\x05\x65\x64ges\x18\x02 \x03(\x0b\x32..graph_description.GraphDescription.EdgesEntry\x12\x11\n\ttimestamp\x18\x03 \x01(\x04\x1aP\n\nNodesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".graph_description.NodeDescription:\x02\x38\x01\x1aI\n\nEdgesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.graph_description.EdgeList:\x02\x38\x01\"L\n\x12GeneratedSubgraphs\x12\x36\n\tsubgraphs\x18\x01 \x03(\x0b\x32#.graph_description.GraphDescriptionb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_HOST = _descriptor.Descriptor(
name='Host',
full_name='graph_description.Host',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hostname', full_name='graph_description.Host.hostname', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ip', full_name='graph_description.Host.ip', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='asset_id', full_name='graph_description.Host.asset_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='host_id', full_name='graph_description.Host.host_id',
index=0, containing_type=None, fields=[]),
],
serialized_start=78,
serialized_end=149,
)
_ASSETDESCRIPTION = _descriptor.Descriptor(
name='AssetDescription',
full_name='graph_description.AssetDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_key', full_name='graph_description.AssetDescription.node_key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timestamp', full_name='graph_description.AssetDescription.timestamp', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='asset_id', full_name='graph_description.AssetDescription.asset_id', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='host_name', full_name='graph_description.AssetDescription.host_name', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='host_domain', full_name='graph_description.AssetDescription.host_domain', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='host_fqdn', full_name='graph_description.AssetDescription.host_fqdn', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='host_local_mac', full_name='graph_description.AssetDescription.host_local_mac', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='host_ip', full_name='graph_description.AssetDescription.host_ip', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operating_system', full_name='graph_description.AssetDescription.operating_system', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=152,
serialized_end=531,
)
_NODEDESCRIPTION = _descriptor.Descriptor(
name='NodeDescription',
full_name='graph_description.NodeDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='asset_node', full_name='graph_description.NodeDescription.asset_node', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='process_node', full_name='graph_description.NodeDescription.process_node', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='file_node', full_name='graph_description.NodeDescription.file_node', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ip_address_node', full_name='graph_description.NodeDescription.ip_address_node', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='outbound_connection_node', full_name='graph_description.NodeDescription.outbound_connection_node', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inbound_connection_node', full_name='graph_description.NodeDescription.inbound_connection_node', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dynamic_node', full_name='graph_description.NodeDescription.dynamic_node', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='which_node', full_name='graph_description.NodeDescription.which_node',
index=0, containing_type=None, fields=[]),
],
serialized_start=534,
serialized_end=1016,
)
_OUTBOUNDCONNECTION = _descriptor.Descriptor(
name='OutboundConnection',
full_name='graph_description.OutboundConnection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_key', full_name='graph_description.OutboundConnection.node_key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='asset_id', full_name='graph_description.OutboundConnection.asset_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hostname', full_name='graph_description.OutboundConnection.hostname', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='host_ip', full_name='graph_description.OutboundConnection.host_ip', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='graph_description.OutboundConnection.state', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='created_timestamp', full_name='graph_description.OutboundConnection.created_timestamp', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='terminated_timestamp', full_name='graph_description.OutboundConnection.terminated_timestamp', index=6,
number=7, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_seen_timestamp', full_name='graph_description.OutboundConnection.last_seen_timestamp', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='port', full_name='graph_description.OutboundConnection.port', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1019,
serialized_end=1315,
)
_INBOUNDCONNECTION = _descriptor.Descriptor(
name='InboundConnection',
full_name='graph_description.InboundConnection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_key', full_name='graph_description.InboundConnection.node_key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='asset_id', full_name='graph_description.InboundConnection.asset_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hostname', full_name='graph_description.InboundConnection.hostname', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='host_ip', full_name='graph_description.InboundConnection.host_ip', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='graph_description.InboundConnection.state', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='created_timestamp', full_name='graph_description.InboundConnection.created_timestamp', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='terminated_timestamp', full_name='graph_description.InboundConnection.terminated_timestamp', index=6,
number=7, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_seen_timestamp', full_name='graph_description.InboundConnection.last_seen_timestamp', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='port', full_name='graph_description.InboundConnection.port', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1318,
serialized_end=1613,
)
_PROCESSDESCRIPTION = _descriptor.Descriptor(
name='ProcessDescription',
full_name='graph_description.ProcessDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_key', full_name='graph_description.ProcessDescription.node_key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='asset_id', full_name='graph_description.ProcessDescription.asset_id', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hostname', full_name='graph_description.ProcessDescription.hostname', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='host_ip', full_name='graph_description.ProcessDescription.host_ip', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='graph_description.ProcessDescription.state', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='process_id', full_name='graph_description.ProcessDescription.process_id', index=5,
number=6, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='process_guid', full_name='graph_description.ProcessDescription.process_guid', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='created_timestamp', full_name='graph_description.ProcessDescription.created_timestamp', index=7,
number=8, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='terminated_timestamp', full_name='graph_description.ProcessDescription.terminated_timestamp', index=8,
number=9, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_seen_timestamp', full_name='graph_description.ProcessDescription.last_seen_timestamp', index=9,
number=10, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='process_name', full_name='graph_description.ProcessDescription.process_name', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='process_command_line', full_name='graph_description.ProcessDescription.process_command_line', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='process_integrity_level', full_name='graph_description.ProcessDescription.process_integrity_level', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operating_system', full_name='graph_description.ProcessDescription.operating_system', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1616,
serialized_end=2051,
)
# Descriptor for the FileDescription message: identity (node_key/asset/host),
# lifecycle timestamps, filesystem metadata and content hashes of a file node.
# NOTE(review): protoc-generated code — regenerate from graph_description.proto
# instead of editing by hand; serialized offsets below must match DESCRIPTOR.
_FILEDESCRIPTION = _descriptor.Descriptor(
  name='FileDescription',
  full_name='graph_description.FileDescription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='node_key', full_name='graph_description.FileDescription.node_key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='asset_id', full_name='graph_description.FileDescription.asset_id', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hostname', full_name='graph_description.FileDescription.hostname', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='host_ip', full_name='graph_description.FileDescription.host_ip', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='state', full_name='graph_description.FileDescription.state', index=4,
      number=5, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='created_timestamp', full_name='graph_description.FileDescription.created_timestamp', index=5,
      number=6, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='deleted_timestamp', full_name='graph_description.FileDescription.deleted_timestamp', index=6,
      number=7, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='last_seen_timestamp', full_name='graph_description.FileDescription.last_seen_timestamp', index=7,
      number=8, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_name', full_name='graph_description.FileDescription.file_name', index=8,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_path', full_name='graph_description.FileDescription.file_path', index=9,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_extension', full_name='graph_description.FileDescription.file_extension', index=10,
      number=11, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_mime_type', full_name='graph_description.FileDescription.file_mime_type', index=11,
      number=12, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_size', full_name='graph_description.FileDescription.file_size', index=12,
      number=13, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_version', full_name='graph_description.FileDescription.file_version', index=13,
      number=14, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_description', full_name='graph_description.FileDescription.file_description', index=14,
      number=15, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_product', full_name='graph_description.FileDescription.file_product', index=15,
      number=16, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_company', full_name='graph_description.FileDescription.file_company', index=16,
      number=17, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_directory', full_name='graph_description.FileDescription.file_directory', index=17,
      number=18, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_inode', full_name='graph_description.FileDescription.file_inode', index=18,
      number=19, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='file_hard_links', full_name='graph_description.FileDescription.file_hard_links', index=19,
      number=20, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='md5_hash', full_name='graph_description.FileDescription.md5_hash', index=20,
      number=21, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sha1_hash', full_name='graph_description.FileDescription.sha1_hash', index=21,
      number=22, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sha256_hash', full_name='graph_description.FileDescription.sha256_hash', index=22,
      number=23, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2054,
  serialized_end=2654,
)
# Descriptor for the IpAddressDescription message (node key, observation
# timestamp, address and protocol strings).
# NOTE(review): protoc-generated — do not hand-edit; regenerate from the .proto.
_IPADDRESSDESCRIPTION = _descriptor.Descriptor(
  name='IpAddressDescription',
  full_name='graph_description.IpAddressDescription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='node_key', full_name='graph_description.IpAddressDescription.node_key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='graph_description.IpAddressDescription.timestamp', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ip_address', full_name='graph_description.IpAddressDescription.ip_address', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='ip_proto', full_name='graph_description.IpAddressDescription.ip_proto', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2656,
  serialized_end=2753,
)
# Descriptor for the Session identity strategy: primary-key property names,
# asset-id requirement flag, and created/last-seen/terminated times.
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_SESSION = _descriptor.Descriptor(
  name='Session',
  full_name='graph_description.Session',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='primary_key_properties', full_name='graph_description.Session.primary_key_properties', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='primary_key_requires_asset_id', full_name='graph_description.Session.primary_key_requires_asset_id', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='created_time', full_name='graph_description.Session.created_time', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='last_seen_time', full_name='graph_description.Session.last_seen_time', index=3,
      number=4, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='terminated_time', full_name='graph_description.Session.terminated_time', index=4,
      number=5, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2756,
  serialized_end=2907,
)
# Descriptor for the Static identity strategy (primary-key property names and
# asset-id requirement flag only — no lifetime fields, unlike Session).
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_STATIC = _descriptor.Descriptor(
  name='Static',
  full_name='graph_description.Static',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='primary_key_properties', full_name='graph_description.Static.primary_key_properties', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='primary_key_requires_asset_id', full_name='graph_description.Static.primary_key_requires_asset_id', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2909,
  serialized_end=2988,
)
# Descriptor for IdStrategy: a oneof ('strategy') choosing between the Session
# and Static identity strategies (message types are linked in below).
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_IDSTRATEGY = _descriptor.Descriptor(
  name='IdStrategy',
  full_name='graph_description.IdStrategy',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='session', full_name='graph_description.IdStrategy.session', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='static', full_name='graph_description.IdStrategy.static', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='strategy', full_name='graph_description.IdStrategy.strategy',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=2990,
  serialized_end=3106,
)
# Descriptor for NodeProperty: a oneof ('property') holding an int64, uint64,
# or string value for a DynamicNode property.
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_NODEPROPERTY = _descriptor.Descriptor(
  name='NodeProperty',
  full_name='graph_description.NodeProperty',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='intprop', full_name='graph_description.NodeProperty.intprop', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='uintprop', full_name='graph_description.NodeProperty.uintprop', index=1,
      number=2, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='strprop', full_name='graph_description.NodeProperty.strprop', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='property', full_name='graph_description.NodeProperty.property',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=3108,
  serialized_end=3192,
)
# Synthetic map-entry descriptor for DynamicNode.properties (map<string,
# NodeProperty>); the '8\001' MessageOptions blob sets map_entry=true.
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_DYNAMICNODE_PROPERTIESENTRY = _descriptor.Descriptor(
  name='PropertiesEntry',
  full_name='graph_description.DynamicNode.PropertiesEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='graph_description.DynamicNode.PropertiesEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='graph_description.DynamicNode.PropertiesEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3527,
  serialized_end=3609,
)
# Descriptor for DynamicNode: an open-schema node with a properties map,
# node key/type, seen_at timestamp, host identity wrappers, and a repeated
# IdStrategy. The PropertiesEntry map-entry descriptor nests inside it.
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_DYNAMICNODE = _descriptor.Descriptor(
  name='DynamicNode',
  full_name='graph_description.DynamicNode',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='properties', full_name='graph_description.DynamicNode.properties', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='node_key', full_name='graph_description.DynamicNode.node_key', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='node_type', full_name='graph_description.DynamicNode.node_type', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='seen_at', full_name='graph_description.DynamicNode.seen_at', index=3,
      number=4, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='asset_id', full_name='graph_description.DynamicNode.asset_id', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='hostname', full_name='graph_description.DynamicNode.hostname', index=5,
      number=6, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='host_ip', full_name='graph_description.DynamicNode.host_ip', index=6,
      number=7, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='id_strategy', full_name='graph_description.DynamicNode.id_strategy', index=7,
      number=8, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_DYNAMICNODE_PROPERTIESENTRY, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3195,
  serialized_end=3609,
)
# Descriptor for EdgeDescription: a directed, named edge between two node keys.
# The proto field is named 'from' — a Python keyword, so the generated message
# exposes it via getattr/protobuf APIs rather than plain attribute syntax.
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_EDGEDESCRIPTION = _descriptor.Descriptor(
  name='EdgeDescription',
  full_name='graph_description.EdgeDescription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='from', full_name='graph_description.EdgeDescription.from', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='to', full_name='graph_description.EdgeDescription.to', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='edgeName', full_name='graph_description.EdgeDescription.edgeName', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3611,
  serialized_end=3672,
)
# Descriptor for EdgeList: a repeated EdgeDescription container (used as the
# value type of GraphDescription.edges).
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_EDGELIST = _descriptor.Descriptor(
  name='EdgeList',
  full_name='graph_description.EdgeList',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='edges', full_name='graph_description.EdgeList.edges', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3674,
  serialized_end=3735,
)
# Synthetic map-entry descriptor for GraphDescription.nodes (map<string,
# NodeDescription>); '8\001' sets map_entry=true in MessageOptions.
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_GRAPHDESCRIPTION_NODESENTRY = _descriptor.Descriptor(
  name='NodesEntry',
  full_name='graph_description.GraphDescription.NodesEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='graph_description.GraphDescription.NodesEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='graph_description.GraphDescription.NodesEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3903,
  serialized_end=3983,
)
# Synthetic map-entry descriptor for GraphDescription.edges (map<string,
# EdgeList>); '8\001' sets map_entry=true in MessageOptions.
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_GRAPHDESCRIPTION_EDGESENTRY = _descriptor.Descriptor(
  name='EdgesEntry',
  full_name='graph_description.GraphDescription.EdgesEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='graph_description.GraphDescription.EdgesEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='value', full_name='graph_description.GraphDescription.EdgesEntry.value', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3985,
  serialized_end=4058,
)
# Descriptor for GraphDescription: node and edge maps plus a timestamp; the
# two map-entry descriptors above nest inside it.
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_GRAPHDESCRIPTION = _descriptor.Descriptor(
  name='GraphDescription',
  full_name='graph_description.GraphDescription',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='nodes', full_name='graph_description.GraphDescription.nodes', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='edges', full_name='graph_description.GraphDescription.edges', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='timestamp', full_name='graph_description.GraphDescription.timestamp', index=2,
      number=3, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_GRAPHDESCRIPTION_NODESENTRY, _GRAPHDESCRIPTION_EDGESENTRY, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=3738,
  serialized_end=4058,
)
# Descriptor for GeneratedSubgraphs: a repeated GraphDescription wrapper.
# NOTE(review): protoc-generated — regenerate from the .proto, do not hand-edit.
_GENERATEDSUBGRAPHS = _descriptor.Descriptor(
  name='GeneratedSubgraphs',
  full_name='graph_description.GeneratedSubgraphs',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='subgraphs', full_name='graph_description.GeneratedSubgraphs.subgraphs', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=4060,
  serialized_end=4136,
)
# --- Descriptor cross-linking (protoc-generated; do not hand-edit) ---
# Resolves message_type references between descriptors, wires each oneof
# member to its containing oneof, and registers every message descriptor on
# the file DESCRIPTOR. Statement order matters: fields must be linked before
# the _reflection calls below build the concrete message classes.
_HOST.oneofs_by_name['host_id'].fields.append(
  _HOST.fields_by_name['hostname'])
_HOST.fields_by_name['hostname'].containing_oneof = _HOST.oneofs_by_name['host_id']
_HOST.oneofs_by_name['host_id'].fields.append(
  _HOST.fields_by_name['ip'])
_HOST.fields_by_name['ip'].containing_oneof = _HOST.oneofs_by_name['host_id']
_HOST.oneofs_by_name['host_id'].fields.append(
  _HOST.fields_by_name['asset_id'])
_HOST.fields_by_name['asset_id'].containing_oneof = _HOST.oneofs_by_name['host_id']
_ASSETDESCRIPTION.fields_by_name['asset_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ASSETDESCRIPTION.fields_by_name['host_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ASSETDESCRIPTION.fields_by_name['host_domain'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ASSETDESCRIPTION.fields_by_name['host_fqdn'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ASSETDESCRIPTION.fields_by_name['host_local_mac'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_ASSETDESCRIPTION.fields_by_name['host_ip'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_NODEDESCRIPTION.fields_by_name['asset_node'].message_type = _ASSETDESCRIPTION
_NODEDESCRIPTION.fields_by_name['process_node'].message_type = _PROCESSDESCRIPTION
_NODEDESCRIPTION.fields_by_name['file_node'].message_type = _FILEDESCRIPTION
_NODEDESCRIPTION.fields_by_name['ip_address_node'].message_type = _IPADDRESSDESCRIPTION
_NODEDESCRIPTION.fields_by_name['outbound_connection_node'].message_type = _OUTBOUNDCONNECTION
_NODEDESCRIPTION.fields_by_name['inbound_connection_node'].message_type = _INBOUNDCONNECTION
_NODEDESCRIPTION.fields_by_name['dynamic_node'].message_type = _DYNAMICNODE
_NODEDESCRIPTION.oneofs_by_name['which_node'].fields.append(
  _NODEDESCRIPTION.fields_by_name['asset_node'])
_NODEDESCRIPTION.fields_by_name['asset_node'].containing_oneof = _NODEDESCRIPTION.oneofs_by_name['which_node']
_NODEDESCRIPTION.oneofs_by_name['which_node'].fields.append(
  _NODEDESCRIPTION.fields_by_name['process_node'])
_NODEDESCRIPTION.fields_by_name['process_node'].containing_oneof = _NODEDESCRIPTION.oneofs_by_name['which_node']
_NODEDESCRIPTION.oneofs_by_name['which_node'].fields.append(
  _NODEDESCRIPTION.fields_by_name['file_node'])
_NODEDESCRIPTION.fields_by_name['file_node'].containing_oneof = _NODEDESCRIPTION.oneofs_by_name['which_node']
_NODEDESCRIPTION.oneofs_by_name['which_node'].fields.append(
  _NODEDESCRIPTION.fields_by_name['ip_address_node'])
_NODEDESCRIPTION.fields_by_name['ip_address_node'].containing_oneof = _NODEDESCRIPTION.oneofs_by_name['which_node']
_NODEDESCRIPTION.oneofs_by_name['which_node'].fields.append(
  _NODEDESCRIPTION.fields_by_name['outbound_connection_node'])
_NODEDESCRIPTION.fields_by_name['outbound_connection_node'].containing_oneof = _NODEDESCRIPTION.oneofs_by_name['which_node']
_NODEDESCRIPTION.oneofs_by_name['which_node'].fields.append(
  _NODEDESCRIPTION.fields_by_name['inbound_connection_node'])
_NODEDESCRIPTION.fields_by_name['inbound_connection_node'].containing_oneof = _NODEDESCRIPTION.oneofs_by_name['which_node']
_NODEDESCRIPTION.oneofs_by_name['which_node'].fields.append(
  _NODEDESCRIPTION.fields_by_name['dynamic_node'])
_NODEDESCRIPTION.fields_by_name['dynamic_node'].containing_oneof = _NODEDESCRIPTION.oneofs_by_name['which_node']
_OUTBOUNDCONNECTION.fields_by_name['asset_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_OUTBOUNDCONNECTION.fields_by_name['hostname'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_OUTBOUNDCONNECTION.fields_by_name['host_ip'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INBOUNDCONNECTION.fields_by_name['asset_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INBOUNDCONNECTION.fields_by_name['hostname'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_INBOUNDCONNECTION.fields_by_name['host_ip'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_PROCESSDESCRIPTION.fields_by_name['asset_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_PROCESSDESCRIPTION.fields_by_name['hostname'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_PROCESSDESCRIPTION.fields_by_name['host_ip'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_FILEDESCRIPTION.fields_by_name['asset_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_FILEDESCRIPTION.fields_by_name['hostname'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_FILEDESCRIPTION.fields_by_name['host_ip'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_IDSTRATEGY.fields_by_name['session'].message_type = _SESSION
_IDSTRATEGY.fields_by_name['static'].message_type = _STATIC
_IDSTRATEGY.oneofs_by_name['strategy'].fields.append(
  _IDSTRATEGY.fields_by_name['session'])
_IDSTRATEGY.fields_by_name['session'].containing_oneof = _IDSTRATEGY.oneofs_by_name['strategy']
_IDSTRATEGY.oneofs_by_name['strategy'].fields.append(
  _IDSTRATEGY.fields_by_name['static'])
_IDSTRATEGY.fields_by_name['static'].containing_oneof = _IDSTRATEGY.oneofs_by_name['strategy']
_NODEPROPERTY.oneofs_by_name['property'].fields.append(
  _NODEPROPERTY.fields_by_name['intprop'])
_NODEPROPERTY.fields_by_name['intprop'].containing_oneof = _NODEPROPERTY.oneofs_by_name['property']
_NODEPROPERTY.oneofs_by_name['property'].fields.append(
  _NODEPROPERTY.fields_by_name['uintprop'])
_NODEPROPERTY.fields_by_name['uintprop'].containing_oneof = _NODEPROPERTY.oneofs_by_name['property']
_NODEPROPERTY.oneofs_by_name['property'].fields.append(
  _NODEPROPERTY.fields_by_name['strprop'])
_NODEPROPERTY.fields_by_name['strprop'].containing_oneof = _NODEPROPERTY.oneofs_by_name['property']
_DYNAMICNODE_PROPERTIESENTRY.fields_by_name['value'].message_type = _NODEPROPERTY
_DYNAMICNODE_PROPERTIESENTRY.containing_type = _DYNAMICNODE
_DYNAMICNODE.fields_by_name['properties'].message_type = _DYNAMICNODE_PROPERTIESENTRY
_DYNAMICNODE.fields_by_name['asset_id'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DYNAMICNODE.fields_by_name['hostname'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DYNAMICNODE.fields_by_name['host_ip'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_DYNAMICNODE.fields_by_name['id_strategy'].message_type = _IDSTRATEGY
_EDGELIST.fields_by_name['edges'].message_type = _EDGEDESCRIPTION
_GRAPHDESCRIPTION_NODESENTRY.fields_by_name['value'].message_type = _NODEDESCRIPTION
_GRAPHDESCRIPTION_NODESENTRY.containing_type = _GRAPHDESCRIPTION
_GRAPHDESCRIPTION_EDGESENTRY.fields_by_name['value'].message_type = _EDGELIST
_GRAPHDESCRIPTION_EDGESENTRY.containing_type = _GRAPHDESCRIPTION
_GRAPHDESCRIPTION.fields_by_name['nodes'].message_type = _GRAPHDESCRIPTION_NODESENTRY
_GRAPHDESCRIPTION.fields_by_name['edges'].message_type = _GRAPHDESCRIPTION_EDGESENTRY
_GENERATEDSUBGRAPHS.fields_by_name['subgraphs'].message_type = _GRAPHDESCRIPTION
DESCRIPTOR.message_types_by_name['Host'] = _HOST
DESCRIPTOR.message_types_by_name['AssetDescription'] = _ASSETDESCRIPTION
DESCRIPTOR.message_types_by_name['NodeDescription'] = _NODEDESCRIPTION
DESCRIPTOR.message_types_by_name['OutboundConnection'] = _OUTBOUNDCONNECTION
DESCRIPTOR.message_types_by_name['InboundConnection'] = _INBOUNDCONNECTION
DESCRIPTOR.message_types_by_name['ProcessDescription'] = _PROCESSDESCRIPTION
DESCRIPTOR.message_types_by_name['FileDescription'] = _FILEDESCRIPTION
DESCRIPTOR.message_types_by_name['IpAddressDescription'] = _IPADDRESSDESCRIPTION
DESCRIPTOR.message_types_by_name['Session'] = _SESSION
DESCRIPTOR.message_types_by_name['Static'] = _STATIC
DESCRIPTOR.message_types_by_name['IdStrategy'] = _IDSTRATEGY
DESCRIPTOR.message_types_by_name['NodeProperty'] = _NODEPROPERTY
DESCRIPTOR.message_types_by_name['DynamicNode'] = _DYNAMICNODE
DESCRIPTOR.message_types_by_name['EdgeDescription'] = _EDGEDESCRIPTION
DESCRIPTOR.message_types_by_name['EdgeList'] = _EDGELIST
DESCRIPTOR.message_types_by_name['GraphDescription'] = _GRAPHDESCRIPTION
DESCRIPTOR.message_types_by_name['GeneratedSubgraphs'] = _GENERATEDSUBGRAPHS
Host = _reflection.GeneratedProtocolMessageType('Host', (_message.Message,), dict(
DESCRIPTOR = _HOST,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.Host)
))
_sym_db.RegisterMessage(Host)
AssetDescription = _reflection.GeneratedProtocolMessageType('AssetDescription', (_message.Message,), dict(
DESCRIPTOR = _ASSETDESCRIPTION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.AssetDescription)
))
_sym_db.RegisterMessage(AssetDescription)
NodeDescription = _reflection.GeneratedProtocolMessageType('NodeDescription', (_message.Message,), dict(
DESCRIPTOR = _NODEDESCRIPTION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.NodeDescription)
))
_sym_db.RegisterMessage(NodeDescription)
OutboundConnection = _reflection.GeneratedProtocolMessageType('OutboundConnection', (_message.Message,), dict(
DESCRIPTOR = _OUTBOUNDCONNECTION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.OutboundConnection)
))
_sym_db.RegisterMessage(OutboundConnection)
InboundConnection = _reflection.GeneratedProtocolMessageType('InboundConnection', (_message.Message,), dict(
DESCRIPTOR = _INBOUNDCONNECTION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.InboundConnection)
))
_sym_db.RegisterMessage(InboundConnection)
ProcessDescription = _reflection.GeneratedProtocolMessageType('ProcessDescription', (_message.Message,), dict(
DESCRIPTOR = _PROCESSDESCRIPTION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.ProcessDescription)
))
_sym_db.RegisterMessage(ProcessDescription)
FileDescription = _reflection.GeneratedProtocolMessageType('FileDescription', (_message.Message,), dict(
DESCRIPTOR = _FILEDESCRIPTION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.FileDescription)
))
_sym_db.RegisterMessage(FileDescription)
IpAddressDescription = _reflection.GeneratedProtocolMessageType('IpAddressDescription', (_message.Message,), dict(
DESCRIPTOR = _IPADDRESSDESCRIPTION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.IpAddressDescription)
))
_sym_db.RegisterMessage(IpAddressDescription)
Session = _reflection.GeneratedProtocolMessageType('Session', (_message.Message,), dict(
DESCRIPTOR = _SESSION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.Session)
))
_sym_db.RegisterMessage(Session)
Static = _reflection.GeneratedProtocolMessageType('Static', (_message.Message,), dict(
DESCRIPTOR = _STATIC,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.Static)
))
_sym_db.RegisterMessage(Static)
IdStrategy = _reflection.GeneratedProtocolMessageType('IdStrategy', (_message.Message,), dict(
DESCRIPTOR = _IDSTRATEGY,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.IdStrategy)
))
_sym_db.RegisterMessage(IdStrategy)
NodeProperty = _reflection.GeneratedProtocolMessageType('NodeProperty', (_message.Message,), dict(
DESCRIPTOR = _NODEPROPERTY,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.NodeProperty)
))
_sym_db.RegisterMessage(NodeProperty)
DynamicNode = _reflection.GeneratedProtocolMessageType('DynamicNode', (_message.Message,), dict(
PropertiesEntry = _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), dict(
DESCRIPTOR = _DYNAMICNODE_PROPERTIESENTRY,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.DynamicNode.PropertiesEntry)
))
,
DESCRIPTOR = _DYNAMICNODE,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.DynamicNode)
))
_sym_db.RegisterMessage(DynamicNode)
_sym_db.RegisterMessage(DynamicNode.PropertiesEntry)
EdgeDescription = _reflection.GeneratedProtocolMessageType('EdgeDescription', (_message.Message,), dict(
DESCRIPTOR = _EDGEDESCRIPTION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.EdgeDescription)
))
_sym_db.RegisterMessage(EdgeDescription)
EdgeList = _reflection.GeneratedProtocolMessageType('EdgeList', (_message.Message,), dict(
DESCRIPTOR = _EDGELIST,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.EdgeList)
))
_sym_db.RegisterMessage(EdgeList)
GraphDescription = _reflection.GeneratedProtocolMessageType('GraphDescription', (_message.Message,), dict(
NodesEntry = _reflection.GeneratedProtocolMessageType('NodesEntry', (_message.Message,), dict(
DESCRIPTOR = _GRAPHDESCRIPTION_NODESENTRY,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.GraphDescription.NodesEntry)
))
,
EdgesEntry = _reflection.GeneratedProtocolMessageType('EdgesEntry', (_message.Message,), dict(
DESCRIPTOR = _GRAPHDESCRIPTION_EDGESENTRY,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.GraphDescription.EdgesEntry)
))
,
DESCRIPTOR = _GRAPHDESCRIPTION,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.GraphDescription)
))
_sym_db.RegisterMessage(GraphDescription)
_sym_db.RegisterMessage(GraphDescription.NodesEntry)
_sym_db.RegisterMessage(GraphDescription.EdgesEntry)
GeneratedSubgraphs = _reflection.GeneratedProtocolMessageType('GeneratedSubgraphs', (_message.Message,), dict(
DESCRIPTOR = _GENERATEDSUBGRAPHS,
__module__ = 'graph_description_pb2'
# @@protoc_insertion_point(class_scope:graph_description.GeneratedSubgraphs)
))
_sym_db.RegisterMessage(GeneratedSubgraphs)
_DYNAMICNODE_PROPERTIESENTRY.has_options = True
_DYNAMICNODE_PROPERTIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_GRAPHDESCRIPTION_NODESENTRY.has_options = True
_GRAPHDESCRIPTION_NODESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_GRAPHDESCRIPTION_EDGESENTRY.has_options = True
_GRAPHDESCRIPTION_EDGESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
4823,
62,
11213,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
62,
65,
28,
17597,
13,
9641,
62,
10951,
58,
15,
60,
27,
18,
290,
357,
500... | 2.436953 | 29,502 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on March 6, 2021
@author: Mohammad Asif Zaman
- April 10, 2021
- Added optical spot ON-OFF text
"""
import numpy as np
import pylab as py
import matplotlib as plt
from parameters import *
# Module global Parameters:
# =============================================================================
# Force parameters
r_active = 0
n_order = 1 # Order of the Gaussian potential = 2n
w_well = 10e-6 # 1/e *max width of the potential well
A_well = 4000*k_B*T # well depth
# Particle parameters (number and raidus array)
# =============================================================================
# def draw_yz(tm):
# substrate_yz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
# py.gca().add_patch(substrate_yz)
# def draw_xz(tm):
# substrate_xz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
# py.gca().add_patch(substrate_xz)
# This is function that is called from the main program
# Simplified spring force model
# force_plot()
# draw_source(9)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2805,
718,
11,
33448,
198,
198,
31,
9800,
25,
29674,
1081,
361,
1168,
10546,
628,
198,
12... | 2.52621 | 496 |
import datetime
import random
import json
# base = datetime.datetime.today()
# date_list = [base - datetime.timedelta(days=x) for x in range(0, 100*365)]
# lt = {}
# for d in date_list:
# print str(d)[:10]
# lt[str(d)[:10]] = int(10000*random.random())
# with open('test.json', 'w') as f:
# f.write(json.dumps(lt))
with open('untitled.json') as f:
text = f.read()
print len(json.loads(text)) | [
11748,
4818,
8079,
198,
11748,
4738,
198,
11748,
33918,
198,
198,
2,
2779,
796,
4818,
8079,
13,
19608,
8079,
13,
40838,
3419,
198,
2,
3128,
62,
4868,
796,
685,
8692,
532,
4818,
8079,
13,
16514,
276,
12514,
7,
12545,
28,
87,
8,
329,
... | 2.421687 | 166 |
from __future__ import absolute_import
from __future__ import unicode_literals
from memoized import memoized
from django import forms
from django.utils.translation import ugettext as _, ugettext_noop, ugettext_lazy
from crispy_forms.layout import Submit
from crispy_forms import layout as crispy
from corehq.apps.hqwebapp import crispy as hqcrispy
from corehq.apps.integration.models import SimprintsIntegration
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
16155,
1143,
1330,
16155,
1143,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
... | 3.495798 | 119 |
# Generated by Django 2.0.9 on 2019-02-22 15:06
from django.db import migrations, models
import wagtail.search.index
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
24,
319,
13130,
12,
2999,
12,
1828,
1315,
25,
3312,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
266,
363,
13199,
13,
12947,
13,
9630,
628
] | 2.902439 | 41 |
from random import randint
#con init clases de numeros complejos
#clase con init
# complejo=Complejo(3,4)
# print(complejo.getimaginary(9,3))
# print(complejo.suma(3,6))
# print(complejo.resta(3,6))
# print(complejo.multiplicacion(3,6))
# print(complejo.division(3,6))
#clase sin init de numero complejo
# class Complejo:
# def getimaginary(self,r,i):
# return str(r)+ "+" + str(i)+"i"
# def suma(self,x,y):
# real=int(x[0:1])+ int(y[0:1])
# imaginary=int(x[2:])+int(y[2:])
# complejo=str(real)+"+"+str(imaginary)+self.i
# return (complejo)
# complejo=Complejo()
# print(complejo.getimaginary(9,3))
# numero1=input("ingrese un numero complejo para sumar")
# numero2=input("ingrese el segundo valor")
# print(complejo.suma(numero1,numero2))
#idem para el resto de operaciones, pedir valores
#ejercicio 2 clase vector de 3 dimensiones
# vector=Vector()
# vec1=input("ingrese un vector ejemplo a,b,c")
# vec2=input("ingree otro vector")
# num=input("ingrese un numero para multiplicar y/o dividir")
# print(vector.suma(vec1,vec2))
# print(vector.resta(vec1,vec2))
# print(vector.multiplicacion(vec1,num))
# print(vector.dividir(vec1,num))
#ejercicio3 clases matrices
# def multiplicacion(self,vec):
dimension = int(input('Ingrese dimensión de la matriz cuadrada: '))
dimensionSegunda = int(input('Ingrese dimensión de la matriz cuadrada: '))
matriz=Matriz(dimension)
matrizSegunda=Matriz(dimensionSegunda)
# print(matriz)
matriz.representacion()
print('\n *********** \n')
matrizSegunda.representacion()
print('\n *********** \n')
#para que la representacion sea igual a las matrices
suma = matriz.sumaMatriz(matrizSegunda)
for fila in suma:
print(fila)
| [
6738,
4738,
1330,
43720,
600,
201,
198,
201,
198,
2,
1102,
2315,
537,
1386,
390,
5470,
418,
1224,
73,
418,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
201,
198,
220,
220,
220,
220,
201,
198,
201,
198,
201,
198,
2,
565... | 2.153667 | 859 |
"""
Summarize a column total cases column and total deaths column
Country by country data in columns, sum up and match global totals
"""
import csv
import pandas
pandas.set_option("display.max_rows", None, "display.max_columns", None)
col_list = ["Total Cases", "Country/ Other", "Total Deaths", "# 9/27/2020"]
df = pandas.read_csv("covidmilliondead.csv", usecols=col_list, thousands=',')
totalCases, totalDeaths = 0,0
for idx, cases,deaths in zip(df["# 9/27/2020"], df["Total Cases"], df["Total Deaths"]):
if idx > 0:
totalCases += cases
if deaths > 0:
totalDeaths += deaths
for idx, country, cases, deaths in zip(df["# 9/27/2020"], df["Country/ Other"], df["Total Cases"], df["Total Deaths"]):
if idx > 0:
print("\n",country)
print("Cases : ", cases, "/", totalCases, " %", "{:.5%}".format(cases/totalCases))
if deaths > 0:
print("Deaths : ", int(deaths), "/", totalDeaths, " %", "{:.5%}".format(deaths/totalDeaths))
print("")
print("Total Cases")
print(totalCases)
print("Total Deaths")
print(totalDeaths) | [
37811,
198,
13065,
3876,
1096,
257,
5721,
2472,
2663,
5721,
290,
2472,
7040,
5721,
220,
198,
33921,
416,
1499,
1366,
287,
15180,
11,
2160,
510,
290,
2872,
3298,
26310,
198,
37811,
198,
11748,
269,
21370,
220,
198,
11748,
19798,
292,
198... | 2.582938 | 422 |
from flask import request, jsonify
from api import api, core
@api.route('/v1/service/deploy', methods=['POST'])
@api.route('/v1/service/<name>', methods=['DELETE'])
@api.route('/v1/service/<name>', methods=['GET'])
@api.route('/v1/service/<name>/scale/<replicas>', methods=['POST'])
@api.route('/v1/service/<name>/scale-up', methods=['POST'])
@api.route('/v1/service/<name>/scale-down', methods=['POST'])
| [
6738,
42903,
1330,
2581,
11,
33918,
1958,
198,
6738,
40391,
1330,
40391,
11,
4755,
198,
198,
31,
15042,
13,
38629,
10786,
14,
85,
16,
14,
15271,
14,
2934,
1420,
3256,
5050,
28,
17816,
32782,
6,
12962,
198,
198,
31,
15042,
13,
38629,
... | 2.429412 | 170 |
# _________________________________________________________________________
#
# Water Security Toolkit (WST)
# Copyright (c) 2012 Sandia Corporation.
# This software is distributed under the Revised BSD License.
# Under the terms of Contract DE-AC04-94AL85000, there is a non-exclusive
# license for use of this work by or on behalf of the U.S. government.
# For more information, see the License Notice in the WST User Manual.
# _________________________________________________________________________
#
import os, sys, datetime
import pyutilib.subprocess
import yaml, json
import time
import logging
import itertools
import pprint
import imp
import pywst.common.problem
import pywst.common.wst_util as wst_util
import pywst.common.wst_config as wst_config
from pyutilib.misc.config import ConfigBlock
import pywst.visualization.inp2svg as inp2svg
from pyomo.environ import *
logger = logging.getLogger('wst.inversion')
try:
import pyepanet
except ImportError:
pyepanet = {}
#raise RuntimeError("EPANET DLL is missing or corrupt. Please reinstall PyEPANET.")
| [
2,
220,
220,
27193,
2602,
62,
201,
198,
2,
201,
198,
2,
220,
5638,
4765,
16984,
15813,
357,
54,
2257,
8,
201,
198,
2,
220,
15069,
357,
66,
8,
2321,
3837,
544,
10501,
13,
201,
198,
2,
220,
770,
3788,
318,
9387,
739,
262,
31492,
... | 3.142061 | 359 |
from django import template
register = template.Library()
@register.filter(name='get_public_notes')
| [
6738,
42625,
14208,
1330,
11055,
198,
30238,
796,
11055,
13,
23377,
3419,
628,
198,
31,
30238,
13,
24455,
7,
3672,
11639,
1136,
62,
11377,
62,
17815,
11537,
198
] | 3.642857 | 28 |
import os
import time
import numpy as np
import tensorflow as tf
from eight_mile.tf.layers import reload_checkpoint
from eight_mile.tf.optz import optimizer
from baseline.tf.tfy import TRAIN_FLAG, SET_TRAIN_FLAG
from baseline.train import Trainer, register_trainer
from baseline.model import create_model_for
from collections import OrderedDict
# Number of batches to prefetch if using tf.datasets
NUM_PREFETCH = 2
# The shuffle buffer
SHUF_BUF_SZ = 5000
_EVENT_FILE_GLOB_PATTERN = 'events.out.tfevents.*'
def _summaries(eval_dir):
"""Yields `tensorflow.Event` protos from event files in the eval dir.
Args:
eval_dir: Directory containing summary files with eval metrics.
Yields:
`tensorflow.Event` object read from the event files.
"""
if tf.gfile.Exists(eval_dir):
for event_file in tf.gfile.Glob(os.path.join(eval_dir, _EVENT_FILE_GLOB_PATTERN)):
for event in tf.train.summary_iterator(event_file):
yield event
def read_eval_metrics(eval_dir):
"""Helper to read eval metrics from eval summary files.
Args:
eval_dir: Directory containing summary files with eval metrics.
Returns:
A `dict` with global steps mapping to `dict` of metric names and values.
"""
eval_metrics_dict = {}
for event in _summaries(eval_dir):
if not event.HasField('summary'):
continue
metrics = {}
for value in event.summary.value:
if value.HasField('simple_value'):
metrics[value.tag] = value.simple_value
if metrics:
eval_metrics_dict[event.step] = metrics
return OrderedDict(sorted(eval_metrics_dict.items(), key=lambda t: t[0]))
def to_tensors(ts):
"""Convert a data feed into a tuple of `features` (`dict`) and `y` values
This method is required to produce `tf.dataset`s from the input data feed.
Any fields ending with `_lengths` are ignored, unless they match the
`src_lengths_key` or `tgt_lengths_key`, in which case, they are converted to `src_len` and `tgt_len`
:param ts: The data feed to convert
:param lengths_key: This is a field passed from the model params specifying source of truth of the temporal lengths
:return: A `tuple` of `features` and `y` (labels)
"""
keys = ts[0].keys()
# This is kind of a hack
keys = [k for k in keys if k != 'ids']
features = dict((k, []) for k in keys)
for sample in ts:
for k in features.keys():
for s in sample[k]:
features[k].append(s)
features = dict((k, np.stack(v).astype(np.int32)) for k, v in features.items())
tgt = features.pop('y')
return features, tgt
@register_trainer(task='lm', name='default')
class LanguageModelTrainerTf(Trainer):
"""A Trainer to use if not using eager mode
The trainer can run in 2 modes: `dataset` and `feed_dict`. When the former, the graph is assumed to
be connected by features attached to the input so the `feed_dict` will only be used to pass dropout information.
When the latter, we will use the baseline DataFeed to read the object into the `feed_dict`
"""
def checkpoint(self):
"""This method saves a checkpoint
:return: None
"""
checkpoint_dir = '{}-{}'.format("./tf-lm", os.getpid())
self.model.saver.save(self.sess,
os.path.join(checkpoint_dir, 'lm'),
global_step=self.global_step,
write_meta_graph=False)
def recover_last_checkpoint(self):
"""Recover the last saved checkpoint
:return: None
"""
checkpoint_dir = '{}-{}'.format("./tf-lm", os.getpid())
latest = tf.train.latest_checkpoint(checkpoint_dir)
self.model.saver.restore(self.model.sess, latest)
@staticmethod
def train(self, ts, reporting_fns, dataset=True):
"""Train by looping over the steps
For a `tf.dataset`-backed `fit_func`, we are using the previously wired `dataset`s
in the model (and `dataset` is `True`). For `feed_dict`, we convert the ts samples
to `feed_dict`s and hand them in one-by-one
:param ts: The training set
:param reporting_fns: A list of reporting hooks
:param dataset: (`bool`) Are we using `tf.dataset`s
:return: Metrics
"""
epoch_loss = 0.0
epoch_toks = 0
if self.model.requires_state:
state = self.model.sess.run(self.model.initial_state, self.model.make_input(ts[0], True))
fetches = {
"loss": self.loss,
"train_op": self.train_op,
"global_step": self.global_step
}
if self.model.requires_state:
fetches["final_state"] = self.model.final_state
start = time.time()
self.nstep_start = start
for batch_dict in ts:
if dataset:
feed_dict = {TRAIN_FLAG(): 1}
else:
feed_dict = self.model.make_input(batch_dict, True)
_, global_step, lossv = self.sess.run([self.train_op, self.global_step, self.loss], feed_dict=feed_dict)
# In Keras LSTM, the order is h first, c second, its the opposite in TF 1, however I dont think it
# ends up mattering here
if self.model.requires_state:
for i, (s1, s2) in enumerate(self.model.initial_state):
feed_dict[s1] = state[i][0] #.c # 0
feed_dict[s2] = state[i][1] #.h # 1
vals = self.model.sess.run(fetches, feed_dict)
loss = vals["loss"]
if self.model.requires_state:
state = vals["final_state"]
global_step = vals["global_step"]
toks = self._num_toks(batch_dict)
report_loss = loss * toks
epoch_loss += report_loss
epoch_toks += toks
self.nstep_agg += report_loss
self.nstep_div += toks
if (global_step + 1) % self.nsteps == 0:
metrics = self.calc_metrics(self.nstep_agg, self.nstep_div)
self.report(
global_step + 1, metrics, self.nstep_start,
'Train', 'STEP', reporting_fns, self.nsteps
)
self.reset_nstep()
metrics = self.calc_metrics(epoch_loss, epoch_toks)
self.train_epochs += 1
self.report(
self.train_epochs, metrics, start,
'Train', 'EPOCH', reporting_fns
)
return metrics
def test(self, vs, reporting_fns, phase, dataset=True):
"""Run an epoch of testing over the dataset
If we are using a `tf.dataset`-based `fit_func`, we will just
cycle the number of steps and let the `dataset` yield new batches.
If we are using `feed_dict`s, we convert each batch from the `DataFeed`
and pass that into TF as the `feed_dict`
:param vs: A validation set
:param reporting_fns: Reporting hooks
:param phase: The phase of evaluation (`Test`, `Valid`)
:param dataset: (`bool`) Are we using `tf.dataset`s
:return: Metrics
"""
total_loss = 0.0
total_toks = 0
epochs = 0
if phase == 'Valid':
self.valid_epochs += 1
epochs = self.valid_epochs
if self.model.requires_state:
state = self.model.sess.run(self.model.initial_state, self.model.make_input(vs[0], False))
fetches = {
"loss": self.test_loss,
}
if self.model.requires_state:
fetches["final_state"] = self.model.final_state
start = time.time()
for batch_dict in vs:
feed_dict = {}
if not dataset:
feed_dict = self.model.make_input(batch_dict, False)
# In Keras LSTM, the order is h first, c second, its the opposite in TF 1, however I dont think it
# ends up mattering here
if self.model.requires_state:
for i, (s1, s2) in enumerate(self.model.initial_state):
feed_dict[s1] = state[i][0] # .c # 0
feed_dict[s2] = state[i][1] # .h # 1
vals = self.model.sess.run(fetches, feed_dict)
loss = vals["loss"]
toks = self._num_toks(batch_dict)
if self.model.requires_state:
state = vals["final_state"]
total_loss += loss * toks
total_toks += toks
metrics = self.calc_metrics(total_loss, total_toks)
self.report(
epochs, metrics, start,
phase, 'EPOCH', reporting_fns
)
return metrics
| [
11748,
28686,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
3624,
62,
18085,
13,
27110,
13,
75,
6962,
1330,
18126,
62,
9122,
4122,
198,
6738,
3624,
62,
18085,
13,
27110,
13,
... | 2.200904 | 3,982 |
import paho.mqtt.client as mqtt
if __name__ == '__main__':
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect('localhost', 1883, 60)
client.loop_forever()
| [
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
926,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
5456,
796,
285,
80,
926,
13,
11792,
3419,
198,
220,
220,
220,
5456,
13,... | 2.516484 | 91 |
# coding: utf-8
import parsingconfig
from jpype import *
import sys
import socket
import socks
from io import BytesIO
import struct
#Since we deliver message from java module to python module,
#I think it is ok to just use this socket function to directly
#deliver and process the message
#Need to figure out whether it is true.
if __name__ == '__main__':
if len(sys.argv[1:])<1:
print "Use: python BFTServer.py <ReplicaID>"
exit()
replicaID = sys.argv[1]
(n,f,host,baseport) = parsingconfig.readconfig() #Read in the config number of replicas, failures, host, and port number.
sock = connect_to_channel(host,baseport,replicaID) #The parameters to connect_to_channel are (hostname,port,id)
#original classpath:
#classpath = "lib/commons-codec-1.5.jar:lib/core-0.1.4.jar:lib/netty-all-4.1.9.Final.jar:lib/slf4j-api-1.5.8.jar:lib/slf4j-jdk14-1.5.8.jar:bft-smart/bin/BFT-SMaRt.jar"
#James. Changed classpath, specifically the path to BFT-SMaRt.jar. Commented out the original
classpath = "lib/commons-codec-1.5.jar:lib/core-0.1.4.jar:lib/netty-all-4.1.9.Final.jar:lib/slf4j-api-1.5.8.jar:lib/slf4j-jdk14-1.5.8.jar:bin/BFT-SMaRt.jar"
startJVM(getDefaultJVMPath(),"-Djava.class.path=%s"%classpath)
KVServerClass = JPackage("bftsmart.demo.keyvalue") #Create instance of KVServer class from the demo/keyvalue/KVServer.java class
KVServerClass.KVServer.passArgs((replicaID,"1")) #James. TO DO: Change this call to include host and port number.
listen_to_channel(sock,replicaID)
# and you have to shutdown the VM at the end
shutdownJVM()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
32096,
11250,
198,
6738,
474,
79,
2981,
1330,
1635,
198,
11748,
25064,
198,
11748,
17802,
198,
11748,
24359,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
11748,
2878,
198,
198,
2,
6385,
35... | 2.450075 | 671 |
from setuptools import setup
setup(
name='flickr-archive-extractor',
version='0.1.1',
install_requires=[],
tests_require=['nose>=1.3', 'pycodestyle'],
test_suite='nose.collector',
scripts=['flickr_archive_extractor.py'],
author='Nikita Kovaliov',
author_email='nikita@maizy.ru',
description='flickr archive extractor',
license='Apache License 2.0',
keywords='flickr',
url='https://github.com/maizy/flickr-archive-extractor',
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
2704,
18994,
12,
17474,
12,
2302,
40450,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
16,
3256,
198,
220,
220,
220,
2721,
62,
47911,... | 2.492147 | 191 |
#!/usr/bin/python
#####################################################################################
# Copyright (c) 2022 Marijn Heule, Randal E. Bryant, Carnegie Mellon University
# Last edit: March 23, 2022
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
########################################################################################
# Time the execution of a program. Force termination if that program exceeds a time limit
import sys
import subprocess
import datetime
import os.path
name = sys.argv[0]
arglist = sys.argv[1:]
run(name, arglist)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
29113,
29113,
14468,
4242,
2,
198,
2,
15069,
357,
66,
8,
33160,
1526,
48848,
679,
2261,
11,
371,
7642,
412,
13,
16754,
11,
33976,
49808,
2059,
198,
2,
4586,
4370,
25,
2805,
2242,
11,
... | 3.911548 | 407 |
"""Environment testing non_RL scenario 1 of the bayesian envs."""
import numpy as np
from gym.spaces.box import Box
from flow.core.rewards import desired_velocity
from flow.envs.ring.accel import AccelEnv
from flow.core.kernel import Kernel
ADDITIONAL_ENV_PARAMS = {
# maximum acceleration for autonomous vehicles, in m/s^2
'max_accel': 3,
# maximum deceleration for autonomous vehicles, in m/s^2
'max_decel': 3,
# desired velocity for all vehicles in the network, in m/s
'target_velocity': 10,
# specifies whether vehicles are to be sorted by position during a
# simulation step. If set to True, the environment parameter
# self.sorted_ids will return a list of all vehicles sorted in accordance
# with the environment
'sort_vehicles': False
}
class Bayesian1Env(AccelEnv):
"""Specify the starting positions and routes of 3 cars and 1 pedestrian"""
| [
37811,
31441,
4856,
1729,
62,
7836,
8883,
352,
286,
262,
15489,
35610,
551,
14259,
526,
15931,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11550,
13,
2777,
2114,
13,
3524,
1330,
8315,
198,
6738,
5202,
13,
7295,
13,
260,
2017,
1330,
... | 3.198582 | 282 |
import pandas as pd
di_matrix = pd.DataFrame() | [
11748,
19798,
292,
355,
279,
67,
198,
10989,
62,
6759,
8609,
796,
279,
67,
13,
6601,
19778,
3419
] | 2.555556 | 18 |
import numpy as np
from .conformalmap import ConformalMap
from .closedcurve import ClosedCurve
from .unitdisk import unitdisk
from .region import Region
from .szego import Szego, SzegoOpts
class SzMap(ConformalMap):
"""SzMap represents a Riemann map via the Szego kernel.
"""
def __init__(self, range=None, conformalCenter=0, **kwargs):
"""Create a new conformal map based on the Szego kernel
Parameters
----------
range : Region or ClosedCurve
an object that represents the range of the map
conformalCenter : complex
the conformal center (forward to the szego kernel)
"""
if isinstance(range, ClosedCurve):
range = Region(range)
if not range.issimplyconnected():
raise Exception('Region must be simply connected')
kwargs['range'] = range
kwargs['domain'] = unitdisk()
super(SzMap, self).__init__(**kwargs)
boundary = self.range.outer
# question, how to alter these?
szargs = SzegoOpts()
S = Szego(boundary, conformalCenter, szargs)
nF = szargs.numFourierPts
t = S.invtheta(2*np.pi*np.arange(nF)/float(nF))
c = np.fft.fft(boundary(t))/float(nF)
c = c[::-1]
self._kernel = S
self._coefficients = c
self._opts = szargs
| [
11748,
299,
32152,
355,
45941,
198,
6738,
764,
1102,
687,
282,
8899,
1330,
1482,
687,
282,
13912,
198,
6738,
764,
20225,
22019,
303,
1330,
30550,
26628,
303,
198,
6738,
764,
20850,
39531,
1330,
4326,
39531,
198,
6738,
764,
36996,
1330,
... | 2.285235 | 596 |
from setuptools import setup, find_packages
from os import path
from ctlearn.version import *
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(name='ctlearn',
version=get_version_pypi(),
author="CTLearn Team",
author_email="d.nieto@ucm.es",
description='Deep learning for analysis and classification of image data for Imaging Atmospheric Cherenkov Telescopes, especially the Cherenkov Telescope Array (CTA).',
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/ctlearn-project/ctlearn',
license='BSD-3-Clause',
packages=['ctlearn'],
entry_points = {
'console_scripts': ['ctlearn=ctlearn.run_model:main'],
},
include_package_data=True,
dependencies=[],
dependency_links=[],
zip_safe=False)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
28686,
1330,
3108,
198,
6738,
269,
7100,
1501,
13,
9641,
1330,
1635,
198,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
40... | 2.647887 | 355 |
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 23:54:45 2018
@author: SilverDoe
"""
from tkinter import messagebox
res = messagebox.askquestion('Message title','Message content')
res = messagebox.askyesno('Message title','Message content')
res = messagebox.askyesnocancel('Message title','Message content')
res = messagebox.askokcancel('Message title','Message content')
res = messagebox.askretrycancel('Message title','Message content') | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
1737,
3261,
2242,
25,
4051,
25,
2231,
2864,
198,
198,
31,
9800,
25,
7698,
35,
2577,
198,
37811,
628,
198,
6738,
256,
74,
3849,
1330,
327... | 3.407692 | 130 |
from django.test import TestCase, TransactionTestCase
from .models import Notes, Diary
from django.utils import timezone
from rest_framework.test import APIRequestFactory, APIClient
from django.contrib.auth.models import User
from freezegun import freeze_time
from rest_framework_jwt.views import obtain_jwt_token
import json
class ModelTest(TransactionTestCase):
"""
Test all models
"""
current_date_time = timezone.now()
reset_sequences = True
# class AuthTest(TestCase):
# """
# Test JWT auth (now I am thinking , do I really need this test ? :/ )
# """
# current_date_time = timezone.now()
#
# def setUp(self):
# User.objects.create_user('hiren', 'a@b.com', 'password')
# tag = Tag.objects.create(name="Test tag")
# Notes.objects.create(tag=tag, content="test content ", date=self.current_date_time)
# Diary.objects.create(tag=tag, title="Hello title", content="test content", date=self.current_date_time)
#
# self.factory = APIRequestFactory()
#
# def test_jwt_auth(self):
# request = self.factory.post('/api-token-auth/', {'username': 'hiren', 'password': 'password'})
# response = obtain_jwt_token(request)
# response.render()
# self.assertEqual(response.status_code, 200)
class NotesViewTest(TransactionTestCase):
"""
Test Notes View
"""
reset_sequences = True
current_date_time = timezone.now()
# current_date_time = "2017-01-14T00:00:00Z"
@freeze_time("2012-01-14")
@freeze_time("2012-01-14")
# @freeze_time("2012-01-14")
# def test_new_note_creation_works(self):
# response = self.client.post('/api/notes/', data={'tag': ["xyz"], 'iv': 'random', 'content': "New content",
# 'salt': 'sa', 'date': "2012-01-14T00:00:00"}, format="json")
# print(response.json())
# self.assertEqual(response.json(), {'id': 2, 'tag': [self.tag], 'iv': 'random', 'content': "New content",
# 'date': '2012-01-14T00:00:00',
# 'created_at': '2012-01-14T00:00:00',
# 'updated_at': '2012-01-14T00:00:00'})
#
# def test_deleting_note_works(self):
# #self.client.post('/api/notes/', data={'tag': [self.tag], 'iv': 'random', 'content': "New content !",
# # 'date': self.current_date_time})
# response = self.client.delete('/api/notes/1/')
# self.assertEqual(response.status_code, 204)
#
#
# class DiaryViewTest(TransactionTestCase):
# """
# Test Diary View
# """
# reset_sequences = True
# current_date_time = timezone.now()
#
# def setUp(self):
# self.client = APIClient()
# self.user = User.objects.create_user('hiren', 'a@b.com', 'password')
# self.client.force_authenticate(user=self.user)
# self.tag = "Test tag"
# Diary.objects.create(tag=self.tag, title="Hello title", content="test content", date=self.current_date_time)
#
# def test_login_works(self):
# response = self.client.get('/api/diary/')
# self.assertEqual(response.status_code, 200)
#
# self.client.logout()
# response = self.client.get('/api/diary/')
# self.assertEqual(response.status_code, 403)
#
# def test_return_correct_diary_object(self):
# response = self.client.get('/api/diary/1/')
# self.assertEqual(response.json(), {'content': 'test content', 'id': 1,
# 'tag': 1, 'title': 'Hello title', 'date': self.current_date_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')})
#
# def test_diary_update_works(self):
# response = self.client.patch('/api/diary/1/', data={'content': 'Updated content'})
# self.assertEqual(response.json(), {'content': 'Updated content', 'id': 1,
# 'tag': 1, 'title': 'Hello title', 'date': self.current_date_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')})
#
# def test_new_diary_creation_works(self):
# response = self.client.post('/api/diary/', data={'tag': self.tag.id, 'content': "New content",
# 'date': self.current_date_time, 'title': 'New Title'})
# self.assertEqual(response.json(), {'id': 2, 'tag': self.tag.id, 'content': "New content",
# 'date': self.current_date_time.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), 'title': 'New Title' })
#
# def test_deleting_diary_works(self):
# self.client.post('/api/diary/', data={'tag': self.tag.id, 'content': "New content !",
# 'date': self.current_date_time, 'title': 'Delete me :D '})
# response = self.client.delete('/api/diary/2/')
# self.assertEqual(response.status_code, 204)
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
11,
45389,
14402,
20448,
198,
6738,
764,
27530,
1330,
11822,
11,
35911,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
7824,
18453,
22810,... | 2.101774 | 2,368 |
from gameInitialization import *
from getAndDoActions import *
from checkState import *
from voting import *
from displayAndPrompt import *
import Globals
teamInfoList = promptTeams()
teams = createTeams(teamInfoList)
roleList = []
for team in teams:
roleList += getListOfTeamRoles(team)
playerInfoList = getPlayerNames(roleList)
players = addPlayersToTeams(playerInfoList)
Day = False
for team in teams:
team.cleanUp()
cleanUpGlobalTraits(players)
while True:
alivePlayers = getSpecificPlayers(players, {'alive' : True})
if Day:
voteInfoList = getVotes(alivePlayers)
applyVotes(voteInfoList)
voteCountList = countVotes(alivePlayers)
displayVotes()
killedOff = killTopVoted(voteCountList)
displayKilledOff(killedOff)
Day = False
else:
actionInfoList = getActions(alivePlayers)
actionDict = primeActions(actionInfoList)
actionOrderPlayers = orderPlayers(actionDict)
doActions(actionOrderPlayers)
for team in teams:
team.cleanUp()
cleanUpGlobalTraits(players)
Day = True
winners = checkWinners(teams, players)
if winners:
print('The winners are:')
displayTeams(winners)
break | [
6738,
983,
24243,
1634,
1330,
1635,
198,
6738,
651,
1870,
5211,
32,
2733,
1330,
1635,
198,
6738,
2198,
9012,
1330,
1635,
198,
6738,
6709,
1330,
1635,
198,
6738,
3359,
1870,
24129,
457,
1330,
1635,
198,
11748,
40713,
874,
198,
198,
15097... | 2.929319 | 382 |
import math,random,pygame
from pygame.locals import *
def combineVectors(vector1, vector2):
""" Adds together two vectors given as an angle plus a magnitude (length)"""
(angle1, length1) = vector1
(angle2, length2) = vector2
x = math.sin(angle1) * length1
y = math.cos(angle1) * length1
x1 = x + math.sin(angle2) * length2
y1 = y + math.cos(angle2) * length2
angle = 0.5*math.pi - math.atan2(y1, x1)
length = math.hypot(x1, y1)
return (angle, length)
# Set up Pygame variables
pygame.init()
BG_colour = (0,0,0)
particle_colour = (200,200,200)
(width, height) = (480, 360)
screen = pygame.display.set_mode((width, height))
number_of_particles = 170
particles = []
for p in range(number_of_particles):
mass = random.randint(1, 4)
#mass = 1
x = random.randrange(0, width)
y = random.randrange(0, height)
particles.append(Particle(x, y, mass))
running = True
while running:
for event in pygame.event.get():
if event.type == QUIT:
running = False
screen.fill(BG_colour)
for i in range(number_of_particles):
j = i+1
while j < number_of_particles:
collide = particles[i].attract(particles[j])
if collide != None:
particles.remove(collide)
number_of_particles -= 1
else:
j += 1
for p in particles:
p.move()
# if p.size < 1:
# screen.set_at((int(p.x), int(p.y)), particle_colour)
if p.size < 2:
pygame.draw.rect(screen, p.colour, (int(p.x), int(p.y), 2, 2))
else:
pygame.draw.circle(screen, p.colour, (int(p.x), int(p.y)), p.size, 0)
pygame.display.flip()
for p in particles:
dx = math.sin(p.angle) * p.speed
dy = math.cos(p.angle) * p.speed
print ("(%d, %d)\t(dx=%f, dy=%f)\tmass = %d" % (p.x, p.y, dx, dy, p.mass))
| [
11748,
10688,
11,
25120,
11,
9078,
6057,
198,
6738,
12972,
6057,
13,
17946,
874,
1330,
1635,
198,
198,
4299,
12082,
53,
478,
669,
7,
31364,
16,
11,
15879,
17,
2599,
198,
220,
220,
220,
37227,
34333,
1978,
734,
30104,
1813,
355,
281,
... | 2.11479 | 906 |
import numpy as np
import schnell as snl
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'sans-serif',
'sans-serif': ['Helvetica']})
rc('text', usetex=True)
t_obs = 1
f_ref = 63
nside = 64
obs_time = t_obs*365*24*3600.
freqs = np.linspace(10., 1010., 101)
dets = [snl.GroundDetector('Hanford', 46.4, -119.4, 171.8,
'data/aLIGO.txt'),
snl.GroundDetector('Livingstone', 30.7, -90.8, 243.0,
'data/aLIGO.txt'),
snl.GroundDetector('Virgo', 43.6, 10.5, 116.5,
'data/Virgo.txt'),
snl.GroundDetector('KAGRA', 36.3, 137.2, 225.0,
'data/KAGRA.txt')]
print("0")
mc = snl.MapCalculator(dets, f_pivot=f_ref,
spectral_index=0.)
nl_a0 = mc.get_N_ell(obs_time, freqs, nside, no_autos=True)
print("2/3")
mc = snl.MapCalculator(dets, f_pivot=f_ref,
spectral_index=2./3.)
nl_a2o3 = mc.get_N_ell(obs_time, freqs, nside, no_autos=True)
print("3")
mc = snl.MapCalculator(dets, f_pivot=f_ref,
spectral_index=3.)
nl_a3 = mc.get_N_ell(obs_time, freqs, nside, no_autos=True)
ls = np.arange(3*nside)
plt.figure()
plt.plot(ls, (ls+0.5)*nl_a3, 'k--', label=r'$\alpha=3$')
plt.plot(ls, (ls+0.5)*nl_a2o3, 'k-', label=r'$\alpha=2/3$')
plt.plot(ls, (ls+0.5)*nl_a0, 'k:', label=r'$\alpha=0$')
plt.loglog()
plt.xlabel(r'$\ell$', fontsize=16)
plt.ylabel(r'$(\ell+1/2)\,N_\ell$', fontsize=16)
plt.ylim([3E-20, 1E-10])
plt.xlim([1, 100])
plt.legend(loc='upper left', fontsize='x-large', frameon=False)
plt.gca().tick_params(labelsize="large")
plt.savefig("Nell_alphas.pdf", bbox_inches='tight')
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
264,
1349,
695,
355,
3013,
75,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
1330,
48321,
198,
6015,
10786,
10331,
3256,
12429,
90,
6,
17989,
10354... | 1.780962 | 977 |
from kernel.models.tools import Base, tool_engine
from sqlalchemy.orm import sessionmaker
user_engine = tool_engine(tool)
user_session = sessionmaker(bind = user_engine)
user_session = user_session()
Base.metadata.create_all(user_engine) | [
6738,
9720,
13,
27530,
13,
31391,
1330,
7308,
11,
2891,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
6246,
10297,
198,
198,
7220,
62,
18392,
796,
2891,
62,
18392,
7,
25981,
8,
198,
7220,
62,
29891,
796,
6246,
10297,
7,
21... | 3.30137 | 73 |
import numpy as np
import pandas as pd
from datetime import timedelta
from datetime import datetime
from io import StringIO
from urllib import request as url_request
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from scipy.optimize import curve_fit
# This stuff because pandas or matplot lib complained...
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from scipy import stats,integrate
from scipy.optimize import curve_fit
from scipy import stats,integrate
def logistic(x, L, k, x0, y0):
"""
General Logistic function.
Args:
x float or array-like, it represents the time
L float, the curve's maximum value
k float, the logistic growth rate or steepness of the curve.
x0 float, the x-value of the sigmoid's midpoint
y0 float, curve's shift in the y axis
"""
y = L / (1 + np.exp(-k*(x-x0))) + y0
return y
def logistic_derivative(x, L, k, x0):
"""
General Gaussian-like function (derivative of the logistic).
Args:
x float or array-like, it represents the time
L float, the curve's integral (area under the curve)
k float, the logistic growth rate or steepness of the curve.
x0 float, the x-value of the max value
"""
y = k * L * (np.exp(-k*(x-x0))) / np.power(1 + np.exp(-k*(x-x0)), 2)
return y
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
33245,
1330,
10903,
9399,
198,
6738,
2956,
297,
571,
1330,
2581,
355,
19... | 2.606171 | 551 |
# coding: utf-8
def bids_dir_to_fsl_dir(bids_dir):
"""
Converts BIDS PhaseEncodingDirection parameters (i,j,k,i-,j-,k-) to
FSL direction (x,y,z,x-,y-,z-).
"""
fsl_dir = bids_dir.lower()
if fsl_dir == "i-":
return "x-"
if fsl_dir == "i":
return "x"
if fsl_dir == "j-":
return "y-"
if fsl_dir == "j":
return "y"
if fsl_dir == "k-":
return "z-"
if fsl_dir == "k":
return "z"
raise RuntimeError(
f"PhaseEncodingDirection {fsl_dir} is unknown, it should be a value in (x,y,z,x-,y-,z-)"
)
return fsl_dir
| [
2,
19617,
25,
3384,
69,
12,
23,
628,
198,
4299,
27837,
62,
15908,
62,
1462,
62,
69,
6649,
62,
15908,
7,
65,
2340,
62,
15908,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1482,
24040,
347,
14255,
18983,
27195,
7656,
35,
415... | 1.915888 | 321 |
import logging as __logging
from ._tokenizer import Tokenizer
__logging.getLogger(f"pizza_nlp.{__name__}").addHandler(__logging.NullHandler())
| [
11748,
18931,
355,
11593,
6404,
2667,
198,
198,
6738,
47540,
30001,
7509,
1330,
29130,
7509,
198,
198,
834,
6404,
2667,
13,
1136,
11187,
1362,
7,
69,
1,
79,
9990,
62,
21283,
79,
13,
90,
834,
3672,
834,
92,
11074,
2860,
25060,
7,
834... | 2.9 | 50 |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
#Code ends here
data=pd.read_csv(path)
#Plotting histogram of Rating
data['Rating'].plot(kind='hist')
plt.show()
#Subsetting the dataframe based on `Rating` column
data=data[data['Rating']<=5]
#Plotting histogram of Rating
data['Rating'].plot(kind='hist')
# --------------
# code starts here
# code ends here
total_null=data.isnull().sum()
total_null
k=[]
for i in range (0,len(total_null)):
s=(total_null[i]/len(data))*100
k.append(s)
k
percent_null=pd.Series(k,total_null.index)
percent_null
missing_data=pd.DataFrame({'Total':total_null,'Percent':percent_null})
missing_data
data=data.dropna()
total_null_1=data.isnull().sum()
total_null_1
r=[]
for i in range (0,len(total_null_1)):
t=(total_null_1[i]/len(data))*100
r.append(t)
r
percent_null_1=pd.Series(r,total_null_1.index)
percent_null_1
missing_data_1=pd.DataFrame({'Total':total_null_1,'Percent':percent_null_1})
missing_data_1
# --------------
#Code starts here
#Code ends here
g=sns.catplot(x="Category",y="Rating",data=data, kind="box", height=10)
g.set_xticklabels(rotation=90)
g.set_titles('Rating vs Category [BoxPlot]')
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
#Code ends here
data['Installs']=data['Installs'].str.replace(',','')
#Removing `+` from the column
data['Installs']=data['Installs'].str.replace('+','')
#Converting the column to `int` datatype
data['Installs'] = data['Installs'].astype(int)
#Creating a label encoder object
le=LabelEncoder()
#Label encoding the column to reduce the effect of a large range of values
data['Installs']=le.fit_transform(data['Installs'])
#Setting figure size
plt.figure(figsize = (10,10))
#Plotting Regression plot between Rating and Installs
sns.regplot(x="Installs", y="Rating", color = 'teal',data=data)
#Setting the title of the plot
plt.title('Rating vs Installs[RegPlot]',size = 20)
#Code ends here
# --------------
#Code starts here
#Code ends here
data['Price'].value_counts()
data['Price']=data['Price'].str.replace('$','').astype(float)
sns.regplot(x='Price',y='Rating',data=data)
plt.figure(figsize=(10,10))
plt.title('Rating vs Price [RegPlot]',size=20)
# --------------
#Code starts here
#Code ends here
print( len(data['Genres'].unique()) , "genres")
#Splitting the column to include only the first genre of each app
data['Genres'] = data['Genres'].str.split(';').str[0]
#Grouping Genres and Rating
gr_mean=data[['Genres', 'Rating']].groupby(['Genres'], as_index=False).mean()
print(gr_mean.describe())
#Sorting the grouped dataframe by Rating
gr_mean=gr_mean.sort_values('Rating')
print(gr_mean.head(1))
print(gr_mean.tail(1))
# --------------
#Code starts here
#Code ends here
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
#Creating new column having `Last Updated` in days
data['Last Updated Days'] = (data['Last Updated'].max()-data['Last Updated'] ).dt.days
#Setting the size of the figure
plt.figure(figsize = (10,10))
#Plotting a regression plot between `Rating` and `Last Updated Days`
sns.regplot(x="Last Updated Days", y="Rating", color = 'lightpink',data=data )
#Setting the title of the plot
plt.title('Rating vs Last Updated [RegPlot]',size = 20)
#Code ends here
| [
2,
220,
26171,
198,
2,
20939,
278,
13639,
3696,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
384,
397,
1211,
355,
3013,
82,
628,
628,
198,
198,
2,
10669,
4940,
994,
... | 2.758921 | 1,261 |
#
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import sys
import unittest
from random import randint
import ntpath
import pandas as pd
from mock import MagicMock, patch
from munch import Munch
from neptune.exceptions import NeptuneNoExperimentContextException
from neptune.experiments import Experiment
from neptune.model import LeaderboardEntry
from neptune.projects import Project
from tests.neptune.api_objects_factory import a_registered_project_member, an_invited_project_member
from tests.neptune.project_test_fixture import some_exp_entry_dto, some_exp_entry_row
from tests.neptune.random_utils import a_string, a_string_list, a_uuid_string
if __name__ == '__main__':
unittest.main()
| [
2,
198,
2,
15069,
357,
66,
8,
13130,
11,
26461,
23500,
1338,
13,
1976,
267,
13,
78,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 3.416216 | 370 |
from typing import Any
py_map = map
py_type = type | [
6738,
19720,
1330,
4377,
198,
198,
9078,
62,
8899,
796,
3975,
198,
198,
9078,
62,
4906,
796,
2099
] | 2.888889 | 18 |
from Model import *
import MinMax
import AlphaBeta
import parameters
import threading
import sys
from random import randint
from time import sleep
# Lancer le combat d'ia
model = Model(1, 1)
model.pMax_ias[1] = int(sys.argv[1]) # pMax j1
model.eval_ias[1] = int(sys.argv[2]) # eval j1
model.pMax_ias[2] = int(sys.argv[3]) # pMax j2
model.eval_ias[2] = int(sys.argv[4]) # eval j2
game = VueConsole(model)
game.ia_vs_ia()
| [
6738,
9104,
1330,
1635,
201,
198,
11748,
1855,
11518,
201,
198,
11748,
12995,
43303,
201,
198,
11748,
10007,
201,
198,
11748,
4704,
278,
201,
198,
11748,
25064,
201,
198,
6738,
4738,
1330,
43720,
600,
201,
198,
6738,
640,
1330,
3993,
20... | 2.309278 | 194 |
import numpy as np
import torch
from torch.autograd import Variable
from .Attacker import Attacker
from .WhiteBoxAttacker import PGDAttack
from ..models.PixelLevelTransferN import PixelLevelTransferN
from ..utils import clamp_to_valid_img, tform1, tform2, retrieve_image
if __name__ == "__main__":
# gan_attacker = GANAttack(None, None)
from ..models.ResNet18 import pretrained_res18
from ..models.ModelWrapper import ModelWrapper
from skimage.io import imread, imshow
from ..utils import heat_map
import matplotlib.pyplot as plt
import numpy as np
# quick test
model = pretrained_res18()
model_att = pretrained_res18(which=3)
wrapper = ModelWrapper(model)
attack = PGDAttackBB(model, epsilon=0.03, k=5)
# img = imread("../../data-augmentation/banknotes_augmented/val/img_10_76_98.jpg")
# img = imread("data-augmentation/banknotes_augmented/test/img_5_90_10.jpg")
# img = imread("../../data-augmentation/banknotes_augmented/test/img_20_133_100.jpg")
# img = imread("../../data-augmentation/banknotes_augmented/val/img_50_71_2.jpg")
img = imread("data-augmentation/banknotes_augmented_small/test/img_10_100_1.jpg")
img, p1 = wrapper(img, True)
img_pert, p2 = attack(model_att, img, True)
# print(p1)
# print(p2)
plt.subplot(2,2,1)
plt.imshow(img)
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.title("Original image")
plt.subplot(2,2,2)
plt.imshow(img_pert)
plt.gca().axes.get_xaxis().set_visible(False)
plt.gca().axes.get_yaxis().set_visible(False)
plt.title("Perturbed image (undirected PGD)")
plt.subplot(2,2,3)
plt.title("Probability distribution")
plt.bar([1.5,2.5,3.5,4.5], p1, tick_label=["5", "10", "20", "50"], log=True)
plt.xlabel("Banknote")
plt.subplot(2,2,4)
plt.title("Probability distribution")
plt.bar([1.5,2.5,3.5,4.5], p2, tick_label=["5", "10", "20", "50"], log=True)
plt.xlabel("Banknote")
plt.tight_layout()
plt.show()
plt.close()
# diff = np.abs(img_pert.astype(np.float) - img)
# diff = np.mean(diff, 2)
# min_ = np.min(diff)
# max_ = np.max(diff)
# diff = (diff-min_)/(max_-min_)
# plt.imshow(diff, cmap=plt.get_cmap("hot"))
# plt.title("Normalized differences (maximum diff: {:.00f})".format(max_))
# plt.gca().axes.get_xaxis().set_visible(False)
# plt.gca().axes.get_yaxis().set_visible(False)
# plt.colorbar()
# plt.tight_layout()
# plt.show()
heatmap = heat_map(img, img_pert)
plt.imshow(heatmap)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
198,
6738,
764,
8086,
10735,
1330,
8307,
263,
198,
6738,
764,
12256,
14253,
8086,
10735,
1330,
23842,
5631,
926,
441,
198,
1... | 2.239557 | 1,173 |
'''
FORCE
Copyright (c) 2020-present NAVER Corp.
MIT license
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from ignite.engine import create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from pruning.pruning_algos import iterative_pruning
from experiments.experiments import *
from pruning.mask_networks import apply_prune_mask
import os
import argparse
import random
# from IPython import embed
LOG_INTERVAL = 20
REPEAT_WITH_DIFFERENT_SEED = 3 # Number of initialize-prune-train trials (minimum of 1)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# New additions
args = parseArgs()
if __name__ == '__main__':
# Randomly pick a random seed for the experiment
# Multiply the number of seeds to be sampled by 300 so there is wide range of seeds
seeds = list(range(300 * REPEAT_WITH_DIFFERENT_SEED))
random.shuffle(seeds)
for seed in seeds[:REPEAT_WITH_DIFFERENT_SEED]:
train(seed)
| [
7061,
6,
198,
13775,
5222,
198,
15269,
357,
66,
8,
12131,
12,
25579,
11746,
5959,
11421,
13,
198,
36393,
5964,
198,
7061,
6,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
... | 2.898701 | 385 |
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt1
import pylab
G = nx.MultiDiGraph()
#list of kabyle tags
tags=[]
i=0
#extraction du tableau des tags
for ligne in open("c:/tal/tagspos.txt",encoding='utf-8'):
a=ligne.replace('\n',"")
if (i!=0):
b=(a,0,())
tags.append(b)
i=i+1
edges=[] # Edges list
#this function renders the tag index in the tags kab array
regexp ='[-A-Zḍčǧḥṛṣẓṭţɣɛ« ».,:1-9a-z]+/[A-Z]+' # regular expression to retreive the couple (tagged wor/tag)
text=""
#Construction du texte global
first=0
for ligne in open("c:/tal/corpus-kab.txt",encoding='utf-8'):
if (first!=0):
text=text+ligne
first=1
text=text.replace('\n'," ")
text=text.replace(" "," ")
text=text.replace(" "," ")
text=text.replace("\ufeff","")
a=text.split(" ")
i=0
start=0
b=''
while i<len(a)-1:
iii=b
#récupérer la paire mot tag
b=a[i].split("/") #split a couple
#print (b[1])
try:
tuplea=tags[index_of_tag(b[1])] #look for the index of the tag
except:
print (b,iii,'here',b)
exit()
#print (tuple)
number=tuplea[1]+1#increment the tag count
tuple_tag=tuplea[2]
list_a=list(tuple_tag)
if b[1]=='NMP':
list_a.append(b[0])
else:
list_a.append(b[0].lower())
#print (list_a)
tuple_tag=tuple(list_a)
tags[index_of_tag(b[1])]=(tuplea[0],number,tuple_tag)# update une tag count
c=a[i+1].split("/") # this is for the last couple word/tag
if (len(c)!=2):
print (b,c,'moins de deux',a[i-1])
exit()
if(start==0) and (i==0): # the first start edge : First word in the text or the first edge after a dot
G.add_edges_from([('Start',b[1])], weight=0)
edges.append(('Start->'+b[1],1))
G.add_edges_from([(b[1],c[1])], weight=0) # and create an edge betwen the dot and the previous tags
edges.append((b[1]+'->'+c[1],1))
start=1
#print ('start')
elif (start==0):
try:
G.add_edges_from([('Start',c[1])], weight=0) # edge start -> next word after a dot .
start=1
edges.append(('Start->'+c[1],1))
except:
print(c,b,iii)
exit()
elif (c[1]=='.'):
G.add_edges_from([(c[1],'Stop')], weight=0) # when a dot is found, create an end
edges.append((c[1]+'->Stop',1))
G.add_edges_from([(b[1],c[1])], weight=0) # and create an edge betwen the dot and the previous tags
edges.append((b[1]+'->'+c[1],1))
start=0
else:
G.add_edges_from([(b[1],c[1])], weight=0) # create and edge between two neighbours
edges.append((b[1]+'->'+c[1],1))
i=i+1
# this is for the last tag. We will increment its occurence
try:
tuplea=tags[index_of_tag(c[1])]
except:
print (c[1])
exit()
number=tuplea[1]+1
tuple_tag=tuplea[2]
list_a=list(tuple_tag)
list_a.append(c[0])
tuple_tag=tuple(list_a)
try:
tags[index_of_tag(c[1])]=(tuplea[0],number,tuple_tag)
except:
print (c[1])
exit()
#print (tags)
val_map = {}
values = [val_map.get(node, 0.45) for node in G.nodes()]
edge_labels=dict([((u,v,),d['weight'])
for u,v,d in G.edges(data=True)])
red_edges = [('Start','NMC'),('NMC','Stop')]
edge_colors = ['black' if not edge in red_edges else 'black' for edge in G.edges()]
pos=nx.spring_layout(G)
options = {
'node_color': 'blue',
'node_size': 800,
'width': 1,
'arrowstyle': '-|>',
'arrowsize': 13,
}
color_map = []
j=0
for node in G:
#print (node)
if str(node) =='Start' or str(node) =='Stop':
color_map.append('blue')
elif (len(str(node))>=4):
color_map.append('olive')
elif (len(str(node))==3):
color_map.append('yellow')
elif (len(str(node))==2):
color_map.append('purple')
else:
color_map.append('red')
j=j+1
nx.draw(G,pos, node_color = color_map, node_size=1500,edge_color=edge_colors,edge_cmap=plt.cm.Reds)
#nx.draw_networkx_labels()
#networkx.draw_networkx_labels(graph,node_positions,font_size=16)
#nx.coloring.greedy_color(G, strategy='largest_first')
#nx.draw_networkx(G, arrows=True, **options)
#print (words)i
j=0
labels={}
for i in G.nodes:
labels[i]=i
nx.draw_networkx_labels(G,pos,labels,font_size=16)
pylab.axis('off')
pylab.show()
# calculate the occurences of grammatical classes ant show them on histogram
x = np.arange(len(tags))
valeurs=[]
symbols=[]
i=0
while i< len (tags):
if (tags[i][1] != 0):
valeurs.append(tags[i][1])
symbols.append(tags[i][0])
i=i+1
x = np.arange(len(valeurs))
plt.bar(x, height= valeurs)
##
plt.xticks(x+.5, symbols);
plt.ylabel('Timeḍriwt/Tiseqqaṛ')
plt.xlabel('Ismilen inejrumen')
plt.show()
#calculate probabilities
edges_probabilities=[]
edges_probabilities=[[x,edges.count(x)] for x in set(edges)]
for i in edges_probabilities:
edges_probabilities[edges_probabilities.index(i)]=(i[0],i[1]/len(edges))
#print(i[0][0],'+',i[1])
x = np.arange(len(tags))
valeurs=[]
symbols=[]
i=0
while i< len (edges_probabilities):
if (edges_probabilities[i][1] != 0):
valeurs.append(edges_probabilities[i][1]*100)
symbols.append(edges_probabilities[i][0][0])
i=i+1
x = np.arange(len(valeurs))
plt.bar(x, height= valeurs)
plt.xticks(x+.1, symbols);
plt.ylabel('Probabilité')
plt.xlabel('Transitions')
plt.show()
#print ('yes')
#calcul de la matrice de probabilité
probablilities = []
line=[]
l=0
for i in tags:
k=0
line=[]
for j in tags:
line.append(0)
k=k+1
probablilities.append(line)
l=l+1
x=0
for j in edges_probabilities:
x=a
a=j[0][0].split("->")
#print (j,'-> ',index_of_tag(a[0]))# print (j[1])
try:
probablilities[index_of_tag(a[0])][index_of_tag(a[1])]=j[1]
except:
print (x,a,a[0],'->',a[1],j[1])
exit()
for i in probablilities:
k=0
x=0
for j in i:
x=j+x
#print (x)
#######begin cloud
tags1=[]
i=0
for ligne in open("c:/tal/tagspos.txt",encoding='utf-8'):
a=ligne.replace('\n',"")
if (i!=0):
tags1.append(a)
i=i+1
x=[]
y=[]
for i in tags1:
x.append(0)
y.append(0)
#this function renders the tag index in the tags kab array
for i in edges_probabilities:
h=i[0][0]
j=h.split('->')
x[index_of_tag1(j[0],tags1)]=x[index_of_tag1(j[0],tags1)]+1
y[index_of_tag1(j[1],tags1)]=y[index_of_tag1(j[1],tags1)]+1
plt1.scatter(x,y,s=10)
plt1.title('Asigna n waggazen : ismilen n tjerrumt')
plt1.xlabel('x')
plt1.ylabel('y')
plt1.show()
x1=[]
y1=[]
for i in edges_probabilities:
h=i[0]
j=h[0].split('->')
## print(j)
x1.append(x[index_of_tag1(j[0],tags1)]*10000)
y1.append(x[index_of_tag1(j[1],tags1)]*10000)
plt1.scatter(x1,y1,s=5)
plt1.title('Asigna n waggazen : Tiyugiwin n yismilen n tjerrumt')
plt1.xlabel('x')
plt1.ylabel('y')
plt1.show()
x1=[]
y1=[]
for i in edges_probabilities:
h=i[0]
j=h[0].split('->')
## print(j)
x1.append(x[index_of_tag1(j[0],tags1)]*10000)
y1.append(y[index_of_tag1(j[1],tags1)]*10000)
plt1.scatter(x1,y1,s=5)
plt1.title('Asigna n waggazen : Tiyugiwin n yismilen n tjerrumt')
plt1.xlabel('x')
plt1.ylabel('y')
plt1.show()
x1=[]
y1=[]
for i in edges_probabilities:
h=i[0]
j=h[0].split('->')
## print(j)
x1.append(y[index_of_tag1(j[0],tags1)]*10000)
y1.append(x[index_of_tag1(j[1],tags1)]*10000)
plt1.scatter(x1,y1,s=5)
plt1.title('Asigna n waggazen : Tiyugiwin n yismilen n tjerrumt')
plt1.xlabel('x')
plt1.ylabel('y')
plt1.show()
x1=[]
y1=[]
for i in edges_probabilities:
h=i[0]
j=h[0].split('->')
## print(j)
x1.append(y[index_of_tag1(j[0],tags1)]*10000)
y1.append(y[index_of_tag1(j[1],tags1)]*10000)
plt1.scatter(x1,y1,s=5)
plt1.title('Asigna n waggazen : Tiyugiwin n yismilen n tjerrumt')
plt1.xlabel('x')
plt1.ylabel('y')
plt1.show()
x1=[]
y1=[]
for i in edges_probabilities:
h=i[0]
print(i[1])
j=h[0].split('->')
## print(j)
x1.append(y[index_of_tag1(j[0],tags1)]*10000)
y1.append(i[1]*10000)
plt1.scatter(x1,y1,s=5)
plt1.title('Asigna n waggazen : Tiyugiwin n yismilen n tjerrumt')
plt1.xlabel('x')
plt1.ylabel('y')
plt1.show()
| [
11748,
3127,
87,
355,
299,
87,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
16,
201,
198,
11748,
279,
... | 1.850969 | 4,697 |
from django.db import models
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
198,
220,
220
] | 3.277778 | 18 |
import os
import pytest
from typer.testing import CliRunner
from ops2deb.cli import app
runner = CliRunner()
@pytest.mark.parametrize("args", [[], ["-v"], ["-v", "-e", "10"]])
| [
11748,
28686,
198,
198,
11748,
12972,
9288,
198,
6738,
1259,
525,
13,
33407,
1330,
1012,
72,
49493,
198,
198,
6738,
39628,
17,
11275,
13,
44506,
1330,
598,
198,
198,
16737,
796,
1012,
72,
49493,
3419,
628,
628,
198,
198,
31,
9078,
928... | 2.628571 | 70 |
# MIT License
#
# Copyright (c) 2021 Université Paris-Saclay
# Copyright (c) 2021 Laboratoire national de métrologie et d'essais (LNE)
# Copyright (c) 2021 CNRS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pathlib import Path
from typing import Union, Text
import yaml
from box import Box
from utils import fix_seed
class Config:
"""
Dot-based access to configuration parameters saved in a YAML file.
"""
def __init__(self, file: Union[Path, Text]):
"""
Load the parameters from the YAML file.
If no path are given in the YAML file for bert_checkpoint and seqeval, the corresponding objects will be load
if used (needs an internet connection).
"""
# get a Box object from the YAML file
with open(str(file), 'r') as ymlfile:
cfg = Box(yaml.safe_load(ymlfile), default_box=True, default_box_attr=None)
# manually populate the current Config object with the Box object (since Box inheritance fails)
for key in cfg.keys():
setattr(self, key, getattr(cfg, key))
# resolve seqeval config into a name or a path
seqeval_path = getattr(self, "seqeval_path", None)
self.seqeval_path = seqeval_path if seqeval_path is not None else 'seqeval'
self.dataset.path = Path(self.dataset.path)
# Don't lowercase if the corresponding attribute is not defined in config.yml
self.dataset.do_lowercase = getattr(self.dataset, 'do_lowercase', False)
# Correct types in train (ex. lr = 5e-5 is read as string)
for float_var in ["dropout", "learning_rate", "slot_loss_coeff"]:
val = getattr(self.train, float_var)
if type(val) != float:
setattr(self.train, float_var, float(val))
assert self.train.validation_metric in ["intent_acc", "slot_f1", "loss"], "Unrecognized validation metric"
# Some attributes could not be defined in config.yml, set them as None
self.train.num_workers = getattr(self.train, "num_workers", None)
self.train.seed = getattr(self.train, "seed", None)
# Fix seed if specified
if self.train.seed is not None:
fix_seed(self.train.seed) | [
2,
17168,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
26986,
43816,
6342,
12,
38318,
10724,
198,
2,
15069,
357,
66,
8,
33448,
7882,
5549,
557,
2260,
390,
285,
25125,
3225,
37987,
2123,
288,
6,
408,
15152,
357,
43,
12161,
8,
1... | 2.882458 | 1,123 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from pyppeteer.launcher import Launcher
from pyppeteer.chromium_downloader import chromium_excutable
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
12972,
381,
14471,
263,
13,
38722,
2044,
1330,
26385,
198,
6738,
12972,
... | 2.737705 | 61 |