blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
616
content_id
stringlengths
40
40
detected_licenses
listlengths
0
69
license_type
stringclasses
2 values
repo_name
stringlengths
5
118
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
63
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
2.91k
686M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
23 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
220 values
src_encoding
stringclasses
30 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
2
10.3M
extension
stringclasses
257 values
content
stringlengths
2
10.3M
authors
listlengths
1
1
author_id
stringlengths
0
212
64cbd3b4e1e43fe2305f5f1bb0661dbee7eaa93f
3a5b4f8834d8a7ed63ccbdbea40ea85974a8b7c4
/CallInterface/wsgi.py
26b71059d21c89d2f1fb148180c9e36965de5426
[ "Apache-2.0" ]
permissive
babakmhz/callinterface_server
1de37a83208546a0c90c59497423a83d887e6090
004c0c8ec05b051b36f5bd89432955ceba0ee2a1
refs/heads/master
2020-04-04T06:52:09.646024
2018-11-01T18:44:59
2018-11-01T18:44:59
155,759,725
0
0
null
null
null
null
UTF-8
Python
false
false
403
py
""" WSGI config for CallInterface project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CallInterface.settings') application = get_wsgi_application()
[ "babakmhz96@gmail.com" ]
babakmhz96@gmail.com
8589d9bd7373b78746960b04f357c76a95469f96
3e24611b7315b5ad588b2128570f1341b9c968e8
/pacbiolib/pacbio/pythonpkgs/kineticsTools/lib/python2.7/site-packages/kineticsTools/WorkerProcess.py
8f942b04b941bca938157f82b6d7dc6e0aca26f1
[ "BSD-2-Clause" ]
permissive
bioCKO/lpp_Script
dc327be88c7d12243e25557f7da68d963917aa90
0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2
refs/heads/master
2022-02-27T12:35:05.979231
2019-08-27T05:56:33
2019-08-27T05:56:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,249
py
################################################################################# # Copyright (c) 2011-2013, Pacific Biosciences of California, Inc. # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of Pacific Biosciences nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY # THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC BIOSCIENCES AND ITS # CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR # ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER # IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
################################################################################# import cProfile import logging import os.path import copy from multiprocessing import Process from multiprocessing.process import current_process from threading import Thread, Event from urlparse import urlparse import warnings import numpy as np import pbcore.io from pbcore.io.opener import (openAlignmentFile, openIndexedAlignmentFile) # FIXME this should ultimately go somewhere else. actually, so should the # rest of this module. def _openFiles(self, refFile=None, sharedIndices=None): """ Hack to enable sharing of indices (but not filehandles!) between dataset instances. """ log = logging.getLogger() log.debug("Opening resources") for k, extRes in enumerate(self.externalResources): location = urlparse(extRes.resourceId).path sharedIndex = None if sharedIndices is not None: sharedIndex = sharedIndices[k] try: resource = openIndexedAlignmentFile( location, referenceFastaFname=refFile, sharedIndex=sharedIndex) except (IOError, ValueError): log.info("pbi file missing for {f}, operating with " "reduced speed and functionality".format( f=location)) resource = openAlignmentFile(location, referenceFastaFname=refFile) if not resource: raise IOError("{f} fails to open".format(f=location)) self._openReaders.append(resource) log.debug("Done opening resources") def _reopen (self): """ Force re-opening of underlying alignment files, preserving the reference and indices if present, and return a copy of the AlignmentSet. This is a workaround to allow us to share the index file(s) already loaded in memory while avoiding multiprocessing problems related to .bam files. 
""" refFile = None if not self.isCmpH5: refFile = self._referenceFile newSet = copy.deepcopy(self) newSet._referenceFastaFname = refFile if not self.isCmpH5 and not self.hasPbi: self.close() newSet._openFiles(refFile=refFile) else: indices = [ f.index for f in self.resourceReaders() ] self.close() _openFiles(newSet, refFile=refFile, sharedIndices=indices) return newSet class Worker(object): """ Base class for worker processes that read reference coordinates from the task queue, perform variant calling, then push results back to another queue, to be written to a GFF file by another process. All tasks that are O(genome length * coverage depth) should be distributed to Worker processes, leaving the ResultCollector process only O(genome length) work to do. """ def __init__(self, options, workQueue, resultsQueue, sharedAlignmentSet=None): self.options = options self.daemon = True self._workQueue = workQueue self._resultsQueue = resultsQueue self._sharedAlignmentSet = sharedAlignmentSet def _run(self): logging.info("Worker %s (PID=%d) started running" % (self.name, self.pid)) if self._sharedAlignmentSet is not None: # XXX this will create an entirely new AlignmentSet object, but # keeping any indices already loaded into memory self.caseCmpH5 = _reopen(self._sharedAlignmentSet) #`self._sharedAlignmentSet.close() self._sharedAlignmentSet = None else: warnings.warn("Shared AlignmentSet not used") self.caseCmpH5 = pbcore.io.AlignmentSet(self.options.infile, referenceFastaFname=self.options.reference) self.controlCmpH5 = None if not self.options.control is None: # We have a cmp.h5 with control vales -- load that cmp.h5 self.controlCmpH5 = pbcore.io.AlignmentSet(self.options.control, referenceFastaFname=self.options.reference) if self.options.randomSeed is None: np.random.seed(42) self.onStart() while True: if self.isTerminated(): break chunkDesc = self._workQueue.get() if chunkDesc is None: # Sentinel indicating end of input. 
Place a sentinel # on the results queue and end this worker process. self._resultsQueue.put(None) self._workQueue.task_done() break else: (chunkId, datum) = chunkDesc logging.info("Got chunk: (%s, %s) -- Process: %s" % (chunkId, str(datum), current_process())) result = self.onChunk(datum) logging.debug("Process %s: putting result." % current_process()) self._resultsQueue.put((chunkId, result)) self._workQueue.task_done() self.onFinish() logging.info("Process %s (PID=%d) done; exiting." % (self.name, self.pid)) def run(self): # Make the workers run with lower priority -- hopefully the results writer will win # It is single threaded so it could become the bottleneck self._lowPriority() if self.options.doProfiling: cProfile.runctx("self._run()", globals=globals(), locals=locals(), filename="profile-%s.out" % self.name) else: self._run() #== # Begin overridable interface #== def onStart(self): pass def onChunk(self, target): """ This function is the heart of the matter. referenceWindow, alnHits -> result """ pass def onFinish(self): pass class WorkerProcess(Worker, Process): """Worker that executes as a process.""" def __init__(self, *args, **kwds): Process.__init__(self) super(WorkerProcess, self).__init__(*args, **kwds) self.daemon = True def _lowPriority(self): """ Set the priority of the process to below-normal. 
""" import sys try: sys.getwindowsversion() except: isWindows = False else: isWindows = True if isWindows: # Based on: # "Recipe 496767: Set Process Priority In Windows" on ActiveState # http://code.activestate.com/recipes/496767/ import win32api import win32process import win32con pid = win32api.GetCurrentProcessId() handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid) win32process.SetPriorityClass(handle, win32process.BELOW_NORMAL_PRIORITY_CLASS) else: os.nice(10) def isTerminated(self): return False class WorkerThread(Worker, Thread): """Worker that executes as a thread (for debugging purposes only).""" def __init__(self, *args, **kwds): Thread.__init__(self) super(WorkerThread, self).__init__(*args, **kwds) self._stop = Event() self.daemon = True self.exitcode = 0 def terminate(self): self._stop.set() def isTerminated(self): return self._stop.isSet() @property def pid(self): return -1 def _lowPriority(self): pass
[ "409511038@qq.com" ]
409511038@qq.com
42e9d37ca0e88c79f2944d2aa4fd5cda1e340a19
1e7b7389ecc178dcf5e7e25342e03b35e6290735
/kernel_Init.py
ab9aeeddd4eb8b08669161b193f281e3f686c502
[]
no_license
gongyu0010/DIMENSION
f4cd207b961b0a7a496b82bd5a200db466178f8a
c7a0b26f0cebe69f0929d6ddea9f2511b7cb294b
refs/heads/master
2020-06-23T00:22:34.738931
2019-06-04T10:04:07
2019-06-04T10:04:07
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,312
py
from tensorflow.python.ops.init_ops import Initializer,_compute_fans from numpy.random import RandomState import numpy as np import tensorflow as tf class ComplexInit(Initializer): # The standard complex initialization using # either the He or the Glorot criterion. def __init__(self, kernel_size, input_dim, weight_dim, nb_filters=None, criterion='glorot', seed=None): # `weight_dim` is used as a parameter for sanity check # as we should not pass an integer as kernel_size when # the weight dimension is >= 2. # nb_filters == 0 if weights are not convolutional (matrix instead of filters) # then in such a case, weight_dim = 2. # (in case of 2D input): # nb_filters == None and len(kernel_size) == 2 and_weight_dim == 2 # conv1D: len(kernel_size) == 1 and weight_dim == 1 # conv2D: len(kernel_size) == 2 and weight_dim == 2 # conv3d: len(kernel_size) == 3 and weight_dim == 3 assert len(kernel_size) == weight_dim and weight_dim in {0, 1, 2, 3} self.nb_filters = nb_filters self.kernel_size = kernel_size self.input_dim = input_dim self.weight_dim = weight_dim self.criterion = criterion self.seed = 1337 if seed is None else seed def __call__(self, shape, dtype=None, partition_info=None): if self.nb_filters is not None: kernel_shape = tuple(self.kernel_size) + (int(self.input_dim), self.nb_filters) else: kernel_shape = (int(self.input_dim), self.kernel_size[-1]) fan_in, fan_out = _compute_fans( tuple(self.kernel_size) + (self.input_dim, self.nb_filters) ) if self.criterion == 'glorot': s = 1. / (fan_in + fan_out) elif self.criterion == 'he': s = 1. / fan_in else: raise ValueError('Invalid criterion: ' + self.criterion) rng = RandomState(self.seed) modulus = rng.rayleigh(scale=s, size=kernel_shape) phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape) weight_real = modulus * np.cos(phase) weight_imag = modulus * np.sin(phase) weight = np.concatenate([weight_real, weight_imag], axis=-1) return weight
[ "zw.ke@siat.ac.cn" ]
zw.ke@siat.ac.cn
881b2796e754eccb435d9f1824561012eb3f9263
8308fa0e5f998e0aa6741af5720d6da99497060d
/estoque/admin.py
deb9bd5b8513f6c1109d2812a086ec45998d55fe
[]
no_license
gbpjr/sistema-estoque
7aae11c657c555b98a329cdafde704504ef8b23a
701471e593fa758a1da1b66fa279da4dd3d979e7
refs/heads/master
2020-04-23T08:37:35.123431
2019-02-24T21:43:14
2019-02-24T21:43:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
207
py
from django.contrib import admin from .models import Local, Tipo, Fabricante, Componente admin.site.register(Local) admin.site.register(Tipo) admin.site.register(Fabricante) admin.site.register(Componente)
[ "=" ]
=
ce7cceea7e858c2f7540cb64491d25072b5b9676
d5c745371b1a795302abaae0494177c9698d4d72
/such_server/core/fields.py
0b1c705c65a70f700503b318277fbf6d7a26d14c
[]
no_license
lakenen/such.io
0a2d0fb067ee821ab169e3811e11208b714c0538
5b9c8230d6fc43dac5a35358015228c2fc601e41
refs/heads/master
2023-08-18T12:39:41.468994
2014-01-21T08:18:05
2014-01-21T08:18:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
416
py
from decimal import Decimal from django.db import models class CoinAmountField(models.DecimalField): MAX_DIGITS = 16 DECIMAL_PLACES = 8 def __init__(self, *args, **kwargs): kwargs['max_digits'] = self.__class__.MAX_DIGITS kwargs['decimal_places'] = self.__class__.DECIMAL_PLACES kwargs['default'] = Decimal('0.0') super(CoinAmountField, self).__init__(*args, **kwargs)
[ "matt@crocodoc.com" ]
matt@crocodoc.com
feffae347830f1b5c23eff3efc56f457fd05c16f
8c5588b6fbe7665038d24ebbcb41cceb7be738f3
/Python_Basics/Exam/02_Mountain_Run.py
86f5dd5bf06f74426438ac19b23f081a6551108d
[ "MIT" ]
permissive
Dochko0/Python
9157aa60094e946ecb98fa484102a122f4101d80
e9612c4e842cfd3d9a733526cc7485765ef2238f
refs/heads/master
2020-04-03T06:08:37.120917
2019-03-10T03:47:56
2019-03-10T03:47:56
155,066,929
0
0
null
null
null
null
UTF-8
Python
false
false
377
py
import math record = float(input()) distance = float(input()) speed = float(input()) time_for_dist = distance*speed slowing = math.floor(distance/50) slowing = slowing*30 time_for_dist+=slowing if time_for_dist<record: print(f'Yes! The new record is {time_for_dist:.2f} seconds.') else: slow = time_for_dist-record print(f'No! He was {slow:.2f} seconds slower.')
[ "dochko_dochev@yahoo.com" ]
dochko_dochev@yahoo.com
eadbd701bc7fafb29b726f2330b241a74aad34d8
9cdfe7992090fb91696eec8d0a8ae15ee12efffe
/recursion/prob1221.py
75de61d8cb82f1561f811ecae781765b333d2848
[]
no_license
binchen15/leet-python
e62aab19f0c48fd2f20858a6a0d0508706ae21cc
e00cf94c5b86c8cca27e3bee69ad21e727b7679b
refs/heads/master
2022-09-01T06:56:38.471879
2022-08-28T05:15:42
2022-08-28T05:15:42
243,564,799
1
0
null
null
null
null
UTF-8
Python
false
false
528
py
# 1221 Split string into Balanced Strings class Solution(object): def balancedStringSplit(self, s): """ :type s: str :rtype: int """ m = len(s) if m == 2: return 1 i = 2 while i < m: if self.isBalanced(s[:i]): # self.balancedStringSplit(s[:i]) return 1 + self.balancedStringSplit(s[i:]) i += 2 return 1 def isBalanced(self, sub): return sub.count("L") == sub.count('R')
[ "binchen.devops@gmail.com" ]
binchen.devops@gmail.com
de3790e6ec5dcd81a3108b9ae7dcf9ea628336f9
3f81ad43c3d9d5408764f26a2506ee256345526c
/pack_dj/settings.py
516490f503c0cdaf9bfebb7c1dbe722c96ce930f
[]
no_license
gerasachin89/dj_exception
84e55acde11d03ae944ab6a885438fe8d9eb329c
a57e75072e14bbf6ad29221f084ed3bfd7eeecbb
refs/heads/master
2020-03-19T06:58:23.258662
2018-08-16T15:19:41
2018-08-16T15:19:41
136,072,184
0
0
null
2018-08-16T15:19:34
2018-06-04T19:20:34
Python
UTF-8
Python
false
false
3,166
py
""" Django settings for pack_dj project. Generated by 'django-admin startproject' using Django 1.10. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'u$-wj0xgf5q139u4z5=l_=3@^2sn%5oq#n132la@zxtv93o=6p' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'djbugger', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'djbugger.middleware.ExceptionGettingMiddleware' ] ROOT_URLCONF = 'pack_dj.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'pack_dj.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 
'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/'
[ "sachin.gera@verificient.com" ]
sachin.gera@verificient.com
493de8646f5dd35ace3b378ca6ac0e2fcbdcac2a
95c3cf0202ef4438abc962332aca55f18b8948b1
/kway_merge.py
75acbf23f9e8b490aaaf72825f221e74bffa436a
[]
no_license
Coder-12/Wikipedia-Search-Engine
1ea094d4e169389b47380613caf8aafb1da171eb
20e60276d6b62d4166f3cf40553c6f3a11a98d65
refs/heads/main
2023-04-28T00:38:01.205719
2021-05-17T06:46:15
2021-05-17T06:46:15
321,054,141
0
0
null
null
null
null
IBM852
Python
false
false
1,946
py
# -*- coding: cp1252 -*- import sys import re import time from nltk.stem import PorterStemmer from collections import defaultdict from heapq import heapify, heappush, heappop isdone=[0 for i in range(1965)] index_files=["./Data/index_"+str(i)+".txt" for i in range(1,1965,1)] num_of_index_files=len(index_files) chunk_size=100000 processed_index_file=0 secondary_index={} index_file_ptr={} row={} listOfwords={} heap=[] tot=0 inverted_index=defaultdict() def store_primery_index_info(): global processed_index_file processed_index_file+=1 flag=1 index_file="./Data/mergefiles/"+"´ndex"+str(processed_index_file)+".txt" with open(index_file,"w") as fp: for i in sorted(inverted_index): if flag: secondary_index[i]=processed_index_file flag=0 fp.write(str(i)+"="+inverted_index[i]+"\n") def main(): while isdone.count(0)!=num_of_index_files: word=heappop(heap) tot+=1 for i in range(num_of_index_files): if (isdone[i] and listOfwords[i][0]==word): if word not in inverted_index: inverted_index[word]=listOfwords[i][1] else: inverted_index[word]+=","+listOfwords[i][1] row[i]=index_file_ptr[i].readline().strip() if row[i]: listOfwords[i]=row[i].split("=") if listOfwords[i][0] not in heap: heappush(heap,listOfwords[i][0]) else: isdone[i]=0 index_file_ptr[i].close() if (tot>=chunk_size): store_primery_index_info() tot=0 inverted_index.clear() store_primery_index_info() with open("./Data/mergefiles/"+"secondary.txt","w") as fp: for i in sorted(secondary_index): fp.write(str(i)+" "+str(secondary_index[i])+"\n") if __name__=="__main__": main()
[ "noreply@github.com" ]
Coder-12.noreply@github.com
c84e28fc468e05fe8ab1aec658363b926602fb29
c4340afbc8fe3be017d7025686d47bde86b8d3e3
/run.py
e07a19217c0298774d84aa7723ff5e8f5971fbf1
[]
no_license
CobbleGen/rock-paper-royale
43b438bd62771de895ded87d37c9dfea2f154c87
62ee9d97bb050557aa487275fa0286071df327da
refs/heads/main
2023-01-15T03:23:39.362908
2020-11-24T12:34:32
2020-11-24T12:34:32
314,386,807
0
0
null
null
null
null
UTF-8
Python
false
false
126
py
from my_server import app if __name__ == '__main__': app.run(host='0.0.0.0', port=8070, debug=True, use_reloader=False)
[ "cobblegenerator@gmail.com" ]
cobblegenerator@gmail.com
a40ffa25cafd8ef107009a02716aa27de5fa4f4d
3fa29688b54c6fcb68035c0681a50b5e5cb29b7c
/include/i2c_handler.py
b6d1e694508f9ac699658bf2f806ea1f6c2b735f
[]
no_license
Joon174/Low-Cost-Platform-For-Robot-Learning
e26c541342ae506f37b32f8fdeafa7d060b6c6fe
7a0ad280acff2e8ed16d003e22b0b747735db2c8
refs/heads/master
2022-11-08T10:43:40.823462
2020-06-11T06:27:21
2020-06-11T06:27:21
235,024,696
0
0
null
null
null
null
UTF-8
Python
false
false
2,900
py
## PCA9685_interface import wiringpi import time class PCA9685(): def __init__(self, address=0x40, pwm_freq=50): super(PCA9685, self).__init__() self.address = address self.freq = 0 self.registers = {"MODE1": 0x00, "PRESCALE": 0xFE, "LED0_ON_L": 0x06, "LED0_ON_H": 0x07, "LED0_OFF_L": 0x08, "LED0_OFF_H": 0x09, "LED_ALL_ON":0xFA, "PIN_ALL": 0x0F } self.device = self._initConnection() self._setupDevice() self._setPWMFreq(pwm_freq) # initConnection # Checks for a return from the MPU6050 in the I2C bus. Should there be no gyroscope # found, the class will assert an error and exit the program. def _initConnection(self): try: device = wiringpi.wiringPiI2CSetup(self.address) print("Successfully conneced to PCA9685 IC Chip.\n") return device except AttributeError: print("Failed to connect to device. Please check the connection.\n") print("Tip:\t You may need to initialize the I2C bus using raspi-config.\n") exit(0) def _setupDevice(self): settings = wiringpi.wiringPiI2CReadReg8(self.device, self.registers["MODE1"]) & 0x7F auto_increment = settings | 0x20 wiringpi.wiringPiI2CWriteReg8(self.device, self.registers["MODE1"], auto_increment) def _setPWMFreq(self, freq): self.freq = (1000 if freq>1000 else freq if freq<400 else 400) prescale = int(25000000/(4096*freq) - 0.5) settings = wiringpi.wiringPiI2CReadReg8(self.device, self.registers["MODE1"]) & 0x7F sleep = settings | 0x10 wake = settings & 0xEF restart = wake | 0x80 wiringpi.wiringPiI2CWriteReg8(self.device, self.registers["MODE1"], sleep) wiringpi.wiringPiI2CWriteReg8(self.device, self.registers["PRESCALE"], prescale) wiringpi.wiringPiI2CWriteReg8(self.device, self.registers["MODE1"], wake) time.sleep(0.001) wiringpi.wiringPiI2CWriteReg8(self.device, self.registers["MODE1"], restart) def _triggerPulse(self, channel, on, off): wiringpi.wiringPiI2CWriteReg8(self.device, self.registers["LED0_ON_L"]+4*channel, on & 0xFF) wiringpi.wiringPiI2CWriteReg8(self.device, self.registers["LED0_ON_H"]+4*channel, on >> 8) 
wiringpi.wiringPiI2CWriteReg8(self.device, self.registers["LED0_OFF_L"]+4*channel, off & 0xFF) wiringpi.wiringPiI2CWriteReg8(self.device, self.registers["LED0_OFF_H"]+4*channel, off >> 8) def servo_set_angle(self, channel, pulse): analog_value = int(float(pulse) / 1000000 * self.freq * 4096) self._triggerPulse(channel, 0, analog_value)
[ "tanjoonyou@gmail.com" ]
tanjoonyou@gmail.com
31b90af5e2d762ee6482a7c0202484d4b2a0cff5
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
/python/spaCy/2016/4/test_only_punct.py
12c9580880eb988530171fcf1973e0dc5ca361fa
[ "MIT" ]
permissive
rosoareslv/SED99
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
a062c118f12b93172e31e8ca115ce3f871b64461
refs/heads/main
2023-02-22T21:59:02.703005
2021-01-28T19:40:51
2021-01-28T19:40:51
306,497,459
1
1
null
2020-11-24T20:56:18
2020-10-23T01:18:07
null
UTF-8
Python
false
false
191
py
from __future__ import unicode_literals def test_only_pre1(en_tokenizer): assert len(en_tokenizer("(")) == 1 def test_only_pre2(en_tokenizer): assert len(en_tokenizer("((")) == 2
[ "rodrigosoaresilva@gmail.com" ]
rodrigosoaresilva@gmail.com
6030c0e24d7cd9232a8e406f7eb32fa8a4ab9b96
9a031adfff82fb3e36d7ff62c6125cb72d8b6141
/UNAG/apps/general/validators.py
496774ccc8c6fa5d7a8e44fa6ca69efbc19718be
[]
no_license
steffyn/UNAG
3d57836f768fdaa475eb64543c6039d8b4ae4993
9cb00b04b0c64051fe0244b9bcb67c9eb31c390b
refs/heads/master
2020-12-11T03:10:44.743755
2016-07-28T14:33:26
2016-07-28T14:33:26
50,447,125
1
0
null
2016-07-28T14:33:26
2016-01-26T17:46:38
JavaScript
UTF-8
Python
false
false
949
py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import mimetypes from os.path import splitext from django.core.exceptions import ValidationError from django.utils.translation import ugettext_lazy as _ from django.template.defaultfilters import filesizeformat class validar(object): def __init__(self, *args, **kwargs): self.tipo = kwargs.pop('tipo', None) self.longitud = kwargs.pop('longitud', None) self.min_size = kwargs.pop('min_size', 0) self.max_size = kwargs.pop('max_size', None) def __call__(self, value): if self.tipo == 'identidad' or self.tipo == 'telefono': if len(value) != self.longitud: raise ValidationError(u'%s tiene un formato inválido, mostrando %s caracteres' % (value, len(value))) if not value.isdigit(): raise ValidationError(u'%s tiene un formato inválido, mostrando letras u otro caracter especial' % (value))
[ "kshernandez90@gmail.com" ]
kshernandez90@gmail.com
da483124b36bb618910d48b9e3a6e5772152e440
d1a974067558d0a9fd1c069bf93097fd2b9a09fb
/blockchain_simple/flask/app/main.py
4a430007ae36a23f9119b88bbf9f5a29b46a4c29
[]
no_license
vainia/Blockchain-basic-implementation
c5c1537277b4c26de842483c410751c8c17ccf0f
1efd4fcdead62aaad07c9a84877dbf7cd2d300c7
refs/heads/master
2020-03-11T03:54:26.240137
2018-04-16T16:32:25
2018-04-16T16:32:25
129,762,386
0
0
null
null
null
null
UTF-8
Python
false
false
676
py
from flask import Flask, render_template, redirect, url_for, request from block import * app = Flask(__name__) @app.route('/', methods=['POST', 'GET']) def index(): if request.method == 'POST': lender = request.form['lender'] amount = request.form['amount'] borrower = request.form['borrower'] write_block(name=lender, amount=amount, to_whom=borrower) return redirect(url_for('index')) return render_template('index.html') @app.route('/checking', methods=['GET']) def check(): results = check_integrity() return render_template('index.html', results=results) if __name__ == '__main__': app.run(debug=True)
[ "Vanias2806@gmail.com" ]
Vanias2806@gmail.com
e09680852580fbe4e07aabb37417546e41e46762
5a5cfae120184e460ea79967a7acdc7b88cf6db7
/GCC-paddle/gcc/models/mpnn.py
5d6b2d7f1631700500fb520a08fd5c0b762b81e0
[ "Apache-2.0", "MIT" ]
permissive
xmy0916/Contrib
f3186d9240a7b43ef032319b6f0d90223d7a9879
0959d9c440d6b9f8b3701a887c6ac78b346c2638
refs/heads/master
2023-06-30T22:12:37.538889
2021-08-13T04:49:11
2021-08-13T04:49:11
380,311,934
0
0
Apache-2.0
2021-06-25T17:33:32
2021-06-25T17:33:31
null
UTF-8
Python
false
false
3,602
py
#!/usr/bin/env python # coding: utf-8 # pylint: disable=C0103, C0111, E1101, W0612 """Implementation of MPNN model.""" import dgl import paddorch as torch import paddorch.nn as nn import paddorch.nn.functional as F from dgl.nn.paddorch import NNConv class UnsupervisedMPNN(nn.Module): """ MPNN from `Neural Message Passing for Quantum Chemistry <https://arxiv.org/abs/1704.01212>`__ Parameters ---------- node_input_dim : int Dimension of input node feature, default to be 15. edge_input_dim : int Dimension of input edge feature, default to be 15. output_dim : int Dimension of prediction, default to be 12. node_hidden_dim : int Dimension of node feature in hidden layers, default to be 64. edge_hidden_dim : int Dimension of edge feature in hidden layers, default to be 128. num_step_message_passing : int Number of message passing steps, default to be 6. num_step_set2set : int Number of set2set steps num_layer_set2set : int Number of set2set layers """ def __init__( self, output_dim=32, node_input_dim=32, node_hidden_dim=32, edge_input_dim=32, edge_hidden_dim=32, num_step_message_passing=6, lstm_as_gate=False, ): super(UnsupervisedMPNN, self).__init__() self.num_step_message_passing = num_step_message_passing self.lin0 = nn.Linear(node_input_dim, node_hidden_dim) edge_network = nn.Sequential( nn.Linear(edge_input_dim, edge_hidden_dim), nn.ReLU(), nn.Linear(edge_hidden_dim, node_hidden_dim * node_hidden_dim), ) self.conv = NNConv( in_feats=node_hidden_dim, out_feats=node_hidden_dim, edge_func=edge_network, aggregator_type="sum", ) self.lstm_as_gate = lstm_as_gate if lstm_as_gate: self.lstm = nn.LSTM(node_hidden_dim, node_hidden_dim) else: self.gru = nn.GRU(node_hidden_dim, node_hidden_dim) def forward(self, g, n_feat, e_feat): """Predict molecule labels Parameters ---------- g : DGLGraph Input DGLGraph for molecule(s) n_feat : tensor of dtype float32 and shape (B1, D1) Node features. B1 for number of nodes and D1 for the node feature size. 
e_feat : tensor of dtype float32 and shape (B2, D2) Edge features. B2 for number of edges and D2 for the edge feature size. Returns ------- res : Predicted labels """ out = F.relu(self.lin0(n_feat)) # (B1, H1) h = out.unsqueeze(0) # (1, B1, H1) c = torch.zeros_like(h) for i in range(self.num_step_message_passing): m = F.relu(self.conv(g, out, e_feat)) # (B1, H1) if self.lstm_as_gate: out, (h, c) = self.lstm(m.unsqueeze(0), (h, c)) else: out, h = self.gru(m.unsqueeze(0), h) out = out.squeeze(0) return out if __name__ == "__main__": model = UnsupervisedMPNN() print(model) g = dgl.DGLGraph() g.add_nodes(3) g.add_edges([0, 0, 1], [1, 2, 2]) g.ndata["pos_directed"] = torch.rand(3, 16) g.ndata["pos_undirected"] = torch.rand(3, 16) g.ndata["seed"] = torch.zeros(3, dtype=torch.long) g.ndata["nfreq"] = torch.ones(3, dtype=torch.long) g.edata["efreq"] = torch.ones(3, dtype=torch.long) y = model(g) print(y.shape) print(y)
[ "544062970@qq.com" ]
544062970@qq.com
229ab3cd33fbd1e29c42bba3c8a2e7a302e7f02d
1d14ee9da00fce008bac63296e341ffa7af21104
/hackaton_app/migrations/0001_initial.py
ed3194088c5ad5f2f611b81dbef0f1b27c535b3c
[]
no_license
anycode-pk/hackaton
7d9cee6dee95d392c6a135bd7d295dcb069e90ad
7295f5cd73999e97a0fcafb2c204bd874d8777b1
refs/heads/main
2023-07-27T03:49:48.482706
2020-11-21T19:45:47
2020-11-21T19:45:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
781
py
# Generated by Django 3.1.3 on 2020-11-20 22:02 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image', models.ImageField(default='profile/default.jpg', upload_to='profile/')), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
[ "jedrzej@matuszewski.co" ]
jedrzej@matuszewski.co
93871eb67de2634e959d2d28db571c1675f27f9d
30a3e20ce71db816c3d94570f07da86066112eaa
/URI_1052.py
0527b78fda4a6bfb36d1748462620833ea6bcaf9
[]
no_license
JorgeLJunior/Exercicios-do-URI
0fead39533e1c4f07c9c092ab53053213f2e8359
cbb080f7d1d5b4f65fa39488626360a9a9e53d08
refs/heads/main
2023-02-19T22:47:10.350709
2021-01-22T03:18:34
2021-01-22T03:18:34
322,184,393
0
0
null
null
null
null
UTF-8
Python
false
false
233
py
mes = int(input()) calendario = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'} print(calendario[mes])
[ "jorge.limajr@outlook.com" ]
jorge.limajr@outlook.com
0e9096e4b0553691cf5b1f21edf9dbdd5345cd3b
dfcb65de02953afaac24cc926ee32fcdede1ac21
/src/pyrin/database/paging/paginator.py
080a42e2616f9e7b4b8a7043c65bb37c92b2c6a9
[ "BSD-3-Clause" ]
permissive
mononobi/pyrin
031d0c38da945b76b07ea100554ffc7f8081b05e
9d4776498225de4f3d16a4600b5b19212abe8562
refs/heads/master
2023-08-31T03:56:44.700142
2023-08-20T22:20:06
2023-08-20T22:20:06
185,481,041
20
8
null
null
null
null
UTF-8
Python
false
false
14,011
py
# -*- coding: utf-8 -*- """ database paging paginator module. """ from copy import deepcopy from abc import abstractmethod from collections import OrderedDict from flask import url_for import pyrin.configuration.services as config_services import pyrin.database.paging.services as paging_services import pyrin.security.session.services as session_services from pyrin.core.structs import CoreObject from pyrin.core.exceptions import CoreNotImplementedError from pyrin.database.orm.sql.schema.globals import BIG_INTEGER_MAX from pyrin.database.paging.exceptions import PageSizeLimitError, TotalCountIsAlreadySetError class PaginatorBase(CoreObject): """ paginator base class. """ @abstractmethod def next(self): """ gets the next page number. returns None if there is no next page. :raises CoreNotImplementedError: core not implemented error. :rtype: int """ raise CoreNotImplementedError() @abstractmethod def previous(self): """ gets the previous page number. returns None if there is no previous page. :raises CoreNotImplementedError: core not implemented error. :rtype: int """ raise CoreNotImplementedError() @abstractmethod def inject_paging_keys(self, values, **options): """ injects paging keys into given values from given inputs. :param dict values: dict values to inject paging keys into it. :raises CoreNotImplementedError: core not implemented error. """ raise CoreNotImplementedError() @abstractmethod def paginate(self, items, **options): """ paginates the given items. it returns a tuple of two values, first value is a list of items to be returned to client, and second value is a dict of metadata to be injected into client response. :param list items: items to be paginated. :raises CoreNotImplementedError: core not implemented error. :returns: tuple[list items, dict metadata] :rtype: tuple[list, dict] """ raise CoreNotImplementedError() @abstractmethod def has_next(self, count, *args, **options): """ gets a value indicating that there is a next page available. 
it returns a tuple of two items. first item is a boolean indicating that there is a next page and the second item is the number of excess items that must be removed from end of items. :param int count: count of current items. :raises CoreNotImplementedError: core not implemented error. :returns: tuple[bool has_next, int excess] :rtype: tuple[bool, int] """ raise CoreNotImplementedError() @abstractmethod def has_previous(self, count, *args, **options): """ gets a value indicating that there is a previous page available. it returns a tuple of two items. first item is a boolean indicating that there is a previous page and the second item is the number of excess items that must be removed from beginning of items. :param int count: count of current items. :raises CoreNotImplementedError: core not implemented error. :returns: tuple[bool has_previous, int excess] :rtype: tuple[bool, int] """ raise CoreNotImplementedError() def copy(self): """ returns a deep copy of this instance :rtype: PaginatorBase """ return deepcopy(self) @property @abstractmethod def current_page(self): """ gets current page number. :raises CoreNotImplementedError: core not implemented error. :rtype: int """ raise CoreNotImplementedError() @property @abstractmethod def current_page_size(self): """ gets current page size. :raises CoreNotImplementedError: core not implemented error. :rtype: int """ raise CoreNotImplementedError() @property @abstractmethod def total_count(self): """ gets the total count of items in all pages. :raises CoreNotImplementedError: core not implemented error. :rtype: int """ raise CoreNotImplementedError() @total_count.setter @abstractmethod def total_count(self, value): """ sets the total count of items in all pages. :param int value: total count to be set. :raises CoreNotImplementedError: core not implemented error. """ raise CoreNotImplementedError() class SimplePaginator(PaginatorBase): """ simple paginator class. page numbers start from 1. 
it does not emit any extra queries to database to fetch count or like that. the only limitation is that it could not detect previous page in `last_page + 1` page. """ def __init__(self, endpoint, **options): """ initializes an instance of SimplePaginator. :param str endpoint: endpoint of route. :keyword int page_size: default page size. if not provided, it will be get from `default_page_size` of `database` config store. :keyword int max_page_size: max allowed page size. if not provided, it will be get from `max_page_size` of `database` config store. :raises PageSizeLimitError: page size limit error. """ super().__init__() global_max_page_size = config_services.get('database', 'paging', 'max_page_size') max_page_size = options.get('max_page_size') if max_page_size is None or max_page_size < 1: max_page_size = global_max_page_size if max_page_size > global_max_page_size: raise PageSizeLimitError('Max page size [{max}] is bigger than global max page ' 'size which is [{global_max}] on endpoint [{endpoint}].' .format(max=max_page_size, global_max=global_max_page_size, endpoint=endpoint)) page_size = options.get('page_size') default_page_size = config_services.get('database', 'paging', 'default_page_size') if page_size is None or page_size < 1: page_size = min(default_page_size, max_page_size) if page_size > max_page_size: raise PageSizeLimitError('Page size [{page_size}] is bigger than max page size ' 'which is [{max}] on endpoint [{endpoint}].' .format(page_size=page_size, max=max_page_size, endpoint=endpoint)) self._page_size = page_size self._max_page_size = max_page_size self._endpoint = endpoint self._limit = None self._offset = None self._current_page = None self._current_page_size = None self._has_next = False self._has_previous = False self._total_count = None def _url_for(self, page, page_size): """ gets the url for given page number. :param int page: page number to generate its url. :param int page_size: page size. 
:rtype: str """ request = session_services.get_current_request() options = OrderedDict() options.update(request.get_all_query_strings()) options.update(paging_services.generate_paging_params(page, page_size)) options.update(request.view_args or {}) options.update(_method=request.method) return url_for(self._endpoint, **options) def has_next(self, count, **options): """ gets a value indicating that there is a next page available. it returns a tuple of two items. first item is a boolean indicating that there is a next page and the second item is the number of excess items that must be removed from end of items. :param int count: count of current items. :returns: tuple[bool has_next, int excess] :rtype: tuple[bool, int] """ # the original limit is always 2 less than the current limit. excess = count - (self._limit - 2) if excess <= 0: self._has_next = False return self._has_next, 0 if self._current_page == 1: self._has_next = excess > 0 return self._has_next, excess else: self._has_next = excess > 1 return self._has_next, excess - 1 def has_previous(self, count, **options): """ gets a value indicating that there is a previous page available. it returns a tuple of two items. first item is a boolean indicating that there is a previous page and the second item is the number of excess items that must be removed from beginning of items. :param int count: count of current items. :returns: tuple[bool has_previous, int excess] :rtype: tuple[bool, int] """ # at any page, if there is a count > 0, it means that there is a previous # page available. because the first item is from the previous page. if count <= 0 or self._current_page == 1: self._has_previous = False return self._has_previous, 0 self._has_previous = True return self._has_previous, 1 def next(self): """ gets the next page number. returns None if there is no next page. 
:rtype: int """ if self._has_next is True: return self._url_for(self._current_page + 1, self._current_page_size) return None def previous(self): """ gets the previous page number. returns None if there is no previous page. :rtype: int """ if self._has_previous is True: return self._url_for(self._current_page - 1, self._current_page_size) return None def inject_paging_keys(self, values, **options): """ injects paging keys into given values from given inputs. :param dict values: dict values to inject paging keys into it. :keyword int page: page number. :keyword int page_size: page size. """ page, page_size = paging_services.get_paging_params(**options) if page is None or not isinstance(page, int) or page < 1: page = 1 if page > BIG_INTEGER_MAX: page = BIG_INTEGER_MAX if page_size is None or not isinstance(page_size, int) or page_size < 1: page_size = self._page_size elif page_size > self._max_page_size: page_size = self._max_page_size # we increase limit by 2 to be able to detect if there is a next and previous page. # the extra items will not be returned to client. self._limit = page_size + 2 offset = page - 1 extra_offset = offset * page_size if extra_offset > BIG_INTEGER_MAX: extra_offset = BIG_INTEGER_MAX if extra_offset > 0: # we decrease offset by 1 to be able to detect if there is a previous page. # the extra item will not be returned to client. extra_offset = extra_offset - 1 self._offset = extra_offset self._current_page = page self._current_page_size = page_size paging_services.inject_paging_keys(self._limit, self._offset, values) def paginate(self, items, **options): """ paginates the given items. it returns a tuple of two values, first value is a list of items to be returned to client, and second value is a dict of metadata to be injected into client response. :param list items: items to be paginated. 
:returns: tuple[list items, dict metadata] :rtype: tuple[list, dict] """ metadata = OrderedDict() count = len(items) result = items has_next, excess_end = self.has_next(count) has_previous, excess_first = self.has_previous(count) if has_next is True: result = result[:-excess_end] count = count - excess_end if has_previous is True: result = result[excess_first:] count = count - excess_first next_url = self.next() previous_url = self.previous() if self.total_count is not None: metadata.update(count_total=self.total_count) metadata.update(count=count, next=next_url, previous=previous_url) return result, metadata @property def current_page(self): """ gets current page number. :rtype: int """ return self._current_page @property def current_page_size(self): """ gets current page size. :rtype: int """ return self._current_page_size @property def total_count(self): """ gets the total count of items in all pages. :rtype: int """ return self._total_count @total_count.setter def total_count(self, value): """ sets the total count of items in all pages. :param int value: total count to be set. :raises TotalCountIsAlreadySetError: total count is already set error. """ if self._total_count is not None: raise TotalCountIsAlreadySetError('Total count for paginator is already ' 'set and could not be overwritten in ' 'current request.') self._total_count = value
[ "mohamadnobakht@gmail.com" ]
mohamadnobakht@gmail.com
2d88af4f8e19ec8a0e08f27224e0b1fae9da8459
f71c9b3549c7e4f7ef9cae54638beb5d2882907e
/unittesting/example.py
664f2da33efbdad12d1c99d2e0ce394aca9b8cdf
[]
no_license
sparrowV/hands_on_python
8c0f7afface7f9d67a104d22cabe30bfdbc77ee4
011270ebd3c3238f9f0399ee53be6803a0049130
refs/heads/master
2020-06-25T04:14:01.544269
2019-08-04T16:49:47
2019-08-04T16:49:47
199,197,821
0
0
null
null
null
null
UTF-8
Python
false
false
1,916
py
import unittest class Client: def __init__(self,username): self.username = username self.balance = 0 def get_username(self): return self.username def get_balance(self): return self.balance def add_funds(self,amount): self.balance+=amount def withdraw_funds(self,amount): if(amount > self.balance): raise Exception else: self.balance-=amount def transfer_to_another_account(self,amount,clientTo): if(self.balance < amount): raise Exception else: self.withdraw_funds(amount) clientTo.add_funds(amount) class TestClient(unittest.TestCase): def setUp(self): mark = Client('Mark') tom = Client('Tom') self.mark = mark self.tom = tom #possible extension is to add another client, for example tim, and test transef_to_another_account method with triple transaction def test_get_balance(self): self.assertEqual(self.mark.get_balance(),0) self.assertTrue(self.tom.get_balance() == 0) def test_add_funds(self): self.mark.add_funds(500) self.assertNotEqual(self.mark.get_balance(),0) self.assertEqual(self.mark.get_balance(),500) def test_withdraw_funds(self): self.assertRaises(Exception,self.mark.withdraw_funds,20) self.mark.add_funds(400) self.mark.withdraw_funds(300) self.assertTrue(self.mark.get_balance() == 100) def test_transfer_to_another_account(self): self.mark.add_funds(1000) self.assertRaises(Exception,self.mark.transfer_to_another_account,2000,self.tom) self.mark.transfer_to_another_account(200,self.tom) self.assertEqual(self.mark.get_balance(), 800) self.assertTrue(self.tom.get_balance() == 200) #extension here for triple transaction
[ "odval14@freeuni.edu.ge" ]
odval14@freeuni.edu.ge
193b820c117f4fc7ef54473263f67998cec53e72
882a8801528a11488315df86838975ded3e107b8
/app/user_db_model.py
f6eaa8fde0934974592c88afc93c772ef82454be
[]
no_license
christianbueno1/authentication
eca934a66e6d8c6b77f3a82fbf0bf212e7ffe0b1
e4f9df7cfda92a7320f40fb5542e355fd4471b66
refs/heads/main
2023-07-13T07:27:13.436463
2021-08-19T15:38:22
2021-08-19T15:38:22
397,745,530
0
0
null
null
null
null
UTF-8
Python
false
false
6,004
py
# import mariadb # from flask import current_app, g from .user_model import User # from .db_model import get_db, mariadb, get_db1 from .db_model import mariadb, get_db1 # from app.user_model import User import sys # try: # conn = get_db() # cur = conn.cursor() # query = "select * from account" # cur.execute(query) # for row in cur: # print(f"{row}") # except mariadb.Error as e: # print(f"error database, auth.py: {e}") # sys.exit(1) #cur.fetchmany() #default 1 row #cur.fetchmany(5) #crud: create, retrieve, update, delete #from list of tuples, fetchall #to list of User class def to_user_lst(list_): user_lst = [] for tuple_ in list_: user_obj = tpl_to_user_obj(tuple_) user_lst.append(user_obj) return user_lst #print all user #from list of user class def print_user_lst(user_lst): for user_obj in user_lst: print(f"{user_obj}") print() def tpl_to_user_obj(user_tpl): # user_dict = dict( # id = tuple_[0], # username = tuple_[1], # email = tuple_[2], # password = tuple_[3], # fname = tuple_[4], # lname = tuple_[5], # dob = tuple_[6] # ) # user_obj = User(**user_dict) user_obj = User( id=user_tpl[0], username=user_tpl[1], email=user_tpl[2], password=user_tpl[3], fname=user_tpl[4], lname=user_tpl[5], dob=user_tpl[6] ) return user_obj #retrieve #return a list of user class #if doesn't find any return None def get_all_users(): try: conn = get_db1() #use connection cur = conn.cursor() query = "select * from account" cur.execute(query) fetchall_lst = cur.fetchall() except mariadb.Error as e: print(f"get_all_users method: {e}") #sys.exit(0) #ok # database issues, I use exit code 1 sys.exit(1) finally: #close connection cur.close() conn.close() if not fetchall_lst: return None user_lst = to_user_lst(fetchall_lst) return user_lst #retrieve #Return a User object, if doens't find one return None #when resulset is empty fetchone() return None def get_user(id): try: conn = get_db1() # use connection cur = conn.cursor() query = "select * from account where id=?" 
# #id as tuple cur.execute(query, (id,)) #return a tuple user_tpl = cur.fetchone() except mariadb.Error as e: print(f"Error connecting to mariadb platform: {e}") sys.exit(1) finally: #close connection cur.close() conn.close() if not user_tpl: return None user_obj = tpl_to_user_obj(user_tpl) return user_obj #create # arguments required:username, email, password # def insert_user(cur, user_obj): def insert_user(user_obj): try: #use connection conn = get_db1() cur = conn.cursor() #spanning string over multiple lines #triple quotes #ignore end of lines with \ query = """\ insert into account \ (username, email, password, fname, lname, dob) \ values \ (?,?,?,?,?,?)""" # query = "insert into account (username, email, password, fname, lname, dob) values (?,?,?,?,?,?)" cur.execute(query, (user_obj.username, user_obj.email, user_obj.password, user_obj.fname, user_obj.lname, user_obj.dob)) conn.commit() except mariadb.Error as e: print(f"Error connecting to mariadb platform: {e}") sys.exit(1) finally: cur.close() conn.close() # def insert_user2(user_obj): # try: # conn = get_db1() # cur = conn.cursor() # query = "insert into account (username, email, password, fname, lname, dob) values (?,?,?,?,?,?)" # cur.execute(query, (user_obj.username, user_obj.email, user_obj.password, user_obj.fname, user_obj.lname, user_obj.dob)) # conn.commit() # except mariadb.Error as e: # print(f"Error connecting to mariadb platform: {e}") # sys.exit(1) # finally: # cur.close() # conn.close() #update def update_user(id, new_fname, new_lname, new_dob): try: conn = get_db1() cur = conn.cursor() query = "update account set fname=?, lname=?, dob=? where id=?" 
cur.execute(query, (new_fname, new_lname, new_dob, id)) conn.commit() except mariadb.Error as e: print(f"Error connecting to mariadb platform: {e}") sys.exit(1) finally: cur.close() conn.close() #change username or create username #verify availability of username #if there is no username return None def get_by_username(username): try: conn = get_db1() cur = conn.cursor() query = "select * from account where username=?" cur.execute(query, (username,)) user_tpl = cur.fetchone() except mariadb.Error as e: print(f"Error connecting to mariadb platform: {e}") sys.exit(1) finally: cur.close() conn.close() if not user_tpl: return None user_obj = tpl_to_user_obj(user_tpl) return user_obj # try: # conn = get_db1() # cur = conn.cursor() # except mariadb.Error as e: # print(f"Error connecting to mariadb platform: {e}") # sys.exit(1) # finally: # cur.close() # conn.close() def get_by_email(email): try: conn = get_db1() cur = conn.cursor() query = "select * from account where email=?" cur.execute(query, (email,)) user_tpl = cur.fetchone() except mariadb.Error as e: print(f"Error connecting to mariadb platform: {e}") sys.exit(1) finally: cur.close() conn.close() if not user_tpl: return None user_obj = tpl_to_user_obj(user_tpl) return user_obj
[ "chmabuen@espol.edu.ec" ]
chmabuen@espol.edu.ec
46cb60525d116aa86c173a0f529208516f17a224
ac4988c7f9cc98f6dcc31280cd208eeb57d674f2
/Semana6Hackaton/Bryan Arias/app/factura.py
63e6bd34aad5fe4a203ddd457fd9364ab168cb22
[]
no_license
BrucePorras/PachaQTecMayo2020-1
77e396ceb2619d028dd740b9414f8a3160eee2cd
5f39146cc4f69d844dae22a12098479194fd6332
refs/heads/master
2022-12-18T02:20:39.828175
2020-09-25T05:39:20
2020-09-25T05:39:20
269,999,646
1
0
null
2020-06-06T14:06:19
2020-06-06T14:06:18
null
UTF-8
Python
false
false
398
py
import utils class factura: __log = utils.log("Factura") def __init__(self, idCabecera, nombreEmp, nombreClie, tipo, igv, subtotal, total, fecha): self.idCabecera = idCabecera self.nombreEmp = nombreEmp self.nombreClie = nombreClie self.tipo = tipo self.igv = igv self.subtotal = subtotal self.total = total self.fecha = fecha
[ "bryan.arias@gmail.com" ]
bryan.arias@gmail.com
45f7f92941db093e3f28fa8b2cda70c0db1c2242
587fd701181b8ff4899346b6b7cf670f23eeaa43
/short_link/migrations/0005_delete_myclass.py
c779ecc797b3bed89190f085ccee64374e0493e0
[]
no_license
marxjoy/link_shortener
4855ac9c63d00d3263f20d3efd70a0da74710aea
a45d3cee8f60ed14ec78840a1b0d8feda72ce12a
refs/heads/master
2020-09-06T10:22:25.092174
2019-12-10T15:16:20
2019-12-10T15:16:20
220,398,448
2
0
null
2020-06-06T00:32:08
2019-11-08T06:08:25
Python
UTF-8
Python
false
false
288
py
# Generated by Django 2.2.5 on 2019-11-12 20:33 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('short_link', '0004_myclass'), ] operations = [ migrations.DeleteModel( name='MyClass', ), ]
[ "jooy@interia.pl" ]
jooy@interia.pl
ed3f414331ee66079a652461818ffb92723a1524
43eb30a565459ed89a761b41aad88575a49a9c02
/send_device_info.py
6fb0aa159ed083fe6ff478df66d3cbd27ea26844
[]
no_license
maedward/raspberry_pi_wifi_counting
7bb1e2e388ecfacd86c9beb5851601aae69a4749
ce7de8d4998d47ad4f1cd36492c2a4cd62a6f78e
refs/heads/master
2021-01-21T08:29:12.716549
2017-09-19T16:29:25
2017-09-19T16:29:25
58,982,316
0
0
null
null
null
null
UTF-8
Python
false
false
928
py
from utils import * from config import * import urllib2, json #post data to server def post_device_info(data): url = ADD_DEVICE_INFO_API json_data = json.dumps(data) req = urllib2.Request(url, json_data, {'Content-Type': 'application/json'}) f = urllib2.urlopen(req) response = f.read() f.close() print response def create_device_info(): device_info = {} device_info['internal_ip'] = get_internal_ip() device_info['external_ip'] = get_external_ip() device_info['mac'] = get_mac() device_info['router_mac'] = get_router_mac() device_info['lat'], device_info['lon'] = get_location() device_info['city'] = get_info()['city'] device_info['country'] = get_info()['country'] device_info['isp'] = get_info()['isp'] device_info['version'] = VERSION print device_info return device_info def send_device_info(): post_device_info(create_device_info())
[ "edward@motherapp.com" ]
edward@motherapp.com
9b8d8db69441c178ebe4c8457fbfd03bf5071976
6323f775323145034902af26cb3204fa184dede6
/plugin.py
a187b0e4f14ebce99c109a4a498e2198bc6fb745
[ "MIT" ]
permissive
enix403/ST-NewFilePrompt
d5704aad206cb50da642ac68bb8daff889406b1c
374a7a2a66019a038c49037ef74963f8f757a538
refs/heads/master
2023-08-11T21:54:42.402286
2021-09-10T18:25:26
2021-09-10T18:25:26
405,176,301
2
0
null
null
null
null
UTF-8
Python
false
false
1,670
py
import os import sublime import sublime_plugin from . import bracex def reveal_in_sidebar(self): self.window.run_command("reveal_in_side_bar") class MakeNewFileCommand(sublime_plugin.WindowCommand): def on_path_entered(self, location_str): locations_str = location_str.rstrip("/") locations = bracex.expand(location_str) for location in locations: if os.path.exists(location): sublime.error_message("NewFilePrompt: Path already exists") return base, _ = os.path.split(location) if not os.path.exists(base): os.makedirs(base) for location in locations: # create the file open(location, "a").close() self.window.run_command('hide_panel') self.window.open_file(locations[0]) sublime.set_timeout_async(lambda: reveal_in_sidebar(self), 250) def run(self, dirs): self.window.show_input_panel("File Location:", dirs[0] + "/", self.on_path_entered, None, None) class MakeNewFolderCommand(sublime_plugin.WindowCommand): def on_path_entered(self, location_str): locations = bracex.expand(location_str) for location in locations: if os.path.exists(location): sublime.error_message("NewFilePrompt: Path already exists") return for location in locations: # create the folder os.makedirs(location) self.window.run_command('hide_panel') def run(self, dirs): self.window.show_input_panel("Folder Location:", dirs[0] + "/", self.on_path_entered, None, None)
[ "fog.code000@gmail.com" ]
fog.code000@gmail.com
6706585dcfbee4954d6b7eb34ece330e2c07a2c7
cf08c4741b494384b898e485068ca3e5a23787d4
/heirarchicalData/urls.py
dc48d650ac4f208e0a57f5e9342e1aac96aeb794
[]
no_license
jontaylor224/heirarchical-data-demo
08538e12d66f54ecd1815c24b4e1851191124699
bdbff9297054b05c166fb3f4092e9fba95767b57
refs/heads/master
2021-09-25T04:45:38.168706
2020-02-14T03:30:49
2020-02-14T03:30:49
240,099,914
0
0
null
2021-09-22T18:37:40
2020-02-12T19:42:58
Python
UTF-8
Python
false
false
1,161
py
"""heirarchicalData URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from heirarchicalData.dataApp import views as data_views urlpatterns = [ path('admin/', admin.site.urls), path('', data_views.show_files, name='homepage'), path('adduser/', data_views.add_user, name='adduser'), path('addfile/', data_views.add_file, name='addfile'), path('login/', data_views.login_view, name='login'), path('logout/', data_views.logout_view, name='logout'), path('signup/', data_views.add_user, name='signup'), ]
[ "jontaylor224@gmail.com" ]
jontaylor224@gmail.com
c60c536109f8eaa5d756b109cf100d08bcfa650c
4210822afbe738322d3143e26110e8ad6d8fa22e
/lanchonete-ambrosina.py
845502f1daee95a0923dbc431027dd2a1afc8ff4
[]
no_license
ThiagoMatsumoto/thehuxley
45c548d59f42ef586a7838dfac5a8e9fbea9bd39
84b630e3c8c6f85d673785490d9c541929cac065
refs/heads/master
2021-02-03T22:19:16.161220
2020-02-27T20:57:17
2020-02-27T20:57:17
243,556,369
0
0
null
null
null
null
UTF-8
Python
false
false
1,144
py
""" sequencia de input: Produtos: numero de produtos a serem alterados código do produto descrição do produto preço do produto ... Pedido: código do produto quantidade a ser pedida ... 0 -> finaliza o pedido Output: multiplica as quantidades dos produtos com seus respectivos preços e soma-os. Loop para armazenar os produtos. Loop para comparar o pedido com o produto. """ n = int(input()) codProdutos = [] descProdutos = [] precoProdutos = [] totalFinal = 0 for i in range(n): codigo = int(input()) codProdutos.append(codigo) descricao = input() descProdutos.append(descricao) preco = float(input()) precoProdutos.append(preco) codigoPedido = 1 #mantem o pedido em aberto while True: codigoPedido = int(input()) if codigoPedido == 0: #se o input do usuário for 0, para o loop break totalPedido = float(input()) for i in range(n): if (codigoPedido == codProdutos[i] and totalPedido > 0) : totalFinal += (totalPedido * precoProdutos[i]) print("%.2f"%totalFinal)
[ "noreply@github.com" ]
ThiagoMatsumoto.noreply@github.com
074b3290a71e928f37d8a52a56620b6ef2839347
604094e4d3689c1d67ce67d3132315511904cc27
/app/root/helper.py
1c3d667447b5c65107abe65da5d7ef4437c4119e
[]
no_license
sychsergiy/TableEditor
8f2d57d4d8b4433446c6c0aabd68a8f70c41151f
40cf8cf82f98aa66233e8670ccf8fe8650cb5bce
refs/heads/master
2021-09-01T19:21:55.196667
2017-12-28T12:11:08
2017-12-28T12:11:08
114,454,150
0
1
null
null
null
null
UTF-8
Python
false
false
1,605
py
class DataHelper(object): def __init__(self, string_data): data = self.parse_data(string_data) self.data = self.normalize_initial_data(data) @staticmethod def normalize_initial_data(data): cols_n = max((len(row) for row in data)) for row in data: while len(row) < cols_n: row.append('') return data def insert_empty_rows(self, n, begin_index): for i in range(n): self.data.insert(begin_index, ['']*self.get_cols_n()) def insert_empty_cols(self, n, begin_index): for row in self.data: for i in range(n): row.insert(begin_index, '') def update_cell(self, value, row_index, col_index): self.data[row_index][col_index] = value def get_cols_n(self): return len(self.data[0]) def get_rows_n(self): return len(self.data) def sort_by_column(self, index, reverse=False): self.data.sort(key=lambda x: x[index], reverse=reverse) def remove_rows(self, n, begin_index): possible_n = self.get_rows_n() if n >= possible_n: n = possible_n - 1 self.data = self.data[:begin_index] + self.data[begin_index+n:] def remove_cols(self, n, begin_index): possible_n = self.get_cols_n() if n >= possible_n: n = possible_n - 1 for i, row in enumerate(self.data): self.data[i] = row[:begin_index] + row[begin_index+n:] @staticmethod def parse_data(string_data): return [row.split('\t') for row in string_data.split('\n')]
[ "sychsergiy@gmail.com" ]
sychsergiy@gmail.com
47c3be1644c3b304105e0c662dc9f38ee860d001
9ecb6a1d3a71e7f87f3784af6b808f23a2abe348
/drlhp/show_prefs.py
4227c4f161eda839c7a5c5661322f9b2b12658a5
[]
no_license
HumanCompatibleAI/interactive-behaviour-design
13ae305b39d29595e8fd5907f8d9e9fa6c2efc16
226db7a55d64ce15edfb8d7b3352c7bf7b81b533
refs/heads/master
2020-05-02T16:54:02.232639
2019-08-08T14:29:11
2019-08-08T14:29:11
178,082,205
0
1
null
null
null
null
UTF-8
Python
false
false
1,945
py
#!/usr/bin/env python3 """ Display examples of the specified preference database (with the less-preferred segment on the left, and the more-preferred segment on the right) (skipping over equally-preferred segments) """ import argparse import pickle from multiprocessing import freeze_support import numpy as np from utils import VideoRenderer def main(): parser = argparse.ArgumentParser() parser.add_argument("prefs", help=".pkl.gz file") args = parser.parse_args() with open(args.prefs, 'rb') as pkl_file: print("Loading preferences from '{}'...".format(args.prefs), end="") prefs = pickle.load(pkl_file) print("done!") print("{} preferences found".format(len(prefs))) print("(Preferred clip on the left)") v = VideoRenderer(zoom=2, mode=VideoRenderer.restart_on_get_mode) q = v.vid_queue prefs = prefs[0] # The actual pickle file is a tuple of test, train DBs for k1, k2, pref in prefs.prefs: pref = tuple(pref) if pref == (0.0, 1.0) or pref == (0.5, 0.5): s1 = np.array(prefs.segments[k2]) s2 = np.array(prefs.segments[k1]) elif pref == (1.0, 0.0): s1 = np.array(prefs.segments[k1]) s2 = np.array(prefs.segments[k2]) else: raise Exception("Unexpected preference", pref) print("Preference", pref) vid = [] height = s1[0].shape[0] border = np.ones((height, 10), dtype=np.uint8) * 128 for t in range(len(s1)): # -1 => select the last frame in the 4-frame stack f1 = s1[t, :, :, -1] f2 = s2[t, :, :, -1] frame = np.hstack((f1, border, f2)) vid.append(frame) n_pause_frames = 10 for _ in range(n_pause_frames): vid.append(np.copy(vid[-1])) q.put(vid) input() v.stop() if __name__ == '__main__': freeze_support() main()
[ "matthew.rahtz@gmail.com" ]
matthew.rahtz@gmail.com
b5644533f4814bf76a438d3f873511d94ae32cb7
ffedbe2d957677d65cb873d96482f1c94e74b988
/regs/depth/paragraph.py
4c7ad12fb53030d7ebe97823604eec9398cee496
[]
no_license
cmc333333/Depth-Parser
b7602c158b6cb75179af90b78af93f28e547a3d2
4332b8c51e8e7d44b68985b3845b300d251af536
refs/heads/master
2020-05-20T12:09:03.662019
2013-04-16T20:37:56
2013-04-16T20:37:56
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,421
py
import itertools import re from regs.depth import tree from regs.search import segments from regs.utils import roman_nums import string p_levels = [ list(string.ascii_lowercase), [str(i) for i in range(1,51)], list(itertools.islice(roman_nums(), 0, 50)), list(string.ascii_uppercase), # Technically, there's italics (alpha) and (roman), but we aren't # handling that yet ] class ParagraphParser(): def __init__(self, p_regex, inner_label_fn): """p_regex is the regular expression used when searching through paragraphs. It should contain a %s for the next paragraph 'part' (e.g. 'a', 'A', '1', 'i', etc.) inner_label_fn is a function which takes the current label, and the next paragraph 'part' and produces a new label.""" self.p_regex = p_regex self.inner_label_fn = inner_label_fn def matching_subparagraph_ids(self, p_level, paragraph): """Return a list of matches if this paragraph id matches one of the subparagraph ids (e.g. letter (i) and roman numeral (i).""" matches = [] for depth in range(p_level+1, len(p_levels)): for sub_id, sub in enumerate(p_levels[depth]): if sub == p_levels[p_level][paragraph]: matches.append((depth, sub_id)) return matches def best_start(self, text, p_level, paragraph, starts, exclude = []): """Given a list of potential paragraph starts, pick the best based on knowledge of subparagraph structure. Do this by checking if the id following the subparagraph (e.g. ii) is between the first match and the second. 
If so, skip it, as that implies the first match was a subparagraph.""" subparagraph_hazards = self.matching_subparagraph_ids(p_level, paragraph) starts = starts + [(len(text), len(text))] for i in range(1, len(starts)): _, prev_end = starts[i-1] next_start, _ = starts[i] s_text = text[prev_end:next_start] s_exclude = [(e_start + prev_end, e_end + prev_end) for e_start, e_end in exclude] is_subparagraph = False for hazard_level, hazard_idx in subparagraph_hazards: if self.find_paragraph_start_match(s_text, hazard_level, hazard_idx + 1, s_exclude): is_subparagraph = True if not is_subparagraph: return starts[i-1] def find_paragraph_start_match(self, text, p_level, paragraph, exclude=[]): """Find the positions for the start and end of the requested label. p_Level is one of 0,1,2,3; paragraph is the index within that label. Return None if not present. Does not return results in the exclude list (a list of start/stop indices). """ if len(p_levels) <= p_level or len(p_levels[p_level]) <= paragraph: return None match_starts = [(m.start(), m.end()) for m in re.finditer( self.p_regex % p_levels[p_level][paragraph], text)] match_starts = [(start, end) for start,end in match_starts if all([end < es or start > ee for es, ee in exclude])] if len(match_starts) == 0: return None elif len(match_starts) == 1: return match_starts[0] else: return self.best_start(text, p_level, paragraph, match_starts, exclude) def paragraph_offsets(self, text, p_level, paragraph, exclude = []): """Find the start/end of the requested paragraph. 
Assumes the text does not just up a p_level -- see build_paragraph_tree below.""" start = self.find_paragraph_start_match(text, p_level, paragraph, exclude) if start == None: return None id_start, id_end = start end = self.find_paragraph_start_match(text[id_end:], p_level, paragraph + 1, [(e_start - id_end, e_end - id_end) for e_start, e_end in exclude]) if end == None: end = len(text) else: end = end[0] + id_end return (id_start, end) def paragraphs(self, text, p_level, exclude = []): """Return a list of paragraph offsets defined by the level param.""" def offsets_fn(remaining_text, p_idx, exclude): return self.paragraph_offsets(remaining_text, p_level, p_idx, exclude) return segments(text, offsets_fn, exclude) def build_paragraph_tree(self, text, p_level = 0, exclude = [], label = tree.label("", [])): """ Build a dict to represent the text hierarchy. """ subparagraphs = self.paragraphs(text, p_level, exclude) if subparagraphs: body_text = text[0:subparagraphs[0][0]] else: body_text = text children = [] for paragraph, (start,end) in enumerate(subparagraphs): new_text = text[start:end] new_excludes = [(e[0] - start, e[1] - start) for e in exclude] new_label = self.inner_label_fn(label, p_levels[p_level][paragraph]) children.append(self.build_paragraph_tree(new_text, p_level + 1, new_excludes, new_label)) return tree.node(body_text, children, label)
[ "cm.lubinski@gmail.com" ]
cm.lubinski@gmail.com
6ca35b54b85397ccaf04ac052e0d15037aec1ced
8db5c21865bcf82697ff8528107e341e38211742
/config/settings/production.py
c0a0085109ca83cceb875329087ad2b5f3ccc2b9
[]
no_license
Jean-Zombie/headless-wagtail
84016abc7167e31cf8f7872e6aa62baf989ac578
5a3538e6154dcecb7ce3b97166427a554b5b55d5
refs/heads/main
2023-08-23T22:36:08.965963
2021-10-20T15:45:48
2021-10-20T15:45:48
416,701,139
0
0
null
null
null
null
UTF-8
Python
false
false
6,534
py
from django.core.management.utils import get_random_secret_key from .base import * # noqa # GENERAL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#secret-key SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", get_random_secret_key()) # https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts ALLOWED_HOSTS = os.getenv("DJANGO_ALLOWED_HOSTS", "127.0.0.1,localhost").split(",") ALLOWED_HOSTS.append("0.0.0.0") ALLOWED_HOSTS.append("127.0.0.1") # DATABASES # ------------------------------------------------------------------------------ # DATABASES["default"] = env.db("DATABASE_URL") # noqa F405, DB is hopefully correctly configured in 'base.py' # DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405 # DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405 # CACHES # ------------------------------------------------------------------------------ # Task for another day # CACHES = { # "default": { # "BACKEND": "django_redis.cache.RedisCache", # "LOCATION": env("REDIS_URL"), # "OPTIONS": { # "CLIENT_CLASS": "django_redis.client.DefaultClient", # # Mimicing memcache behavior. 
# # https://github.com/jazzband/django-redis#memcached-exceptions-behavior # "IGNORE_EXCEPTIONS": True, # }, # } # } # SECURITY # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") # https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect SECURE_SSL_REDIRECT = os.getenv("DJANGO_SECURE_SSL_REDIRECT", default=True) # https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure SESSION_COOKIE_SECURE = True # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure CSRF_COOKIE_SECURE = True # https://docs.djangoproject.com/en/dev/topics/security/#ssl-https # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds # TODO: set this to 60 seconds first and then to 518400 once you prove the former works SECURE_HSTS_SECONDS = 60 # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains SECURE_HSTS_INCLUDE_SUBDOMAINS = os.getenv( "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True ) # https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload SECURE_HSTS_PRELOAD = os.getenv("DJANGO_SECURE_HSTS_PRELOAD", default=True) # https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff SECURE_CONTENT_TYPE_NOSNIFF = os.getenv( "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True ) # STATIC # ------------------------ STATICFILES_STORAGE = "whitenoise.storage.CompressedStaticFilesStorage" # MEDIA # ------------------------------------------------------------------------------ # TEMPLATES # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#templates TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405 ( "django.template.loaders.cached.Loader", [ "django.template.loaders.filesystem.Loader", 
"django.template.loaders.app_directories.Loader", ], ) ] # EMAIL # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email DEFAULT_FROM_EMAIL = os.getenv( "DJANGO_DEFAULT_FROM_EMAIL", default="Headless Wagtail <noreply@example.com>" ) # https://docs.djangoproject.com/en/dev/ref/settings/#server-email SERVER_EMAIL = os.getenv("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL) # https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix EMAIL_SUBJECT_PREFIX = os.getenv( "DJANGO_EMAIL_SUBJECT_PREFIX", default="[Headless Wagtail]", ) # ADMIN # ------------------------------------------------------------------------------ # Admin URL regex. DJANGO_ADMIN_URL = os.getenv("DJANGO_ADMIN_URL", "") WAGTAIL_ADMIN_URL = os.getenv("WAGTAIL_ADMIN_URL", "") # Anymail # ------------------------------------------------------------------------------ # https://anymail.readthedocs.io/en/stable/installation/#installing-anymail INSTALLED_APPS += ["anymail"] # noqa F405 # https://docs.djangoproject.com/en/dev/ref/settings/#email-backend # https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference # https://anymail.readthedocs.io/en/stable/esps/mailjet/ EMAIL_BACKEND = "anymail.backends.mailjet.EmailBackend" ANYMAIL = { # "MAILJET_API_KEY": env("MAILJET_API_KEY"), # "MAILJET_SECRET_KEY": env("MAILJET_SECRET_KEY"), # "MAILJET_API_URL": env("MAILJET_API_URL", default="https://api.mailjet.com/v3"), } # LOGGING # ------------------------------------------------------------------------------ # https://docs.djangoproject.com/en/dev/ref/settings/#logging # See https://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. 
LOGGING = { "version": 1, "disable_existing_loggers": False, "filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}}, "formatters": { "verbose": { "format": "%(levelname)s %(asctime)s %(module)s " "%(process)d %(thread)d %(message)s" } }, "handlers": { "mail_admins": { "level": "ERROR", "filters": ["require_debug_false"], "class": "django.utils.log.AdminEmailHandler", }, "console": { "level": "DEBUG", "class": "logging.StreamHandler", "formatter": "verbose", }, }, "root": {"level": "INFO", "handlers": ["console"]}, "loggers": { "django.request": { "handlers": ["mail_admins"], "level": "ERROR", "propagate": True, }, "django.security.DisallowedHost": { "level": "ERROR", "handlers": ["console", "mail_admins"], "propagate": True, }, }, } # Your stuff... # ------------------------------------------------------------------------------
[ "22709399+Jean-Zombie@users.noreply.github.com" ]
22709399+Jean-Zombie@users.noreply.github.com
654a0f58e52cd94a5be02d22db84d06c0b44738b
8ab31b068f96ad0730cb2d1e1c705d6ae8dced28
/geocsv/__init__.py
32defc585a0fc603cc9c46993c1a52e9d4305d3b
[ "MIT" ]
permissive
poliquin/geocsv-to-mysql
e4d47ef2b1f07cfd7c9027713149bcb0187c5e45
f2417f06b9e5148a783abac8f6cab702789b596f
refs/heads/master
2021-03-16T09:12:54.795727
2018-08-03T18:21:35
2018-08-03T18:21:35
81,633,272
0
0
null
null
null
null
UTF-8
Python
false
false
2,462
py
import csv import sys import itertools from collections import OrderedDict from .coltypes import parse_coltype from .converters import convert_value def get_header_row(fpath, delim=',', lowercase=False): """Read first row of a CSV file.""" with open(fpath, 'r') as fh: rdr = csv.reader(fh, delimiter=delim) hdr = [i.strip() for i in next(rdr)] if lowercase: hdr = [i.lower() for i in hdr] return hdr def get_schema(fpath, csvt_path, delim=';'): """Get information on column names and types.""" colnames = get_header_row(fpath, delim=delim, lowercase=True) return OrderedDict(zip(colnames, read_csvt(csvt_path))) def build_mysql_schema(fpath, csvt_path, tblname, delim=';'): """Make create table and insert statements suitable for MySQL.""" schema = get_schema(fpath, csvt_path, delim) create = """ CREATE TABLE IF NOT EXISTS {} ( id INT UNSIGNED NOT NULL AUTO_INCREMENT,""".format(tblname) placeholders = [] for colname, info in schema.items(): create += """ {} {} DEFAULT NULL,""".format(colname, info[2]) value = 'ST_GeomFromText(%({})s)' if info[0] == 'WKT' else '%({})s' placeholders.append(value.format(colname)) create += """ PRIMARY KEY (id) );""" # insert statement insert = "INSERT INTO {} ({}) VALUES ({});".format( tblname, ', '.join(schema.keys()), ', '.join(placeholders) ) return create, insert def read_csvt(csvt_path): """Read a GeoCSV schema from .csvt file.""" hdr = get_header_row(csvt_path, delim=',', lowercase=False) return [parse_coltype(i) for i in hdr] def read_geocsv(fpath, csvt_path=None, delim=';', enc='utf8'): """Read a GeoCSV file and schema.""" curlim = csv.field_size_limit(sys.maxsize) with open(fpath, 'r', encoding=enc) as fh: rdr = csv.reader(fh, delimiter=delim) hdr = [i.strip().lower() for i in next(rdr)] # get types from csvt file if csvt_path is not None: coltypes = OrderedDict(zip(hdr, read_csvt(csvt_path))) else: coltypes = OrderedDict( zip(hdr, itertools.cycle([('String', None, 'VARCHAR(255)')])) ) yield from ( {k: convert_value(coltypes[k][0], v) 
for k, v in zip(hdr, record)} for record in rdr ) csv.field_size_limit(curlim)
[ "chrispoliquin@gmail.com" ]
chrispoliquin@gmail.com
4ab445c9e195a6512be677a5c74d89400cf5b8ee
8783d015169267c27062a231c33aa7450fc7153d
/hackers_rank/Maximum Subarray Sum.py
6145273c73624ca5ad429b90482431a1bd324577
[]
no_license
thangarajan8/misc_python
51619e932ffd972be78a23b62ad69b34f84f035d
b00ad259e240a3897348bc80fb9040a257db208f
refs/heads/master
2021-06-26T02:14:13.613212
2021-02-05T04:35:25
2021-02-05T04:35:25
209,036,549
0
0
null
null
null
null
UTF-8
Python
false
false
179
py
# -*- coding: utf-8 -*- """ Created on Thu Nov 7 11:24:21 2019 https://www.hackerrank.com/challenges/maximum-subarray-sum/problem?h_r=next-challenge&h_v=zen @author: Thanga """
[ "Thangarajan.P@tvscredit.com" ]
Thangarajan.P@tvscredit.com
56e5d1043d7e8d83e0e3ea1fce2733a7622b0ec9
fd9dd0b4aea0f55e2b35529462bf5fa7bd4f9d33
/reverse.py
c0dcc7d1737601f63fb4d47cc44260852d6b35f9
[]
no_license
mraines4/Python_wk1
833a48f6576cb2130c02516c69a537a1a4e0f158
88bb07b706a67254d795e616412b2baf70217e6c
refs/heads/master
2020-04-23T16:05:43.731860
2019-02-19T20:28:39
2019-02-19T20:28:39
171,286,541
0
0
null
null
null
null
UTF-8
Python
false
false
207
py
thingy = "what is up chicken butt" # print(thingy[::-1]) new_string = '' length = len(thingy) for letter in range(length, 0, -1): index = letter - 1 new_string += thingy[index] print(new_string)
[ "mraines4@DC-MacBook-Air.T-mobile.com" ]
mraines4@DC-MacBook-Air.T-mobile.com
2a2a13d14c59d50cc9bd664810dcf59f904a4ea2
7082d05a3292b2fec62966326cb289f478b4e2be
/main.py
ae153cc850290273501f38556fdf231d10674770
[]
no_license
Fwbeasley3/web-caesar
2b3bddffb079db4c6d8792a2306b04955270ac28
ed4648d5279ba8529ba0278613db1c14aa785c6d
refs/heads/master
2020-05-04T14:12:46.522188
2019-04-04T01:38:35
2019-04-04T01:38:35
179,188,205
0
0
null
null
null
null
UTF-8
Python
false
false
1,140
py
from flask import Flask,request from caesar import rotate_string app = Flask(__name__) app.config['DEBUG'] = True form = """ <!DOCTYPE html> <html> <head> <style> form {{ background-color: #eee; padding: 20px; margin: 0 auto; width: 540px; font: 16px sans-serif; border-radius: 10px; }} textarea {{ margin: 10px 0; width: 540px; height: 120px; }} </style> </head> <body> <form method="POST"> <label>Rotate by:</label> <input type="text" name="rot" value="0"/> <textarea name="text">{0} </textarea/> <input type="submit" /> </form> </body> </html> """ @app.route("/") def index(): return form.format('') @app.route("/", methods=['POST']) def encrypt(): rot = int(request.form['rot']) text = request.form['text'] return form.format(rotate_string(text,rot)) app.run()
[ "fwbeasley3@yahoo.com" ]
fwbeasley3@yahoo.com
9e51554a63ea745f2574b28165948e41f852a97e
a90aa4871684f6f24aa5b0daf2ece384418c748b
/basic/python/2_applica/1_scrapy/bloomfilter.py
868227e7ae9f1616e4f5ff0e2650e336b69c8b7a
[]
no_license
Martians/code
fed5735b106963de79b18cc546624893665066cd
653e2c595f4ac011aed7102ca26b842d4f6beaaf
refs/heads/master
2021-07-11T19:22:24.858037
2019-02-22T13:04:55
2019-02-22T13:04:55
110,106,407
1
0
null
null
null
null
UTF-8
Python
false
false
88
py
# https://media.readthedocs.org/pdf/pybloomfiltermmap3/latest/pybloomfiltermmap3.pdf
[ "liudong@daowoo.com" ]
liudong@daowoo.com
3b72dd9ebd85180116d827b3d36b959f3f7f41cf
9ca81dc89fc00cfec27296b662fc7a4dcfc430ce
/bot/cogs/8ball.py
755a781fa85f2d4326a2e74e67717c3e1f7568f5
[]
no_license
Mortal-Jelly/Aidan-Bot
8e5481dadcf3c217ec2c57625dd5f88582443bb5
cbbd4de9f546a8a805a363e2d3da10dc9cf9f311
refs/heads/master
2023-01-13T04:51:50.870409
2020-11-10T16:20:42
2020-11-10T16:20:42
311,467,607
0
0
null
null
null
null
UTF-8
Python
false
false
1,293
py
import discord from discord.ext import commands import random from random import choice class Magic8Ball(commands.Cog): def __init__(self, client): self.client = client @commands.command(aliases=['8ball', '8b'], help='Pretty self explanatory, its an 8 ball') async def _8ball(self, ctx, *, question): responses = ['It is certain', 'It is decidedly so', 'Without a doubt', 'Yes – definitely', 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Yes Signs point to yes', 'Reply hazy', 'Try again', 'Ask again later', 'Better not tell you now', 'Cannot predict now', 'Concentrate and ask again', 'Don\'t count on it', 'My reply is no', 'My sources say no', 'Outlook not so good', 'Very doubtful'] await ctx.send(f'Question: {question}\nAnswer: {random.choice(responses)}') def setup(client): client.add_cog(Magic8Ball(client))
[ "gameraidk10@gmail.com" ]
gameraidk10@gmail.com
c770dfb1245a2df9940c9031fc46d7ef18bda972
d50c3a9aeae1a01fc258e403285e98c4069635c7
/main.py
8c733538d1a0872390ff13cdc5f3f8322a0ebfbb
[]
no_license
rmguy7155/Mad_Lib
c6c8a63a08b8b95895810ee5c476bedd9a9d9e72
e905612bf009938cf9fc1e5cda6dc44584eff95b
refs/heads/main
2023-03-14T19:47:45.113382
2021-03-26T04:57:23
2021-03-26T04:57:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,563
py
# Name: Rebecca Guy # Course: COP1500 # Description: Word Lib and fun conversations for the family. ############################################################################### import random def main(): # begin introduction ######################################################## # prompt user for their name and store to variable name name = input("Hey there. What's your name? ") # prompt user for last name, this will be used within the story # capitalize first letter of the name last_name = input("What's your last name? ") print("\nHey,", name.capitalize(), last_name.capitalize() + ", nice name you've got there.") # prompt user to name the program for entertainment my_name = input("I don't have a name yet. What should I be called? ") # print statement thanking user for name choice print("\nThanks! I love it. '", my_name.capitalize(), "' is great!", sep='') # ask for age age = int(input("How old are you " + '' + name.capitalize() + "? ")) # to incorporate multiplication, addition, and division print("That's awesome! I'm ", format(age*11+7/3 - 4**2%1//3, '.2f'), "years old!") # ask user for favorite letter, this will be used in the story letter = input("What is your favorite letter of the alphabet? ") # define function,do_you_know: opens text file to explain what a Mad Lib def do_you_know(): text = open('DoYouKnow.txt').read() return text # asks user if they've ever heard of Mad Libs answer1 = input("So, this program is a Mad Lib. Have you heard of Mad Libs? ") while True: # if user enters yes, the program will move on if answer1.lower() == 'yes': print(" ") print("Great! 
Let's fill in the blanks for some wordy fun!") print("After we choose the words, \nI will show you our story!") break # if the user enters no, it will explain what a Mad Lib is elif answer1.lower() == 'no': print(" ") print(open('DoYouKnow.txt').read()) break else: # this will prompt if user doesn't answer 'yes' or 'no' print("I'm not sure what you meant.") answer1 = input("Have you heard of Mad Libs? You can type 'yes' or 'no'. ") answer2 = input("\nSo, are you ready to get started? ") if answer2.lower() == 'yes': print("Okay, ", name.capitalize(), "! Let's have some fun!", sep='') else: print("Well... ", name.capitalize(), ", here we go anyways!", sep='') # end introduction ############################################################### # definition of adjectives and nouns ############################################# print("\nFor our story, we will need adjectives and nouns. ") # ask user if they would like to know what an adjective is adjective_question = input("Would you like to know what an adjective is? ") # if user selects yes, print the definition of adjective with an example if adjective_question.lower() == 'yes': print("\nAn adjective is a word used to describe a person, a place, or a thing.") print("For example: The 'red' car. The 'smelly' dog. Both 'red' and 'smelly' are adjectives.\n") # ask user if they would like to know what an noun is noun_question = input("Would you like to know what a noun is? ") # if user selects yes, print the definition of noun with an example if noun_question.lower() == 'yes': print("\nA noun is a word used to name a person, a place, or a thing.") print("For example: The red 'car'. The smelly 'dog'. Both 'car' and 'dog' are nouns.\n") proceed = input("When you're ready to begin, type 'yes'. ") while proceed.lower() != 'yes': proceed = input("Type 'yes' to continue. 
") print("\nOkay, whenever you would like me to choose a word, type 'you choose'.") def countdown(n): if n == 0: print("Let's go!") else: print(n) countdown(n - 1) countdown(3) print(" ") # choosing words for the story #### def random_adj(): # define function to call random adjective from adjectives.txt # open list, assign random element to random_adjective random_adjective = random.choice(list(open('adjectives.txt'))) # return value for random_adjective and removes the space after the final letter return random_adjective.rstrip() def random_nouns(): # function to call random noun from nouns.txt # open list, assign random element to random_nouns random_noun = random.choice(list(open('nouns.txt'))) # return value for random_noun and removes the space after the final letter return random_noun.rstrip() def random_plural_nouns(): # function to call random noun from nouns.txt # open list, assign random element to random_nouns random_plural_noun = random.choice(list(open('pluralNouns.txt'))) # return value for random_noun and removes the space after the final letter return random_plural_noun.rstrip() def random_exp(): random_expression = random.choice(list(open('expressions.txt'))) return random_expression.rstrip() story = open('story.txt', 'r') new_story = open("new_story.txt", "w") noun_list = [] adj_list = [] for line in story: if '(adj)' in line: adjective = input("Choose an adjective: ") if adjective == 'you choose': adjective = random_adj() print(random_exp(), " '", adjective, "' ", sep="") new_story.write(line.replace('(adj)', adjective)) adj_list.append(adjective) elif '(noun)' in line: noun = input("Choose a noun: ") if noun == 'you choose': noun = random_nouns() print(random_exp(), " '", noun, "' ", sep="") new_story.write(line.replace('(noun)', noun)) noun_list.append(noun) elif '(plural_noun)' in line: plural_noun = input("Choose a plural noun. 
(Plural means more than one): ") if plural_noun == 'you choose': plural_noun = random_plural_nouns() print(random_exp(), " '", plural_noun, "' ", sep="") new_story.write(line.replace('(plural_noun)', plural_noun)) noun_list.append(plural_noun) elif '(letter)' in line: new_story.write(line.replace('(letter)', letter.capitalize())) elif '(last_name)' in line: new_story.write(line.replace('(last_name)', last_name.capitalize())) else: new_story.write(line) print("\nGreat choices! Our adjectives are:") print(*adj_list, sep= ", ") print(" ") print("\nAnd our nouns are:") print(*noun_list, sep= ", ") print(" ") story.close() new_story.close() print("Okay! Our story is ready. ") proceed_story = input("Are you ready to read our story? Type 'yes' to proceed. ") while proceed_story.lower() != 'yes': proceed_story = input("Type 'yes' to continue. ") print(" ") print(" ") read_story = open('new_story.txt', 'r') print(read_story.read()) for i in range(3, 0, -1): print(i) print("The End!") main()
[ "noreply@github.com" ]
rmguy7155.noreply@github.com
4af2ccbccc3801bfd03ba5d348228bde9e7d5e13
fd133e8252dc4ddb8221007f806da336639e9029
/924_minimize_malware_speed.py
9ebceeb1693f0f22ca3036256554fcb1d0d201ee
[]
no_license
nikrasiya/Graph-2
ea331e8470a73eef2f70cbb71f28023f704f1ba2
4689f2e0d1a0847ab519715d7659939dad89e001
refs/heads/master
2021-05-17T16:21:17.539763
2020-04-06T13:18:31
2020-04-06T13:18:31
250,869,007
0
0
null
2020-03-28T18:44:58
2020-03-28T18:44:58
null
UTF-8
Python
false
false
4,082
py
from typing import List
from collections import defaultdict, Counter


class Solution:
    """LeetCode 924 - Minimize Malware Spread."""

    def minMalwareSpread(self, graph: List[List[int]], initial: List[int]) -> int:
        """
        https://leetcode.com/problems/minimize-malware-spread/

        Remove exactly one initially infected node so the final number of
        infected nodes is minimized; ties break toward the smallest index.

        Strategy: color each connected component with DFS, then count how
        many initially infected nodes land in each component.  Removing a
        node only helps when it is the *sole* infected node of its
        component (the whole component is then saved), so pick the
        unique-infection node whose component is largest.

        Time  Complexity - O(V * E)   'V' -> vertices  'E' -> edges
        Space Complexity - O(V)
        """
        self.n = len(graph)
        self.colors = [-1] * self.n  # component id per node; -1 = unvisited
        c = 0
        for i in range(self.n):
            self._dfs(i, c, graph)
            c += 1

        groups = Counter(self.colors)  # component id -> component size
        init_color = [0] * c           # component id -> number of infected seeds
        for node in initial:
            init_color[self.colors[node]] += 1

        result = float('inf')
        for node in initial:
            color = self.colors[node]
            count = init_color[color]
            if count == 1:
                # Removing this node saves its entire component.
                if result == float('inf'):
                    result = node
                elif groups[color] > groups[self.colors[result]]:
                    result = node
                elif groups[color] == groups[self.colors[result]] and node < result:
                    result = node
        if result == float('inf'):
            # No removal saves anything: return the smallest infected index.
            return min(initial)
        return result

    def _dfs(self, node, color, graph):
        """Flood-fill `color` over the connected component containing `node`."""
        # base
        if self.colors[node] != -1:
            return
        # logic
        self.colors[node] = color
        for i in range(self.n):
            if graph[node][i] == 1:
                self._dfs(i, color, graph)


if __name__ == '__main__':
    print(Solution().minMalwareSpread([[1, 1, 1], [1, 1, 1], [1, 1, 1]], [1, 2]))
    print(Solution().minMalwareSpread([[1, 1, 0], [1, 1, 0], [0, 0, 1]], [0, 1]))
    print(Solution().minMalwareSpread([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [0, 2]))
    print(Solution().minMalwareSpread([[1, 1, 0], [1, 1, 0], [0, 0, 1]], [0, 1, 2]))
    print(Solution().minMalwareSpread([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]], [3, 1]))
    print(Solution().minMalwareSpread(
        [[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0],
         [0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 1]], [5, 0]))
[ "you@example.com" ]
you@example.com
67606cf989c1d52887d6ca7b791be061b25f47fd
31fc39f233845e01c5491a744db08832e66d6a75
/nginx/fulltest/testrun.py
685316183f53cfe839077a429052fa4a662d7fa5
[]
no_license
fhnw-ise-qcrypt/oqs-demos
1dfb0ba83cb421ef2fdc98cb6b6356d81fbfe6e6
940b5f2b75b63c5e2b7fbcf9423f4492f9926134
refs/heads/main
2023-03-07T13:14:27.779046
2021-02-22T13:59:54
2021-02-22T13:59:54
299,259,879
0
0
null
null
null
null
UTF-8
Python
false
false
1,035
py
import json
import sys
import subprocess
import os

# Parameter checks already done in shellscript

# Load the port assignment table: {signature_alg: {kem_alg: port}}.
with open("assignments.json", "r") as f:
    assignments = json.load(f)

for sig in assignments:
    print("Testing %s:" % (sig))
    for kem in assignments[sig]:
        # Assemble the testing command as an argv list.  The original built
        # one string and .split(" ")-ed it, which breaks as soon as the
        # working-directory path contains a space.
        args = [
            "docker", "run",
            "-v", os.path.abspath(os.getcwd()) + ":/ca",
            "-it", sys.argv[1],
            "curl", "--cacert", "/ca/CA.crt",
            "https://test.openquantumsafe.org:" + str(assignments[sig][kem]),
        ]
        if kem != "*":  # don't prescribe KEM
            args += ["--curves", kem]
        cmd = " ".join(args)  # human-readable form for log messages only
        dockerrun = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # curl must succeed AND the server reply must contain "Successfully".
        if dockerrun.returncode != 0 or not (b"Successfully" in dockerrun.stdout):
            print("Error executing %s (Output: %s). Terminating." % (cmd, dockerrun.stdout))
            sys.exit(1)  # sys.exit instead of the site-provided exit() builtin
        else:
            print(" Tested KEM %s successfully." % (kem))
    print(" Successfully concluded testing " + sig)
print("All tests successfully passed.")
[ "noreply@github.com" ]
fhnw-ise-qcrypt.noreply@github.com
77ebbe0d48ff860ba8eab641e85ade6503ca77d9
f2ab8ccda7203dd37d61facb9978cf74b781c7f1
/tests/models.py
2a33a19a5a6c499db6c4c5ca9168a18891a56d61
[ "MIT" ]
permissive
Apkawa/easy-thumbnails-admin
1991137224dcd117520b2c114d4012daf803776e
9d7a38f215cdac53a663b00f1d4ff3a3c2a54eb4
refs/heads/master
2021-01-01T15:47:34.334792
2017-11-23T10:38:09
2017-11-23T10:38:09
97,703,157
0
0
null
null
null
null
UTF-8
Python
false
false
252
py
from django.db import models

from easy_thumbnails.fields import ThumbnailerImageField


def upload_to(instance, filename):
    """Return the storage path for an uploaded image.

    Django calls this with the model instance and the original file name;
    files are stored under the 'example/' prefix of the default storage.
    """
    return 'example/{}'.format(filename)


class Example(models.Model):
    """Minimal model used to exercise easy-thumbnails' ThumbnailerImageField."""

    # Image field whose thumbnails are managed by easy-thumbnails.
    image = ThumbnailerImageField(upload_to=upload_to)
[ "apkawa@gmail.com" ]
apkawa@gmail.com
d798c98df73ac0f376823956410ace3f60caa57f
07a401d503741c11f56715a7f4abf1ada89e00e8
/reinforcement_learning_portfolio_management/src/features/myenv.py
f85ff38f5bfd3ba634d5e26140e17d8a2f4aed4e
[ "MIT" ]
permissive
francisliu2/master_thesis
bff065012e682101ff339820b4aacf481b0a7a68
802281108cf42e6d571081e954bd6b9675b87aaf
refs/heads/master
2020-03-24T15:19:40.898701
2018-11-27T18:29:07
2018-11-27T18:29:07
142,781,630
0
0
null
null
null
null
UTF-8
Python
false
false
4,897
py
import numpy as np
import pandas as pd
import os, sys
# sys.path.append('../src/data/')
import data_preprocessing_from_yahoo_finance as dp
import matplotlib.pyplot as plt


class myenv:
    """Gym-style portfolio-management environment.

    Wraps a price array `result_np` of shape (feature, time, asset) --
    assumes the feature axis has 4 rows with index 3 being the traded
    (close) price, TODO confirm -- prepends a constant-price cash asset,
    and exposes reset()/step() returning
    (price_tensor, weight, reward, done).
    """

    def make(self):
        # working
        pass

    def __init__(self, result_np, price_window = 2000, total_steps=100, reward_type='log_return',
                 commission_rate = 0.05, initial_weight = None, stocks_name = None):
        #reward_type can choose log_return
        self.current_step = 0  # start from step 0
        self.total_steps = total_steps
        # self.price_window = int(result_np.shape[1]-self.total_steps)-1 # How many data points to look at in each step
        self.price_window = price_window
        self.reward_type = reward_type
        self.commission_rate = commission_rate

        # Initialize Price Tensor: cash has constant price 1 on every
        # feature/time, concatenated as asset index 0.
        self.cash_price = np.ones(result_np.shape[0:2])
        self.all_prices = np.concatenate((self.cash_price[:, :, None], result_np), axis=2)
        # Element-wise ratio price[t]/price[t-1] (one step shorter in time).
        self.all_prices_normalized = self.all_prices[:, 1:, :] / self.all_prices[:, :-1, :]
        self.initial_weight = initial_weight

        # Action Space is the number of asset in result_np + cash
        self.action_space_dimension = result_np.shape[2] + 1

        # Observation Space
        self.observation_space_dimension = {'price_tensor': [4, self.price_window, result_np.shape[2] + 1],
                                            'weight': [self.all_prices.shape[2]]}

        # Backtest bookkeeping (filled in by reset()/step()/end_game()).
        self.portfolio_size = []
        self.portfolio_return = []
        self.sharpe_ratio = 0
        self.weights = []
        self.total_commision = 0  # NOTE(review): spelling kept; += below turns this scalar into an array

    def reset(self):
        """Restart an episode; returns (prices, weight, reward=0, done=False)."""
        self.total_steps = min(self.total_steps, self.all_prices_normalized.shape[1] - self.price_window)  # how many days to trade on
        if self.initial_weight is not None:
            weight = self.initial_weight  # initialize weight
        else:
            weight = np.zeros(self.all_prices.shape[2])  # add cash dimension
            weight[0] = 1  # everything starts in cash
        self.weights.append(weight)

        # Keep track of how many stock units are in portfolio
        self.units_list = []
        self.units_list.append(weight)

        self.current_step = 0
        start_prices = self.all_prices_normalized[:, self.current_step:self.current_step + self.price_window]
        start_portfolio_size = 1
        self.portfolio_size.append(start_portfolio_size)
        return start_prices, weight, 0, False  # new_prices_toagent, weight, reward, done

    def step(self, weight, verbose=False):
        """Rebalance to `weight`, advance one day, return the new observation."""
        assert round(np.sum(weight), 5) == 1, "Sum of input weight is not equal to 1, %s" % weight  # make sure input weight intact
        assert ~(np.sign(weight) == -1).any(), "Negative weight is not allowed, %s" % weight

        old_units = self.units_list[-1]
        portfolio_size = self.portfolio_size[-1]
        # Feature row 3 is used as the trading price -- presumably close;
        # the author's "+1?" marks an unresolved off-by-one question.
        current_prices = self.all_prices[3, self.current_step + self.price_window, :]  # +1?
        units = portfolio_size * weight / current_prices
        self.units_list.append(units)
        self.weights.append(weight)

        # Price change
        new_prices = self.all_prices[3, self.current_step + self.price_window + 1, :]  # +2?
        new_portfolio_size = np.sum(units * new_prices)
        reward = (new_portfolio_size / portfolio_size) - 1

        # Commission
        # NOTE(review): commission is accumulated but never deducted from the
        # portfolio value, and is per-unit (not multiplied by price) - confirm.
        commission = np.absolute(old_units - units) * self.commission_rate
        self.total_commision += commission

        # Keep track
        self.portfolio_size.append(new_portfolio_size)
        self.portfolio_return.append(reward)  # Update reward before turning it into log scale

        # next step
        self.current_step += 1
        done = False
        if self.current_step == self.total_steps:
            done = True

        new_prices_toagent = self.all_prices_normalized[:, self.current_step:self.current_step + self.price_window, :]
        if self.reward_type == 'log_return':
            reward = np.log(reward + 1)
        return new_prices_toagent, weight, reward, done

    def render_psize(self):
        """Plot portfolio size vs. each asset's normalized price (after end_game)."""
        assert self.sharpe_ratio != 0, 'Have you end game?'
        p1 = plt.plot(self.portfolio_size)
        p2 = plt.plot(self.all_prices[3, -self.total_steps - 1:, :] / self.all_prices[3, -self.total_steps - 1, :])
        return p1, p2
        # Plot the weight, return, and the price together.

    def render_weights(self, include_start=True):
        """Plot the weight trajectory.

        NOTE(review): include_start=True skips element 0, which looks
        inverted relative to the name - confirm intended semantics.
        """
        if include_start:
            i = 1
        else:
            i = 0
        return plt.plot(self.weights[i:])

    def end_game(self):
        """Compute the (per-step, not annualized) Sharpe ratio of the episode."""
        expected_return = np.mean(self.portfolio_return)
        std_return = np.std(self.portfolio_return)
        self.sharpe_ratio = expected_return / std_return
[ "francisliutfp@gmail.com" ]
francisliutfp@gmail.com
f124255d95088ab5a627f2afbe0efbc1fea1648c
90b4240997d1b23d326303eb9f69f32d9b20b6d5
/line.py
c603773edfea7221650d7bb5c34b0525ea2e2d55
[]
no_license
FrancoYudica/2D-Reflections
4532a64e3e098fb0647794b93a63ddd8ccf1a8ff
f0355fac7bcc23087eafcbaf12094d44a6588395
refs/heads/main
2023-04-19T02:11:13.610423
2021-04-29T01:19:11
2021-04-29T01:19:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,287
py
from vector import Vector
from ray import RayInteractive


class Line(RayInteractive):
    """A finite 2-D segment between two endpoints that rays can hit."""

    def __init__(self, point1, point2):
        self.p1 = Vector(point1[0], point1[1])
        self.p2 = Vector(point2[0], point2[1])

    def line_intersect(self, other):
        """ returns a (x, y) tuple or None if there is no intersection """
        # Direction deltas of both segments.
        dx_self = self.p2.x - self.p1.x
        dy_self = self.p2.y - self.p1.y
        dx_other = other.p2.x - other.p1.x
        dy_other = other.p2.y - other.p1.y

        # Zero denominator means the segments are parallel (or degenerate).
        denom = dy_other * dx_self - dx_other * dy_self
        if not denom:
            return

        rel_x = self.p1.x - other.p1.x
        rel_y = self.p1.y - other.p1.y
        t_self = (dx_other * rel_y - dy_other * rel_x) / denom
        t_other = (dx_self * rel_y - dy_self * rel_x) / denom

        # Both parameters must lie inside [0, 1] for the segments to touch.
        if 0 <= t_self <= 1 and 0 <= t_other <= 1:
            return (self.p1.x + t_self * dx_self,
                    self.p1.y + t_self * dy_self)
        return None

    def intersection_point(self, ray):
        """Intersection with `ray`, approximated as a very long segment."""
        far_point = ray.origin + ray.direction * 1000000
        return self.line_intersect(Line(ray.origin, far_point))

    def normal(self, intersection_point, ray):
        """Unit vector perpendicular to the segment direction p1 -> p2."""
        dy = self.p1.y - self.p2.y
        dx = self.p1.x - self.p2.x
        return Vector(-dy, dx).normalized()
[ "noreply@github.com" ]
FrancoYudica.noreply@github.com
df5d0b6bce7e95494aebfafdb33a6c7116a2f611
1e648983311c93a14b20516bd7acaf183ed12b10
/algorithmic_heights/DEG.py
242aafe6ee427ba41aebaf0a04d76cb8f913476e
[]
no_license
biomathcode/Rosalind_solutions
5279b852b15e634dea57822ce12d63b5c3cd6715
c84bbd570b22d8de4d29d417c99d2a4b96170453
refs/heads/master
2023-02-26T22:07:12.119786
2021-01-25T11:54:45
2021-01-25T11:54:45
null
0
0
null
null
null
null
UTF-8
Python
false
false
584
py
from __future__ import print_function from collections import Counter from itertools import chain edges = [] with open("docs/rosalind_deg.txt", 'r') as f: next(f) for line in f: edges.append(line.strip().split()) f.close() my_list = [] for x in chain.from_iterable(edges): #flatten the lists my_list.append(x) ##Counter accept a list and returns the count of each occurance of a list d = Counter(my_list) o = open("submission.txt", 'w') #Sorted will sort the list based on number for key in sorted(d, key = int): print(d[key], end=" ", file= o) o.close()
[ "sharma.pratik2016@gmail.com" ]
sharma.pratik2016@gmail.com
52d3c5b174531266dd88c55ceec6bc417ef42be2
f8f77041f10f14e92e08bc08d16d3aabb09c5792
/Python/exercicios/ex037.py
3b7a74fa60d14e38d9f6d5b1b54a32e136b68028
[]
no_license
danielns-op/CursoEmVideo
0546380f67b6d101d87b59f01d87d9832570d32f
0a979dae6a6d3757f833f0fbe5bb7e86437beb18
refs/heads/main
2023-06-20T22:06:23.884946
2021-08-05T18:58:58
2021-08-05T18:58:58
326,108,593
0
0
null
null
null
null
UTF-8
Python
false
false
2,308
py
n = int(input('Número: '))
bc = str(input('''Qual a base de conversão: 
1 - Binário
2 - Octal
3 - Hexadecimal\nOpção: '''))


def _to_base(value, base):
    """Convert a non-negative integer to a string in `base` (2..16).

    Repeated division that always emits at least one digit, so
    _to_base(0, base) == '0' -- same do-while shape as the original loops,
    but one helper replaces three near-identical copies (including the
    six-way if-chain that mapped 10..15 to 'A'..'F').
    """
    digits = '0123456789ABCDEF'
    out = ''
    while True:
        value, mod = divmod(value, base)
        out = digits[mod] + out
        if value == 0:
            return out


if bc == '1':
    print('O número {} equivale a {} em binário.'.format(n, _to_base(n, 2)))
elif bc == '2':
    print('O número {} equivale a {} em octal.'.format(n, _to_base(n, 8)))
elif bc == '3':
    print('O número {} equivale a {} em hexadecimal.'.format(n, _to_base(n, 16)))
else:
    # Bug fix: the original interpolated the *number* n into this message
    # instead of the invalid option the user actually typed.
    print('Opção "{}" inválida.'.format(bc))

'''
Resolução do Gustavo Guanabara: 

num = int(input('Digite um número inteiro: '))
print("""Escolha uma das bases para conversão: 
[ 1 ] converter para BINÁRIO
[ 2 ] converter para OCTAL
[ 3 ] converter para HEXADECIMAL""")
opção = int(input('Sua opção: '))
if opção == 1:
    print('{} convertido para BINÁRIO é igual a {}'.format(num, bin(num)[2:]))
elif opção == 2:
    print('{} convertido para OCTAL é igual a {}'.format(num, oct(num)[2:]))
elif opção == 3:
    print('{} convertido para HEXADECIMAL é igual a {}'.format(num, hex(num)[2:]))
else:
    print('Opção inválida, tente novamente')
'''
[ "noreply@github.com" ]
danielns-op.noreply@github.com
139ecc75596912c669b4ed0216a1514922c50a4c
605611de5eae63ce4eef388a287a3ef18b52eae7
/CovidCrowd/settings.py
95d5f1e4b28e5afba1ede23609cd8a48a22b35cd
[]
no_license
RahulAttarde/CovidCrowd
e6b2e45c222f03112c157403c2d6630d888599d8
55740e1ea72cd434aed0a627f6fffb16024a6f17
refs/heads/master
2021-04-23T00:02:46.726288
2020-03-25T02:45:35
2020-03-25T02:45:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,879
py
"""
Django settings for CovidCrowd project.

Generated by 'django-admin startproject' using Django 3.0.3.

For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""

import os

from decouple import config, Csv

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")

# SECURITY WARNING: don't run with debug turned on in production!
# Bug fix: decouple's config() returns a *string* unless cast; the string
# "False" is truthy, so without cast=bool DEBUG was effectively always on.
DEBUG = config("DEBUG", cast=bool)

ALLOWED_HOSTS = config("MY_HOSTS", cast=Csv())


# Application definition

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.gis",
    "social_django",
    "crispy_forms",
    "patients",
    "rest_framework",
    "django_filters",
    "django_tables2",
    "debug_toolbar",
    "memcache_status",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "social_django.middleware.SocialAuthExceptionMiddleware",
    "debug_toolbar.middleware.DebugToolbarMiddleware",
]

ROOT_URLCONF = "CovidCrowd.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "social_django.context_processors.backends",
                "social_django.context_processors.login_redirect",
            ],
        },
    },
]

WSGI_APPLICATION = "CovidCrowd.wsgi.application"


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# SpatiaLite backend: SQLite with GIS support (requires django.contrib.gis).

DATABASES = {
    "default": {
        "ENGINE": "django.contrib.gis.db.backends.spatialite",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}

AUTHENTICATION_BACKENDS = (
    "social_core.backends.github.GithubOAuth2",
    "social_core.backends.twitter.TwitterOAuth",
    "social_core.backends.google.GoogleOAuth2",
    "django.contrib.auth.backends.ModelBackend",
)

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = "en-us"

TIME_ZONE = "Asia/Kolkata"

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'static')

# Crispy forms
CRISPY_TEMPLATE_PACK = "bootstrap4"

# OAUTH for Social Login
LOGIN_URL = "/login-form"
SOCIAL_AUTH_URL_NAMESPACE = "social"
SOCIAL_AUTH_LOGIN_REDIRECT_URL = "/"

# Provider credentials are read from the environment / .env via decouple.
SOCIAL_AUTH_GITHUB_KEY = config("SOCIAL_AUTH_GITHUB_KEY")
SOCIAL_AUTH_GITHUB_SECRET = config("SOCIAL_AUTH_GITHUB_SECRET")

SOCIAL_AUTH_TWITTER_KEY = config("SOCIAL_AUTH_TWITTER_KEY")
SOCIAL_AUTH_TWITTER_SECRET = config("SOCIAL_AUTH_TWITTER_SECRET")

SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = config("SOCIAL_AUTH_GOOGLE_OAUTH2_KEY")
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = config("SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET")

# Django REST Framework
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ]
}

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}

# Hosts allowed to see the debug toolbar.
INTERNAL_IPS = [
    '127.0.0.1',
]
[ "arun@arunmozhi.in" ]
arun@arunmozhi.in
8af2242cd24a4a8c4f415adcb80c6ae7924c776b
205b47f01782a2e68cf24edd463adfd0c8a7658d
/DUPLICATES.PY
2f4de8b59b4af4153bff6b1121c7a41f599d49c2
[]
no_license
arnavsood/Python
ab0b2ad3108c961f55729f36812d610f2a0da259
546ff9da01e1c59d973167aa62749f5e61365a43
refs/heads/master
2020-08-21T22:21:25.912414
2019-10-19T19:31:57
2019-10-19T19:31:57
216,259,864
0
0
null
null
null
null
UTF-8
Python
false
false
402
py
def finddubs(s):
    """Map each alphabetic character of `s` to [count, [indices]].

    Prints every character that occurs more than once together with the
    positions where it appears, and returns the full mapping (including
    characters that appear only once). Non-alphabetic characters are ignored.
    """
    c = dict()
    # Single pass: create the [count, positions] entry on first sight and
    # update it in place (the original walked the string twice, once just
    # to pre-seed empty entries).
    for i, ch in enumerate(s):
        if ch.isalpha():
            entry = c.setdefault(ch, [0, []])
            entry[0] += 1
            entry[1].append(i)
    for k, v in c.items():
        if v[0] > 1:
            print("key : {} is repeating {}".format(k, v[1]))
    return c
[ "noreply@github.com" ]
arnavsood.noreply@github.com
ce9b464752622154a4ca511cf88559cc2aa81878
edac41fcba010837bff6d81c067659942d88829d
/venv/bin/pip3.6
7e13802577805a4c5a366a938f4e0bafc0f9ae13
[]
no_license
SOARingLab/House_Exchange_System
3dc970ec81926500cbb727b7c3a67bf49371031c
93bf3dd1ff0acbdcc0114dc86e1c4163ffa621ce
refs/heads/master
2020-11-30T16:33:16.170424
2019-12-27T13:31:36
2019-12-27T13:31:36
230,442,487
0
0
null
null
null
null
UTF-8
Python
false
false
420
6
#!/Users/yuzeyuan/PycharmProjects/House_replacement/venv/bin/python # EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6' __requires__ = 'pip==10.0.1' import re import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit( load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')() )
[ "wsnxdyj@qq.com" ]
wsnxdyj@qq.com
fcfdb78f355e672e3b379c75f3b8d13c84478b25
c43fb306fd66cce57950ac0e9fd9fcb3d0d4014e
/managements/manage.py
306a34490603a825c6a00f1c743c61a0de56f38c
[]
no_license
DuyHV20150601/iot_platform
fa092e19738d57ffc79cbc2f464745e2cc7a1458
5d8c7af83f5b9e8d2b21ad3541ab9604be3db695
refs/heads/master
2022-12-07T04:53:22.559358
2020-04-08T15:35:31
2020-04-08T15:35:31
252,399,451
0
0
null
2022-11-21T22:48:06
2020-04-02T08:35:11
Python
UTF-8
Python
false
false
631
py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'managements.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[ "duy.hv1997@gmail.com" ]
duy.hv1997@gmail.com
5addc06ab30cb767590a09babea31964eb5d9908
c3afee743e89c7f0cc8cb532d6166664450ac191
/apps/servicios/migrations/0001_initial.py
29dac8308e1f56ab7c93f1da1b48e4366292d49b
[]
no_license
MaregaAugusto/SAPAM
7a47aee6eedd8a841b0a7924ff6788a209c9c6ed
2ab268fb755aaac8644b225e5832a58a3c632e6b
refs/heads/master
2022-12-25T05:07:48.096206
2020-10-04T15:58:40
2020-10-04T15:58:40
290,857,221
0
0
null
2020-10-04T03:51:59
2020-08-27T18:54:51
JavaScript
UTF-8
Python
false
false
1,850
py
# Generated by Django 3.0 on 2020-09-26 19:59 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('ancianos', '0001_initial'), ('colaboradores', '0001_initial'), ] operations = [ migrations.CreateModel( name='Servicio', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tipo', models.CharField(max_length=50)), ('fecha', models.DateField(auto_now=True)), ('estado', models.BooleanField(default=False)), ('descripcion', models.TextField(blank=True, null=True)), ('gasto', models.DecimalField(blank=True, decimal_places=2, max_digits=8, null=True)), ('anciano', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='solicitante', to='ancianos.Anciano')), ('colaborador', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ayudante', to='colaboradores.Colaborador')), ], options={ 'ordering': ['-id'], }, ), migrations.CreateModel( name='Denuncia', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('asunto', models.CharField(max_length=200)), ('texto', models.TextField()), ('estado', models.BooleanField(default=False)), ('servicio', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='miservicio', to='servicios.Servicio')), ], ), ]
[ "augustomarega@gmail.com" ]
augustomarega@gmail.com
e1752708a0af5efe19acf9209a0dd0734303fa0d
840b98f14f181f7dbd693f2ee4b3c46e5be59305
/demos/demo_pycloudmessenger/POM2/NeuralNetworks/pom2_NN_worker_pycloudmessenger.py
672126432edb472a87502a57beff578247d9307a
[ "Apache-2.0" ]
permissive
Musketeer-H2020/MMLL-Robust
4ef6b2ff5dff18d4d2b2a403a89d9455ba861e2b
ccc0a7674a04ae0d00bedc38893b33184c5f68c6
refs/heads/main
2023-09-01T18:47:46.065297
2021-09-28T15:34:12
2021-09-28T15:34:12
386,264,004
0
0
null
null
null
null
UTF-8
Python
false
false
7,614
py
# -*- coding: utf-8 -*-
'''
@author: Marcos Fernandez Diaz
November 2020

Example of use: python pom2_NN_worker_pycloudmessenger.py --user <user> --password <password> --task_name <task_name> --id <id>

Parameters:
    - user: String with the name of the user. If the user does not exist in the pycloudmessenger platform a new one will be created
    - password: String with the password
    - task_name: String with the name of the task. If the task already exists, an error will be displayed
    - id: Integer representing the partition of data to be used by the worker. Each worker should use a different partition, possible values are 0 to 4.

'''

# Import general modules
import argparse
import logging
import json
import numpy as np
import sys, os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # Disables tensorflow warnings (must be set before importing tf)
import tensorflow as tf
import onnxruntime

# Add higher directory to python modules path.
sys.path.append("../../../../")

# To be imported from MMLL (pip installed)
from MMLL.nodes.WorkerNode import WorkerNode
from MMLL.comms.comms_pycloudmessenger import Comms_worker as Comms

# To be imported from demo_tools
from demo_tools.task_manager_pycloudmessenger import Task_Manager
from demo_tools.data_connectors.Load_from_file import Load_From_File as DC
from demo_tools.mylogging.logger_v1 import Logger
from demo_tools.evaluation_tools import display, plot_cm_seaborn, create_folders

# Set up logger
logging.basicConfig(
    level=logging.ERROR,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S')
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)


if __name__ == "__main__":
    # ---- Command-line arguments ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--user', type=str, default=None, help='User')
    parser.add_argument('--password', type=str, default=None, help='Password')
    parser.add_argument('--task_name', type=str, default=None, help='Name of the task')
    parser.add_argument('--id', type=int, default=None, choices=[0, 1, 2, 3, 4], help='The data partition of the worker')
    FLAGS, unparsed = parser.parse_known_args()
    user_name = FLAGS.user
    user_password = FLAGS.password
    task_name = FLAGS.task_name
    data_partition_id = FLAGS.id  # This integer identifies the data partition used for the worker

    # Set basic configuration
    dataset_name = 'mnist'
    verbose = False
    pom = 2          # Privacy Operation Mode used by the MMLL WorkerNode
    model_type = 'NN'

    # Create the directories for storing relevant outputs if they do not exist
    create_folders("./results/")

    # Setting up the logger
    logger = Logger('./results/logs/Worker_' + str(user_name) + '.log')

    # Load credentials file to use pycloudmessenger
    # Note: this part creates the task and waits for the workers to join. This code is intended to be used only at the demos, in Musketeer this part must be done in the client.
    display('===========================================', logger, verbose)
    credentials_filename = '../../musketeer.json'
    try:
        with open(credentials_filename, 'r') as f:
            credentials = json.load(f)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # sys.exit() with no argument exits with status 0 even on this error path.
        display('Error - The file musketeer.json is not available, please put it under the following path: "' + os.path.abspath(os.path.join("", "../../")) + '"', logger, verbose)
        sys.exit()

    # Create user and join the task
    tm = Task_Manager(credentials_filename)
    participant = tm.create_worker_and_join_task(user_name, user_password, task_name, display, logger)

    # Creating the comms object
    display('Creating WorkerNode under POM %d, communicating through pycloudmessenger' %pom, logger, verbose)
    comms = Comms(participant, user_name)

    # Creating Workernode
    wn = WorkerNode(pom, comms, logger, verbose)
    display('-------------------- Loading dataset %s --------------------------' % dataset_name, logger, verbose)

    # Load data
    # Warning: this data connector is only designed for the demos. In Musketeer, appropriate data
    # connectors must be provided
    data_file = '../../../../input_data/' + dataset_name + '_demonstrator_data.pkl'
    try:
        dc = DC(data_file)
    except:
        # NOTE(review): same bare-except / exit-status-0 caveat as above.
        display('Error - The file ' + dataset_name + '_demonstrator_data.pkl does not exist. Please download it from Box and put it under the following path: "' + os.path.abspath(os.path.join("", "../../../../input_data/")) + '"', logger, verbose)
        sys.exit()

    # Get train/test data and set training data
    [Xtr, ytr, _, _, Xtst, ytst] = dc.get_all_data_Worker(int(data_partition_id))
    wn.set_training_data(dataset_name, Xtr, ytr)
    display('WorkerNode loaded %d patterns for training' % wn.NPtr, logger, verbose)

    # Creating a ML model and start training procedure (blocks until the
    # federated training orchestrated by the master finishes).
    wn.create_model_worker(model_type)
    display('MMLL model %s is ready for training!' %model_type, logger, verbose)
    display('Worker_' + model_type + ' %s is running...' %user_name, logger, verbose)
    wn.run()
    display('Worker_' + model_type + ' %s: EXIT' %user_name, logger, verbose)

    # Retrieving and saving the trained model
    display('Retrieving the trained model from WorkerNode', logger, verbose)
    model = wn.get_model()

    # Warning: this save_model utility is only for demo purposes
    output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model'
    model.save(output_filename_model)

    # Making predictions on test data
    display('------------- Obtaining predictions------------------------------------\n', logger, verbose)
    preprocessors = wn.get_preprocessors()
    if preprocessors is not None:
        for prep_model in preprocessors:  # Apply stored preprocessor sequentially (in the same order received)
            Xtst = prep_model.transform(Xtst)
            display('Test data transformed using %s' %prep_model.name, logger, verbose)
    preds_tst = model.predict(Xtst)
    preds_tst = np.argmax(preds_tst, axis=-1)  # Labels
    y = np.argmax(ytst, axis=-1)  # Convert to labels
    classes = np.arange(ytst.shape[1])  # 0 to 9

    # Evaluating the results
    display('------------- Evaluating --------------------------------------------\n', logger, verbose)
    # Warning, these evaluation methods are not part of the MMLL library, they are only intended to be used for the demos. Use them at your own risk.
    output_filename = 'Worker_' + str(user_name) + '_NN_confusion_matrix_' + dataset_name + '.png'
    title = 'NN confusion matrix in test set worker'
    plot_cm_seaborn(preds_tst, y, classes, title, output_filename, logger, verbose, normalize=True)

    # Load Tf SavedModel and check results
    model_loaded = tf.keras.models.load_model(output_filename_model)
    preds_tst = model_loaded.predict(Xtst)
    preds_tst = np.argmax(preds_tst, axis=-1)  # Convert to labels

    # Model export to ONXX
    # NOTE(review): presumably MMLL's model.save() dispatches on the ".onnx"
    # extension to export ONNX instead of a TF SavedModel -- confirm.
    output_filename_model = './results/models/Worker_' + str(user_name) + '_' + dataset_name + '_model.onnx'
    model.save(output_filename_model)

    # Compute the prediction with ONNX Runtime and compare against the
    # TF predictions (mean squared difference of the label vectors).
    onnx_session = onnxruntime.InferenceSession(output_filename_model)
    onnx_inputs = {onnx_session.get_inputs()[0].name: Xtst}
    onnx_output = onnx_session.run(None, onnx_inputs)[0]
    onnx_output = np.argmax(onnx_output, axis=-1)  # Convert to labels
    err_onnx = np.mean((preds_tst.ravel() - onnx_output.ravel())**2)
    display('Error in ONNX predictions is %f' %err_onnx, logger, verbose)
[ "rober.diaz@gmail.com" ]
rober.diaz@gmail.com
e039a691ab85b5546fa57b3a953a926b753ec312
a0082b6d0bf497d3f244fdaec1ce9d8b338ac9ef
/TensorFlow/util_model.py
ef9b3369395c7546ffd3c3c74fd73e6c9e2c8ce6
[]
no_license
EvilicLufas/Equality-of-Opportunity-Travel-Behavior
419eae84b2b1105a9e255d4d4e1f40d535c52581
06878173d5cb1c06a8a563a473cb067bc7c626a6
refs/heads/master
2023-08-18T01:25:03.994926
2021-10-15T17:09:22
2021-10-15T17:09:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
21,508
py
import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib as mpl import pickle import os import copy import tensorflow as tf from sklearn import svm, datasets from sklearn.metrics import confusion_matrix from sklearn.utils.class_weight import compute_sample_weight import time import collections from sklearn.model_selection import KFold from tqdm.notebook import tqdm def correlation(x, y, w): eps=1e-20 mx=tf.reduce_sum(w*x)/tf.reduce_sum(w) my=tf.reduce_sum(w*y)/tf.reduce_sum(w) xm, ym = x-mx, y-my r_num = tf.reduce_sum(w*tf.multiply(xm,ym)) r_den =(tf.sqrt(tf.reduce_sum(w*tf.square(xm)))+eps)*(tf.sqrt(tf.reduce_sum(w*tf.square(ym)))+eps) return r_num / r_den class FeedForward_DNN: def __init__(self,K,MODEL_NAME,INCLUDE_VAL_SET,INCLUDE_RAW_SET, RUN_DIR): self.graph = tf.Graph() self.K = K self.MODEL_NAME = MODEL_NAME self.INCLUDE_VAL_SET = INCLUDE_VAL_SET self.INCLUDE_RAW_SET=INCLUDE_RAW_SET self.RUN_DIR = RUN_DIR def init_hyperparameter(self, lam,lr,lyr,n_epoch,n_mini_batch): # h stands for hyperparameter self.h = {} self.h['M']=lyr self.h['M_2']=2 self.h['n_hidden']=200 self.h['l1_const']=1e-5 self.h['l2_const']=0.001 self.h['dropout_rate']=0.01 self.h['batch_normalization']=True self.h['learning_rate']=lr self.h['n_iteration']=5000 self.h['n_mini_batch']=n_mini_batch self.h['lam']=lam self.h['n_epoch']=n_epoch def change_hyperparameter(self, new_hyperparameter): assert bool(self.h) == True self.h = new_hyperparameter def random_sample_hyperparameter(self): assert bool(self.hs) == True assert bool(self.h) == True for name_ in self.h.keys(): self.h[name_] = np.random.choice(self.hs[name_+'_list']) def obtain_mini_batch(self,i,full=True): index=np.arange((i-1)*self.h['n_mini_batch'],i*self.h['n_mini_batch']) if full==False: index=np.arange((i-1)*self.h['n_mini_batch'],self.N_train) self.X_batch = self.X_train_[index, :] self.Y_batch = self.Y_train_[index] self.Z_batch = self.Z_train_[index] self.W_batch = self.W_train_[index] def 
load_data(self, Z_var,input_data_raw = None): print("Loading datasets...") Z_train= X_train[Z_var] Z_test = X_test[Z_var] self.colnames = list(X_train.columns) self.X_train = X_train.values self.Y_train = Y_train.values self.Z_train = Z_train.values self.X_test=X_test.values self.Y_test=Y_test.values self.Z_test=Z_test.values self.X_z0_train=X_train[Z_train==1].values self.X_z1_train=X_train[Z_train==0].values self.Y_z0_train=Y_train[Z_train==1].values self.Y_z1_train=Y_train[Z_train==0].values self.X_z0_test=X_test[Z_test==1].values self.X_z1_test=X_test[Z_test==0].values self.Y_z0_test=Y_test[Z_test==1].values self.Y_z1_test=Y_test[Z_test==0].values self.W_z0_train=W_train[Z_train==1].values self.W_z1_train=W_train[Z_train==0].values self.W_z0_test=W_test[Z_test==1].values self.W_z1_test=W_test[Z_test==0].values if self.INCLUDE_VAL_SET: self.X_val = X_val.values self.Y_val = Y_val.values print("Training set", self.X_train.shape, self.Y_train.shape, self.Z_train.shape) print("Testing set", self.X_test.shape, self.Y_test.shape, self.Z_test.shape) if self.INCLUDE_VAL_SET: print("Validation set", self.X_val.shape, self.Y_val.shape) self.W_train = W_train.values self.W_test = W_test.values # save dim self.N_train,self.D = self.X_train.shape self.N_test,self.D = self.X_test.shape def bootstrap_data(self, N_bootstrap_sample): self.N_bootstrap_sample = N_bootstrap_sample bootstrap_sample_index = np.random.choice(self.N_train, size = self.N_bootstrap_sample) self.X_train_ = self.X_train self.Y_train_ = self.Y_train self.Z_train_ = self.Z_train self.W_train_ = self.W_train #save positive y index def standard_hidden_layer(self, name): # standard layer, repeated in the following for loop. 
self.hidden = tf.layers.dense(self.hidden, self.h['n_hidden'], activation = tf.nn.relu, name = name) if self.h['batch_normalization'] == True: self.hidden = tf.layers.batch_normalization(inputs = self.hidden, axis = 1) self.hidden = tf.layers.dropout(inputs = self.hidden, rate = self.h['dropout_rate']) def build_model(self, method): with self.graph.as_default(): self.X = tf.placeholder(dtype = tf.float32, shape = (None, self.D), name = 'X') self.Y = tf.placeholder(dtype = tf.int64, shape = (None), name = 'Y') self.Z = tf.placeholder(dtype = tf.int64, shape = (None), name = 'Z') self.W = tf.placeholder(dtype=tf.float32, shape=(None),name='W') self.hidden = self.X for i in range(self.h['M']): name = 'hidden'+str(i) self.standard_hidden_layer(name) # last layer: utility in choice models self.output=tf.layers.dense(self.hidden, self.K, name = 'output') self.prob=tf.nn.softmax(self.output, name = 'prob') self.cl=tf.argmax(self.prob,1,name='class') self.output_tensor = tf.identity(self.output, name='logits') l1_l2_regularization = tf.contrib.layers.l1_l2_regularizer(scale_l1=self.h['l1_const'], scale_l2=self.h['l2_const'], scope=None) vars_ = tf.trainable_variables() weights = [var_ for var_ in vars_ if 'kernel' in var_.name] regularization_penalty = tf.contrib.layers.apply_regularization(l1_l2_regularization, vars_) # evaluate self.correct = tf.equal(self.cl, self.Y, name='correct') self.accuracy,self.update_op = tf.metrics.accuracy(labels=self.Y, predictions=self.cl,weights=self.W,name='accuracy') self.confusion_matrix = tf.confusion_matrix(self.Y,self.cl,weights=self.W,name='confusion_matrix') # Isolate the variables stored behind the scenes by the metric operation running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="accuracy") # Define initializer to initialize/reset running variables self.running_vars_initializer = tf.variables_initializer(var_list=running_vars) # loss function self.cost1 = 
(1-self.h['lam'])*tf.reduce_sum(self.W*tf.nn.sparse_softmax_cross_entropy_with_logits(logits = self.output, labels = self.Y), name = 'cost1')/tf.reduce_sum(self.W) iy=tf.transpose(tf.where(self.Y>0)) iy2=tf.transpose(tf.where(self.Y<1)) self.z2=tf.gather(self.Z, iy) self.c2=tf.gather(self.cl, iy) self.cr=correlation(tf.cast(tf.gather(self.Z, iy), 'float'), tf.cast(tf.gather(self.cl,iy), 'float'), tf.cast(tf.gather(self.W,iy), 'float')) if method=='cor_soft': self.prob_one=tf.squeeze(tf.gather(self.prob, [1], axis=1)) self.cost2 =self.h['lam']*abs(correlation(tf.cast(tf.gather(self.Z, iy), 'float'), tf.cast(tf.gather(self.prob_one,iy), 'float'), tf.cast(tf.gather(self.W,iy), 'float'))) self.cost=self.cost2+self.cost1 if method=='cor_soft_FP': self.prob_one=tf.squeeze(tf.gather(self.prob, [1], axis=1)) self.cost2 =self.h['lam']*abs(correlation(tf.cast(tf.gather(self.Z, iy2), 'float'), tf.cast(tf.gather(self.prob_one,iy2), 'float'), tf.cast(tf.gather(self.W,iy2), 'float'))) self.cost=self.cost2+self.cost1 self.optimizer = tf.train.AdamOptimizer(learning_rate = self.h['learning_rate']) # opt objective self.training_op = self.optimizer.minimize(self.cost) # minimize the opt objective self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer(),self.running_vars_initializer) self.saver= tf.train.Saver() def train_model(self): self.train_accuracy_list=[] self.test_accuracy_list=[] self.train_cost1_list=[] self.test_cost1_list=[] self.train_cost2_list=[] self.test_cost2_list=[] with tf.Session(graph=self.graph) as sess: self.init.run() #for early stopping: best_train_accuracy=0 best_cost=100 stop=False last_improvement=0 require_improvement=20 i=0 for i in tqdm(range(self.h['n_epoch'])): for k in range(1,self.N_train//self.h['n_mini_batch']+1): sess.run(self.running_vars_initializer) # gradient descent self.obtain_mini_batch(k) sess.run(self.training_op, feed_dict = {self.X: self.X_batch, self.Y: self.Y_batch, self.Z: self.Z_batch, self.W: 
self.W_batch}) for k in range(self.N_train//self.h['n_mini_batch']+1,self.N_train//self.h['n_mini_batch']+2): sess.run(self.running_vars_initializer) self.obtain_mini_batch(k,full=False) sess.run(self.training_op, feed_dict = {self.X: self.X_batch, self.Y: self.Y_batch, self.Z: self.Z_batch, self.W: self.W_batch}) self.cl_train =sess.run(self.cl,feed_dict={self.X: self.X_train_, self.Y: self.Y_train_, self.Z: self.Z_train_}) self.cl_test =sess.run(self.cl,feed_dict={self.X: self.X_test, self.Y: self.Y_test, self.Z: self.Z_test}) self.update_train_temp = sess.run(self.update_op,feed_dict = {self.cl: self.cl_train, self.Y: self.Y_train_, self.W: self.W_train_}) self.accuracy_train_temp = sess.run(self.accuracy) self.update_test_temp = sess.run(self.update_op,feed_dict = {self.cl: self.cl_test, self.Y: self.Y_test, self.W: self.W_test}) self.accuracy_test_temp = sess.run(self.accuracy) self.train_accuracy_list.append(self.accuracy_train_temp) self.test_accuracy_list.append(self.accuracy_test_temp) if self.h['lam']!=0: self.cost1_train_temp = self.cost1.eval(feed_dict = {self.X: self.X_train_, self.Y: self.Y_train_, self.Z: self.Z_train_, self.W: self.W_train_}) self.cost1_test_temp = self.cost1.eval(feed_dict = {self.X: self.X_test, self.Y: self.Y_test, self.Z: self.Z_test, self.W: self.W_test}) self.cost2_train_temp = self.cost2.eval(feed_dict = {self.X: self.X_train_, self.Y: self.Y_train_, self.Z: self.Z_train_, self.W: self.W_train_}) self.cost2_test_temp = self.cost2.eval(feed_dict = {self.X: self.X_test, self.Y: self.Y_test, self.Z: self.Z_test, self.W: self.W_test}) self.train_cost1_list.append(self.cost1_train_temp) self.test_cost1_list.append(self.cost1_test_temp) self.train_cost2_list.append(self.cost2_train_temp) self.test_cost2_list.append(self.cost2_test_temp) self.confusion_matrix_z0_train =self.confusion_matrix.eval(feed_dict={self.X: self.X_z0_train, self.Y: self.Y_z0_train, self.Z: self.Z_train, self.W: self.W_z0_train}) self.confusion_matrix_z1_train 
=self.confusion_matrix.eval(feed_dict={self.X: self.X_z1_train, self.Y: self.Y_z1_train, self.Z: self.Z_train, self.W: self.W_z1_train}) self.confusion_matrix_z0_test =self.confusion_matrix.eval(feed_dict={self.X: self.X_z0_test, self.Y: self.Y_z0_test, self.Z: self.Z_test, self.W: self.W_z0_test}) self.confusion_matrix_z1_test =self.confusion_matrix.eval(feed_dict={self.X: self.X_z1_test, self.Y: self.Y_z1_test, self.Z: self.Z_test, self.W: self.W_z1_test}) train_cm0=self.confusion_matrix_z0_train.ravel() train_cm1=self.confusion_matrix_z1_train.ravel() test_cm0=self.confusion_matrix_z0_test.ravel() test_cm1=self.confusion_matrix_z1_test.ravel() train_FNR0=train_cm0[2]/(train_cm0[2]+train_cm0[3]) train_FNR1=train_cm1[2]/(train_cm1[2]+train_cm1[3]) train_FNR_gap=train_FNR0-train_FNR1 test_FNR0=test_cm0[2]/(test_cm0[2]+test_cm0[3]) test_FNR1=test_cm1[2]/(test_cm1[2]+test_cm1[3]) test_FNR_gap=test_FNR0-test_FNR1 train_FPR0=train_cm0[1]/(train_cm0[0]+train_cm0[1]) train_FPR1=train_cm1[1]/(train_cm1[0]+train_cm1[1]) train_FPR_gap=train_FPR0-train_FPR1 test_FPR0=test_cm0[1]/(test_cm0[0]+test_cm0[1]) test_FPR1=test_cm1[1]/(test_cm1[0]+test_cm1[1]) test_FPR_gap=test_FPR0-test_FPR1 if i%100==0: print("Epoch ", i," Accuracy_train = ", self.accuracy_train_temp,\ " Cost1_train = ",self.cost1_train_temp,\ " Cost2_train = ",self.cost2_train_temp,\ " Cost_train = ",self.cost1_train_temp+self.cost2_train_temp) if not FP: print(" FNR_gap_train = ", train_FNR_gap,\ " FNR_gap_test = ",test_FNR_gap) else: print(" FPR_gap_train = ", train_FPR_gap,\ " FPR_gap_test = ",test_FPR_gap) print(" FNR_gap_train = ", train_FNR_gap,\ " FNR_gap_test = ",test_FNR_gap) if best_cost > self.cost1_train_temp+self.cost2_train_temp: save_sess=sess best_cost=self.cost1_train_temp+self.cost2_train_temp last_improvement = 0 self.best_iter=i self.saver.save(sess, self.RUN_DIR+self.MODEL_NAME+".ckpt") else: last_improvement +=1 # if last_improvement > require_improvement: # print("No improvement found during 
the "+str(require_improvement)+" last iterations, stopping optimization.") # # Break out from the loop. # stop = True else: if i%100==0: self.confusion_matrix_z0_train =self.confusion_matrix.eval(feed_dict={self.X: self.X_z0_train, self.Y: self.Y_z0_train, self.Z: self.Z_train, self.W: self.W_z0_train}) self.confusion_matrix_z1_train =self.confusion_matrix.eval(feed_dict={self.X: self.X_z1_train, self.Y: self.Y_z1_train, self.Z: self.Z_train, self.W: self.W_z1_train}) self.confusion_matrix_z0_test =self.confusion_matrix.eval(feed_dict={self.X: self.X_z0_test, self.Y: self.Y_z0_test, self.Z: self.Z_test, self.W: self.W_z0_test}) self.confusion_matrix_z1_test =self.confusion_matrix.eval(feed_dict={self.X: self.X_z1_test, self.Y: self.Y_z1_test, self.Z: self.Z_test, self.W: self.W_z1_test}) train_cm0=self.confusion_matrix_z0_train.ravel() train_cm1=self.confusion_matrix_z1_train.ravel() test_cm0=self.confusion_matrix_z0_test.ravel() test_cm1=self.confusion_matrix_z1_test.ravel() train_FNR0=train_cm0[2]/(train_cm0[2]+train_cm0[3]) train_FNR1=train_cm1[2]/(train_cm1[2]+train_cm1[3]) train_FNR_gap=train_FNR0-train_FNR1 test_FNR0=test_cm0[2]/(test_cm0[2]+test_cm0[3]) test_FNR1=test_cm1[2]/(test_cm1[2]+test_cm1[3]) test_FNR_gap=test_FNR0-test_FNR1 train_FPR0=train_cm0[1]/(train_cm0[0]+train_cm0[1]) train_FPR1=train_cm1[1]/(train_cm1[0]+train_cm1[1]) train_FPR_gap=train_FPR0-train_FPR1 test_FPR0=test_cm0[1]/(test_cm0[0]+test_cm0[1]) test_FPR1=test_cm1[1]/(test_cm1[0]+test_cm1[1]) test_FPR_gap=test_FPR0-test_FPR1 print("Epoch ", i," Accuracy_train = ", self.accuracy_train_temp,\ " Accuracy_test = ",self.accuracy_test_temp) if not FP: print(" FNR_gap_train = ", train_FNR_gap,\ " FNR_gap_test = ",test_FNR_gap) else: print(" FPR_gap_train = ", train_FPR_gap,\ " FPR_gap_test = ",test_FPR_gap) print(" FNR_gap_train = ", train_FNR_gap,\ " FNR_gap_test = ",test_FNR_gap) if best_train_accuracy < self.accuracy_train_temp: save_sess=sess best_train_accuracy=self.accuracy_train_temp 
last_improvement = 0 self.best_iter=i self.saver.save(sess, self.RUN_DIR+self.MODEL_NAME+".ckpt") else: last_improvement +=1 # if last_improvement > require_improvement: # print("No improvement found during the "+str(require_improvement)+" last iterations, stopping optimization.") # # Break out from the loop. # stop = True self.final_iter=i i+=1 def evaluate_model(self): tf.reset_default_graph() with tf.Session() as sess: saver = tf.train.import_meta_graph(self.RUN_DIR+self.MODEL_NAME+ ".ckpt.meta") saver.restore(sess, self.RUN_DIR+self.MODEL_NAME+ ".ckpt") graph = tf.get_default_graph() self.X = graph.get_tensor_by_name("X:0") self.Y = graph.get_tensor_by_name("Y:0") self.Z = graph.get_tensor_by_name("Z:0") self.W = graph.get_tensor_by_name("W:0") self.cl= graph.get_tensor_by_name("class:0") self.confusion_matrix = tf.confusion_matrix(self.Y,self.cl,weights=self.W, name='confusion_matrix') self.accuracy = tf.metrics.accuracy(labels=self.Y, predictions=self.cl,weights=self.W,name='accuracy')[1] running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="accuracy") running_vars_initializer = tf.variables_initializer(var_list=running_vars) self.init=tf.group(tf.local_variables_initializer()) self.init.run() self.cl_z0_train =sess.run(self.cl,feed_dict={self.X: self.X_z0_train, self.Y: self.Y_z0_train, self.Z: self.Z_train}) self.cl_z1_train =sess.run(self.cl,feed_dict={self.X: self.X_z1_train, self.Y: self.Y_z1_train, self.Z: self.Z_train}) self.cl_z0_test =sess.run(self.cl,feed_dict={self.X: self.X_z0_test, self.Y: self.Y_z0_test, self.Z: self.Z_test}) self.cl_z1_test =sess.run(self.cl,feed_dict={self.X: self.X_z1_test, self.Y: self.Y_z1_test, self.Z: self.Z_test}) self.cl_train =sess.run(self.cl,feed_dict={self.X: self.X_train_, self.Y: self.Y_train_, self.Z: self.Z_train_}) self.cl_test =sess.run(self.cl,feed_dict={self.X: self.X_test, self.Y: self.Y_test, self.Z: self.Z_test}) self.accuracy_train = self.accuracy.eval(feed_dict={self.Y: self.Y_train_, 
self.cl:self.cl_train,self.W: self.W_train_}) self.accuracy_test = self.accuracy.eval(feed_dict={self.Y: self.Y_test, self.cl:self.cl_test,self.W: self.W_test}) self.confusion_matrix_z0_train =self.confusion_matrix.eval(feed_dict={self.Y: self.Y_z0_train, self.cl: self.cl_z0_train, self.W: self.W_z0_train}) self.confusion_matrix_z1_train =self.confusion_matrix.eval(feed_dict={self.Y: self.Y_z1_train, self.cl: self.cl_z1_train, self.W: self.W_z1_train}) self.confusion_matrix_z0_test =self.confusion_matrix.eval(feed_dict={self.Y: self.Y_z0_test, self.cl:self.cl_z0_test, self.W: self.W_z0_test}) self.confusion_matrix_z1_test =self.confusion_matrix.eval(feed_dict={self.Y: self.Y_z1_test, self.cl:self.cl_z1_test, self.W: self.W_z1_test})
[ "yunhan@mit.edu" ]
yunhan@mit.edu
f327656c3c6c957763b8883c4183d103b33e956c
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/verbs/_linking.py
619af3537e5435c213053702bea9f7364b783fca
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
228
py
from xai.brain.wordbase.verbs._link import _LINK


# class header (fixed typo: was "calss header")
class _LINKING(_LINK):
    """Word-base entry for the verb form "LINKING" (inflected from "link")."""

    def __init__(self):
        # Let the base word entry initialise the shared fields first.
        _LINK.__init__(self)
        self.name = "LINKING"   # surface form of this entry
        self.specie = 'verbs'   # word category used by the word base
        self.basic = "link"     # lemma / base form this entry derives from
        self.jsondata = {}      # extra metadata; none for this entry
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
a2121d08aa0926caba510b2adebc34c8814b7af7
fa3e3c36a237efa2aa08ab9ae0c2bf1c6b0cc072
/w.py
741837179270392d5ec84b3cd7ff9689958cabe0
[]
no_license
redashu/infogather
e943b49b2baccf0d20e629ac4f718aa8967698a1
b3a9033ff79cab63b9590199cfa201deccc418bd
refs/heads/master
2021-01-11T14:45:18.926667
2017-03-17T16:33:12
2017-03-17T16:33:12
80,208,478
0
1
null
null
null
null
UTF-8
Python
false
false
126
py
c=raw_input("Enter value 1 : ") d=raw_input("Enter value 2 : ") e=raw_input("Enter value 3 : ") x=(c,d,e) print x
[ "asashuthegreat4@gmail.com" ]
asashuthegreat4@gmail.com
f3dc9836116d5a3cab9ad691959312fa860676f5
af0f3fe331c0f52961e856730991ac53c060944d
/śpiewnik/urls.py
a2f89cfe6b19f7133cfd71fcd4c904dfe85efd78
[]
no_license
AnT313/Songbook
928bb47aab951140a9f4d577f3fbfd59b730c41d
831b303d50179be834b3c4d520e86d75ac208744
refs/heads/master
2020-06-23T02:32:13.222972
2019-07-23T21:19:45
2019-07-23T21:19:45
198,478,079
0
0
null
null
null
null
UTF-8
Python
false
false
365
py
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth import views
# NOTE(review): authenticate, login and logout are imported but not used in
# this module -- presumably leftovers; verify before removing.
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout

# Project-level URL configuration: admin site, the songbook app's routes at
# the site root, and Django's built-in login view.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('songbook.urls')),
    path('accounts/login/', views.LoginView.as_view(), name='login')
]
[ "kaczyk13@gmail.com" ]
kaczyk13@gmail.com
f846aa50b94237abbb7afa193043c0c862df1261
606725d080dc21f7d995134a19b37df02688083b
/Arrays/LongestSubStringAndLength/LenOfLongestSubString.py
de1be8485e091c52aea6d1520fd3c8e4e899128e
[]
no_license
bhavinidata/DataStructuresAndAlgorithms
fa4d83c56c5322d14a9acf97e76464aba82a144a
33680d395a3ed11e874309246db14b6494347df1
refs/heads/master
2021-05-21T23:29:59.524349
2020-05-14T04:35:37
2020-05-14T04:35:37
252,858,353
0
0
null
null
null
null
UTF-8
Python
false
false
3,481
py
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of *s* with no
        repeating characters.

        Sliding-window approach: ``start`` is the left edge (inclusive) of
        the current duplicate-free window and ``last_seen`` maps each
        character to the index of its most recent occurrence.  Each
        character is visited once, so this is O(n) time and
        O(min(n, alphabet)) space -- the previous list-based version paid
        an O(window) membership test and a slice per character.
        """
        last_seen = {}  # char -> index of its most recent occurrence
        start = 0       # left edge of the current window
        longest = 0
        for i, ch in enumerate(s):
            # A repeat only shrinks the window when its previous occurrence
            # lies inside the current window (at or after `start`).
            if ch in last_seen and last_seen[ch] >= start:
                start = last_seen[ch] + 1
            last_seen[ch] = i
            longest = max(longest, i - start + 1)
        return longest


if __name__ == '__main__':
    solution = Solution()

    # 55-character alphabet block repeated six times (same value as the
    # original hand-written literal); expected answer is the block length.
    abc = "hijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" * 6
    max_length = solution.lengthOfLongestSubstring(abc)
    print(f"Max length: {max_length}")

    abc = "bbbbb"
    max_length = solution.lengthOfLongestSubstring(abc)
    print(f"Max length: {max_length}")
    # The original demo queried "bbbbb" twice; kept for identical output.
    max_length = solution.lengthOfLongestSubstring(abc)
    print(f"Max length: {max_length}")

    abc = "pwwkew"
    max_length = solution.lengthOfLongestSubstring(abc)
    print(f"Max length: {max_length}")

    abc = ""
    max_length = solution.lengthOfLongestSubstring(abc)
    print(f"Max length at the last: {max_length}")

    abc = " "
    max_length = solution.lengthOfLongestSubstring(abc)
    print(f"Max length: {max_length}")
[ "bhavini266@gmail.com" ]
bhavini266@gmail.com
93fec12421ef49c3bb96557dd14d0c51f33edb3c
a784b9a7dc195d6c407c448f306eb8a02fa391bd
/maoyan/spiders/MaoyanSpider.py
41ac9cb665d99f6f0965d107be7db172f9bb2ea7
[]
no_license
jdqx777/maoyan
2161afa9117b154e24056d665c1c0bf504102714
137a2959349e5b2f3aa2963cc80a0b9d0e776f03
refs/heads/master
2020-12-02T06:04:50.208581
2019-12-30T13:00:23
2019-12-30T13:00:23
230,916,840
0
0
null
null
null
null
UTF-8
Python
false
false
883
py
# -*- coding: utf-8 -*-
import scrapy
from maoyan.items import MaoyanItem
import time


class MaoyanspiderSpider(scrapy.Spider):
    """Spider that scrapes the Maoyan movie board page into MaoyanItem objects."""

    name = 'MaoyanSpider'
    allowed_domains = ['maoyan.com']
    start_urls = ['https://maoyan.com/board']

    def parse(self, response):
        board_entries = response.css('.board-wrapper dd')
        # Debug output: show what the board selector matched.
        separator = '#' * 100
        print(separator)
        print(board_entries)
        print(type(board_entries))
        print(separator)
        # One <dd> per movie; straightforward text fields share one pattern.
        field_selectors = (
            ('index', '.board-index::text'),
            ('title', '.name a::text'),
            ('star', '.star::text'),
            ('releasetime', '.releasetime::text'),
        )
        for entry in board_entries:
            item = MaoyanItem()
            for field, selector in field_selectors:
                item[field] = entry.css(selector).extract_first()
            # The score is rendered in two pieces (integer part + fraction).
            item['score'] = (entry.css('.integer::text').extract_first()
                             + entry.css('.fraction::text').extract_first())
            yield item
[ "2712784833@qq.com" ]
2712784833@qq.com
b0e5a34d0da62b3ac1bb555c2af50ad9b943b240
f4699565d60eae56230158576a0fe410e35144da
/scripts/seed.py
06b5e0ae0ac61a900fa9c0284238e9ec12ffc52c
[]
no_license
credence0x/CoinOffering
7592a9f9b825483bfe23e26906c9d3ac33347647
60c3a4d5f99715b6748bd5cb4d78aec6343175a7
refs/heads/main
2023-07-16T20:13:30.228683
2021-09-04T06:10:23
2021-09-04T06:10:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,030
py
from django.shortcuts import render
from django.contrib.auth.models import User
from Users.models import Customer
from Bid.models import Bid, SuccessfulBid, UnsuccessfulBid
from Coins.models import Coin
from datetime import datetime, timedelta

# Create your views here.


def run(*args):
    """Database seed script.

    With ``'delete'`` in *args*: remove every user whose username contains
    "test" (per the note below, cascades remove the dependent rows).
    Otherwise: ensure the "MillionToken" Coin exists, create 20 test
    users/customers, and place one deliberately engineered Bid per customer
    (see the CREATE BIDS notes below for the scenarios being exercised).
    """
    if 'delete' in args:
        # Tear-down path: drop all previously seeded test users.
        test_users = User.objects.filter(username__icontains="test")
        for each in test_users:
            each.delete()
            print("deleted {} successfully".format(each))
        # Coin.objects.get(name="MillionToken").delete()
        # print("MillionToken deleted successfully")
        # Cascades make all other models deleted
    else:
        # NOTE(review): assumes a user named "lanre" already exists;
        # User.objects.get raises DoesNotExist otherwise -- confirm setup.
        admin = User.objects.get(username="lanre")
        if not Coin.objects.filter(name="MillionToken").exists():
            Coin.objects.create(creator=admin,
                                name="MillionToken",
                                number_available=22740,
                                end_bid_time=datetime.now())
            print("MillionToken created successfully")

        customers = []

        ######## CREATE 20 USERS AND CUSTOMERS #######
        def new_user_data(num):
            """Build a unique (username, password, first, last, email) tuple."""
            # a number is added as suffix to make each field unique
            num = str(num)
            username = "test" + num
            password = "digiCentra123"
            first_name = username + "_first"
            last_name = username + "_last"
            email = username + "@digicentra.com"
            return username, password, first_name, last_name, email

        # Create 20 Users and Customer models
        print("..........Creating 20 test Users...........")
        for num in range(20):
            username, password, first_name, last_name, email = new_user_data(num)
            new_user = User.objects.create_user(username=username,
                                                password=password,
                                                first_name=first_name,
                                                last_name=last_name,
                                                email=email)
            customer = Customer.objects.create(user=new_user)
            customers.append(customer)
            print("Successfully created user {}".format(num + 1))

        ##########################################################
        # CREATE BIDS
        # I'm basically engineering the data so it covers scenerios
        # in which the app will have to make sure that;
        # 1. Unsucessful bids would be recorded
        # 2. Successful bids will be recorded
        # 3. Price takes first priority
        # 4. Timestamps takes second priority
        # 5. Equal price bid are handled consequtively
        # 6. Ensure removal of bidder from queue and saving model once done
        count = 0
        no_tokens_for_less_than_4 = [2000, 3000, 1000, 7000]
        # NOTE(review): price_for_less_than_4 is defined but never used below;
        # the literal 20000 is repeated in the count < 4 branch instead.
        price_for_less_than_4 = 20000
        old_timestamp = datetime.now() - timedelta(hours=2)
        for customer in customers:
            # Defaults for the un-special customers: mid-price, 700 tokens.
            number_of_tokens = 700
            price_per_token = 18000
            timestamp = datetime.now()
            if count < 4:
                # First four customers bid top price; customer 3 also gets an
                # older timestamp to exercise the timestamp tie-break.
                price_per_token = 20000
                number_of_tokens = no_tokens_for_less_than_4[count]
                if count == 3:
                    timestamp = old_timestamp
            elif count == 19:
                price_per_token = 15000
            elif count == 18:
                price_per_token = 17000
                number_of_tokens = 2100
            elif count == 16:
                # Customers 15, 16 and 18 place identical equal-price bids to
                # exercise the "equal price handled consecutively" scenario.
                price_per_token = 17000
                number_of_tokens = 2100
            elif count == 15:
                price_per_token = 17000
                number_of_tokens = 2100
            elif count == 17:
                price_per_token = 14000
            Bid.objects.create(customer=customer,
                               price_per_token=price_per_token,
                               number_of_tokens=number_of_tokens,
                               timestamp=timestamp)
            count += 1
[ "lojetokun@gmail.com" ]
lojetokun@gmail.com
b84a05b1ead1676269d5aad60e42eddeb8382628
1e9ded5c3959461f0572a14b252512b9d773ed18
/setup.py
c4c0c93c8c9c28d336917b919f6a217b976a98b3
[]
no_license
delmandojoe/MainData
ecac4529e23409bba0a8991617f9f547d80aa6ab
dc3682c97bb680545fc4964b60373591cc23587c
refs/heads/master
2020-09-21T20:58:21.341431
2019-12-13T09:36:56
2019-12-13T09:36:56
224,928,170
0
0
null
null
null
null
UTF-8
Python
false
false
110
py
from distutils.core import setup

# NOTE: distutils is deprecated since Python 3.10; setuptools offers a
# drop-in `setup`.  Kept as-is to avoid introducing a new dependency here.
setup(
    name="Databuiz",
    version="1.0",
    # Fixed typo: the original passed "packagees", an unknown keyword that
    # distutils ignores with a warning -- so no packages were ever installed.
    packages=["Databuiz"],
)
[ "delmandojoe@gmail.com" ]
delmandojoe@gmail.com
727c059364956c850ab25acc74da2bf6dc32079c
5f5719fca3ba26a6daae2c8e78b5eaa62bac24c7
/guess.py
38d0490ae8c667a0e1a6cf5e7cee2885abf1cf6e
[]
no_license
seakun/automated_python_projects
8e4bc3444c336973ba89fc6e814fbd56a1d4e2ee
5e355f5bc3d073acd5d48cacf129dcbbc117a247
refs/heads/master
2022-04-08T17:17:39.766942
2020-03-11T14:20:28
2020-03-11T14:20:28
null
0
0
null
null
null
null
UTF-8
Python
false
false
760
py
# This is a guess-the-number game.
import random

print('Hello. What is your name?')
name = input()
# Fixed message: the original concatenated name directly into "I am", which
# printed e.g. "Well, BobI am thinking ...".
print('Well, ' + name + ', I am thinking of a number between 1 and 20.')
secretNumber = random.randint(1, 20)

# The player gets up to six guesses (range is 1..6 inclusive).
for guessesTaken in range(1, 7):
    print('Take a guess.')
    # NOTE(review): int() raises ValueError on non-numeric input; left
    # unhandled to keep the tutorial flow -- confirm that is acceptable.
    guess = int(input())

    if guess < secretNumber:
        # Fixed typo: was "You guess is too low."
        print('Your guess is too low.')
    elif guess > secretNumber:
        print('Your guess is too high.')
    else:
        break  # Correct guess -- stop early; guessesTaken keeps its value.

if guess == secretNumber:
    print('Good job, ' + name + '! You guessed my number in ' + str(guessesTaken) + ' guesses!')
else:
    print('Nope. The number I was thinking of was ' + str(secretNumber))
[ "seakun@users.noreply.github.com" ]
seakun@users.noreply.github.com
3166d69fee23803a4f44b970d5d70e7795f39204
de9b0ffa8f29471faed49dd356277713fbba453d
/fog/fog-client/fog-client
4415a0344a1112844d03dca3fabb2db0ae32a6d1
[ "Apache-2.0" ]
permissive
uk0/tor_dev
a3a7c8e08af6bf7ca9e919525778ce59f63e7d2b
1040d906474d1da463f4de57b3c5f72ae14f550d
refs/heads/master
2021-07-20T16:21:51.094597
2017-10-30T12:59:07
2017-10-30T12:59:07
108,850,963
1
0
null
null
null
null
UTF-8
Python
false
false
26,075
#!/usr/bin/python import argparse import os import sys from collections import namedtuple from functools import partial # TODO(infinity0): this is temporary workaround until we do #10047 if sys.platform == 'win32': os.environ["KILL_CHILDREN_ON_DEATH"] = "1" from pyptlib.util import parse_addr_spec from pyptlib.util.subproc import auto_killall, Popen from pyptlib.client import ClientTransportPlugin from subprocess import PIPE from twisted.internet.defer import Deferred, DeferredList from twisted.internet.error import CannotListenError from twisted.internet.stdio import StandardIO from twisted.internet.protocol import Factory, connectionDone from twisted.internet.endpoints import TCP4ClientEndpoint from twisted.protocols.basic import LineReceiver from twisted.protocols.portforward import ProxyServer as _ProxyServer from twisted.python import log from txsocksx.client import SOCKS4ClientEndpoint, SOCKS5ClientEndpoint from fog.socks import SOCKSv4InterceptorFactory import shlex import logging DEFAULT_CONFIG_FILE_NAME = os.path.dirname(os.path.realpath(__file__)) + '/fogrc' logger = None def pt_setup_logger(): global logger logger = logging.getLogger('fog-logger') logger.setLevel(logging.WARNING) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) logger.addHandler(ch) def pt_child_env(managed_ver, env=os.environ): """ Prepare the environment for a child PT process, by clearing all TOR_PT_* envvars and TOR_PT_MANAGED_TRANSPORT_VER. 
""" cur_env = [(k, v) for k, v in env.iteritems() if not k.startswith('TOR_PT_')] if 'TOR_PT_STATE_LOCATION' in env: prev_pt_state_location = env['TOR_PT_STATE_LOCATION'] cur_env.append(('TOR_PT_STATE_LOCATION', os.path.join(prev_pt_state_location, 'fog'))) cur_env.append(('TOR_PT_MANAGED_TRANSPORT_VER', ','.join(managed_ver))) return cur_env class MethodSpec(namedtuple('MethodSpec', 'name protocol addrport args opts')): @classmethod def fromLine(cls, line): args = line.rstrip('\n').split(' ') name = args[0] protocol = args[1] addrport = parse_addr_spec(args[2]) args = args[3][-5:].split(',') if len(args) > 3 and args[3].startswith("ARGS=") else [] opts = args[4][-9:].split(',') if len(args) > 4 and args[4].startswith("OPT-ARGS=") else [] return MethodSpec(name, protocol, addrport, args, opts) def branch(parent): """ Returns a new Deferred that does not advance the callback-chain of the parent. See http://xph.us/2009/12/10/asynchronous-programming-in-python.html for motivation. """ d = Deferred() parent.addCallback(lambda v: (v, d.callback(v))[0]) parent.addErrback(lambda f: (f, d.errback(f))[1]) return d class ManagedTransportProtocolV1(LineReceiver): """ A Twisted IProtocol to read PT output. See pt-spec.txt and others for details of the protocol. """ # TODO(infinity0): eventually this could be padded out and moved to pyptlib delimiter = os.linesep protocol_version = "1" def __init__(self): self.cmethods = {} self._dCMethodsDone = Deferred() self._dPluginError = Deferred() # dPluginError triggers errors on all sub-events, not the other way round # so fatal sub-events should call _abort rather than errback on their Deferreds self._dPluginError.addErrback(lambda f: (f, self._fireCMethodsDone().errback(f))[0]) # TODO(infinity0): call _abort if we don't recv CMETHODS DONE within n sec def whenCMethodsDone(self): """ Return a new Deferred that calls-back when CMETHODS DONE is received. 
""" return branch(self._dCMethodsDone) def whenPluginError(self): """ Return a new Deferred that errors-back when the remote plugin fails. Note: the success chain (callback) is never fired. """ return branch(self._dPluginError) def lineReceived(self, line): if not line: return (kw, args) = line.split(' ', 1) if kw == "VERSION": version = args.strip() if version != self.protocol_version: self._abort(ValueError("child used unsupported managed transport version: %s" % version)) elif kw == "CMETHOD": cmethod = MethodSpec.fromLine(args) self.cmethods[cmethod.name] = cmethod elif kw == "CMETHODS" and args == "DONE": self._fireCMethodsDone().callback(self.cmethods) else: pass # ignore unrecognised line def connectionLost(self, reason=connectionDone): self._firePluginError().errback(reason) def _abort(self, exc): self._firePluginError().errback(exc) self.transport.loseConnection() def _fireCMethodsDone(self): """Return dCMethodsDone or a dummy if it was already called.""" if self._dCMethodsDone: d = self._dCMethodsDone self._dCMethodsDone = None return d return Deferred().addErrback(lambda *args: None) def _firePluginError(self): """Return dPluginError or a dummy if it was already called.""" if self._dPluginError: d = self._dPluginError self._dPluginError = None return d return Deferred().addErrback(lambda *args: None) # TODO(infinity0): remove this class when twisted update their side class ProxyServer(_ProxyServer): def connectionMade(self): # code copied from super class, except instead of connecting # to a TCP endpoint we abstract that out to a child method self.transport.pauseProducing() client = self.clientProtocolFactory() client.setServer(self) if self.reactor is None: from twisted.internet import reactor self.reactor = reactor self.connectProxyClient(client) def connectProxyClient(self, client): raise NotImplementedError() class OneUseSOCKSWrapper(ProxyServer): def connectProxyClient(self, client): local_host, local_port = self.factory.method_spec.addrport 
TCPPoint = TCP4ClientEndpoint( self.reactor, local_host, local_port) # Next PT may need either SOCKS4 or SOCKS5 so check its protocol and get the required class socks_endpoint_class = self.getSocksEndpointClass() SOCKSPoint = socks_endpoint_class( self.factory.remote_host, self.factory.remote_port, TCPPoint) # Store port for debugging messages before stopListening is called. # listen_port will not have a port after stopListening is called. stored_port = self.factory.listen_port.getHost().port d_port_closed = self.factory.listen_port.stopListening() d_port_closed.addCallback( lambda x: logger.debug("Closed factory listener %s on port %s" % (self.factory, stored_port))) d_port_closed.addErrback( lambda x: logger.warn("Failed to close factory listener %s listening on port %s" % (self.factory, stored_port))) d = SOCKSPoint.connect(client) d.chainDeferred(self.factory.d_connected) @d.addErrback def _gotError(error): log.err(error, "error connecting to SOCKS server") def getSocksEndpointClass(self): """ Checks self.factory.method_spec.protocol and returns the appropriate socks endpoint class. """ socks_endpoint_class = None if self.factory.method_spec.protocol == 'socks4': socks_endpoint_class = SOCKS4ClientEndpoint elif self.factory.method_spec.protocol == 'socks5': socks_endpoint_class = SOCKS5ClientEndpoint else: raise ValueError("Pluggable transport requires unknown protocol %s. 
Supported protocols are %s" % (self.factory.method_spec.protocol, ('socks4', 'socks5'))) return socks_endpoint_class class OneUseSOCKSFactory(Factory): protocol = OneUseSOCKSWrapper def __init__(self, method_spec, dest_addr_port): self._connected_once = False self.method_spec = method_spec self.remote_host = dest_addr_port[0] self.remote_port = dest_addr_port[1] self.d_connected = Deferred() self.listen_port = None def __str__(self): return "OneUseSOCKSFactory connecting %s to %s:%s" % (self.method_spec, self.remote_host, self.remote_port) def __repr__(self): return "OneUseSOCKSFactory(%s, %s, %s)" % (self.method_spec, self.remote_host, self.remote_port) def setListenPort(self, listen_port): """ Sets the listen_port object. :param function listen_port: The function returned from a ListenTCP call. Used to shutdown the port when a connection is made. """ self.listen_port = listen_port def whenConnected(self): """ Returns a new Deferred that triggers when a connection is successfully made. """ return branch(self.d_connected) def buildProtocol(self, addr): """ Only allows one protocol to be created. After that it always returns None :param twisted.internet.interfaces.IAddress addr: an object implementing L{twisted.internet.interfaces.IAddress} """ if self._connected_once: return None else: self._connected_once = True return Factory.buildProtocol(self, addr) if sys.platform == "win32": # TODO(infinity0): push this upstream to Twisted from twisted.internet import _pollingfile import msvcrt _StandardIO = StandardIO class StandardIO(_StandardIO): def __init__(self, proto, stdin=None, stdout=None, reactor=None): """ Start talking to standard IO with the given protocol. Also, put it stdin/stdout/stderr into binary mode. 
""" if reactor is None: import twisted.internet.reactor reactor = twisted.internet.reactor _pollingfile._PollingTimer.__init__(self, reactor) self.proto = proto fdstdin = stdin or sys.stdin.fileno() fdstdout = stdout or sys.stdout.fileno() for stdfd in (fdstdin, fdstdout): msvcrt.setmode(stdfd, os.O_BINARY) hstdin = msvcrt.get_osfhandle(fdstdin) self.stdin = _pollingfile._PollableReadPipe( hstdin, self.dataReceived, self.readConnectionLost) hstdout = msvcrt.get_osfhandle(fdstdout) self.stdout = _pollingfile._PollableWritePipe( hstdout, self.writeConnectionLost) self._addPollableResource(self.stdin) self._addPollableResource(self.stdout) self.proto.makeConnection(self) def pt_launch_child(reactor, client, methodnames, chain_names, cmdline): """Launch a child PT and ensure it has the right transport methods.""" cur_env = pt_child_env(ManagedTransportProtocolV1.protocol_version) environment = dict(cur_env + { "TOR_PT_CLIENT_TRANSPORTS": ",".join(methodnames), }.items()) sub_proc = Popen(cmdline, stdout = PIPE, env = environment, ) sub_protocol = ManagedTransportProtocolV1() # we ought to pass reactor=reactor in below, but this breaks Twisted 12 StandardIO(sub_protocol, stdin=sub_proc.stdout.fileno()) methoddefers = [sub_protocol.whenCMethodsDone().addCallback( partial(pt_require_child, client, name, chain_names)) for name in methodnames] return sub_proc, sub_protocol, methoddefers def pt_require_child(client, childmethod, chain_names, cmethods): """Callback for checking a child PT has the right transport methods.""" if childmethod not in cmethods: for chain_name in chain_names: client.reportMethodError(chain_name, "failed to start required child transport: %s" % childmethod) raise ValueError() return cmethods[childmethod] def pt_get_unique_transport_list(aliases, config): """ Returns all the pts needed by the chains without duplicates :param aliases list: The list of alias names requested by tor and intersected with the transports fog can serve :param Config config: 
The configuration object """ uniq_transports = set() for alias in aliases: for pt_name in config.alias_map[alias]: uniq_transports.add(pt_name) return list(uniq_transports) def pt_setup_transports(reactor, client, configuration, pt_names, chain_names): """ Sets up the pluggable transports needed by the chains :param twisted.internet.interfaces.IReactor reactor: Reactor to install this PT to. :param pyptlib.client.ClientTransportPlugin client: PT client API. :param Config configuration: The configuration object. :param list pt_names: The list of pt names to setup. :param list chain_names: The list of chain names to launch. """ pt_defer_map = {} for pt_name in pt_names: if pt_name not in configuration.transport_map: raise ValueError("Pluggable transport %s not found in transport_map. Check your configuration file." % pt_name) pt_cmdline = configuration.transport_map[pt_name] # TODO make this more than a one item list when fixing multiple transports launched by one ClientTransportPlugin line. multi_pts = [pt_name] # Find all the chains where this pt is used. pt_chains = [chain_name for chain_name in chain_names if pt_name in configuration.alias_map[chain_name]] _, _, defers = pt_launch_child(reactor, client, multi_pts, pt_chains, pt_cmdline) for pt, defer in zip(multi_pts, defers): pt_defer_map[pt] = defer chains_finished_dlist = pt_setup_chains(reactor, client, configuration, chain_names, pt_defer_map) chains_finished_dlist.addCallback(lambda x: client.reportMethodsEnd()) def pt_setup_chains(reactor, client, configuration, chain_names, pt_defer_map): """ Sets up each chain of pluggable transports :param twisted.internet.interfaces.IReactor reactor: Reactor to install this PT to. :param pyptlib.client.ClientTransportPlugin client: PT client API. :param Config configuration: The configuration object. :param list chain_names: The list of chain names to setup. 
:param dict pt_defer_map: A map between each pt and the defer that will callback when the pt is successfully launched. """ all_chains_defer_list = [] for chain_name in chain_names: chain = configuration.alias_map[chain_name] if len(chain) < 2: raise ValueError("PT Chain %s does not contain enough transports." % chain) chain_deferred_list = DeferredList([pt_defer_map[pt] for pt in set(chain)]) partial_funct = partial(pt_launch_interceptor, reactor, client, configuration, chain_name) chain_deferred_list.addCallback(partial_funct) all_chains_defer_list.append(chain_deferred_list) return DeferredList(all_chains_defer_list) def pt_setup_socks_shim(pt_name, pt_chain, success_list, dest_addr_port, reactor, proxy_deferreds): """ Launches a socks proxy server to link two PTs together. :param str pt_name: The name of the pt to send traffic to. :param list pt_chain: The list of PTs in this chain. :param list success_list: A list of tuples containing a launch status boolean, MethodSpec pairs. Ex: [(True, MethodSpec(name='dummy', protocol='socks4', addrport=('127.0.0.1', 58982), args=[], opts=[])), (True, MethodSpec(name='b64', protocol='socks4', addrport=('127.0.0.1', 58981), args=[], opts=[]))] :param tuple dest_addr_port: The bridge address:port pair for the next PT to send its results to. :param twisted.internet.interfaces.IReactor reactor: Reactor to attack the TCP server to. :param list proxy_deferreds: This list has each factorys' deferred appended to it. :returns twisted.internet.interfaces.IListeningPort: An IListeningPort used for shutting down a factory after a connection is made. """ methodspec = [r[1] for r in success_list if r[1].name == pt_name][0] # Returns the resulting methodspec. 
factory = OneUseSOCKSFactory(methodspec, dest_addr_port) # TODO switch to using endpoints instead of listenTCP proxy_server = reactor.listenTCP(interface='127.0.0.1', port=0, factory=factory) factory.setListenPort(proxy_server) proxy_deferreds.append(factory.whenConnected()) logger.debug("launched %s on port %s with dest %s" % (pt_name, proxy_server.getHost().port, dest_addr_port)) return proxy_server def pt_launch_chain(dest_addr_port, pt_chain, _chain_set_up, reactor, success_list): """ Launches a chain of pluggable transports by connecting each pt with SOCKS proxies. :param tuple dest_addr_port: The bridge address:port pair to connect to. :param list pt_chain: The list of pt names to launch. :param function _chain_set_up: The function to call when the shims have been set up. :param twisted.internet.interfaces.IReactor reactor: Reactor to install this PT to. :param list success_list: A list of tuples containing a launch status boolean, MethodSpec pairs. Ex: [(True, MethodSpec(name='dummy', protocol='socks4', addrport=('127.0.0.1', 58982), args=[], opts=[])), (True, MethodSpec(name='b64', protocol='socks4', addrport=('127.0.0.1', 58981), args=[], opts=[]))] """ proxy_deferreds = [] last_pt_name = pt_chain[-1] logger.debug("launching chain %s" % pt_chain) # Initialize prev_server to the port picked by the last proxy server as that's the only one we know yet. last_server = pt_setup_socks_shim(last_pt_name, pt_chain, success_list, dest_addr_port, reactor, proxy_deferreds) prev_server = last_server for pt_name in reversed(pt_chain[:-1]): # Loops through the pts linking them together through SOCKS proxies, skipping the last pt. prev_server = pt_setup_socks_shim(pt_name, pt_chain, success_list, ('127.0.0.1', prev_server.getHost().port), reactor, proxy_deferreds) def check_chain_all_connected(protocol_list): """ Checks all the shims launched to see if they successfully connected. 
:param list protocol_list: A list of tuples containing status boolean, twisted.protocols.portforward.ProxyClient pairs. Ex: [(True, <twisted.protocols.portforward.ProxyClient instance at 0x10b825518>), (True, <twisted.protocols.portforward.ProxyClient instance at 0x10b829518>)] """ if all([result[0] for result in protocol_list]): logger.debug("All PT shims connected correctly") else: # At this point the SOCKS protocol is in communication mode so no need to call makeReply(91) # This assumes that the child pluggable transport will shut down the connection cleanly. failed_protocols = [x[1] for x in protocol_list if x[0] == False] logger.error("Shims %s failed to connect." % failed_protocols) raise ValueError() finished = DeferredList(proxy_deferreds) finished.addCallback(check_chain_all_connected) _chain_set_up((prev_server.getHost().host, prev_server.getHost().port)) def pt_launch_interceptor(reactor, client, configuration, pt_method_name, success_list): """ Launches a SOCKS interceptor. :param twisted.internet.interfaces.IReactor reactor: Reactor to install this PT to. :param pyptlib.client.ClientTransportPlugin client: PT client API. :param Config configuration: The configuration structure for this pair. :param str pt_method_name: The name of the pt chain to launch. Ex: "obfs3_flashproxy" :param list success_list: A list of tuples containing a launch status boolean, MethodSpec pairs. 
Ex: [(True, MethodSpec(name='dummy', protocol='socks4', addrport=('127.0.0.1', 58982), args=[], opts=[])), (True, MethodSpec(name='b64', protocol='socks4', addrport=('127.0.0.1', 58981), args=[], opts=[]))] """ logger.debug("launching interceptor for %s" % pt_method_name) pt_chain = configuration.alias_map[pt_method_name] success = all(r[0] for r in success_list if r[1].name in pt_chain) # failure was already reported by pt_require_child, just return if not success: return socks_interceptor = SOCKSv4InterceptorFactory(pt_method_name, lambda dest_addr_port, pt_method_name, chain_finished: pt_launch_chain(dest_addr_port, pt_chain, chain_finished, reactor, success_list)) # TODO switch to using endpoints instead of listenTCP try: interceptor = reactor.listenTCP(interface='127.0.0.1', port=0, factory=socks_interceptor) interceptor_port = interceptor.getHost().port except CannotListenError: client.reportMethodError(pt_method_name, "failed to launch SOCKS interceptor. The interceptor listenTCP failed.") return client.reportMethodSuccess(pt_method_name, "socks4", ("127.0.0.1", interceptor_port)) class Config(): # Transport map links a pluggable transport name to the a commandline to launch it. # Ex: {'b64' : 'exec obfsproxy managed'} transport_map = None #Alias map links a pluggable transport chain name to a list of individual pluggable transports # Ex: {'dummy_b64_dummy2' : ['dummy''b64''dummy2']} alias_map = None def __init__(self, transport_map, alias_map): self.transport_map = transport_map self.alias_map = alias_map def __repr__(self): return "Config(%s, %s)" % (self.transport_map, self.alias_map) def __str__(self): return "Config Object with transport_map: %s, and alias_map %s." % (self.transport_map, self.alias_map) @classmethod def parse(cls, config_string): """ Reads a configuration string and returns an instance of configuration. Uses shlex to parse configuration lines. 
:param str config_string: The string which will be parsed to populate the transport_map and alias_map hash tables. See the file example-fog-config for format. """ # TODO Add possibility of reading a ClientTransportPlugin with multiple transport types # Ex: ClientTransportPlugin obfs3,scramblesuit obfsclient --option=value line_counter = 0 lines = config_string.split('\n') transport_map = {} alias_map = {} for line in lines: line_counter += 1 if len(line) > 0 and line[0] != '#' : # Check for empty lines and comment tags on the first line = line.strip() delimited_tokens = shlex.split(line) if len(delimited_tokens) > 1: config_line_type = delimited_tokens[0] # This can be either Alias or ClientTransportPlugin if config_line_type == 'ClientTransportPlugin': cls.parse_transport_line(transport_map, delimited_tokens, line_counter) elif config_line_type == 'Alias': cls.parse_alias_line(alias_map, transport_map, delimited_tokens, line_counter) else: logger.warn("Configuration file has unknown line %s: '%s'" % (line_counter, line)) return cls(transport_map, alias_map) @classmethod def parse_transport_line(cls, transport_map, delimited_tokens, line_counter): transport_name = delimited_tokens[1] transport_cmdline = delimited_tokens[2:] if transport_name in transport_map: raise ValueError('Configuration file has duplicate ClientTransportPlugin lines. Duplicate line is at line number %s' % line_counter) transport_map[transport_name] = transport_cmdline @classmethod def parse_alias_line(cls, alias_map, transport_map, delimited_tokens, line_counter): alias_name = delimited_tokens[1] # Example: "obfs3_flashproxy" alias_path = delimited_tokens[2].split('|') # Example: "obfs3|flashproxy" if alias_name in alias_map: raise ValueError('Configuration file has duplicate Alias lines. Duplicate line is at line number %s' % line_counter) for pt_name in alias_path: if pt_name not in transport_map: raise KeyError('Transport map is missing pluggable transport %s needed for chain %s. 
Check your configuration file for a ClientTransportPlugin line can launch %s' % (pt_name, alias_name, pt_name)) alias_map[alias_name] = alias_path def main(*args): parser = argparse.ArgumentParser() parser.add_argument("-f", help="fog configuration file path", metavar='FOGFILE', type=argparse.FileType('r'), default=DEFAULT_CONFIG_FILE_NAME) pt_setup_logger() # TODO(infinity0): add an "external" mode, which would require us to run # obfsproxy in external mode too. opts = parser.parse_args(args) configuration = None file_contents = opts.f.read() configuration = Config.parse(file_contents) pt_method_names = configuration.alias_map.keys() client = ClientTransportPlugin() client.init(pt_method_names) # Initialize our possible methods to all the chains listed by the fog file and stored in alias map. if not client.getTransports(): logger.error("no transports to serve. pt_method_names may be invalid.") return 1 pt_method_names = pt_get_unique_transport_list(client.getTransports(), configuration) from twisted.internet import reactor auto_killall(1, cleanup=reactor.stop) pt_setup_transports(reactor, client, configuration, pt_method_names, client.getTransports()) reactor.run(installSignalHandlers=0) return 0 if __name__ == "__main__": sys.exit(main(*sys.argv[1:]))
[ "zhang-rd-fe@s139.org" ]
zhang-rd-fe@s139.org
74968907cefb2fd918142fe959af5345aec4c6c1
21eb0ff114558cbb66a1e2a163f68276370eb265
/loginsys/urls.py
c2506741985b1318c32bf69a04b7a4d9c1dff2b3
[]
no_license
nntndfrk/dw_test_task
163ba574c7a93b07a0df0a763191e1662c564bfb
085fdbec1d2cc594a0c5e70ddd84987fd14114dd
refs/heads/master
2021-01-11T17:19:19.580952
2017-01-25T16:26:00
2017-01-25T16:26:00
79,743,070
0
0
null
null
null
null
UTF-8
Python
false
false
178
py
from django.conf.urls import url from . import views urlpatterns = [ url(r'^login/', views.login, name='login'), url(r'^logout/', views.logout, name='logout'), ]
[ "nntndfrk@gmail.com" ]
nntndfrk@gmail.com
db739a5c2e39385334201aa8edd4fbf5ba957494
67b3a264eccac1fdf3d061ba7e189ad577d7b241
/scraper.py
07a12f46b0db471c679fefaac6c9407fb95b5ef3
[ "MIT" ]
permissive
shw079/HMDB-Protein-Scraper
c8c8c645da42f88c0caa69dd3f869a8f9d7c357d
2f264f2b6715d478eec8a5868b974786f524238d
refs/heads/master
2022-12-05T20:41:55.989259
2020-08-25T00:23:46
2020-08-25T00:23:46
289,160,830
0
0
null
null
null
null
UTF-8
Python
false
false
829
py
import argparse from src.pquery import ProteinQuery def write_tsv(records, out): with open(out, "w") as h: h.write("HMDB_id,UniProt_id,name\n") for record in records: h.write("\t".join(record)) h.write("\n") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Using a keyword to search in HMDB and save results to a table") parser.add_argument("-k", type=str, required=True, help="keywords to search in HMDB") parser.add_argument("-o", type=str, required=True, help="output path of tab-delimited result table") args = parser.parse_args() query = ProteinQuery(args.k) query.parse() write_tsv(query.res, args.o) print("---Found {} records of {}---".format(len(query.res), args.k))
[ "shuow@princeton.edu" ]
shuow@princeton.edu
e2e0a5e05ade4bf1b990a627802943af3a19626d
f5c7d50973d47abd555502470b300b3c70af9fa5
/voting/asgi.py
856ae28db6db7b31736db9f3585a818ef2de5cc0
[ "MIT" ]
permissive
jess-monter/voting_back
62b67fafcfa8a9b7feebbca463c5055efdff7d98
de54218f01095f5090d490cabf32a86b1e608925
refs/heads/main
2023-04-06T16:00:45.066076
2021-04-14T07:51:10
2021-04-14T07:51:10
336,810,613
0
0
MIT
2021-04-14T07:51:11
2021-02-07T14:46:05
Python
UTF-8
Python
false
false
756
py
""" ASGI config for voting project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "voting.settings") django_asgi_app = get_asgi_application() from channels.auth import AuthMiddlewareStack from channels.routing import ProtocolTypeRouter, URLRouter import voting.apps.notifications.routing application = ProtocolTypeRouter( { "http": django_asgi_app, "websocket": AuthMiddlewareStack( URLRouter(voting.apps.notifications.routing.websocket_urlpatterns) ), } )
[ "=" ]
=
ecf1231711d781ebfd952b0e827ef79d4826bf2d
edc84d0d1d597868d029e5e4831abc8c864aa033
/t2.py
2c1b981f2b0d8f4dc030ec4061ebc8d39df2f7c3
[]
no_license
w56hm/DeepLearningCourse
221043bdf49ca86358dbe761323a13abdaf95017
3e2e664cc17527d98406d9242fdc413f4c328816
refs/heads/master
2021-02-22T06:36:39.228401
2020-06-27T14:53:13
2020-06-27T14:56:16
245,371,247
0
0
null
null
null
null
UTF-8
Python
false
false
392
py
import numpy as ny x=ny.array([64.3,99.6,145.45,63.75,135.46,92.85,86.97,144.76,59.3,116.03]) y=ny.array([62.55,82.42,132.62,73.31,131.05,86.57,85.49,127.44,55.25,104.84]) x1=ny.sum(x)/x.size y1=ny.sum(y)/y.size sum1=0 sum2=0 for i in range(x.size): sum1=sum1+(x[i]-x1)*(y[i]-y1) sum2=sum2+pow((x[i]-x1),2) w=sum1/sum2 b=y1-w*x1 print("w的值为:%f" %(w)) print("b的值为:%f" %(b))
[ "2053757057@qq.com" ]
2053757057@qq.com
f04d5ea4e010dbfe938ea8463466c00f149c2ff3
af1f4a109d6d1d52242dc57518056f52b6daa9e5
/pytorch_gaga/preprocessing/data/data_helper.py
d4c01433883677fcc696a6b38f2fb5e54977be2f
[]
no_license
Orion-wyc/GAGA
9fb429227be50cc15f318494d126cd7a22fbe7e4
63fa661375e3612f4d7477ad2722cab551ef3cb8
refs/heads/master
2023-04-18T19:08:55.242508
2023-02-23T12:01:19
2023-02-23T12:01:19
597,987,619
33
3
null
null
null
null
UTF-8
Python
false
false
10,855
py
import copy import dgl import numpy as np import torch from dgl import function as fn from scipy import sparse as sp from sklearn import preprocessing from data import fraud_dataset def normalize(feats, train_nid, dtype=np.float32): """Normalizing node features. Reference: <https://github.com/PonderLY/PC-GNN/blob/main/src/utils.py> :param feats: :param train_nid: :param dtype: :return: """ train_feats = feats[train_nid] scaler = preprocessing.StandardScaler() scaler.fit(train_feats) feats = scaler.transform(feats) return feats.astype(dtype) def row_normalize(mx, dtype=np.float32): """Row-normalize sparse matrix Reference: <https://github.com/williamleif/graphsage-simple> :param mx: :param dtype: :return: """ rowsum = np.array(mx.sum(1)) + 0.01 r_inv = np.power(rowsum, -1).flatten() r_inv[np.isinf(r_inv)] = 0. r_mat_inv = sp.diags(r_inv) mx = r_mat_inv.dot(mx) return mx.astype(dtype) def load_graphs(dataset_name='amazon', raw_dir='~/.dgl/', train_size=0.4, val_size=0.1, seed=717, norm=True, force_reload=False, verbose=True) -> dict: """Loading dataset from dgl's FraudDataset. 这里的设计目前是冗余且不必要的,可以直接使用dgl的异构图来处理. 
为了兼容后期的数据集, 将每一张图单独处理 """ if dataset_name in ['amazon', 'yelp', 'mimic']: fraud_data = fraud_dataset.FraudDataset(dataset_name, train_size=train_size, val_size=val_size, random_seed=seed, force_reload=force_reload) # elif dataset_name in ['BF10M']: # fraud_data = baidu_dataset.BaiduFraudDataset(dataset_name, raw_dir=raw_dir, # train_size=train_size, val_size=val_size, # random_seed=seed, force_reload=force_reload) # dgl 下一个版本可以取消下面的注释 # fraud_data = dgl.data.FraudDataset(dataset_name, train_size=train_size, val_size=val_size, # random_seed=seed, force_reload=force_reload) g = fraud_data[0] # Feature tensor dtpye is float64, change it to float32 if norm and (dataset_name not in ['BF10M']): # train_nid = torch.nonzero(g.ndata['train_mask'], as_tuple=True)[0] # h = normalize(g.ndata['feature'], train_nid, dtype=np.float32) h = row_normalize(g.ndata['feature'], dtype=np.float32) g.ndata['feature'] = torch.from_numpy(h) else: g.ndata['feature'] = g.ndata['feature'].float() # label shape is (n,1), reshape it to be (n, ) lb = g.ndata['label'].squeeze().long() g.ndata['label'] = lb graphs = {} for etype in g.etypes: graphs[etype] = g.edge_type_subgraph([etype]) # By default, the returned homogeneous graph will not have any node features. # Reloading homogeneous graphs from <.mat> file due to the bug in <dgl.metis_partition> # Or, using <to_simple> to remove redundant and duplicate edges. # name_dict = {"yelp": "YelpChi.mat", "amazon": "Amazon.mat"} # mat_path = os.path.join('~/.dgl', '{}/{}'.format(dataset_name, name_dict[dataset_name])) # mat = loadmat(mat_path) # graphs['homo'] = dgl.from_scipy(mat['homo']) g_homo = dgl.to_homogeneous(g) # remove duplicate edges graphs['homo'] = dgl.to_simple(g_homo) for key, value in g.ndata.items(): graphs['homo'].ndata[key] = value return graphs def calc_weight(g): """Compute row_normalized(D^(-1/2)AD^(-1/2)). Reference: <> :param g: DGLGraph. The homogeneous graph is used for calculating normalized edge weights. :return: FloatTensor. 
Edge weights. """ with g.local_scope(): # @todo (yuchen) 这里原本是 $\hat A = D^{-1/2}AD^{-1/2}$, 假设A=I,后期改一下 # Computing D^(-0.5)*D(-1/2), assuming A is Identity g.ndata["in_deg"] = g.in_degrees().float().pow(-0.5) g.ndata["out_deg"] = g.out_degrees().float().pow(-0.5) g.apply_edges(fn.u_mul_v("out_deg", "in_deg", "weight")) # Row-normalize weight g.update_all(fn.copy_e("weight", "msg"), fn.sum("msg", "norm")) g.apply_edges(fn.e_div_v("weight", "norm", "weight")) return g.edata["weight"] def preprocess(args, g, features): """Pre-compute the average of n-th hop neighbors. Reference: <> :param g: DGLGraph. A homogeneous graph with specific single relation "net_xxx". :param features: FloatTensor. The input node features. :param args: dict. Arguments used for preprocessing multi-hop (donated as n_hops) averaged features. :return: list A list that contains [0, args['n_hops']] hop's averaged node features. hop_feat_list = [feat_0, feat_1,...,feat_R] """ # g = dgl.to_homogeneous(g) with torch.no_grad(): g.edata["weight"] = calc_weight(g) g.ndata["feat_0"] = features for hop in range(1, args['n_hops'] + 1): g.update_all(fn.u_mul_e(f"feat_{hop - 1}", "weight", "msg"), fn.sum("msg", f"feat_{hop}")) hop_feat_list = [] for hop in range(args['n_hops'] + 1): hop_feat_list.append(g.ndata.pop(f"feat_{hop}")) return hop_feat_list def mask_nodes(nids, label_rate=0.5, seed=717): index = np.arange(nids.shape[0]) index = np.random.RandomState(seed).permutation(index) unmasked_idx = index[:int(label_rate * len(index))] masked_idx = index[int(label_rate * len(index)):] unmasked_nid = nids[unmasked_idx] masked_nid = nids[masked_idx] return unmasked_nid, masked_nid def add_label_emb(unmasked_nid, features, labels): n_nodes = features.shape[0] pos_nid, neg_nid = pos_neg_split(unmasked_nid, labels[unmasked_nid]) padding_feat = torch.zeros((n_nodes,3)) padding_feat[:, -1] = 1 padding_feat[unmasked_nid, -1] = 0 if pos_nid.shape != torch.Size([0]): padding_feat[pos_nid, 0] = 1 if neg_nid.shape != 
torch.Size([0]): padding_feat[neg_nid, 1] = 1 new_feat = torch.cat([features, padding_feat], dim=1) return new_feat def prepare_data(args): """Preparing training data. :param args: dict Arguments for loading datasets and pre-computing multi-hop neighbours' node features. :return: tuple Training data. feat_list is a list that contains $|relations|$ hop_feat_lists. [[feat_0, feat_1,...,feat_R], // hop-0 [feat_0, feat_1,...,feat_R], // hop-1 ..., [feat_0, feat_1,...,feat_R]] // hop-R """ graphs = load_graphs(dataset_name=args['dataset'], raw_dir=args['base_dir'], train_size=args['train_size'], val_size=args['val_size'], seed=args['seed'], norm=args['norm_feat'], force_reload=args['force_reload']) # MR-Graphs share same {feat,label,mask}, here we can load homo_g g = graphs['homo'] # Processing mask train_mask = g.ndata['train_mask'] val_mask = g.ndata['val_mask'] test_mask = g.ndata['test_mask'] train_nid = torch.nonzero(train_mask, as_tuple=True)[0] val_nid = torch.nonzero(val_mask, as_tuple=True)[0] test_nid = torch.nonzero(test_mask, as_tuple=True)[0] # Processing labels n_classes = 2 in_feats = g.ndata['feature'].shape[1] labels = g.ndata['label'].squeeze().long() # Pre-computing node features for each relation. 
feat_list = [] for k, vg in graphs.items(): # @todo minic dataset是rel开头的 if k.startswith('net') or k.startswith('rel'): feats = preprocess(args, vg, vg.ndata['feature'].float()) feat_list.append(feats) print(f"[Global] Dataset <{args['dataset']}> Overview\n" f"\tEntire (postive/total) {torch.sum(labels):>6} / {labels.shape[0]:<6}\n" f"\tTrain (postive/total) {torch.sum(labels[train_nid]):>6} / {labels[train_nid].shape[0]:<6}\n" f"\tValid (postive/total) {torch.sum(labels[val_nid]):>6} / {labels[val_nid].shape[0]:<6}\n" f"\tTest (postive/total) {torch.sum(labels[test_nid]):>6} / {labels[test_nid].shape[0]:<6}\n") return feat_list, labels, in_feats, n_classes, train_nid, val_nid, test_nid def load_batch(batch, feat_list, device='cpu'): """Loading a subset of features for each relation as a batch. :param batch: Tensor Node's nids for loading node features as a batch. :param feat_list: list List of hop_feat_lists that contains the averaged multi-hop node features. :param device: str :return: list A list contains batched node features for each relation (asuming there are $P$ relations). [batch_feat_list_0, batch_feat_list_1, ..., batch_feat_list_P] """ batch_feat_list = [] for hop_feat_list in feat_list: batch_feats = [feat[batch] for feat in hop_feat_list] batch_feat_list.append(batch_feats) batch_feat_list = [torch.stack(feat) for feat in batch_feat_list] batch_feats = torch.cat(batch_feat_list, dim=0) # if len(batch_feats.shape) == 2: # batch_feats = batch_feats.unsqueeze(1) return batch_feats.to(device) # deprecated def _pos_neg_split(nids, labels): """Split positive and negtive nodes in array nids . 
@todo 大规模性能太差需要改进 :param nids: FloatTensor :param labels: LongTensor :return: tuple (LongTensor, LongTensor) """ # nids = nids.cpu().tolist() pos_nids = [] neg_nids = [] for nid in nids: if labels[nid] == 1: pos_nids.append(nid.item()) else: neg_nids.append(nid.item()) # torch.int64 pos_nids = torch.tensor(pos_nids) neg_nids = torch.tensor(neg_nids) return pos_nids, neg_nids def pos_neg_split(nids, labels): pos_idx = torch.where(labels == 1)[0] neg_idx = torch.where(labels == 0)[0] # 特殊判断孤立点的情况 pos_nids = nids[pos_idx] if min(pos_idx.shape) != 0 else torch.LongTensor([]) neg_nids = nids[neg_idx] if min(neg_idx.shape) != 0 else torch.LongTensor([]) return pos_nids, neg_nids def under_sample(pos_nids, neg_nids, scale=1): """Under-sample the negative nodes based on scale. :param pos_nids: LongTensor :param neg_nids: LongTensor :param scale: float :param seed: int :return: """ index = np.arange(neg_nids.shape[0]) index = np.random.RandomState().permutation(index) N = min(int(pos_nids.shape[0] * scale), neg_nids.shape[0]) index = index[0: N] neg_sampled = neg_nids[index] sampled_nids = torch.cat((pos_nids, neg_sampled)) return sampled_nids
[ "852722340@qq.com" ]
852722340@qq.com
e9e6698732f76d17e1d3f2be4ed78e6d42a0884a
9f915f449d30adad213eb47fd05f44ba1fe6ed04
/main_app/migrations/0001_initial.py
8e024f77d59e5f626fbcbfabd857c2bb148def88
[]
no_license
JManey/home_sweet_home
306f58c2d90d4cc956128ac71b1462bfcc0fbc8a
70444439ea997681db2eeeea58260db6659cb8c3
refs/heads/master
2021-07-20T21:40:43.469041
2021-07-15T22:21:06
2021-07-15T22:21:06
228,694,157
0
0
null
null
null
null
UTF-8
Python
false
false
2,207
py
# Generated by Django 3.0 on 2019-12-26 23:02 import datetime from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Company', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('tax_id', models.IntegerField()), ('name', models.CharField(max_length=200)), ('address', models.CharField(max_length=200)), ('email', models.EmailField(max_length=254)), ('phone', models.CharField(max_length=200)), ], ), migrations.CreateModel( name='Property', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('street_address', models.CharField(max_length=200)), ('city', models.CharField(max_length=100)), ('state', models.CharField(max_length=100)), ('beds', models.IntegerField()), ('baths', models.IntegerField()), ('price', models.IntegerField()), ('sqft', models.IntegerField()), ('levels', models.IntegerField()), ('date_listed', models.DateField(default=datetime.datetime.now)), ('status', models.CharField(max_length=100)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Profile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_agent', models.BooleanField(default=False)), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
[ "jdm216@gmail.com" ]
jdm216@gmail.com
78430575c8a6091691a2baff513bfbe12212aa04
e8805bf7c79da1b63d36c3535b8f5ba7d97b6b56
/tests/test_auditor/test_auditor_bookmark.py
05eaeea64b067419856d25819c257ca50d667dd1
[ "MIT" ]
permissive
wbuchwalter/polyaxon
9ad681e37065e8aa05741fb7d63b170e4c1fdfe6
a01396ea86a74082c457bfbc2c91d283b6ff6fba
refs/heads/master
2020-03-23T08:34:42.248328
2018-07-17T18:29:06
2018-07-17T18:29:06
141,334,939
0
0
MIT
2018-07-17T19:35:22
2018-07-17T19:35:21
null
UTF-8
Python
false
false
2,819
py
# pylint:disable=ungrouped-imports from unittest.mock import patch import pytest import activitylogs import auditor import tracker from event_manager.events import bookmark as bookmarks_events from tests.utils import BaseTest @pytest.mark.auditor_mark class AuditorBookmarksTest(BaseTest): """Testing subscribed events""" DISABLE_RUNNER = False def setUp(self): auditor.validate() auditor.setup() tracker.validate() tracker.setup() activitylogs.validate() activitylogs.setup() super().setUp() @patch('tracker.service.TrackerService.record_event') @patch('activitylogs.service.ActivityLogService.record_event') def test_build_bookmarks_viewed(self, activitylogs_record, tracker_record): auditor.record(event_type=bookmarks_events.BOOKMARK_BUILD_JOBS_VIEWED, actor_id=1, id=2) assert tracker_record.call_count == 1 assert activitylogs_record.call_count == 1 @patch('tracker.service.TrackerService.record_event') @patch('activitylogs.service.ActivityLogService.record_event') def test_job_bookmarks_viewed(self, activitylogs_record, tracker_record): auditor.record(event_type=bookmarks_events.BOOKMARK_JOBS_VIEWED, actor_id=1, id=1) assert tracker_record.call_count == 1 assert activitylogs_record.call_count == 1 @patch('tracker.service.TrackerService.record_event') @patch('activitylogs.service.ActivityLogService.record_event') def test_experiment_bookmarks_viewed(self, activitylogs_record, tracker_record): auditor.record(event_type=bookmarks_events.BOOKMARK_EXPERIMENTS_VIEWED, actor_id=1, id=2) assert tracker_record.call_count == 1 assert activitylogs_record.call_count == 1 @patch('tracker.service.TrackerService.record_event') @patch('activitylogs.service.ActivityLogService.record_event') def test_experiment_group_bookmarks_viewed(self, activitylogs_record, tracker_record): auditor.record(event_type=bookmarks_events.BOOKMARK_EXPERIMENT_GROUPS_VIEWED, actor_id=1, id=2) assert tracker_record.call_count == 1 assert activitylogs_record.call_count == 1 
@patch('tracker.service.TrackerService.record_event') @patch('activitylogs.service.ActivityLogService.record_event') def test_project_bookmarks_viewed(self, activitylogs_record, tracker_record): auditor.record(event_type=bookmarks_events.BOOKMARK_PROJECTS_VIEWED, actor_id=1, id=1) assert tracker_record.call_count == 1 assert activitylogs_record.call_count == 1
[ "mouradmourafiq@gmail.com" ]
mouradmourafiq@gmail.com
86b44a20f1b925e0e122f85553ac88b13ad3a550
3289109ab3d1ac83079fd69c6f61dbc35981e74e
/main/migrations/0051_auto__chg_field_decantime_date.py
734e2ff46df9933bfc573c73879323f828900884
[]
no_license
vovadenisov/SM
2c60dc5165003946fc07c78584c90b7e3d3d6bb7
9f5c5e03386eaaa23949a652bdf78993cf97dce2
refs/heads/master
2016-09-08T02:41:44.706475
2015-10-09T00:25:47
2015-10-09T00:25:47
42,978,582
0
0
null
null
null
null
UTF-8
Python
false
false
20,534
py
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'decanTime.date' db.alter_column(u'main_decantime', 'date', self.gf('django.db.models.fields.CharField')(max_length=50)) def backwards(self, orm): # Changing field 'decanTime.date' db.alter_column(u'main_decantime', 'date', self.gf('django.db.models.fields.DateTimeField')()) models = { u'main.aboutbmstu': { 'Meta': {'object_name': 'AboutBMSTU'}, 'article': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Articles']", 'symmetrical': 'False', 'blank': 'True'}), 'facts_gallery': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Gallery']"}), 'facts_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'facts_text': ('ckeditor.fields.RichTextField', [], {'blank': 'True'}), 'facts_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'graduates': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Persons']", 'symmetrical': 'False', 'blank': 'True'}), 'graduates_title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}), 'history_gallery': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Gallery']"}), 'history_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'history_text': ('ckeditor.fields.RichTextField', [], {'blank': 'True'}), 'history_title': 
('django.db.models.fields.CharField', [], {'max_length': '250'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'tradition_gallery': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Gallery']"}), 'tradition_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'tradition_text': ('ckeditor.fields.RichTextField', [], {'blank': 'True'}), 'tradition_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'tree_photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'tree_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'main.addresses': { 'Meta': {'object_name': 'Addresses'}, 'address': ('django.db.models.fields.CharField', [], {'max_length': '250'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'main.articles': { 'Meta': {'object_name': 'Articles'}, 'author': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'content': ('ckeditor.fields.RichTextField', [], {}), 'description': ('ckeditor.fields.RichTextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'isFavorite': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': 
u"orm['photologue.Photo']"}), 'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 6, 18, 0, 0)'}), 'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'subtext': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}), 'template': ('django.db.models.fields.CharField', [], {'default': "'base_article.html'", 'max_length': '50'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'main.banners': { 'Meta': {'object_name': 'Banners'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'url': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'main.contacts': { 'Meta': {'object_name': 'Contacts'}, 'addresses': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Addresses']", 'symmetrical': 'False', 'blank': 'True'}), 'bank_details': ('ckeditor.fields.RichTextField', [], {'blank': 'True'}), 'contact_information': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'contactees': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Persons']", 'symmetrical': 'False', 'blank': 'True'}), 'emails': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Emails']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'phone_number': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Phones']", 'symmetrical': 'False', 'blank': 'True'}), 'photo_map': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': 
u"orm['photologue.Photo']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'main.decantime': { 'Meta': {'object_name': 'decanTime'}, 'date': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'decan': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'main.departments': { 'Meta': {'object_name': 'Departments'}, 'article': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Articles']", 'symmetrical': 'False', 'blank': 'True'}), 'content': ('ckeditor.fields.RichTextField', [], {}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'template': ('django.db.models.fields.CharField', [], {'default': "'base_department.html'", 'max_length': '50'}) }, u'main.didjest': { 'Meta': {'object_name': 'Didjest'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'link': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'number': ('django.db.models.fields.IntegerField', [], {}), 'year': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Didjest_year']"}) }, u'main.didjest_theme': { 'Meta': {'object_name': 'Didjest_theme'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'main.didjest_year': { 'Meta': {'object_name': 'Didjest_year'}, u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Didjest_theme']"}), 'year': ('django.db.models.fields.CharField', [], {'max_length': '30'}) }, u'main.emails': { 'Meta': {'object_name': 'Emails'}, 'email': ('django.db.models.fields.CharField', [], {'max_length': '250'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'main.news': { 'Meta': {'object_name': 'News'}, 'article': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Articles']", 'symmetrical': 'False', 'blank': 'True'}), 'content': ('ckeditor.fields.RichTextField', [], {}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'gallery': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Gallery']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'isFavorite': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 6, 18, 0, 0)'}), 'template': ('django.db.models.fields.CharField', [], {'default': "'base_news.html'", 'max_length': '50'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'main.partner': { 'Meta': {'object_name': 'Partner'}, 'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'link': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 
'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'main.persons': { 'Meta': {'object_name': 'Persons'}, 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, u'main.phones': { 'Meta': {'object_name': 'Phones'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '250'}) }, u'main.projects': { 'Meta': {'object_name': 'Projects'}, 'article': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Articles']", 'symmetrical': 'False', 'blank': 'True'}), 'content': ('ckeditor.fields.RichTextField', [], {}), 'department': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Departments']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'isFavorite': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'template': ('django.db.models.fields.CharField', [], {'default': "'base_project.html'", 'max_length': '50'}) }, 
u'main.publication': { 'Meta': {'object_name': 'Publication'}, 'content': ('django.db.models.fields.CharField', [], {'max_length': '450'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'pub_autor': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 6, 18, 0, 0)'}), 'pub_img': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'pub_link': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'pub_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}) }, u'main.units': { 'Meta': {'object_name': 'Units'}, 'article': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['main.Articles']", 'symmetrical': 'False', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}), 'url': ('django.db.models.fields.CharField', [], {'max_length': '150'}) }, u'main.youth_project': { 'Meta': {'object_name': 'Youth_project'}, 'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'link': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'photo': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['photologue.Photo']"}), 
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'photologue.gallery': { 'Meta': {'ordering': "['-date_added']", 'object_name': 'Gallery'}, 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'photos': ('sortedm2m.fields.SortedManyToManyField', [], {'blank': 'True', 'related_name': "'galleries'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['photologue.Photo']"}), 'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'tags': ('photologue.models.TagField', [], {'max_length': '255', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}) }, u'photologue.photo': { 'Meta': {'ordering': "['-date_added']", 'object_name': 'Photo'}, 'caption': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}), 'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'photo_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'is_public': 
('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'tags': ('photologue.models.TagField', [], {'max_length': '255', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}), 'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, u'photologue.photoeffect': { 'Meta': {'object_name': 'PhotoEffect'}, 'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}), 'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}), 'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}), 'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}), 'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}), 'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}), 'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}), 'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}) }, u'sites.site': { 'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"}, 'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) } } 
complete_apps = ['main']
[ "root@ip-172-31-32-220.us-west-2.compute.internal" ]
root@ip-172-31-32-220.us-west-2.compute.internal
78fd73b5ee06f4bb569aa86478fc7e71e680f397
7a99e9eae5fe9833e41a112e0a7d7ede4bc02a24
/intro-to-pytorch/loading_data.py
aa5d7e76c7b7aeeae4e70c12c62231bb7f36fcd5
[ "MIT" ]
permissive
youssriaboelseod/deep-learning-v2-pytorch
3d9e819480a5399733a27e053686a81303e183b6
db0cf684d58ed660a1d3b334661bbc88050e4dc1
refs/heads/master
2022-11-16T21:29:30.670117
2020-06-22T11:29:16
2020-06-22T11:29:16
274,116,699
0
0
MIT
2020-06-22T11:17:58
2020-06-22T11:17:58
null
UTF-8
Python
false
false
1,434
py
import matplotlib.pyplot as plt import torch from torch import nn from torch import optim import torch.nn.functional as F from torchvision import datasets, transforms import helper data_dir = 'E:\Contact group_delete\deepLearning\Cat_Dog_data\Cat_Dog_data' # TODO: Define transforms for the training data and testing data train_transforms = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor()]) test_transforms = transforms.Compose([transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor()]) # Pass transforms in here, then run the next cell to see how the transforms look train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms) test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms) trainloader = torch.utils.data.DataLoader(train_data, batch_size=32) testloader = torch.utils.data.DataLoader(test_data, batch_size=32) # change this to the trainloader or testloader data_iter = iter(testloader) images, labels = next(data_iter) fig, axes = plt.subplots(figsize=(10,4), ncols=4) for ii in range(4): ax = axes[ii] helper.imshow(images[ii], ax=ax, normalize=False)
[ "youssri.ahmed@cg-eg.com" ]
youssri.ahmed@cg-eg.com
4a43d22f36905e9553fb3038e905a6bca4d30100
e18ea66b91ce268b8dbe5525d85efdc87d971c48
/books/migrations/0009_auto_20210422_1618.py
cd3712e8116e73eb27c6ef103c274b7506ed4bc4
[]
no_license
AyaHusseinAly/Bookstore_Django
0c8c06b8fa44a02acfba3abd541e66e9d7dd496a
b7218d83fc79a295e3747c0751f836faf0a4bd00
refs/heads/master
2023-04-15T06:30:03.247784
2021-04-24T16:55:02
2021-04-24T16:55:02
360,259,612
1
0
null
null
null
null
UTF-8
Python
false
false
753
py
# Generated by Django 3.2 on 2021-04-22 14:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('books', '0008_isbn_name'), ] operations = [ migrations.RemoveField( model_name='isbn', name='author', ), migrations.RemoveField( model_name='isbn', name='name', ), migrations.AddField( model_name='isbn', name='book_author', field=models.CharField(max_length=100, null=True), ), migrations.AlterField( model_name='book', name='categories', field=models.ManyToManyField(to='books.Category'), ), ]
[ "aya.hussein3107@gmail.com" ]
aya.hussein3107@gmail.com
c3f387488e18415441d92be7b503abfd69d40ad1
8e97cb7c8668a9061683ea3ba893dab32029fac9
/pytorch_toolkit/instance_segmentation/segmentoly/utils/profile.py
9c17d0e496ee59a819333e28ce8963262200b8d3
[ "Apache-2.0" ]
permissive
DmitriySidnev/openvino_training_extensions
e01703bea292f11ffc20d50a1a06f0565059d5c7
c553a56088f0055baba838b68c9299e19683227e
refs/heads/develop
2021-06-14T06:32:12.373813
2020-05-13T13:25:15
2020-05-13T13:25:15
180,546,423
0
1
Apache-2.0
2019-04-15T13:39:48
2019-04-10T09:17:55
Python
UTF-8
Python
false
false
14,154
py
""" Copyright (c) 2019 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import gc import operator as op import time from collections import defaultdict from functools import reduce, wraps import numpy as np import torch class Timer(object): def __init__(self, warmup=0, smoothing=0.5, cuda_sync=True): self.warmup = warmup self.cuda_sync = cuda_sync self.total_time = 0. self.calls = 0 self.start_time = 0. self.diff = 0. self.average_time = 0. self.smoothed_time = 0. self.smoothing_alpha = smoothing self.min_time = float('inf') self.max_time = 0. self.reset() def tic(self): if self.cuda_sync and torch.cuda.is_available(): torch.cuda.synchronize() self.start_time = time.time() def toc(self, average=True, smoothed=False): if self.cuda_sync and torch.cuda.is_available(): torch.cuda.synchronize() self.diff = time.time() - self.start_time self.calls += 1 if self.calls <= self.warmup: return self.diff self.total_time += self.diff self.average_time = self.total_time / (self.calls - self.warmup) self.smoothed_time = self.smoothed_time * self.smoothing_alpha + self.diff * (1.0 - self.smoothing_alpha) self.min_time = min(self.min_time, self.diff) self.max_time = max(self.max_time, self.diff) if average: return self.average_time elif smoothed: return self.smoothed_time else: return self.diff def __enter__(self): self.tic() return self def __exit__(self, exc_type, exc_val, exc_tb): self.toc() def reset(self): self.total_time = 0. self.calls = 0 self.start_time = 0. self.diff = 0. 
self.average_time = 0. class DummyTimer(Timer): def __init__(self): super().__init__() def tic(self): pass def toc(self, *args, **kwargs): return 0 def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass def timed(func): @wraps(func) def wrapper_timer(self, *args, **kwargs): if not hasattr(self, '_timers'): self._timers = defaultdict(Timer) with self._timers[func.__name__]: value = func(self, *args, **kwargs) return value return wrapper_timer def print_timing_stats(timers, key='average_time'): print('{:>40}: {:>10} [{:>10}, {:>10}] {:>10} {:>10}'.format('name', 'average', 'min', 'max', '#calls', 'total')) for k, v in sorted(timers.items(), key=lambda x: op.attrgetter(key)(x[1]), reverse=True): print('{:>40}: {:10.2f} [{:10.2f}, {:10.2f}] {:10d} {:10.2f}'.format(k, 1000 * v.average_time, 1000 * v.min_time, 1000 * v.max_time, v.calls, 1000 * v.total_time)) print('-' * 40) def pretty_shape(shape): if shape is None: return 'None' return '×'.join(map(str, shape)) def pretty_size(size, units='G', precision=2, base=1024): if units is None: if size // (base ** 3) > 0: val = str(round(size / (base ** 3), precision)) units = 'G' elif size // (base ** 2) > 0: val = str(round(size / (base ** 2), precision)) units = 'M' elif size // base > 0: val = str(round(size / base, precision)) units = 'K' else: val = str(size) units = '' else: if units == 'G': val = str(round(size / (base ** 3), precision)) elif units == 'M': val = str(round(size / (base ** 2), precision)) elif units == 'K': val = str(round(size / base, precision)) else: val = str(size) return val, units def dump_tensors(gpu_only=True): """Prints a list of the Tensors being tracked by the garbage collector.""" total_size = 0 for obj in gc.get_objects(): try: if torch.is_tensor(obj): if not gpu_only or obj.is_cuda: print('%s:%s%s %s' % (type(obj).__name__, ' GPU' if obj.is_cuda else '', ' pinned' if obj.is_pinned else '', pretty_shape(obj.size()))) total_size += obj.numel() elif hasattr(obj, 
'data') and torch.is_tensor(obj.data): if not gpu_only or obj.is_cuda: print('%s → %s:%s%s%s%s %s' % (type(obj).__name__, type(obj.data).__name__, ' GPU' if obj.is_cuda else '', ' pinned' if obj.data.is_pinned else '', ' grad' if obj.requires_grad else '', ' volatile' if obj.volatile else '', pretty_shape(obj.data.size()))) total_size += obj.data.numel() except Exception as e: pass print('Total size:', total_size) def list_allocated_tensors(): memtable = [] for obj in gc.get_objects(): if torch.is_tensor(obj): memtable.append(dict(obj=obj, size=(reduce(op.mul, obj.size()) if len(obj.size()) > 0 else 0) * obj.element_size())) memtable = sorted(memtable, key=op.itemgetter('size')) for i, item in enumerate(memtable): obj = item['obj'] print('{:03}: {:>10} {:>30} {:>25} {:>10}'.format(i, item['size'], str(np.array(obj.shape)), str(obj.type()), str(obj.device))) def list_parameters(module): memtable = [] for name, x in module.named_parameters(): memtable.append(dict(name=name, shape=np.array(x.data.shape), size=int(x.data.numel() * x.data.element_size()), has_grad=x.requires_grad, grad_shape=np.array(x.grad.shape) if x.requires_grad else None, grad_size=int(x.grad.numel() * x.grad.element_size()) if x.requires_grad else 0 ) ) total_data_size = 0 total_grad_size = 0 for i, item in enumerate(memtable): print('{:03} {:>60}: {:>15} {:>15} {:>15} {:>15}'.format(i, item['name'], pretty_size(item['size'], units='M')[0], pretty_shape(item['shape']), pretty_size(item['grad_size'], units='M')[0], pretty_shape(item['grad_shape']))) total_data_size += item['size'] total_grad_size += item['grad_size'] total_mem_size = list(pretty_size(total_data_size)) + list(pretty_size(total_grad_size)) print('TOTAL MEMORY USAGE FOR MODEL PARAMETERS: data: {} {}B grad: {} {}B'.format(*total_mem_size)) class FeatureMapsTracer(object): fwd_tensors_registry = set() bwd_tensors_registry = set() @staticmethod def reset(*args, **kwargs): FeatureMapsTracer.summary_fwd() FeatureMapsTracer.summary_bwd() 
del FeatureMapsTracer.fwd_tensors_registry FeatureMapsTracer.fwd_tensors_registry = set() del FeatureMapsTracer.bwd_tensors_registry FeatureMapsTracer.bwd_tensors_registry = set() @staticmethod def summary_fwd(*args, **kwargs): total_data_size = FeatureMapsTracer.get_total_size(list(FeatureMapsTracer.fwd_tensors_registry)) print('TOTAL FORWARD DATA BLOBS SIZE: {} {}B'.format(*pretty_size(total_data_size))) @staticmethod def summary_bwd(*args, **kwargs): total_data_size = FeatureMapsTracer.get_total_size(list(FeatureMapsTracer.bwd_tensors_registry)) print('TOTAL BACKWARD GRAD BLOBS SIZE: {} {}B'.format(*pretty_size(total_data_size))) @staticmethod def list_tensors(x): tensors = [] if isinstance(x, (list, tuple)): for i in x: tensors.extend(FeatureMapsTracer.list_tensors(i)) elif isinstance(x, dict): for i in x.values(): tensors.extend(FeatureMapsTracer.list_tensors(i)) elif isinstance(x, torch.Tensor): tensors.append(x) return tensors @staticmethod def get_shapes(tensors): shapes = [x.shape for x in tensors] return shapes @staticmethod def shapes_to_str(shapes): return '[' + ', '.join([pretty_shape(shape) for shape in shapes]) + ']' @staticmethod def get_total_size(tensors): total_size = 0 for x in tensors: total_size += int(x.numel() * x.element_size()) return total_size @staticmethod def forward(module, inputs, outputs, verbose=False): input_tensors = FeatureMapsTracer.list_tensors(inputs) inputs_shapes = FeatureMapsTracer.get_shapes(input_tensors) inputs_shapes_str = FeatureMapsTracer.shapes_to_str(inputs_shapes) inputs_size = FeatureMapsTracer.get_total_size(input_tensors) FeatureMapsTracer.fwd_tensors_registry.update(set(input_tensors)) output_tensors = FeatureMapsTracer.list_tensors(outputs) outputs_shapes = FeatureMapsTracer.get_shapes(output_tensors) outputs_shapes_str = FeatureMapsTracer.shapes_to_str(outputs_shapes) outputs_size = FeatureMapsTracer.get_total_size(output_tensors) FeatureMapsTracer.fwd_tensors_registry.update(set(output_tensors)) if verbose: 
print('fwd {:>20}: {:>15} {:>15} {:>15} {:>15}'.format(module._get_name(), pretty_size(inputs_size, units='M')[0], inputs_shapes_str, pretty_size(outputs_size, units='M')[0], outputs_shapes_str)) @staticmethod def backward(module, inputs, outputs, verbose=False): input_tensors = FeatureMapsTracer.list_tensors(inputs) inputs_shapes = FeatureMapsTracer.get_shapes(input_tensors) inputs_shapes_str = FeatureMapsTracer.shapes_to_str(inputs_shapes) inputs_size = FeatureMapsTracer.get_total_size(input_tensors) FeatureMapsTracer.bwd_tensors_registry.update(set(input_tensors)) output_tensors = FeatureMapsTracer.list_tensors(outputs) outputs_shapes = FeatureMapsTracer.get_shapes(output_tensors) outputs_shapes_str = FeatureMapsTracer.shapes_to_str(outputs_shapes) outputs_size = FeatureMapsTracer.get_total_size(output_tensors) FeatureMapsTracer.bwd_tensors_registry.update(set(output_tensors)) if verbose: print('bwd {:>20}: {:>15} {:>15} {:>15} {:>15}'.format(module._get_name(), pretty_size(inputs_size, units='M')[0], inputs_shapes_str, pretty_size(outputs_size, units='M')[0], outputs_shapes_str)) @staticmethod def add_fwd_hooks(module): def register_per_layer_hooks(m): m.register_forward_hook(FeatureMapsTracer.forward) module.register_forward_pre_hook(FeatureMapsTracer.reset) module.apply(register_per_layer_hooks) @staticmethod def add_bwd_hooks(module): def register_per_layer_hooks(m): m.register_backward_hook(FeatureMapsTracer.backward) module.apply(register_per_layer_hooks) @staticmethod def add_hooks(module): FeatureMapsTracer.add_fwd_hooks(module) FeatureMapsTracer.add_bwd_hooks(module) class PerformanceCounters(object): def __init__(self): self.pc = {} def update(self, pc): for layer, stats in pc.items(): if layer not in self.pc: self.pc[layer] = dict(layer_type=stats['layer_type'], exec_type=stats['exec_type'], status=stats['status'], real_time=stats['real_time'], calls=1) else: self.pc[layer]['real_time'] += stats['real_time'] self.pc[layer]['calls'] += 1 def 
print(self): print('Performance counters:') print(' '.join(['name', 'layer_type', 'exec_type', 'status', 'real_time(us)'])) for layer, stats in self.pc.items(): print('{} {} {} {} {}'.format(layer, stats['layer_type'], stats['exec_type'], stats['status'], stats['real_time'] / stats['calls']))
[ "48012821+AlexanderDokuchaev@users.noreply.github.com" ]
48012821+AlexanderDokuchaev@users.noreply.github.com
2cce6d4bce2b9128ccd9ae2aac1e97142f8a18e1
8f6125f45793bc4d867bc819ca16cb3a5571f9a3
/Treasure Hunt Queue/queue.py
4b2ebd0181e755cd8d800fe6424b699a114a36d7
[]
no_license
AbhayVAshokan/Core-Dump
dd446582147761c41fd6c9afe62830b540a3b9e2
44035896c667dd76f7c69bb6cee566e452f1d321
refs/heads/master
2021-06-21T17:11:06.186836
2021-01-03T10:23:34
2021-01-03T10:23:34
178,909,017
0
0
null
null
null
null
UTF-8
Python
false
false
659
py
# T: Number of test cases. # N is the number of people in the queue. # Q is the queue. # All persons are numbered from 1. # Count the number of swaps in bubble sort. def minimumbribes(N, Q): count = 0 bribes = [0 for i in range(N+1)] for i in range(N-1): for j in range(N - i - 1): if Q[j] > Q[j+1]: Q[j], Q[j+1] = Q[j+1], Q[j] bribes[Q[j+1]] += 1 if bribes[Q[j+1]] > 2: return "Impossible" count += 1 return count for t in range(int(input())): N = int(input()) Q = list(map(int, input().split())) print(minimumbribes(N, Q))
[ "abhayvashokan@gmail.com" ]
abhayvashokan@gmail.com
c7d144b8335a423d324ebdc6e7a74ee5f11d99ad
665455c521cc7cf76c5436337ed545de90976af4
/cohesity_management_sdk/models/node_port.py
0160cdc36722ac0df6ecf9ed7e2a96895d226b7a
[ "Apache-2.0" ]
permissive
hsantoyo2/management-sdk-python
d226273bc8eedcf9220ea4999a6f0b9a1a30d99c
0093194d125fc6746f55b8499da1270c64f473fc
refs/heads/master
2023-03-01T06:09:39.644085
2021-01-15T08:23:16
2021-01-15T08:23:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,851
py
# -*- coding: utf-8 -*- # Copyright 2020 Cohesity Inc. class NodePort(object): """Implementation of the 'NodePort' model. VmInfo specifies information of a NodePort per service and port combination within an application instance. Attributes: is_ui_port (bool): TODO: type description here. port (int): TODO: type description here. tag (TagEnum): Specifies use of the nodeport kDefault - No specific service. kHttp - HTTP server. kHttps - Secure HTTP server. kSsh - Secure shell server. """ # Create a mapping from Model property names to API property names _names = { "is_ui_port":'isUiPort', "port":'port', "tag":'tag' } def __init__(self, is_ui_port=None, port=None, tag=None): """Constructor for the NodePort class""" # Initialize members of the class self.is_ui_port = is_ui_port self.port = port self.tag = tag @classmethod def from_dictionary(cls, dictionary): """Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class. """ if dictionary is None: return None # Extract variables from the dictionary is_ui_port = dictionary.get('isUiPort') port = dictionary.get('port') tag = dictionary.get('tag') # Return an object of this model return cls(is_ui_port, port, tag)
[ "ashish@cohesity.com" ]
ashish@cohesity.com
8cc261eb0ecfb093323305bc3cc656d8b5205b78
a6c13fb257563d99c45f79b2fee5c2f2f76251ef
/apps/common/factories.py
197597ddc5ded9aea5a8f9bfe7315a01f742e943
[]
no_license
sipanmargaryan/addproduct
9999cdf9b611ea4f103ed9e58e24c8fc8fe0e3fb
9232c31956f154f3c4349fe3942a331559213c70
refs/heads/master
2022-11-05T19:23:37.209482
2020-06-26T14:44:45
2020-06-26T14:44:45
275,178,682
4
0
null
null
null
null
UTF-8
Python
false
false
800
py
import factory from django.utils import timezone import common.models class ArticleFactory(factory.DjangoModelFactory): title = factory.Sequence(lambda n: 'help text title-{}'.format(n)) description = factory.Sequence(lambda n: 'help text description-{}'.format(n)) class Meta: model = common.models.Article class CategoryFactory(factory.DjangoModelFactory): name = factory.Sequence(lambda n: 'category-{}'.format(n)) class Meta: model = common.models.Category class ServiceFactory(factory.DjangoModelFactory): opening_time = factory.lazy_attribute(lambda x: timezone.now()) closing_time = factory.lazy_attribute(lambda x: timezone.now()) category = factory.SubFactory(CategoryFactory) class Meta: model = common.models.Service
[ "sipanm19@gmail.com" ]
sipanm19@gmail.com
d765931611ffb0b15f7c1c88acfd00e0ac6f9f19
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_145/669.py
bd702b9e219525f76c3fa85711b680bca73aa591
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
688
py
#!/usr/bin/env python import sys import struct import ctypes def binary(num): return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num)) T = int(sys.stdin.readline()) for case in range(0, T): part = sys.stdin.readline() up, down = part.split("/") up = int(up) down = int(down) if int(bin(up&down)[2:]) != 0: print "Case #%s: impossible" % (str(case+1) ) else: for i in range(1, 40): up = 2*up if up >= down: print "Case #%s: %d" % (str(case+1), i ) break
[ "miliar1732@gmail.com" ]
miliar1732@gmail.com
72004555bb11e06a24b720c1a3722e4548ca4b2b
dab4f0f373451cfbe0bce205c0fff1a3fc4e9929
/Lab104/tests/test_p1.py
6a51b47a588eb8fe94ac5b06dca66871e218d4b3
[]
no_license
luisejv/autograder_demo
6b08ca31781a0260fd50da698f325afc10da1d4b
3548273784cce7322ab23f850e747412a388f445
refs/heads/master
2022-11-07T22:13:56.550488
2020-06-20T16:48:07
2020-06-20T16:48:07
272,591,572
0
0
null
null
null
null
UTF-8
Python
false
false
4,011
py
from Lib.testInputOutput import set_keyboard_input from Lib.testInputOutput import get_display_output import unittest from gradescope_utils.autograder_utils.decorators import weight, tags from p1 import main class TestP1(unittest.TestCase): def buildingTests(self, inputList): outputList = ["Ingrese frase1: ", "Ingrese frase2: ", "Vocales ", "Consonantes ", "Otros caracteres ", "Frase invertida: "] vowels1 = 0 vowels2 = 0 consonants1 = 0 consonants2 = 0 others1 = 0 others2 = 0 phrase1 = inputList[0] if len(inputList[0]) == len(inputList[1]) else inputList[2] phrase2 = inputList[1] if len(inputList[0]) == len(inputList[1]) else inputList[3] for i in range(len(phrase1)): if phrase1[i].lower() in ['a', 'e', 'i', 'o', 'u']: vowels1 += 1 elif phrase1[i].lower() < 'a' or phrase1[i].lower() > 'z': others1 += 1 else: consonants1 += 1 if phrase2[i].lower() in ['a', 'e', 'i', 'o', 'u']: vowels2 += 1 elif phrase2[i].lower() < 'a' or phrase2[i].lower() > 'z': others2 += 1 else: consonants2 += 1 outputList[2] += "frase1=" + str(vowels1) + ",frase2=" + str(vowels2) outputList[3] += "frase1=" + str(consonants1) + ",frase2=" + str(consonants2) outputList[4] += "frase1=" + str(others1) + ",frase2=" + str(others2) outputList[5] += phrase1[::-1] if others1 > others2 else phrase2[::-1] set_keyboard_input(inputList) main() output = get_display_output() return [output, outputList] @weight(1) @tags("Pregunta 1") def test_error(self): """Pregunta 1 - Probando entradas correctas y erróneas""" outputAssert = self.buildingTests(["Exito en la PC2 ICC 103", "Hola mundo", "Exito en la PC2 ICC 103", "Hola mundo programar :)"]) self.assertEqual(outputAssert[0][0], outputAssert[1][0], f"El mensaje debería ser: \"{outputAssert[1][0]}\"") self.assertEqual(outputAssert[0][1], outputAssert[1][1], f"El mensaje debería ser: \"{outputAssert[1][1]}\"") self.assertEqual(outputAssert[0][2], outputAssert[1][0], f"No estás checkeando que los strings tengan la misma longitud") 
self.assertEqual(outputAssert[0][3], outputAssert[1][1], f"No estás checkeando que los strings tengan la misma longitud") @weight(1) @tags("Pregunta 1") def test_vowels(self): """Pregunta 1 - Probando vocales""" outputAssert = self.buildingTests(["Exito en la PC2 ICC 103", "Hola mundo programar :)"]) self.assertEqual(outputAssert[0][2], outputAssert[1][2], f"El resultado debería ser: \"{outputAssert[1][2]}\"") @weight(1) @tags("Pregunta 1") def test_consonants(self): """Pregunta 1 - Probando consonantes""" outputAssert = self.buildingTests(["Exito en la PC2 ICC 103", "Hola mundo programar :)"]) self.assertEqual(outputAssert[0][3], outputAssert[1][3], f"El resultado debería ser: \"{outputAssert[1][3]}\"") @weight(1) @tags("Pregunta 1") def test_others(self): """Pregunta 1 - Probando otros caracteres""" outputAssert = self.buildingTests(["Exito en la PC2 ICC 103", "Hola mundo programar :)"]) self.assertEqual(outputAssert[0][4], outputAssert[1][4], f"El resultado debería ser: \"{outputAssert[1][4]}\"") @weight(1) @tags("Pregunta 1") def test_inverted(self): """Pregunta 1 - Probando frase invertida""" outputAssert = self.buildingTests(["Exito en la PC2 ICC 103", "Hola mundo programar :)"]) self.assertEqual(outputAssert[0][5], outputAssert[1][5], f"El resultado debería ser: \"{outputAssert[1][5]}\"") if __name__ == '__main__': t = TestP1() t.test_input(t)
[ "ljauregui@pucp.pe" ]
ljauregui@pucp.pe
e3a44bdd2d39db31d23452d46b95dc993a1dcc7c
c9499da242269382baa5a41363453bf7907d392f
/05_Django/06_model_relation/manytoone/migrations/0001_initial.py
9f1acf14c842de95162929de9c2b2df986586848
[]
no_license
sooy0510/TIL
13adad107c5315d09d78b7317b4b7efecdbcee1f
5da3902f40099638267e9bf96e6e71729dba5232
refs/heads/master
2022-12-27T10:40:22.441330
2019-12-03T08:28:41
2019-12-03T08:28:41
216,502,932
0
1
null
2022-12-08T06:58:12
2019-10-21T07:21:41
CSS
UTF-8
Python
false
false
1,473
py
# Generated by Django 2.2.6 on 2019-11-12 08:23 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Article', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.TextField()), ], ), migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField()), ], ), migrations.CreateModel( name='Comment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('content', models.TextField()), ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manytoone.Article')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manytoone.User')), ], ), migrations.AddField( model_name='article', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manytoone.User'), ), ]
[ "qkdqnwpwp@naver.com" ]
qkdqnwpwp@naver.com
559e9d1a9a1c37ba4f8aae45a6b1828a164fe7ce
b685036280331fa50fcd87f269521342ec1b437b
/src/tests/demo_5.py
c5de86e214fd5babcc1639e86f60c6ee47de9df4
[]
no_license
chenqing666/myML_DM_Test
f875cb5b2a92e81bc3de2a0070c0185b7eacac89
5ac38f7872d94ca7cedd4f5057bb93732b5edbad
refs/heads/master
2022-02-26T01:52:06.293025
2019-09-20T06:35:25
2019-09-20T06:35:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
175
py
def fib(times): n = 0 a,b = 0, 1 while n < times: yield b a, b = b, a + b n += 1 return "done" g = fib(2) next(g) next(g) # next(g)
[ "976185561@qq.com" ]
976185561@qq.com
24bd26724a911302d89a95f1304146d522a4c0d0
84fceb8b271112f6c56ea68ab3cff4617c464946
/median.py
c9c08402a75f1616c971fe1ff9ebb2088b453884
[]
no_license
teohongwei898/python
8422bbe88bf69983ec8a826567595e3323f6c513
4f01b37b73bbc77820e985e26f614a58195220ec
refs/heads/master
2018-12-19T14:06:13.486469
2018-12-16T07:50:36
2018-12-16T07:50:36
114,371,504
0
0
null
null
null
null
UTF-8
Python
false
false
306
py
#Write your function here def middle_element(lst): length = len(lst) if length % 2 == 1: return (lst[length//2]) else: a = lst[length//2] b = lst[length//2-1] avg= (a+b)/2 return avg #Uncomment the line below when your function is done print(middle_element([5, 2, -10, -4, 4, 5]))
[ "noreply@github.com" ]
teohongwei898.noreply@github.com
cfb3b5cee8263c61646346ffad780e7563555d9f
3e401b71a56dd348c43ba401d9c0f566c7f93c3a
/accounts/urls.py
f83f120dc8819b208b9f1ab9276b755bf5f1793d
[]
no_license
lokhandeganesh/Django_Repository
4b69dc737d1ea526138bdbd07bbde7caced32609
7326926f8046f855bb154d80f7711e0224595235
refs/heads/master
2023-07-04T16:22:01.751537
2021-08-09T19:54:40
2021-08-09T19:54:40
381,125,137
0
0
null
null
null
null
UTF-8
Python
false
false
288
py
from django.urls import path # From all modules import views# from . import views # set url patern for home/index page# urlpatterns = [ path("register",views.register,name='register'), path("login",views.login,name='login'), path("logout",views.logout,name='logout'), ]
[ "lokhandeganesh8@gmail.com" ]
lokhandeganesh8@gmail.com
b61f437bea9cab0ba67b2cb09f6ff502267e0219
74c82e8f3e61ce0eb765580dbd9679fe928bb162
/research/federated_object_detection_benchmark/data/dataset.py
017e0fe62a5e54233cbcaa847450b0f8b37f6195
[ "Apache-2.0" ]
permissive
liuheng2cqupt/FATE
463de675c60ca5d4d865bd5703fc86232858cd2d
67708d5d327e736d80e2f6726968cf02a926310e
refs/heads/master
2022-11-28T21:52:23.023540
2020-07-31T03:19:04
2020-07-31T03:19:04
286,612,335
1
0
Apache-2.0
2020-08-11T01:05:55
2020-08-11T01:05:55
null
UTF-8
Python
false
false
4,267
py
from __future__ import absolute_import from __future__ import division import torch as t from data.voc_dataset import VOCBboxDataset from skimage import transform as sktsf from torchvision import transforms as tvtsf from data import util import numpy as np from utils.config import opt def inverse_normalize(img): if opt.caffe_pretrain: img = img + (np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1)) return img[::-1, :, :] # approximate un-normalize for visualize return (img * 0.225 + 0.45).clip(min=0, max=1) * 255 def pytorch_normalze(img): """ https://github.com/pytorch/vision/issues/223 return appr -1~1 RGB """ normalize = tvtsf.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) img = normalize(t.from_numpy(img)) return img.numpy() def caffe_normalize(img): """ return appr -125-125 BGR """ img = img[[2, 1, 0], :, :] # RGB-BGR img = img * 255 mean = np.array([122.7717, 115.9465, 102.9801]).reshape(3, 1, 1) img = (img - mean).astype(np.float32, copy=True) return img def preprocess(img, min_size=600, max_size=1000): """Preprocess an image for feature extraction. The length of the shorter edge is scaled to :obj:`self.min_size`. After the scaling, if the length of the longer edge is longer than :param min_size: :obj:`self.max_size`, the image is scaled to fit the longer edge to :obj:`self.max_size`. After resizing the image, the image is subtracted by a mean image value :obj:`self.mean`. Args: img (~numpy.ndarray): An image. This is in CHW and RGB format. The range of its value is :math:`[0, 255]`. Returns: ~numpy.ndarray: A preprocessed image. """ C, H, W = img.shape scale1 = min_size / min(H, W) scale2 = max_size / max(H, W) scale = min(scale1, scale2) img = img / 255. 
img = sktsf.resize(img, (C, H * scale, W * scale), mode='reflect',anti_aliasing=False) # both the longer and shorter should be less than # max_size and min_size if opt.caffe_pretrain: normalize = caffe_normalize else: normalize = pytorch_normalze return normalize(img) class Transform(object): def __init__(self, min_size=600, max_size=1000): self.min_size = min_size self.max_size = max_size def __call__(self, in_data): img, bbox, label = in_data _, H, W = img.shape img = preprocess(img, self.min_size, self.max_size) _, o_H, o_W = img.shape scale = o_H / H bbox = util.resize_bbox(bbox, (H, W), (o_H, o_W)) # horizontally flip img, params = util.random_flip( img, x_random=True, return_param=True) bbox = util.flip_bbox( bbox, (o_H, o_W), x_flip=params['x_flip']) return img, bbox, label, scale class Dataset: def __init__(self, opt): self.opt = opt self.db = VOCBboxDataset(opt.voc_data_dir, opt.label_names) self.tsf = Transform(opt.min_size, opt.max_size) def __getitem__(self, idx): ori_img, bbox, label, difficult = self.db.get_example(idx) img, bbox, label, scale = self.tsf((ori_img, bbox, label)) # TODO: check whose stride is negative to fix this instead copy all # some of the strides of a given numpy array are negative. return img.copy(), ori_img.shape[1:], bbox.copy(), \ label.copy(), scale, difficult def __len__(self): return len(self.db) class TestDataset: def __init__(self, opt, split='test', use_difficult=True): self.opt = opt self.db = VOCBboxDataset(opt.voc_data_dir, opt.label_names, split=split, use_difficult=use_difficult) def __getitem__(self, idx): ori_img, bbox, label, difficult = self.db.get_example(idx) img = preprocess(ori_img) scale = ori_img.shape[1] / img.shape[1] return img, ori_img.shape[1:], bbox, label, scale, difficult def __len__(self): return len(self.db)
[ "luojiahuan001@gmail.com" ]
luojiahuan001@gmail.com
a5a062e44821422192c7707e75eaecf1f499acd6
3cd6c578fcc0444646aa0f6589c82c27afc3955c
/gamifi/migrations/0008_auto_20210412_0129.py
7fd7a59f6fd006e3f43e672fd43556ba0adf49ec
[]
no_license
ArashMAzizi/FitnessFriendsApp
0715fe41993ee6053b22395f4b1683088b114104
e6cd51cc407c667830fc7cc40414c36d6118ca6d
refs/heads/main
2023-05-01T21:04:07.292670
2021-05-06T18:44:26
2021-05-06T18:44:26
368,673,153
0
0
null
null
null
null
UTF-8
Python
false
false
988
py
# Generated by Django 3.1.7 on 2021-04-12 05:29 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('gamifi', '0007_auto_20210412_0120'), ] operations = [ migrations.AlterField( model_name='aerobicexercise', name='created_at', field=models.DateField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AlterField( model_name='flexibilityexercise', name='created_at', field=models.DateField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AlterField( model_name='strengthexercise', name='created_at', field=models.DateField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), ]
[ "bvndinh@gmail.com" ]
bvndinh@gmail.com
61260ce6e6eb5105d81220d4adf9203b387d204b
9d08c7ee903bb2dabcb86c69f3f82cda0420cee9
/Update Trash/Interpark + telegram.py
7104cef5b1a2d6898c1176eb8c66c03597f3b3f6
[ "MIT" ]
permissive
Hambbuk/Interpark-Ticketing
d8cb0c759a9b43836934fc97a9d549253f203cb7
bba01ceed338cb5f074c3aa2d6086f4e2b8f0f89
refs/heads/main
2023-06-09T03:39:11.007161
2021-06-30T03:14:26
2021-06-30T03:14:26
381,555,860
0
0
MIT
2021-06-30T02:46:48
2021-06-30T02:46:47
null
UTF-8
Python
false
false
9,763
py
from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.alert import Alert from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.common.by import By from selenium.common.exceptions import TimeoutException from tkinter import * from datetime import datetime import numpy, re, pyotp, sys, time, tkinter.ttk, pytesseract, tkinter.font import cv2 as cv from pytesseract import image_to_string from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ChatAction, Update, Bot from telegram.ext import CommandHandler, MessageHandler, CallbackQueryHandler, Updater, Filters import threading dp = Tk() main_frame = Frame(dp) dp.geometry('500x500') dp.title("인터파크 티켓팅 프로그램") main_frame.pack() driver = webdriver.Chrome("es/chromedriver") wait = WebDriverWait(driver, 20) url = "https://ticket.interpark.com/Gate/TPLogin.asp" driver.get(url) id_label = Label(main_frame, text = "아이디") id_label.grid(row = 1, column = 0) id_entry = Entry(main_frame) id_entry.grid(row = 1, column = 1) pw_label = Label(main_frame, text = "비밀번호") pw_label.grid(row = 2, column = 0) pw_entry = Entry(main_frame, show = '*') pw_entry.grid(row = 2, column =1) showcode_label = Label(main_frame, text = "공연번호") showcode_label.grid(row=4, column = 0) showcode_entry = Entry(main_frame) showcode_entry.grid(row=4, column = 1) date_label = Label(main_frame, text = "날짜") date_label.grid(row=5, column = 0) date_entry = Entry(main_frame) date_entry.grid(row=5, column = 1) round_label = Label(main_frame, text = "회차") round_label.grid(row = 6, column = 0) round_entry = Entry(main_frame) round_entry.grid(row=6, column = 1) ticket_label = Label(main_frame, text = "티켓 수") ticket_label.grid(row = 7, column = 0) ticket_entry = Entry(main_frame) ticket_entry.grid(row=7, column = 1) code_time = Entry(main_frame) code_time.grid(row=12, column = 1) def login_go(): 
driver.switch_to_frame(driver.find_element_by_tag_name('iframe')) driver.find_element_by_name('userId').send_keys(id_entry.get()) driver.find_element_by_id('userPwd').send_keys(pw_entry.get()) driver.find_element_by_id('btn_login').click() def link_go(): driver.get('http://poticket.interpark.com/Book/BookSession.asp?GroupCode=' + showcode_entry.get()) def seat_macro(): driver.switch_to.default_content() seat1_frame = driver.find_element_by_name("ifrmSeat") driver.switch_to_frame(seat1_frame) seat2_frame = driver.find_element_by_name("ifrmSeatDetail") driver.switch_to_frame(seat2_frame) wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'stySeat'))) len_seatn = len(driver.find_elements_by_class_name('stySeat')) print(len_seatn) len_VIP = len(driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/1_90.gif"]')) print(len_VIP) shot = 0 VIP = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/1_90.gif"]') R = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/2_90.gif"]') S = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/3_90.gif"]') A = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/4_90.gif"]') for x in range(0 , len_seatn): try: VIP[x].click() shot = shot + 1 except: try: R[x].click() shot = shot + 1 except: try: S[x].click() shot = shot + 1 except: try: A[x].click() shot = shot + 1 except: break if shot == int(ticket_entry.get()): break def captcha(): driver.switch_to.default_content() seat1_frame = driver.find_element_by_id("ifrmSeat") driver.switch_to_frame(seat1_frame) image = driver.find_element_by_id('imgCaptcha') image = image.screenshot_as_png with open("captcha.png", "wb") as file: file.write(image) image = cv.imread("captcha.png") #Set a threshold value for the image, and save image = cv.cvtColor(image, cv.COLOR_BGR2GRAY) image = 
cv.adaptiveThreshold(image, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 1) kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3)) image = cv.morphologyEx(image, cv.MORPH_OPEN, kernel, iterations=1) cnts = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) cnts = cnts[0] if len(cnts) == 2 else cnts[1] for c in cnts: area = cv.contourArea(c) if area < 50: cv.drawContours(image, [c], -1, (0,0,0), -1) kernel2 = numpy.array([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]]) image = cv.filter2D(image,-1,kernel2) result = 255 - image captcha_text = image_to_string(result) print(captcha_text) driver.switch_to.default_content() driver.switch_to_frame(seat1_frame) driver.find_element_by_class_name('validationTxt').click() driver.find_element_by_id('txtCaptcha').send_keys(captcha_text) while 1: if driver.find_element_by_class_name('capchaInner').is_displayed(): driver.find_element_by_class_name('refreshBtn').click() captcha() else: break def date_select(): first_frame = driver.find_element_by_id('ifrmBookStep') driver.switch_to_frame(first_frame) #날짜 driver.find_element_by_xpath('(//*[@id="CellPlayDate"])' + "[" + date_entry.get() + "]").click() #회차 wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div/div[3]/div[1]/div/span/ul/li[' + round_entry.get() + ']/a'))).click() driver.switch_to.default_content() wait.until(EC.element_to_be_clickable((By.ID, 'LargeNextBtnImage'))).click() #다음 try: driver.switch_to.alert().accept() driver.switch_to.default_content() wait.until(EC.presence_of_all_elements_located((By.ID, 'ifrmSeat'))) except: driver.switch_to.default_content() wait.until(EC.presence_of_all_elements_located((By.ID, 'ifrmSeat'))) def go2(): driver.switch_to.default_content() seat1_frame = driver.find_element_by_id("ifrmSeat") driver.switch_to_frame(seat1_frame) seat2_frame = driver.find_element_by_id("ifrmSeatDetail") driver.switch_to_frame(seat2_frame) seat_macro() try: driver.switch_to.alert().accept() driver.switch_to.default_content() 
driver.switch_to_frame(seat1_frame) driver.find_element_by_id('NextStepImage').click() driver.switch_to.alert().accept() except: driver.switch_to.default_content() driver.switch_to_frame(seat1_frame) driver.find_element_by_id('NextStepImage').click() def go(): code_time.delete(0, END) start_time = time.time() try: driver.find_element_by_class_name('closeBtn').click() date_select() try: captcha() go2() pass except: go2() pass except: date_select() try: captcha() go2() pass except: go2() pass finally: code_time.insert(0 ,"%s 초" % round((time.time() - start_time),3) ) def clock_time(): clock = datetime.now().strftime('%H:%M:%S:%f') time_label.config(text = clock) time_label.after(1, clock_time) BOT_TOKEN = '1289747693:AAEBosjOS2ui3ROkGrPOwmDq04JRrLPOL_U' updater = Updater(token=BOT_TOKEN) def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None): menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)] if header_buttons: menu.insert(0, header_buttons) if footer_buttons: menu.append(footer_buttons) return menu def get_command(update, context): show_list = [] show_list.append(InlineKeyboardButton("시작", callback_data='1')) show_list.append(InlineKeyboardButton("직링", callback_data='2')) show_markup = InlineKeyboardMarkup(build_menu(show_list, len(show_list) - 1)) update.message.reply_text("선택하세요.", reply_markup = show_markup) if update.callback_data == '1': go() elif update.callback_data == '2': link_go() get_handler = CommandHandler('task', get_command) updater.dispatcher.add_handler(get_handler) login_button = Button(main_frame, text = "로그인", command = login_go, height = 2) login_button.grid(row=2, column = 3) link_button = Button(main_frame, text = "직링", command = link_go, height = 2) link_button.grid(row=3, column = 1) start_button = Button(main_frame, text = "시작", command = go, height = 2) start_button.grid(row=8, column = 1) chair_button = Button(main_frame, text = "좌석", command = go2, height = 2) chair_button.grid(row=9, column = 1) 
captcha_button = Button(main_frame, text = '캡챠', command = captcha, height = 2) captcha_button.grid(row=10, column = 1) time_label = Label(main_frame, height = 2) time_label.grid(row=11, column = 1) clock_time() def updater2(): updater.start_polling() updater.idle() def mainup(): threading.Thread(target=updater2).start() threading.Thread(dp.mainloop()).start() mainup()
[ "drericcity@gmail.com" ]
drericcity@gmail.com
45e2a48fffd5b632648a3d5fd99137b10dcdd15b
1b1a428f3daf6a2c29f02779d6ded29738602134
/Cipher_decoder.py
cfcb3d8650f48227839982f8d3aeedf8403e0d48
[]
no_license
david-bradburn/Vigenere_Cipher
b4acd28929714cec6316e524b84aa86f55f82bd5
67c2d93dca987a93e1b062fd21f3abefb3bb54bd
refs/heads/master
2022-11-05T02:36:44.986355
2020-06-23T12:18:34
2020-06-23T12:18:34
274,378,272
0
0
null
null
null
null
UTF-8
Python
false
false
1,944
py
key = "secret" message = "vUy4n9AzPgS" letter_to_number_dictionary = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6, "h": 7, "i": 8, "j": 9, "k": 10, "l": 11, "m": 12, "n": 13, "o": 14, "p": 15, "q": 16, "r": 17, "s": 18, "t": 19, "u": 20, "v": 21, "w": 22, "x": 23, "y": 24, "z": 25} # Could use ord() def get_key(val): for key, value in letter_to_number_dictionary.items(): if val == value: return key return "key doesn't exist" # def l_to_num(letter): # return ord(letter.lower()) - 97 output = "" key_length = len(key) for letter in range(len(message)): try: output += get_key((letter_to_number_dictionary[message[letter].lower()] + letter_to_number_dictionary[key[letter % key_length].lower()]) % 26) except: print(message[letter]) output += str(message[letter]) print(output) msg = "" for i in range(len(output)): try: msg += get_key((letter_to_number_dictionary[output[i].lower()] - (letter_to_number_dictionary[key[i % key_length].lower()])) % 26) except: msg += output[i] print(output[i]) print(msg) # for letter in key: # print(letter)
[ "david.bradburn2@gmail.com" ]
david.bradburn2@gmail.com
502caa92793ef1dff2c35c396ef823c386566ef9
343a27bca14d6b6d00c2abbd3ed3885043bcb4a0
/session-4/test_download_books.py
d153539bf2a41f799dc73bee9dc44c27aeb2c26e
[]
no_license
hklb94/qualification-seminar-materials-2018
97cf44708aeb3df1d6ac17f829cd024613a52ffd
67da3174f2daf64ae3ed55019dd39be188172fd7
refs/heads/master
2020-03-28T14:45:13.012138
2018-08-06T17:57:57
2018-08-06T17:57:57
null
0
0
null
null
null
null
UTF-8
Python
false
false
755
py
import os import download_books def test_download(): frankenstein_url = 'https://www.gutenberg.org/files/84/84-0.txt' download_books.download(frankenstein_url) assert os.path.isfile('84-0.txt') os.remove('84-0.txt') def test_run(): frankenstein_url = 'https://www.gutenberg.org/files/84/84-0.txt' alice_in_wonderland_url = 'https://www.gutenberg.org/files/11/11-0.txt' jekyll_hyde_url = 'https://www.gutenberg.org/files/43/43-0.txt' urls = [frankenstein_url, alice_in_wonderland_url, jekyll_hyde_url] # Execute the actual function that we want to test download_books.run(urls) for url in urls: file_name = os.path.basename(url) assert os.path.isfile(file_name) os.remove(file_name)
[ "jensegholm@protonmail.com" ]
jensegholm@protonmail.com
5cafefd3be80cf7ef30a14f8aa5ef9d48f942773
b8270e7bc57b91fdf34dc9dc7589f576a73c7b0e
/Minesweeper with pygame.py
2064be7c8de77d00be2eaee6b92fafbaabd64528
[ "MIT" ]
permissive
singh-hemant/minesweeper-with-pygame
429d1d2e568866b11199117eefae8a0dc08da922
831d39ce7f63fddd59e33354ad06ce34fd80884e
refs/heads/master
2022-11-15T09:46:57.251373
2020-07-11T12:11:14
2020-07-11T12:11:14
278,854,424
1
0
null
null
null
null
UTF-8
Python
false
false
21,461
py
import pygame import random import sys class Minesweeper: def __init__(self, size=20): self.size = size self.matrix = [] self.list = [] for i in range(self.size): rows= [] rows2 = [] for j in range(self.size): rows.append(' ') rows2.append(0) self.matrix.append(rows) self.list.append(rows2) def put_mines(self, mines_count =20): for _ in range(mines_count): x = random.randint(0, self.size-1) y = random.randint(0, self.size-1) self.matrix[x][y] = "X" def check_mines(self): for i in range(self.size): for j in range(self.size): count = 0 if self.matrix[i][j] != "X": if i == 0: if j == 0: if self.matrix[i][j+1] == "X": count += 1 if self.matrix[i+1][j+1] == "X": count += 1 if self.matrix[i+1][j] == "X": count += 1 elif j == self.size - 1: if self.matrix[i][j-1] == "X": count += 1 if self.matrix[i+1][j-1] == "X": count += 1 if self.matrix[i+1][j] == "X": count += 1 else: if self.matrix[i][j-1] == "X": count += 1 if self.matrix[i+1][j-1] == "X": count += 1 if self.matrix[i+1][j] == "X": count += 1 if self.matrix[i+1][j+1] == "X": count += 1 if self.matrix[i][j+1] == "X": count += 1 elif i == self.size - 1: if j == 0: if self.matrix[i-1][j] == "X": count += 1 if self.matrix[i-1][j+1] == "X": count += 1 if self.matrix[i][j+1] == "X": count += 1 elif j == self.size - 1: if self.matrix[i-1][j] == "X": count += 1 if self.matrix[i-1][j-1] == "X": count += 1 if self.matrix[i][j-1] == "X": count += 1 else: if self.matrix[i][j-1] == "X": count += 1 if self.matrix[i-1][j-1] == "X": count += 1 if self.matrix[i-1][j] == "X": count += 1 if self.matrix[i-1][j+1] == "X": count += 1 if self.matrix[i][j+1] == "X": count += 1 else: if j == 0: if self.matrix[i-1][j] == "X": count += 1 if self.matrix[i-1][j+1] == "X": count += 1 if self.matrix[i][j+1] == "X": count += 1 if self.matrix[i+1][j+1] == "X": count += 1 if self.matrix[i+1][j] == "X": count += 1 elif j == self.size - 1: if self.matrix[i-1][j] == "X": count += 1 if self.matrix[i-1][j-1] == "X": count += 1 if self.matrix[i][j-1] == 
"X": count += 1 if self.matrix[i+1][j-1] == "X": count += 1 if self.matrix[i+1][j] == "X": count += 1 else: if self.matrix[i-1][j-1] == "X": count += 1 if self.matrix[i-1][j] == "X": count += 1 if self.matrix[i-1][j+1] == "X": count += 1 if self.matrix[i][j+1] == "X": count += 1 if self.matrix[i+1][j+1] == "X": count += 1 if self.matrix[i+1][j] == "X": count += 1 if self.matrix[i+1][j-1] == "X": count += 1 if self.matrix[i][j-1] == "X": count += 1 if count > 0: self.matrix[i][j] = count # For console printing+ def printing(self): for i in range(self.size): for j in range(self.size): print(self.matrix[i][j], end=" ") print() def main(): minesweeper = Minesweeper(20) minesweeper.put_mines() minesweeper.check_mines() minesweeper.printing() # Game Loop Pygame class Game: def __init__(self): # game properties self.width = 750 self.height = 600 self.matrix_size = 15 self.font_size = 24 self.ith_padding = 15 self.jth_padding = 5 self.white = (255, 255, 255) self.black = (0, 0, 0) self.green = (0, 200, 0) self.red = (150, 20, 0) self.mines_count = 20 self.no_of_boxes_left = self.matrix_size**2 - self.mines_count #self.score = 0 # ---------end ---------- # for Handling background logic self.minesweeper = Minesweeper(self.matrix_size) self.minesweeper.put_mines(self.mines_count) self.minesweeper.check_mines() self.save = [] # create a window pygame.init() self.window = pygame.display.set_mode((self.width, self.height)) self.background = pygame.Surface((self.width, self.height)) pygame.display.set_caption("MineSweeper") self.font = pygame.font.SysFont("Arial", self.font_size, "bold") self.clock = pygame.time.Clock() self.start_game() def gameloop(self): self.sizeb = self.height/self.minesweeper.size run = True x, y, a, b = 0, 0, 0, 0 self.window.fill((255, 255, 255)) while run: for event in pygame.event.get(): if event.type == pygame.QUIT: run = False pygame.quit() sys.exit() if event.type == pygame.MOUSEBUTTONDOWN: x, y = event.pos self.save.append((x, y)) for i in 
range(self.minesweeper.size): for j in range(self.minesweeper.size): if (x, y) in self.save: if (x > i*self.sizeb) and (x < i*self.sizeb + self.sizeb) and (y > j*self.sizeb) and (y < j*self.sizeb + self.sizeb): # Game over finding X if self.minesweeper.matrix[i][j] == "X": self.rect = pygame.draw.rect(self.window, self.red, ( i * self.sizeb + 2, j * self.sizeb + 2, self.sizeb - 2, self.sizeb - 2)) self.window.blit(self.font.render(str(self.minesweeper.matrix[i][j]), True, self.black), (i*self.sizeb+self.ith_padding, j*self.sizeb+self.jth_padding)) run = False pygame.display.update() pygame.time.wait(500) self.game_over() elif self.minesweeper.matrix[i][j] == ' ': self.minesweeper.list[i][j] = 10 self.auto_blank_fill(i, j) else: self.minesweeper.list[i][j] = int(self.minesweeper.matrix[i][j]) self.rect = pygame.draw.rect(self.window, self.green, (i*self.sizeb+2, j*self.sizeb+2, self.sizeb-2, self.sizeb-2)) self.window.blit(self.font.render(str(self.minesweeper.matrix[i][j]), True, self.black), (i*self.sizeb+self.ith_padding, j*self.sizeb+self.jth_padding)) else: pygame.draw.rect(self.window, (0, 100, 100), (i*self.sizeb+2, j*self.sizeb+2, self.sizeb-2, self.sizeb-2)) self.scoring() if self.no_of_boxes_left == 0: self.you_win() pygame.display.update() def auto_blank_fill(self, i, j): self.rect = pygame.draw.rect(self.window, self.green,(i * self.sizeb + 2, j * self.sizeb + 2, self.sizeb - 2, self.sizeb - 2)) self.window.blit(self.font.render(str(self.minesweeper.matrix[i][j]), True, self.black),(i * self.sizeb + self.ith_padding, j * self.sizeb + self.jth_padding)) if self.minesweeper.matrix[i][j]: if i == 0: if j == 0: if self.minesweeper.matrix[i][j + 1] == ' ':# and self.minesweeper.list[i][j+1] == 0: self.auto_blank_fill(i, j + 1) if self.minesweeper.matrix[i + 1][j + 1] == ' ':# and self.minesweeper.list[i][j+1] == 0: self.auto_blank_fill(i+1, j+1) if self.minesweeper.matrix[i + 1][j] == ' ':# and self.minesweeper.list[i][j+1] == 0: self.auto_blank_fill(i+1, j) 
elif j == self.minesweeper.size - 1: if self.minesweeper.matrix[i][j - 1] == ' ': self.auto_blank_fill(i, j-1) if self.minesweeper.matrix[i + 1][j - 1] == ' ': self.auto_blank_fill(i+1, j-1) if self.minesweeper.matrix[i + 1][j] == ' ': self.auto_blank_fill(i+1, j) # else: # if self.minesweeper.matrix[i][j - 1] == ' ': # self.auto_blank_fill(i, j-1) # if self.minesweeper.matrix[i + 1][j - 1] == ' ': # self.auto_blank_fill(i+1, j-1) # if self.minesweeper.matrix[i + 1][j] == ' ': # self.auto_blank_fill(i+1, j) # if self.minesweeper.matrix[i + 1][j + 1] == ' ': # self.auto_blank_fill(i+1, j+1) # if self.minesweeper.matrix[i][j + 1] == ' ': # self.auto_blank_fill(i, j+1) # elif i == self.minesweeper.size - 1: # if j == 0: # if self.minesweeper.matrix[i - 1][j] == ' ': # self.auto_blank_fill(i-1, j) # if self.minesweeper.matrix[i - 1][j + 1] == ' ': # self.auto_blank_fill(i-1, j+1) # if self.minesweeper.matrix[i][j + 1] == ' ': # self.auto_blank_fill(i, j+1) # elif j == self.minesweeper.size - 1: # if self.minesweeper.matrix[i - 1][j] == ' ': # self.auto_blank_fill(i-1, j) # if self.minesweeper.matrix[i - 1][j - 1] == ' ': # self.auto_blank_fill(i-1, j-1) # if self.minesweeper.matrix[i][j - 1] == ' ': # self.auto_blank_fill(i, j-1) # else: # if self.minesweeper.matrix[i][j - 1] == ' ': # self.auto_blank_fill(i, j-1) # if self.minesweeper.matrix[i - 1][j - 1] == ' ': # self.auto_blank_fill(i-1, j-1) # if self.minesweeper.matrix[i - 1][j] == ' ': # self.auto_blank_fill(i-1, j) # if self.minesweeper.matrix[i - 1][j + 1] == ' ': # self.auto_blank_fill(i-1, j+1) # if self.minesweeper.matrix[i][j + 1] == ' ': # self.auto_blank_fill(i, j+1) # else: # if j == 0: # if self.minesweeper.matrix[i - 1][j] == ' ': # self.auto_blank_fill(i-1, j) # if self.minesweeper.matrix[i - 1][j + 1] == ' ': # self.auto_blank_fill(i-1, j+1) # if self.minesweeper.matrix[i][j + 1] == ' ': # self.auto_blank_fill(i, j+1) # if self.minesweeper.matrix[i + 1][j + 1] == ' ': # self.auto_blank_fill(i+1, 
j+1) # if self.minesweeper.matrix[i + 1][j] == ' ': # self.auto_blank_fill(i+1, j) # elif j == self.minesweeper.size - 1: # if self.minesweeper.matrix[i - 1][j] == ' ': # self.auto_blank_fill(i-1, j) # if self.minesweeper.matrix[i - 1][j - 1] == ' ': # self.auto_blank_fill(i-1, j-1) # if self.minesweeper.matrix[i][j - 1] == ' ': # self.auto_blank_fill(i, j-1) # if self.minesweeper.matrix[i + 1][j - 1] == ' ': # self.auto_blank_fill(i+1, j-1) # if self.minesweeper.matrix[i + 1][j] == ' ': # self.auto_blank_fill(i+1, j) # # else: # if self.minesweeper.matrix[i - 1][j - 1] == ' ': # self.auto_blank_fill(i-1, j-1) # if self.minesweeper.matrix[i - 1][j] == ' ': # self.auto_blank_fill(i-1, j) # if self.minesweeper.matrix[i - 1][j + 1] == ' ': # self.auto_blank_fill(i-1, j+1) # if self.minesweeper.matrix[i][j + 1] == ' ': # self.auto_blank_fill(i, j+1) # if self.minesweeper.matrix[i + 1][j + 1] == ' ': # self.auto_blank_fill(i+1, j+1) # if self.minesweeper.matrix[i + 1][j] == ' ': # self.auto_blank_fill(i+1, j) # if self.minesweeper.matrix[i + 1][j - 1] == ' ': # self.auto_blank_fill(i+1, j-1) # if self.minesweeper.matrix[i][j - 1] == ' ': # self.auto_blank_fill(i, j-1) def scoring(self): # reset to initial state self.score = 0 self.no_of_boxes_left = self.matrix_size ** 2 - self.mines_count for i in range(self.minesweeper.size): for j in range(self.minesweeper.size): if 0 < self.minesweeper.list[i][j]: self.no_of_boxes_left -= 1 if self.minesweeper.list[i][j] != 10: self.score += self.minesweeper.list[i][j] self.button(120, 40, 620, 50, "Boxes Left :", 18, font_paddingx=10) self.button(120, 100, 620, 100, str(self.no_of_boxes_left), 32, font_paddingx=40, font_paddingy=20) self.button(120, 100, 620, 250, "Score :", 18, font_paddingx=10) self.button(120, 100, 620, 300, str(self.score), 32, font_paddingx=40, font_paddingy=20) def game_over(self): self.background.fill((0, 200, 200)) self.window.blit(self.background,(0, 0)) running = True x, y = 0, 0 while running: for event 
in pygame.event.get(): if event.type == pygame.QUIT: running = False if event.type == pygame.MOUSEBUTTONDOWN: x, y = event.pos self.button(250, 80, 240, 140, "GAME OVER", 32,font_paddingx=30,font_paddingy=20) self.button(150, 50, 180, 380, "PLAY AGAIN", 24) self.button(150, 50, 380, 380, "QUIT", 24, font_paddingx=40) if 380 < x < 380 + 150 and 380 < y < 380 + 50: self.button(150, 50, 380, 380, "QUIT", 24, font_paddingx=40, color=(10, 10, 10)) pygame.display.update() pygame.time.wait(10) pygame.quit() sys.exit() if 180 < x < 180 + 150 and 380 < y < 380 + 50: self.button(150, 50, 180, 380, "PLAY AGAIN", 24,color=(10, 10, 10)) #reset counts self.no_of_boxes_left = self.matrix_size ** 2 - self.mines_count self.score = 0 for i in range(self.minesweeper.size): for j in range(self.minesweeper.size): self.minesweeper.list[i][j] = 0 pygame.display.update() pygame.time.wait(10) self.gameloop() pygame.display.update() def you_win(self): self.background.fill((0, 200, 200)) self.window.blit(self.background,(0, 0)) running = True x, y = 0, 0 while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False pygame.quit() sys.exit() if event.type == pygame.MOUSEBUTTONDOWN: x, y = event.pos self.button(250, 80, 240, 140, "YOU WIN!!!", 32, font_paddingx=40, font_paddingy=20) self.button(150, 50, 180, 380, "PLAY AGAIN", 24) self.button(150, 50, 380, 380, "QUIT", 24, font_paddingx=40) if 380 < x < 380 + 150 and 380 < y < 380 + 50: # For button animation self.button(150, 50, 380, 380, "QUIT", 24, font_paddingx=40, color=(10, 10, 10)) pygame.display.update() pygame.time.wait(10) pygame.quit() sys.exit() if 180 < x < 180 + 150 and 380 < y < 380 + 50: # For button animation self.button(150, 50, 180, 380, "PLAY AGAIN", 24,color=(10, 10, 10)) pygame.display.update() pygame.time.wait(10) #reset counts self.minesweeper.put_mines(self.mines_count) self.no_of_boxes_left = self.matrix_size ** 2 - self.mines_count self.score = 0 for i in range(self.minesweeper.size): 
for j in range(self.minesweeper.size): self.minesweeper.list[i][j] = 0 self.gameloop() pygame.display.update() def button(self, size_x, size_y, x, y, text, font_size, color=(255, 255, 255), font_color=(0, 150, 0), font_paddingx=10, font_paddingy=10): font2 = pygame.font.SysFont("Arial", font_size, "bold") self.rect = pygame.draw.rect(self.window, (100, 0, 0), (x-10, y-10, size_x+20, size_y+20)) self.rect = pygame.draw.rect(self.window, color, (x, y, size_x, size_y)) self.window.blit(font2.render(text, True, font_color), (x + font_paddingx, y + font_paddingy)) def start_game(self): start = True x, y = 0, 0 while start: last = pygame.time.get_ticks() self.background.fill((0, 200, 200)) self.window.blit(self.background, (0, 0)) for event in pygame.event.get(): if event.type == pygame.QUIT: start = False if event.type == pygame.MOUSEBUTTONDOWN: x, y = event.pos self.button(500, 100, 130, 100, "MINESWEEPER", 64, font_paddingx=25, font_paddingy=15, color=(200, 150, 100), font_color=(120, 120, 120)) self.button(260, 80, 240, 280, "START GAME", 36, font_paddingx=25, font_paddingy=15) if 240 < x < 240 + 260 and 280 < y < 280 + 80: self.button(260, 80, 240, 280, "START GAME", 32, font_paddingx=25, font_paddingy=15, color=(0, 0, 0)) pygame.display.update() pygame.time.wait(10) self.gameloop() start = False pygame.display.update() if __name__ == "__main__": Game() pygame.quit() sys.exit()
[ "noreply@github.com" ]
singh-hemant.noreply@github.com
1d3adb241f0a50cf17877e9733f1e5441a4de0ce
ddb0f2ea26c494a113c3bbb578c7c06c8863e278
/greenplumpython/core/gptapply.py
fa4ddfefba98937d45ca48d8ca1ebd019f88f2c0
[]
no_license
tracyzhu2014/GreenplumPython
2914be808beafd41079de31836221936386804b2
19bc9be02f0bfe9cd79da140813041725808beb3
refs/heads/master
2022-11-11T16:12:30.159543
2020-07-02T04:24:31
2020-07-02T04:24:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,223
py
import pandas as pd from greenplumpython.core.gpdatabase import GPDatabase from greenplumpython.core.dataframe_wrapper import DataFrameWrapper from greenplumpython.core.gptable_metadata import GPTableMetadata import inspect import random import string def randomString(stringLength=10): """Generate a random string of fixed length """ letters = string.ascii_lowercase return "tmp_func"+''.join(random.choice(letters) for i in range(stringLength)) def randomStringType(stringLength=10): """Generate a random string of fixed length """ letters = string.ascii_lowercase return "tmp_type"+''.join(random.choice(letters) for i in range(stringLength)) def gptapply(X: DataFrameWrapper, index, gp_database: GPDatabase, py_func, output_meta: GPTableMetadata = None, runtime_id = None, runtime_type = "plcontainer", **kwargs): # Get Function body udt_body = inspect.getsource(py_func) # Get extra arugments for py_func # Build SQL query_sql = "" out_df = gp_database.execute_query(query_sql) if output_meta is not None: out_df_wrapper = DataFrameWrapper(None, output_meta) else: out_df_wrapper = DataFrameWrapper(out_df, None) return out_df_wrapper
[ "noreply@github.com" ]
tracyzhu2014.noreply@github.com
0ac5c2b703ed5fe9d8ed84b121d9e6fd0d7044fb
36c0e7872059251aa241f078c72b751beb873fa6
/jobs/migrations/0002_auto_20190311_0351.py
ab8b691dc3ec3246d479028a9e014cb5dbcb8c63
[]
no_license
ankit-kothari/Django-Projects
685cb316cf28179ca5424df0fcbcea07fb61ad8d
07d3af154b61dfe3c61529184db297ae8a8d4052
refs/heads/master
2020-04-28T05:38:09.603454
2019-03-11T15:18:38
2019-03-11T15:18:38
175,027,657
0
0
null
null
null
null
UTF-8
Python
false
false
326
py
# Generated by Django 2.1.dev20180312174936 on 2019-03-11 03:51 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('jobs', '0001_initial'), ] operations = [ migrations.RenameModel( old_name='Jon', new_name='Job', ), ]
[ "ankit.kothari@oracle.com" ]
ankit.kothari@oracle.com
5e6589368c44aff74f6bdda9cf7c0d6b56303911
afad4d43af53de357f16166f8502c204194222e3
/Temel Veri Yapıları ve Objeler/üç ve beş bölünen sayıların toplamı.py
34c651451c12ef0526439bc967f0ac85bccdc059
[]
no_license
erolkahraman/python
d1b1530172345a716eab8c6dae8aed89d788a8f6
09f797a38071db393ca8c796e4b03f7515ae8c8e
refs/heads/master
2022-07-26T10:01:12.696273
2022-07-17T12:20:42
2022-07-17T12:20:42
162,548,964
0
0
null
null
null
null
UTF-8
Python
false
false
257
py
toplam = 0 # liste = [x for x in range(1,1001) if x % 3 == 0 if y % 5 == 0] # liste = [y for y in [x for x in range(1,1001) if x % 3 == 0] if y % 5 == 0] for i in range(1,1000): if i % 3 == 0 or i % 5 == 0: toplam += i print("Toplam:",toplam)
[ "erol.kahraman@gmail.com" ]
erol.kahraman@gmail.com
72b45890e8fbd981e6b6844123d8fc0e1febfd60
38f663cca20acd0bcaac2f9eded01ec6a81f0e54
/chungchun/pybo/config.py
cb55fa398065dfae227233b643f865a0e426950e
[]
no_license
youth28/youth28_Server
f65b2c1a692ed6647da9b6f6eb06db27294495bf
43ec0c6f8ac14088ea0c5da24756b852a7830496
refs/heads/main
2023-07-13T14:10:36.075070
2021-08-24T02:09:05
2021-08-24T02:09:05
330,843,962
0
0
null
null
null
null
UTF-8
Python
false
false
261
py
import pymysql def get_connection(): conn = pymysql.connect( host='127.0.0.1', port=3306, user='root', password='1234', db='youthdb', charset='utf8' ) return conn
[ "noreply@github.com" ]
youth28.noreply@github.com
785c352ed5b9d05cd064702b89712d73639dea8c
3b5e2016390a2c966d1d4d094e3e70e9980379e7
/manage.py
66736de7ff6874e26aed153309f571abe06fd637
[]
no_license
rameshrajagopal/webdevelopment
e87b16dabaecf1b7e1313f59a715866e00dc1ad5
687348c60ef7d89572f30f9d5641264e349ff3c7
refs/heads/master
2020-07-01T01:29:35.519639
2015-05-18T14:59:29
2015-05-18T15:01:21
35,812,297
0
0
null
null
null
null
UTF-8
Python
false
false
250
py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myguide.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
[ "mail2.rameshr@gmail.com" ]
mail2.rameshr@gmail.com
bd74a3ab41b48ffe8069d7327a2c0494179fcbfe
fcde32709c62b8ee86da459bb7c8eee52c848118
/code/day03/r4.py
37ad4deb38ed39591d4c123e94a810e47614be79
[]
no_license
klaus2015/py_base
6b92d362c3d7dc0e09205a037f4d580381dac94d
ec32c731c1c2f6a0dab87f1d167397e4fa86b8de
refs/heads/master
2022-07-28T15:49:30.383648
2020-05-11T15:31:43
2020-05-11T15:31:43
261,777,278
0
0
null
null
null
null
UTF-8
Python
false
false
716
py
# state = None # number = int(input("请输入一个整数: ")) # if number % 2 : # state = "奇数" # else: # state = "偶数" # print(state) # state = "奇数" if int(input("请输入整数: ")) % 2 else "偶数" # print(state) year = int(input("请输入年份:")) result = year % 4 == 0 and year % 100 != 0 or year % 400 == 0 if result: day = 29 else: day = 28 print(day) 代码简单,但是可读性差 能被4整除但是不能被100整除,或者可以被400整除 day = 29 if not year % 4 and year % 100 or not year % 400 else 28 day = 29 if year % 4 == 0 and year % 100 != 0 or year % 400 == 0 else 28 result = year % 4 print(result) year = 2000 result = year % 4 print(result)
[ "598467866@qq.com" ]
598467866@qq.com
a4b9d93d338391843fa18a38fd30a88d04acb569
e0ede722874d222a789411070f76b50026bbe3d8
/practice/solution/0894_all_possible_full_binary_trees.py
2727423a40a633641a8545b0f9ac6da90888a70d
[]
no_license
kesarb/leetcode-summary-python
cd67456cb57bdff7ee227dab3930aaf9c2a6ad00
dc45210cb2cc50bfefd8c21c865e6ee2163a022a
refs/heads/master
2023-05-26T06:07:25.943854
2021-06-06T20:02:13
2021-06-06T20:02:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,031
py
# Definition for a binary tree node. # class TreeNode(object): # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution(object): def allPossibleFBT(self, N): """ :type N: int :rtype: List[TreeNode] """ self.value_dict = {1: [TreeNode(0)]} self.res = 0 self.res = self.dfs(N) return self.res def dfs(self, N): temp_list = [] if N in self.value_dict: temp_list = self.value_dict[N] return temp_list for i in range(1, N, 2): for left in self.dfs(i): for right in self.dfs(N - 1 - i): root = TreeNode(0) root.left = left root.right = right temp_list.append(root) self.value_dict[N] = temp_list return temp_list
[ "weikunhan@g.ucla.edu" ]
weikunhan@g.ucla.edu
74fdacf2b5f09abf7cbf07081e0b64c723bfe71a
90aec00b8944d37187d18a7e0b79c646029c7b44
/products/models.py
fe2173fd4e6cb7cc874bcc57ec12c3f5af301802
[]
no_license
kimjuyong/funding_system
b00f800fa080c1731bd7ab7185e017a1fd0d9994
a03abcc2037552a97b17c01e7cedbc71fa78e77b
refs/heads/master
2022-12-14T02:30:23.046721
2019-12-06T09:35:58
2019-12-06T09:35:58
219,980,426
0
0
null
2022-12-08T03:16:59
2019-11-06T11:23:49
Python
UTF-8
Python
false
false
2,673
py
from django.db import models from django.urls import reverse from django_countries.fields import CountryField from core import models as core_models class AbstractItem(core_models.TimeStampedModel): """ Abstract Item """ name = models.CharField(max_length=80) class Meta: abstract = True def __str__(self): return self.name class CarType(AbstractItem): """ CarType Model Definition """ class Meta: verbose_name = "Car Type" class CarRule(AbstractItem): """ CarRule Model Definition """ class Meta: verbose_name = "Car Rule" class FuelType(AbstractItem): """ FuelType Model Definition """ class Meta: verbose_name = "Fuel Type" class Facility(AbstractItem): """ Facility Model Definition """ class Meta: verbose_name_plural = "Facilities" class Photo(core_models.TimeStampedModel): """ Photo Model Definition """ caption = models.CharField(max_length=80) file = models.ImageField(upload_to="car_photos") car = models.ForeignKey("Car", on_delete=models.CASCADE) def __str__(self): return self.caption class Car(core_models.TimeStampedModel): """ Car Model Definition """ name = models.CharField(max_length=140) description = models.TextField() manufactureCountry = CountryField() pickupAddress = models.CharField(max_length=200) guests = models.IntegerField() car_model = models.CharField(max_length=140) seats = models.IntegerField() efficiency = models.IntegerField() check_in = models.DateField() check_out = models.DateField() instant_book = models.BooleanField(default=True) host = models.ForeignKey( "users.User", related_name="cars", on_delete=models.CASCADE ) car_type = models.ForeignKey( "CarType", on_delete=models.SET_NULL, related_name="cars", null=True, blank=True ) fuel_type = models.ForeignKey( "FuelType", on_delete=models.SET_NULL, related_name="cars", null=True, blank=True, ) car_rules = models.ManyToManyField("CarRule", related_name="cars", blank=True) facility = models.ManyToManyField("Facility", related_name="cars", blank=True) def __str__(self): return self.name def 
get_absolute_url(self): return reverse("cars:detail", kwargs={"pk": self.pk}) def total_rating(self): all_reviews = self.reviews.all() all_rating = 0 if len(all_reviews) > 0: for review in all_reviews: all_rating += review.rating_average() return all_rating / len(all_reviews) return 0
[ "kimwtgr@gmail.com" ]
kimwtgr@gmail.com
ae9378b6d2cb192afb38eb5e68aa4e86ca676ef0
6d7ef1d662cc92f903acc55c1f43ee105adb6473
/Utils/modules.py
f4dc57b65e02576b0a6541440ef2ca6301100899
[]
no_license
ThiruRJST/PaperWork_Public
1f2451317a507dac82dfa0c0327e2133a1a05023
1aab0ae3d89a19aea77fb791d5577ba5da757f47
refs/heads/main
2023-07-18T13:30:02.850019
2021-08-30T10:38:56
2021-08-30T10:38:56
401,305,382
3
0
null
null
null
null
UTF-8
Python
false
false
4,386
py
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim class conv_mod(nn.Module): def __init__(self,in_channels,out_channels,kernel_size=(1,1),stride=1,padding=0,activation='relu'): super(conv_mod,self).__init__() self.mod = nn.Sequential( nn.Conv2d(in_channels,out_channels,kernel_size=kernel_size,stride=stride,padding=padding) nn.BatchNorm2d(out_channels), nn.ReLU(), ) def forward(self,x): x = self.mod(x) return x class MultiScaleA(nn.Module): def __init__(self,in_channels,stream_1_1x1_filters,stream_2_1x1_filters,stream_2_3x3_filters,stream_3_1x1_filters,stream_3_3x3_filters): super(MultiScaleA,self).__init__() self.stream1 = conv_mod(in_channels,stream_1_1x1_filters,kernel_size=(1,1)) #Stream ID=1 self.stream2 = nn.Sequential( conv_mod(in_channels,stream_2_1x1_filters,kernel_size=(1,1)), conv_mod(stream_2_1x1_filters,stream_2_3x3_filters,kernel_size=(3,3),padding=1), #Stream ID=2 ) self.stream3 = nn.Sequential( conv_mod(in_channels,stream_3_1x1_filters,kernel_size=(1,1)), conv_mod(stream_3_1x1_filters,stream_3_3x3_filters[0],kernel_size=(3,3),padding=1), conv_mod(stream_3_3x3_filters[0],stream_3_3x3_filters[1],kernel_size=(3,3),padding=1) ) def forward(self,x): stream1 = self.stream1(x) stream2 = self.stream2(x) stream3 = self.stream3(x) concat = torch.cat([stream1,stream2,stream3],axis=1) return concat class Reduction(nn.Module): def __init__(self,in_channels,red_stream_2_3x3_filters,red_stream_3_1x1_filters,red_stream_3_3x3_filters): super(Reduction,self).__init__() self.stream1_MF = nn.MaxPool2d(kernel_size=(3,3),stride=2) self.stream2_CF = conv_mod(in_channels,red_stream_2_3x3_filters,kernel_size=(3,3),stride=(2,2)) self.stream3_CF = nn.Sequential( conv_mod(in_channels,red_stream_3_1x1_filters,kernel_size=(1,1)), conv_mod(red_stream_3_1x1_filters,red_stream_3_3x3_filters[0],kernel_size=(3,3)), conv_mod(red_stream_3_3x3_filters[0],red_stream_3_3x3_filters[1],kernel_size=(3,3),stride=(2,2),padding=1) ) def 
forward(self,x): stream1_MF = self.stream1_MF(x) stream2_CF = self.stream2_CF(x) stream3_CF = self.stream3_CF(x) print(stream1_MF.shape,stream2_CF.shape,stream3_CF.shape) return torch.cat([stream1_MF,stream2_CF,stream3_CF],axis=1) class MultiScaleB(nn.Module): def __init__(self,in_channels,Bstream_1_1x1,Bstream_2_1x1,Bstream_2_3x3,Bstream_3_1x1,Bstream_3_3x3): super(MultiScaleB,self).__init__() self.st1 = conv_mod(in_channels,Bstream_1_1x1,kernel_size=(1,1)) self.st2 = nn.Sequential( conv_mod(in_channels,Bstream_2_1x1,kernel_size=(1,1)), conv_mod(Bstream_2_1x1,Bstream_2_3x3[0],kernel_size=(1,3)), conv_mod(Bstream_2_3x3[0],Bstream_2_3x3[1],kernel_size=(3,1),padding=(1,1)) ) self.st3 = nn.Sequential( conv_mod(in_channels,Bstream_3_1x1,kernel_size=(1,1)), conv_mod(Bstream_3_1x1,Bstream_3_3x3[0],kernel_size=(1,3)), conv_mod(Bstream_3_3x3[1],Bstream_3_3x3[2],kernel_size=(3,1)), conv_mod(Bstream_3_3x3[2],Bstream_3_3x3[3],kernel_size=(1,3)), conv_mod(Bstream_3_3x3[3],Bstream_3_3x3[4],kernel_size=(3,1),padding=(2,2)) ) def forward(self,x): st1 = self.st1(x) st2 = self.st2(x) st3 = self.st3(x) print(st1.shape,st2.shape,st3.shape) return torch.cat([st1,st2,st3],axis=1) class self_attention(nn.Module): def __init__(self,feature): super(self_attention,self).__init__() self.f = nn.Conv2d(feature.shape[1],feature.shape[1],kernel_size=(1,1)) self.g = nn.Conv2d(feature.shape[1],feature.shape[1],kernel_size=(1,1)) self.h = nn.Conv2d(feature.shape[1],feature.shape[1],kernel_size=(1,1)) self.soft = nn.Softmax() def forward(self,x): f = self.f(x) g = self.g(x) h = self.h(x) fg = torch.dot(f.T,g) fg_soft = self.soft(fg) return torch.dot(fg_soft,h)
[ "noreply@github.com" ]
ThiruRJST.noreply@github.com
8755ce130eec1e49e1711d0d013ffad390c977c0
e320281b51c509f1aca302fe5dfac606fad52d54
/sshscan_py3.py
bacb5cc97a951ec8b775e806415d24fd0e5c9fc4
[]
no_license
jubbyy/SSHScan
8830d00e121b9b07f679f60f569e0cc8ccd29e7c
52494167f0de4d3ff7c02fa15661f0eda1d83445
refs/heads/master
2020-09-12T07:36:09.731671
2019-11-18T03:39:00
2019-11-18T03:39:00
222,356,987
0
0
null
2019-11-18T03:36:03
2019-11-18T03:36:02
null
UTF-8
Python
false
false
9,616
py
#!/usr/bin/env python # The MIT License (MIT) # # Copyright (c) 2017 Vincent Ruijter # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Cipher detection based on: https://stribika.github.io/2015/01/04/secure-secure-shell.html # import sys, re import socket from optparse import OptionParser, OptionGroup def banner(): banner = """ _____ _____ _ _ _____ / ___/ ___| | | / ___| \ `--.\ `--.| |_| \ `--. ___ __ _ _ __ `--. \`--. | _ |`--. 
\/ __/ _` | '_ \\ /\__/ /\__/ | | | /\__/ | (_| (_| | | | | \____/\____/\_| |_\____/ \___\__,_|_| |_| evict """ return banner def exchange(ip, port): try: conn = socket.create_connection((ip, port),5) print("[*] Connected to %s on port %i..."%(ip, port)) version = conn.recv(50).split('\n')[0] conn.send('SSH-2.0-OpenSSH_6.0p1\r\n') print(" [+] Target SSH version is: %s" %version) print(" [+] Retrieving ciphers...") ciphers = conn.recv(984) conn.close() return ciphers except socket.timeout: print(" [-] Timeout while connecting to %s on port %i\n"%(ip, port)) return False except socket.error as e: if e.errno == 61: print(" [-] %s\n"%(e.strerror)) pass else: print(" [-] Error while connecting to %s on port %i\n"%(ip, port)) return False def validate_target(target): list = target.split(":") if len(list) != 1 and len(list) != 2: # only valid states print("[-] %s is not a valid target!"%target) return False hostname = list[0] if len(hostname) > 255: print("[-] %s is not a valid target!"%target) return False if hostname[-1] == ".": hostname = hostname[:-1] # strip exactly one dot from the right, if present allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) if not all(allowed.match(x) for x in hostname.split(".")): print("[-] %s is not a valid target!"%target) return False if len(list) == 2: # there is a specific port indication port = list[1] try: validport = int(port) if validport < 1 or validport > 65535: print("[-] %s is not a valid target!"%target) return False except ValueError: print("[-] %s is not a valid target!"%target) return False return target def parse_target(target): if validate_target(target): if not re.search(r'[:*]', target): print("[*] Target %s specified without a port number, using default port 22"%target) target = target+':22' ipport=target.split(':') try: print("[*] Initiating scan for %s on port %s" %(ipport[0], ipport[1])) if not get_output(exchange(ipport[0], int(ipport[1]))): return False except IndexError: print(" [-] Please 
specify target as 'target:port'!\n") return False except ValueError: print(" [-] Target port error, please specify a valid port!\n") return False def list_parser(list): try: fd=open(list, 'r') targetlist = fd.read().split('\n') targets = [] for target in targetlist: if target: targets.append(target) print("[*] List contains %i targets to scan" %len(targets)) error = 0 for target in targets: if parse_target(target) == False: error+=1 if error > 0: if error == len(targets): print("[*] Scan failed for all %i hosts!"%len(targets)) else: print("[*] Scan completed for %i out of %i targets!" %((len(targets)-error), len(targets))) except IOError as e: if e.filename: print("[-] %s: '%s'"%(e.strerror, e.filename)) else: print("[-] %s"%e.strerror) sys.exit(2) def get_output(rawlist): if rawlist: ciphers = ['3des-cbc','aes128-cbc','aes192-cbc','aes256-cbc','aes128-ctr','aes192-ctr','aes256-ctr','aes128-gcm@openssh.com','aes256-gcm@openssh.com','arcfour','arcfour128','arcfour256','blowfish-cbc','cast128-cbc','chacha20-poly1305@openssh.com'] strong_ciphers = ['chacha20-poly1305@openssh.com','aes256-gcm@openssh.com','aes128-gcm@openssh.com','aes256-ctr','aes192-ctr','aes128-ctr'] weak_ciphers = [] macs = ['hmac-md5','hmac-md5-96','hmac-ripemd160','hmac-sha1','hmac-sha1-96','hmac-sha2-256','hmac-sha2-512','umac-64','hmac-md5-etm@openssh.com','hmac-md5-96-etm@openssh.com','hmac-ripemd160-etm@openssh.com','hmac-sha1-etm@openssh.com','hmac-sha1-96-etm@openssh.com','hmac-sha2-256-etm@openssh.com','hmac-sha2-512-etm@openssh.com','umac-64-etm@openssh.com','umac-128-etm@openssh.com'] strong_macs = ['hmac-sha2-512-etm@openssh.com','hmac-sha2-256-etm@openssh.com','umac-128','umac-128-etm@openssh.com','hmac-sha2-512','hmac-sha2-256','umac-128@openssh.com'] weak_macs = [] kex = ['curve25519-sha256', 
'curve25519-sha256@libssh.org','diffie-hellman-group1-sha1','diffie-hellman-group14-sha1','diffie-hellman-group-exchange-sha1','diffie-hellman-group-exchange-sha256','ecdh-sha2-nistp256','ecdh-sha2-nistp384','ecdh-sha2-nistp521','ecdsa-sha2-nistp256-cert-v01@openssh.com','ecdsa-sha2-nistp384-cert-v01@openssh.com','ecdsa-sha2-nistp521-cert-v01@openssh.com'] strong_kex = ['curve25519-sha256', 'curve25519-sha256@libssh.org', 'diffie-hellman-group-exchange-sha256'] weak_kex = [] hka = ['ecdsa-sha2-nistp256-cert-v01@openssh.com','ecdsa-sha2-nistp384-cert-v01@openssh.com','ecdsa-sha2-nistp521-cert-v01@openssh.com','ssh-ed25519-cert-v01@openssh.com','ssh-rsa-cert-v01@openssh.com','ssh-dss-cert-v01@openssh.com','ssh-rsa-cert-v00@openssh.com','ssh-dss-cert-v00@openssh.com','ecdsa-sha2-nistp256','ecdsa-sha2-nistp384','ecdsa-sha2-nistp521','ssh-ed25519','ssh-rsa','ssh-dss'] strong_hka = ['ssh-rsa-cert-v01@openssh.com','ssh-ed25519-cert-v01@openssh.com','ssh-rsa-cert-v00@openssh.com','ssh-rsa','ssh-ed25519'] weak_hka = [] dmacs = [] for i in macs: m = re.search(i, rawlist) if m: dmacs.append(i) if i not in strong_macs: weak_macs.append(i) dciphers = [] for i in ciphers: m = re.search(i, rawlist) if m: dciphers.append(i) if i not in strong_ciphers: weak_ciphers.append(i) dkex = [] for i in kex: m = re.search(i, rawlist) if m: dkex.append(i) if i not in strong_kex: weak_kex.append(i) dhka = [] for i in hka: m = re.search(i, rawlist) if m: dhka.append(i) if i not in strong_hka: weak_hka.append(i) compression = False if re.search("zlib@openssh.com", rawlist): compression = True print(' [+] Detected the following ciphers: ') print_columns(dciphers) print(' [+] Detected the following KEX algorithms: ') print_columns(dkex) print(' [+] Detected the following MACs: ') print_columns(dmacs) print(' [+] Detected the following HostKey algorithms: ') print_columns(dhka) if weak_ciphers: print(' [+] Detected the following weak ciphers: ') print_columns(weak_ciphers) else: print(' [+] No weak 
ciphers detected!') if weak_kex: print(' [+] Detected the following weak KEX algorithms: ') print_columns(weak_kex) else: print(' [+] No weak KEX detected!') if weak_macs: print(' [+] Detected the following weak MACs: ') print_columns(weak_macs) else: print(' [+] No weak MACs detected!') if weak_hka: print(' [+] Detected the following weak HostKey algorithms: ') print_columns(weak_hka) else: print(' [+] No weak HostKey algorithms detected!') if compression == True: print(" [+] Compression has been enabled!") return True def print_columns(cipherlist): # adjust the amount of columns to display cols = 2 while len(cipherlist) % cols != 0: cipherlist.append('') else: split = [cipherlist[i:i+len(cipherlist)/cols] for i in range(0, len(cipherlist), len(cipherlist)/cols)] for row in zip(*split): print(" " + "".join(str.ljust(c,37) for c in row)) print("\n") def main(): try: print(banner()) parser = OptionParser(usage="usage %prog [options]", version="%prog 1.0") parameters = OptionGroup(parser, "Options") parameters.add_option("-t", "--target", type="string", help="Specify target as 'target' or 'target:port' (port 22 is default)", dest="target") parameters.add_option("-l", "--target-list", type="string", help="File with targets: 'target' or 'target:port' seperated by a newline (port 22 is default)", dest="targetlist") parser.add_option_group(parameters) options, arguments = parser.parse_args() target = options.target targetlist = options.targetlist if target: parse_target(target) else: if targetlist: list_parser(targetlist) else: print("[-] No target specified!") sys.exit(0) except KeyboardInterrupt: print("\n[-] ^C Pressed, quitting!") sys.exit(3) if __name__ == '__main__': main()
[ "noreply@github.com" ]
jubbyy.noreply@github.com
b7db9a533a69d79aed9172b632eea0b34916ba24
213d9c2c3eeb15403dc69de98ed01c0436a35e39
/calc.py
12710f4ab7f7a572109053fc0611b485f8d7f099
[]
no_license
msravi213/1stRepo
e7a8a9643fd9088291fe7bd15afdea24ac130d78
f3bd05fb7750e83e16fdac9e2ce85f5f4764aa34
refs/heads/master
2020-04-28T02:51:11.002125
2019-04-04T03:54:15
2019-04-04T03:54:15
174,914,106
0
0
null
null
null
null
UTF-8
Python
false
false
140
py
def add(x,y): return x + y def subtract(x,y): pass def multiply(x,y): pass def divide(x,y): pass def square(x,y): pass
[ "raviteja.shindhe@gmail.com" ]
raviteja.shindhe@gmail.com
b67726927a44da27cddb100768d5532598314c80
038af1bfd275530413a7b4e28bf0e40eddf632c6
/parsifal/apps/accounts/tests/test_update_emails_view.py
6e3b2e07623eb93ac58bb135d2b97b941ee0e58f
[ "MIT" ]
permissive
vitorfs/parsifal
5c5345ff75b48c5596977c8e0a9c4c537ed4726c
68c3ce3623a210a9c649a27f9d21ae6130541ea9
refs/heads/dev
2023-05-24T16:34:31.899776
2022-08-14T16:30:06
2022-08-14T16:30:06
11,648,402
410
223
MIT
2023-05-22T10:47:20
2013-07-25T00:27:21
Python
UTF-8
Python
false
false
2,130
py
from django.test.testcases import TestCase from django.urls import reverse from parsifal.apps.authentication.tests.factories import UserFactory from parsifal.utils.test import login_redirect_url class TestUpdateEmailsViewView(TestCase): @classmethod def setUpTestData(cls): cls.user = UserFactory(email="john.doe@example.com") cls.url = reverse("settings:emails") def test_login_required(self): response = self.client.get(self.url) self.assertRedirects(response, login_redirect_url(self.url)) def test_get_success(self): self.client.force_login(self.user) response = self.client.get(self.url) with self.subTest(msg="Test get status code"): self.assertEqual(200, response.status_code) parts = ("csrfmiddlewaretoken", "email", "john.doe@example.com") for part in parts: with self.subTest(msg="Test response body", part=part): self.assertContains(response, part) def test_post_success(self): data = { "email": "doe.john@example.com", } self.client.force_login(self.user) response = self.client.post(self.url, data, follow=True) with self.subTest(msg="Test post status code"): self.assertEqual(302, response.redirect_chain[0][1]) with self.subTest(msg="Test post redirect status code"): self.assertEqual(200, response.status_code) with self.subTest(msg="Test success message"): self.assertContains(response, "Account email was updated with success!") with self.subTest(msg="Test form saved data"): self.assertContains(response, 'value="doe.john@example.com"') def test_post_fail(self): data = {"email": "invalidemail"} self.client.force_login(self.user) response = self.client.post(self.url, data) with self.subTest(msg="Test post status code"): self.assertEqual(200, response.status_code) with self.subTest(msg="Test error message"): self.assertContains(response, "Enter a valid email address.")
[ "vitorfs@gmail.com" ]
vitorfs@gmail.com
2ef2ae61dc6a2e0dc532bdd952daabb4ef0a26af
62bccbc5f6825cbb38fcab949ce3144a8546858a
/utils/vis.py
82167b26ead8c072baa4c5a6d136f4c92350cd7b
[]
no_license
Waynehfut/InterpretableDiagnosis
895648170629c56abd5c7e6afb3ef68bac4a0538
66b017cfbc40583b0400b02914edf34a2a9de59a
refs/heads/main
2023-04-26T10:50:05.356229
2021-05-19T02:10:09
2021-05-19T02:10:09
368,718,464
1
0
null
null
null
null
UTF-8
Python
false
false
3,239
py
# -*- coding: utf-8 -*- import torch import cv2 import numpy as np from PIL import Image from collections import OrderedDict import torchvision.transforms.functional as TF import tifffile as tiff from pytorch_grad_cam import GradCAM from pytorch_grad_cam.utils.image import show_cam_on_image def fetch_cam(model, layer, img_path, save_cam_path, stated_dict): # Avoid data parallel problem new_state_dict = OrderedDict() for k, v in stated_dict.items(): name = k[7:] # remove `module.` new_state_dict[name] = v model.load_state_dict(new_state_dict) image = cv2.imread(img_path) rgb_img = np.float32(image) / 255 input_tensor = TF.to_tensor(image) input_tensor.unsqueeze_(0) cam = GradCAM(model=model, target_layer=layer, use_cuda=True) target_category = 4 grayscale_cam = cam(input_tensor=input_tensor, target_category=target_category) grayscale_cam = grayscale_cam[0, :] visualization = show_cam_on_image(rgb_img, grayscale_cam) cv2.imwrite(save_cam_path + '_cam.jpg', visualization) cam_mask = np.uint8(grayscale_cam * 255) thresh = 150 ret, thresh_img = cv2.threshold(cam_mask, thresh, 255, cv2.THRESH_BINARY) contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) contours_mask = np.zeros((image.shape[0], image.shape[1])) cv2.drawContours(contours_mask, contours, -1, 255, 4) return contours_mask def fetch_seg(net, net_dict_path, img_path, input_size, tform, use_gpu, save_img_path): # Avoid data parallel problem stated_dict = torch.load(net_dict_path) new_state_dict = OrderedDict() for k, v in stated_dict.items(): name = k[7:] # remove `module.` new_state_dict[name] = v # load params net.load_state_dict(new_state_dict) net.eval() if use_gpu: net.cuda() test_img = Image.open(img_path) original_img = cv2.imread(img_path) img_orig_size = test_img.size test_img = test_img.resize((input_size, input_size)) test_inp = tform(test_img) if use_gpu: test_inp = test_inp.cuda() probs = net(test_inp.unsqueeze(0)).squeeze(0).cpu() preds = (probs > 0.52).float() 
preds[preds > 0] = 255 pred_np = np.asarray(preds.numpy(), dtype=np.uint8) # model predict tiff.imwrite(save_img_path + 'seg_mask.tif', pred_np) # resize to original size pred_data = np.zeros((5, img_orig_size[1], img_orig_size[0])) # save pred_mask greyscale for classNum in range(5): pred_data[classNum] = np.array(Image.fromarray(pred_np[classNum]).resize(img_orig_size)) pred_grey = pred_data.sum(axis=0) pred_grey = cv2.dilate(pred_grey, np.ones((15, 15), np.uint8)) pred_grey = np.uint8(pred_grey) thresh = 100 ret, thresh_img = cv2.threshold(pred_grey, thresh, 255, cv2.THRESH_BINARY) contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) cv2.drawContours(original_img, contours, -1, (0, 255, 0), 4) contours_mask = np.zeros((img_orig_size[1], img_orig_size[0])) cv2.drawContours(contours_mask, contours, -1, 255, 4) cv2.imwrite(save_img_path + 'seg_contours.jpg', original_img) return contours_mask
[ "waynehfut@outlook.com" ]
waynehfut@outlook.com
e37afaeabfcb656b1256de9e0c890e595041d426
6b2f57d812c8c49efbdb78d8811fff7b63ae4dad
/venv/bin/email_validator
ce8b0814340b81654390b69840b8a743ba52dced
[]
no_license
anyac99/template-blog
5e555391ddd0a20604e4c378d0443f129d844258
a270a7be4b6e9c86d025ed7c212a92c512c44177
refs/heads/main
2023-09-02T19:54:37.497476
2021-11-08T23:27:24
2021-11-08T23:27:24
421,213,415
0
0
null
null
null
null
UTF-8
Python
false
false
258
#!/Users/anyacollins/PycharmProjects/flaskblog/venv/bin/python # -*- coding: utf-8 -*- import re import sys from email_validator import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) sys.exit(main())
[ "anyacollins@anyas-air.lan" ]
anyacollins@anyas-air.lan