blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
616
content_id
stringlengths
40
40
detected_licenses
listlengths
0
69
license_type
stringclasses
2 values
repo_name
stringlengths
5
118
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
63
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
2.91k
686M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
23 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
213 values
src_encoding
stringclasses
30 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
2
10.3M
extension
stringclasses
246 values
content
stringlengths
2
10.3M
authors
listlengths
1
1
author_id
stringlengths
0
212
54a7a8cba0c76261822e8420ebdd9b22a638ba22
1ba12eb2be477e7dc99b4f13d1014917e78199aa
/usr/lib/solydxk/constructor/solydxk.py
89f79749e8211f426ccb25c69f76882e3d7ac50e
[]
no_license
KDB2/solydxk-constructor
0704f5ce5ef331f45888348804936cfcf4c43f25
c05b8c38b873bb36eb3c8d3160600f45d5cd4798
refs/heads/master
2021-01-17T06:31:41.055358
2015-11-03T16:02:32
2015-11-03T16:02:32
null
0
0
null
null
null
null
UTF-8
Python
false
false
27,011
py
#! /usr/bin/env python3 import re import threading from os import remove, rmdir, makedirs, system, listdir from shutil import copy, move from datetime import datetime from execcmd import ExecCmd from os.path import join, exists, basename, abspath, dirname, lexists, isdir class IsoUnpack(threading.Thread): def __init__(self, mountDir, unpackIso, unpackDir, queue): threading.Thread.__init__(self) self.ec = ExecCmd() self.mountDir = mountDir self.unpackIso = unpackIso self.unpackDir = unpackDir self.queue = queue self.returnMessage = None def run(self): try: if not exists(self.mountDir): print(("Create mount directory: %s" % self.mountDir)) makedirs(self.mountDir) rootDir = join(self.unpackDir, "root") if not exists(rootDir): print(("Create root directory: %s" % rootDir)) makedirs(rootDir) isolinuxDir = join(self.unpackDir, "boot/isolinux") if not exists(isolinuxDir): print(("Create isolinux directory: %s" % isolinuxDir)) makedirs(isolinuxDir) liveDir = join(self.unpackDir, "boot/live") if not exists(liveDir): print(("Create liveDir directory: %s" % liveDir)) makedirs(liveDir) # Mount the ISO system("mount -o loop '%s' '%s'" % (self.unpackIso, self.mountDir)) # Check isolinux directory mountIsolinux = join(self.mountDir, "isolinux") if not exists(mountIsolinux): self.ec.run("umount --force '%s'" % self.mountDir) self.returnMessage = "ERROR: Cannot find isolinux directory in ISO" fixCfgCmd = None dirs = [] mountSquashfs = None if self.returnMessage is None: subdirs = self.getDirectSubDirectories(self.mountDir) for subdir in subdirs: if self.hasSquashFs(join(self.mountDir, subdir)): mountSquashfs = join(self.mountDir, subdir) if subdir != "live": fixCfgCmd = "sed -i 's/\/%s/\/live/g' %s/isolinux.cfg" % (subdir, isolinuxDir) elif subdir != "isolinux": dirs.append(join(self.mountDir, subdir)) if mountSquashfs is None: self.ec.run("umount --force '%s'" % self.mountDir) self.returnMessage = "ERROR: Cannot find squashfs directory in ISO" if self.returnMessage is None: # Copy 
files from ISO to unpack directory for d in dirs: self.ec.run("rsync -at --del '%s' '%s'" % (d, join(self.unpackDir, "boot/"))) self.ec.run("rsync -at --del '%s/' '%s'" % (mountIsolinux, isolinuxDir)) self.ec.run("rsync -at --del '%s/' '%s'" % (mountSquashfs, liveDir)) self.ec.run("umount --force '%s'" % self.mountDir) if fixCfgCmd is not None: self.ec.run(fixCfgCmd) # copy squashfs root squashfs = join(liveDir, "filesystem.squashfs") if exists(squashfs): self.ec.run("mount -t squashfs -o loop '%s' '%s'" % (squashfs, self.mountDir)) self.ec.run("rsync -at --del '%s/' '%s/'" % (self.mountDir, rootDir)) self.ec.run("umount --force '%s'" % self.mountDir) # Cleanup rmdir(self.mountDir) # set proper permissions self.ec.run("chmod 6755 '%s'" % join(rootDir, "usr/bin/sudo")) self.ec.run("chmod 0440 '%s'" % join(rootDir, "etc/sudoers")) self.returnMessage = "DONE - ISO unpacked to: %s" % self.unpackDir self.queue.put(self.returnMessage) except Exception as detail: self.ec.run("umount --force '%s'" % self.mountDir) rmdir(self.mountDir) self.returnMessage = "ERROR: IsoUnpack: %(detail)s" % {"detail": detail} self.queue.put(self.returnMessage) def getDirectSubDirectories(self, directory): subdirs = [] names = listdir(directory) for name in names: if isdir(join(directory, name)): subdirs.append(name) return subdirs def hasSquashFs(self, directory): names = listdir(directory) for name in names: if name == "filesystem.squashfs": return True return False class BuildIso(threading.Thread): def __init__(self, distroPath, queue): threading.Thread.__init__(self) self.ec = ExecCmd() self.dg = DistroGeneral(distroPath) self.ed = EditDistro(distroPath) self.queue = queue self.returnMessage = None # Paths distroPath = distroPath.rstrip('/') if basename(distroPath) == "root": distroPath = dirname(distroPath) self.distroPath = distroPath self.rootPath = join(distroPath, "root") self.bootPath = join(distroPath, "boot") self.livePath = join(self.bootPath, "live") self.scriptDir = 
abspath(dirname(__file__)) # Check for old dir oldDir = join(self.bootPath, "solydxk") if exists(oldDir): self.ec.run("rm -r %s" % oldDir) # Make sure live directory exists if not exists(self.livePath): self.ec.run("mkdir -p %s" % self.livePath) # ISO Name self.isoName = self.dg.description # ISO distribution self.isoBaseName = self.dg.getIsoFileName() self.isoFileName = join(self.distroPath, self.isoBaseName) # Trackers, and webseeds self.trackers = "" self.webseeds = "" trackersPath = join(self.scriptDir, "files/trackers") webseedsPath = join(self.scriptDir, "files/webseeds") if exists(trackersPath): with open(trackersPath, "r") as f: lines = f.readlines() trList = [] for line in lines: trList.append(line.strip()) self.trackers = ",".join(trList) if exists(webseedsPath): with open(webseedsPath, "r") as f: lines = f.readlines() wsList = [] for line in lines: #wsList.append("%s/%s" % (line.strip(), webseedIsoName)) wsList.append("%s/%s" % (line.strip(), self.isoBaseName)) self.webseeds = ",".join(wsList) def run(self): try: if not exists(self.rootPath): self.returnMessage = "ERROR: Cannot find root directory: %s" % self.rootPath if not exists(self.bootPath): self.returnMessage = "ERROR: Cannot find boot directory: %s" % self.bootPath if self.returnMessage is None: print("======================================================") print("INFO: Cleanup and prepare ISO build...") print("======================================================") # Clean-up script = "cleanup.sh" scriptSource = join(self.scriptDir, "files/{}".format(script)) scriptTarget = join(self.rootPath, script) if exists(scriptSource): self.copy_file(scriptSource, scriptTarget) self.ec.run("chmod a+x %s" % scriptTarget) plymouthTheme = self.dg.getPlymouthTheme() #self.ec.run("chroot '%(rootPath)s' /bin/bash %(cleanup)s %(plymouthTheme)s" % {"rootPath": self.rootPath, "cleanup": cleanup, "plymouthTheme": plymouthTheme}) cmd = "/bin/bash %(cleanup)s %(plymouthTheme)s" % {"cleanup": script, 
"plymouthTheme": plymouthTheme} self.ed.openTerminal(cmd) remove(scriptTarget) rootHome = join(self.rootPath, "root") nanoHist = join(rootHome, ".nano_history") if exists(nanoHist): remove(nanoHist) bashHist = join(rootHome, ".bash_history") if exists(bashHist): remove(bashHist) # Config naming regExp = "solyd.*(\d{6}|-bit)" d = datetime.now() dateString = d.strftime("%Y%m") nameString = "{} {}".format(self.isoName, dateString) # write iso name to boot/isolinux/isolinux.cfg cfgFile = join(self.bootPath, "isolinux/isolinux.cfg") if exists(cfgFile): content = "" with open(cfgFile, 'r') as f: content = f.read() if content != "": content = re.sub(regExp, nameString, content, flags=re.IGNORECASE) # Make sure that the paths are correct (correcting very old stuff) content = re.sub('.lz', '.img', content) content = re.sub('/solydxk/', '/live/', content) with open(cfgFile, 'w') as f: f.write(content) # Write info for grub (EFI) grubFile = join(self.bootPath, "boot/grub/grub.cfg") if exists(grubFile): content = "" with open(grubFile, 'r') as f: content = f.read() if content != "": content = re.sub(regExp, nameString, content, flags=re.IGNORECASE) with open(grubFile, 'w') as f: f.write(content) loopbackFile = join(self.bootPath, "boot/grub/loopback.cfg") if exists(loopbackFile): content = "" with open(loopbackFile, 'r') as f: content = f.read() if content != "": content = re.sub(regExp, nameString, content, flags=re.IGNORECASE) with open(loopbackFile, 'w') as f: f.write(content) # Clean boot/live directory #popen("rm -rf %s/live/*" % self.bootPath) # Vmlinuz vmlinuzSymLink = join(self.distroPath, "root/vmlinuz") if lexists(vmlinuzSymLink): vmlinuzFile = self.ec.run("ls -al %s | cut -d'>' -f2" % vmlinuzSymLink)[0].strip() else: self.returnMessage = "ERROR: %s not found" % vmlinuzSymLink if self.returnMessage is None: vmlinuzPath = join(self.distroPath, "root/%s" % vmlinuzFile) if exists(vmlinuzPath): print("Copy vmlinuz") self.copy_file(vmlinuzPath, join(self.livePath, 
"vmlinuz")) else: self.returnMessage = "ERROR: %s not found" % vmlinuzPath if self.returnMessage is None: # Initrd initrdSymLink = join(self.distroPath, "root/initrd.img") if lexists(initrdSymLink): initrdFile = self.ec.run("ls -al %s | cut -d'>' -f2" % initrdSymLink)[0].strip() else: self.returnMessage = "ERROR: %s not found" % initrdSymLink if self.returnMessage is None: initrdPath = join(self.distroPath, "root/%s" % initrdFile) if exists(initrdPath): print("Copy initrd") self.copy_file(initrdPath, join(self.livePath, "initrd.img")) else: self.returnMessage = "ERROR: %s not found" % initrdPath if self.returnMessage is None: # Generate UUID #diskDir = join(self.bootPath, ".disk") #if not exists(diskDir): #makedirs(diskDir) #self.ec.run("rm -rf %s/*uuid*" % diskDir) #self.ec.run("uuidgen -r > %s/live-uuid-generic" % diskDir) #copy_file(join(diskDir, "live-uuid-generic"), join(diskDir, "live-uuid-generic")) #Update filesystem.size #self.ec.run("du -b %(directory)s/root/ 2> /dev/null | tail -1 | awk {'print $1;'} > %(directory)s/live/filesystem.size" % {"directory": self.bootPath}) print("======================================================") print("INFO: Start building ISO...") print("======================================================") # build squash root print("Creating SquashFS root...") print("Updating File lists...") dpkgQuery = ' dpkg -l | awk \'/^ii/ {print $2, $3}\' | sed -e \'s/ /\t/g\' ' self.ec.run('chroot \"' + self.rootPath + '\"' + dpkgQuery + ' > \"' + join(self.livePath, "filesystem.packages") + '\"' ) #dpkgQuery = ' dpkg-query -W --showformat=\'${Package} ${Version}\n\' ' #self.ec.run('chroot \"' + self.rootPath + '\"' + dpkgQuery + ' > \"' + join(self.bootPath, "live/filesystem.manifest") + '\"' ) #copy_file(join(self.bootPath, "live/filesystem.manifest"), join(self.bootPath, "live/filesystem.manifest-desktop")) # check for existing squashfs root if exists(join(self.livePath, "filesystem.squashfs")): print("Removing existing SquashFS 
root...") remove(join(self.livePath, "filesystem.squashfs")) print("Building SquashFS root...") # check for alternate mksquashfs # check for custom mksquashfs (for multi-threading, new features, etc.) mksquashfs = self.ec.run(cmd="echo $MKSQUASHFS", returnAsList=False).strip() rootPath = join(self.distroPath, "root/") squashfsPath = join(self.livePath, "filesystem.squashfs") if mksquashfs == '' or mksquashfs == 'mksquashfs': try: nrprocessors = int(int(self.ec.run("nproc", False, False))/2) if nrprocessors < 1: nrprocessors = 1 except: nrprocessors = 1 cmd = "mksquashfs \"{}\" \"{}\" -comp xz -processors {}".format(rootPath, squashfsPath, nrprocessors) else: cmd = "{} \"{}\" \"{}\"".format(mksquashfs, rootPath, squashfsPath) #print(cmd) self.ec.run(cmd) # build iso print("Creating ISO...") # update manifest files #self.ec.run("/usr/lib/solydxk/constructor/updateManifest.sh %s" % self.distroPath) # update md5 print("Updating md5 sums...") if exists(join(self.bootPath, "md5sum.txt")): remove(join(self.bootPath, "md5sum.txt")) if exists(join(self.bootPath, "MD5SUMS")): remove(join(self.bootPath, "MD5SUMS")) self.ec.run('cd \"' + self.bootPath + '\"; ' + 'find . 
-type f -print0 | xargs -0 md5sum > md5sum.txt') #Remove md5sum.txt, MD5SUMS, boot.cat and isolinux.bin from md5sum.txt self.ec.run("sed -i '/md5sum.txt/d' %s/md5sum.txt" % self.bootPath) self.ec.run("sed -i '/MD5SUMS/d' %s/md5sum.txt" % self.bootPath) self.ec.run("sed -i '/boot.cat/d' %s/md5sum.txt" % self.bootPath) self.ec.run("sed -i '/isolinux.bin/d' %s/md5sum.txt" % self.bootPath) #Copy md5sum.txt to MD5SUMS (for Debian compatibility) self.copy_file(join(self.bootPath, "md5sum.txt"), join(self.bootPath, "MD5SUMS")) # Update isolinux files syslinuxPath = join(self.rootPath, "usr/lib/syslinux") modulesPath = join(syslinuxPath, "modules/bios") isolinuxPath = join(self.bootPath, "isolinux") self.ec.run("chmod -R +w {}".format(isolinuxPath)) cat = join(isolinuxPath, "boot.cat") if exists(cat): remove(cat) self.copy_file(join(modulesPath, "chain.c32"), isolinuxPath) self.copy_file(join(modulesPath, "hdt.c32"), isolinuxPath) self.copy_file(join(modulesPath, "libmenu.c32"), isolinuxPath) self.copy_file(join(modulesPath, "libgpl.c32"), isolinuxPath) self.copy_file(join(modulesPath, "reboot.c32"), isolinuxPath) self.copy_file(join(modulesPath, "vesamenu.c32"), isolinuxPath) self.copy_file(join(modulesPath, "poweroff.c32"), isolinuxPath) self.copy_file(join(modulesPath, "ldlinux.c32"), isolinuxPath) self.copy_file(join(modulesPath, "libcom32.c32"), isolinuxPath) self.copy_file(join(modulesPath, "libutil.c32"), isolinuxPath) self.copy_file(join(self.rootPath, "boot/memtest86+.bin"), join(isolinuxPath, "memtest86")) self.copy_file("/usr/lib/ISOLINUX/isolinux.bin", isolinuxPath) # remove existing iso if exists(self.isoFileName): print("Removing existing ISO...") remove(self.isoFileName) # build iso according to architecture print("Building ISO...") self.ec.run('genisoimage -input-charset utf-8 -o \"' + self.isoFileName + '\" -b \"isolinux/isolinux.bin\" -c \"isolinux/boot.cat\" -no-emul-boot -boot-load-size 4 -boot-info-table -V \"' + self.isoName + '\" -cache-inodes -r -J 
-l \"' + self.bootPath + '\"') print("Making Hybrid ISO...") self.ec.run("isohybrid %s" % self.isoFileName) print("Create ISO md5 file...") self.ec.run("echo \"$(md5sum \"%s\" | cut -d' ' -f 1) %s\" > \"%s.md5\"" % (self.isoFileName, self.isoBaseName, self.isoFileName)) print("Create Torrent file...") torrentFile = "%s.torrent" % self.isoFileName if exists(torrentFile): remove(torrentFile) self.ec.run("mktorrent -a \"%s\" -c \"%s\" -w \"%s\" -o \"%s\" \"%s\"" % (self.trackers, self.isoName, self.webseeds, torrentFile, self.isoFileName)) print("======================================================") self.returnMessage = "DONE - ISO Located at: %s" % self.isoFileName print((self.returnMessage)) print("======================================================") self.queue.put(self.returnMessage) except Exception as detail: self.returnMessage = "ERROR: BuildIso: %(detail)s" % {"detail": detail} self.queue.put(self.returnMessage) def copy_file(self, file_path, destination): if exists(file_path): try: copy(file_path, destination) except Exception as detail: print(("ERROR: BuildIso.copy_file: {}".format(detail))) else: print(("ERROR: BuildIso.copy_file: cannot find {}".format(file_path))) # Class to create a chrooted terminal for a given directory # https://wiki.debian.org/chroot class EditDistro(object): def __init__(self, distroPath): self.ec = ExecCmd() self.dg = DistroGeneral(distroPath) distroPath = distroPath.rstrip('/') if basename(distroPath) == "root": distroPath = dirname(distroPath) self.rootPath = join(distroPath, "root") # ISO edition self.edition = self.dg.edition def openTerminal(self, command=""): # Set some paths resolveCnfHost = "/etc/resolv.conf" resolveCnf = join(self.rootPath, "etc/resolv.conf") resolveCnfBak = "%s.bak" % resolveCnf wgetrc = join(self.rootPath, "etc/wgetrc") wgetrcBak = "%s.bak" % wgetrc terminal = "/tmp/constructor-terminal.sh" lockDir = join(self.rootPath, "run/lock/") proc = join(self.rootPath, "proc/") dev = join(self.rootPath, 
"dev/") pts = join(self.rootPath, "dev/pts/") sys = join(self.rootPath, "sys/") policy = join(self.rootPath, "usr/sbin/policy-rc.d") ischroot = join(self.rootPath, "usr/bin/ischroot") ischrootTmp = join(self.rootPath, "usr/bin/ischroot.tmp") try: # temporary create /run/lock if not exists(lockDir): makedirs(lockDir) # setup environment # copy dns info if exists(resolveCnf): move(resolveCnf, resolveCnfBak) if exists(resolveCnfHost): copy(resolveCnfHost, resolveCnf) # umount /proc /dev /dev/pts /sys self.unmount([pts, dev, proc, sys]) # mount /proc /dev /dev/pts /sys /run /sys self.ec.run("mount --bind /proc '%s'" % proc) self.ec.run("mount --bind /dev '%s'" % dev) self.ec.run("mount --bind /dev/pts '%s'" % pts) self.ec.run("mount --bind /sys '%s'" % sys) # copy apt.conf #copy("/etc/apt/apt.conf", join(self.rootPath, "etc/apt/apt.conf")) # copy wgetrc move(wgetrc, wgetrcBak) copy("/etc/wgetrc", wgetrc) # Let dpkg only start daemons when desired scr = "#!/bin/sh\nexit 101\n" with open(policy, 'w') as f: f.write(scr) self.ec.run("chmod a+x %s" % policy) # Temporary fix ischroot if not exists(ischrootTmp): self.ec.run("mv %s %s" % (ischroot, ischrootTmp)) if not exists(ischroot): self.ec.run("ln -s /bin/true %s" % ischroot) # HACK: create temporary script for chrooting if exists(terminal): remove(terminal) scr = "#!/bin/sh\nchroot '%s' %s\n" % (self.rootPath, command) with open(terminal, 'w') as f: f.write(scr) self.ec.run("chmod a+x %s" % terminal) if self.ec.run('which x-terminal-emulator'): # use x-terminal-emulator if xterm isn't available if exists("/usr/bin/xterm"): self.ec.run('export HOME=/root ; xterm -bg black -fg white -rightbar -title \"%s\" -e %s' % (self.edition, terminal)) else: self.ec.run('export HOME=/root ; x-terminal-emulator -e %s' % terminal) else: print('Error: no valid terminal found') # restore wgetrc move(wgetrcBak, wgetrc) # remove apt.conf #remove(join(self.rootPath, "root/etc/apt/apt.conf")) # move dns info if exists(resolveCnfBak): 
move(resolveCnfBak, resolveCnf) else: remove(resolveCnf) # umount /proc /dev /dev/pts /sys self.unmount([pts, dev, proc, sys]) # remove temp script if exists(terminal): remove(terminal) # remove policy script if exists(policy): remove(policy) # replace ischroot if exists("%s.tmp" % ischroot): self.ec.run("rm %s" % ischroot) self.ec.run("mv %s.tmp %s" % (ischroot, ischroot)) # cleanup /run self.ec.run("rm -rf %s/run/*" % self.rootPath) except Exception as detail: # restore wgetrc move(wgetrcBak, wgetrc) # remove apt.conf #remove(join(self.rootPath, "etc/apt/apt.conf")) # move dns info if exists(resolveCnfBak): move(resolveCnfBak, resolveCnf) else: remove(resolveCnf) # umount /proc /dev /dev/pts /sys self.unmount([pts, dev, proc, sys]) # remove temp script if exists(terminal): remove(terminal) # remove policy script if exists(policy): remove(policy) # replace ischroot if exists("%s.tmp" % ischroot): self.ec.run("rm %s" % ischroot) self.ec.run("mv %s.tmp %s" % (ischroot, ischroot)) # cleanup /run self.ec.run("rm -rf %s/run/*" % self.rootPath) errText = 'Error launching terminal: ' print((errText, detail)) def unmount(self, mounts=[]): for mount in mounts: self.ec.run("umount --force '%s'" % mount) self.ec.run("umount -l '%s'" % mount) class DistroGeneral(object): def __init__(self, distroPath): self.ec = ExecCmd() distroPath = distroPath.rstrip('/') if basename(distroPath) == "root": distroPath = dirname(distroPath) self.distroPath = distroPath self.rootPath = join(distroPath, "root") self.edition = basename(distroPath) self.description = "SolydXK" infoPath = join(self.rootPath, "etc/solydxk/info") if exists(infoPath): self.edition = self.ec.run(cmd="grep EDITION= {} | cut -d'=' -f 2".format(infoPath), returnAsList=False).strip('"') self.description = self.ec.run(cmd="grep DESCRIPTION= {} | cut -d'=' -f 2".format(infoPath), returnAsList=False).strip('"') def getPlymouthTheme(self): plymouthTheme = "" if exists(join(self.rootPath, 
"usr/share/plymouth/themes/solydk-logo")): plymouthTheme = "solydk-logo" elif exists(join(self.rootPath, "usr/share/plymouth/themes/solydx-logo")): plymouthTheme = "solydx-logo" return plymouthTheme def getIsoFileName(self): # Get the date string d = datetime.now() serial = d.strftime("%Y%m") # Check for a localized system localePath = join(self.rootPath, "etc/default/locale") if exists(localePath): locale = self.ec.run(cmd="grep LANG= {}".format(localePath), returnAsList=False).strip('"').replace(" ", "") matchObj = re.search("\=\s*([a-z]{2})", locale) if matchObj: language = matchObj.group(1) if language != "en": serial += "_{}".format(language) isoFileName = "{}_{}.iso".format(self.description.lower().replace(' ', '_').split('-')[0], serial) return isoFileName
[ "root@solydxk" ]
root@solydxk
84bc89794412d5e88416f0917f873ba361cbb1cd
41f28fc3b3c7f34b879bacb2e25157b551c054bb
/label_studio/data_manager/functions.py
655b82b5cef12f9c8eed602a30de76d3a8b7085e
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
dolanor-galaxy/label-studio
cd478cb54e4948cbb5226c02e088465cdaa12a6b
722358a6cdfbe5a35e7b16f586675df4b598f74f
refs/heads/master
2023-08-11T08:52:52.433731
2021-09-30T09:52:05
2021-09-30T09:52:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,278
py
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license. """ import logging from collections import OrderedDict from django.conf import settings from rest_framework.generics import get_object_or_404 from core.utils.common import int_from_request from data_manager.prepare_params import PrepareParams from data_manager.models import View from tasks.models import Task TASKS = 'tasks:' logger = logging.getLogger(__name__) class DataManagerException(Exception): pass def get_all_columns(project, *_): """ Make columns info for the frontend data manager """ result = {'columns': []} # frontend uses MST data model, so we need two directional referencing parent <-> child task_data_children = [] i = 0 data_types = OrderedDict() # add data types from config again project_data_types = project.data_types data_types.update(project_data_types.items()) # all data types from import data all_data_columns = project.summary.all_data_columns if all_data_columns: data_types.update({key: 'Unknown' for key in all_data_columns if key not in data_types}) # remove $undefined$ if there is one type at least in labeling config, because it will be resolved automatically if len(project_data_types) > 0: data_types.pop(settings.DATA_UNDEFINED_NAME, None) for key, data_type in list(data_types.items()): # make data types from labeling config first column = { 'id': key, 'title': key if key != settings.DATA_UNDEFINED_NAME else 'data', 'type': data_type if data_type in ['Image', 'Audio', 'AudioPlus', 'Unknown'] else 'String', 'target': 'tasks', 'parent': 'data', 'visibility_defaults': { 'explore': True, 'labeling': key in project_data_types or key == settings.DATA_UNDEFINED_NAME } } result['columns'].append(column) task_data_children.append(column['id']) i += 1 # --- Data root --- data_root = { 'id': 'data', 'title': "data", 'type': "List", 'target': 'tasks', 'children': task_data_children } 
result['columns'] += [ # --- Tasks --- { 'id': 'id', 'title': "ID", 'type': 'Number', 'help': 'Task ID', 'target': 'tasks', 'visibility_defaults': { 'explore': True, 'labeling': False } }, { 'id': 'completed_at', 'title': 'Completed', 'type': 'Datetime', 'target': 'tasks', 'help': 'Last annotation date', 'visibility_defaults': { 'explore': True, 'labeling': False } }, { 'id': 'total_annotations', 'title': 'Annotations', 'type': "Number", 'target': 'tasks', 'help': 'Total annotations per task', 'visibility_defaults': { 'explore': True, 'labeling': True } }, { 'id': 'cancelled_annotations', 'title': "Cancelled", 'type': "Number", 'target': 'tasks', 'help': 'Total cancelled (skipped) annotations', 'visibility_defaults': { 'explore': True, 'labeling': False } }, { 'id': 'total_predictions', 'title': "Predictions", 'type': "Number", 'target': 'tasks', 'help': 'Total predictions per task', 'visibility_defaults': { 'explore': True, 'labeling': False } }, { 'id': 'annotators', 'title': 'Annotated by', 'type': 'List', 'target': 'tasks', 'help': 'All users who completed the task', 'schema': {'items': project.organization.members.values_list('user__id', flat=True)}, 'visibility_defaults': { 'explore': True, 'labeling': False } }, { 'id': 'annotations_results', 'title': "Annotation results", 'type': "String", 'target': 'tasks', 'help': 'Annotation results stacked over all annotations', 'visibility_defaults': { 'explore': False, 'labeling': False } }, { 'id': 'annotations_ids', 'title': "Annotation IDs", 'type': "String", 'target': 'tasks', 'help': 'Annotation IDs stacked over all annotations', 'visibility_defaults': { 'explore': False, 'labeling': False } }, { 'id': 'predictions_score', 'title': "Prediction score", 'type': "Number", 'target': 'tasks', 'help': 'Average prediction score over all task predictions', 'visibility_defaults': { 'explore': False, 'labeling': False } }, { 'id': 'predictions_results', 'title': "Prediction results", 'type': "String", 'target': 'tasks', 
'help': 'Prediction results stacked over all predictions', 'visibility_defaults': { 'explore': False, 'labeling': False } }, { 'id': 'file_upload', 'title': "Source filename", 'type': "String", 'target': 'tasks', 'help': 'Source filename from import step', 'visibility_defaults': { 'explore': False, 'labeling': False } }, { 'id': 'created_at', 'title': 'Created at', 'type': 'Datetime', 'target': 'tasks', 'help': 'Task creation time', 'visibility_defaults': { 'explore': False, 'labeling': False } } ] result['columns'].append(data_root) return result def get_prepare_params(request, project): # use filters and selected items from view view_id = int_from_request(request.GET, 'view_id', 0) if view_id > 0: view = get_object_or_404(request, View, pk=view_id) if view.project.pk != project.pk: raise DataManagerException('Project and View mismatch') prepare_params = view.get_prepare_tasks_params(add_selected_items=True) # use filters and selected items from request if it's specified else: selected = request.data.get('selectedItems', {"all": True, "excluded": []}) if not isinstance(selected, dict): raise DataManagerException('selectedItems must be dict: {"all": [true|false], ' '"excluded | included": [...task_ids...]}') filters = request.data.get('filters', None) ordering = request.data.get('ordering', []) prepare_params = PrepareParams(project=project.id, selectedItems=selected, data=request.data, filters=filters, ordering=ordering) return prepare_params def get_prepared_queryset(request, project): prepare_params = get_prepare_params(request, project) queryset = Task.prepared.only_filtered(prepare_params=prepare_params) return queryset def evaluate_predictions(tasks): """ Call ML backend for prediction evaluation of the task queryset """ if not tasks: return project = tasks[0].project for ml_backend in project.ml_backends.all(): # tasks = tasks.filter(~Q(predictions__model_version=ml_backend.model_version)) ml_backend.predict_many_tasks(tasks) def 
filters_ordering_selected_items_exist(data): return data.get('filters') or data.get('ordering') or data.get('selectedItems')
[ "noreply@github.com" ]
dolanor-galaxy.noreply@github.com
7ced0a5bfb9b3e5397190462506fd668a94e38af
a4185782266d2e596ff264af76776b82f9a3adf8
/2015/17_1.py
9c1440f546923ffc04173e18138eb3d52c77bae3
[]
no_license
PavithraP/advent
04f2cfc268e3b8c84ac26dbb9bf300036a7502e3
9d9247c3add95263f4db1982d1f96d9f8e8e35ca
refs/heads/master
2021-01-10T16:02:47.754326
2016-12-14T13:50:27
2016-12-14T13:50:27
47,602,508
3
0
null
null
null
null
UTF-8
Python
false
false
270
py
#!/usr/bin/env python3
"""Advent of Code 2015, day 17 (part 1).

Count how many subsets of the container sizes sum exactly to the target
volume. Each integer in ``range(2 ** len(containers))`` is interpreted as
a bitmask selecting which containers participate in the subset.

The original script was Python 2 only (``print no`` statement) and would
also loop forever under Python 3, where ``num = num / 2`` yields floats
that never reach 0 via integer halving; bit operations avoid both issues.
"""


def count_combinations(containers, target):
    """Return how many subsets of *containers* sum exactly to *target*.

    :param containers: sequence of non-negative container sizes
    :param target: exact total volume to reach
    :return: number of bitmask-selected subsets whose sizes sum to target
    """
    matches = 0
    # Enumerate every subset via its bitmask (2**len(containers) of them).
    for mask in range(1 << len(containers)):
        total = 0
        for index, size in enumerate(containers):
            if mask & (1 << index):
                total += size
        if total == target:
            matches += 1
    return matches


# Puzzle input: the twenty container sizes.
CONTAINERS = [11, 30, 47, 31, 32, 36, 3, 1, 5, 3, 32, 36, 15, 11, 46, 26, 28, 1, 19, 3]

if __name__ == "__main__":
    print(count_combinations(CONTAINERS, 150))
[ "pavithra.p@sanctumnetworks.com" ]
pavithra.p@sanctumnetworks.com
4ceb508de96190a7e0a24c04b217aef38ed63e63
fb9722f0bf9556f5c04ba5c2795a7c23e7bff7ca
/lista.py
e6605f71cc764640f8d592c6ae6c6a4b54c215bb
[]
no_license
anastasiacebotari15/List
d59aad164bf082537bed6f86fb3bba087e1a5e22
432dcd0fd6b3b0369b843da71586cd073476d770
refs/heads/main
2023-02-21T08:54:17.280665
2021-01-25T20:04:14
2021-01-25T20:04:14
332,862,203
0
0
null
null
null
null
UTF-8
Python
false
false
330
py
# Demonstration of basic list operations: copies vs. aliases, sorting,
# aggregation, and in-place mutation.
values = [-1, 0, -5, -7, -6, 5, 6, 7, 9, 2, -3]

# lista1 is an alias, not a copy: it tracks every later mutation of `values`.
lista1 = values
print('lista1=', lista1)

# sorted() builds a new ascending list and leaves the original untouched.
lista2 = sorted(values)
print('lista2=', lista2)

# In-place descending sort; lista3 aliases the same (now sorted) list.
values.sort(reverse=True)
lista3 = values
print('lista3=', lista3)

# Length and extremes of the list.
print(len(values))
print('nr maxim=', max(values))
print('nr minim=', min(values))

# Append a sentinel value at the end.
values.append(111)
print('lista4=', values)

# Insert at position 1, then drop the sentinel again.
values.insert(1, 222)
values.remove(111)
print('lista5=', values)
[ "noreply@github.com" ]
anastasiacebotari15.noreply@github.com
ee235f82c46f75248d18f091913758a6b068b1f9
87b2725ccb7509cda0d4f719647192c34bbf7471
/HistogramPlot.py
e5c1ce1adf7878de50ebd4567ee1dabb94e7efd0
[]
no_license
sumeyyeakay/CoronaVirusDataAnalysis
f88a5c9698cd6867059a91b5750f4bd14f414d62
45f4b386b95ed2143d96940e74bdc41854cba466
refs/heads/master
2022-09-09T02:19:35.034587
2020-06-01T15:17:18
2020-06-01T15:17:18
268,553,637
1
0
null
null
null
null
UTF-8
Python
false
false
493
py
# -*- coding: utf-8 -*- """ Created on Tue Apr 28 17:44:03 2020 @author: sumeyyeakay Histogram grafikleri """ import pandas as pd import matplotlib.pyplot as plt df=pd.read_csv("covid_19_data.csv") turkiye = df[df["Country/Region"] == "Turkey"] italya = df[df["Country/Region"] == "Italy"] ispanya = df[df["Country/Region"] == "Spain"] plt.hist(italya.Deaths,bins=10) plt.xlabel("Olum Sayisi") plt.ylabel(" Kurtulan Hasta Sayisi") plt.title("Italya Coronovirus Analizi") plt.show()
[ "sumeyyeakayy@gmail.com" ]
sumeyyeakayy@gmail.com
b8c045ccf9fbfd0be6b2357b5c866a6f5f8c45fb
1426511b59ad3e00a3e037ba3377e41828ae4680
/ca_unemp/serializers.py
eab56c3ab66c86dedab623a052c2279bdcf95514
[]
no_license
hillarykhan/ca-unemp-api
4776ed104a026c2d39c44dbbfca60d27f57c50a4
7b27c4aebdfe72bb0282fc28abb60ede9e6f0813
refs/heads/main
2023-08-24T00:58:15.603062
2021-10-27T04:41:13
2021-10-27T04:41:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
196
py
from rest_framework import serializers from .models import Unemployment class StatSerializer(serializers.ModelSerializer): class Meta: model = Unemployment fields = '__all__'
[ "khan.hillary@gmail.com" ]
khan.hillary@gmail.com
acaaff5ac222121f65916b2c51dba801a44b99f3
37496577a9fa05bf949bd018fca17f0b6d546ecd
/client/pdo/client/scripts/AuctionTestCLI.py
4a1e9c064ad1516c800d154a615e56b89dbcc513
[ "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-other-permissive", "LicenseRef-scancode-unknown-license-reference", "Apache-2.0", "LicenseRef-scancode-public-domain", "Zlib", "MIT", "CC-BY-4.0" ]
permissive
EugeneYYY/private-data-objects
cce9250648252f4baf92e0007c9584ac82d46401
d96033bbfa9bd3fe72a549487e8e5c83c7c580ca
refs/heads/master
2020-03-15T07:11:36.278038
2018-05-01T21:04:26
2018-05-01T22:40:35
132,023,932
0
0
null
2018-05-03T16:45:45
2018-05-03T16:45:44
null
UTF-8
Python
false
false
20,659
py
# Copyright 2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os, sys import argparse import random from string import Template import logging logger = logging.getLogger(__name__) import pprint pp = pprint.PrettyPrinter(indent=4) import pdo.common.crypto as pcrypto from pdo.client.SchemeExpression import SchemeExpression from pdo.common.keys import ServiceKeys from pdo.contract import ContractCode from pdo.contract import ContractState from pdo.contract import Contract from pdo.contract import register_contract from pdo.contract import add_enclave_to_contract from pdo.service_client.enclave import EnclaveServiceClient from pdo.service_client.provisioning import ProvisioningServiceClient enclave_services_by_url = {} enclave_services = {} participant_keys = {} ## ----------------------------------------------------------------- ## ----------------------------------------------------------------- def GetEnclaveServiceByURL(url) : global enclave_services_by_url, enclave_service if url not in enclave_services_by_url : eservice = EnclaveServiceClient(url) enclave_services_by_url[url] = eservice enclave_services[eservice.enclave_id] = eservice return enclave_services_by_url[url] ## ----------------------------------------------------------------- ## ----------------------------------------------------------------- def GetKeysForIdentity(config, identity) : key_config = config['Key'] global participant_keys if identity not in participant_keys : #keypath = 
key_config['SearchPath'] #keyfile = Template(key_config['KeyFileTemplate']).substitute({'identity' : identity }) #participant_keys[identity] = ServiceKeys.read_from_file(keyfile, keypath) participant_keys[identity] = ServiceKeys.create_service_keys() return participant_keys[identity] ## ----------------------------------------------------------------- ## ----------------------------------------------------------------- def SendMessageAsIdentity(config, contract, invoker_keys, message, fmt = 'python', wait=False) : ledger_config = config.get('Sawtooth') contract_config = config.get('Contract') try : logger.info('send message %s to contract %s', message, contract.contract_code.name) enclave_id = random.choice(contract.provisioned_enclaves) enclave_service = enclave_services[enclave_id] request = contract.create_update_request(invoker_keys, enclave_service, message) response = request.evaluate() logger.debug('result: %s, ', response.result) except Exception as e : logger.error('method invocation failed for message %s: %s', message, str(e)) sys.exit(-1) try : if wait : response.submit_update_transaction(ledger_config, wait=30) else : response.submit_update_transaction(ledger_config) contract.set_state(response.encrypted_state) data_dir = contract_config['DataDirectory'] contract.contract_state.save_to_cache(data_dir=data_dir) except Exception as e: logger.error('transaction submission failed for message %s; %s', message, str(e)) sys.exit(-1) expression = SchemeExpression.ParseExpression(response.result) if fmt == 'scheme' : return expression elif fmt == 'python' : return expression.value else : raise ValueError('unknown format {}'.format(fmt)) # ----------------------------------------------------------------- # ----------------------------------------------------------------- def CreateAndRegisterContract(config, contract_info, creator_keys) : ledger_config = config.get('Sawtooth') contract_config = config.get('Contract') contract_creator_id = creator_keys.identity 
contract_name = contract_info['Name'] source_file = contract_info['Source'] search_path = contract_config['SourceSearchPath'] contract_code = ContractCode.create_from_scheme_file(contract_name, source_file, search_path = search_path) # -------------------------------------------------- logger.info('register the contract') # -------------------------------------------------- pservice_urls = contract_info.get("ProvisioningServices") provisioning_services = list(map(lambda url : ProvisioningServiceClient(url), pservice_urls)) provisioning_service_keys = list(map(lambda svc : svc.identity, provisioning_services)) contract_id = register_contract(ledger_config, creator_keys, contract_code, provisioning_service_keys) logger.info('registered the contract as %s', contract_id) contract_state = ContractState.create_new_state(contract_id) contract = Contract(contract_code, contract_state, contract_id, contract_creator_id) # -------------------------------------------------- logger.info('provision enclaves') # -------------------------------------------------- eservice_urls = contract_info.get("EnclaveServices") enclave_services = list(map(lambda url : GetEnclaveServiceByURL(url), eservice_urls)) for eservice in enclave_services : secret_list = [] for pservice in provisioning_services : message = pcrypto.string_to_byte_array(eservice.enclave_id + contract_id) signature = creator_keys.sign(message) secret = pservice.get_secret(eservice.enclave_id, contract_id, creator_keys.verifying_key, signature) secret_list.append(secret) secretinfo = eservice.verify_secrets(contract_id, contract_creator_id, secret_list) encrypted_state_encryption_key = secretinfo['encrypted_state_encryption_key'] signature = secretinfo['signature'] txnid = add_enclave_to_contract( ledger_config, creator_keys, contract_id, eservice.enclave_id, secret_list, encrypted_state_encryption_key, signature) contract.set_state_encryption_key(eservice.enclave_id, encrypted_state_encryption_key) # 
-------------------------------------------------- logger.info('create the initial contract state') # -------------------------------------------------- eservice = random.choice(enclave_services) initialize_request = contract.create_initialize_request(creator_keys, eservice) initialize_response = initialize_request.evaluate() contract.set_state(initialize_response.encrypted_state) logger.info('initial state created') # -------------------------------------------------- logger.info('save the initial state in the ledger') # -------------------------------------------------- txnid = initialize_response.submit_initialize_transaction(ledger_config, wait=30) return contract ## ----------------------------------------------------------------- ## ----------------------------------------------------------------- def CreateAssetContract(config) : asset_config = config['AssetContract'] contract_config = config['Contract'] asset_creator_identity = asset_config['Creator'] asset_creator_keys = GetKeysForIdentity(config, asset_creator_identity) contract = CreateAndRegisterContract(config, asset_config, asset_creator_keys) data_dir = contract_config['DataDirectory'] contract.save_to_file(asset_config['Name'], data_dir = data_dir) contract.contract_state.save_to_cache(data_dir = data_dir) return contract ## ----------------------------------------------------------------- ## ----------------------------------------------------------------- def CreateAuctionContract(config) : auction_config = config['AuctionContract'] contract_config = config['Contract'] auction_creator_identity = auction_config['Creator'] auction_creator_keys = GetKeysForIdentity(config, auction_creator_identity) contract = CreateAndRegisterContract(config, auction_config, auction_creator_keys) data_dir = contract_config['DataDirectory'] contract.save_to_file(auction_config['Name'], data_dir = data_dir) contract.contract_state.save_to_cache(data_dir = data_dir) return contract ## 
----------------------------------------------------------------- ## ----------------------------------------------------------------- def CreateRandomAsset(config, asset_contract, invoker_keys, assetname, value = None) : params = {} params['asset'] = "asset_" + assetname params['value'] = random.randint(0, 100) if value is None else value message = Template("'(create \"${asset}\" ${value})").substitute(params) logger.info('create asset %s with value %s', params['asset'], params['value']) result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message) if result is None : raise Exception('failed to create random asset') return params['asset'] ## ----------------------------------------------------------------- ## ----------------------------------------------------------------- def EscrowAsset(config, asset_contract, invoker_keys, asset, pubkey) : ## ( ((key "auction") (value 5) (owner "<ownerid>")) "<signature>" ) # first pass... escrow the asset and push the transaction message = "'(escrow \"{0}\" \"{1}\")".format(asset, pubkey) result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message) # get the escrow attestation for handoff to the auction message = "'(escrow-attestation \"{0}\")".format(asset) result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message, fmt='scheme') return (str(result.nth(0)), str(result.nth(1)), str(result.nth(2))) ## ----------------------------------------------------------------- ## ----------------------------------------------------------------- def CancelBid(config, auction_contract, asset_contract, invoker_keys) : try : message = "'(cancel-bid)" result = SendMessageAsIdentity(config, auction_contract, invoker_keys, message) message = "'(cancel-attestation)" result = SendMessageAsIdentity(config, auction_contract, invoker_keys, message, fmt='scheme') ## should be: (((key "offered") (value X) (owner "<ownerid")) (dependencies) "<signature>") assetkey = dict(result.nth(0).value)['key'] 
dependencies = str(result.nth(1)) signature = str(result.nth(2)) message = "'(disburse \"{0}\" {1} {2})".format(assetkey, dependencies, signature) result = SendMessageAsIdentity(config, asset_contract, invoker_keys, message) except : pass ## ----------------------------------------------------------------- ## ----------------------------------------------------------------- def LocalMain(config) : asset_config = config['AssetContract'] auction_config = config['AuctionContract'] user_config = config['Participants'] auction_keys = GetKeysForIdentity(config, auction_config['Creator']) asset_keys = GetKeysForIdentity(config, asset_config['Creator']) # create the asset contract asset_contract = CreateAssetContract(config) asset_contract_pubkey = SendMessageAsIdentity(config, asset_contract, asset_keys, "'(get-public-signing-key)", fmt='python') # ---------- create the asset to use for the auction, minimum bid is 10 ---------- auction_asset = CreateRandomAsset(config, asset_contract, auction_keys, 'auction', value = 10) # ---------- create the assets for each of the identities ---------- assetmap = {} for identity in user_config['Asset'] : user_keys = GetKeysForIdentity(config, identity) assetmap[identity] = CreateRandomAsset(config, asset_contract, user_keys, identity) # ---------- create and initialize the auction contract ---------- auction_contract = CreateAuctionContract(config) auction_contract_pubkey = SendMessageAsIdentity(config, auction_contract, auction_keys, "'(get-public-signing-key)", fmt='python') message = "'(initialize \"{0}\")".format(asset_contract_pubkey) result = SendMessageAsIdentity(config, auction_contract, auction_keys, message, wait=True) # ---------- escrow the auction asset and prime the auction---------- (ecounter, edependencies, esignature) = EscrowAsset( config, asset_contract, auction_keys, auction_asset, str(auction_contract_pubkey)) message = "'(prime-auction* {0} {1} {2})".format(ecounter, edependencies, esignature) result = 
SendMessageAsIdentity(config, auction_contract, auction_keys, message) # ---------- submit bids ---------- for identity in user_config['Auction'] : asset = assetmap[identity] user_keys = GetKeysForIdentity(config, identity) (ecounter, edependencies, esignature) = EscrowAsset( config, asset_contract, user_keys, asset, auction_contract_pubkey) message = "'(submit-bid* {0} {1} {2})".format(ecounter, edependencies, esignature) result = SendMessageAsIdentity(config, auction_contract, user_keys, message) ## ================================================================= # we have to wait for the transactions to commit before we continue #WaitForStateCommit(lwc, PrivateContractTransaction, asset_contract.ContractID, asset_contract.State.ComputeHash()) #WaitForStateCommit(lwc, PrivateContractTransaction, auction_contract.ContractID, auction_contract.State.ComputeHash()) ## ================================================================= # ---------- get the max bid ---------- message = "'(max-bid)" result = SendMessageAsIdentity(config, auction_contract, auction_keys, message) logger.info("maximum bid: %s", str(result)) # ---------- close the bidding and transfer the assets ---------- message = "'(close-bidding)" result = SendMessageAsIdentity(config, auction_contract, auction_keys, message) message = "'(exchange-attestation)" result = SendMessageAsIdentity(config, auction_contract, auction_keys, message, fmt='scheme') ## should be: (((key "offered") (value X) (owner "<ownerid")) ((key "bid") (value X) (owner "<ownerid")) dep sig) logger.debug("closed bidding with result: %s", str(result)) offered = dict(result.nth(0).value) maxbid = dict(result.nth(1).value) dependencies = str(result.nth(2)) signature = str(result.nth(3)) logger.info('exchange ownership of keys %s and %s', offered['key'], maxbid['key']) message = "'(exchange-ownership \"{0}\" \"{1}\" {2} {3})".format(offered['key'], maxbid['key'], dependencies, signature) result = SendMessageAsIdentity(config, 
asset_contract, auction_keys, message) # ---------- cancel the remaining bids ---------- for identity in user_config['Auction'] : logger.info("attempt to cancel bid for %s", identity) user_keys = GetKeysForIdentity(config, identity) CancelBid(config, auction_contract, asset_contract, user_keys) # ---------- dump the final state of the contract ---------- result = SendMessageAsIdentity(config, asset_contract, asset_keys, "'(get-state)", fmt='python', wait=True) pp.pprint(result) print("auction contract id = {0}".format(auction_contract.contract_id)) print("asset contract id = {0}".format(asset_contract.contract_id)) sys.exit(0) ## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ## DO NOT MODIFY BELOW THIS LINE ## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ## ----------------------------------------------------------------- ContractHost = os.environ.get("HOSTNAME", "localhost") ContractHome = os.environ.get("CONTRACTHOME") or os.path.realpath("/opt/pdo") ContractEtc = os.environ.get("CONTRACTETC") or os.path.join(ContractHome, "etc") ContractKeys = os.environ.get("CONTRACTKEYS") or os.path.join(ContractHome, "keys") ContractLogs = os.environ.get("CONTRACTLOGS") or os.path.join(ContractHome, "logs") ContractData = os.environ.get("CONTRACTDATA") or os.path.join(ContractHome, "data") ScriptBase = os.path.splitext(os.path.basename(sys.argv[0]))[0] config_map = { 'base' : ScriptBase, 'data' : ContractData, 'etc' : ContractEtc, 'home' : ContractHome, 'host' : ContractHost, 'keys' : ContractKeys, 'logs' : ContractLogs } # ----------------------------------------------------------------- # ----------------------------------------------------------------- def 
Main() : import pdo.common.config as pconfig import pdo.common.logger as plogger # parse out the configuration file first conffiles = [ 'auction-test.toml' ] confpaths = [ ".", "./etc", ContractEtc ] parser = argparse.ArgumentParser() parser.add_argument('--config', help='configuration file', nargs = '+') parser.add_argument('--config-dir', help='configuration file', nargs = '+') parser.add_argument('--logfile', help='Name of the log file, __screen__ for standard output', type=str) parser.add_argument('--loglevel', help='Logging level', type=str) parser.add_argument('--ledger', help='URL for the Sawtooth ledger', type=str) parser.add_argument('--asset-contract', help='Name of the asset contract', default="integer-key", type = str) parser.add_argument('--asset-identity', help='Identity to use for the asset contract', default="ikey-contract", type=str) parser.add_argument('--auction-contract', help='Name of the auction contract', default="auction", type = str) parser.add_argument('--auction-identity', help='Identity to use for the auction contract', default="auc-contract", type=str) parser.add_argument('--key-dir', help='Directories to search for key files', nargs='+') parser.add_argument('--contract-dir', help='Directories to search for contract files', nargs='+') options = parser.parse_args() # first process the options necessary to load the default configuration if options.config : conffiles = options.config if options.config_dir : confpaths = options.config_dir global config_map config_map['assetidentity'] = options.asset_identity config_map['assetcontract'] = options.asset_contract config_map['auctionidentity'] = options.auction_identity config_map['auctioncontract'] = options.auction_contract try : config = pconfig.parse_configuration_files(conffiles, confpaths, config_map) except pconfig.ConfigurationException as e : logger.error(str(e)) sys.exit(-1) # set up the logging configuration if config.get('Logging') is None : config['Logging'] = { 'LogFile' : 
'__screen__', 'LogLevel' : 'INFO' } if options.logfile : config['Logging']['LogFile'] = options.logfile if options.loglevel : config['Logging']['LogLevel'] = options.loglevel.upper() plogger.setup_loggers(config.get('Logging', {})) # set up the ledger configuration if config.get('Sawtooth') is None : config['Sawtooth'] = { 'LedgerURL' : 'http://localhost:8008', } if options.ledger : config['Sawtooth']['LedgerURL'] = options.ledger # set up the key search paths if config.get('Key') is None : config['Key'] = { 'SearchPath' : ['.', './keys', ContractKeys] } if options.key_dir : config['Key']['SearchPath'] = options.key_dir # set up the data paths if config.get('Contract') is None : config['Contract'] = { 'SourceSearchPath' : [ '.', './contract', os.path.join(ContractHome, 'contracts') ] } if options.contract_dir : config['Contract']['SourceSearchPath'] = options.contract_dir # GO! LocalMain(config) ## ----------------------------------------------------------------- ## Entry points ## ----------------------------------------------------------------- Main()
[ "byron.marohn@intel.com" ]
byron.marohn@intel.com
d827d71d9c05c7c9a359841ae13e780b7c1620e1
0e0bd9d0082bf71918db9f6c92c2cefd32fd23bd
/guild/commands/runs_import.py
354c23dc47578e9820036cf0779f49107bcd69fb
[ "Apache-2.0", "LicenseRef-scancode-free-unknown" ]
permissive
christabella/guildai
b911d9758296503c431b571dc4696a3690f44b3d
10d34eb9aa02aa4a374c340e75b5d44d9f3d8a25
refs/heads/master
2022-12-17T18:34:45.766299
2020-08-31T12:42:25
2020-08-31T12:42:25
294,189,964
0
0
Apache-2.0
2020-09-09T18:02:13
2020-09-09T18:02:12
null
UTF-8
Python
false
false
2,500
py
# Copyright 2017-2020 TensorHub, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division import click from guild import click_util from . import runs_support def _ac_archive(**_kw): return click_util.completion_dir() def import_params(fn): click_util.append_params( fn, [ runs_support.runs_arg, click.Argument(("archive",)), click.Option( ("-m", "--move"), help="Move imported runs rather than copy.", is_flag=True, ), click.Option( ("--copy-resources",), help="Copy resources for each imported run.", is_flag=True, ), runs_support.all_filters, click.Option( ("-y", "--yes"), help="Do not prompt before importing.", is_flag=True ), ], ) assert fn.__click_params__[-1].name == "runs", fn.__click_params__ fn.__click_params__[-1].autocompletion = _ac_archive return fn @click.command("import") @import_params @click.pass_context @click_util.use_args @click_util.render_doc def import_runs(ctx, args): """Import one or more runs from `ARCHIVE`. `ARCHIVE` must be a directory that contains exported runs. Archive directories can be created using ``guild export``. You may use ``guild runs list --archive ARCHIVE`` to view runs in `ARCHIVE`. By default, resources are NOT copied with each imported run, but their links are maintained. To copy resources, use `--copy-resources`. **WARNING**: Use `--copy-resources` with care as each imported run will contain a separate copy of each resource! 
{{ runs_support.runs_arg }} If a `RUN` argument is not specified, ``:`` is assumed (all runs are selected). {{ runs_support.all_filters }} """ from . import runs_impl runs_impl.import_(args, ctx)
[ "g@rre.tt" ]
g@rre.tt
af0407d686f5be807f2d3d4b938ec56483a3f89e
d6b0bc433b260b5d519d73087d5df46aa516fcdd
/biobb_adapters/pycompss/biobb_amber/pmemd/pmemd_mdrun.py
e94945a6809b7c30cc12c1d92b7e2ea6151423f4
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
bioexcel/biobb_adapters
b5442fe953b90be4e66faf3460b4a88a40e6d448
3daa84ba83a7951add017dd0f05dc361aa99dfe5
refs/heads/master
2023-08-14T08:46:39.323257
2023-08-02T09:05:21
2023-08-02T09:05:21
157,351,268
0
2
Apache-2.0
2023-04-01T14:56:43
2018-11-13T09:07:36
Common Workflow Language
UTF-8
Python
false
false
3,420
py
# Python import os import sys import traceback # Pycompss from pycompss.api.task import task from pycompss.api.parameter import FILE_IN, FILE_OUT from pycompss.api.multinode import multinode from pycompss.api.constraint import constraint # Adapters commons pycompss from biobb_adapters.pycompss.biobb_commons import task_config # Wrapped Biobb from biobb_amber.pmemd.pmemd_mdrun import PmemdMDRun # Importing class instead of module to avoid name collision task_time_out = int(os.environ.get('TASK_TIME_OUT', 0)) computing_nodes = str(os.environ.get('TASK_COMPUTING_NODES', "1")) computing_units = str(os.environ.get('TASK_COMPUTING_UNITS', "1")) gpu_units = str(os.environ.get('TASK_GPU_UNITS', "0")) @constraint(processors=[{'processorType':'CPU', 'computingUnits':computing_units}, {'processorType':'GPU', 'computingUnits':gpu_units}]) @multinode(computing_nodes=computing_nodes) @task(input_top_path=FILE_IN, input_crd_path=FILE_IN, output_log_path=FILE_OUT, output_traj_path=FILE_OUT, output_rst_path=FILE_OUT, input_mdin_path=FILE_IN, input_cpin_path=FILE_IN, input_ref_path=FILE_IN, output_cpout_path=FILE_OUT, output_cprst_path=FILE_OUT, output_mdinfo_path=FILE_OUT, on_failure="IGNORE", time_out=task_time_out) def _pmemdmdrun(input_top_path, input_crd_path, output_log_path, output_traj_path, output_rst_path, input_mdin_path, input_cpin_path, input_ref_path, output_cpout_path, output_cprst_path, output_mdinfo_path, properties, **kwargs): task_config.config_multinode(properties) try: PmemdMDRun(input_top_path=input_top_path, input_crd_path=input_crd_path, output_log_path=output_log_path, output_traj_path=output_traj_path, output_rst_path=output_rst_path, input_mdin_path=input_mdin_path, input_cpin_path=input_cpin_path, input_ref_path=input_ref_path, output_cpout_path=output_cpout_path, output_cprst_path=output_cprst_path, output_mdinfo_path=output_mdinfo_path, properties=properties, **kwargs).launch() except Exception as e: traceback.print_exc() raise e finally: 
sys.stdout.flush() sys.stderr.flush() def pmemd_mdrun(input_top_path, input_crd_path, output_log_path, output_traj_path, output_rst_path, input_mdin_path=None, input_cpin_path=None, input_ref_path=None, output_cpout_path=None, output_cprst_path=None, output_mdinfo_path=None, properties=None, **kwargs): if (output_log_path is None or (os.path.exists(output_log_path) and os.stat(output_log_path).st_size > 0)) and \ (output_traj_path is None or (os.path.exists(output_traj_path) and os.stat(output_traj_path).st_size > 0)) and \ (output_rst_path is None or (os.path.exists(output_rst_path) and os.stat(output_rst_path).st_size > 0)) and \ (output_cpout_path is None or (os.path.exists(output_cpout_path) and os.stat(output_cpout_path).st_size > 0)) and \ (output_cprst_path is None or (os.path.exists(output_cprst_path) and os.stat(output_cprst_path).st_size > 0)) and \ (output_mdinfo_path is None or (os.path.exists(output_mdinfo_path) and os.stat(output_mdinfo_path).st_size > 0)) and \ True: print("WARN: Task PmemdMDRun already executed.") else: _pmemdmdrun( input_top_path, input_crd_path, output_log_path, output_traj_path, output_rst_path, input_mdin_path, input_cpin_path, input_ref_path, output_cpout_path, output_cprst_path, output_mdinfo_path, properties, **kwargs)
[ "andriopau@gmail.com" ]
andriopau@gmail.com
0a231f8117213d6f61ad97b649f38245442e0a0c
afd3464dd2c290b7db5fe379d4374183ea6bd0c3
/catkin_ws/build/pick_objects/catkin_generated/pkg.develspace.context.pc.py
fd44bd91cf37e798cac9f2a7cf2459aba475bc25
[ "MIT" ]
permissive
thatting/thomas-hatting-home-service-robot
7d0750367e5b5bfa48ab697a8fd7796b1338a662
481490eec2d61303e694593f8f018858c82eaac3
refs/heads/master
2020-03-22T18:27:40.537755
2019-02-14T13:13:14
2019-02-14T13:13:14
140,461,737
0
0
null
null
null
null
UTF-8
Python
false
false
374
py
# generated from catkin/cmake/template/pkg.context.pc.in CATKIN_PACKAGE_PREFIX = "" PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else [] PROJECT_CATKIN_DEPENDS = "".replace(';', ' ') PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else [] PROJECT_NAME = "pick_objects" PROJECT_SPACE_DIR = "/home/nvidia/catkin_ws/devel" PROJECT_VERSION = "0.0.0"
[ "thomashatting@gmail.com" ]
thomashatting@gmail.com
47b910274ca6546bd96488e2c3027896b833a188
7abd8bbbba8f401c4ce9d9ec550a0cae4a6f19ed
/bingads/v12/bulk/entities/__init__.py
afc5d3d8bf175347a50c466420cd874f00447f89
[ "MIT" ]
permissive
stevenblanton/BingAds-Python-SDK
fd2f119db51e1a91962aa5ee4bb86344e58078a8
5b6e6499ae1dcc6fb8ba3032ad1a2b6ee63705c9
refs/heads/master
2020-09-05T12:11:04.168580
2019-11-01T15:49:08
2019-11-01T15:49:08
null
0
0
null
null
null
null
UTF-8
Python
false
false
909
py
__author__ = 'Bing Ads SDK Team' __email__ = 'bing_ads_sdk@microsoft.com' from .common import * from .bulk_error import * from .bulk_entity import * from .bid_suggestion_data import * from .unknown_bulk_entity import * from .bulk_account import * from .bulk_budget import * from .bulk_campaign import * from .bulk_ad_group import * from .bulk_keyword import * from .bulk_campaign_product_scope import * from .bulk_ad_group_product_partition import * from .bulk_campaign_negative_dynamic_search_ad_target import * from .bulk_ad_group_dynamic_search_ad_target import * from .bulk_ad_group_negative_dynamic_search_ad_target import * from .ad_extensions import * from .bulk_ads import * from .bulk_negative_keywords import * from .bulk_negative_sites import * from .audiences import * from .target_criterions import * from .labels import * from .bulk_offline_conversion import * from .bulk_experiment import *
[ "qitia@microsoft.com" ]
qitia@microsoft.com
a3111a79779a9ea0cab3118b5d7b33943dbded16
98fe6781483ec7ff2a8016916edb2611d5c2e64c
/other/text_analysis_report.py
9a852707872523ccce57b5824953e76709b213d4
[]
no_license
catris25/review_rating_prediction
124262d3baed594d812cb1459c3b95cb6a718312
fc296a58e39943d2021263e456dbfdd8b972308a
refs/heads/master
2021-01-16T17:49:47.367954
2018-08-14T05:35:44
2018-08-14T05:35:44
100,015,914
0
0
null
null
null
null
UTF-8
Python
false
false
2,208
py
import numpy as np import pandas as pd import re, math from collections import Counter import matplotlib.pyplot as plt from nltk.tokenize import sent_tokenize, word_tokenize # from nltk.tokenize import RegexpTokenizer, PunktSentenceTokenizer, TweetTokenizer # REMOVE ALL PUNCTUATIONS AND THEN TOKENIZE THE TEXT def tokenize_df(df): df_token = [] for review in df['reviewText']: temp = review sent_length = len(sent_tokenize(temp)) temp = re.sub("[^a-zA-Z']", " ", str(review)) temp = temp.replace("'", "") temp = temp.lower() word_length = len(word_tokenize(temp)) df_token.append({'reviewText': temp, 'word':word_length, 'sentence':sent_length}) df_token = pd.DataFrame(df_token) return df_token input_file='/home/lia/Documents/the_project/dataset/to_use/current/top_30.csv' # input_file = '/home/lia/Documents/the_project/dataset/to_use/helpfulness/samples/30percent/6.csv' df = pd.read_csv(input_file) new_df = tokenize_df(df) print(new_df.describe()) print(new_df.head(10)) # data = new_df['word'] # # plt.hist(data, bins=200) # plt.show() # def outliers_z_score(ys): # threshold = 3 # # mean_y = np.mean(ys) # stdev_y = np.std(ys) # z_scores = [(y - mean_y) / stdev_y for y in ys] # return np.where(np.abs(z_scores) > threshold) # # oz = outliers_z_score(data) # print(oz) # print('Number of words {}'.format (Counter(new_df['word']))) # print('Number of sentences {}'.format (Counter(new_df['sentence']))) # labels, values = zip(*Counter(data).items()) # # indexes = np.arange(len(labels)) # width = 1 # # plt.bar(indexes, values, width) # plt.xticks(indexes + width * 0.5, labels,rotation = "vertical") # plt.show() # for w in new_df['word']: # if w<=10: # print(w) too_long = df.loc[new_df['word'] >= 1000, 'reviewText'] too_short = df.loc[new_df['word'] <= 10, 'reviewText'] print('too long:', len(too_long)) print('too short:', len(too_short)) df['word'] = new_df['word'] del_id = too_long.index.append(too_short.index) temp_df = df.drop(df.index[[del_id]]) print(temp_df.head(10)) # # 
temp_df.to_csv('/home/lia/Documents/the_project/dataset/top_10_movies/top_10_clean.csv')
[ "blue.star95@outlook.com" ]
blue.star95@outlook.com
c7571fd6f80cb52e31b43fa0fa9746d3faafb0c1
de817cc84baa1ca5cef3ceaff56dc235b00073d9
/dokdo.py
dfbc8aad66a041c9a7e72135de16c2b1fb75b035
[ "MIT" ]
permissive
song9446/Dokdo-HTML-template-compiler-python3
32023dd38f57b091a6d4a8288e07ddb8663c892e
2d26aa7d84c0c7606ae5140126691d6f1a6e930e
refs/heads/master
2020-04-09T01:31:23.923621
2018-12-01T06:36:39
2018-12-01T06:36:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,558
py
#!/usr/bin/python3 from string import Template import lxml.html from lxml import etree import copy import re import os import sass VERSION = "0.1" variable_pattern = re.compile("\{\{\{([^}]+)\}\}\}") def dom2str(element): return lxml.html.tostring(element, encoding=str) def dom2innerstr(element): text = lxml.html.tostring(element, encoding=str) return text[text.find(">")+1:text.rfind("<")] def replace(text, rule, replacer): matches = [(match.start(), match.end(), match.groups()[0].strip()) for match in re.finditer(rule, text)] matches.reverse() characters = list(text) for start, end, variable in matches: characters[start:end] = replacer(variable) return "".join(characters) def compile(path, variables={}, innerhtmls=[], isroot=True, statics={}): # 1. build tree with open(path) as f: text = f.read() # 1.1. replace variable replace(text, variable_pattern, lambda x: variables[x]) if text.strip().startswith("<!DOCTYPE") or text.strip().startswith("<html"): roots = (lxml.html.fromstring(text),) else: roots = lxml.html.fragments_fromstring(text) # 2. substract styles & statics styles = [root for root in roots if root.tag == "style"] + \ [style.drop_tree() or style for root in roots for style in root.xpath(".//style")] for style in styles: if style.get("type") is "text/scss": style.text = sass.compile(string=style.text) poststatics = [root for root in roots if root.tag == "static" and "post" in root.attrib] + \ [static.drop_tree() or static for root in roots for static in root.xpath(".//static") if "post" in static.attrib] prestatics = [root for root in roots if root.tag == "static" and "pre" in root.attrib] + \ [static.drop_tree() or static for root in roots for static in root.xpath(".//static") if "pre" in static.attrib] roots = list(filter(lambda x: x.tag not in ("style", "static"), roots)) if path not in statics: statics[path] = (styles, poststatics, prestatics) # 3. 
replace imports for imp in (imp for root in roots for imp in root.xpath("//import")): ipath = os.path.join(os.path.dirname(path), imp.get("path")) importing_roots = compile(ipath, variables=imp.attrib, innerhtmls=imp, isroot=False, statics=statics) if len(importing_roots) == 1: importing_roots[0].attrib.update(imp.attrib) if imp in roots: imp_index = roots.index(imp) roots = list(filter(lambda x: x!=imp, roots)) for i, root in enumerate(importing_roots): roots.insert(imp_index + i, root) else: imp_parent = imp.getparent() imp_index = imp_parent.index(imp) imp.drop_tree() for i, root in enumerate(importing_roots): imp_parent.insert(imp_index + i, root) # 4. replace innerhtmls innerhtml_map = {innerhtml.get("id", i):innerhtml for i, innerhtml in enumerate(innerhtmls)} target_innerhtmls = [innerhtml for root in roots for innerhtml in root.xpath(".//innerhtml")] for i, target_innerhtml in enumerate(target_innerhtmls): id_ = target_innerhtml.get("id", i) if id_ in innerhtml_map: innerhtml_map[id_].attrib.update(target_innerhtml.attrib) target_innerhtml.getparent().replace(target_innerhtml, innerhtml_map[id_]) else: target_innerhtml.drop_tree() # 5. 
if this is a root: put statics and return string if isroot: head = roots[0].xpath("//head")[0] body = roots[0].xpath("//body")[0] etree.SubElement(head, "style").text = "".join((sass.compile(string=dom2innerstr(style)) if style.get("type", "text/css") == "text/scss" else dom2innerstr(style)) \ for i in statics for style in statics[i][0]) for i in statics: for poststatic in statics[i][1]: body.append(poststatic) for prestatic in statics[i][2]: head.append(prestatic) return "".join(dom2str(root) for root in roots) else: return roots if __name__ == "__main__": from optparse import OptionParser parser = OptionParser(usage="usage: %prog [options] filename", version="%prog {}".format(VERSION)) parser.add_option("-c", "--src", dest="source", help="source html path", metavar="SRC") parser.add_option("-o", "--out", action="store_false", dest="out", default="a.html", help="destination of output", metavar="OUT") parser.add_option("-C", "--srcdir", dest="sourcedir", help="source dir path(it filters html files automatically)", default="src", metavar="SRCDIR") parser.add_option("-O", "--outdir", dest="outdir", default="build", help="out dir path", metavar="OUTDIR") (option, tags) = parser.parse_args() if tags: print(compile(tags[0])) else: if option.source: with open(option.out, "w") as f: f.write(compile(tags[0])) elif option.sourcedir: compilables = [os.path.join(d, f) for (d, _, fs) in os.walk(option.sourcedir) for f in fs if f.endswith(".html")] if not os.path.exists(option.outdir): os.makedirs(option.outdir) for source in compilables: with open(os.path.join(option.outdir, os.path.basename(source)), "w") as f: f.write(compile(source))
[ "song9446@unist.ac.kr" ]
song9446@unist.ac.kr
c17cbfb454897e208edc74fb6406665a5bd37389
1debb684db5f2434de3793751afc45edcb2d584f
/apps/gtask/templatetags/datetime_tags.py
701d99da0ef2467c96ac5c4250f7b89bba8ee4e1
[]
no_license
rosscdh/SuperDMon
2524aaa1429ce82558723ad5ea8833698380fb85
d0e6dd2f9d2237320b19b53b9be37c888f8c40ff
refs/heads/master
2016-09-05T13:33:55.294196
2012-02-07T14:52:34
2012-02-07T14:52:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
252
py
from datetime import datetime from django import template register = template.Library() @register.filter("timestamp") def timestamp(value): try: return datetime.fromtimestamp(value) except AttributeError: return datetime.now()
[ "ross.crawford@sedo.com" ]
ross.crawford@sedo.com
73b6a55d16f9a0ddb2370537646877ecaa9d332e
464b6f3a8e3662ecc357735b17c5fe859aa9f3e3
/StanCode-Projects/searching_name_system/babygraphics.py
2ee9308af259ad456f5b8b65ffae016860eaec6b
[ "MIT" ]
permissive
jennywei1995/sc-projects
a840f1fcb6e691999a6b8ac31a53c8a5b0f260b8
ec192434a967d68fee4f772ae907e5ef5fa556d2
refs/heads/main
2022-12-30T13:06:44.186249
2020-10-20T07:56:43
2020-10-20T07:56:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,451
py
""" File: babygraphics.py Name: Jenny Wei ----------------- SC101 Baby Names Project Adapted from Nick Parlante's Baby Names assignment by Jerry Liao. ----------------- This file will create a canvas and enable user to use the program to search for babes' names' ranks over decades. Once the user search a name, the corresponding rank in a specific year will be found and added to the canvas, there will also be lines to connect each years' rank and draw a run chart. """ import tkinter import babynames import babygraphicsgui as gui FILENAMES = [ 'data/full/baby-1900.txt', 'data/full/baby-1910.txt', 'data/full/baby-1920.txt', 'data/full/baby-1930.txt', 'data/full/baby-1940.txt', 'data/full/baby-1950.txt', 'data/full/baby-1960.txt', 'data/full/baby-1970.txt', 'data/full/baby-1980.txt', 'data/full/baby-1990.txt', 'data/full/baby-2000.txt', 'data/full/baby-2010.txt' ] CANVAS_WIDTH = 1000 CANVAS_HEIGHT = 600 YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010] GRAPH_MARGIN_SIZE = 20 COLORS = ['red', 'purple', 'green', 'blue'] TEXT_DX = 2 LINE_WIDTH = 2 MAX_RANK = 1000 def get_x_coordinate(width, year_index): """ Given the width of the canvas and the index of the current year in the YEARS list, returns the x coordinate of the vertical line associated with that year. Input: width (int): The width of the canvas year_index (int): The index of the current year in the YEARS list Returns: x_coordinate (int): The x coordinate of the vertical line associated with the specified year. """ x_range = (width - (GRAPH_MARGIN_SIZE * 2)) / len(YEARS) x_coordinate = int(GRAPH_MARGIN_SIZE + (x_range * year_index)) return x_coordinate def draw_fixed_lines(canvas): """ Erases all existing information on the given canvas and then draws the fixed background lines on it. Input: canvas (Tkinter Canvas): The canvas on which we are drawing. Returns: This function does not return any value. 
""" canvas.delete('all') # delete all existing lines from the canvas # to draw the peripheral line canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE) canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE) canvas.create_line(GRAPH_MARGIN_SIZE, 0, GRAPH_MARGIN_SIZE, CANVAS_HEIGHT) # to draw the line that evenly divided the according to how many years are provided for i in range(len(YEARS)): line_x = get_x_coordinate(CANVAS_WIDTH, i) canvas.create_line(line_x, 0, line_x, CANVAS_HEIGHT) canvas.create_text(line_x + TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, text=YEARS[i], anchor=tkinter.NW) def draw_names(canvas, name_data, lookup_names): """ Given a dict of baby name data and a list of name, plots the historical trend of those names onto the canvas. Input: canvas (Tkinter Canvas): The canvas on which we are drawing. name_data (dict): Dictionary holding baby name data lookup_names (List[str]): A list of names whose data you want to plot Returns: This function does not return any value. 
""" draw_fixed_lines(canvas) # draw the fixed background grid # once the user click enter, the data will be shown y_position = ((CANVAS_HEIGHT - (GRAPH_MARGIN_SIZE * 2)) / (MAX_RANK-1)) for i in range(len(lookup_names)): # to determine the color of data's text and line if i <= len(COLORS)-1: color = COLORS[i] else: # while the given colors are all used, the color data used will start from the first color color = COLORS[int((i % len(COLORS)))] # to find the dic of the name that contains its rank over the years baby_dic = name_data[lookup_names[i]] # to create a year list to check if the year matches the constant year list new_year_lst = [] for year, rank in baby_dic.items(): new_year_lst.append(year) # if the names' data doesn't exit in the given file for k in range(len(YEARS)): # assign these names' rank as 1001 if f'{YEARS[k]}' not in new_year_lst: baby_dic[f'{YEARS[k]}'] = '1001' # a list that will contain the y value y_list = [] for j in range(len(YEARS)): for year in baby_dic: # to find the rank of the given name in specific year rank = baby_dic[year] # to add the text of name and its rank of a specific year to the canvas line_x = get_x_coordinate(CANVAS_WIDTH, j) if int(year) == YEARS[j]: if int(rank) > MAX_RANK: new_rank = '*' else: new_rank = rank canvas.create_text(line_x + TEXT_DX, int(y_position * int(rank) + GRAPH_MARGIN_SIZE), text=f'{lookup_names[i]} {new_rank}', anchor=tkinter.SW, fill=color) y_list.append(int(y_position * int(rank) + GRAPH_MARGIN_SIZE)) # to draw the line that connects each year's rank data on the canvas for j in range(len(YEARS) - 1): line_x = get_x_coordinate(CANVAS_WIDTH, j) line_x1 = get_x_coordinate(CANVAS_WIDTH, j + 1) line_y = y_list[j] line_y1 = y_list[j + 1] canvas.create_line(line_x, line_y, line_x1, line_y1, width=LINE_WIDTH, fill=color) # main() code is provided, feel free to read through it but DO NOT MODIFY def main(): # Load data name_data = babynames.read_files(FILENAMES) # Create the window and the canvas top = 
tkinter.Tk() top.wm_title('Baby Names') canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names) # Call draw_fixed_lines() once at startup so we have the lines # even before the user types anything. draw_fixed_lines(canvas) # This line starts the graphical loop that is responsible for # processing user interactions and plotting data top.mainloop() if __name__ == '__main__': main()
[ "noreply@github.com" ]
jennywei1995.noreply@github.com
61a49f9ce140730c3fb6b664ca5ac5bc8085cfb0
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
/google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/types/media_file_service.py
d18d6a8d09b03c92f8310398e3c6a6a1be1ac137
[ "Apache-2.0" ]
permissive
oltoco/googleapis-gen
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
refs/heads/master
2023-07-17T22:11:47.848185
2021-08-29T20:39:47
2021-08-29T20:39:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,355
py
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore from google.ads.googleads.v6.enums.types import response_content_type as gage_response_content_type from google.ads.googleads.v6.resources.types import media_file as gagr_media_file from google.rpc import status_pb2 # type: ignore __protobuf__ = proto.module( package='google.ads.googleads.v6.services', marshal='google.ads.googleads.v6', manifest={ 'GetMediaFileRequest', 'MutateMediaFilesRequest', 'MediaFileOperation', 'MutateMediaFilesResponse', 'MutateMediaFileResult', }, ) class GetMediaFileRequest(proto.Message): r"""Request message for [MediaFileService.GetMediaFile][google.ads.googleads.v6.services.MediaFileService.GetMediaFile] Attributes: resource_name (str): Required. The resource name of the media file to fetch. """ resource_name = proto.Field( proto.STRING, number=1, ) class MutateMediaFilesRequest(proto.Message): r"""Request message for [MediaFileService.MutateMediaFiles][google.ads.googleads.v6.services.MediaFileService.MutateMediaFiles] Attributes: customer_id (str): Required. The ID of the customer whose media files are being modified. operations (Sequence[google.ads.googleads.v6.services.types.MediaFileOperation]): Required. The list of operations to perform on individual media file. partial_failure (bool): If true, successful operations will be carried out and invalid operations will return errors. 
If false, all operations will be carried out in one transaction if and only if they are all valid. Default is false. validate_only (bool): If true, the request is validated but not executed. Only errors are returned, not results. response_content_type (google.ads.googleads.v6.enums.types.ResponseContentTypeEnum.ResponseContentType): The response content type setting. Determines whether the mutable resource or just the resource name should be returned post mutation. """ customer_id = proto.Field( proto.STRING, number=1, ) operations = proto.RepeatedField( proto.MESSAGE, number=2, message='MediaFileOperation', ) partial_failure = proto.Field( proto.BOOL, number=3, ) validate_only = proto.Field( proto.BOOL, number=4, ) response_content_type = proto.Field( proto.ENUM, number=5, enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType, ) class MediaFileOperation(proto.Message): r"""A single operation to create media file. Attributes: create (google.ads.googleads.v6.resources.types.MediaFile): Create operation: No resource name is expected for the new media file. """ create = proto.Field( proto.MESSAGE, number=1, oneof='operation', message=gagr_media_file.MediaFile, ) class MutateMediaFilesResponse(proto.Message): r"""Response message for a media file mutate. Attributes: partial_failure_error (google.rpc.status_pb2.Status): Errors that pertain to operation failures in the partial failure mode. Returned only when partial_failure = true and all errors occur inside the operations. If any errors occur outside the operations (e.g. auth errors), we return an RPC level error. results (Sequence[google.ads.googleads.v6.services.types.MutateMediaFileResult]): All results for the mutate. """ partial_failure_error = proto.Field( proto.MESSAGE, number=3, message=status_pb2.Status, ) results = proto.RepeatedField( proto.MESSAGE, number=2, message='MutateMediaFileResult', ) class MutateMediaFileResult(proto.Message): r"""The result for the media file mutate. 
Attributes: resource_name (str): The resource name returned for successful operations. media_file (google.ads.googleads.v6.resources.types.MediaFile): The mutated media file with only mutable fields after mutate. The field will only be returned when response_content_type is set to "MUTABLE_RESOURCE". """ resource_name = proto.Field( proto.STRING, number=1, ) media_file = proto.Field( proto.MESSAGE, number=2, message=gagr_media_file.MediaFile, ) __all__ = tuple(sorted(__protobuf__.manifest))
[ "bazel-bot-development[bot]@users.noreply.github.com" ]
bazel-bot-development[bot]@users.noreply.github.com
58d23eb63af6add22016b753d43de7f6521fbfb1
279e26d880c2470d0b60fe55b52f36024ecb28b5
/address.py
f65092bd69fcdb218f7a868194846dc937236b2d
[]
no_license
khang-le/unit5-05
0167d40d8070d5889c948a90f13d06ea53581690
c9b4afb6f1361dca227d915c7630ff7e5fe3b1cf
refs/heads/master
2020-09-22T03:51:35.589393
2019-11-30T16:27:46
2019-11-30T16:27:46
225,039,279
0
0
null
null
null
null
UTF-8
Python
false
false
1,797
py
#!/usr/bin/env python3 # Created by : Khang Le # Created on : September 2019 # This program prints out your name, using default function parameters def full_address(first_name, last_name, street_address, city, province, postal_code, apt_number=None): # return full address format full_address = first_name if apt_number is not None: full_address = ("\n" + full_address + " " + last_name + "" + street_address + "" + city + " " + province + " " + postal_code + " " + apt_number) elif apt_number is None: full_address = ("\n" + full_address + " " + last_name + "" + street_address + "" + city + " " + province + " " + postal_code) return full_address.upper() def main(): # get user informations apt_number = None first_name = input("Enter your first name: ") last_name = input("Enter your last name: ") + "\n" street_address = input("Enter your address: ") + "\n" question = input("Do you have an ap.number? (y/n): ") if question.upper() == "Y" or question.upper() == "YES": apt_number = input("Enter your apt.number here: ") + "\n" city = input("Enter your current city: ") province = input("Enter your current province: ") + " " postal_code = input("Enter your postal code: ") if apt_number is not None: address = full_address(first_name, last_name, street_address, city, province, postal_code, apt_number) else: address = full_address(first_name, last_name, street_address, city, province, postal_code) print(("Your shipping informations: {}").format(address)) if __name__ == "__main__": main()
[ "nguyen.khang.le@mths.ca" ]
nguyen.khang.le@mths.ca
7d10a0ba89d020ea8778672c530012d3496bb89b
0ab5b15d1b97b9d72a9e4218ad6b7377c26e76ec
/tkContacts_LAB15.py
c4c4c3d8fbfbf064790aa63503f585440122fa65
[]
no_license
RagggySu/-Sample-work-from-other-person-Portfolio
3beb01e18b5ace8858bb73eb9aad76e67c87d94b
8f5b6d2f3f4d82435cd166d6f4c038ae7352e59c
refs/heads/main
2023-05-05T06:50:13.906847
2021-05-28T18:45:05
2021-05-28T18:45:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,481
py
# Programmer: James Aniciete # Course No.: CSC 157 # Lab No.: 15 # Date: 5/9/2020 from tkinter import * from tkinter import messagebox # for exit button's messagebox import os # for exiting the app import myDatabasefile as dbf import sqlite3 # create table dbf.createTable() # get contactlist contactlist = dbf.selectAll() # function to check for valid data entries def validate(s): # s for string if s.strip("") != "": return True else: return False # function to get the selection from the listbox def selection(): return int(select.curselection()[0]) # function to add a contact def addContact(): if validate(nameVar.get()) == True and validate(phoneVar.get()) == True: dbf.insert(nameVar.get(), phoneVar.get()) canRoll = True # refresh the GUI refresh() elif validate(nameVar.get()) == False: print("Error: Enter a name.") elif validate(phoneVar.get()) == False: print("Error: Enter a phone number.") else: print("Error: Contact not added.\nMake sure that the Name and Phone fields are filled.") # function to update a contact def updateContact(): if validate(nameVar.get()) == True and validate(phoneVar.get()) == True: dbf.update(oName, oPhone, nameVar.get(), phoneVar.get()) canRoll = True # refresh the GUI refresh() elif validate(nameVar.get()) == False: print("Error: Enter a name.") elif validate(phoneVar.get()) == False: print("Error: Enter a phone number.") else: print("Error: Contact not updated.\nMake sure a contact is selected and that the Name and Phone fields are filled.") # function to delete a contact def deleteContact(): try: if messagebox.askokcancel(title = "Delete Contact", message = f"Are you sure you want to delete {contactlist[selection()][0]}'s contact information?") == 1: dbf.delete(nameVar.get(), phoneVar.get()) canRoll = True refresh() except: print("Error: Select a contact to be deleted.") # function to load a contact def loadContact(): try: # not really sure how this works global oName, oPhone oName = contactlist[selection()][0] oPhone = 
contactlist[selection()][1] # put name and phone selections into a tuple name, phone = contactlist[selection()] # use tuple to assign values to name and phone variables nameVar.set(name) phoneVar.set(phone) except: print("Error: Select a contact from the list.") # function to rollback a change def rollback(): global canRoll if canRoll == True: if (messagebox.askokcancel(title = "Rollback", message = "Would you like to undo the previous change?") == 1): dbf.rollback() refresh() canRoll = False # function to exit the program def exitContact(): app_title = "Contacts" if messagebox.askokcancel(title = app_title, message = "Do you want to exit, OK or Cancel") == 1: # commit and close the database dbf.db.commit() dbf.db.close() os._exit(1) # function that places all widgets into the frame individually def buildFrame () : # define global variables global nameVar, phoneVar, select # create the main window widget root = Tk() # add title to the frame root.title("My Contact List") # create & pack a frame in the root window frame1 = Frame(root) frame1.pack() # on 1st row of frame: # create a label for name Label(frame1, text="Name:").grid(row=0, column=0, sticky=W) # initialize StringVar for name nameVar = StringVar() # assign entry button value to the name var name = Entry(frame1, textvariable=nameVar) # position name var in first row, second column, aligned to the west cell border name.grid(row=0, column=1, sticky=W) # on 2nd row of the frame: # create a label for phone no. Label(frame1, text="Phone:").grid(row=1, column=0, sticky=W) # create string var for phone no. 
phoneVar= StringVar() # assign entry button value to phone var phone= Entry(frame1, textvariable=phoneVar) # position phone var in second row, second column, aligned to the west phone.grid(row=1, column=1, sticky=W) # create & pack a frame in the root window frame1 = Frame(root) frame1.pack() # add a row of buttons to frame1 with respective callback functions btn1 = Button(frame1,text=" Add ",command=addContact) btn2 = Button(frame1,text="Update",command=updateContact) btn3 = Button(frame1,text="Delete",command=deleteContact) btn4 = Button(frame1,text=" Load ",command=loadContact) btn5 = Button(frame1,text="Rollback",command=rollback) # pack the buttons on the same row to the left btn1.pack(side=LEFT) btn2.pack(side=LEFT) btn3.pack(side=LEFT) btn4.pack(side=LEFT) btn5.pack(side=LEFT) # allow for selection of names from a ListBox with a scrollbar frame1 = Frame(root) frame1.pack() # create a vertical bar widget scroll = Scrollbar(frame1, orient=VERTICAL) # whichever value from the ListBox is clicked is assigned to select # height = # of values visible in the Listbox select = Listbox(frame1, yscrollcommand=scroll.set, height=8) scroll.config (command=select.yview) scroll.pack(side=RIGHT, fill=Y) select.pack(side=LEFT, fill=BOTH) # create frame for Exit button at the bottom of the window frame2 = Frame(root) frame2.pack() # create exit button & pack it btn6 = Button(frame2, text = " Exit ", command = exitContact) btn6.pack() # return root object to allow for the frame to be built return root # sorts the contact list & allows for an update to the ListBox def setList(): contactlist.sort() # delete all elements from the select element select.delete(0, END) # insert each name from the list to the end of the select element for name, phone in contactlist: select.insert(END, name) # refresh function - used add the end of add, update, delete functions def refresh(): global canRoll, contactlist canRoll = True contactlist = dbf.selectAll() setList() # initialize the application 
root = buildFrame() setList() # set size of window (width x height) root.geometry("300x225") root.mainloop()
[ "noreply@github.com" ]
RagggySu.noreply@github.com
4f56cee030454bf7d814b2615a38c73539bcce37
d186f9763a16cddc161568728827636a8b68f2f2
/src/grpc_service/service_pb2_grpc.py
37cda993f81dc828c5dfc5ef4100daddd986874b
[]
no_license
xvicmanx/machine-learning
12fce38a70b88132d633f8956435d72fc3fee050
8389125e8a0f41c3c803bdfa94f5483ab30897d1
refs/heads/main
2023-02-11T19:35:43.298423
2021-01-06T12:59:29
2021-01-06T12:59:29
308,706,331
1
0
null
null
null
null
UTF-8
Python
false
false
12,434
py
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc import service_pb2 as service__pb2 class MachineLearningStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.PredictSalary = channel.unary_unary( '/machine_learning.MachineLearning/PredictSalary', request_serializer=service__pb2.PredictSalaryRequest.SerializeToString, response_deserializer=service__pb2.PredictSalaryResponse.FromString, ) self.PredictPurchase = channel.unary_unary( '/machine_learning.MachineLearning/PredictPurchase', request_serializer=service__pb2.PredictPurchaseRequest.SerializeToString, response_deserializer=service__pb2.PredictPurchaseResponse.FromString, ) self.PredictSegment = channel.unary_unary( '/machine_learning.MachineLearning/PredictSegment', request_serializer=service__pb2.PredictSegmentRequest.SerializeToString, response_deserializer=service__pb2.PredictSegmentResponse.FromString, ) self.GetOptimalCampaignAdOption = channel.unary_unary( '/machine_learning.MachineLearning/GetOptimalCampaignAdOption', request_serializer=service__pb2.GetOptimalCampaignAdOptionRequest.SerializeToString, response_deserializer=service__pb2.GetOptimalCampaignAdOptionResponse.FromString, ) self.PredictReviewOutcome = channel.unary_unary( '/machine_learning.MachineLearning/PredictReviewOutcome', request_serializer=service__pb2.PredictReviewOutcomeRequest.SerializeToString, response_deserializer=service__pb2.PredictReviewOutcomeResponse.FromString, ) self.PredictBankLeaving = channel.unary_unary( '/machine_learning.MachineLearning/PredictBankLeaving', request_serializer=service__pb2.PredictBankLeavingRequest.SerializeToString, response_deserializer=service__pb2.PredictBankLeavingResponse.FromString, ) self.PredictCatOrDog = channel.unary_unary( '/machine_learning.MachineLearning/PredictCatOrDog', 
request_serializer=service__pb2.PredictCatOrDogRequest.SerializeToString, response_deserializer=service__pb2.PredictCatOrDogResponse.FromString, ) class MachineLearningServicer(object): """Missing associated documentation comment in .proto file.""" def PredictSalary(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def PredictPurchase(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def PredictSegment(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetOptimalCampaignAdOption(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def PredictReviewOutcome(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def PredictBankLeaving(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def PredictCatOrDog(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not 
implemented!') raise NotImplementedError('Method not implemented!') def add_MachineLearningServicer_to_server(servicer, server): rpc_method_handlers = { 'PredictSalary': grpc.unary_unary_rpc_method_handler( servicer.PredictSalary, request_deserializer=service__pb2.PredictSalaryRequest.FromString, response_serializer=service__pb2.PredictSalaryResponse.SerializeToString, ), 'PredictPurchase': grpc.unary_unary_rpc_method_handler( servicer.PredictPurchase, request_deserializer=service__pb2.PredictPurchaseRequest.FromString, response_serializer=service__pb2.PredictPurchaseResponse.SerializeToString, ), 'PredictSegment': grpc.unary_unary_rpc_method_handler( servicer.PredictSegment, request_deserializer=service__pb2.PredictSegmentRequest.FromString, response_serializer=service__pb2.PredictSegmentResponse.SerializeToString, ), 'GetOptimalCampaignAdOption': grpc.unary_unary_rpc_method_handler( servicer.GetOptimalCampaignAdOption, request_deserializer=service__pb2.GetOptimalCampaignAdOptionRequest.FromString, response_serializer=service__pb2.GetOptimalCampaignAdOptionResponse.SerializeToString, ), 'PredictReviewOutcome': grpc.unary_unary_rpc_method_handler( servicer.PredictReviewOutcome, request_deserializer=service__pb2.PredictReviewOutcomeRequest.FromString, response_serializer=service__pb2.PredictReviewOutcomeResponse.SerializeToString, ), 'PredictBankLeaving': grpc.unary_unary_rpc_method_handler( servicer.PredictBankLeaving, request_deserializer=service__pb2.PredictBankLeavingRequest.FromString, response_serializer=service__pb2.PredictBankLeavingResponse.SerializeToString, ), 'PredictCatOrDog': grpc.unary_unary_rpc_method_handler( servicer.PredictCatOrDog, request_deserializer=service__pb2.PredictCatOrDogRequest.FromString, response_serializer=service__pb2.PredictCatOrDogResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'machine_learning.MachineLearning', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # 
This class is part of an EXPERIMENTAL API. class MachineLearning(object): """Missing associated documentation comment in .proto file.""" @staticmethod def PredictSalary(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictSalary', service__pb2.PredictSalaryRequest.SerializeToString, service__pb2.PredictSalaryResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def PredictPurchase(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictPurchase', service__pb2.PredictPurchaseRequest.SerializeToString, service__pb2.PredictPurchaseResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def PredictSegment(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictSegment', service__pb2.PredictSegmentRequest.SerializeToString, service__pb2.PredictSegmentResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetOptimalCampaignAdOption(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/GetOptimalCampaignAdOption', 
service__pb2.GetOptimalCampaignAdOptionRequest.SerializeToString, service__pb2.GetOptimalCampaignAdOptionResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def PredictReviewOutcome(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictReviewOutcome', service__pb2.PredictReviewOutcomeRequest.SerializeToString, service__pb2.PredictReviewOutcomeResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def PredictBankLeaving(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictBankLeaving', service__pb2.PredictBankLeavingRequest.SerializeToString, service__pb2.PredictBankLeavingResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def PredictCatOrDog(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/machine_learning.MachineLearning/PredictCatOrDog', service__pb2.PredictCatOrDogRequest.SerializeToString, service__pb2.PredictCatOrDogResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
[ "vic3jo@gmail.com" ]
vic3jo@gmail.com
2fbd7c9248f1dcc4aa90678c7973c0971038f7b3
dbeae28942f79ebe1f844628baf6cb8f7251609b
/modules/state.py
961e9b0dd1677c68fc8b876bae6fae442c30c3b4
[]
no_license
kouheiszk/pokemon-bot
3226614ad699dca261f2c97523b70d3c91a08b00
ba7404b7f6120581ac6602ca0c00ecbd9e0cbfc1
refs/heads/master
2020-05-21T10:12:07.376595
2016-09-13T10:57:01
2016-09-13T10:57:01
66,206,829
1
0
null
null
null
null
UTF-8
Python
false
false
668
py
#!/usr/bin/python # -*- coding: utf-8 -*- from modules.catch import Catch from modules.entities.badges import Badges from modules.entities.hatched_eggs import HatchedEggs from modules.entities.inventory import Inventory from modules.entities.map_objects import MapObjects from modules.entities.player import Player from modules.entities.settings import Settings class State(object): def __init__(self): self.player = Player() self.inventory = Inventory() self.badges = Badges() self.settings = Settings() self.map_objects = MapObjects() self.catch = Catch() self.hatched_eggs = HatchedEggs(self.inventory)
[ "kouhei.szk@gmail.com" ]
kouhei.szk@gmail.com
ce7c48f9f8686e922f04be56fd4bf8ab959eb8de
d9d516490b35d4589787dd1c2f02e1cb39967ae4
/021 Jogo da adivinhação.py
f27f947c56eeb6ea3fe7e4a0cacdc82c2896aca5
[]
no_license
Emerson53na/exercicios-python-3
e3ec9e88e9d413ee9dee432a2c120447a22a3f3d
8f0349a94aca822722c02084c6e3d13cd8c27051
refs/heads/master
2021-05-19T09:31:31.686547
2020-04-22T23:54:41
2020-04-22T23:54:41
251,631,178
0
1
null
null
null
null
UTF-8
Python
false
false
401
py
# Guessing game: the computer draws a number from 0 to 5 and the player
# tries to guess it.  All user-facing messages are in Portuguese.
from random import choice

print('=-'*20,'\nVou pensar em um número de 0 a 5.Tente adivinhar...')
print('=-'*20)
# Player's guess.  NOTE(review): no input validation — a non-integer
# answer raises ValueError here.
num = int(input('Em que número eu pensei? '))
lista = [0,1,2,3,4,5]
# Computer's pick, drawn uniformly from the candidate list.
cpu = choice(lista)
if cpu == num:
    # \033[32m ... \033[m wraps the win message in green ANSI color.
    print('O número escolhido foi: {}\n\033[32mParabens, você ganhou!\033[m'.format(cpu))
else:
    # \033[31m ... \033[m wraps the lose message in red ANSI color.
    print('O número escolhido foi: {}\n\033[31mVocê errou!\033[m'.format(cpu))
[ "noreply@github.com" ]
Emerson53na.noreply@github.com
993148bc8da60f6cde60e4ddcf631c383dadd161
2a42392cf93deaccb39b357411c0b49abec0a132
/classcode/anim_and_sound/anim.py
840cb919d1038dfaea799ab71a28e4ca7a054444
[]
no_license
AKilgore/CS112-Spring2012
89aa573b19f1c92055e4832d87c6e5fa0588bccf
9fe50b80d71b4dee92101b993c1f58265eb40ee2
refs/heads/master
2020-12-24T19:27:58.448474
2012-04-30T07:23:40
2012-04-30T07:23:40
3,266,350
0
0
null
null
null
null
UTF-8
Python
false
false
1,213
py
#!/usr/bin/env python
import pygame


class AnimationFrames(object):
    """Timeline of (duration, frame-data) pairs.

    ``loops`` is the number of times the timeline repeats; ``-1`` means
    loop forever.  After the final loop the last frame is held.
    """

    def __init__(self, frames, loops=-1):
        self._times = []   # cumulative end time of each frame
        self._data = []    # frame payload (e.g. spritesheet coordinates)
        total = 0
        for t, data in frames:
            total += t
            self._times.append(total)
            self._data.append(data)
        self.end = total   # duration of one full pass through the timeline
        self.loops = loops

    def get(self, time):
        """Return the frame data active at ``time``.

        While still looping (or looping forever) the time wraps modulo one
        pass; past the last loop the final frame is returned.
        """
        # BUG FIX: original read `time is < self.loops * self.end`, which is
        # a syntax error.
        if self.loops == -1 or time < self.loops * self.end:
            time %= self.end
        if time > self.end:
            # Beyond the last loop: hold the final frame.
            return self._data[-1]
        idx = 0
        # BUG FIX: original compared against the undefined name `t`.
        while self._times[idx] < time:
            idx += 1
        return self._data[idx]


class Animation(object):
    """Binds an AnimationFrames timeline to a spritesheet."""

    def __init__(self, spritesheet, frames):
        # Accept either an AnimationFrames or a raw list of (duration, data).
        if not isinstance(frames, AnimationFrames):
            frames = AnimationFrames(frames)
        self.spritesheet = spritesheet
        self.frames = frames
        self.time = 0
        self.update(0)   # initialise self.x / self.y from frame 0

    def get_frame_data(self, t):
        """Spritesheet coordinates for time ``t``."""
        # BUG FIX: original referenced the non-existent attribute
        # `self.frame`; the timeline is stored as `self.frames`.
        return self.frames.get(t)

    def update(self, dt):
        """Advance the animation clock by ``dt`` and refresh coordinates."""
        self.time += dt
        self.x, self.y = self.get_frame_data(self.time)

    def get_current_frame(self):
        """Return the current frame surface from the spritesheet."""
        return self.spritesheet.get(self.x, self.y)
[ "mak11@hampshire.edu" ]
mak11@hampshire.edu
d047e999cc18d2f81e6f7afc24a22551af5b8e21
c96f923cba05f4bfefafa24c02818cc98e8caa14
/sum.py
86724be39016ee75ac99bd413acdd8c139cca37c
[]
no_license
saviorseelf/test
0178865ff0fbafe37ee286301669876ecb5e7ae6
7438be19b185e16a92a1c3e72cad402b987edc01
refs/heads/master
2021-05-30T11:11:31.744015
2016-01-21T19:15:18
2016-01-21T19:15:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
from threading import Thread i = 0 def add(): global i for j in range(0,1000000): i += 1 def sub(): global i for j in range(0,1000000): i -= 1 def main(): thread1 = Thread(target = add, args = (),) thread2 = Thread(target = sub, args = (),) thread1.start() thread2.start() thread1.join() thread2.join() print i main()
[ "andershanssen92@gmail.com" ]
andershanssen92@gmail.com
171783a41f6cc03ffad67745ac99b75219895fad
c37de1b37ea7f6e5d0e4b6715be6f6da342cba9a
/examples/vasp/wallet.py
794836a62040bbfc7b35e797ac4dca07f265240e
[ "Apache-2.0" ]
permissive
fil-blue/client-sdk-python
6389d6b40c1af1587b23ecef96a4db5af66e34dd
2105e7362a35e69298de0896e17331006374de57
refs/heads/master
2023-02-15T02:54:40.512655
2021-01-05T23:42:40
2021-01-05T23:42:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
13,300
py
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0

# Example custodial-wallet VASP built on the Diem off-chain API: it tracks
# per-user subaddresses, exchanges PaymentCommand objects with a counterparty
# VASP, and settles travel-rule payments on testnet.

from dataclasses import dataclass, field
from http import server
from diem import (
    identifier,
    jsonrpc,
    diem_types,
    stdlib,
    testnet,
    utils,
    LocalAccount,
    offchain,
)
import logging, threading, typing

logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class User:
    # End user of the wallet. `subaddresses` collects every subaddress ever
    # generated for this user (see gen_user_account_id), and is searched by
    # _find_user_by_subaddress to route inbound payments.
    # NOTE(review): annotated List[str], but _find_user_by_subaddress compares
    # entries against a bytes subaddress — the element type looks like bytes;
    # confirm against identifier.gen_subaddress().
    name: str
    subaddresses: typing.List[str] = field(default_factory=lambda: [])

    def kyc_data(self) -> offchain.KycDataObject:
        """Build the KYC payload sent to the counterparty VASP for this user."""
        return offchain.individual_kyc_data(
            given_name=self.name,
            surname=f"surname-{self.name}",
            address=offchain.AddressObject(city="San Francisco"),
        )

    def additional_kyc_data(self) -> str:
        """Extra KYC detail revealed only when the counterparty soft-matches."""
        return f"{self.name}'s secret"


class ActionResult(str):
    """String subclass used as a test-observable marker of what was executed."""

    def merge(self, ret: str) -> "ActionResult":
        # SEND_REQUEST_SUCCESS is the uninteresting default outcome; do not
        # append it to the accumulated result string.
        if ret == ActionResult.SEND_REQUEST_SUCCESS:
            return self
        return self + ", " + ret


# the following ActionResult is created for testing purpose to indicate specific task is executed
ActionResult.PASS = ActionResult("pass")
ActionResult.REJECT = ActionResult("reject")
ActionResult.SOFT_MATCH = ActionResult("soft_match")
ActionResult.SENT_ADDITIONAL_KYC_DATA = ActionResult("sent_additional_kyc_data")
ActionResult.TXN_EXECUTED = ActionResult("transaction_executed")
ActionResult.SEND_REQUEST_SUCCESS = ActionResult("send_request_success")

# A background-job result: either a bare ActionResult (e.g. from _send_request)
# or an (Action, ActionResult) pair from _offchain_business_action.
BgResult = typing.Union[ActionResult, typing.Tuple[offchain.Action, ActionResult]]


@dataclass
class WalletApp:
    """WalletApp is an example of custodial wallet application"""

    @staticmethod
    def generate(name: str, client: jsonrpc.Client) -> "WalletApp":
        """generate a WalletApp running on testnet"""
        offchain_service_port = offchain.http_server.get_available_port()
        account = testnet.gen_vasp_account(client, f"http://localhost:{offchain_service_port}")
        w = WalletApp(
            name=name,
            jsonrpc_client=client,
            parent_vasp=account,
            offchain_service_port=offchain_service_port,
        )
        w.add_child_vasp()
        return w

    name: str
    jsonrpc_client: jsonrpc.Client
    parent_vasp: LocalAccount
    offchain_service_port: int
    hrp: str = field(default=identifier.TDM)
    # Off-chain commands keyed by reference id (latest state wins).
    saved_commands: typing.Dict[str, offchain.Command] = field(default_factory=lambda: {})
    child_vasps: typing.List[LocalAccount] = field(default_factory=lambda: [])
    users: typing.Dict[str, User] = field(default_factory=lambda: {})
    # Test hooks: per-given-name forced outcomes for KYC evaluation / review.
    evaluate_kyc_data_result: typing.Dict[str, ActionResult] = field(default_factory=lambda: {})
    manual_review_result: typing.Dict[str, ActionResult] = field(default_factory=lambda: {})
    # FIFO of deferred follow-up jobs, drained by run_once_background_job.
    task_queue: typing.List[typing.Callable[["WalletApp"], BgResult]] = field(default_factory=lambda: [])
    # Per-reference-id locks guarding save_command's read-modify-write.
    locks: typing.Dict[str, threading.Lock] = field(default_factory=lambda: {})

    def __post_init__(self) -> None:
        # Derived attributes, not dataclass fields.
        self.compliance_key = self.parent_vasp.compliance_key
        self.offchain_client = offchain.Client(self.parent_vasp.account_address, self.jsonrpc_client, self.hrp)

    # --------------------- end user interaction --------------------------

    def pay(
        self,
        user_name: str,
        intent_id: str,
        desc: typing.Optional[str] = None,
        original_payment_reference_id: typing.Optional[str] = None,
    ) -> typing.Tuple[(str, ActionResult)]:
        """make payment from given user account to intent_id

        Initializes a PaymentCommand from the decoded intent and saves it
        (which queues the outbound send).  Returns the command reference id.
        NOTE(review): the annotation advertises a tuple, but the body returns
        only ``command.reference_id()`` — confirm intended signature.
        """
        intent = identifier.decode_intent(intent_id, self.hrp)
        command = offchain.PaymentCommand.init(
            self.gen_user_account_id(user_name),
            self.users[user_name].kyc_data(),
            intent.account_id,
            intent.amount,
            intent.currency_code,
            original_payment_reference_id=original_payment_reference_id,
            description=desc,
        )
        self.save_command(command)
        return command.reference_id()

    def gen_intent_id(
        self,
        user_name: str,
        amount: int,
        currency: typing.Optional[str] = testnet.TEST_CURRENCY_CODE,
    ) -> str:
        """Encode a payment intent targeting a fresh subaddress of the user."""
        account_id = self.gen_user_account_id(user_name)
        return identifier.encode_intent(account_id, currency, amount)

    # --------------------- offchain integration --------------------------

    def process_inbound_request(
        self, x_request_id: str, request_sender_address: str, request_bytes: bytes
    ) -> typing.Tuple[int, bytes]:
        """Handle one inbound off-chain HTTP request.

        Returns (http_status, jws-serialized response bytes); 400 with an
        error payload on any offchain.Error.
        """
        inbound_command = None
        try:
            inbound_command = self.offchain_client.process_inbound_request(request_sender_address, request_bytes)
            self.save_command(inbound_command)
            resp = offchain.reply_request(inbound_command.cid)
            code = 200
        except offchain.Error as e:
            logger.exception(e)
            # inbound_command may still be None if parsing itself failed.
            resp = offchain.reply_request(inbound_command.cid if inbound_command else None, e.obj)
            code = 400
        return (code, offchain.jws.serialize(resp, self.compliance_key.sign))

    def run_once_background_job(
        self,
    ) -> BgResult:
        """Run the oldest queued background task, if any.

        NOTE(review): returns None when the queue is empty despite the
        BgResult annotation.
        """
        if len(self.task_queue) == 0:
            return None

        task = self.task_queue[0]
        ret = task(self)
        self.task_queue.remove(task)
        return ret

    # --------------------- admin --------------------------

    def start_server(self) -> server.HTTPServer:
        """Start the local off-chain HTTP endpoint for this wallet."""
        return offchain.http_server.start_local(self.offchain_service_port, self.process_inbound_request)

    def add_child_vasp(self) -> jsonrpc.Transaction:
        # NOTE(review): annotated to return a Transaction but the append
        # expression yields None.
        self.child_vasps.append(testnet.gen_child_vasp(self.jsonrpc_client, self.parent_vasp))

    def add_user(self, name) -> None:
        """Register a new wallet user with no subaddresses yet."""
        self.users[name] = User(name)

    def vasp_balance(self, currency: str = testnet.TEST_CURRENCY_CODE) -> int:
        """Sum of the parent and all child VASP balances in `currency`."""
        balance = 0
        for vasp in [self.parent_vasp] + self.child_vasps:
            balance += utils.balance(self.jsonrpc_client.get_account(vasp.account_address), currency)
        return balance

    def clear_data(self) -> None:
        """Reset all in-memory state (users, commands, queues, locks)."""
        self.evaluate_kyc_data_result = {}
        self.manual_review_result = {}
        self.users = {}
        self.saved_commands = {}
        self.task_queue = []
        self.locks = {}

    # -------- offchain business actions ---------------

    def _send_additional_kyc_data(
        self, command: offchain.Command
    ) -> typing.Tuple[ActionResult, offchain.PaymentCommand]:
        """Answer a counterparty soft-match by attaching the user's extra KYC data."""
        command = typing.cast(offchain.PaymentCommand, command)
        account_id = command.my_actor_obj().address
        _, subaddress = identifier.decode_account(account_id, self.hrp)
        user = self._find_user_by_subaddress(subaddress)
        new_cmd = command.new_command(additional_kyc_data=user.additional_kyc_data())
        return (ActionResult.SENT_ADDITIONAL_KYC_DATA, new_cmd)

    def _submit_travel_rule_txn(
        self,
        command: offchain.Command,
    ) -> ActionResult:
        """Submit the on-chain p2p transfer carrying travel-rule metadata."""
        command = typing.cast(offchain.PaymentCommand, command)
        # The sending child VASP account funds the transfer.
        child_vasp = self._find_child_vasp(command.sender_account_address(self.hrp))
        testnet.exec_txn(
            self.jsonrpc_client,
            child_vasp,
            stdlib.encode_peer_to_peer_with_metadata_script(
                currency=utils.currency_code(command.payment.action.currency),
                payee=command.receiver_account_address(self.hrp),
                amount=command.payment.action.amount,
                metadata=command.travel_rule_metadata(self.hrp),
                metadata_signature=bytes.fromhex(command.payment.recipient_signature),
            ),
        )
        return ActionResult.TXN_EXECUTED

    def _evaluate_kyc_data(self, command: offchain.Command) -> typing.Tuple[ActionResult, offchain.PaymentCommand]:
        """Evaluate counterparty KYC data; test hook decides pass/reject/soft-match."""
        command = typing.cast(offchain.PaymentCommand, command)
        op_kyc_data = command.opponent_actor_obj().kyc_data
        ret = self.evaluate_kyc_data_result.get(op_kyc_data.given_name, ActionResult.PASS)
        if ret == ActionResult.SOFT_MATCH:
            return (ret, command.new_command(status=offchain.Status.soft_match))
        return (ret, self._kyc_data_result("evaluate key data", ret, command))

    def _manual_review(self, command: offchain.Command) -> typing.Tuple[ActionResult, offchain.PaymentCommand]:
        """Manual review step after a soft-match; test hook decides the outcome."""
        command = typing.cast(offchain.PaymentCommand, command)
        op_kyc_data = command.opponent_actor_obj().kyc_data
        ret = self.manual_review_result.get(op_kyc_data.given_name, ActionResult.PASS)
        return (ret, self._kyc_data_result("review", ret, command))

    def _kyc_data_result(
        self, action: str, ret: ActionResult, command: offchain.PaymentCommand
    ) -> offchain.PaymentCommand:
        """Translate a KYC decision into the next command state (ready/abort)."""
        if ret == ActionResult.PASS:
            # Receiver side must also hand back its KYC data and signature.
            if command.is_receiver():
                return self._send_kyc_data_and_receipient_signature(command)
            return command.new_command(status=offchain.Status.ready_for_settlement)
        return command.new_command(
            status=offchain.Status.abort,
            abort_code=offchain.AbortCode.reject_kyc_data,
            abort_message=f"{action}: {ret}",
        )

    def _send_kyc_data_and_receipient_signature(
        self,
        command: offchain.PaymentCommand,
    ) -> offchain.PaymentCommand:
        """Receiver response: attach KYC data and sign the travel-rule metadata."""
        sig_msg = command.travel_rule_metadata_signature_message(self.hrp)
        subaddress = command.receiver_subaddress(self.hrp)
        user = self._find_user_by_subaddress(subaddress)
        return command.new_command(
            recipient_signature=self.compliance_key.sign(sig_msg).hex(),
            kyc_data=user.kyc_data(),
            status=offchain.Status.ready_for_settlement,
        )

    # ---------------------- offchain Command ---------------------------

    def _send_request(self, command: offchain.PaymentCommand) -> ActionResult:
        """Send an outbound command and queue its follow-up action, if any."""
        self.offchain_client.send_command(command, self.compliance_key.sign)
        self._enqueue_follow_up_action(command)
        return ActionResult.SEND_REQUEST_SUCCESS

    def _enqueue_follow_up_action(self, command: offchain.PaymentCommand) -> None:
        # The lambda re-reads the command by reference id at execution time,
        # so the job always operates on the latest saved state.
        if command.follow_up_action():
            self.task_queue.append(lambda app: app._offchain_business_action(command.reference_id()))

    def _offchain_business_action(self, ref_id: str) -> BgResult:
        """Dispatch the saved command's follow-up action to its handler."""
        command = self.saved_commands.get(ref_id)
        action = command.follow_up_action()
        if action == offchain.Action.SUBMIT_TXN:
            # Terminal action: no new command state to save.
            return (action, self._submit_travel_rule_txn(command))

        actions = {
            offchain.Action.EVALUATE_KYC_DATA: self._evaluate_kyc_data,
            offchain.Action.CLEAR_SOFT_MATCH: self._send_additional_kyc_data,
            offchain.Action.REVIEW_KYC_DATA: self._manual_review,
        }
        ret, new_command = actions[action](command)
        self.save_command(new_command)
        # return action and action result for test
        return (action, ret)

    # ---------------------- commands ---------------------------

    def save_command(self, command: offchain.Command) -> None:
        """save command

        locks prior command by reference id, validate and save new command.
        in a production implementation, the lock should be database / distributed lock to ensure atomic
        process(read and write) command by the reference id.
        """
        lock = self.lock(command.reference_id())
        if not lock.acquire(blocking=False):
            msg = f"command(reference_id={command.reference_id()}) is locked"
            raise offchain.command_error(offchain.ErrorCode.conflict, msg)
        try:
            prior = self.saved_commands.get(command.reference_id())
            if command == prior:
                # Idempotent re-save: nothing to do.
                return
            command.validate(prior)
            self.saved_commands[command.reference_id()] = command
            if command.is_inbound():
                self._enqueue_follow_up_action(command)
            else:
                # outbound
                self.task_queue.append(lambda app: app._send_request(command))
        finally:
            lock.release()

    def lock(self, ref_id: str) -> threading.Lock:
        """Lazily create and return the per-reference-id lock."""
        return self.locks.setdefault(ref_id, threading.Lock())

    # ---------------------- users ---------------------------

    def _find_user_by_subaddress(self, subaddress: bytes) -> User:
        """Return the user owning `subaddress`; raises ValueError if unknown."""
        for u in self.users.values():
            if subaddress in u.subaddresses:
                return u
        raise ValueError(f"could not find user by subaddress: {subaddress.hex()}, {self.name}")

    def gen_user_account_id(self, user_name: str) -> str:
        """Mint a fresh subaddress for the user and return its full account id."""
        subaddress = identifier.gen_subaddress()
        self.users[user_name].subaddresses.append(subaddress)
        return identifier.encode_account(self._available_child_vasp().account_address, subaddress, self.hrp)

    # ---------------------- child vasps ---------------------------

    def _available_child_vasp(self) -> LocalAccount:
        # Simplification: always route through the first child VASP.
        return self.child_vasps[0]

    def _find_child_vasp(self, address: diem_types.AccountAddress) -> LocalAccount:
        """Return the child VASP at `address`; raises ValueError if unknown."""
        for vasp in self.child_vasps:
            if vasp.account_address == address:
                return vasp
        raise ValueError(f"could not find child vasp by address: {address.to_hex()}")
[ "ilx@fb.com" ]
ilx@fb.com
848a890e8baab9228465b85ff2aaf300a3bd3890
59835adaceb26614d0aa51cf8dda2be5be79bcfb
/run_menu.py
91721ab25d52dd5a240b4d7c8ac9c851985b7866
[]
no_license
Farah-H/python_menu
b438e11d649729611ec4aa8ca3a8c9bd0106c3b6
7401eb938a71c03a89da30667ebda4d59f75d4ac
refs/heads/master
2023-01-07T03:28:06.934944
2020-11-08T19:43:54
2020-11-08T19:43:54
310,585,828
0
0
null
null
null
null
UTF-8
Python
false
false
819
py
from waitstaff_class import Waitstaff # This part of the program will actually execute taking an order, saving it, and printing it back to the user #instantiating the waitstaff class jenny = Waitstaff() all_orders = [] # a list to store all orders in (could increment by making this csv output) # prompting the user for which part of the menu they would like to see category = input('Would you like to see our starters, mains, desserts or drinks? Please enter "nothing" if you do not want to see the menu.').lower() # if they are done with (or don't want to read) the menu, they can start to place an order if input('Are you ready to make an order?').lower() == 'yes': this_order = jenny.get_order() print(this_order) print(jenny.print_order(this_order)) else: jenny.display_menu(category)
[ "61236001+farahmh@users.noreply.github.com" ]
61236001+farahmh@users.noreply.github.com
82405e9839e46249f460ed4e84143cc38d8ef32b
55e31bc59b435ccfb60da178d560dedd6248b593
/resources/store.py
b1c85be12f356d4d78b91c85e8bebff15149a086
[]
no_license
kenHsieh25053/flaskapi
a456c2ae28127ba422582693949fcb79bff71977
da1130585fc722910db3c503946ee5d3b8d66591
refs/heads/master
2020-03-10T22:44:36.581563
2018-04-22T09:07:09
2018-04-22T09:07:09
129,625,792
0
0
null
null
null
null
UTF-8
Python
false
false
806
py
from flask_restful import Resource

from models.store import StoreModel


class Store(Resource):
    """REST resource for a single store, addressed by its name."""

    def get(self, name):
        """Return the store as JSON, or 404 when it does not exist."""
        store = StoreModel.find_by_name(name)
        if store:
            return store.json()
        return {'message': 'Store not found'}, 404

    def post(self, name):
        """Create a store; 400 on duplicate name, 500 on a database error."""
        if StoreModel.find_by_name(name):
            return {'message': 'A store with name {} already exits.'.format(name)}, 400

        store = StoreModel(name)
        try:
            store.save_to_db()
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit / KeyboardInterrupt.
        except Exception:
            return {'message': 'An error occured while creating the store'}, 500

        return store.json(), 201

    def delete(self, name):
        """Delete the store when present; always reports success."""
        store = StoreModel.find_by_name(name)
        if store:
            store.delete_from_db()

        return {'message': 'Store deleted'}


class StoreList(Resource):
    """REST resource listing every store."""

    def get(self):
        """Return all stores as a JSON list."""
        return {'stores': [store.json() for store in StoreModel.query.all()]}
[ "kw1984@livemail.tw" ]
kw1984@livemail.tw
7be70ac3312c262cb16fc7fdd8dcb45124a48f14
d2b2023261ccdcaf560a2e7b0bab13ecdedacfc9
/03/fullbackup.py
00cb6631683557864d36d5b2b9b06ca824c29799
[]
no_license
lilyef2000/lesson
a9d96ffc19f68fa3f044f240de6496b6d69394f6
2a5abb00b9bbb8bb36602ea6e1e8c464accc0759
refs/heads/master
2021-01-10T08:41:14.524421
2016-01-01T18:04:04
2016-01-01T18:04:04
46,460,003
0
0
null
null
null
null
UTF-8
Python
false
false
879
py
#!/usr/bin/python
"""Create a .tgz full backup of the path given on the command line.

Usage:
    fullbackup.py SOURCE            # normal mode
    fullbackup.py SOURCE -X FILE    # exclude paths listed in FILE
"""
import sys, os, time, logger

source_file = sys.argv[1]
formated_source_file = source_file.split('/')[-1]
backup_dir = '/home/Administrator/lesson/backup/'
# Target archive: <backup_dir><basename>_<timestamp>.tgz
backup_to_file = '''%s%s_%s.tgz''' % (backup_dir, formated_source_file,
                                      time.strftime("%Y%m%d%H%M%S", time.localtime()))


def run_backup(runtime='now', exclude_file_name='None'):
    """Run tar over `source_file` and record the outcome via `logger`."""
    # Default to normal mode so backup_cmd is always defined.  BUG FIX: the
    # original left backup_cmd unbound (NameError) when four arguments were
    # given but argv[2] was not '-X'.
    backup_cmd = "tar -cvzf %s %s |wc -l" % (backup_to_file, source_file)
    if len(sys.argv) == 4 and sys.argv[2] == '-X':
        # BUG FIX: Python 2 `print` statements converted to py3 calls.
        print('--------exclude file mode--------')
        exclude_file_name = sys.argv[3]
        # With -cvzfX, `f` consumes the first following arg (archive) and
        # `X` the second (exclude-list file).
        backup_cmd = "tar -cvzfX %s %s %s " % (backup_to_file, exclude_file_name, source_file)
    else:
        print('--------Normal mode:--------')
    run_command = os.system(backup_cmd)
    if run_command == 0:
        logger.record_log('Full Backup', 'Success', 'N/A', 'test')
    else:
        logger.record_log('Full Backup', 'Failure', 'N/A', 'test')


run_backup()
[ "lilyef2000@gmail.com" ]
lilyef2000@gmail.com
1c990786b09382998bcbe64210b2d6960dcbb44f
6691d0c71ddb92422fddb5d5994b660ee88a2435
/SDP_Assignments/Game_of_life/game_of_life_vishnu/GolLogic.py
0bbc877b4cafbc1f8997045ede5e4138c3d71dd9
[]
no_license
dadi-vardhan/SDP
fb1b2e49c014d769add0e6244ca302e4b6939de5
f692837c2cda68d8b16d57727d4b727acf545bf2
refs/heads/master
2023-03-13T10:28:49.060533
2021-03-08T16:45:38
2021-03-08T16:45:38
310,674,824
0
1
null
2020-11-23T09:06:31
2020-11-06T18:23:23
Jupyter Notebook
UTF-8
Python
false
false
1,224
py
import time
import numpy as np
import matplotlib.pyplot as plt


class Logic(object):
    """Conway's Game of Life update rules over the console's 2-D state grid."""

    def __init__(self, console):
        # Shared reference: cell_propogation_rules writes through this array,
        # so the console sees every generation update in place.
        self.state = console.state

    def neighbour_cell_count(self):
        '''
        Return the number of live neighbours for every interior cell.

        Sums the eight shifted views of the grid, so `cell[i, j]` is the
        live-neighbour count of `state[i+1, j+1]` (the one-cell border is
        excluded).

        Parameters: none
        Returns: cell [numpy array, shape (rows-2, cols-2)]
        '''
        state = self.state
        # Each slice is the grid shifted by one step in one of the eight
        # compass directions; adding them counts live (value 1) neighbours.
        cell = (state[0:-2,0:-2] + state[0:-2,1:-1] + state[0:-2,2:] +
                state[1:-1,0:-2] + state[1:-1,2:] +
                state[2:,0:-2] + state[2:,1:-1] + state[2:,2:])
        return cell

    def cell_propogation_rules(self):
        '''
        Apply one generation of the birth/survival rules in place.

        Birth: a dead cell with exactly 3 live neighbours becomes alive.
        Survival: a live cell with 2 or 3 live neighbours stays alive.
        Also records self.total_cell_birth / self.total_cell_survived.

        Parameters: none
        Returns : state (the mutated grid)
        '''
        cell = self.neighbour_cell_count()
        state = self.state
        # Dead interior cells with exactly three live neighbours are born.
        cell_birth = (cell == 3) & (state[1:-1,1:-1] == 0)
        # Live interior cells with two or three live neighbours survive.
        survive = ((cell == 2) | (cell == 3)) & (state[1:-1,1:-1] == 1)
        # Clear the whole grid, then set the next generation in place.
        state[...] = 0
        state[1:-1,1:-1][cell_birth | survive] = 1
        total_cell_birth = np.sum(cell_birth)
        self.total_cell_birth = total_cell_birth
        total_cell_survived = np.sum(survive)
        self.total_cell_survived = total_cell_survived
        return state
[ "vishnu.dadi@smail.h-brs.de" ]
vishnu.dadi@smail.h-brs.de
994164e610d278fe042d18fcfb17557acddd8a41
47a496e0c7ea9adf35c006d193a88357006a370e
/algorithm/TopicB2/TreePagoda.py
fcce3df7058ac52a7a5bc94496f5eb20ed821fda
[]
no_license
Curious-chen/curriculum-design
01ea5aff12c3097f7283571befd7bcfe68149817
036f78a62b15ec8e5c8e1013d124f726fd2bebe4
refs/heads/master
2020-12-06T14:19:29.026158
2020-01-08T06:30:50
2020-01-08T06:30:50
232,483,805
6
1
null
null
null
null
UTF-8
Python
false
false
2,727
py
"""
Number-pagoda (number-triangle) problem: positive integers are arranged
into an equilateral triangle.  Starting from the apex and stepping down
through adjacent numbers, find the path whose sum is maximal when it
reaches the bottom edge.
"""
import numpy as np

"""
https://www.jianshu.com/p/2a7f5cac0d58
"""
"""
动态规划
dp[i][j] = max(dp[i+1][j],dp[i+1][j+1])+date[i][j]
"""
# Recurrence above (in Chinese): dynamic programming,
# dp[i][j] = max(dp[i+1][j], dp[i+1][j+1]) + pagoda[i][j].
"""
(1) 初始化距离数组dp,令距离dp的最后一行复制树塔的最后一行的值
(2) 从树塔倒数第二行开始,自底向上计算
(3) 判断x点的左右孩子的大小,对应的距离dp = 左右孩子中的较大值加上树塔对应位置值
(4) 重复2、3步骤,直到计算完树塔顶端
"""
# Algorithm above (in Chinese): (1) copy the pagoda's last row into dp;
# (2) starting from the second-to-last row, work bottom-up; (3) each dp cell
# is the larger of its two children plus the pagoda value; (4) repeat up to
# the apex.


class TreePagoda(object):
    def __init__(self, pagoda):
        # `pagoda` is a lower-triangular matrix padded with zeros on the right.
        self.pagoda = np.array(pagoda)
        # Initialise the best-distance-to-bottom table: keep the last row,
        # zero out everything else.
        dp = self.pagoda.copy()
        dp[:-1, :] = 0
        self.dp = dp
        # Maps (i, j) to the child coordinate chosen on the optimal path.
        self.next = dict()

    def run(self):
        """Bottom-up DP; yields (flat index, flat child index, dp value) per cell."""
        index = len(self.pagoda) - 1
        # Bottom row: dp equals the pagoda values, no successor (child 0).
        for j, value in enumerate(self.pagoda[-1]):
            yield self.getIndex(index, j), 0, value
        for i in range(len(self.pagoda) - 2, -1, -1):
            # Work upward, computing the optimal value for each row.
            layer = self.pagoda[i]
            for j in range(len(layer)):
                if layer[j] == 0:
                    # Zero padding marks the end of real entries in this row.
                    break
                self.find(i, j)
                yield self.getIndex(i, j), self.getIndex(*self.next[(i, j)]), self.dp[i, j]

    def getIndex(self, i, j):
        """Flatten triangle coordinates (i, j) to a single 0-based index."""
        return int(i * (i + 1) / 2 + j)

    def find(self, i, j):
        """Set dp[i, j] from the larger child and record the chosen child."""
        if self.dp[i + 1, j] > self.dp[i + 1, j + 1]:
            self.dp[i, j] = self.dp[i + 1, j] + self.pagoda[i, j]
            self.next[(i, j)] = (i + 1, j)
        else:
            self.dp[i, j] = self.dp[i + 1, j + 1] + self.pagoda[i, j]
            self.next[(i, j)] = (i + 1, j + 1)

    def createdPath(self):
        """Yield the flat indices of the optimal path from apex to bottom."""
        cu = (0, 0)
        yield self.getIndex(*cu)
        while True:
            cu = self.next[cu]
            yield self.getIndex(*cu)
            if cu[0] == len(self.pagoda) - 1:
                break


def Test():
    """Smoke test on a 5-row pagoda: run the DP, then print dp and the path."""
    treePagoda = np.array(((9, 0, 0, 0, 0),
                           (12, 15, 0, 0, 0),
                           (10, 6, 8, 0, 0),
                           (2, 18, 9, 5, 0),
                           (19, 7, 10, 4, 16)))
    t = TreePagoda(treePagoda)
    y = t.run()
    # 15 cells total (5+4+3+2+1); drain the generator to fill dp/next.
    for i in range(15):
        x = next(y)
        print(x)
    # NOTE(review): the generator below is created but never consumed, so
    # this call has no effect.
    t.createdPath()
    print(t.dp)
    for i in t.createdPath():
        print(i)


if __name__ == '__main__':
    Test()
[ "noreply@github.com" ]
Curious-chen.noreply@github.com
85dbdd459b8e5552ad1d55043b0a1f5779b84c91
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
/python/python_20926.py
194a6671b01c6bb8bdc4a0d1f301faf7b48d8ed5
[]
no_license
AK-1121/code_extraction
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
5297a4a3aab3bb37efa24a89636935da04a1f8b6
refs/heads/master
2020-05-23T08:04:11.789141
2015-10-22T19:19:40
2015-10-22T19:19:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
32
py
# Modifying sys.path PYTHONPATH
[ "ubuntu@ip-172-31-7-228.us-west-2.compute.internal" ]
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
e9413bfa3cd627adaf3cf6bb968577c84e905767
2b84bd7cdcfe9c921fa60fefa2ee1257df33ce38
/utils/email_util.py
713ad3955e46c9b1c3cf07d310a5c6f928855407
[]
no_license
webclinic017/market_monitor
f9cfa4a8443b81830abd9e5900509c7dfdab3e37
9a8a9b6181e1ab4f5d3dad32641ac941c5e4fabf
refs/heads/main
2023-07-28T00:39:34.481170
2021-09-15T12:39:38
2021-09-15T12:39:38
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,525
py
# Copyright (c) 2015 Shiye Inc.
# All rights reserved.
#
# Author: zsx <zhaoshouxin@shiyejinrong.com>
# Date: 2019-03-07

import smtplib
from email.mime.text import MIMEText

from docs.config.email_cfg.config import mail_info as m


class SchedulerError(RuntimeError):
    """Scheduler failure carrying the time at which it occurred."""

    def __init__(self, time):
        self.time = time


class EmailUtil(object):
    """Thin wrapper around smtplib for sending market-alert mails over SSL."""

    def __init__(self):
        # Host, credentials and recipient list come from the project config.
        self.__mail_host = m.mail_host
        self.__mail_user = m.mail_user
        self.__mail_pass = m.mail_pass
        self.__mail_to = m.mail_to

    def send_email(self, email_title, email_content):
        """Send `email_content` as a plain-text mail titled `email_title`.

        Empty or None content is silently ignored.
        """
        if email_content is None or len(email_content) == 0:
            return
        email_struct = MIMEText(email_content, _subtype="plain", _charset="gb2312")
        email_struct["Subject"] = email_title
        email_struct["From"] = "".join(["市场预警", "<", self.__mail_user, ">"])
        email_struct["To"] = ";".join(self.__mail_to)
        # server = smtplib.SMTP() #linux
        # BUG FIX: SMTP_SSL(host, port) connects immediately; the original
        # extra server.connect() call opened a second socket and leaked the
        # first connection.
        server = smtplib.SMTP_SSL(self.__mail_host, 465)
        try:
            server.login(self.__mail_user, self.__mail_pass)
            server.sendmail(
                email_struct["From"],
                self.__mail_to,
                email_struct.as_string())
        finally:
            # Release the connection even when login/sendmail raises.
            server.close()


def send_email(err_info, email_title="市场预警测试邮件"):
    """Module-level convenience wrapper: send `err_info` as an alert mail."""
    email_content = err_info
    email_util = EmailUtil()
    email_util.send_email(email_title, email_content)


if __name__ == '__main__':
    send_email("2222")
[ "1125191117@qq.com" ]
1125191117@qq.com
5c6efe87ee9b93f8027bf4a15335244acf89f525
ae2f3356ab79b77090f8eb927f692c23ee070278
/SMA_SES_DES.py
6ae165e143cee8c29eb017cdeffa048c74e8509c
[ "MIT" ]
permissive
ImPHX13/Demand-Forecasting
5cfdbfdd712dc23834f702e347b39bcdf23d1d3d
078e58fed6fdd59e8fbae69e8f54d01e784d4be7
refs/heads/master
2022-11-25T22:07:23.255490
2022-11-18T04:26:40
2022-11-18T04:26:40
265,248,188
2
0
null
null
null
null
UTF-8
Python
false
false
5,881
py
#!/usr/bin/env python
# coding: utf-8
# Demand-forecasting notebook export: stationarity checks, decomposition,
# ACF/PACF analysis and an ARIMA fit on a daily quantity series.

import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()

#Import dataset (expects a 'Date' column in day-first d/m/y format)
df = pd.read_csv('data.csv',parse_dates=True, dayfirst=True)
df['Date'] = pd.to_datetime(df['Date'], format='%d/%m/%y')
print(df.dtypes)
df.head()
df=df.set_index('Date')
df.index

#Create a timeseseries
ts=df['Quantity']
ts.head()

#Rolling mean and standard deviation calculation to check for stationarity
rolling_mean = ts.rolling(window = 5).mean()
rolling_std = ts.rolling(window = 5).std()
plt.plot(ts, color = 'blue', label = 'Original')
plt.plot(rolling_mean, color = 'red', label = 'Rolling Mean')
plt.plot(rolling_std, color = 'black', label = 'Rolling Std')
plt.legend(loc = 'best')
plt.title('Rolling Mean & Rolling Standard Deviation')
plt.show()

#ADF test for checking stationarity of timeseries
result = adfuller(ts)
print('ADF Statistic: {}'.format(result[0]))
print('p-value: {}'.format(result[1]))
print('Critical Values:')
for key, value in result[4].items():
    print('\t{}: {}'.format(key, value))

#Timeseries log transformation
ts_log = np.log(ts)
plt.plot(ts_log)
result = adfuller(ts_log)
print('ADF Statistic: {}'.format(result[0]))
print('p-value: {}'.format(result[1]))
print('Critical Values:')
for key, value in result[4].items():
    print('\t{}: {}'.format(key, value))

#Function for ADF test
def get_stationarity(timeseries):
    """Plot rolling mean/std of `timeseries` and print its ADF statistics."""
    rolling_mean = timeseries.rolling(window=5).mean()
    rolling_std = timeseries.rolling(window=5).std()
    original = plt.plot(timeseries, color='blue', label='Original')
    mean = plt.plot(rolling_mean, color='red', label='Rolling Mean')
    std = plt.plot(rolling_std, color='black', label='Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean & Standard Deviation')
    plt.show(block=False)
    result = adfuller(timeseries)
    print('ADF Statistic: {}'.format(result[0]))
    print('p-value: {}'.format(result[1]))
    print('Critical Values:')
    for key, value in result[4].items():
        print('\t{}: {}'.format(key, value))

rolling_mean = ts_log.rolling(window=5).mean()
ts_log_minus_mean = ts_log - rolling_mean
ts_log_minus_mean.dropna(inplace=True)
get_stationarity(ts_log_minus_mean)

#Exponential Decay
rolling_mean_exp_decay = ts_log.ewm(halflife=5, min_periods=0, adjust=True).mean()
ts_log_exp_decay = ts_log - rolling_mean_exp_decay
ts_log_exp_decay.dropna(inplace=True)
get_stationarity(ts_log_exp_decay)

#Timeseries log shifted to make it stationary
ts_log_shift = ts_log - ts_log.shift()
ts_log_shift.dropna(inplace=True)
get_stationarity(ts_log_shift)

#Timeseries log differenced to make it stationary
ts_log_diff = ts_log - ts_log.shift()
plt.plot(ts_log_diff)
ts_log_diff.dropna(inplace=True)
get_stationarity(ts_log_diff)

#Seasonal Decomposition to check for seasonality and trends
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(ts_log,freq=7)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
plt.subplot(411)
plt.plot(ts_log, label='Original')
plt.legend(loc='best')
plt.subplot(412)
plt.plot(trend, label='Trend')
plt.legend(loc='best')
plt.subplot(413)
plt.plot(seasonal,label='Seasonality')
plt.legend(loc='best')
plt.subplot(414)
plt.plot(residual, label='Residuals')
plt.legend(loc='best')
plt.tight_layout()
ts_log_decompose = residual
ts_log_decompose.dropna(inplace=True)
get_stationarity(ts_log_decompose)

#ACF and PACF plots to find p and q values
from statsmodels.tsa.stattools import acf, pacf
lag_acf = acf(ts_log_diff, nlags=20)
lag_pacf = pacf(ts_log_diff, nlags=20, method='ols')
plt.subplot(121)
plt.plot(lag_acf)
plt.axhline(y=0,linestyle='--',color='gray')
# +/- 1.96/sqrt(N) are the 95% confidence bounds.
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.title('Autocorrelation Function')
plt.subplot(122)
plt.plot(lag_pacf)
plt.axhline(y=0,linestyle='--',color='gray')
plt.axhline(y=-1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.axhline(y=1.96/np.sqrt(len(ts_log_diff)),linestyle='--',color='gray')
plt.title('Partial Autocorrelation Function')
plt.tight_layout()

#Gridsearch for ideal p,q parameters based on lowest AIC value
import statsmodels.api as sm
resDiff = sm.tsa.arma_order_select_ic(ts_log, max_ar=7, max_ma=7, ic='aic', trend='c')
print('ARMA(p,q) =',resDiff['aic_min_order'],'is the best.')

#Fitting ARIMA model from the obtained (p,d,q) values
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(ts_log, order=(1, 1, 0))
results_ARIMA = model.fit(disp=-1)
plt.plot(ts_log_diff)
plt.plot(results_ARIMA.fittedvalues, color='red')
predictions_ARIMA_diff = pd.Series(results_ARIMA.fittedvalues, copy=True)
print(predictions_ARIMA_diff.head())

#Bring back the predictions to original scale
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
print(predictions_ARIMA_diff_cumsum.head())
# BUG FIX: `.ix` was removed from pandas (>=1.0); `.iloc[0]` is the
# backward-compatible positional equivalent.
predictions_ARIMA_log = pd.Series(ts_log.iloc[0], index=ts_log.index)
predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0)
predictions_ARIMA_log.head()

#Plot of Actual vs Forecasted values
predictions_ARIMA = np.exp(predictions_ARIMA_log)
plt.plot(ts)
plt.plot(predictions_ARIMA)
plt.title('ARIMA MAPE: %.4f'% np.mean(np.abs(predictions_ARIMA-ts)/np.abs(ts)))

#RMSE and MAPE calculations
mape = np.mean(np.abs(predictions_ARIMA - ts)/np.abs(ts))
rmse = np.mean((predictions_ARIMA - ts)**2)**.5
print(mape)
print(rmse)

#Summary of ARIMA model
results_ARIMA.summary()
[ "noreply@github.com" ]
ImPHX13.noreply@github.com
2cc9faf3e8e17c9e733a3ce6a37951dfcd9caabb
5602c3572852f8574dff7173fd19c32c48520b28
/rigify/rigs/basic/raw_copy.py
2ebbe13382bfcfe90dd4692ae3038b58086e1ad6
[]
no_license
Dancingbubble/blender-addons
58be022f1d8f712ca83acdbd765336e74074a14d
a6ee5b0e6f6a945c33b6159fd0536d548b23ccb6
refs/heads/master
2023-02-19T22:19:53.125675
2021-01-01T20:54:21
2021-01-01T20:54:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,521
py
#====================== BEGIN GPL LICENSE BLOCK ======================
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#======================= END GPL LICENSE BLOCK ========================

# <pep8 compliant>

import bpy

from ...utils.naming import strip_org, strip_prefix, choose_derived_bone, is_control_bone
from ...utils.mechanism import copy_custom_properties_with_ui
from ...utils.widgets import layout_widget_dropdown, create_registered_widget

from ...base_rig import BaseRig
from ...base_generate import SubstitutionRig

from itertools import repeat

'''
Due to T80764, bone name handling for 'limbs.raw_copy' was hard-coded in generate.py

class Rig(SubstitutionRig):
    """ A raw copy rig, preserving the metarig bone as is, without the ORG prefix. """

    def substitute(self):
        # Strip the ORG prefix during the rig instantiation phase
        new_name = strip_org(self.base_bone)
        new_name = self.generator.rename_org_bone(self.base_bone, new_name)

        return [ self.instantiate_rig(InstanceRig, new_name) ]
'''

class RelinkConstraintsMixin:
    """ Utilities for constraint relinking.
    """

    def relink_bone_constraints(self, bone_name):
        """Relink every constraint on bone_name, if relinking is enabled via params."""
        if self.params.relink_constraints:
            for con in self.get_bone(bone_name).constraints:
                self.relink_single_constraint(con)

    # When True, constraints without an '@' marker in their name are still
    # relinked (with an empty spec, i.e. target kept as-is). Subclasses may override.
    relink_unmarked_constraints = False

    def relink_single_constraint(self, con):
        """Relink one constraint based on '@' markers embedded in its name.

        A name like 'base@bonename' relinks to 'bonename'; multiple '@' parts
        are only meaningful for the Armature constraint (one per target).
        """
        if self.params.relink_constraints:
            parts = con.name.split('@')

            if len(parts) > 1:
                self.relink_constraint(con, parts[1:])
            elif self.relink_unmarked_constraints:
                self.relink_constraint(con, [''])

    def relink_move_constraints(self, from_bone, to_bone, *, prefix=''):
        """Move constraints whose names start with prefix from one bone to another."""
        if self.params.relink_constraints:
            src = self.get_bone(from_bone).constraints
            dest = self.get_bone(to_bone).constraints

            # Iterate over a snapshot since the collection is mutated in the loop.
            for con in list(src):
                if con.name.startswith(prefix):
                    dest.copy(con)
                    src.remove(con)

    def relink_bone_parent(self, bone_name):
        """Replace the parent of bone_name per the 'parent_bone' parameter.

        Returns the new parent name, or None when reparented to nothing;
        returns implicitly (None) when no parent spec is configured.
        """
        if self.params.relink_constraints:
            self.generator.disable_auto_parent(bone_name)

            parent_spec = self.params.parent_bone
            if parent_spec:
                old_parent = self.get_bone_parent(bone_name)
                new_parent = self.find_relink_target(parent_spec, old_parent or '') or None
                self.set_bone_parent(bone_name, new_parent)
                return new_parent

    def relink_constraint(self, con, specs):
        """Apply relink specs to one constraint's target(s).

        Armature constraints accept one spec per target (or a single spec
        applied to all); every other constraint type takes exactly one spec.
        Only targets pointing at this rig's armature object are rewritten.
        """
        if con.type == 'ARMATURE':
            if len(specs) == 1:
                # One spec for all targets.
                specs = repeat(specs[0])
            elif len(specs) != len(con.targets):
                self.raise_error("Constraint {} actually has {} targets", con.name, len(con.targets))

            for tgt, spec in zip(con.targets, specs):
                if tgt.target == self.obj:
                    tgt.subtarget = self.find_relink_target(spec, tgt.subtarget)

        elif hasattr(con, 'subtarget'):
            if len(specs) > 1:
                self.raise_error("Only the Armature constraint can have multiple '@' targets: {}", con.name)

            if con.target == self.obj:
                con.subtarget = self.find_relink_target(specs[0], con.subtarget)

    def find_relink_target(self, spec, old_target):
        """Resolve a relink spec to a concrete bone name.

        '' keeps the old target; 'CTRL'/'DEF'/'MCH' looks up the bone derived
        from old_target with that prefix; anything else is taken as a literal
        bone name (validated against the pose bones).
        """
        if spec == '':
            return old_target
        elif spec in {'CTRL', 'DEF', 'MCH'}:
            result = choose_derived_bone(self.generator, old_target, spec.lower())

            if not result:
                # Fall back to a search not restricted by owner rig.
                result = choose_derived_bone(self.generator, old_target, spec.lower(), by_owner=False)

            if not result:
                self.raise_error("Cannot find derived {} bone of bone '{}' for relinking", spec, old_target)

            return result
        else:
            if spec not in self.obj.pose.bones:
                self.raise_error("Cannot find bone '{}' for relinking", spec)

            return spec

    @classmethod
    def add_relink_constraints_params(self, params):
        """Register the relink-related rig parameters on the params group."""
        params.relink_constraints = bpy.props.BoolProperty(
            name        = "Relink Constraints",
            default     = False,
            description = "For constraints with names formed like 'base@bonename', use the part after '@' as the new subtarget after all bones are created. Use '@CTRL', '@DEF' or '@MCH' to simply replace the prefix"
        )

        params.parent_bone = bpy.props.StringProperty(
            name        = "Parent",
            default     = "",
            description = "Replace the parent with a different bone after all bones are created. Using simply CTRL, DEF or MCH will replace the prefix instead"
        )

    @classmethod
    def add_relink_constraints_ui(self, layout, params):
        """Draw the relink options in the rig type parameter panel."""
        r = layout.row()
        r.prop(params, "relink_constraints")

        if params.relink_constraints:
            r = layout.row()
            r.prop(params, "parent_bone")

            layout.label(text="Constraint names have special meanings.", icon='ERROR')


class Rig(BaseRig, RelinkConstraintsMixin):
    """A raw-copy rig: keeps the metarig bone as-is (no ORG prefix is added
    for this type — see the T80764 note above), optionally relinking its
    constraints and parent, and exposing custom properties on control bones."""

    def find_org_bones(self, pose_bone):
        return pose_bone.name

    def initialize(self):
        self.relink = self.params.relink_constraints

    def parent_bones(self):
        self.relink_bone_parent(self.bones.org)

    def configure_bones(self):
        org = self.bones.org
        if is_control_bone(org):
            # Expose the bone's custom properties in the generated rig UI.
            copy_custom_properties_with_ui(self, org, org, ui_controls=[org])

    def rig_bones(self):
        self.relink_bone_constraints(self.bones.org)

    def generate_widgets(self):
        org = self.bones.org
        widget = self.params.optional_widget_type
        if widget and is_control_bone(org):
            create_registered_widget(self.obj, org, widget)

    @classmethod
    def add_parameters(self, params):
        self.add_relink_constraints_params(params)

        params.optional_widget_type = bpy.props.StringProperty(
            name        = "Widget Type",
            default     = '',
            description = "Choose the type of the widget to create"
        )

    @classmethod
    def parameters_ui(self, layout, params):
        col = layout.column()
        col.label(text='This rig type does not add the ORG prefix.')
        col.label(text='Manually add ORG, MCH or DEF as needed.')

        self.add_relink_constraints_ui(layout, params)

        # Widget selection only makes sense for control bones.
        pbone = bpy.context.active_pose_bone
        if pbone and is_control_bone(pbone.name):
            layout_widget_dropdown(layout, params, "optional_widget_type")

#add_parameters = InstanceRig.add_parameters
#parameters_ui = InstanceRig.parameters_ui


def create_sample(obj):
    """ Create a sample metarig for this rig type.
    """
    # generated by rigify.utils.write_metarig
    bpy.ops.object.mode_set(mode='EDIT')
    arm = obj.data

    bones = {}

    bone = arm.edit_bones.new('DEF-bone')
    bone.head[:] = 0.0000, 0.0000, 0.0000
    bone.tail[:] = 0.0000, 0.0000, 0.2000
    bone.roll = 0.0000
    bone.use_connect = False
    bones['DEF-bone'] = bone.name

    bpy.ops.object.mode_set(mode='OBJECT')
    pbone = obj.pose.bones[bones['DEF-bone']]
    pbone.rigify_type = 'basic.raw_copy'
    pbone.lock_location = (False, False, False)
    pbone.lock_rotation = (False, False, False)
    pbone.lock_rotation_w = False
    pbone.lock_scale = (False, False, False)
    pbone.rotation_mode = 'QUATERNION'

    bpy.ops.object.mode_set(mode='EDIT')
    # Deselect everything, then select only the newly created sample bones.
    for bone in arm.edit_bones:
        bone.select = False
        bone.select_head = False
        bone.select_tail = False
    for b in bones:
        bone = arm.edit_bones[bones[b]]
        bone.select = True
        bone.select_head = True
        bone.select_tail = True
        arm.edit_bones.active = bone

    return bones
[ "angavrilov@gmail.com" ]
angavrilov@gmail.com
4fcd5f7a94f65e8208038c8f3ad8ad80fbf84495
0e531fa04060ca129a1c3323c7c403a373e6c00d
/pca2tracks.py
144f71e0fac0d9de660813931366a2c86113f2fa
[]
no_license
zhipenglu/xist_structure
6b71f4f718991d22d00d5b0fc8008b6e97581b62
0dfb910d0b303fc94d421c66bb2e484b8e72297e
refs/heads/master
2020-04-22T23:17:33.781817
2019-02-14T18:22:33
2019-02-14T18:22:33
170,736,269
0
0
null
null
null
null
UTF-8
Python
false
false
1,819
py
""" pca2tracks.py This script converts the PCA analysis results for RIP/CLIP enrichment to a minimal number of tracks for display on IGV. This approach provides more useful information than the heatmap. The input file is *pca_array.pc.txt, and output are the first few tracks that explain the most variance (e.g. *pc1.bedgraph). Input format: Interval NAME MEAN PC1 PC2 ... hsXIST_0_100 hsXIST_0_100 value value value ... Example: cd /Users/lu/Documents/chang/eCLIP/fripsum python ~/Documents/scripts/pca2tracks.py \ frip_gap_hsXIST_geometric_100nt_pca_array.pc.txt 7 \ frip_gap_hsXIST_geometric_100nt_pca_array For the PCA results from gene level, need to transpose the matrix############### python ~/Documents/scripts/pca2tracks.py \ frip_gap_hsXIST_geometric_100nt_pca_gene.pc.txt 7 array \ frip_gap_hsXIST_geometric_100nt_pca_gene """ import sys if len(sys.argv) < 4: print "Usage: python pca2tracks.py pca_file track_number dim output_prefix" print "dim: gene or array" sys.exit() pcafile = open(sys.argv[1], 'r') ntracks = int(sys.argv[2]) dimension = sys.argv[3] outputprefix = sys.argv[4] pcadata = pcafile.readlines()[1:] #input as a list, remove the header line pcamatrix = [line.strip('\n').split() for line in pcadata] meanbedgraph = open(outputprefix + "_mean.bedgraph", 'w') #output mean bedgraph meanout = '' for row in pcamatrix: meanout += ('\t'.join(row[0].split('_') + row[2:3]) +'\n') meanbedgraph.write(meanout) meanbedgraph.close() for i in range(ntracks): #output major principal component tracks pctrack = open(outputprefix + '_pc' + str(i+1) + '.bedgraph', 'w') pctrackout = '' for row in pcamatrix: pctrackout += ('\t'.join(row[0].split('_') + row[3+i:4+i]) + '\n') pctrack.write(pctrackout) pctrack.close() pcafile.close()
[ "noreply@github.com" ]
zhipenglu.noreply@github.com
c82afac573bf870007f2a26a2677f45d8e51d99c
04ae1836b9bc9d73d244f91b8f7fbf1bbc58ff29
/1233/solution.py
c47461e1a3ab14eb3051ffb577ac9f8ff8d4de5e
[]
no_license
zhangruochi/leetcode
6f739fde222c298bae1c68236d980bd29c33b1c6
cefa2f08667de4d2973274de3ff29a31a7d25eda
refs/heads/master
2022-07-16T23:40:20.458105
2022-06-02T18:25:35
2022-06-02T18:25:35
78,989,941
14
6
null
null
null
null
UTF-8
Python
false
false
1,365
py
class Node(): def __init__(self, str_): self.str_ = str_ def __eq__(self, other): return self.str_ == other.str_ def __repr__(self): return self.str_ def __repr__(self): return self.str_ def __hash__(self): return hash(self.str_) def __call__(self,str_): return Node(str_) class Solution: def removeSubfolders(self, folder: List[str]) -> List[str]: trie = {} res = [] def transfrom(f): return list(map(Node, f.strip("/").split("/"))) folder = list( map(transfrom, folder)) print(folder) for f in folder: trie_pointer = trie for char in f: trie_pointer = trie_pointer.setdefault(char, {}) trie_pointer["#"] = "#" def combine(path): return "/"+"/".join([str(node) for node in path]) def dfs(trie, path): nonlocal res if "#" in trie: res.append(combine(path)) return for char in trie: path.append(char) dfs(trie[char],path) path.pop() dfs(trie, []) return res
[ "zrc720@gmail.com" ]
zrc720@gmail.com
b55e30d6f12b49a52c2c808328cfba62b35668cb
71711bd2c11a3c0cbbc99bcfa78384d005e07828
/puct_mcts/datasets.py
f2aa99600a387a45d927073b70ec24d3e7ff95c7
[ "BSD-3-Clause" ]
permissive
kastnerkyle/exploring_species_counterpoint
9365b2485cd227e375521f769ba1bfbd62c7b629
dda762463e64036adeba7efd46c51daaaf906019
refs/heads/master
2021-09-13T10:55:03.096300
2018-04-28T19:00:21
2018-04-28T19:00:21
103,225,538
0
0
null
null
null
null
UTF-8
Python
false
false
14
py
../datasets.py
[ "kastnerkyle@gmail.com" ]
kastnerkyle@gmail.com
fa4650d4a8f4d6e62f671e455d2f45eaa553ced4
d9b0e4be5b29c6bdb806eeb2b6df340aa26d1152
/payloads/shop2.py
016c15fba07f2ae5dd9a4a3ca6bbe7da515a824f
[ "MIT" ]
permissive
opoudel/sculptbf-bot
ba5a4fb3550ffd51620d38d5171413cb89fbe136
3d9307bc4506844c8a693db68217d37fe2e76130
refs/heads/master
2020-12-02T11:34:19.539736
2017-07-21T15:39:20
2017-07-21T15:39:20
96,653,410
0
0
null
null
null
null
UTF-8
Python
false
false
3,652
py
# -*- coding: utf-8 -*- import json def shop(recipient_id): return json.dumps({ "recipient": { "id": recipient_id }, "message": { "attachment": { "type": "template", "payload": { "template_type": "list", "top_element_style": "compact", "elements": [ { "title": "Lypo - Spheric Vitamin C", "image_url": "https://sculptbf-bot.herokuapp.com/static/lypo.png", "subtitle": "Price: $48", "buttons": [ { "type": "web_url", "title": "Buy", "url": "http://sculptbf.co.nz/index.php/product/lypo-spheric-vit-c/", "webview_height_ratio": "tall" } ] }, { "title": "ASAP Moisturizer Sun Screen 50+", "image_url": "https://sculptbf-bot.herokuapp.com/static/asap.png", "subtitle": "Price: $65", "buttons": [ { "type": "web_url", "title": "Buy", "url": "http://sculptbf.co.nz/index.php/product/asap-moisturizer-sun-screen-50/", "webview_height_ratio": "tall" } ] }, { "title": "Cosmedix Purity Clean", "image_url": "https://sculptbf-bot.herokuapp.com/static/cosmedix.png", "subtitle": "Price: Not in Stock!!", "buttons": [ { "type": "web_url", "title": "Buy", "url": "http://sculptbf.co.nz/index.php/product/cosmedix-purity-clean/", "webview_height_ratio": "tall" } ] }, { "title": "Skin Medica TNS Essential Serum", "image_url": "https://sculptbf-bot.herokuapp.com/static/skin_medica.png", "subtitle": "Price: $250", "buttons": [ { "type": "web_url", "title": "Buy", "url": "http://sculptbf.co.nz/index.php/product/skin-medica-tns-essential-serum/", "webview_height_ratio": "tall" } ] } ], "buttons": [ { "title": "View More", "type": "postback", "payload": "MORE_SHOPPING_3" } ] } } } })
[ "opoudel@me.com" ]
opoudel@me.com
fe5b26c41e27f960c84721814d918ba912d334fe
2aee7676daad10456a34fe23ce952966c05718ff
/regular_expression/q3.py
0b17a5f0733331c189c670e8f860c6394bec5ba8
[]
no_license
sharonsabu/pythondjango2021
405b45bc08717301315016d7ccb9b4a03c631475
1dfb60b92296bc85248bad029a3fd370745623a6
refs/heads/master
2023-04-18T19:39:40.378956
2021-05-02T05:48:56
2021-05-02T05:48:56
333,471,566
0
0
null
null
null
null
UTF-8
Python
false
false
194
py
from re import * pattern="a{2,3}" #checks for min 2 and max 3 no of "a" matcher=finditer(pattern,"aaaacaabbaaab") for match in matcher: print(match.start()) print(match.group())
[ "sharonsabu100@gmail.com" ]
sharonsabu100@gmail.com
aa300723ff8030d337ad1c65d8905af0053a9077
760578355ed00ce758591b9a0b4929a3105de530
/query/protocols/Gamespy.py
ed4437c4d9ffc4c0f2cfdf96f9a4022703cf0062
[ "MIT" ]
permissive
SanSource/GameQuery
6c385e7607d7ad7fca0782ef8eea839f838268a7
b10845bffc872e9ce3d3d5d4016fd1905b3b8b0c
refs/heads/master
2020-12-29T16:07:39.179677
2017-08-20T22:59:33
2017-08-20T22:59:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,802
py
from ..connection import BaseUDP
from ..helpers import async_raise_on_timeout
from ..parser.helpers import QueryBytes


class Gamespy1(BaseUDP):
    """Query client for the GameSpy v1 UDP status protocol."""

    @async_raise_on_timeout
    async def get_info(self):
        """Send a '\\info\\' request and return the parsed key/value pairs."""
        reader, writer = await self._connection.connect()
        query = QueryBytes()
        query.append(b'\\info\\', None)
        writer.write(query.buffer)
        return self.parse_info(QueryBytes(await reader.readline()))

    def parse_info(self, response):
        """Split a backslash-delimited response into (key, value) byte pairs.

        The leading byte is skipped; even fields are keys, odd fields values.
        """
        list_info = list()
        list_split = response.buffer[1:].split(b'\\')
        list_info = list(zip(list_split[::2], list_split[1::2]))
        return list_info


class Gamespy2(BaseUDP):
    """Query client for the GameSpy v2 UDP status protocol."""

    @async_raise_on_timeout
    async def get_info(self):
        """Send a v2 status request (magic 'CORY' request id) and parse the reply."""
        reader, writer = await self._connection.connect()
        query = QueryBytes()
        query.append(b'\xFE\xFD\x00\x43\x4F\x52\x59\xFF\x00\x00', None)
        writer.write(query.buffer)
        return self.parse_info(QueryBytes(await reader.readline()))

    def parse_info(self, response):
        """Parse NUL-delimited key/value pairs after the 5-byte reply header."""
        # if response[0:5] != b'\x00CORY':
        # list_commands = response[5:].split(b'\x00\x00\x00')[0].split(b'\x00')
        list_info = list()
        list_split = response.buffer[5:].split(b'\x00\x00\x00')[0].split(b'\x00')
        list_info = list(zip(list_split[::2], list_split[1::2]))
        return list_info


class Gamespy3(BaseUDP):
    """Query client for the GameSpy v3 UDP status protocol (no challenge)."""

    # Gamespy4 flips this to True to perform the challenge handshake first.
    is_challenge = False

    @async_raise_on_timeout
    async def get_info(self):
        """Request server status, optionally performing the challenge handshake.

        NOTE(review): reconstructed from a collapsed source — the exact extent
        of the challenge branch (which statements run only when is_challenge
        is True) should be confirmed against the upstream repository.
        """
        reader, writer = await self._connection.connect()
        timestamp = b'\x04\x05\x06\x07'  # timestamp
        query = QueryBytes()
        query.append(b'\xFE\xFD\x09', None)  # 0x09 = challenge request opcode
        query.append(timestamp, None)
        if self.is_challenge:
            # Ask for a challenge token, then echo it back in the real query.
            writer.write(query.buffer)
            response = QueryBytes(await reader.readline())
            if response.buffer[:5] != b'\x09' + timestamp:
                raise Exception()  # fixme
            challange_int = int(response.buffer[5:-1]).to_bytes(4, 'big', signed=True)
            query.append(challange_int, None)
        query.append(b'\xFF\x00\x00\x01', None)
        # Rewrite the opcode byte (offset 2) from 0x09 to 0x00 (status query).
        query.set(b'\x00', QueryBytes.BIG_TYPE_BYTE, 1, offset=2)
        writer.write(query.buffer)
        return self.parse_info(QueryBytes(await reader.readline()))

    def parse_info(self, response):
        """Parse NUL-delimited key/value pairs after the 16-byte reply header."""
        # if response[0] != 0x00 or response[1:5] != timestamp or response[15] != 0x00:
        # list_commands = response
        # list_commands.remove('p1073741829') # fix for Unreal Tournament 3 because he have invalid data ?
        list_info = list()
        list_split = response.buffer[16:-2].split(b'\x00\x00\x01')[0].split(b'\x00')
        list_info = list(zip(list_split[::2], list_split[1::2]))
        return list_info


class Gamespy4(Gamespy3):
    """GameSpy v4: identical to v3 but with the challenge handshake enabled."""

    is_challenge = True
[ "patryk.sondej@gmail.com" ]
patryk.sondej@gmail.com
d930901a91772e4d664bb3b770867aa984a3e77f
08aadcd04337ee45b01e6bd7f5cc9d87cd433bfd
/basic_projects/2D lists and nested loops.py
3098b812d793bcbe6b1a711c279db30a87fbdf59
[]
no_license
AnthonyPerugini/Training_projects
eb12acc36f0c2562ea9da6ca76221ea32bd73d38
bf4d8027740abedbcce296675a7484fae5e1095f
refs/heads/master
2021-03-10T04:43:14.473043
2020-03-16T18:31:32
2020-03-16T18:31:32
246,419,607
0
0
null
null
null
null
UTF-8
Python
false
false
137
py
number_grid = [ [1, 2, 3], [4, 5, 6], [7, 8, 9], [0] ] for row in number_grid: for col in row: print(col)
[ "Anthony.r.perugini@gmail.com" ]
Anthony.r.perugini@gmail.com
b71909c9661e6baf2be15d0e61a3055456d35d1a
290b4c7ca63a975b38e55018cc38bd2766e14639
/ORC_app/jni-build/jni/include/tensorflow/tensorflow.bzl
bb0e46adddd64bf4473131cda060e9cc6eee198f
[ "MIT" ]
permissive
luoabd/EMNIST-ORC
1233c373abcc3ed237c2ec86491b29c0b9223894
8c2d633a9b4d5214e908550812f6a2489ba9eb72
refs/heads/master
2022-12-27T14:03:55.046933
2020-01-16T15:20:04
2020-01-16T15:20:04
234,325,497
0
1
MIT
2022-12-11T13:32:42
2020-01-16T13:25:23
C++
UTF-8
Python
false
false
20,524
bzl
# -*- Python -*- # Parse the bazel version string from `native.bazel_version`. def _parse_bazel_version(bazel_version): # Remove commit from version. version = bazel_version.split(" ", 1)[0] # Split into (release, date) parts and only return the release # as a tuple of integers. parts = version.split('-', 1) # Turn "release" into a tuple of integers version_tuple = () for number in parts[0].split('.'): version_tuple += (int(number),) return version_tuple # Check that a specific bazel version is being used. def check_version(bazel_version): if "bazel_version" in dir(native) and native.bazel_version: current_bazel_version = _parse_bazel_version(native.bazel_version) minimum_bazel_version = _parse_bazel_version(bazel_version) if minimum_bazel_version > current_bazel_version: fail("\nCurrent Bazel version is {}, expected at least {}\n".format( native.bazel_version, bazel_version)) pass # Return the options to use for a C++ library or binary build. # Uses the ":optmode" config_setting to pick the options. load("//tensorflow/core:platform/default/build_config_root.bzl", "tf_cuda_tests_tags") # List of proto files for android builds def tf_android_core_proto_sources(): return ["//tensorflow/core:" + p for p in tf_android_core_proto_sources_relative()] # As tf_android_core_proto_sources, but paths relative to # //third_party/tensorflow/core. 
def tf_android_core_proto_sources_relative():
  return [
      "example/example.proto",
      "example/feature.proto",
      "framework/allocation_description.proto",
      "framework/attr_value.proto",
      "framework/device_attributes.proto",
      "framework/function.proto",
      "framework/graph.proto",
      "framework/kernel_def.proto",
      "framework/log_memory.proto",
      "framework/op_def.proto",
      "framework/step_stats.proto",
      "framework/summary.proto",
      "framework/tensor.proto",
      "framework/tensor_description.proto",
      "framework/tensor_shape.proto",
      "framework/tensor_slice.proto",
      "framework/types.proto",
      "framework/versions.proto",
      "lib/core/error_codes.proto",
      "protobuf/config.proto",
      "protobuf/saver.proto",
      "util/memmapped_file_system.proto",
      "util/saved_tensor_slice.proto",
      "util/test_log.proto",
  ]

# Returns the list of pb.h headers that are generated for
# tf_android_core_proto_sources().
def tf_android_core_proto_headers():
  return ["//tensorflow/core/" + p.replace(".proto", ".pb.h")
          for p in tf_android_core_proto_sources_relative()]

# Select `a` when building with CUDA (//third_party/gpus/cuda config), else `b`.
def if_cuda(a, b=[]):
  return select({
      "//third_party/gpus/cuda:cuda_crosstool_condition": a,
      "//conditions:default": b,
  })

# Select `a` when targeting android_arm, else `b`.
def if_android_arm(a, b=[]):
  return select({
      "//tensorflow:android_arm": a,
      "//conditions:default": b,
  })

# Common C++ compiler options for all TensorFlow targets, varied by
# platform (android/darwin/default) and CUDA configuration.
def tf_copts():
  return (["-fno-exceptions", "-DEIGEN_AVOID_STL_ARRAY",] +
          if_cuda(["-DGOOGLE_CUDA=1"]) +
          if_android_arm(["-mfpu=neon"]) +
          select({"//tensorflow:android": [
                    "-std=c++11",
                    "-DMIN_LOG_LEVEL=0",
                    "-DTF_LEAN_BINARY",
                    "-O2",
                  ],
                  "//tensorflow:darwin": [],
                  "//conditions:default": ["-pthread"]}))

# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate a library for that file.
def tf_gen_op_libs(op_lib_names):
  # Make library out of each op so it can also be used to generate wrappers
  # for various languages.
  for n in op_lib_names:
    native.cc_library(name=n + "_op_lib",
                      copts=tf_copts(),
                      srcs=["ops/" + n + ".cc"],
                      deps=(["//tensorflow/core:framework"]),
                      visibility=["//visibility:public"],
                      alwayslink=1,
                      linkstatic=1,)

# Generates <out_ops_file>.h/.cc C++ op wrappers for the op lib `name`
# by building and running a one-off generator binary.
def tf_gen_op_wrapper_cc(name, out_ops_file, pkg=""):
  # Construct an op generator binary for these ops.
  tool = out_ops_file + "_gen_cc"
  native.cc_binary(
      name = tool,
      copts = tf_copts(),
      linkopts = ["-lm"],
      linkstatic = 1,   # Faster to link this one-time-use binary dynamically
      deps = (["//tensorflow/cc:cc_op_gen_main",
               pkg + ":" + name + "_op_lib"])
  )

  # Run the op generator.
  if name == "sendrecv_ops":
    # sendrecv ops are internal-only; tell the generator to include them.
    include_internal = "1"
  else:
    include_internal = "0"
  native.genrule(
      name=name + "_genrule",
      outs=[out_ops_file + ".h", out_ops_file + ".cc"],
      tools=[":" + tool],
      cmd=("$(location :" + tool + ") $(location :" + out_ops_file + ".h) " +
           "$(location :" + out_ops_file + ".cc) " + include_internal))

# Given a list of "op_lib_names" (a list of files in the ops directory
# without their .cc extensions), generate individual C++ .cc and .h
# files for each of the ops files mentioned, and then generate a
# single cc_library called "name" that combines all the
# generated C++ code.
#
# For example, for:
#  tf_gen_op_wrappers_cc("tf_ops_lib", [ "array_ops", "math_ops" ])
#
#
# This will ultimately generate ops/* files and a library like:
#
# cc_library(name = "tf_ops_lib",
#            srcs = [ "ops/array_ops.cc",
#                     "ops/math_ops.cc" ],
#            hdrs = [ "ops/array_ops.h",
#                     "ops/math_ops.h" ],
#            deps = [ ... ])
def tf_gen_op_wrappers_cc(name,
                          op_lib_names=[],
                          other_srcs=[],
                          other_hdrs=[],
                          pkg=""):
  subsrcs = other_srcs
  subhdrs = other_hdrs
  for n in op_lib_names:
    tf_gen_op_wrapper_cc(n, "ops/" + n, pkg=pkg)
    subsrcs += ["ops/" + n + ".cc"]
    subhdrs += ["ops/" + n + ".h"]

  native.cc_library(name=name,
                    srcs=subsrcs,
                    hdrs=subhdrs,
                    deps=["//tensorflow/core:core_cpu"],
                    copts=tf_copts(),
                    alwayslink=1,)

# Invoke this rule in .../tensorflow/python to build the wrapper library.
def tf_gen_op_wrapper_py(name, out=None, hidden=[], visibility=None, deps=[], require_shape_functions=False): # Construct a cc_binary containing the specified ops. tool_name = "gen_" + name + "_py_wrappers_cc" if not deps: deps = ["//tensorflow/core:" + name + "_op_lib"] native.cc_binary( name = tool_name, linkopts = ["-lm"], copts = tf_copts(), linkstatic = 1, # Faster to link this one-time-use binary dynamically deps = (["//tensorflow/core:framework", "//tensorflow/python:python_op_gen_main"] + deps), visibility = ["//tensorflow:internal"], ) # Invoke the previous cc_binary to generate a python file. if not out: out = "ops/gen_" + name + ".py" native.genrule( name=name + "_pygenrule", outs=[out], tools=[tool_name], cmd=("$(location " + tool_name + ") " + ",".join(hidden) + " " + ("1" if require_shape_functions else "0") + " > $@")) # Make a py_library out of the generated python file. native.py_library(name=name, srcs=[out], srcs_version="PY2AND3", visibility=visibility, deps=[ "//tensorflow/python:framework_for_generated_wrappers", ],) # Define a bazel macro that creates cc_test for tensorflow. # TODO(opensource): we need to enable this to work around the hidden symbol # __cudaRegisterFatBinary error. Need more investigations. 
# cc_test wrapper: `name` may be a path/filename like "foo/bar_test.cc";
# slashes become underscores in the target name and the .cc suffix is stripped.
def tf_cc_test(name, deps, linkstatic=0, tags=[], data=[], size="medium",
               suffix="", args=None):
  name = name.replace(".cc", "")
  native.cc_test(name="%s%s" % (name.replace("/", "_"), suffix),
                 size=size,
                 srcs=["%s.cc" % (name)],
                 args=args,
                 copts=tf_copts(),
                 data=data,
                 deps=deps,
                 linkopts=["-lpthread", "-lm"],
                 linkstatic=linkstatic,
                 tags=tags,)

# Declares both a CPU variant (tagged "manual") and a "_gpu"-suffixed
# variant of the test; the GPU variant links the GPU runtime under
# --config=cuda and carries the CUDA test tags.
def tf_cuda_cc_test(name, deps, tags=[], data=[], size="medium"):
  tf_cc_test(name=name,
             deps=deps,
             tags=tags + ["manual"],
             data=data,
             size=size)
  tf_cc_test(name=name,
             suffix="_gpu",
             deps=deps + if_cuda(["//tensorflow/core:gpu_runtime"]),
             linkstatic=if_cuda(1, 0),
             tags=tags + tf_cuda_tests_tags(),
             data=data,
             size=size)

# Create a cc_test for each of the tensorflow tests listed in "tests"
def tf_cc_tests(tests, deps, linkstatic=0, tags=[], size="medium", args=None):
  for t in tests:
    tf_cc_test(t, deps, linkstatic, tags=tags, size=size, args=args)

# As tf_cc_tests, but each test gets CPU + GPU variants (see tf_cuda_cc_test).
def tf_cuda_cc_tests(tests, deps, tags=[], size="medium"):
  for t in tests:
    tf_cuda_cc_test(t, deps, tags=tags, size=size)

# Build defs for TensorFlow kernels

# When this target is built using --config=cuda, a cc_library is built
# that passes -DGOOGLE_CUDA=1 and '-x cuda', linking in additional
# libraries needed by GPU kernels.
def tf_gpu_kernel_library(srcs, copts=[], cuda_copts=[], deps=[], hdrs=[],
                          **kwargs):
  cuda_copts = ["-x", "cuda", "-DGOOGLE_CUDA=1", "-nvcc_options=relaxed-constexpr",
                "-nvcc_options=ftz=true", "--gcudacc_flag=-ftz=true"] + cuda_copts

  native.cc_library(
      srcs = srcs,
      hdrs = hdrs,
      copts = copts + if_cuda(cuda_copts),
      deps = deps + if_cuda([
          "//tensorflow/core:cuda",
          "//tensorflow/core:gpu_lib",
      ]),
      alwayslink=1,
      **kwargs)

def tf_cuda_library(deps=None, cuda_deps=None, copts=None, **kwargs):
  """Generate a cc_library with a conditional set of CUDA dependencies.

  When the library is built with --config=cuda:

  - both deps and cuda_deps are used as dependencies
  - the gcudacc runtime is added as a dependency (if necessary)
  - The library additionally passes -DGOOGLE_CUDA=1 to the list of copts

  Args:
  - cuda_deps: BUILD dependencies which will be linked if and only if:
      '--config=cuda' is passed to the bazel command line.
  - deps: dependencies which will always be linked.
  - copts: copts always passed to the cc_library.
  - kwargs: Any other argument to cc_library.
  """
  if not deps:
    deps = []
  if not cuda_deps:
    cuda_deps = []
  if not copts:
    copts = []

  native.cc_library(
      deps = deps + if_cuda(cuda_deps + ["//tensorflow/core:cuda"]),
      copts = copts + if_cuda(["-DGOOGLE_CUDA=1"]),
      **kwargs)

def tf_kernel_library(name, prefix=None, srcs=None, gpu_srcs=None, hdrs=None,
                      deps=None, alwayslink=1, **kwargs):
  """A rule to build a TensorFlow OpKernel.

  May either specify srcs/hdrs or prefix.  Similar to tf_cuda_library,
  but with alwayslink=1 by default.  If prefix is specified:
    * prefix*.cc (except *.cu.cc) is added to srcs
    * prefix*.h (except *.cu.h) is added to hdrs
    * prefix*.cu.cc and prefix*.h (including *.cu.h) are added to gpu_srcs.
  With the exception that test files are excluded.
  For example, with prefix = "cast_op",
    * srcs = ["cast_op.cc"]
    * hdrs = ["cast_op.h"]
    * gpu_srcs = ["cast_op_gpu.cu.cc", "cast_op.h"]
    * "cast_op_test.cc" is excluded
  With prefix = "cwise_op"
    * srcs = ["cwise_op_abs.cc", ..., "cwise_op_tanh.cc"],
    * hdrs = ["cwise_ops.h", "cwise_ops_common.h"],
    * gpu_srcs = ["cwise_op_gpu_abs.cu.cc", ..., "cwise_op_gpu_tanh.cu.cc",
                  "cwise_ops.h", "cwise_ops_common.h",
                  "cwise_ops_gpu_common.cu.h"]
    * "cwise_ops_test.cc" is excluded
  """
  if not srcs:
    srcs = []
  if not hdrs:
    hdrs = []
  if not deps:
    deps = []

  if prefix:
    # Derive srcs/hdrs/gpu_srcs from the prefix globs described above.
    if native.glob([prefix + "*.cu.cc"], exclude = ["*test*"]):
      if not gpu_srcs:
        gpu_srcs = []
      gpu_srcs = gpu_srcs + native.glob([prefix + "*.cu.cc", prefix + "*.h"],
                                        exclude = ["*test*"])
    srcs = srcs + native.glob([prefix + "*.cc"],
                              exclude = ["*test*", "*.cu.cc"])
    hdrs = hdrs + native.glob([prefix + "*.h"], exclude = ["*test*", "*.cu.h"])

  cuda_deps = ["//tensorflow/core:gpu_lib"]
  if gpu_srcs:
    tf_gpu_kernel_library(
        name = name + "_gpu",
        srcs = gpu_srcs,
        deps = deps,
        **kwargs)
    cuda_deps.extend([":" + name + "_gpu"])
  tf_cuda_library(
      name = name,
      srcs = srcs,
      hdrs = hdrs,
      copts = tf_copts(),
      cuda_deps = cuda_deps,
      linkstatic = 1,   # Needed since alwayslink is broken in bazel b/27630669
      alwayslink = alwayslink,
      deps = deps,
      **kwargs)

def tf_kernel_libraries(name, prefixes, deps=None, **kwargs):
  """Makes one target per prefix, and one target that includes them all."""
  for p in prefixes:
    tf_kernel_library(name=p, prefix=p, deps=deps, **kwargs)
  native.cc_library(name=name,
                    deps=[":" + p for p in prefixes])

# Bazel rules for building swig files.
def _py_wrap_cc_impl(ctx): srcs = ctx.files.srcs if len(srcs) != 1: fail("Exactly one SWIG source file label must be specified.", "srcs") module_name = ctx.attr.module_name cc_out = ctx.outputs.cc_out py_out = ctx.outputs.py_out src = ctx.files.srcs[0] args = ["-c++", "-python"] args += ["-module", module_name] args += ["-l" + f.path for f in ctx.files.swig_includes] cc_include_dirs = set() cc_includes = set() for dep in ctx.attr.deps: cc_include_dirs += [h.dirname for h in dep.cc.transitive_headers] cc_includes += dep.cc.transitive_headers args += ["-I" + x for x in cc_include_dirs] args += ["-I" + ctx.label.workspace_root] args += ["-o", cc_out.path] args += ["-outdir", py_out.dirname] args += [src.path] outputs = [cc_out, py_out] ctx.action(executable=ctx.executable.swig_binary, arguments=args, mnemonic="PythonSwig", inputs=sorted(set([src]) + cc_includes + ctx.files.swig_includes + ctx.attr.swig_deps.files), outputs=outputs, progress_message="SWIGing {input}".format(input=src.path)) return struct(files=set(outputs)) _py_wrap_cc = rule(attrs={ "srcs": attr.label_list(mandatory=True, allow_files=True,), "swig_includes": attr.label_list(cfg=DATA_CFG, allow_files=True,), "deps": attr.label_list(allow_files=True, providers=["cc"],), "swig_deps": attr.label(default=Label( "//tensorflow:swig")), # swig_templates "module_name": attr.string(mandatory=True), "py_module_name": attr.string(mandatory=True), "swig_binary": attr.label(default=Label("//tensorflow:swig"), cfg=HOST_CFG, executable=True, allow_files=True,), }, outputs={ "cc_out": "%{module_name}.cc", "py_out": "%{py_module_name}.py", }, implementation=_py_wrap_cc_impl,) # Bazel rule for collecting the header files that a target depends on. 
def _transitive_hdrs_impl(ctx):
  """Rule implementation: gather the transitive headers of all deps."""
  outputs = set()
  for dep in ctx.attr.deps:
    outputs += dep.cc.transitive_headers
  return struct(files=outputs)

_transitive_hdrs = rule(attrs={
    "deps": attr.label_list(allow_files=True,
                            providers=["cc"]),
},
                        implementation=_transitive_hdrs_impl,)

def transitive_hdrs(name, deps=[], **kwargs):
  """Macro: a filegroup of every header exported (transitively) by deps."""
  _transitive_hdrs(name=name + "_gather",
                   deps=deps)
  native.filegroup(name=name,
                   srcs=[":" + name + "_gather"])

# Create a header only library that includes all the headers exported by
# the libraries in deps.
def cc_header_only_library(name, deps=[], **kwargs):
  _transitive_hdrs(name=name + "_gather",
                   deps=deps)
  native.cc_library(name=name,
                    hdrs=[":" + name + "_gather"],
                    **kwargs)

def tf_custom_op_library_additional_deps():
  # Dependencies every custom-op shared library needs beyond its own deps.
  return [
      "//google/protobuf",
      "//third_party/eigen3",
      "//tensorflow/core:framework_headers_lib",
  ]

# Helper to build a dynamic library (.so) from the sources containing
# implementations of custom ops and kernels.
def tf_custom_op_library(name, srcs=[], gpu_srcs=[], deps=[]):
  cuda_deps = [
      "//tensorflow/core:stream_executor_headers_lib",
      "//third_party/gpus/cuda:cudart_static",
  ]
  deps = deps + tf_custom_op_library_additional_deps()
  if gpu_srcs:
    # Compile the CUDA sources into a companion "<name>_gpu" library first.
    basename = name.split(".")[0]
    cuda_copts = ["-x", "cuda", "-DGOOGLE_CUDA=1",
                  "-nvcc_options=relaxed-constexpr", "-nvcc_options=ftz=true",
                  "--gcudacc_flag=-ftz=true"]
    native.cc_library(
        name = basename + "_gpu",
        srcs = gpu_srcs,
        copts = if_cuda(cuda_copts),
        deps = deps + if_cuda(cuda_deps))
    cuda_deps.extend([":" + basename + "_gpu"])

  # linkshared=1 makes the binary a loadable .so rather than an executable.
  native.cc_binary(name=name,
                   srcs=srcs,
                   deps=deps + if_cuda(cuda_deps),
                   linkshared=1,
                   linkopts = select({
                       "//conditions:default": [
                           "-lm",
                       ],
                       "//tensorflow:darwin": [],
                   }),
  )

def tf_extension_linkopts():
  return []  # No extension link opts

def tf_extension_copts():
  return []  # No extension c opts

def tf_py_wrap_cc(name, srcs, swig_includes=[], deps=[], copts=[], **kwargs):
  """Macro: SWIG-wrap C++ sources into a py_library plus its _<module>.so."""
  module_name = name.split("/")[-1]
  # Convert a rule name such as foo/bar/baz to foo/bar/_baz.so
  # and use that as the name for the rule producing the .so file.
  cc_library_name = "/".join(name.split("/")[:-1] + ["_" + module_name + ".so"])
  extra_deps = []
  _py_wrap_cc(name=name + "_py_wrap",
              srcs=srcs,
              swig_includes=swig_includes,
              deps=deps + extra_deps,
              module_name=module_name,
              py_module_name=name)
  native.cc_binary(
      name=cc_library_name,
      srcs=[module_name + ".cc"],
      copts=(copts + ["-Wno-self-assign", "-Wno-write-strings"]
             + tf_extension_copts()),
      linkopts=tf_extension_linkopts(),
      linkstatic=1,
      linkshared=1,
      deps=deps + extra_deps)
  native.py_library(name=name,
                    srcs=[":" + name + ".py"],
                    srcs_version="PY2AND3",
                    data=[":" + cc_library_name])

def tf_py_test(name, srcs, size="medium", data=[], main=None, args=[],
               tags=[], shard_count=1, additional_deps=[]):
  """Macro: a CPU py_test with TensorFlow's standard test deps attached."""
  native.py_test(
      name=name,
      size=size,
      srcs=srcs,
      main=main,
      args=args,
      tags=tags,
      visibility=["//tensorflow:internal"],
      shard_count=shard_count,
      data=data,
      deps=[
          "//tensorflow/python:extra_py_tests_deps",
          "//tensorflow/python:kernel_tests/gradient_checker",
      ] + additional_deps,
      srcs_version="PY2AND3")

def cuda_py_test(name, srcs, size="medium", data=[], main=None, args=[],
                 shard_count=1, additional_deps=[]):
  # Same as tf_py_test, but tagged so it is scheduled on CUDA-capable bots.
  test_tags = tf_cuda_tests_tags()
  tf_py_test(name=name,
             size=size,
             srcs=srcs,
             data=data,
             main=main,
             args=args,
             tags=test_tags,
             shard_count=shard_count,
             additional_deps=additional_deps)

def py_tests(name, srcs, size="medium", additional_deps=[], data=[], tags=[],
             shard_count=1, prefix=""):
  """Macro: one tf_py_test per source file, named after the file."""
  for src in srcs:
    test_name = src.split("/")[-1].split(".")[0]
    if prefix:
      test_name = "%s_%s" % (prefix, test_name)
    tf_py_test(name=test_name,
               size=size,
               srcs=[src],
               main=src,
               tags=tags,
               shard_count=shard_count,
               data=data,
               additional_deps=additional_deps)

def cuda_py_tests(name, srcs, size="medium", additional_deps=[], data=[],
                  shard_count=1):
  # GPU variant of py_tests: every generated test carries the CUDA test tags.
  test_tags = tf_cuda_tests_tags()
  py_tests(name=name,
           size=size,
           srcs=srcs,
           additional_deps=additional_deps,
           data=data,
           tags=test_tags,
           shard_count=shard_count)
[ "abdellah.lahnaoui@gmail.com" ]
abdellah.lahnaoui@gmail.com
3259d0615171353e16e44fb0506a5558587028c0
d037002f9d2b383ef84686bbb9843dac8ee4bed7
/tutorials/Trash/Distributed-DRL/torch/sac_test/utils/environment.py
c86069ea34cea9e7eb5b64d4846270b3babd3d96
[ "MIT" ]
permissive
ICSL-hanyang/Code_With_RL
4edb23ca24c246bb8ec75fcf445d3c68d6c40b6d
1378996e6bf6da0a96e9c59f1163a635c20b3c06
refs/heads/main
2023-08-15T18:37:57.689950
2021-10-18T07:31:59
2021-10-18T07:31:59
392,944,467
0
0
null
2021-08-05T07:20:57
2021-08-05T07:20:56
null
UTF-8
Python
false
false
971
py
import gym


class Environment:
    """Thin wrapper around a gym environment that tracks episode state.

    Exposes state/action dimensionality and guards (via assertions) against
    stepping an episode that has already finished or resetting one that is
    still running: ``can_run`` is True only between ``reset()`` and a
    terminal ``step()``.
    """

    def __init__(self, env_name):
        self.env = gym.make(env_name)
        self.state_dim = self.env.observation_space.shape[0]
        self._max_episode_steps = self.env._max_episode_steps
        self.can_run = False   # no episode in progress yet
        self.state = None      # last observation seen
        # Continuous (Box) action spaces expose a shape; discrete ones expose n.
        # isinstance (instead of the original `type(...) ==`) also accepts
        # Box subclasses, which are still continuous.
        if isinstance(self.env.action_space, gym.spaces.box.Box):  # Continuous
            self.action_dim = self.env.action_space.shape[0]
            self.is_discrete = False
        else:
            self.action_dim = self.env.action_space.n
            self.is_discrete = True

    def reset(self):
        """Start a new episode and return its initial observation.

        Must not be called while an episode is still running.
        """
        assert not self.can_run
        self.can_run = True
        self.state = self.env.reset()
        return self.state

    def step(self, action):
        """Advance one step; returns (next_state, reward, done, info).

        Must only be called between reset() and a terminal step.
        """
        assert self.can_run
        next_state, reward, done, info = self.env.step(action)
        self.state = next_state
        if done:  # episode over - require reset() before stepping again
            self.can_run = False
        return next_state, reward, done, info
[ "nzy1414117007@gmail.com" ]
nzy1414117007@gmail.com
28849c5633fc880b6e4043d6ee95027eb192b0fe
d77b363dd92fd61ff0f1fc75ffb9836dea201524
/main.py
f44ecfc93f61f7c047c6480e45d49d15fc1f7556
[]
no_license
BalticPinguin/ArgonMD
6309ac8cf2aceb115f2615c81b62eaeacb5bf286
ddb723e7b34ec8b150acf187aaff3d61df9c0f08
refs/heads/master
2016-09-03T07:39:16.201770
2015-08-10T19:42:32
2015-08-10T19:42:32
39,498,512
1
0
null
null
null
null
UTF-8
Python
false
false
1,318
py
#!/usr/bin/python3
import physics as ph
import sys


def frange(start, stop, step):
    """Float analogue of range(): yield start, start+step, ... while <= stop.

    Each value is recomputed as start + i*step rather than by repeated
    addition, so floating-point error does not accumulate.
    """
    index = 0
    value = start
    while value <= stop:
        yield value
        index += 1
        value = start + index * step


def main(argv):
    """Run a small argon MD simulation; argv holds only the temperature."""
    assert len(argv)==1, "only temperature and alpha are allowed as input-parameter!"

    N = 256              # number of particles
    T = float(argv[0])   # temperature from the command line
    dt = 10              # time step: 10 ps per step
    t = 3e3              # total simulation time: 3 fs
    alpha = 0.02
    L = 21               # cubic box edge -> density of rho = 1.8 g/cm^3

    output = "box.dat"
    output2 = "pairDist.dat"

    # Seed the initial configuration, then compute the starting forces.
    particle, mass = ph.seed_small(N, L, T)
    force = ph.update_force(particle, L)

    for now in frange(dt, t, dt):
        force, particle = ph.propagate(force, particle, L, dt, mass, alpha, T)
        if now > 2e3:
            # Only record the tail of the run; skip the equilibration phase.
            ph.print_conf(particle, output, output2, now, L)


if __name__=="__main__":
    main(sys.argv[1:])
[ "tobias.moehle@uni-rostock.de" ]
tobias.moehle@uni-rostock.de
cdc9c0fe13be7945a2a837c9dfa2b6ee764b8977
8b881e5a11a4b69362edf70929570964644aab75
/src/ai/AlphaBetaOwnSeeds.py
eb7539b7632553ff9285607c9d3a507bb67ba13b
[]
no_license
BpGameHackSoc/kalahai
18b84bf528c6e5e12e2ac0b0abb3052fec4b81c8
abc2ce1aa4c766fd1cadb62bf3bf4d92b9fe5f56
refs/heads/master
2021-09-02T10:16:27.051245
2017-12-23T10:23:52
2017-12-23T10:25:30
110,739,312
2
1
null
2017-11-19T14:39:11
2017-11-14T20:10:11
Python
UTF-8
Python
false
false
760
py
import numpy as np
from . import AlphaBeta
from model import Side


class AlphaBetaOwnSeeds(AlphaBeta.AlphaBeta):
    # Alpha-beta search player whose evaluation rewards "keepable" seeds in a
    # player's own holes in addition to the plain store difference.

    def evaluate(self, state):
        """Score *state* from SOUTH's perspective (positive favours SOUTH)."""
        south_holes = state.board.get_holes(Side.SOUTH)
        north_holes = state.board.get_holes(Side.NORTH)
        south_store = state.board.get_store(Side.SOUTH)
        north_store = state.board.get_store(Side.NORTH)
        # Keepable-seed advantage is weighted at a quarter of a stored seed.
        val = (self.keepable_seeds(south_holes)
               -self.keepable_seeds(north_holes)) *0.25
        val += south_store - north_store
        return val

    def keepable_seeds(self, buckets):
        """Sum the hole counts after clipping each to a position-dependent cap.

        NOTE(review): range(size, 0, -10) yields only ceil(size/10) entries,
        so for boards with more than 10 holes the subtraction against
        np.ones(size) would fail to broadcast - was a step of -1 intended
        (caps of size-1 down to 0)? TODO confirm against the board sizes
        actually used.
        """
        size = len(buckets)
        clipper = np.array(range(size,0,-10))-np.ones(size)
        return np.sum(np.clip(buckets,None,clipper))

    def move(self, state):
        # Move selection itself is the generic alpha-beta search.
        return super().move(state)
[ "gergely.halacsy@gmail.com" ]
gergely.halacsy@gmail.com
31b58b74e967def34fcd7730cc4170cb953bf04e
d23ddee7237f138d003b44d859d12a9f8385cfce
/app.py
acc6da3dca2bd6c32be0c58631cb41c1bbe758e2
[]
no_license
Kelby-Wilson/sqlalchemy_challenge
a6497bde709e8edf838949b75cf1e2a7fa011074
3b8ba0e3a1ac237ae319532eba892445b5be4912
refs/heads/master
2022-12-03T17:29:22.003559
2020-08-26T16:52:57
2020-08-26T16:52:57
262,371,228
0
0
null
null
null
null
UTF-8
Python
false
false
5,189
py
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
from flask import Flask, jsonify
import datetime as dt

# Relative Date

###
# Database Setup
###
# check_same_thread=False lets Flask worker threads share the SQLite
# connection; echo=True logs every emitted SQL statement.
engine = create_engine("sqlite:///hawaii.sqlite", connect_args={'check_same_thread': False}, echo=True)

# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)

# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station

# Create our session (link) from Python to the DB
# NOTE(review): a single module-level session is shared by all requests.
session = Session(engine)

###
# Flask Setup
###
app = Flask(__name__)

###
# Flask Routes
###

@app.route("/")
def welcome():
    """List all available api routes."""
    return"""<html>
<h1>List of all available Honolulu, HI API routes</h1>
<ul>
<br>
<li>
Return a list of precipitations from last year:
<br>
<a href="/api/v1.0/precipitation">/api/v1.0/precipitation</a>
</li>
<br>
<li>
Return a JSON list of stations from the dataset:
<br>
<a href="/api/v1.0/stations">/api/v1.0/stations</a>
</li>
<br>
<li>
Return a JSON list of Temperature Observations (tobs) for the previous year:
<br>
<a href="/api/v1.0/tobs">/api/v1.0/tobs</a>
</li>
<br>
<li>
Return a JSON list of tmin, tmax, tavg for the dates greater than or equal to the date provided:
<br>Replace &ltstart&gt with a date in Year-Month-Day format.
<br>
<a href="/api/v1.0/2017-01-01">/api/v1.0/2017-01-01</a>
</li>
<br>
<li>
Return a JSON list of tmin, tmax, tavg for the dates in range of start date and end date inclusive:
<br>
Replace &ltstart&gt and &ltend&gt with a date in Year-Month-Day format.
<br>
<br>
<a href="/api/v1.0/2017-01-01/2017-01-07">/api/v1.0/2017-01-01/2017-01-07</a>
</li>
<br>
</ul>
</html>
"""

@app.route("/api/v1.0/precipitation")
def precipitation():
    # Docstring
    """Return a list of precipitations from last year"""
    # Design a query to retrieve the last 12 months of precipitation data and plot the results
    max_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()

    # Get the first element of the tuple
    max_date = max_date[0]

    # Calculate the date 1 year ago from today
    # The days are equal 366 so that the first day of the year is included
    year_ago = dt.datetime.strptime(max_date, "%Y-%m-%d") - dt.timedelta(days=366)
    # Perform a query to retrieve the data and precipitation scores
    results_precipitation = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= year_ago).all()

    # Convert list of tuples into normal list
    # (dict(...) keeps one prcp value per date - later rows win)
    precipitation_dict = dict(results_precipitation)

    return jsonify(precipitation_dict)

@app.route("/api/v1.0/stations")
def stations():
    # Docstring
    """Return a JSON list of stations from the dataset."""
    # Query stations
    results_stations = session.query(Measurement.station).group_by(Measurement.station).all()

    # Convert list of tuples into normal list
    stations_list = list(np.ravel(results_stations))

    return jsonify(stations_list)

@app.route("/api/v1.0/tobs")
def tobs():
    # Docstring
    """Return a JSON list of Temperature Observations (tobs) for the previous year."""
    # Design a query to retrieve the last 12 months of precipitation data and plot the results
    max_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()

    # Get the first element of the tuple
    max_date = max_date[0]

    # Calculate the date 1 year ago from today
    # The days are equal 366 so that the first day of the year is included
    year_ago = dt.datetime.strptime(max_date, "%Y-%m-%d") - dt.timedelta(days=366)
    # Query tobs
    results_tobs = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= year_ago).all()

    # Convert list of tuples into normal list
    tobs_list = list(results_tobs)

    return jsonify(tobs_list)

@app.route("/api/v1.0/<start>")
def start(start=None):
    # Docstring
    """Return a JSON list of tmin, tmax, tavg for the dates greater than or equal to the date provided"""
    # Per-date min/avg/max temperature from the start date onwards.
    from_start = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).group_by(Measurement.date).all()
    from_start_list=list(from_start)
    return jsonify(from_start_list)

@app.route("/api/v1.0/<start>/<end>")
def start_end(start=None, end=None):
    # Docstring
    """Return a JSON list of tmin, tmax, tavg for the dates in range of start date and end date inclusive"""
    # Per-date min/avg/max temperature between start and end (inclusive).
    between_dates = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).group_by(Measurement.date).all()
    between_dates_list=list(between_dates)
    return jsonify(between_dates_list)

if __name__ == '__main__':
    app.run(debug=True)
[ "noreply@github.com" ]
Kelby-Wilson.noreply@github.com
e5679a098872822f28be752dec6bb6519196d5b7
8a5ab3d33e3b653c4c64305d81a85f6a4582d7ac
/PySide/QtCore/QTimer.py
5e91243992b9f324a3a089a65f93db3242e8a538
[ "Apache-2.0" ]
permissive
sonictk/python-skeletons
be09526bf490856bb644fed6bf4e801194089f0d
49bc3fa51aacbc2c7f0c7ab86dfb61eefe02781d
refs/heads/master
2020-04-06T04:38:01.918589
2016-06-09T20:37:43
2016-06-09T20:37:43
56,334,503
0
0
null
2016-04-15T16:30:42
2016-04-15T16:30:42
null
UTF-8
Python
false
false
1,511
py
# encoding: utf-8 # module PySide.QtCore # from /corp.blizzard.net/BFD/Deploy/Packages/Published/ThirdParty/Qt4.8.4/2015-05-15.163857/prebuilt/linux_x64_gcc41_python2.7_ucs4/PySide/QtCore.so # by generator 1.138 # no doc # no imports from QObject import QObject class QTimer(QObject): # no doc def interval(self, *args, **kwargs): # real signature unknown pass def isActive(self, *args, **kwargs): # real signature unknown pass def isSingleShot(self, *args, **kwargs): # real signature unknown pass def killTimer(self, *args, **kwargs): # real signature unknown pass def setInterval(self, *args, **kwargs): # real signature unknown pass def setSingleShot(self, *args, **kwargs): # real signature unknown pass def singleShot(self, *args, **kwargs): # real signature unknown pass def start(self, *args, **kwargs): # real signature unknown pass def startTimer(self, *args, **kwargs): # real signature unknown pass def stop(self, *args, **kwargs): # real signature unknown pass def timerEvent(self, *args, **kwargs): # real signature unknown pass def timerId(self, *args, **kwargs): # real signature unknown pass def __init__(self, *more): # real signature unknown; restored from __doc__ """ x.__init__(...) initializes x; see help(type(x)) for signature """ pass staticMetaObject = None timeout = None __new__ = None
[ "yliangsiew@blizzard.com" ]
yliangsiew@blizzard.com
23cfee1ada500316d73bc8ad4983d16ddaefb85b
c71ad354837830987f17ab93ca3f7ceb6d405311
/khajuri/bin/pipeline_test.py
772f8726ae18693915e513ab87a0fac87cf3679f
[]
no_license
zigvu/samosa
f353248a75fe7a83a8a59b375b104abec8d1d855
3962b3c7bab9d26bf871d257e15dd39c45ffaddd
refs/heads/master
2021-03-30T18:12:58.441901
2016-02-20T00:12:30
2016-02-20T00:12:30
50,481,153
0
0
null
null
null
null
UTF-8
Python
false
false
1,390
py
#!/usr/bin/env python
import logging
import os
import glob
import argparse

import _init_paths
from khajuri.pipeline.run_pipeline import RunPipeline
from khajuri.multi.clip import Clip
from tools.files.file_utils import FileUtils


def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='Test on zigvu model on clips')
    parser.add_argument(
        '--clip_folder', dest='clip_folder', help='Path to clips',
        required=True)
    parser.add_argument(
        '--output_path', dest='output_path', help='Output folder path',
        required=True)
    return parser.parse_args()


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    logging.debug('Start testing.')
    args = parse_args()

    pipeline = RunPipeline()
    # Enqueue every .mp4 in the clip folder, keyed by its basename.
    for clip_file in glob.glob("{}/*.mp4".format(args.clip_folder)):
        clip_number = os.path.splitext(os.path.basename(clip_file))[0]
        clip_out_path = os.path.join(args.output_path, clip_number)

        clip = Clip()
        clip.clip_id = clip_number
        clip.clip_path = clip_file
        clip.result_path = os.path.join(clip_out_path, 'clip.pkl')
        pipeline.clipdbQueue.put(clip)
        logging.debug('RabbitToClip: process clip: {}'.format(clip.clip_id))

    pipeline.start()
    pipeline.join()
[ "eacharya@gmail.com" ]
eacharya@gmail.com
7fd9141000ee1b4be8b4f5dd9b969abf33c9eac9
d5dbae52bbfded54436a665f614a2793029371ea
/models/model2csv.py
64ec1c92bead5bf32dd3801e0c4c0df6e534c482
[ "Apache-2.0" ]
permissive
bmarggraff/allie
88b97acffebe2c1876b379d478b293bfb9edfefb
2e2f8780f0a42229b582703455e9ce1d42cf9f96
refs/heads/master
2022-11-28T02:27:55.100030
2020-08-07T19:55:46
2020-08-07T19:55:46
285,911,411
1
0
null
2020-08-07T20:03:08
2020-08-07T20:03:07
null
UTF-8
Python
false
false
2,962
py
'''
model2csv.py (part of Allie)

Creates a CSV summary of all currently trained models with their model
performances; useful to summarize all modeling sessions quickly.
Writes models.csv to the current directory.

Usage: python3 model2csv.py
'''
import os, json
import pandas as pd


def id_folder():
    """Collect the metrics of every trained model session.

    Scans <modality>_models/<session>/model/<session>.json for each of the
    known modality folders under the current working directory.  Missing
    folders and unreadable or malformed summaries are skipped (best effort,
    as in the original), but the exceptions caught are now explicit instead
    of a bare ``except:`` and the working directory is never changed - the
    original chdir'd without restoring, so models.csv ended up in the wrong
    directory.

    Returns:
        (metrics_list, model_names): parallel lists of metric dicts and the
        session names they belong to.
    """
    curdir = os.getcwd()
    directories = ['audio_models', 'text_models', 'image_models',
                   'video_models', 'csv_models']
    metrics_list = []
    model_names = []

    for directory in directories:
        base = os.path.join(curdir, directory)
        try:
            entries = os.listdir(base)
        except OSError:
            # modality folder does not exist - nothing trained for it
            continue

        for folder in entries:
            if '.' in folder:
                continue  # plain files; model sessions are dot-free dirs
            jsonfile = os.path.join(base, folder, 'model', folder + '.json')
            try:
                with open(jsonfile) as handle:
                    summary = json.load(handle)
                metrics_list.append(summary['metrics'])
                model_names.append(folder)
            except (OSError, ValueError, KeyError):
                # missing/unreadable/malformed model summary - skip it
                pass

    return metrics_list, model_names


metrics_list, model_names = id_folder()

accuracies = [metrics['accuracy'] for metrics in metrics_list]
roc_curve = [metrics['roc_auc'] for metrics in metrics_list]

data = {'model names': model_names,
        'accuracies': accuracies,
        'roc_auc': roc_curve}

print(model_names)
print(accuracies)
print(roc_curve)

df = pd.DataFrame.from_dict(data)
df.to_csv('models.csv')
[ "noreply@github.com" ]
bmarggraff.noreply@github.com
a254ecc9342fa1c6acec1d6dd7d1b9ee994945ee
8ae3e86fd736b65825a8c810560a73d17da74575
/solrdataimport/dataload/cqlbuilder.py
ee377653b8dbfc43530f90d778226878ff1f73fd
[ "Apache-2.0" ]
permissive
pisceanfoot/solrdataimport
68d12e6ab96f7ed856e8187806981af8635920d6
a7f97cda5eb4ff569e67e5636a9217e9fe1a5fb5
refs/heads/master
2021-01-10T06:17:16.154994
2018-03-17T07:14:09
2018-03-17T07:14:09
49,885,709
2
1
Apache-2.0
2018-03-17T07:14:10
2016-01-18T15:29:32
Python
UTF-8
Python
false
false
2,135
py
# -*- coding:utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals, \ with_statement import logging from solrdataimport.cass.cassClient import CassandraClient from solrdataimport.cass.cassSchema import CassSchema logger = logging.getLogger(__name__) class CqlBuilder(object): @classmethod def buildCacheKey(cls, cql, params): return cql + '_'.join(map(str, params)) @classmethod def buildCql(cls, fullDataImport, table, table_key, rowKey=None): cql = 'select * from {0}'.format(table) appendKey = [] if not fullDataImport and table_key: appendKey = table_key if rowKey: for key in rowKey: appendKey.append(key) if appendKey: key = ' = ? and '.join(appendKey) cql = cql + ' where ' + key + ' = ?;' return cql @classmethod def buildParam(cls, fullDataImport, table, table_key, row=None, rowKey=None, **kwargs): if fullDataImport: return None params = [] if table_key: for x in table_key: if x not in kwargs: raise Exception('key %s not found in param', x) column_type = cls.__fetchFieldType(table, x) params.append(CassandraClient.wrapper(column_type, kwargs.pop(x))) if row and rowKey: for key in rowKey: fetchKey = rowKey[key].lower() column_type = cls.__fetchFieldType(table, key) params.append(CassandraClient.wrapper(column_type, row[fetchKey])) return params @classmethod def __fetchFieldType(cls, table, field): logger.debug('fetch filed type for table "%s" field "%s"', table, field) schema = CassSchema.load(table) field_name_lower = field.lower() if field_name_lower in schema: return schema[field_name_lower] else: logger.error('field "%s" not in table "%s"', field, table) raise Exception('field "%s" not in table "%s"', field, table)
[ "pisceanfoot@gmail.com" ]
pisceanfoot@gmail.com
fcafef610287029b1a2c87cfaac8bd9b6790c9b0
285f136156a925b05b5d51f3a4021813a455b971
/backend/handlers/__init__.py
73a855f9a4321fcd162797bf6c8a09cc1dbcc598
[ "Apache-2.0" ]
permissive
kubikvid/weather-this-day
41185aacbbdcf65578576bf6f5974d00a00a3275
ada662f191ee122190168265d3d50e925ef26630
refs/heads/master
2020-05-21T21:24:21.959135
2019-05-13T01:42:14
2019-05-13T01:42:14
186,151,156
0
0
null
null
null
null
UTF-8
Python
false
false
419
py
# Copyright (c) 2019. Lorem ipsum dolor sit amet, consectetur adipiscing elit. # Morbi non lorem porttitor neque feugiat blandit. Ut vitae ipsum eget quam lacinia accumsan. # Etiam sed turpis ac ipsum condimentum fringilla. Maecenas magna. # Proin dapibus sapien vel ante. Aliquam erat volutpat. Pellentesque sagittis ligula eget metus. # Vestibulum commodo. Ut rhoncus gravida arcu. from handlers import history
[ "moonquiz@ya.ru" ]
moonquiz@ya.ru
07bf5e876ec76acc417629cf2befc0a819977d2d
4a4d727cab138c5a3bf3bfb05d48084ba06bd5d4
/Python master/MODULO 7 - API/primeiro_api.py
1bd2268ba3f4021a784fbde76a9f426ed37e5ca4
[]
no_license
RoniNunes/python
9e9d61e69deab02ee9e9955a5e95c7e6ef610e7a
52f6b068f469fc63907b84f67e6005f9b7964442
refs/heads/master
2023-06-16T11:37:35.046750
2021-07-02T11:37:32
2021-07-02T11:37:32
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,621
py
from flask import Flask, jsonify, request

app = Flask(__name__)

# In-memory list of posts that backs every endpoint (no real database).
postagens = [
    {
        'titulo': 'Api com Flask',
        'autor': 'Roni nunes'
    },
    {
        'titulo': 'Voce ja usou o Selenium?',
        'autor': 'Roni nunes'
    },
    {
        'titulo': 'Como instalar o python',
        'autor': 'Roni nunes'
    }
]

# NOTE(review): this list is shadowed by the view function of the same name
# defined below, so it is unreachable dead data - confirm it can be removed.
nova_postagem = [
    {
        'titulo': 'Nova postagem com Flask',
        'autor': 'Roni nunes'
    }]


@app.route('/postagens', methods=['GET'])
def obter_todas_postagens():
    # Return every post.
    return jsonify(postagens), 200


@app.route('/postagens/<int:postagem_id>', methods=['GET'])
def obter_postagens_por_id(postagem_id):  # We pass the ID we want to look up.
    # NOTE(review): an out-of-range id raises IndexError (HTTP 500), not 404.
    return jsonify(postagens[postagem_id]), 200


@app.route('/postagens', methods=['POST'])
def nova_postagem():
    # Append the JSON request body as a new post.
    postagem = request.get_json()
    postagens.append(postagem)
    return jsonify({'mensagem': 'Recurso criado com sucesso'}), 200


@app.route('/postagens/<int:postagem_id>', methods=['PUT'])
def atualizar_postagem(postagem_id):  # We pass the ID we want to update.
    # Merge the JSON request body into the existing post.
    resultado = request.get_json()
    postagens[postagem_id].update(resultado)
    return jsonify(postagens[postagem_id]), 200


@app.route('/postagens/<int:postagem_id>', methods=['DELETE'])
def excluir_postagem(postagem_id):  # We pass the ID we want to delete.
    postagem = postagens[postagem_id]
    del postagens[postagem_id]
    return jsonify({'mensagem': 'A postagem foi excluida com sucesso'}), 200


if __name__ == '__main__':
    app.run(port=5000, host='localhost',debug=True)
[ "noreply@github.com" ]
RoniNunes.noreply@github.com
9c398ed840e6c2bc5aa61edeb589e34f35fb1ef5
c36d43dc3ebb5ab987bda1cd7329a6fab58af45b
/semnet/interp/evaluator.py
5575ddf4edf6448507606f8f00ce119d1381ded7
[]
no_license
patgrasso/semnet
e37cacfdab0903b0b5aed5ac010e071f24decb65
e5fd8912a1768f3f59dee937199feaa2158c925c
refs/heads/master
2021-01-12T06:38:10.173145
2016-12-31T00:04:35
2016-12-31T00:04:35
77,401,845
1
1
null
null
null
null
UTF-8
Python
false
false
158
py
class Evaluator(object):
    """Evaluates parse-tree nodes against an environment of known words."""

    def __init__(self, env):
        # env: mapping-like object supporting .get(word)
        self.env = env

    def valueof(self, node, node_list):
        """Return the value bound to node's "word" in the environment.

        ``node_list`` is accepted for interface compatibility but unused
        here.  Returns None when the word is unknown (dict.get semantics).
        """
        # Bug fix: the original performed the lookup but discarded the
        # result (no return), so valueof() always yielded None.
        return self.env.get(node["word"])
[ "pgrasso@stevens.edu" ]
pgrasso@stevens.edu
5b8829efc99be0d97be1f033a445e8090d9021fe
7c0a5b40e86c876e72d3a635a60978dbf1c79c8b
/__init__.py
dbc8e29cabf9ba156d1b1396ed22dcd9204f2a28
[]
no_license
BlenderCN-Org/selection_logic
a48e396f2ebfaf6f750bfa5871f33d49c69b15ba
7d240d626d699e5b41f1b45728730f41a360fc77
refs/heads/master
2020-05-23T21:20:40.599162
2018-10-10T17:35:25
2018-10-10T17:35:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
797
py
# Blender add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Selection Logic",
    "description": "Advanced selections based on logical conditions.",
    "author": "Omar Ahmad",
    "version": (1, 0),
    "blender": (2, 79, 0),
    "location": "View3D",
    "warning": "",
    "category": "Mesh"
}

import bpy
from . import ui
from . import operators


class SelectByExpressionOperator(bpy.types.Operator):
    # Operator invoked from the UI; delegates to operators.selectVertices.
    # (No class docstring on purpose: Blender would show it as the tooltip.)
    bl_idname = "mesh.select_by_expression"
    bl_label = "Select By Expression"

    def execute(self, context):
        # Blender operator entry point: run the expression-based selection.
        operators.selectVertices(context)
        return {'FINISHED'}


def register():
    # Register the UI panels and operator helpers, then the operator class.
    ui.register()
    operators.register()
    bpy.utils.register_class(SelectByExpressionOperator)


def unregister():
    # Mirror register() so Blender can cleanly disable the add-on.
    ui.unregister()
    operators.unregister()
    bpy.utils.unregister_class(SelectByExpressionOperator)
[ "omar.squircleart@gmail.com" ]
omar.squircleart@gmail.com
91b43cda449292a11f4a69bb1dffb18b7872d0b9
32349a7406af3f6926e508dd4154a9042cd8a0b6
/DAA/Dynammic Programming/edit_distance.py
3fd4b208d55bfa264a9def65b8de2f5664df98c2
[]
no_license
anumehaagrawal/LabWork-Sem-4
d78b95b61b2ec94d1ad143768200b739d40c2105
782430f67bb423b84749295a3fef61f241293032
refs/heads/master
2021-05-12T07:30:30.746342
2018-04-17T03:11:12
2018-04-17T03:11:12
117,244,508
2
1
null
null
null
null
UTF-8
Python
false
false
485
py
def edit_distance(str1,str2,n,m): dp_array =[[0 for i in range(m)] for k in range(n)] for i in range(n): for k in range(m): if i==0: dp_array[i][k] = k elif k==0 : dp_array[i][k] = i if str1[i] ==str2[k]: dp_array[i][k] = dp_array[i-1][k-1] else: dp_array[i][k] = 1+ min(dp_array[i][k-1],dp_array[i-1][k],dp_array[i-1][k-1]) print(dp_array[n-1][m-1]) def main(): str1 = "hello" str2 = "heeeee" edit_distance(str1,str2,len(str1),len(str2)) main()
[ "anuzenith29@gmail.com" ]
anuzenith29@gmail.com
9488c0f83f1e5752703d6f5e72ddae45c675c8e9
86095e9590db8bab47b95752b967d9dbb88647da
/client.py
6ee40913d22e329ed34554e2633080860679cf5e
[]
no_license
jrestuccio/python-udp-filetransfer
3cb2e4ec5d0751d133e648fefc20db73755e75c4
0c9e4cf278279a0fb980749eb9a3a2a8ca5796e9
refs/heads/master
2020-12-03T08:13:27.254990
2014-05-05T11:41:08
2014-05-05T11:41:08
null
0
0
null
null
null
null
UTF-8
Python
false
false
14,013
py
""" :title: client.py :author: Josephine Lim :description: Client to download files from server Summary of packet types: 1 = 0b0001 = read request = \x00\x00\x00\x01 2 = 0b0010 = read response = \x00\x00\x00\x02 4 = 0b0100 = open request = \x00\x00\x00\x04 8 = 0b1000 = open response = \x00\x00\x00\x08 9 = 0b1001 = close request = \x00\x00\x00\x09 """ from socket import * import sys import select import struct import random class Client(object): NUM_BYTES_TO_READ = 1400 #Total bytes sent inc header will be <1500 to prevent fragmentation over Ethernet links epoch_no = 0 handle_no = 0 def __init__(self): """Sets up UDP socket, obtains 5 values at command line: Filename to be read from server Filename under which received file is to be stored locally IP address or hostname of server (localhost if client is run on same machine) Port number of server Probability of packet loss, p """ self.client_socket = socket(AF_INET, SOCK_DGRAM) # Value for number of bytes socket can receive. ( For best match with hardware and network realities, # the value should be a relatively small power of 2, for example, 4096) self.buffer_ = 2048 self.file_read = self.get_file_read_arg() self.local_filename = self.get_local_filename_arg() self.ip = self.get_ip_arg() self.port = self.get_port_arg() self.p = self.get_p_arg() self.address = (self.ip, self.port) # Create file on local system with name provided, to write our received file to self.file_write = open(self.local_filename, 'wb') self.eof = False def get_file_read_arg(self): """Gets the name of the file to receive from the command line. Throws an error if it is empty or more than 100 characters.""" try: arg = sys.argv[1] file_read = str(arg) except IndexError: print "Please provide the name of the file that you wish to receive." sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") if (len(file_read) > 100): print "Name of file must be equal to or less than 100 characters." 
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") else: return file_read def get_local_filename_arg(self): """Gets the name under which received file is to be stored locally, from the command line. Throws an error if it is empty.""" try: arg = sys.argv[2] local_filename = str(arg) except IndexError: print "Please provide the name under which the received file is to be stored locally." sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") else: return local_filename def get_ip_arg(self): """Gets the ip number or hostname of the server from the command line. Throws an error if it is empty.""" try: arg = sys.argv[3] ip = str(arg) except IndexError: print "The IP address or hostname of the server must be provided." sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") else: return ip def get_port_arg(self): """Gets the port number of the server from the command line. Throws an error if it is empty, not an integer, or not in the range of 1024 - 60000.""" try: arg = sys.argv[4] port = int(arg) except ValueError: print "Port must be a number only." sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") except IndexError: print "Port number must be provided." sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") if any([port < 1024, port > 60000]): print "Port must be between 1024 and 60000" sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") else: return port def get_p_arg(self): """Gets the probability of packet loss, p, from the command line. Throws an error if it is empty, or not a float in the range of 0.0 - 1.0.""" try: arg = sys.argv[5] p = float(arg) except IndexError: print "The probability of packet loss, p, must be provided." 
sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") if (p < 0.0 or p > 1.0): print "p value must be between 0.0 and 1.0 inclusive." sys.exit("Example usage:\n\nclient.py myfile.txt receivedfile.txt 127.0.0.1 6060 0.0") else: return p def recv_invalid_response(self, recv_data, invalid_type = ""): """When bit signature is invalid or wrong packet type is received, discard packet and print error message.""" if (invalid_type == "bit_signature"): print("Error: Packet received from outside our network (wrong bit signature)") recv_data = "" elif (invalid_type == "response_type"): print("Error: Wrong response type in packet received.") recv_data = "" return def send_open_request(self): """Sends an open-request packet to the server in binary. Format of packet is: 4 bytes - bit signature - 0b1101 4 bytes - open request type - 0b0100 100 bytes - filename to be read as ASCII string """ print "Sending open request for file named ", self.file_read send_data = struct.pack("!2I100s", 0b1101, 0b0100, self.file_read) self.client_socket.sendto(send_data, self.address) return def recv_open_response(self, recv_payload): """When client receives an (already-validated) open-response packet from the server, it unpacks the payload and saves the received fields as instance variables if file found.""" unpacked_payload = struct.unpack("!?Q2I", recv_payload) # Read status field. If set to False, ignore remaining fields and # generate error msg (file not found) before exiting. # Each unpacked value is a tuple, so [0] accesses the value that we want status = unpacked_payload[0:1][0] if status == False: print "Error: File not found." sys.exit() #If set to True, read remaining fields. elif status == True: print("File found.") self.file_length = unpacked_payload[1:2][0] self.epoch_no = unpacked_payload[2:3][0] self.handle_no = unpacked_payload[3:][0] return def send_read_request(self, start_position): """Sends a read request packet to the server in binary. 
Format of packet is: 4 bytes - bit signature - 0b1101 4 bytes - read request type - 0b0001 4 bytes - epoch number - provided by server in open response 4 bytes - handle number - provided by server in open response 4 bytes - start position of the block to be read from the file - incremented sequentially 4 bytes - number of bytes to read - 1400 """ send_data = struct.pack("!6I", 0b1101, 0b0001, self.epoch_no, self.handle_no, start_position, self.NUM_BYTES_TO_READ) self.client_socket.sendto(send_data, self.address) return def recv_read_response(self, recv_payload): """When client receives an (already-validated) read-response packet from the server, it unpacks payload, checks that epoch number and handle number are correct and status field is 'OK', and appends file data received to the local file at the given start position.""" #Only unpack the headers because we want to store the file data as binary unpacked_payload = struct.unpack('!H3IQ', recv_payload[:22]) status = unpacked_payload[0:1][0] epoch_no = unpacked_payload[1:2][0] handle_no = unpacked_payload[2:3][0] #Check that file handle is the same, to make sure it is the same file request. if (self.epoch_no == epoch_no and self.handle_no == handle_no): start_position = unpacked_payload[3:4][0] num_bytes_been_read = unpacked_payload[4:5][0] # If we receive less bytes than the number we requested to read, this means that # end of file has been reached if (num_bytes_been_read < self.NUM_BYTES_TO_READ): self.eof = True data_to_write = recv_payload[22:] #If status field says that response contains real data: Append to file. Otherwise react #depending on error code received. #Status 00 = OK #Status 01 = Epoch no. of file handle doesnt match epoch no. 
of current invocation #Status 10 = No context found for file-handle and no data has been read #Status 11 = Context could be found but start position out of range if (status == 0b00): self.file_append.seek(start_position) self.file_append.write(data_to_write) elif (status == 0b01): print("Error: Epoch no. of file handle doesnt match epoch no. of current invocation") sys.exit() elif (status == 0b10): print("Error: No context found for file-handle and no data has been read") sys.exit() elif(status == 0b11): print("Error: Context could be found but start position out of range") sys.exit() else: print("Error: File handle does not match file handle stored in client. Wrong file received.") sys.exit() #Then return control to read_service_loop() method so that next iteration of send_read_request #from new start position is called. return def send_close_request(self): """Sends a close request packet to the server to close the file object. Format of packet is: 4 bytes - bit signature - 0b1101 4 bytes - close request type - 0b1001 4 bytes - epoch number 4 bytes - handle number """ data = struct.pack("!4I", 0b1101, 0b1001, self.epoch_no, self.handle_no) self.client_socket.sendto(data, self.address) self.client_socket.close() return def open_service_loop(self): """Loop that governs the timing and retransmission of open request packets, then checks packets received for the bit signature and response type fields to ensure that they are correct.""" print "Attempting to receive file", self.file_read, "from", self.ip, "at port", self.port, "." recv_data = None num_retransmits = 0 #Start timer, retransmit after each timeout of one second. If receive response within the timer, move on to next step. #Limit number of retransmits to 60 so as not to enter infinite loop. 
while(num_retransmits < 60): num_retransmits += 1 self.send_open_request() input_socket = [self.client_socket] inputready,outputready,exceptready = select.select(input_socket,[],[], 1) #if timer expires without input becoming ready, empty list is returned. So go to next iteration of loop (retransmit) if (inputready == []): continue else: try: recv_data = self.client_socket.recv(self.buffer_) except Exception as exception_: print("Wrong port number or IP address provided, or server is not available at the moment.") sys.exit() print("Received a packet.") #Generate a random number between 0 and 1 with uniform distribution to simulate packet loss. if (random.uniform(0,1) < self.p): recv_data = None print("Packet dropped randomly to simulate packet losses") continue bit_signature = recv_data[0:4] response_type = recv_data[4:8] recv_payload = recv_data[8:] #Check that bit signature is valid (packet is from our network) if bit_signature != "\x00\x00\x00\r": recv_invalid_response(recv_data, "bit_signature") continue else: #We have only ever sent a open_request, so the only viable response at this point is an open_response. #If this field contains anything else, it is an invalid packet. Retransmit request. if response_type != "\x00\x00\x00\x08": self.recv_invalid_response(recv_data, "response_type") continue else: #Bit signature and response type fields are both valid. print("Received open response from server...") self.recv_open_response(recv_payload) break if (num_retransmits >= 60): print ("Exceeded number of retransmissions allowed. Exiting program.") sys.exit() return def read_service_loop(self): """Loop that governs the timing and retransmission of read request packets, then checks packets received for the bit signature and response type fields to ensure that they are correct.""" #Increment start_position each time packet sent, send a read request packet for each new position. #Expect to receive a read_response packet for each time read request sent. 
recv_data = None print("Sending request to server to read and receive file...") start_position = 0 while(self.eof == False): print("Reading from byte " + str(start_position)) num_retransmits = 0 #Loop for retransmissions of the same start position while(num_retransmits < 60): num_retransmits = num_retransmits + 1 self.send_read_request(start_position) input_socket = [self.client_socket] inputready,outputready,exceptready = select.select(input_socket,[],[], 1) if (inputready == []): continue else: recv_data = self.client_socket.recv(self.buffer_) if (random.uniform(0,1) < self.p): recv_data = None print("Packet dropped randomly to simulate packet losses") continue bit_signature = recv_data[0:4] response_type = recv_data[4:8] recv_payload = recv_data[8:] if bit_signature != "\x00\x00\x00\r": self.recv_invalid_response(recv_data, "bit_signature") continue else: if response_type == "\x00\x00\x00\x02": #Packet is valid, proceed to recv_read_response to append this bit of file received into local_filename self.file_append = open(self.local_filename, 'r+b') self.recv_read_response(recv_payload) break else: self.recv_invalid_response(recv_data, "response_type") continue start_position = start_position + self.NUM_BYTES_TO_READ if (num_retransmits >= 60): print ("Exceeded number of retransmissions allowed. Exiting program.") sys.exit() return client = Client() client.open_service_loop() client.read_service_loop() client.send_close_request() print ("File received successfully. Program will now exit.") sys.exit()
[ "thecodeman66@hotmail.com" ]
thecodeman66@hotmail.com
20cb0d0b09a6ffefdcad9798b490f37d638c9fec
73ffeccb2b50320536e375c255c1a48f5dfa4493
/quantified_self_project/settings.py
080b42090d5b843aa1ce6c8b14cd8290e86b11be
[]
no_license
justinetroyke/qs-django
0db7737b96d5deb1e3c6f81a25097b87a4da61c5
095524f8d0e8e83e702bfb02dbab8fb6bd650d17
refs/heads/master
2020-03-27T13:34:49.323224
2018-08-29T15:02:51
2018-08-29T15:02:51
146,617,327
0
0
null
null
null
null
UTF-8
Python
false
false
3,391
py
""" Django settings for quantified_self_project project. Generated by 'django-admin startproject' using Django 2.1. For more information on this file, see https://docs.djangoproject.com/en/2.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.1/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'zztez77xu&)++b!lnr+1yeis@sqced!id%6g-n%v6y3)64z9=9' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'api', 'rest_framework', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'quantified_self_project.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'quantified_self_project.wsgi.application' # Database # https://docs.djangoproject.com/en/2.1/ref/settings/#databases DATABASES = { 
'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.AllowAny' ] } # Internationalization # https://docs.djangoproject.com/en/2.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), )
[ "jjtroyke@gmail.com" ]
jjtroyke@gmail.com
a77ed31a71760f495bdfed54cbe1295c506714c3
6dd65ba20f60ee02e5d449d1bbe61865a993ab3b
/Monthly_Bussiest_Route.py
9d0eb6636f660d57d29a3156c505f071b3bb0262
[]
no_license
subhanshugpt07/Aviation_Big_Data_2017
a065aa52afaa287d489b88cd89c2df3544521fb0
fd5d68c6f9dfa92853ba67e2cfceda8d15f602bb
refs/heads/master
2021-07-03T08:20:12.739348
2017-09-24T23:21:44
2017-09-24T23:21:44
104,683,057
2
0
null
null
null
null
UTF-8
Python
false
false
3,243
py
from pyspark.sql.functions import * import csv from pyspark.sql.types import * from pyspark.sql.functions import * from pyspark import SparkContext from pyspark.sql import HiveContext from pyspark.sql.functions import * from pyspark.sql.functions import udf from pyspark.sql.types import BooleanType from pyspark.sql import Row import csv from pyspark.sql import SQLContext def parseCSV(idx, part): if idx==0: part.next() for p in csv.reader(part): if p[14] < p[23]: if p[0] == '2014': yield Row(YEAR = p[0], MONTH = int(p[2]), ORIGIN=p[14], ORIGIN_AIRPORT_ID = p[11], DEST = p[23], DEST_AIRPORT_ID = p[20], ROUTE = (p[14],p[23])) elif p[0] == '2015': yield Row(YEAR = p[0], MONTH = int(p[2])+12, ORIGIN=p[14], ORIGIN_AIRPORT_ID = p[11], DEST = p[23], DEST_AIRPORT_ID = p[20], ROUTE = (p[14],p[23])) elif p[0] == '2016': yield Row(YEAR = p[0], MONTH = int(p[2])+24, ORIGIN=p[14], ORIGIN_AIRPORT_ID = p[11], DEST = p[23], DEST_AIRPORT_ID = p[20], ROUTE = (p[14],p[23])) else: pass else: if p[0] == '2014': yield Row(YEAR = p[0], MONTH = int(p[2]), ORIGIN=p[23], ORIGIN_AIRPORT_ID = p[11], DEST = p[14], DEST_AIRPORT_ID = p[20], ROUTE = (p[23],p[14])) elif p[0] == '2015': yield Row(YEAR = p[0], MONTH = int(p[2])+12, ORIGIN=p[23], ORIGIN_AIRPORT_ID = p[11], DEST = p[14], DEST_AIRPORT_ID = p[20], ROUTE = (p[23],p[14])) elif p[0] == '2016': yield Row(YEAR = p[0], MONTH = int(p[2])+24, ORIGIN=p[23], ORIGIN_AIRPORT_ID = p[11], DEST = p[14], DEST_AIRPORT_ID = p[20], ROUTE = (p[23],p[14])) else: pass def main(sc): spark = HiveContext(sc) sqlContext = HiveContext(sc) print "holaaaaa" rows = sc.textFile('../lmf445/Flight_Project/Data/864625436_T_ONTIME_2*.csv').mapPartitionsWithIndex(parseCSV) df = sqlContext.createDataFrame(rows) busiest_route_month_pivot = \ df.select('ORIGIN_AIRPORT_ID', 'ROUTE', 'MONTH') \ .groupBy('ROUTE').pivot('MONTH').count() busiest_route_month_pivot.toPandas().to_csv('Output/MonthlyRoutes.csv') if __name__ == "__main__": sc = SparkContext() main(sc) # In[ ]:
[ "sg4595@nyu.edu" ]
sg4595@nyu.edu
24c336f380a817f634b1f446450fdffa2ad476f9
7ace4c9742af543db1965afec55b115b38d70aea
/programs/classconsrtuctor.py
1d91c88cb6d14bb1fccabcfaaa58948dd4b781f5
[]
no_license
abhis021/C-DAC
8a7472517fb9d664cdcf1d6b33146219da970943
cd002a5740f63aa6fd25b982a4c7f2942877f12d
refs/heads/main
2023-08-25T03:36:35.726671
2021-10-17T07:41:27
2021-10-17T07:41:27
416,581,367
0
0
null
null
null
null
UTF-8
Python
false
false
265
py
class partyanimal: x=0 name=' ' def __init__(self,name1): self.name=name1 def party(self): self.x=self.x+1 print(self.name,'party count',self.x) an=partyanimal('sally') an.party() na=partyanimal('jim') na.party() na.party()
[ "abhisheku722@gmail.com" ]
abhisheku722@gmail.com
eb66be29af0e15d10254c571bd6fd7164a88478f
3b0a27a6fbaed8a3cba81a70f0142e99b8ce60c7
/blender/io_import_sprites/export_scripts.py
8a16ddcd39c4a45f817cbd941ce7ef358f390af0
[]
no_license
sangohan/flumpkit
43b263bdf8076c5e02234b1ccd644370a93ec2d0
017a3f94b9363b719a6a502a4c42e66bfc305223
refs/heads/master
2021-01-16T20:31:51.898801
2013-08-07T18:03:58
2013-08-07T18:03:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
13,996
py
## Author: Daniel Gerson ##GPL 3.0 unless otherwise specified. import bpy from bpy.types import Operator from bpy.types import Menu, Panel import mathutils import math import os import collections import json import re from bpy.props import (StringProperty, BoolProperty, EnumProperty, IntProperty, FloatProperty, CollectionProperty, ) from bpy_extras.object_utils import AddObjectHelper, object_data_add from bpy_extras.image_utils import load_image from mathutils import Vector from mathutils import Quaternion #the from part represents directory and filenames #the import part represents a class or method name etc from bl_ui.space_view3d_toolbar import View3DPanel print("LOADING: import_scripts.py!!!!") from io_import_sprites.common import ( SpritesFunctions, FlumpProps ) class EXPORT_OT_flump_to_json(Operator, SpritesFunctions): bl_idname = "export_sprites.flump_to_json" bl_label = "Export Json" bl_options = {'REGISTER', 'UNDO'} props = bpy.props.PointerProperty(type=FlumpProps) def execute(self, context): ## self.props = bpy.context.scene.FlumpProps self.export_to_json(context) return {'FINISHED'} #inverts y axis def transform_point(self, x, y, width, height): return (x, height - y) def transform_location(self, x, y): return (x, -y) #take transform of plane and convert into pivot def get_pivot(self, arm_name, bone_name, obj, width, height): #use relative #TODO, find by armature name if not bpy.data.armatures[0].bones[bone_name].use_relative_parent: tx = width /2.0 ox = -obj.location.x +tx oy = -obj.location.y return self.transform_point(ox, oy, width, height) tx = width /2.0 ox = -obj.location.x +tx oy = -obj.location.y + (height /2.0) return self.transform_point(ox, oy, width, height) def export_to_json(self, context): #~ jsonFile = get_json_file(); #~ print(jsonFile) props = bpy.context.scene.FlumpProps jsonFile = props.flump_library json_data=open(jsonFile) data = json.load(json_data) json_data.close() #we now have the file in data. 
#now create a new movies area movies = [] data['movies'] = movies data['frameRate'] = bpy.context.scene.render.fps movie = {} movies.append(movie) movie['id'] = props.movie_id movie['layers'] = [] #get layers armature_name = 'Armature' bpy.context.scene.objects.active = bpy.context.scene.objects[armature_name] #context now armature arm = bpy.context.scene.objects.active ob_act = bpy.context.scene.objects.active.animation_data.action curves = ob_act.fcurves bone_keys = bpy.context.object.pose.bones.keys() #some of these bones are layers layers = (b for b in bone_keys if 'flump_layer' in bpy.context.object.pose.bones[b]) #Assumes one symbol per layer symbols = {} for child in arm.children: symbols[child.parent_bone] = child #object, not name layer_frames ={} #loop through curves, add keyframes to ALL bones that are influenced by this bone for curve_id, curve in enumerate(curves) : obj_name =re.search(r'"(\w+)"', curve.data_path).group(1) if obj_name not in layer_frames: layer_frames[obj_name] = [] for key in curve.keyframe_points : frame, value = key.co #add frame to ALL objects that share obj_name TODO (parents) layer_frames[obj_name].append(int(frame)) # do something with curve_id, frame and value ## self.report({'INFO'}, 'EXTRACT {0},{1},{2}'.format(curve_id, frame, value)) layer_frames[obj_name] = sorted(list(set(layer_frames[obj_name]))) #add parents keyframes to child for bone in bpy.data.armatures[0].bones[:]: parents = [p.name for p in bone.parent_recursive] for parent in parents: layer_frames[bone.name].extend(layer_frames[parent]) layer_frames[bone.name] = sorted(list(set(layer_frames[bone.name]))) sequence_length = int(bpy.context.scene.frame_end) layer_zdict = {} #loop through layer_frames for bone_name in layers: frames = layer_frames[bone_name] #add json layer json_layer = {} json_layer['name'] = bone_name json_keyframes = [] json_layer['keyframes'] = json_keyframes zdepth = None keyframe_container = {} #old way, straight for i in range(len(frames)): 
nextframe = sequence_length if (i+1 < len(frames)): nextframe = frames[i+1] json_frame, loc_z = self.create_keyframe(frames[i], bone_name, armature_name, symbols) keyframe_container[frames[i]] = json_frame #fit to curve constants = (sequence_length, armature_name, bone_name, symbols) for i in range(len(frames)): nextframe = sequence_length if (i+1 < len(frames)): nextframe = frames[i+1] self.fit_to_curve(frames[i], nextframe, keyframe_container, constants) #sort, add duration, add json to final list, #rotation hack frames = sorted(list(set(keyframe_container.keys()))) rot_adjust = 0 for i in range(len(frames)): nextframe = sequence_length if (i+1 < len(frames)): nextframe = frames[i+1] json_frame = keyframe_container[frames[i]] json_keyframes.append(json_frame) json_frame['duration'] = nextframe - frames[i] #rotation hack (fixes smooth >360 flips, dislikes long transitions). json_frame['skew'][0] += rot_adjust json_frame['skew'][1] += rot_adjust if nextframe is not sequence_length: rotation1 = json_frame['skew'][0] rotation2 = keyframe_container[frames[i+1]]['skew'][0] + rot_adjust if rotation1 - rotation2 > math.pi: rot_adjust += 2*math.pi if rotation1 - rotation2 < -math.pi: rot_adjust -= 2*math.pi #find z depth order (useful to do this at the same time loc, rotQ, scale = self.get_bone_transform(0, bone_name) if zdepth is None: #only run on first keyframe zdepth = loc[2] if zdepth not in layer_zdict: layer_zdict[zdepth] = [] layer_zdict[zdepth].append(json_layer) #add json layers in correct zdepth order, as determined by first keyframe. for z in sorted(list(layer_zdict.keys())): #not thread safe ;-) for item in layer_zdict[z]: movie['layers'].append(item) #json_layer ## self.report({'INFO'}, 'EXTRACT {0},{1},{2}'.format(loc,rotQ.to_euler(),scale)) with open(jsonFile, 'w') as outfile: json.dump(data, outfile) return #adds keyframes to match linear to curve. 
def fit_to_curve(self, start_frame, end_frame, keyframe_container, constants): #extract constants sequence_length, armature_name, bone_name, symbols = constants for i in range(start_frame, end_frame): transform_start = None transform_end = None #generate start keyframe if start_frame not in keyframe_container: json_start, transform_start = self.create_keyframe(start_frame, bone_name, armature_name, symbols) keyframe_container[start_frame] = json_start else: #TODO redundant sometimes. transform_start = self.get_bone_transform(start_frame, bone_name) #generate end keyframe if end_frame not in keyframe_container: json_end, transform_end = self.create_keyframe(end_frame, bone_name, armature_name, symbols) keyframe_container[end_frame] = json_end else: #TODO redundant sometimes. transform_end = self.get_bone_transform(end_frame, bone_name) #get transform of frame i transform_i = self.get_bone_transform(i, bone_name) #interpolate start and end transforms at i percent = (i - start_frame)/ (end_frame - start_frame) loc = transform_start[0] + (transform_end[0] - transform_start[0]) * percent rz_start = transform_start[1].to_euler().z rz_end = transform_end[1].to_euler().z rz = rz_start + (rz_end - rz_start) * percent scale = transform_start[2] + (transform_end[2] - transform_start[2]) * percent #test match = True if (abs(loc[0] - transform_i[0][0]) > 1): match = 1 if (abs(loc[1] - transform_i[0][1]) > 1): match = 2 ri = transform_i[1].to_euler().z angle_diff = ((ri - rz)/math.pi*180) % 360 if (angle_diff > 1 and angle_diff < 359): match = 3 #TODO scale ## if match is not True: ## self.report({'INFO'}, 'match {0}'.format(angle_diff)) if match is True: #matches where it is supposed to be continue mid_frame = int((start_frame + end_frame)/2) if mid_frame in [start_frame, end_frame]: return #recursion self.fit_to_curve(start_frame, mid_frame, keyframe_container, constants) self.fit_to_curve(mid_frame, end_frame, keyframe_container, constants) return def get_bone_transform(self, 
frame, bone_name): bpy.context.scene.frame_set(frame) pose_bone = bpy.context.object.pose.bones[bone_name] obj = pose_bone.id_data matrix = obj.matrix_world * pose_bone.matrix ## loc, rotQ, scale = matrix.decompose() return matrix.decompose() def create_keyframe(self, frame, bone_name, armature_name, symbols): json_frame = {} json_frame['index'] = frame #store frame values loc, rotQ, scale = self.get_bone_transform(frame, bone_name) #bounding box local_coords = symbols[bone_name].bound_box[:] coords = [p[:] for p in local_coords] width = coords[0][0] * -2 height = coords[0][1] * -2 x, y = self.transform_location(loc[0], loc[1]) json_frame['loc'] =[x, y] angle = -rotQ.to_euler().z #* math.pi / 180 json_frame['skew'] = [angle, angle] json_frame['scale'] = [scale[0], scale[1]] json_frame['pivot'] = self.get_pivot(armature_name, bone_name, symbols[bone_name], width, height) json_frame['ref'] = symbols[bone_name].name return json_frame, (loc, rotQ, scale)
[ "daniel@mambo.co.za" ]
daniel@mambo.co.za
800613bb979e2a651e7833167d3b6536f748963a
699add6df73ad158b8ebeb5f9de4aada5820f205
/facebook/app/posts/models/comments.py
51bab010f0aef4c5c779bd1f65e15e568916fbfe
[]
no_license
ricagome/Api-Facebook-Clone
4f035ad280e6cb48d375fd87a9f62eecce67eb51
fae5c0b2e388239e2e32a3fbf52aa7cfd48a7cbb
refs/heads/main
2023-08-17T12:34:33.379017
2021-10-05T21:23:32
2021-10-05T21:23:32
null
0
0
null
null
null
null
UTF-8
Python
false
false
694
py
"""Comment model.""" # Django from django.db import models # Utilities from app.utils.models import FbModel class Comment(FbModel): """Comment model.""" user = models.ForeignKey('users.User', on_delete=models.CASCADE) profile = models.ForeignKey('users.Profile', on_delete=models.CASCADE) post = models.ForeignKey('posts.Post', on_delete=models.CASCADE) text = models.TextField(help_text='write a comment', max_length=250) reactions = models.IntegerField(default=0) def __str__(self): """Return username, post title and comment.""" return '@{} has commented {} on {}'.format( self.user.username, self.text, self.post)
[ "juliancamilohermida@hotmail.com" ]
juliancamilohermida@hotmail.com
2d0fe84cfd8f2ee9d2079fa3b668038f362c4362
e48faca9b6e2016ae936a77e8acc2f9bce08d207
/series_in_func.py
d6418cb68aeeb2c92160d18f6dd99020f817e56e
[]
no_license
ramachitikineddy/becomecoder
84d7315e7f99c1e18855350c9f14729ba8e57087
d4e9611bb8a82dd0fb85a33e9b00443daee1e781
refs/heads/main
2023-05-06T20:59:51.022901
2021-05-29T10:33:17
2021-05-29T10:33:17
367,065,138
1
1
null
null
null
null
UTF-8
Python
false
false
172
py
def seq(n):
    """Return the successor of n in the Collatz sequence."""
    return 3 * n + 1 if n % 2 else n // 2


# Read a starting number and print its Collatz trajectory until it hits 1
# (or 0, which is a fixed point of seq).
n = int(input())
print(n, end=" ")
while True:
    n = seq(n)
    if not n:
        break
    print(n, end=" ")
    if n == 1:
        break
[ "noreply@github.com" ]
ramachitikineddy.noreply@github.com
d0e2832e8ee5e98f43faaa16e7637d13c046db78
29fc564df8ee16a2d140cbd150260e04f4ddc5c5
/0x0A-python-inheritance/10-square.py
6c8775c89972ea3d1271833f32c5db7e5717d8fe
[]
no_license
ChristianAgha/holbertonschool-higher_level_programming
9359fdf4e3f30ed4422a0af59672ac5ff397d4a2
cce59b31aba3e2a09cb4bf76a6fcfeefa7ab5031
refs/heads/master
2021-01-20T07:15:31.258319
2017-09-27T05:53:25
2017-09-27T05:53:25
89,984,551
0
0
null
null
null
null
UTF-8
Python
false
false
1,313
py
#!/usr/bin/python3
"""Geometry module: BaseGeometry, Rectangle and Square classes."""


class BaseGeometry:
    """Base class providing an area hook and integer validation."""

    def area(self):
        """Raise an Exception because area() is not implemented here."""
        raise Exception("area() is not implemented")

    def integer_validator(self, name, value):
        """Validate that value is a positive integer.

        Args:
            name (str): label used in the error messages.
            value: value to validate.

        Raises:
            TypeError: if value is not an int.
            ValueError: if value is not greater than 0.
        """
        if type(value) is not int:
            raise TypeError("{} must be an integer".format(name))
        if value <= 0:
            raise ValueError("{} must be greater than 0".format(name))


class Rectangle(BaseGeometry):
    """Rectangle defined by a validated width and height."""

    def __init__(self, width, height):
        """Validate then store width and height.

        Validation runs BEFORE assignment so an invalid rectangle never
        carries partially-initialized attributes (the original assigned
        first and validated afterwards).
        """
        self.integer_validator("width", width)
        self.integer_validator("height", height)
        self.__width__ = width
        self.__height__ = height

    def __str__(self):
        """Return "[Rectangle] <width>/<height>"."""
        return "[Rectangle] {}/{}".format(self.__width__, self.__height__)

    def area(self):
        """Return the rectangle's area."""
        return self.__width__ * self.__height__


class Square(Rectangle):
    """Square: a Rectangle whose sides are equal."""

    def __init__(self, size):
        """Validate and store size, initializing the Rectangle base.

        The base initializer validates size (as width/height), keeping
        the same error messages as before.
        """
        super().__init__(size, size)
        self.__size__ = size

    def area(self):
        """Return the square's area."""
        return self.__size__ ** 2
[ "christianagha@gmail.com" ]
christianagha@gmail.com
5483a62a0289eaf03b82b517c8e78dd11f7e8a9d
4a2f163e603f90d5b9a4b2a100d7bc7bc77d1c95
/predicting_biological_response/hemy_example.py
401b7f3d5dd2f883930c7bfdf5ca5cfa2b058519
[]
no_license
tusonggao/data_cck
d781334bd1d425f6ecd613ebdb194835846e3adb
91d48589e8431fd00d70348dcb049c52fdcd2c7f
refs/heads/master
2020-04-09T03:59:09.931284
2020-01-26T15:54:14
2020-01-26T15:54:14
160,005,725
0
0
null
null
null
null
UTF-8
Python
false
false
155
py
# https://blog.csdn.net/data_scientist/article/details/79036382 # https://blog.csdn.net/Gin077/article/details/84339790 # https://github.com/rushter/heamy
[ "tusonggao@163.com" ]
tusonggao@163.com
2df9cffd7c706f44089b51dd1178e45e110bfbc7
8149d1030b5bc62cc82d5afedbe7486daedbf8c5
/[829][Consecutive Numbers Sum][Medium].py
4810671219d8327bd315d73d7fbaf90d1a403a40
[]
no_license
guofei9987/leetcode_python
faef17bb59808197e32ed97e92e2222862e2ba8c
23703a6fb5028d982b3febc630e28f9bb65a82a6
refs/heads/master
2020-03-21T18:24:33.014579
2019-10-12T13:29:03
2019-10-12T13:29:03
138,889,760
1
0
null
null
null
null
UTF-8
Python
false
false
55
py
# https://leetcode.com/problems/consecutive-numbers-sum
[ "guofei9987@foxmail.com" ]
guofei9987@foxmail.com
88e7be6d96ec8e784aba5e12b0692d4c5beb1949
2db7597686f33a0d700f7082e15fa41f830a45f0
/Python/LeetCode2.0/DP/72.Edit Distance.py
b071302d4d3bdf3daf32936c19f8404f75c65131
[]
no_license
Leahxuliu/Data-Structure-And-Algorithm
04e0fc80cd3bb742348fd521a62bc2126879a70e
56047a5058c6a20b356ab20e52eacb425ad45762
refs/heads/master
2021-07-12T23:54:17.785533
2021-05-17T02:04:41
2021-05-17T02:04:41
246,514,421
2
0
null
null
null
null
UTF-8
Python
false
false
1,595
py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @Time : 2020/05/09

'''
Edit distance (LeetCode 72).

Dynamic programming over a (len(word2)+1) x (len(word1)+1) table where
table[i][j] is the minimum number of single-character insertions,
deletions or replacements turning word1[:j] into word2[:i].

Time complexity:  O(N*M)
Space complexity: O(N*M)
'''


class Solution:
    def minDistance(self, word1: str, word2: str) -> int:
        """Return the minimum number of edits converting word1 into word2."""
        len1, len2 = len(word1), len(word2)
        # If either word is empty, the answer is the other word's length.
        if not len1:
            return len2
        if not len2:
            return len1

        # table[i][j]: edit distance between word2[:i] and word1[:j].
        table = [[0] * (len1 + 1) for _ in range(len2 + 1)]
        for i in range(len2 + 1):
            table[i][0] = i          # delete every char of word2 prefix
        for j in range(len1 + 1):
            table[0][j] = j          # insert every char of word1 prefix

        for i in range(1, len2 + 1):
            for j in range(1, len1 + 1):
                if word2[i - 1] == word1[j - 1]:
                    # Matching tails cost nothing extra.
                    table[i][j] = table[i - 1][j - 1]
                else:
                    # 1 + best of insert / delete / replace.
                    table[i][j] = 1 + min(table[i][j - 1],
                                          table[i - 1][j],
                                          table[i - 1][j - 1])
        return table[len2][len1]
[ "58391184+Leahxuliu@users.noreply.github.com" ]
58391184+Leahxuliu@users.noreply.github.com
89a66584f244256442569d26ef92908874f586c1
7da8913218b6450e83c3833f21315630717c7d88
/thomasStudents/odu.py
c3de060757e7fb196b013215e6237d1f35168bf4
[]
no_license
andrefisch/PythonScripts
b028bec4ebf0f4442face3602dd136235efc32fa
bd68981ac931ab9ea7b44761647f5e2fff04e4c8
refs/heads/master
2021-01-17T13:10:49.227307
2017-07-06T04:30:43
2017-07-06T04:30:43
57,985,283
0
0
null
null
null
null
UTF-8
Python
false
false
2,842
py
# Fills column D of odu.xlsx with email addresses scraped from the ODU
# public staff directory, matching each row's first name (col B) and last
# name (col C). Plays a sound and pauses periodically to avoid hammering
# the server.
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import pandas
import openpyxl
import re
import math
import pygame, time

'''
1. import spreadsheet
2. for loop:
    A. find value in cell C(row)
    B. make a request to database
    C. find student in response using last name C(row) and first name B(row)
    D. if name exists in database:
        a. replace empty cell D(row) with email address
3. save file

HTTPError: HTTP Error 500: Internal Server Error
'''

# 1.
# Open the file for editing
xfile = openpyxl.load_workbook('odu.xlsx')
# Open the worksheet we want to edit
sheet = xfile.get_sheet_by_name('students')
# Open the finished playing sound
pygame.init()
pygame.mixer.music.load('note.mp3')
# Some servers get annoyed if you make too many requests so dont do them all at once
# Start here
start = 17978
# End here
end = sheet.max_row + 1
# end = 6000
for row in range (start, end):
    # Every 999 rows: checkpoint the workbook and back off for five minutes.
    if (row % 999 == 0):
        print ("GIVING THE SERVER A FIVE MINUTE BREAK")
        xfile.save('odu.xlsx')
        pygame.mixer.music.play()
        time.sleep(3)
        pygame.mixer.music.stop()
        time.sleep(300)
        pygame.mixer.music.play()
        time.sleep(3)
        pygame.mixer.music.stop()
        print ("BREAK IS OVER, BACK TO WORK!")
    # A.
    # NOTE(review): .value is None for empty cells, which would make the
    # `' ' in firstName` test below raise TypeError — presumably the sheet
    # has no blank name cells in this range; confirm.
    firstName = sheet['B' + str(row)].value
    lastName = sheet['C' + str(row)].value
    # B.
    # Skip multi-word names; the directory query can't handle them.
    if ((' ' in firstName) or (' ' in lastName)):
        continue
    else:
        url = 'https://www.odu.edu/directory/?F_NAME=' + firstName + "&L_NAME=" + lastName + "&SEARCH_IND=S"
        # post_fields = {'L_NAME': lastName, "F_NAME": firstName, "SEARCH_IND": "S"}
        request = Request(url)#, urlencode(post_fields).encode())
        # NOTE(review): local name `json` holds raw response bytes, not JSON.
        json = urlopen(request).read()
        # Make sure there are any results for the search
        if "<table" in str(json):
            # NOTE(review): broad `except Exception: pass` below silently
            # drops parse failures — presumably intentional best-effort.
            try:
                html = pandas.read_html(json)
                email = html[0][1][3]
                # Scan result rows for a row containing both names.
                for i in range(2, len(html[0][1])):
                    if lastName in html[0][0][i] and firstName in html[0][0][i]:
                        p = re.compile('\w*@odu\.edu')
                        # print (isinstance(html[0][2][i], str))
                        if (isinstance(html[0][1][i], str)):
                            m = p.search(html[0][1][i])
                            if (m):
                                sheet['D' + str(row)] = m.group()
                                # Keep track of how close we are to being done
                                print (str(format((row - start) / (end - start) * 100.00, '.2f')) + "%: " + m.group())
            except Exception:
                pass
# Final save plus two audible "done" chimes.
xfile.save('odu.xlsx')
pygame.mixer.music.play()
time.sleep(3)
pygame.mixer.music.stop()
pygame.mixer.music.play()
time.sleep(3)
pygame.mixer.music.stop()
[ "anfischl@gmail.com" ]
anfischl@gmail.com
ddad2ca9b7b59fdf640e2b0a0f29fdc4854b3efb
a1a789f14eb2d5c039fbf61283b03f2f1e0d2651
/jeopardy/migrations/0002_auto_20150622_0957.py
7c525a02cd0795373e6b581937c6f647021a3936
[ "MIT" ]
permissive
codefisher/web_games
279bf5be5a348951e6ae3361c24b696ac841e01c
d09ffb8f86b24e04568b2a33c94aa49d80455715
refs/heads/master
2021-01-10T13:10:33.097712
2017-07-12T05:46:37
2017-07-12T05:46:37
36,868,291
0
0
null
null
null
null
UTF-8
Python
false
false
817
py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('jeopardy', '0001_initial'), ] operations = [ migrations.AlterModelOptions( name='points', options={'verbose_name': 'Points', 'verbose_name_plural': 'Points'}, ), migrations.AddField( model_name='question', name='bonus', field=models.IntegerField(default=0), preserve_default=False, ), migrations.AlterField( model_name='question', name='topic', field=models.ForeignKey(related_name='topicopi', verbose_name='Topic', to='jeopardy.Topic', on_delete=models.CASCADE), ), ]
[ "mail@codefisher.org" ]
mail@codefisher.org
92d9d24d3beb5ec8799d88be94123456d4805482
9da1a3470d60a667167ecba0a49915296de2fbc8
/server/app/utils/token_util.py
f57cff5e5ac332cd85b84fb05b608e2dbac6f71e
[ "MIT" ]
permissive
csu-xiao-an/web_info_monitor
5d01d296b2fc9583a1029df30af1cd89feff4419
5f39254a4ae014e1a2017006290585b4648cc013
refs/heads/master
2020-07-27T08:46:16.882741
2019-09-09T14:08:44
2019-09-09T14:08:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,545
py
from flask import jsonify
from itsdangerous import TimedJSONWebSignatureSerializer, SignatureExpired, \
    BadSignature
from app.models import User
import os

# Signing key for tokens; falls back to a development default.
secret_key = os.environ.get("secret_key", "recar")


def generate_auth_token(uid, is_amdin=False, scope=None, expiration=5000):
    """Generate and return a signed, expiring auth token string.

    The payload carries the client's uid (unique id of the requesting
    client), an admin flag, and an optional permission scope. The
    expiration (seconds) is mandatory for security; ~2 hours is typical.
    """
    s = TimedJSONWebSignatureSerializer(secret_key, expires_in=expiration)
    return s.dumps({
        'uid': uid,
        'is_amdin': is_amdin,
        'scope': scope
    }).decode('ascii')


def verify_token(token):
    """Return the User matching the token's uid, or None if the token is
    expired, tampered with, or otherwise unreadable.

    Fix: the original built the serializer from undefined names
    (`Serializer`, `current_app`), raising NameError on every call; it now
    uses the same TimedJSONWebSignatureSerializer/secret_key pair that
    generate_auth_token signs with.
    """
    s = TimedJSONWebSignatureSerializer(secret_key)
    try:
        data = s.loads(token)
    except SignatureExpired:
        return None  # valid token, but expired
    except BadSignature:
        return None  # invalid token
    except Exception:
        return None  # any other deserialization failure
    user = User.query.filter_by(id=data['uid']).first()
    return user
[ "yansiyu@360.net" ]
yansiyu@360.net
9aa84188689bfa3d627c30002874472a97dc229a
499ff5445e2017d042690c0429cf2e767a7f623f
/coral/io/_abi.py
b19a2ab0ec287ad6d000026ece9b71f749677f3a
[ "MIT" ]
permissive
blthree/coral
b6ab934c10271d7b790130fe45e622b7c66921b4
30514735d9a51487583535a3a7e3fbfd0fe15ed8
refs/heads/master
2021-01-22T10:14:52.018579
2017-02-19T00:28:33
2017-02-19T00:28:33
81,997,699
0
0
null
2017-02-14T22:58:59
2017-02-14T22:58:59
null
UTF-8
Python
false
false
3,069
py
'''Read and write DNA sequences.'''
import coral as cr
import numpy as np
import os

from . import parsers
from .exceptions import UnsupportedFileError


def read_abi(path, trim=True, attach_trace=True):
    '''Read a single ABI/AB1 Sanger sequencing file.

    :param path: Full path to input file.
    :type path: str
    :param trim: Determines whether the sequence will be trimmed using
                 Richard Mott's algorithm (trims based on quality).
    :type trim: bool
    :param attach_trace: Determines whether to attach the trace result as a
                         .trace attribute of the returned sequence and the
                         trace peak locations as a .tracepeaks attribute. The
                         trace attribute is a 2D numpy array with 4 columns
                         in the order GATC.
    :type attach_trace: bool
    :returns: DNA sequence.
    :rtype: coral.DNA

    '''
    filename, ext = os.path.splitext(os.path.split(path)[-1])

    abi_exts = ['.abi', '.ab1']
    if ext in abi_exts:
        # NOTE(review): file opened in text mode; AB1 files are binary —
        # confirm parsers.ABI expects a text handle.
        with open(path) as f:
            abi = parsers.ABI(f)
    else:
        raise UnsupportedFileError('File format not recognized.')

    # Collapse ambiguity codes out of the base-called sequence.
    seq = abi.seq_remove_ambig(abi.seq)

    # Attach the trace results to the seq
    if attach_trace:
        order = abi.data['baseorder'].upper()
        trace = [abi.data['raw' + str(order.index(b) + 1)] for b in 'GATC']
        trace = np.array(trace)
        tracepeaks = np.array(abi.data['tracepeaks'])

    if trim:
        try:
            sequence = cr.DNA(abi.trim(seq))
        except ValueError:
            # A ValueError is raised if the sequence is too short
            # NOTE(review): when this fires, `sequence` is never bound and
            # the statements below raise NameError — likely a latent bug.
            pass
        trim_start = seq.index(str(sequence))
        # Adjust trace data based on trimming
        # NOTE(review): `tracepeaks`/`trace` only exist when
        # attach_trace=True; trim=True with attach_trace=False would raise
        # NameError here — confirm callers never use that combination.
        idx = (trim_start, trim_start + len(sequence))
        peaks = tracepeaks[idx[0]:idx[1]]
        sequence.trace = trace[peaks[0]:peaks[-1], :]
        sequence.tracepeaks = peaks
    else:
        sequence = cr.DNA(seq)
    sequence.name = abi.name

    return sequence


def read_abis(directory, trim=True, attach_trace=True):
    '''Read all ABI sequences files in a directory.

    :param directory: Path to directory containing sequencing files.
    :type directory: str
    :param trim: Determines whether the sequence will be trimmed using
                 Richard Mott's algorithm (trims based on quality).
    :type trim: bool
    :param attach_trace: Determines whether to attach the trace result as a
                         .trace attribute of the returned sequence. The trace
                         attribute is a 2D numpy array with 4 columns in the
                         order GATC.
    :type attach_trace: bool
    :returns: A list of DNA sequences.
    :rtype: coral.DNA list

    '''
    dirfiles = os.listdir(directory)
    abis = []
    for dirfile in dirfiles:
        path = os.path.join(directory, dirfile)
        try:
            abis.append(read_abi(path, trim=trim, attach_trace=attach_trace))
        except UnsupportedFileError:
            # Non-ABI files in the directory are simply skipped.
            pass
    return abis
[ "nbolten@gmail.com" ]
nbolten@gmail.com
89e6f9abf269be06d699b31d7a18f80d863cd0af
ea57b713f59d2e2a8d6f4b0b6938c20a8ae6d67d
/fetchQzone/iszhi.py
96f87ced87c63b0bdd70fb54d9775a8bf09cc8d9
[]
no_license
guoyu07/fetchQzone
9919f9fad3d44a4643ebaba61d534f3d99c95f8f
db0d69b7d4369bd8aaafc2af8f14fdbe6316d294
refs/heads/master
2021-05-28T20:50:23.052035
2015-03-06T05:10:56
2015-03-06T05:10:56
null
0
0
null
null
null
null
UTF-8
Python
false
false
181
py
def iszhi(x):
    """Return True if x is a prime number, else False.

    Counts the divisors of x; a prime has exactly two (1 and x itself).
    The original used C-style `++cnt`, which is a no-op in Python (double
    unary plus), so the counter never advanced and the function always
    returned False.
    """
    # 0, 1 and negatives are not prime.
    if x <= 1:
        return False
    cnt = 0
    for m in range(1, x + 1):
        if x % m == 0:
            cnt += 1
            # More than two divisors: composite, stop early.
            if cnt > 2:
                return False
    return cnt == 2
[ "zhangxu1573@qq.com" ]
zhangxu1573@qq.com
ab0c049cca67cdb3f90aa2e8ce48ecceed5f6ce8
83acd2e879b8d1dfbd7d735193539b8537e86d08
/pyropod/ropod/ftsm/ftsm_base.py
b5a56108172f6d678c51713eb4724ab28dca21d7
[]
no_license
HBRS-SDP/ropod_common
89b296e6bb56dc225319850036d3a63efd46ace9
5ce24b8ae79239f4fd5d2249fd33d1b1061eaceb
refs/heads/master
2020-05-09T23:39:11.209722
2019-03-12T12:59:48
2019-03-12T12:59:48
181,508,576
0
0
null
2019-04-15T14:52:18
2019-04-15T14:52:18
null
UTF-8
Python
false
false
1,339
py
from pyftsm.ftsm import FTSM, FTSMStates, FTSMTransitions


class FTSMBase(FTSM):
    '''ROPOD-specific implementation of a fault-tolerant state machine

    @author Alex Mitrevski
    @maintainer Alex Mitrevski, Santosh Thoduka, Argentina Ortega Sainz
    @contact aleksandar.mitrevski@h-brs.de, santosh.thoduka@h-brs.de, argentina.ortega@h-brs.de

    '''
    def __init__(self, name, dependencies, max_recovery_attempts=1):
        super().__init__(name, dependencies, max_recovery_attempts)

    def init(self):
        '''Initialise the component; reports FTSMTransitions.INITIALISED
        unless overridden
        '''
        return FTSMTransitions.INITIALISED

    def configuring(self):
        '''Configure/reconfigure the component; reports
        FTSMTransitions.DONE_CONFIGURING unless overridden
        '''
        return FTSMTransitions.DONE_CONFIGURING

    def ready(self):
        '''Behaviour while the component is ready for operation but not
        active; reports FTSMTransitions.RUN unless overridden
        '''
        return FTSMTransitions.RUN

    def running(self):
        '''Behaviour during active operation; to be provided by subclasses
        '''
        pass

    def recovering(self):
        '''Component recovery behaviour; to be provided by subclasses
        '''
        pass
[ "aleksandar.mitrevski@h-brs.de" ]
aleksandar.mitrevski@h-brs.de
edc33e4a7d63438dd82b67c0afebd70a4f1e0c49
6fb6a62a33b13690f3c95c166f07a736836308b6
/functions/cartupdate/main.py
483369e75fa95ce5f8173cd6d6f83a32c2c1ff5e
[]
no_license
Dualic/petshop
88172ed47d65ccef79342524262b4de26995a463
03443c0b8c2a3a12e9552a5924b99745fb4b6465
refs/heads/master
2023-07-16T08:53:22.961111
2021-09-03T12:16:17
2021-09-03T12:16:17
401,612,658
0
2
null
2021-09-03T09:40:14
2021-08-31T07:29:40
Python
UTF-8
Python
false
false
1,334
py
def getsecret(secretname):
    """Return the latest version of a Secret Manager secret, decoded as UTF-8."""
    import google.cloud.secretmanager as secretmanager
    client = secretmanager.SecretManagerServiceClient()
    name = f"projects/week10-1-324606/secrets/{secretname}/versions/latest"
    response = client.access_secret_version(request={"name": name})
    return response.payload.data.decode("UTF-8")


def cartupdate(request):
    """HTTP Cloud Function: update one cart row from a JSON request body.

    Expected JSON keys: id, customer_id, product_id, amount.
    Returns "Update success" on success, "Update failed" otherwise.
    """
    import psycopg2

    dbname = getsecret("dbname")
    user = "postgres"
    password = getsecret("dbpassword")
    host = getsecret("host")
    conn = None

    request_json = request.get_json(silent=True)
    # get_json(silent=True) yields None for a missing or malformed JSON
    # body; the original then crashed with AttributeError on .get().
    if request_json is None:
        return "Update failed"

    row_id = request_json.get("id")
    customer_id = request_json.get("customer_id")
    product_id = request_json.get("product_id")
    amount = request_json.get("amount")

    # Parameterized query -- values are never interpolated into the SQL text.
    SQL = "UPDATE cart SET customer_id = %s, product_id = %s, amount = %s WHERE id = %s;"
    result = "Update failed"
    try:
        conn = psycopg2.connect(host=host, dbname=dbname, user=user, password=password)
        cursor = conn.cursor()
        cursor.execute(SQL, (customer_id, product_id, amount, row_id))
        conn.commit()
        cursor.close()
        result = "Update success"
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        # Always release the connection, even on failure.
        if conn is not None:
            conn.close()
    return result
[ "ilkka.o.pekkala@gmail.com" ]
ilkka.o.pekkala@gmail.com
005779a57f96302b20a3bcde3152d53965d436f1
d496d504bf4ccdb59fbbeeee7b5d70ae7ab136b8
/ts_development/version1/ts__development/models/models.py
7448e94a50dfbd30c0d9546155d7beafbc13bcf3
[]
no_license
taybahsoftegy-dev/ts-modules
6e92bb0748238fcde38df146ab73ae311f16df55
cf4ec549943a0ba29d203ef1611a337040389d64
refs/heads/master
2022-11-08T23:18:37.734241
2020-06-29T12:45:15
2020-06-29T12:45:15
275,809,983
0
0
null
null
null
null
UTF-8
Python
false
false
1,976
py
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
import time


class Development_Tracking(models.Model):
    """Tracks development requests/tasks for TaybahSoft."""

    _name = 'development.tracking'
    _inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin']
    _mail_post_access = 'read'
    _check_company_auto = True

    serial = fields.Integer(string='Serial', tracking=True)
    # Defaults to today's date at record creation.
    Date = fields.Date(default=lambda *a: time.strftime('%Y-%m-%d'), tracking=True)
    module = fields.Char(string='Module', tracking=True)
    form = fields.Char(string='Form', tracking=True)
    report = fields.Char(string='Report', tracking=True)
    new = fields.Char(string='New', tracking=True)
    description = fields.Text(string='Description', tracking=True)
    status = fields.Selection([
        ('open', 'Open'),
        ('closed', 'Closed'),
        ('rejected', 'Rejected')], default='open', tracking=True)
    time_consumed = fields.Char('Time Consumed', tracking=True)
    user_id = fields.Many2one('res.users', default=lambda self: self.env.uid, index=True, tracking=True)
    # Fix: user_email was declared twice with identical arguments; the
    # second declaration silently overwrote the first, so the duplicate
    # has been removed.
    user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False)
    # NOTE(review): defaulting a res.partner Many2one to self.env.uid uses
    # a *user* id as a *partner* id — confirm this is intended.
    partner_id = fields.Many2one('res.partner', string='Customer', default=lambda self: self.env.uid,)
[ "dev.mohamedfci@gmail.com" ]
dev.mohamedfci@gmail.com
0d3b60023a60eed6ae0274a83fd1daecbd04b513
95749b75c446df3ce4aabb03d5aec90de793e207
/gemini/taskapp/celery.py
722f621c5679f886e12c4c93ba9692df4ba43474
[]
no_license
Hawk94/gemini
8288a11499c4cc12c8c79641a51b5e99afe268c5
3a4d0b13488b8e9fbc40dc3cde338b61bc04b494
refs/heads/master
2020-06-24T11:37:22.204269
2017-07-12T20:33:21
2017-07-12T20:33:21
96,935,334
0
0
null
null
null
null
UTF-8
Python
false
false
1,398
py
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings


if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')  # pragma: no cover


# Module-level Celery application instance shared by the whole project.
app = Celery('gemini')


class CeleryConfig(AppConfig):
    """Django app config that wires Celery up once the app registry is ready."""

    name = 'gemini.taskapp'
    verbose_name = 'Celery Config'

    def ready(self):
        """Configure Celery from Django settings and register tasks/signals."""
        # Using a string here means the worker will not have to
        # pickle the object when using Windows.
        app.config_from_object('django.conf:settings')
        installed_apps = [app_config.name for app_config in apps.get_app_configs()]
        app.autodiscover_tasks(lambda: installed_apps, force=True)

        if hasattr(settings, 'RAVEN_CONFIG'):
            # Celery signal registration
            # Imported lazily so projects without Raven configured never
            # need the dependency installed.
            from raven import Client as RavenClient
            from raven.contrib.celery import register_signal as raven_register_signal
            from raven.contrib.celery import register_logger_signal as raven_register_logger_signal

            raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
            raven_register_logger_signal(raven_client)
            raven_register_signal(raven_client)


@app.task(bind=True)
def debug_task(self):
    """Diagnostic task: print the current request context."""
    print('Request: {0!r}'.format(self.request))  # pragma: no cover
[ "x99tom.miller@gmail.com" ]
x99tom.miller@gmail.com
b534f887f4eef332a9a1d5dc5f0a6b197b40df84
29ad238bedc14b3c268b22777391b25fb8701858
/config.py
5c010a83ee93ad59c31d5579d15039b6b2d83b60
[]
no_license
chiris-ye/-
2c079efe602f390fc5fdfd2d3a74d73d840c3cfd
84b836a637c6647ab13d801caff03359956536c0
refs/heads/master
2021-12-10T02:36:58.345901
2021-11-03T06:40:14
2021-11-03T06:40:14
262,729,476
0
0
null
2020-05-10T06:52:20
2020-05-10T06:52:19
null
UTF-8
Python
false
false
174
py
class config():
    """Model hyperparameters, accessed as class attributes."""

    embed_dim = 300    # word-embedding dimensionality
    hidden_dim = 300   # recurrent hidden-state size
    layers = 1         # number of recurrent layers
    dropout = 0.1      # dropout probability
    seq_in_size = 7200   # input size of the classifier head — presumably a flattened sequence representation; confirm against the model
    fc_dim = 100       # fully-connected layer width
    out_dim = 2        # number of output classes
    mind_dim = 600     # NOTE(review): dimension of the "mind" representation — confirm semantics in the model code
    man_dim = 16       # NOTE(review): semantics unclear from here — confirm in the model code
[ "noreply@github.com" ]
chiris-ye.noreply@github.com
d24b0c9ae9dcf47759d369bdaf972fc87c046577
8dfd0de8519bf29565cf44ac342587a2b93fb086
/sonar.py
6dbb5b8b2981536bd1f86487a0012dbc577fb58c
[]
no_license
ThePfarrer/Invent-Your-Own-Games
d058fdbb5f7408ab5ac3b4a301298fda62b0d458
ae13a457277f0cad53185bb1d611203eb78c22b0
refs/heads/master
2023-02-09T17:57:11.661474
2021-01-06T18:46:41
2021-01-06T18:46:41
323,171,153
0
0
null
null
null
null
UTF-8
Python
false
false
7,797
py
# Sonar Treasure Hunt
# A text game: drop sonar devices on a 60x15 ocean grid to locate three
# hidden treasure chests before running out of devices.

import random
import sys
import math


def get_new_board():
    """Return a fresh 60x15 board; board[x][y] holds one ocean character."""
    # Create a new 60x15 board data structure.
    board = []
    for x in range(60):  # The main list is list of 60 lists.
        board.append([])
        # Each list in the main list has 15 single-character strings.
        for y in range(15):
            # Use different characters for the ocean to make it more readable.
            if random.randint(0, 1) == 0:
                board[x].append('~')
            else:
                board[x].append('`')
    return board


def draw_board(board):
    """Print the board with coordinate labels on all four edges."""
    # Draw the board data structure.

    # Initial space for the numbers down the left side of the board
    tens_digits_line = '    '
    for i in range(1, 6):
        tens_digits_line += (' ' * 9) + str(i)

    # Print the numbers across the top of the board.
    print(tens_digits_line)
    print('   ' + ('0123456789' * 6))
    print()

    # Print each of the 15 rows.
    for row in range(15):
        # Single-digit numbers need to be padded with an extra space.
        if row < 10:
            extra_space = ' '
        else:
            extra_space = ''

        # Create the string for this row on the board.
        board_row = ''
        for column in range(60):
            board_row += board[column][row]

        print(f'{extra_space}{row} {board_row} {row}')

    # Print the numbers across the bottom of the board.
    print()
    print('   ' + ('0123456789' * 6))
    print(tens_digits_line)


def get_random_chests(num_chests):
    """Return num_chests distinct random [x, y] chest positions."""
    # Create a list of chest data structures (two-item lists of x, y int coordinates).
    chests = []
    while len(chests) < num_chests:
        new_chest = [random.randint(0, 59), random.randint(0, 14)]
        if new_chest not in chests:  # Make sure a chest is not already here.
            chests.append(new_chest)
    return chests


def is_on_board(x, y):
    """Return True if (x, y) lies within the 60x15 board."""
    # Return True if the coordinates are on the board; otherwise, return False.
    return x >= 0 and x <= 59 and y >= 0 and y <= 14


def make_move(board, chests, x, y):
    """Drop a sonar device at (x, y); mark the board and report the result."""
    # Change the board data structure with a sonar device character. Remove
    # treasure chests from the chests list as they are found. Return False if
    # this is an invalid move. Otherwise, return the string of the result of
    # this move.
    smallest_distance = 100  # Any chest will be closer than 100.
    for cx, cy in chests:
        # Euclidean distance from the drop point to this chest.
        distance = math.sqrt((cx - x)**2 + (cy - y)**2)

        if distance < smallest_distance:  # We want the closest treasure chest.
            smallest_distance = distance

    smallest_distance = round(smallest_distance)

    if smallest_distance == 0:
        # xy is directly on a treasure chest!
        chests.remove([x, y])
        return 'You have found a sunken treasure chest!'
    else:
        if smallest_distance < 10:
            # In sonar range: show the rounded distance on the board.
            board[x][y] = str(smallest_distance)
            return f'Treasure detected at a distance of {smallest_distance} from the sonar device.'
        else:
            board[x][y] = 'X'
            return 'Sonar did not detect anything. All treasure chests out of range.'


def enter_player_move(previous_moves):
    """Prompt until the player enters a valid, unused [x, y]; 'quit' exits."""
    # Let the player enter their move. Return a two-item list of int xy coordinates.
    print('Where do you want to drop the next sonar device? (0-59 0-14) (or type quit)')
    while True:
        move = input()
        if move.lower() == 'quit':
            print('Thanks for playing!')
            sys.exit()

        move = move.split()
        if len(move) == 2 and move[0].isdigit() and move[1].isdigit() and is_on_board(int(move[0]), int(move[1])):
            if [int(move[0]), int(move[1])] in previous_moves:
                print('You already moved there.')
                continue
            return [int(move[0]), int(move[1])]

        print('Enter a number from 0 to 59, a space, then a number from 0 to 14.')


def show_instructions():
    """Print the two-page game instructions, pausing for Enter between pages."""
    print('''Instructions:
You are the captain of the Simon, a treasure-hunting ship. Your current mission
is to use sonar devices to find three sunken treasure chests at the bottom of
the ocean. But you only have cheap sonar that finds distance, not direction.

Enter the coordinates to drop a sonar device. The ocean map will be marked with
how far away the nearest chest is, or an X if it is beyond the sonar device's
range. For example, the C marks are where chests are. The sonar device shows a
3 because the closest chest is 3 spaces away.

                    1         2         3
          012345678901234567890123456789012

0         ~~`~`~~~`~``~~```~``~~~````~~``~~ 0
1         ~`~~~~``~``~``~``~```~~`~``~```~~ 1
2         ``X~~3~~~`~C~````````~~~`~```~``` 2
3         ```~``~~`~~`~``~~~``~~~`~`~~~~~`~ 3
4         ~~~~~`````~`C```~`~`~~`~~```~```` 4

          012345678901234567890123456789012
                    1         2         3

(In the real game, the chests are not visible in the ocean.)

Press enter to continue...''')
    input()

    print('''
When you drop a sonar device directly on a chest, you retrieve it and the other
sonar devices update to show how far away the next nearest chest is. The chests
are beyond the range of the sonar device on the left, so it shows an X.

                    1         2         3
          012345678901234567890123456789012

0         ~~`~`~~~`~``~~```~``~~~````~~``~~ 0
1         ~`~~~~``~``~``~``~```~~`~``~```~~ 1
2         ``X~~7~~~`~C~````````~~~`~```~``` 2
3         ```~``~~`~~`~``~~~``~~~`~`~~~~~`~ 3
4         ~~~~~`````~`C```~`~`~~`~~```~```` 4

          012345678901234567890123456789012
                    1         2         3

The treasure chests don't move around. Sonar devices can detect treasure
chests up to a distance of 9 spaces. Try to collect all 3 chests before
running out of sonar devices. Good luck!

Press enter to continue...''')
    input()


print('S O N A R !')
print()
print('Would you like to view the instructions? (yes/no)')
if input().lower().startswith('y'):
    show_instructions()

while True:
    # Game setup
    sonar_devices = 20
    the_board = get_new_board()
    the_chests = get_random_chests(3)
    draw_board(the_board)
    previous_moves = []

    while sonar_devices > 0:
        # Show sonar device and chest statuses.
        print(f'You have {sonar_devices} sonar device(s) left. {len(the_chests)} treasure chest(s) remaining.')

        x, y = enter_player_move(previous_moves)
        # We must track all moves so that sonar devices can be updated.
        previous_moves.append([x, y])

        move_result = make_move(the_board, the_chests, x, y)
        if move_result == False:
            continue
        else:
            if move_result == 'You have found a sunken treasure chest!':
                # Update all the sonar devices currently on the map.
                for x, y in previous_moves:
                    make_move(the_board, the_chests, x, y)
            draw_board(the_board)
            print(move_result)

        if len(the_chests) == 0:
            print(
                'You have found all the sunken treasure chests! Congratulations and good game!')
            break

        sonar_devices -= 1

    if sonar_devices == 0:
        print(
            'We\'ve run out of sonar devices! Now we have to tunr the ship around and head')
        print('for home with treasure chests still out there! Game over.')
        print('    The remaining chests were here:')
        for x, y in the_chests:
            print(f'    {x}, {y}')

    print('Do you want to play again? (yes or no)')
    if not input().lower().startswith('y'):
        sys.exit()
[ "orezpablo@gmail.com" ]
orezpablo@gmail.com
1bb19df97eb432adc4d8988bc491abf66979b71f
babf32f611200957e4e2a6bd3c156916b891c43f
/mysite/settings.py
b9d178757d2991419be0a8125ff4f05d4507fd1a
[]
no_license
Tawfiq-Abu/new_blog
5faffc2f569d4cc4f7e56ea9207d5ac97c64e5cd
10743b8ac6ef665a928e909aba8f4c1d4557964f
refs/heads/main
2023-02-19T16:54:43.002556
2021-01-19T11:44:10
2021-01-19T11:44:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,076
py
""" Django settings for mysite project. Generated by 'django-admin startproject' using Django 3.1.4. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'g7o50)e+v6(d)n&jxt@zfg$_^p!0)ub&v6n735=ysw*e+#okaf' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'myblog', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mysite.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mysite.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 
'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/'
[ "tawfiqabubakr7@gmail.com" ]
tawfiqabubakr7@gmail.com
abedc4c120a71cfaac46c76124d5f686290bce4b
2255a4eb151b85df055d3b66455bd788b6928592
/lcs.py
24442756a9a6765b6654a20e307ef03c08b6fd1c
[]
no_license
mloo3/LocalHooks
fcfe073d6be32b54421b860920a3de59a948282c
9ff07384e544150d2677906683a7f55c31ebd4dc
refs/heads/master
2021-01-01T19:22:27.343172
2017-07-28T19:35:33
2017-07-28T19:35:33
98,575,650
0
0
null
null
null
null
UTF-8
Python
false
false
807
py
def lcs(x,y): m = len(x) n = len(y) l = [[None]*(n+1) for i in range(m+1)] for i in range(m+1): for j in range(n+1): if i == 0 or j == 0: l[i][j] = 0 elif x[i-1] == y[j-1]: l[i][j] = l[i-1][j-1]+1 else: l[i][j] = max(l[i-1][j],l[i][j-1]) index = l[m][n] lcs = [""]*(index+1) lcs[index]="\0" i=m j=n while i > 0 and j > 0: if x[i-1]==y[j-1]: lcs[index-1]=x[i-1] i-=1 j-=1 index-=1 elif l[i-1][j] > l[i][j-1]: i-=1 else: j-=1 #print('\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in l])) return "".join(lcs) x = "aggtab" y = "gxtxayb" print(lcs(x,y))
[ "slayer71432@gmail.com" ]
slayer71432@gmail.com
9c33d363aec75e149c68e57f14c11dbc0baa71bd
3825f56bef58063374d56d06a9de3418d04bedd6
/exercices/advanced-modules/stringio.py
168ebcf3ec63621fd6fabcba9afb42aea7e44b71
[ "MIT" ]
permissive
cfascina/python-learning
a989869846fe8eca45f2f0717ea958bd603d12e5
1bc1d4032fb68456a092229de94b5207db7e9143
refs/heads/master
2020-05-20T08:48:06.121746
2019-07-11T18:07:01
2019-07-11T18:07:01
185,482,445
0
0
null
null
null
null
UTF-8
Python
false
false
446
py
import io # StringIO method sets the string as file like object file = io.StringIO("This is just a normal string.") # Read the file and print it print(file.read()) # Reset the cursor and writes a new string file.seek(0) file.write("Second line written to the file like object.") # Reset the cursor, read the file again and print it file.seek(0) print(file.read()) # Close the file like object when contents are no longer needed file.close()
[ "cfascina@gmail.com" ]
cfascina@gmail.com
8ccd44a76e64b8cc0ad921f213460c409e895266
cc7b4e71b3c27240ec650a75cc6f6bbab5e11387
/crdb/templatetags/email_tags.py
b13eedd6c32b7950e6ee3313c89e155c42547e14
[ "MIT" ]
permissive
jsayles/CoworkingDB
0cdada869d950a28cfef20d1b9c1eb3eb4d7b1c2
78776910eba0354a7fd96b2e2c53a78e934d8673
refs/heads/master
2023-02-22T23:11:19.040799
2021-12-28T19:13:39
2021-12-28T19:13:39
883,951
3
0
MIT
2023-02-15T17:59:10
2010-09-02T18:36:43
Python
UTF-8
Python
false
false
764
py
import os from django.template import Library from django import template from django.conf import settings from django.utils.html import format_html from django.urls import reverse from crdb.models import EmailAddress register = template.Library() @register.simple_tag def email_verified(email): if not email: return None if not isinstance(email, EmailAddress): # Got a string so we should pull the object from the database email = EmailAddress.objects.get(email=email) if email.is_verified(): return "" html = '<span style="color:red;">( <a target="_top" style="color:red;" href="{}">{}</a> )</span>' link = email.get_send_verif_link() label = "Not Verified" return format_html(html, link, label)
[ "jsayles@gmail.com" ]
jsayles@gmail.com
385836ada1f0c7aa8919ec7aeb97acca6aea94c0
644b13f90d43e9eb2fae0d2dc580c7484b4c931b
/network2.py
5dbc8833c5526d15e355e3169680c46c4a5bc280
[]
no_license
yeonnseok/ps-algorithm
c79a41f132c8016655719f74e9e224c0870a8f75
fc9d52b42385916344bdd923a7eb3839a3233f18
refs/heads/master
2020-07-09T11:53:55.786001
2020-01-26T02:27:09
2020-01-26T02:27:09
203,962,358
0
0
null
null
null
null
UTF-8
Python
false
false
1,318
py
def cal_ans(): temp = [] ans = 0 for i in range(len(src)): if src[i] == 0: if len(temp) == 5: temp = temp[1:] temp.append(i) else: ans += i * len(temp) - sum(temp) for j in temp: link[i + 1].append(j + 1) link[j + 1].append(i + 1) return ans def cal_group(): cnt, group = 0, 0 zero_one = False start, end = -1, 0 for i in range(len(src)): start = i + 1 if src[i] == 1: group += 1 else: break for i in range(len(src) - 1, -1, -1): end = i + 1 if src[i] == 0: group += 1 else: break for i in range(start, end): if src[i] == 0: cnt += 1 elif src[i] == 1: if cnt >= 5: group += (cnt - 4) elif i >= 1 and src[i-1] == 0: zero_one = True cnt = 0 if zero_one and len(src) != 1: return group + 1 return group num_of_case = int(input()) for case in range(1, num_of_case + 1): n = int(input()) src = list(map(int, input().split())) link = [[] for _ in range(n + 1)] print("#%d" % case, end=" ") print(cal_ans(), end=" ") print(cal_group())
[ "smr603@snu.ac.kr" ]
smr603@snu.ac.kr
e1e3124cd44931303505037d6d88f51555fb555a
403e7f22b8dd4119fc83d153d6dc6e3520ac1922
/python-scripts/S3/awsS3PutBigFiles.py
b1f76cdc33db30acf3ebdecee8ddf7bb2eea8edd
[]
no_license
vincedgy/AWS-Scripts
1e56c13245b38f5c520a4207acf544f1d01ac5cb
f350167c200700daea23ad9dcbe609ab1d7b90d9
refs/heads/master
2020-03-29T00:39:13.453200
2017-11-01T20:34:25
2017-11-01T20:34:25
94,635,738
2
0
null
null
null
null
UTF-8
Python
false
false
1,442
py
""" """ # Create a big file (100 Mb): # dd if=/dev/zero of=/tmp/bigfile bs=1024 count=0 seek=$[1024*100] import os import sys import threading import boto3 from boto3.s3 import transfer class ProgressPercentage(object): def __init__(self, filename): self._filename = filename self._size = float(os.path.getsize(filename)) self._seen_so_far = 0 self._lock = threading.Lock() def __call__(self, bytes_amount): # To simplify we'll assume this is hooked up # to a single filename. with self._lock: self._seen_so_far += bytes_amount percentage = (self._seen_so_far / self._size) * 100 sys.stdout.write( "\r%s %s / %s (%.2f%%)" % ( self._filename, self._seen_so_far, self._size, percentage)) sys.stdout.flush() # --------------------------------------------------------------------- # Main if __name__ == '__main__': client = boto3.client('s3', 'eu-west-1') config = transfer.TransferConfig( multipart_threshold=8 * 1024 * 1024, max_concurrency=10, num_download_attempts=10, ) uploading = transfer.S3Transfer(client, config) uploading.upload_file( '/tmp/bigfile', 'e-attestations-ova', 'bigfile', callback=ProgressPercentage('/tmp/bigfile') )
[ "vincent.dagoury@gmail.com" ]
vincent.dagoury@gmail.com
346dfc71b0db9a749e8ee1d65b7425c276ff9cb1
4577d8169613b1620d70e3c2f50b6f36e6c46993
/students/1797637/homework01/program03.py
1dea672b0e9890cc0e4a8907a314950ef5731495
[]
no_license
Fondamenti18/fondamenti-di-programmazione
cbaf31810a17b5bd2afaa430c4bf85d05b597bf0
031ec9761acb1a425fcc4a18b07884b45154516b
refs/heads/master
2020-03-24T03:25:58.222060
2018-08-01T17:52:06
2018-08-01T17:52:06
142,419,241
0
0
null
null
null
null
UTF-8
Python
false
false
1,579
py
def codifica(chiave, testo): ''' Viene codificato e restituito un testo, fornito il testo stesso e una chiave di codifica''' codifica=codifica_chiave(chiave) for indice,carattere in enumerate(testo): if carattere in codifica.keys(): testo = testo[:indice]+ testo[indice:].replace(testo[indice],codifica[carattere],1) return testo def decodifica(chiave, testo): ''' Viene decodificato e restituito un testo, fornito il testo stesso e una chiave di codifica''' decodifica=decodifica_chiave(chiave) for indice,carattere in enumerate(testo): if carattere in decodifica.keys(): testo = testo[:indice]+ testo[indice:].replace(testo[indice],decodifica[carattere],1) return testo def codifica_chiave(chiave): chiave=processa_chiave(chiave) chiave_ord=''.join(sorted(chiave)) codifica={} for indice,carattere in enumerate(chiave_ord): codifica[carattere]=chiave[indice] return codifica def decodifica_chiave(chiave): chiave=processa_chiave(chiave) chiave_ord=''.join(sorted(chiave)) decodifica={} for indice,carattere in enumerate(chiave): decodifica[carattere]=chiave_ord[indice] return decodifica def processa_chiave(chiave): for carattere in chiave: if ord(carattere)<ord('a') or ord(carattere)>ord('z'): chiave= chiave.replace(carattere,'') chiave=elimina_copie(chiave) return chiave def elimina_copie(chiave): for carattere in chiave: if carattere in chiave[chiave.find(carattere)+1:]: chiave= chiave.replace(carattere,'',1) return chiave
[ "a.sterbini@gmail.com" ]
a.sterbini@gmail.com
732ef0438ed7f6a4a45a2ba312e54337afc3e84a
c7f8193a80d68b6144af8d9b2e2f012bf463af6a
/busstop.py
e02b0e8415b6ff5dc87ad36202eda1409cd94c78
[]
no_license
marcteale/DAKboard-OneBusAway-integration
8c060360062f07d1be4e1f88e7d0759b3efd8a8d
9803aa8568828e8b0533e5c4f452b50f424805b1
refs/heads/master
2020-03-21T08:29:31.120908
2018-10-03T19:36:04
2018-10-03T19:36:04
138,347,228
0
0
null
null
null
null
UTF-8
Python
false
false
4,173
py
#!/usr/bin/env python3.6 import configparser import json import os import sys from datetime import datetime import requests def get_departures_for_stop(departures, stop_id, routes, minutes_before, minutes_after, server, apikey): """Fetch the departures for the requested stop and return them as a dict.""" r = requests.get('{}/api/where/arrivals-and-departures-for-stop/{}.json'.format(server, stop_id), params={'key': apikey, 'minutesBefore': minutes_before, 'minutesAfter': minutes_after}) rj = r.json() stop_name = '' if r.ok: for stop in rj['data']['references']['stops']: if stop['id'] == stop_id: stop_name = stop['name'] break current_time = datetime.fromtimestamp(rj['currentTime'] / 1000) if rj['data']['entry']['arrivalsAndDepartures']: for a in rj['data']['entry']['arrivalsAndDepartures']: if a['departureEnabled'] and (routes is None or a['routeShortName'] in routes): if a['predicted'] and a['predictedDepartureTime'] != 0: departure_string = 'predictedDepartureTime' else: departure_string = 'scheduledDepartureTime' departure_time = datetime.fromtimestamp(a[departure_string] / 1000) delta = int((departure_time - current_time).seconds / 60) value = "{} - {} minute{}".format(a['routeShortName'], delta, '' if abs(delta) == 1 else 's') subtitle = '{}'.format(departure_string.replace('DepartureTime', '')) departures.append({'value': value, 'title': stop_name, 'subtitle': subtitle}) else: departures.append( {'value': 'No scheduled departures', 'title': stop_name, 'subtitle': 'No departures schedule or predicted in the next {} minutes.'.format(minutes_after)} ) else: departures.append({'value': 'Failed to fetch data', 'title': '', 'subtitle': rj['text']}) return departures def get_config(): """Read the config file.""" config = configparser.ConfigParser(allow_no_value=True) configfile = os.path.abspath(os.path.dirname(__file__)) + '/busstop.conf' config.read(configfile) routes = [unicode(r.strip()) for r in config.get('defaults', 'routes').split(',')] \ if 
config.has_option('defaults', 'routes') else None defaults = {'minutesbefore': config.get('defaults', 'minutesbefore'), 'minutesafter': config.get('defaults', 'minutesafter'), 'routes': routes, 'apikey': os.environ['APIKEY'], 'server': config.get('defaults', 'server')} config.remove_section('defaults') config.remove_section('defaults') return config, defaults def app(environ, start_response): status = "200 OK" try: config, defaults = get_config() results = [] ok = True except Exception as e: status = "500 Internal Server Error" results = json.dumps({'title': 'Error', 'value': e.message, 'subtitle': ''}) ok = False if ok: for section in config.sections(): minsBefore = config.get(section, 'minutesbefore') \ if config.has_option(section, 'minutesbefore') else defaults['minutesbefore'] minsAfter = config.get(section, 'minutesafter') \ if config.has_option(section, 'minutesafter') else defaults['minutesafter'] routes = [unicode(r.strip()) for r in config.get(section, 'routes').split(',')] \ if config.has_option(section, 'routes') else defaults['routes'] stopId = section results = get_departures_for_stop(results, stopId, routes, minsBefore, minsAfter, defaults['server'], defaults['apikey']) data = str.encode(json.dumps(results)) response_headers = [ ("Content-Type", "application/json"), ("Content-Length", str(len(data))) ] start_response(status, response_headers) return iter([data])
[ "marc.teale@openmarket.com" ]
marc.teale@openmarket.com
cfd392a9079699ee6d0b693e945546b5a1178576
53fab060fa262e5d5026e0807d93c75fb81e67b9
/backup/user_301/ch41_2019_04_04_16_40_15_344267.py
6c41a0d67bc67884cf85bc1629a7262fa142531b
[]
no_license
gabriellaec/desoft-analise-exercicios
b77c6999424c5ce7e44086a12589a0ad43d6adca
01940ab0897aa6005764fc220b900e4d6161d36b
refs/heads/main
2023-01-31T17:19:42.050628
2020-12-16T05:21:31
2020-12-16T05:21:31
306,735,108
0
0
null
null
null
null
UTF-8
Python
false
false
102
py
a=input('que palavra? ') while a!='desisto': a=input('que palavra? ') print(voce acertou)
[ "you@example.com" ]
you@example.com
a105b75168724e5d6040804652d0f8dd4fadeb5e
ca97700838056596c072a0b63934f179c6fbac17
/_21_ev_differentDER.py
6e2edf1852af3d9af8f4081db991a47e467510ae
[]
no_license
mlamlamla/powernet_pyGridlabD_eval
c18bff98164eb6df4ae79a157b840a59c19ff6d9
54275cbd86517bb1728e72824ba16fcbec99e767
refs/heads/master
2022-04-17T11:50:17.273163
2020-04-03T18:23:18
2020-04-03T18:23:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
17,644
py
import os import pandas as pd import numpy as np def get_monthly(run,ind,month,df_total_load_all=None): folder = '/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind directory = run + '_' + ind + '_vis' #Procurement costs df_system = pd.read_csv(run+'/' + directory +'/df_system.csv',index_col=[0],parse_dates=True).iloc[(24*60):] #df_system = df_system.iloc[24*60:] df_system['measured_real_energy'] = df_system['measured_real_power']/60. df_system['p_max'] = p_max df_system['WS_capped'] = df_system[["WS", "p_max"]].min(axis=1) df_system['procurement_cost'] = df_system['measured_real_energy']*df_system['WS_capped'] # in MW and USD/MWh proc_cost_Jan_nomarket = df_system['procurement_cost'].sum() print('Procurement cost in '+month+' (no market): '+str(proc_cost_Jan_nomarket)) #Total house load no market df_total_load = pd.read_csv(folder+'/total_load_all.csv',skiprows=range(8)).iloc[(24*60):] df_total_load['# timestamp'] = df_total_load['# timestamp'].map(lambda x: str(x)[:-4]) df_total_load = df_total_load.iloc[:-1] df_total_load['# timestamp'] = pd.to_datetime(df_total_load['# timestamp']) df_total_load.set_index('# timestamp',inplace=True) df_total_load = df_total_load/1000 #convert to MW df_total_load = df_total_load/60. 
#convert to energy df_total_load_gross = df_total_load.copy() #Subtract PV generation and add EV consumption df_PV_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_PV_state.csv') list_PV = list(pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_PV_state.csv')['inverter_name']) list_EV = list(pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_EV_state.csv')['EV_name']) list_EV_inv = [] for EV in list_EV: EV_inv = 'EV_inverter'+EV[2:] list_EV_inv += [EV_inv] if len(list_PV) + len(list_EV) > 0: df_inv_load = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/total_P_Out.csv',skiprows=range(8)).iloc[(24*60):] df_inv_load['# timestamp'] = df_inv_load['# timestamp'].map(lambda x: str(x)[:-4]) df_inv_load = df_inv_load.iloc[:-1] df_inv_load['# timestamp'] = pd.to_datetime(df_inv_load['# timestamp']) df_inv_load.set_index('# timestamp',inplace=True) df_inv_load = (df_inv_load/1000000)/60 # to MWh #Include PV if len(list_PV) > 0: df_PV = df_inv_load[list_PV] #W -> MW (comes from GridlabD) for house in df_total_load.columns: if house in (df_PV_appl['house_name']).tolist(): PV_inv = df_PV_appl['inverter_name'].loc[df_PV_appl['house_name'] == house].iloc[0] df_total_load[house] = df_total_load[house] - df_PV[PV_inv] #Include EV consumption if len(list_EV): df_EV_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_EV_state.csv') df_EV = df_inv_load[list_EV_inv] for house in df_total_load.columns: if house in (df_EV_appl['house_name']).tolist(): EV_inv = 'EV_inverter'+df_EV_appl['EV_name'].loc[df_EV_appl['house_name'] == house].iloc[0][2:] df_total_load[house] = df_total_load[house] - df_EV[EV_inv] #EV_inv is negatively defined if df_total_load_all is None: #print('df_total_load_all doesnot exist yet') df_total_load_all = 
df_total_load.copy() #Becomes master load df else: df_total_load_all = df_total_load_all.append(df_total_load) energy_nomarket_Jan = df_total_load.sum().sum() # Total net energy print('Energy in '+month+' (no market): '+str(energy_nomarket_Jan)) # print(str(len(df_system)/(24*60))+' days') # print(str(len(df_total_load)/(24*60))+' days') # print(str(len(df_inv_load)/(24*60))+' days') return df_total_load_all, proc_cost_Jan_nomarket, energy_nomarket_Jan def get_monthly_wm(run,ind,month,df_total_base_market=None,df_total_flex_market=None,df_cleared_market=None): folder = '/Users/admin/Documents/powernet/powernet_markets_mysql/'+run + '/' + run + '_' + ind directory = run + '/' + run + '_' + ind + '_vis' #Procurement cost df_system = pd.read_csv(directory+'/df_system.csv',index_col=[0],parse_dates=True).iloc[(24*60):] df_system = df_system #.iloc[24*60:] df_system['measured_real_energy'] = df_system['measured_real_power']/60. #MW df_system['p_max'] = p_max df_system['WS_capped'] = df_system[["WS", "p_max"]].min(axis=1) df_system['procurement_cost'] = df_system['measured_real_energy']*df_system['WS_capped'] # in MW and USD/MWh proc_cost_Jan_market = df_system['procurement_cost'].sum() print('Procurement cost in '+month+' (market): '+str(proc_cost_Jan_market)) #print(str(len(df_system)/(24*60))+' days') #Total house load with market df_total_load = pd.read_csv(folder+'/total_load_all.csv',skiprows=range(8)).iloc[(24*60):] df_total_load['# timestamp'] = df_total_load['# timestamp'].map(lambda x: str(x)[:-4]) df_total_load = df_total_load.iloc[:-1] df_total_load['# timestamp'] = pd.to_datetime(df_total_load['# timestamp']) df_total_load.set_index('# timestamp',inplace=True) df_total_load = df_total_load/1000 #convert to MW df_total_load = df_total_load/60. 
#convert to energy df_hvac_load = pd.read_csv(folder+'/hvac_load_all.csv',skiprows=range(8)).iloc[(24*60):] df_hvac_load['# timestamp'] = df_hvac_load['# timestamp'].map(lambda x: str(x)[:-4]) df_hvac_load = df_hvac_load.iloc[:-1] df_hvac_load['# timestamp'] = pd.to_datetime(df_hvac_load['# timestamp']) df_hvac_load.set_index('# timestamp',inplace=True) df_hvac_load = df_hvac_load/1000 #convert to MW df_hvac_load = df_hvac_load/60. #convert to energy df_base_load = df_total_load.copy() df_flex_load = df_total_load.copy() df_total_load.data = 0.0 #Get list of flexible appliances df_PV_appl = pd.read_csv(folder+'/df_PV_state.csv') list_PV = list(df_PV_appl['inverter_name']) df_EV_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind+'/df_EV_state.csv') list_EV = list(df_EV_appl['EV_name']) list_EV_inv = [] for EV in list_EV: EV_inv = 'EV_inverter'+EV[2:] df_Bat_appl = pd.read_csv('/Users/admin/Documents/powernet/powernet_markets_mysql/'+run+'/'+run + '_' + ind +'/df_battery_state.csv') list_Bat = list(df_Bat_appl['battery_name']) list_Bat_inv = [] for Bat in list_Bat: Bat_inv = 'Bat_inverter'+Bat[7:] list_Bat_inv += [Bat_inv] if len(list_PV) + len(list_Bat) + len(list_EV_inv) > 0: df_inv_load = pd.read_csv(folder+'/total_P_Out.csv',skiprows=range(8)).iloc[(24*60):] df_inv_load['# timestamp'] = df_inv_load['# timestamp'].map(lambda x: str(x)[:-4]) df_inv_load = df_inv_load.iloc[:-1] df_inv_load['# timestamp'] = pd.to_datetime(df_inv_load['# timestamp']) df_inv_load.set_index('# timestamp',inplace=True) df_inv_load = (df_inv_load/1000000)/60 # to MWh df_PV = df_inv_load[list_PV] df_EV = df_inv_load[list_EV_inv] df_Bat = df_inv_load[list_Bat_inv] df_base_load = df_total_load - df_hvac_load #for100% flex hvac! 
df_flex_load = df_hvac_load.copy() for house in df_hvac_load.columns: if len(list_PV) > 0: if house in (df_PV_appl['house_name']).tolist(): PV_inv = df_PV_appl['inverter_name'].loc[df_PV_appl['house_name'] == house].iloc[0] df_flex_load[house] = df_flex_load[house] - df_PV[PV_inv] if len(list_EV_inv): if house in (df_EV_appl['house_name']).tolist(): EV_inv = 'EV_inverter'+df_EV_appl['EV_name'].loc[df_EV_appl['house_name'] == house].iloc[0][2:] df_flex_load[house] = df_flex_load[house] - df_EV[EV_inv] #EV_inv is negatively defined if len(list_Bat) > 0: if house in (df_Bat_appl['house_name']).tolist(): Bat_inv = 'Bat_inverter'+df_Bat_appl['battery_name'].loc[df_Bat_appl['house_name'] == house].iloc[0][7:] df_flex_load[house] = df_flex_load[house] - df_Bat[Bat_inv] #Bat_inv is negatively defined #Clearing prices df_cleared = pd.read_csv(folder+'/df_prices.csv',parse_dates=[0]).iloc[24*12:] #USD/MWh df_cleared.rename(columns={'Unnamed: 0':'timedate'},inplace=True) df_cleared.set_index('timedate',inplace=True) df_cleared = df_cleared[['clearing_price']] df_cleared_long = pd.DataFrame(index=df_total_load.index,columns=['clearing_price'],data=df_cleared['clearing_price']) df_cleared_long.fillna(method='ffill',inplace=True) # print(str(len(df_system)/(24*60))+' days') # print(str(len(df_total_load)/(24*60))+' days') # print(str(len(df_hvac_load)/(24*60))+' days') # print(str(len(df_inv_load)/(24*60))+' days') # print(str(len(df_cleared_long)/(24*60))+' days') #Total load if df_total_base_market is None: print('df_total_load_all_market doesnot exist yet') df_total_base_market = df_base_load.copy() #Becomes master load df df_total_flex_market = df_flex_load.copy() #Becomes master load df df_cleared_market = df_cleared_long.copy() else: df_total_base_market = df_total_base_market.append(df_base_load) df_total_flex_market = df_total_flex_market.append(df_flex_load) df_cleared_market = df_cleared_market.append(df_cleared_long) energy_nomarket_Jan = df_total_load.sum().sum() if 
len(list_PV) > 0: energy_nomarket_Jan -= df_PV.sum().sum() if len(list_EV) > 0: energy_nomarket_Jan -= df_EV.sum().sum() if len(list_Bat) > 0: energy_nomarket_Jan -= df_Bat.sum().sum() #Use baseload only df_system['measured_real_energy_base'] = df_base_load.sum(axis=1) df_system['procurement_cost_base'] = df_system['measured_real_energy_base']*df_system['WS_capped'] # in MW and USD/MWh proc_cost_Jan_market = df_system['procurement_cost_base'].sum() energy_nomarket_Jan = df_system['measured_real_energy_base'].sum() print('Energy in '+month+' (market): '+str(energy_nomarket_Jan)) return df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jan_market, energy_nomarket_Jan ############## #GENERAL SETTINGS ############## run = 'FinalReport2' #'FinalReport_Jul1d' settings_file = '/Users/admin/Documents/powernet/powernet_markets_mysql/settings_final2.csv' df_settings = pd.read_csv(settings_file) p_max = 100. risk_prem = 1.025 ############## #SETTINGS: Only HVAC, no other DER # #NO market: 64,65,66 #With market: 70,71,72 // 103, 104, 105 (with reference price based on forward prices) ############## print('Only HVAC, no other DER') ############## #NO MARKET YET ############## df_total_load_all, proc_cost_Jan_nomarket, energy_nomarket_Jan = get_monthly(run,'0064','JANUARY') df_total_load_all, proc_cost_Jul_nomarket, energy_nomarket_Jul = get_monthly(run,'0065','JULY',df_total_load_all) df_total_load_all, proc_cost_Oct_nomarket, energy_nomarket_Oct = get_monthly(run,'0066','OCTOBER',df_total_load_all) #Calculate the retail tariff for procurement of energy proc_cost_nomarket = proc_cost_Jan_nomarket + proc_cost_Jul_nomarket + proc_cost_Oct_nomarket print('Procurement cost (no market, no DER): '+str(proc_cost_nomarket)) energy_nomarket= energy_nomarket_Jan + energy_nomarket_Jul + energy_nomarket_Oct retail_nomarket = proc_cost_nomarket/energy_nomarket print('Retail tariff (no market, no DER): '+str(retail_nomarket)) #Calculate cost for houses without a market 
under a constant retail tariff df_cost_nomarket = df_total_load_all*retail_nomarket df_cost = pd.DataFrame(index=df_cost_nomarket.columns,columns=['costs_nomarket'],data=df_cost_nomarket.sum(axis=0)) df_cost['costs_nomarket_riskprem5'] = df_cost['costs_nomarket']*risk_prem print('Total customer bills (no market, no DER) over three weeks: '+str(df_cost['costs_nomarket_riskprem5'].sum())) #print('Calculate for year') ############## #MARKET ############## df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jan_market, energy_market_Jan = get_monthly_wm(run,'0106','JANUARY') df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jul_market, energy_market_Jul = get_monthly_wm(run,'0107','JULY',df_total_base_market, df_total_flex_market,df_cleared_market) df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Oct_market, energy_market_Oct = get_monthly_wm(run,'0108','OCTOBER',df_total_base_market, df_total_flex_market,df_cleared_market) proc_cost_market = proc_cost_Jan_market + proc_cost_Jul_market + proc_cost_Oct_market print('Procurement cost (market, HVAC only): '+str(proc_cost_market)) retail_new = (proc_cost_market)/(energy_market_Jan + energy_market_Jul + energy_market_Oct) print('New retail tariff (with market): '+str(retail_new)) #Calculate consumer costs df_costs_market = df_total_base_market*retail_nomarket + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index") df_cost['cost_market_oldRR'] = df_costs_market.sum(axis=0) df_cost['cost_market_oldRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem print('Total customer bills (market, HVAC only) at old RR: '+str(df_cost['cost_market_oldRR_riskprem5'].sum())) df_costs_market = df_total_base_market*retail_new + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index") df_cost['cost_market_newRR'] = df_costs_market.sum(axis=0) df_cost['cost_market_newRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem 
print('Total customer bills (market, HVAC only) at new RR: '+str(df_cost['cost_market_newRR_riskprem5'].sum())) df_cost['abs_change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket']) df_cost['change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100 print('\nMedian type 1600 old RR') print(df_cost['change_oldRR'].median()) df_cost['abs_change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket']) df_cost['change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100 print('\nMedian type 1600 new RR') print(df_cost['change_newRR'].median()) #df_cost.to_csv(run+'/cost_changes_procneutral_1600_all.csv') # ############## # #SETTINGS: Only other DER, no HVAC # # # #NO market: 79,80,81 # #With market: 70,71,72 # ############## # print('No HVAC, only other DER') # ############## # #NO MARKET YET # ############## # df_total_load_all, proc_cost_Jan_nomarket, energy_nomarket_Jan = get_monthly(run,'0079','JANUARY') # df_total_load_all, proc_cost_Jul_nomarket, energy_nomarket_Jul = get_monthly(run,'0080','JULY',df_total_load_all) # df_total_load_all, proc_cost_Oct_nomarket, energy_nomarket_Oct = get_monthly(run,'0081','OCTOBER',df_total_load_all) # #Calculate the retail tariff for procurement of energy # proc_cost_nomarket = proc_cost_Jan_nomarket + proc_cost_Jul_nomarket + proc_cost_Oct_nomarket # print('Procurement cost (no market, with PV and EV): '+str(proc_cost_nomarket)) # energy_nomarket= energy_nomarket_Jan + energy_nomarket_Jul + energy_nomarket_Oct # retail_nomarket = proc_cost_nomarket/energy_nomarket # print('Retail tariff (no market, with PV and EV): '+str(retail_nomarket)) # #Calculate cost for houses without a market under a constant retail tariff # df_cost_nomarket = df_total_load_all*retail_nomarket # df_cost = pd.DataFrame(index=df_cost_nomarket.columns,columns=['costs_nomarket'],data=df_cost_nomarket.sum(axis=0)) # 
df_cost['costs_nomarket_riskprem5'] = df_cost['costs_nomarket']*risk_prem # print('Total customer bills (no market, with PV and EV): '+str(df_cost['costs_nomarket_riskprem5'].sum())) # print('Calculate for year') # ############## # #MARKET # ############## # df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jan_market, energy_market_Jan = get_monthly_wm(run,'0076','JANUARY') # df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Jul_market, energy_market_Jul = get_monthly_wm(run,'0077','JULY',df_total_base_market, df_total_flex_market,df_cleared_market) # df_total_base_market, df_total_flex_market, df_cleared_market, proc_cost_Oct_market, energy_market_Oct = get_monthly_wm(run,'0078','OCTOBER',df_total_base_market, df_total_flex_market,df_cleared_market) # proc_cost_market = proc_cost_Jan_market + proc_cost_Jul_market + proc_cost_Oct_market # print('Procurement cost (market, other DER): '+str(proc_cost_market)) # retail_new = (proc_cost_market)/(energy_market_Jan + energy_market_Jul + energy_market_Oct) # print('New retail tariff (with market): '+str(retail_new)) # #Calculate consumer costs # df_costs_market = df_total_base_market*retail_nomarket + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index") # df_cost['cost_market_oldRR'] = df_costs_market.sum(axis=0) # df_cost['cost_market_oldRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem # print('Total customer bills (market, other DER) at old RR: '+str(df_cost['cost_market_oldRR_riskprem5'].sum())) # df_costs_market = df_total_base_market*retail_new + df_total_flex_market.multiply(df_cleared_market['clearing_price'], axis="index") # df_cost['cost_market_newRR'] = df_costs_market.sum(axis=0) # df_cost['cost_market_newRR_riskprem5'] = df_costs_market.sum(axis=0)*risk_prem # print('Total customer bills (market, other DER) at new RR: '+str(df_cost['cost_market_newRR_riskprem5'].sum())) # df_cost['abs_change_oldRR'] = (df_cost['cost_market_oldRR'] 
- df_cost['costs_nomarket']) # df_cost['change_oldRR'] = (df_cost['cost_market_oldRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100 # print('\nMedian type 1600 old RR') # print(df_cost['change_oldRR'].median()) # df_cost['abs_change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket']) # df_cost['change_newRR'] = (df_cost['cost_market_newRR'] - df_cost['costs_nomarket'])/df_cost['costs_nomarket']*100 # print('\nMedian type 1600 new RR') # print(df_cost['change_newRR'].median())
[ "admin@admins-air.attlocal.net" ]
admin@admins-air.attlocal.net
03ab69e575d2a03c8d9095898808b1c4e3877e59
6db68bd7f4e792d3df009671c10cbe93f963c5e6
/NOC_Chp0/NOC_0_3/walker.py
db7d99af07feffb25e3763feabc401da99e501ab
[]
no_license
mickardinal/The-Nature-of-Code-Python
0ce9125b92707a9de4dd57a77c4a92c04df66467
a883e365051826228002317741df7d198eae6dfe
refs/heads/master
2020-03-11T20:55:21.939021
2018-04-25T16:29:06
2018-04-25T16:29:06
130,250,157
1
0
null
null
null
null
UTF-8
Python
false
false
408
py
class Walker(object): def __init__(self): self.x = width/2 self.y = height/2 def display(self): stroke(0) point(self.x, self.y) def step(self): r = random(0, 1) if r< 0.4: self.x += 1 elif r < 0.6: self.x -= 1 elif r < 0.8: self.y += 1 else: self.y -= 1
[ "jsrdccsx@gmail.com" ]
jsrdccsx@gmail.com
3923da15d3cfb9a730088a4d9708e6a18aa4ff3f
2ef742fe5e3208715208ff711eb2046acc1f5ef6
/NathHorrigan/wsgi.py
b4e20145e98ba5cc686716da381e9690d4db59ac
[]
no_license
NathHorrigan/NathHorrigan.com
9ac53208061b16d3f8bc4a00e4575df83083dc7c
636165b718659cf5dcd70ed29251ae69b4b09748
refs/heads/master
2020-03-20T16:23:07.531145
2018-08-31T22:22:13
2018-08-31T22:22:13
137,537,826
0
0
null
null
null
null
UTF-8
Python
false
false
405
py
""" WSGI config for NathHorrigan project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "NathHorrigan.settings.dev") application = get_wsgi_application()
[ "nathan_horrigan@icloud.com" ]
nathan_horrigan@icloud.com
8eac566ccd717ac44dc96ccf4939d880776e6da5
abeb7f8ce8fa3fe3035ad6d7139273266588248f
/bottles.py
6797be3262b2af6facecda607921990935effc46
[]
no_license
mohanoatc/pythonSamples
dcddd6a9d989c5435d17bc888aa19ed6bc94c1c1
6ff5657e24d46b9d47561e9c9c5fe5735f65aea3
refs/heads/master
2020-03-22T14:16:46.219459
2018-07-15T12:03:17
2018-07-15T12:03:17
140,166,562
0
0
null
null
null
null
UTF-8
Python
false
false
470
py
for bottles in range(10, 0, -1): if bottles > 1: print(bottles, " bottles of beer on the wall") print(bottles, " bottles of the beer ") else: print(bottles, " bottle of beer on the wall") print(bottles, " bottle of the beer ") print("Take one down.\nPass it around.") if bottles > 1: print(bottles - 1, "bottle of beer on the wall\n") else: print("No more bottles of beer on the wall\n")
[ "noreply@github.com" ]
mohanoatc.noreply@github.com
1ae7978cbc58218d181868d7280ebd339c401050
099f7e9234cd8b3afa6f7cd8cb81a654ca5043ea
/models/payment.py
38bf0252591bf723514586c8ec8f04e40171c1d6
[]
no_license
nazrinshahaf/Nextagram_python
1716893e7b4466fec5b9d48fd630e00d01f2b74f
8738929ca6f11da6943b9093f05bd445ff58e951
refs/heads/master
2022-12-11T21:45:58.316999
2020-02-04T10:35:55
2020-02-04T10:35:55
235,014,627
0
0
null
2021-06-02T00:56:25
2020-01-20T03:55:13
HTML
UTF-8
Python
false
false
492
py
from models.base_model import BaseModel import peewee as pw from models.user import User from models.user_images import User_images from config import S3_LOCATION from playhouse.hybrid import hybrid_property from flask_login import current_user class Payment(BaseModel): user = pw.ForeignKeyField(User, backref='donations') image = pw.ForeignKeyField(User_images, backref='donations') amount = pw.IntegerField(null = False, default= 5) message = pw.TextField(null=True)
[ "nazrinfernandez@gmail.com" ]
nazrinfernandez@gmail.com
10dfdf1f98da77c3edb8bc6c1a987c773d2ff61f
bb4e603d41c040114a6161427593e30fad02828b
/classwork4.py
4db534ef9a08d9437b14bcf379813698bf674fbe
[]
no_license
MS-Dok/pythonCore
40871c8dc53bee583fb12a6366db2275521d6e6e
d0d89997022f0e284626035d6fa61d94183d8f80
refs/heads/master
2021-07-02T00:52:31.621971
2020-10-20T12:29:40
2020-10-20T12:29:40
184,242,248
1
0
null
null
null
null
UTF-8
Python
false
false
4,404
py
"""1. Створити список цілих чисел, які вводяться з терміналу та визначити серед них максимальне та мінімальне число. """ user_value=int(input("Enter the value: ")) print("Min value is",min([x for x in range(user_value)])) print("Max value is",max([x for x in range(user_value)])) """ 2. В інтервалі від 1 до 10 визначити числа • парні, які діляться на 2, • непарні, які діляться на 3, • числа, які не діляться на 2 та 3. """ user_input_start,user_input_finish=int(input("Please enter the start value ")),int(input("Please enter the end value ")) print(list([x for x in range(user_input_start,user_input_finish) if x%2==0])) print(list([x for x in range(user_input_start,user_input_finish) if x%3==0])) print(list([x for x in range(user_input_start,user_input_finish) if x%3!=0 and x%2!=0])) """ 3. Написати програму, яка обчислює факторіал числа, яке користувач вводить.(не використовувати рекурсивного виклику функції) num_list = [int(input("Enter int {}: ".format(i+1))) for i in range(3)] """ while True: user_input=int(input("Enter the value: ")) if user_input>=0: break if user_input==0: "Factorial of 0 is equal to 1" else: result=1 for i in range(1,user_input+1): result*=i print("Factorial of {} is equal to {}".format(user_input,result)) """ 4. Напишіть скрипт, який перевіряє логін, який вводить користувач. Якщо логін вірний (First), то привітайте користувача. Якщо ні, то виведіть повідомлення про помилку. (використайте цикл while) """ while True: user_input=input("Please enter the login:\n") if user_input=="First": break else: print("Incorrect login. Please try again\n") """ 5. Перший випадок. Написати програму, яка буде зчитувати числа поки не зустріне від’ємне число. При появі від’ємного числа програма зупиняється (якщо зустрічається 0 програма теж зупиняється). """ some_array=[] while True: user_input=int(input("Please enter the >0 value ")) if user_input >0: some_array.append(user_input) else: break print(some_array) """ 6. Другий випадок. 
На початку на вхід подається кількість елементів послідовності, а потім самі елементи. При появі від’ємного числа програма зупиняється (якщо зустрічається 0 програма теж зупиняється). """ some_array_2=[] quantity=int(input("Please enter the quantity of numbers ")) i=0 while i<quantity: value_to_add=int(input("Please enter the value to add ")) if value_to_add>0: some_array_2.append(value_to_add) i+=1 else: print("<=0 value entered. Termination") break print(some_array_2) """ 7. Знайти прості числа від 10 до 30, а всі решта чисел представити у вигляді добутку чисел (наприклад 10 equals 2 * 5 11 is a prime number 12 equals 2 * 6 13 is a prime number 14 equals 2 * 7 ………………….) """ list_ex=[x for x in range(10,30)] for i in list_ex: if i%2==0: print("{} equals 2*{}".format(str(i),int(i/2))) elif i%3==0: print("{} equals 3*{}".format(str(i),int(i/3))) else: print(str(i)+" is primal number") """ 8. Відсортувати слова в реченні в порядку їх довжини (використати List Comprehensions) """ sentence="На початку на вхід подається кількість елементів послідовності а потім самі елементи." print(sorted([x for x in set(sentence.lower().split())],key=len))
[ "noreply@github.com" ]
MS-Dok.noreply@github.com
20ba1ba73360f4befafe0351c226f32696426e2f
a8163b09c4b4a58fc82cdb6ff8df29197fd15945
/_OldVersion/index.py
2756bb9c27b13e6d15761ae94704a0c1842d6512
[]
no_license
zhudonlin/Fuck_HENUDC
bd78a78f0807e96fdfda36a727c2c017cab7ad9c
0aa398333c1d8e42c4820f6b80292509af46cfd0
refs/heads/main
2023-07-14T13:43:51.803980
2021-09-03T08:08:13
2021-09-03T08:08:13
402,689,111
1
0
null
2021-09-03T07:46:57
2021-09-03T07:46:56
null
UTF-8
Python
false
false
16,520
py
# -*- coding: utf-8 -*- import sys import json import uuid import oss2 import yaml import base64 import requests import time import random import uanalyse from pyDes import des, CBC, PAD_PKCS5 from datetime import datetime, timedelta, timezone from urllib.parse import urlparse from urllib3.exceptions import InsecureRequestWarning import notification # debug模式 debug = True if debug: requests.packages.urllib3.disable_warnings(InsecureRequestWarning) rand_lon = str(random.randint(0, 9)) rand_lat = str(random.randint(0, 9)) # 读取yml配置 def getYmlConfig(yaml_file='config.yml'): file = open(yaml_file, 'r', encoding="utf-8") file_data = file.read() file.close() config = yaml.load(file_data, Loader=yaml.FullLoader) return dict(config) # 全局配置 config = getYmlConfig(yaml_file='config.yml') # 获取当前utc时间,并格式化为北京时间 def getTimeStr(): utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc) bj_dt = utc_dt.astimezone(timezone(timedelta(hours=8))) return bj_dt.strftime("%Y-%m-%d %H:%M:%S") # 输出调试信息,并及时刷新缓冲区 def log(content): print(getTimeStr() + ' ' + str(content)) sys.stdout.flush() # 获取今日校园api def getCpdailyApis(user): apis = {} user = user['user'] if 'cpdaily' in user['ua']: print('你UA输入的有问题,请看说明书!') exit(2) if 'Android' not in user['ua']: print('你UA输入的有问题,请看说明书!') exit(2) schools = requests.get(url='https://mobile.campushoy.com/v6/config/guest/tenant/list', verify=not debug).json()[ 'data'] flag = True for one in schools: if one['name'] == user['school']: if one['joinType'] == 'NONE': log(user['school'] + ' 未加入今日校园') exit(-1) flag = False params = { 'ids': one['id'] } res = requests.get(url='https://mobile.campushoy.com/v6/config/guest/tenant/info', params=params, verify=not debug) data = res.json()['data'][0] joinType = data['joinType'] idsUrl = data['idsUrl'] ampUrl = data['ampUrl'] if 'campusphere' in ampUrl or 'cpdaily' in ampUrl: parse = urlparse(ampUrl) host = parse.netloc res = requests.get(parse.scheme + '://' + host) parse = urlparse(res.url) apis[ 'login-url'] = idsUrl + 
'/login?service=' + parse.scheme + r"%3A%2F%2F" + host + r'%2Fportal%2Flogin' apis['host'] = host ampUrl2 = data['ampUrl2'] if 'campusphere' in ampUrl2 or 'cpdaily' in ampUrl2: parse = urlparse(ampUrl2) host = parse.netloc res = requests.get(parse.scheme + '://' + host) parse = urlparse(res.url) apis[ 'login-url'] = idsUrl + '/login?service=' + parse.scheme + r"%3A%2F%2F" + host + r'%2Fportal%2Flogin' apis['host'] = host break if flag: log(user['school'] + ' 未找到该院校信息,请检查是否是学校全称错误') exit(-1) log(apis) return apis # 登陆并获取session def getSession(user, apis): user = user['user'] params = { # 'login_url': 'http://authserverxg.swu.edu.cn/authserver/login?service=https://swu.cpdaily.com/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay', 'login_url': 'https://ids.henu.edu.cn/authserver/login?service=https%3A%2F%2Fids.henu.edu.cn%2Fauthserver%2Fmobile%2Fcallback%3FappId%3D277935239', 'needcaptcha_url': '', 'captcha_url': '', 'username': user['username'], 'password': user['password'] } cookies = {} # 借助上一个项目开放出来的登陆API,模拟登陆 if 'enable' in user: if user['enable'] == 0: print('您设定了enable=0,安全模式将不会获取COOKIE,您想要使用的话请修改config.yml里面的到enable=1!') sendMessage('如果您看到这条消息,请您去github上重新设置您的config。', user, '报错提醒-今日校园自动签到') exit(888) if user['usecookies'] == 0: res = '' try: j = 0 for i in range(0, 5): print("使用config中定义的api") res = requests.post(config['login']['api'], data=params) if 'success' not in res.json()['msg']: time.sleep(5) print(f'第{j + 1}次未获取到Cookies') j = j + 1 else: break if 'success' not in res.json()['msg']: print(f'{j}次尝试也没有cookies,可能学校服务器坏了,自己弄吧!') sendMessage(f'如果您看到这条消息,证明{j}次尝试也没有cookies,可能学校服务器坏了,自己弄吧!', user) exit(888) print(res.json()) except Exception as e: res = requests.post(url='http://www.zimo.wiki:8080/wisedu-unified-login-api-v1.0/api/login', data=params) print("使用子墨的API") if 'success' not in res.json()['msg']: print('用子墨的API也没有获取到Cookies') sendMessage(f'如果您看到这条消息,证明子墨的api也没有获取到cookies,可能学校服务器坏了,自己弄吧!', user, '报错提醒-今日校园自动签到') # 
cookieStr可以使用手动抓包获取到的cookie,有效期暂时未知,请自己测试 # cookieStr = str(res.json()['cookies']) cookieStr = str(res.json()['cookies']) print('已从API获取到Cookie') # exit(999) else: cookieStr = user['cookies'] print('使用文件内Cookie') print(cookieStr) # log(cookieStr) 调试时再输出 # if cookieStr == 'None': # log(res.json()) # exit(-1) # log(cookieStr) # 解析cookie for line in cookieStr.split(';'): name, value = line.strip().split('=', 1) cookies[name] = value session = requests.session() session.cookies = requests.utils.cookiejar_from_dict(cookies, cookiejar=None, overwrite=True) return session # 获取最新未签到任务并全部签到 def getUnSignedTasksAndSign(session, apis, user): headers = { 'Accept': 'application/json, text/plain, */*', 'User-Agent': user['user']['ua'] + ' cpdaily/9.0.0 wisedu/9.0.0', 'content-type': 'application/json', 'Accept-Encoding': 'gzip,deflate', 'Accept-Language': 'zh-CN,en-US;q=0.8', 'Content-Type': 'application/json;charset=UTF-8' } print(headers) # 第一次请求每日签到任务接口,主要是为了获取MOD_AUTH_CAS res = session.post( url='https://{host}/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay'.format(host=apis['host']), headers=headers, data=json.dumps({})) # 第二次请求每日签到任务接口,拿到具体的签到任务 res = session.post( url='https://{host}/wec-counselor-sign-apps/stu/sign/getStuSignInfosInOneDay'.format(host=apis['host']), headers=headers, data=json.dumps({})) print(res.json()) if len(res.json()['datas']['unSignedTasks']) < 1: log('当前没有未签到任务') sendMessage('当前没有未签到任务', user['user']) exit(0) elif time.localtime().tm_hour in [18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7]: print('未在签到时间,等会再来吧!') sendMessage('自定义限制:未在签到时间,等会再来吧!', user['user']) # exit(8) # TODO 删掉 # log(res.json()) for i in range(0, len(res.json()['datas']['unSignedTasks'])): # if '出校' in res.json()['datas']['unSignedTasks'][i]['taskName'] == False: # if '入校' in res.json()['datas']['unSignedTasks'][i]['taskName'] == False: latestTask = res.json()['datas']['unSignedTasks'][i] params = { 'signInstanceWid': latestTask['signInstanceWid'], 'signWid': 
latestTask['signWid'] } task = getDetailTask(session, params, apis, user) print(task) if time.localtime().tm_hour in [18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7]: print('未在签到时间,等会再来吧!') form = fillForm(task, session, user, apis) print(form) if time.localtime().tm_hour in [18, 19, 20, 21, 22, 23, 24, 0, 1, 2, 3, 4, 5, 6, 7]: print('未在签到时间,等会再来吧!') submitForm(session, user, form, apis) # 获取签到任务详情 def getDetailTask(session, params, apis, user): headers = { 'Accept': 'application/json, text/plain, */*', 'User-Agent': user['user']['ua'] + ' cpdaily/9.0.0 wisedu/9.0.0', 'content-type': 'application/json', 'Accept-Encoding': 'gzip,deflate', 'Accept-Language': 'zh-CN,en-US;q=0.8', 'Content-Type': 'application/json;charset=UTF-8' } print(headers) res = session.post( url='https://{host}/wec-counselor-sign-apps/stu/sign/detailSignInstance'.format(host=apis['host']), headers=headers, data=json.dumps(params)) data = res.json()['datas'] return data # 填充表单 def fillForm(task, session, user, apis): user = user['user'] form = {} if task['isPhoto'] == 1: fileName = uploadPicture(session, user['photo'], apis) form['signPhotoUrl'] = getPictureUrl(session, fileName, apis) else: form['signPhotoUrl'] = '' if task['isNeedExtra'] == 1: extraFields = task['extraField'] defaults = config['cpdaily']['defaults'] extraFieldItemValues = [] for i in range(0, len(extraFields)): default = defaults[i]['default'] extraField = extraFields[i] if config['cpdaily']['check'] and default['title'] != extraField['title']: log('第%d个默认配置项错误,请检查' % (i + 1)) sendMessage('提交错误' + '第%d个默认配置项错误,请检查' % (i + 1), user) exit(-1) extraFieldItems = extraField['extraFieldItems'] for extraFieldItem in extraFieldItems: if extraFieldItem['content'] == default['value']: extraFieldItemValue = {'extraFieldItemValue': default['value'], 'extraFieldItemWid': extraFieldItem['wid']} # 其他,额外文本 if extraFieldItem['isOtherItems'] == 1: extraFieldItemValue = {'extraFieldItemValue': default['other'], 'extraFieldItemWid': 
extraFieldItem['wid']} extraFieldItemValues.append(extraFieldItemValue) # log(extraFieldItemValues) # 处理带附加选项的签到 form['extraFieldItems'] = extraFieldItemValues # form['signInstanceWid'] = params['signInstanceWid'] form['signInstanceWid'] = task['signInstanceWid'] form['longitude'] = user['lon'] + rand_lon form['latitude'] = user['lat'] + rand_lat form['isMalposition'] = user['isMalposition'] form['uaIsCpadaily'] = True ################这个参数一定不能穿帮!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! form['abnormalReason'] = user['abnormalReason'] form['position'] = user['address'] # TODO 这个参数的名称有待考究 需要抓包见分晓!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! return form # 上传图片到阿里云oss def uploadPicture(session, image, apis): url = 'https://{host}/wec-counselor-sign-apps/stu/sign/getStsAccess'.format(host=apis['host']) res = session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps({}), verify=not debug) datas = res.json().get('datas') fileName = datas.get('fileName') accessKeyId = datas.get('accessKeyId') accessSecret = datas.get('accessKeySecret') securityToken = datas.get('securityToken') endPoint = datas.get('endPoint') bucket = datas.get('bucket') bucket = oss2.Bucket(oss2.Auth(access_key_id=accessKeyId, access_key_secret=accessSecret), endPoint, bucket) with open(image, "rb") as f: data = f.read() bucket.put_object(key=fileName, headers={'x-oss-security-token': securityToken}, data=data) res = bucket.sign_url('PUT', fileName, 60) # log(res) return fileName # 获取图片上传位置 def getPictureUrl(session, fileName, apis): url = 'https://{host}/wec-counselor-sign-apps/stu/sign/previewAttachment'.format(host=apis['host']) data = { 'ossKey': fileName } res = session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps(data), verify=not debug) photoUrl = res.json().get('datas') return photoUrl # DES加密 def DESEncrypt(s, key='b3L26XNL'): key = key iv = b"\x01\x02\x03\x04\x05\x06\x07\x08" k = des(key, CBC, iv, pad=None, padmode=PAD_PKCS5) 
encrypt_str = k.encrypt(s) # print(encrypt_str) print(f'加密结束的内容为:{base64.b64encode(encrypt_str).decode()}') return base64.b64encode(encrypt_str).decode() # 提交签到任务 def submitForm(session, user, form, apis): user = user['user'] # Cpdaily-Extension extension = { "lon": user['lon'] + rand_lon, "model": uanalyse.ua2model(user['ua']), "appVersion": "9.0.0", "systemVersion": uanalyse.ua2androidver(user['ua']), "userId": user['username'], "systemName": "android", "lat": user['lat'] + rand_lat, "deviceId": str(uuid.uuid1()) } headers = { 'tenantId': 'henu', 'User-Agent': user['ua'] + ' okhttp/3.12.4', 'CpdailyStandAlone': '0', 'extension': '1', 'Cpdaily-Extension': DESEncrypt(json.dumps(extension)), 'Content-Type': 'application/json; charset=utf-8', 'Accept-Encoding': 'gzip', # 'Host': 'swu.cpdaily.com', 'Connection': 'Keep-Alive' } print(extension) print(headers) # print('程序还有一步就提交了,已暂停') # exit(888) # TODO 设置提交锁的位置 res = session.post(url='https://{host}/wec-counselor-sign-apps/stu/sign/submitSign'.format(host=apis['host']), headers=headers, data=json.dumps(form)) message = res.json()['message'] if message == 'SUCCESS': log('自动签到成功') sendMessage('自动签到成功', user, title='今日校园签到成功通知') else: log('自动签到失败,原因是:' + message) sendMessage('自动签到失败' + message, user) # sendMessage('自动签到失败,原因是:' + message, user['email']) exit(0) # 发送邮件通知 def sendMessage(msg, user, title='[INFO] 今日校园自动签到信息通知'): if msg.count("未开始") > 0: return '' print(user) try: if user['useserverchan'] != 0: log('正在发送微信通知') log(getTimeStr()) # sendMessageWeChat(msg + getTimeStr(), '今日校园自动签到结果通知') notification.send_serverchan(user['serverchankey'], title, getTimeStr() + ' ' + msg) except Exception as e: log("send failed") # 主函数 '''def main(): try: continue except: print("有一个user出错啦") continue # 提供给腾讯云函数调用的启动函数 def main_handler(event, context): try: main() except Exception as e: raise e else: return 'success''' if __name__ == '__main__': # print(extension) #print(main_handler({}, {})) for user in config['users']: 
print(user) apis = getCpdailyApis(user) session = getSession(user, apis) getUnSignedTasksAndSign(session, apis, user)
[ "yulonger@outlook.com" ]
yulonger@outlook.com
e8b2f8c81f953e4c0e4a8d266dceb71804203e01
7f25740b1ef47edc24db1a3618b399959b073fe1
/1029_17_smallproject.py
97673d239a34ef5759856f9eeba050bcf1977446
[]
no_license
pjh9362/PyProject
b2d0aa5f8cfbf2abbd16232f2b55859be50446dc
076d31e0055999c1f60767a9d60e122fb1fc913e
refs/heads/main
2023-01-09T12:12:06.913295
2020-11-07T15:32:03
2020-11-07T15:32:03
306,814,117
0
0
null
null
null
null
UTF-8
Python
false
false
198
py
cost = int(input()) cpn = input() if cpn == "Cash3000": print(cost-3000) elif cpn == "Cash5000": print(cost-5000) else: print("쿠폰이 적용되지 않았습니다.") print(cost)
[ "pjh9362@gmail.com" ]
pjh9362@gmail.com
aa9c14845c14707dc3ac40e78df6b0a435a73c19
051fff90eb3fcb1f928c5857992fef351fc1ba04
/output/figuresAndTables/makeFinalTables.py
92d6e9cd2931bb2d07a7fdcae6dad5e5ed9ca5cd
[ "MIT" ]
permissive
AndresYague/Snuppat
1503c8a729513d857a04a7963b8256451c9f6cd1
8a7f73fbc260bab67b5d38ed1efc628980f5047c
refs/heads/master
2021-06-08T11:22:11.930896
2021-04-08T12:58:53
2021-04-08T12:58:53
67,886,532
0
0
null
null
null
null
UTF-8
Python
false
false
2,455
py
def getKeyList(indx, lst): '''Return adecuate value from list''' if indx >= 0 and indx < len(lst): return lst[indx] else: return "--" def printTable(storeNamVal): '''Print table in order''' nCol = 3 keys = [x for x in storeNamVal.keys()]; keys.sort() nEls = len(keys); div = nEls/nCol # Get number of lines for tables nlines = div if nEls % nCol == 0 else int(div) + 1 firstOfSecond = None for ii in range(nEls): zz1 = getKeyList(ii, keys) nam1, val1 = storeNamVal.get(zz1, ("--", "--")) zz2 = getKeyList(ii + nlines, keys) nam2, val2 = storeNamVal.get(zz2, ("--", "--")) zz3 = getKeyList(ii + nlines*2, keys) nam3, val3 = storeNamVal.get(zz3, ("--", "--")) if firstOfSecond is None: firstOfSecond = zz2 elif zz1 == firstOfSecond: break print("{} & {} & {:5.2f} & ".format(nam1, zz1, float(val1)), end = " ") print("{} & {} & {:5.2f} & ".format(nam2, zz2, float(val2)), end = " ") if val3 != "--": print("{} & {} & {:5.2f}\\\\".format(nam3, zz3, float(val3))) else: print("{} & {} & {}\\\\".format(nam3, zz3, val3)) def main(): '''Transform plottedValues.dat into .tex tables''' arch = "plottedValues.dat" data = "../../data/species.dat" # Index zz and names zToName = {} with open(data, "r") as fread: for line in fread: lnlst = line.split() zz = int(lnlst[0]) - int(lnlst[2]) name = lnlst[1] name = name[0].upper() + name[1:] zToName[zz] = name # Create and print tables storeNamVal = {} with open(arch, "r") as fread: for line in fread: if "#" in line: if len(storeNamVal) > 0: printTable(storeNamVal) print() storeNamVal = {} print(line) continue lnlst = line.split() if len(lnlst) == 0: continue zz = int(lnlst[0]) name = zToName[zz] val = lnlst[1] storeNamVal[zz] = (name, val) if len(storeNamVal) > 0: printTable(storeNamVal) if __name__ == "__main__": main()
[ "and.yague@gmail.com" ]
and.yague@gmail.com
43f6176cdac6fed43d610aadb95791ffb1bc8e31
5f6e95aa83ca132c732f644c51e786785e9bdd2f
/src/e_psu/e_psu/urls.py
c6ecbf46fdf3b32c7d3230b71dd508c60b649c90
[]
no_license
kerupuksambel/django-e-pantau
9905a9902752fd5143e03326a0ab585f09ccb50d
bbadcd31984c9bd254ac2cc23a30f55a9fe5b997
refs/heads/master
2022-12-22T07:28:58.444314
2020-10-02T14:35:05
2020-10-02T14:35:05
300,641,855
0
0
null
null
null
null
UTF-8
Python
false
false
1,268
py
"""e_psu URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf import settings from django.conf.urls.static import static from django.contrib import admin from django.urls import path, include from .views import home_view urlpatterns = [ path("", home_view, name="home"), path('admin/', admin.site.urls), path('admin_kelola/serah_terima/', include("serah_terima.urls")), path('laporan/', include("laporan.urls")), path('warga/', include("warga.urls")), path('admin_kelola/', include("admin_kelola.urls")), path('admin_skpd/', include("admin_skpd.urls")) ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[ "kerupuksambel.2000@gmail.com" ]
kerupuksambel.2000@gmail.com
3947c1886e64b2e14da5a55a34c8661ff9cdde6c
ed8c7fba9c5592b14ab79eac399813d9d0537b7d
/website/migrations/0001_initial.py
fac9f699cda3d22a2ad1bac05bf8e333a9cb5fe6
[]
no_license
OpenWebCurtin/Catching-out-corruption
4834f7d95393b71009347237aff08f7726049a7a
33617c4d01dd33f118aaac4c562948598f6206ba
refs/heads/main
2023-01-18T19:21:26.723344
2020-11-23T13:07:19
2020-11-23T13:07:19
315,027,698
0
0
null
null
null
null
UTF-8
Python
false
false
14,429
py
# Generated by Django 2.2.6 on 2019-10-31 16:18 from django.conf import settings import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.db.models.deletion import django.utils.timezone class Migration(migrations.Migration): initial = True dependencies = [ ('auth', '0011_update_proxy_permissions'), ] operations = [ migrations.CreateModel( name='User', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')), ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')), ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), ], managers=[ ('objects', django.contrib.auth.models.UserManager()), ], ), migrations.CreateModel( name='AsyncJob', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('priority', models.IntegerField()), ('status', models.IntegerField(choices=[(0, 'Unprocessed'), (1, 'Finished'), (2, 'Error'), (3, 'Unsupported')], default=0)), ], ), migrations.CreateModel( name='DocumentResult', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('document', models.CharField(max_length=128)), ('occurs_total', models.IntegerField(default=0)), ('occurs_agenda_items', models.IntegerField(default=0)), ('normalised_score', models.IntegerField(default=0)), ], ), migrations.CreateModel( name='File', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('filename', models.CharField(max_length=128)), ], options={ 'permissions': [('upload', 'Can upload documents using the PDF upload service.'), ('delete', 'Can delete documents using the file deletion service.'), ('recover', 'Can recover deleted documents using the file recovery service.')], }, ), migrations.CreateModel( name='FileDeletionRequest', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('admin', models.CharField(max_length=128)), ('delete_by', models.IntegerField(choices=[(0, 'Delete files by filename.'), (1, 'Delete files by uploader.')], default=0)), ('target_file', models.CharField(blank=True, max_length=128, null=True)), 
('target_uploader', models.CharField(blank=True, max_length=128, null=True)), ], ), migrations.CreateModel( name='FileRecoveryRequest', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('admin', models.CharField(max_length=128)), ('recover_by', models.IntegerField(choices=[(0, 'Recover files by filename.'), (1, 'Recover files by uploader.')], default=0)), ('target_file', models.CharField(blank=True, max_length=128, null=True)), ('target_uploader', models.CharField(blank=True, max_length=128, null=True)), ], ), migrations.CreateModel( name='KeyPhraseOptionSet', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('key_phrase', models.CharField(blank=True, default='', max_length=128)), ('key_phrase_type', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)), ('key_phrase_importance', models.DecimalField(decimal_places=2, max_digits=3, null=True)), ], ), migrations.CreateModel( name='PrivilegeModification', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('admin', models.CharField(max_length=128)), ('target_user', models.CharField(max_length=128)), ('target_group', models.CharField(choices=[('regular user', 'Regular user'), ('privileged user', 'Privileged user'), ('administrator', 'Administrator')], default=0, max_length=32)), ], ), migrations.CreateModel( name='RelationResult', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('kp1', models.CharField(max_length=128)), ('kp2', models.CharField(max_length=128)), ('kp3', models.CharField(max_length=128)), ('kp4', models.CharField(max_length=128)), ('kp5', models.CharField(max_length=128)), ('document', models.CharField(blank=True, default='', max_length=128)), 
('agenda_item_file', models.CharField(blank=True, default='', max_length=128)), ('agenda_item', models.CharField(blank=True, default='', max_length=128)), ('description', models.CharField(blank=True, default='', max_length=128)), ('search_type', models.IntegerField()), ], ), migrations.CreateModel( name='Search', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('search_by', models.IntegerField(choices=[(0, 'Search by relation'), (1, 'Search by document')], default=0)), ('search_t', models.IntegerField(choices=[(0, 'Search minutes'), (1, 'Search non-minutes')], default=0)), ('fbm', models.BooleanField(default=False)), ('fbm_filename', models.CharField(blank=True, default='', max_length=128)), ('fbm_uploader', models.CharField(blank=True, default='', max_length=128)), ('fbm_upload_date_start', models.DateField(null=True)), ('fbm_upload_date_end', models.DateField(null=True)), ('fbc', models.BooleanField(default=False)), ('fbc_council', models.CharField(blank=True, default='', max_length=128)), ('fbc_publish_date_start', models.DateField(null=True)), ('fbc_publish_date_end', models.DateField(null=True)), ('key_phrase1', models.CharField(blank=True, default='', max_length=128)), ('key_phrase2', models.CharField(blank=True, default='', max_length=128)), ('key_phrase3', models.CharField(blank=True, default='', max_length=128)), ('key_phrase4', models.CharField(blank=True, default='', max_length=128)), ('key_phrase5', models.CharField(blank=True, default='', max_length=128)), ('key_phrase_type1', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)), ('key_phrase_type2', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)), ('key_phrase_type3', models.IntegerField(choices=[(0, 'Any 
keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)), ('key_phrase_type4', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)), ('key_phrase_type5', models.IntegerField(choices=[(0, 'Any keyword type'), (1, 'Councillor name'), (2, 'Person name'), (3, 'Business name'), (4, 'Property address')], default=0, null=True)), ('key_phrase_importance1', models.DecimalField(decimal_places=2, max_digits=3, null=True)), ('key_phrase_importance2', models.DecimalField(decimal_places=2, max_digits=3, null=True)), ('key_phrase_importance3', models.DecimalField(decimal_places=2, max_digits=3, null=True)), ('key_phrase_importance4', models.DecimalField(decimal_places=2, max_digits=3, null=True)), ('key_phrase_importance5', models.DecimalField(decimal_places=2, max_digits=3, null=True)), ], options={ 'permissions': [('search', 'Can search using the document search feature.')], }, ), migrations.CreateModel( name='AsyncJobType', fields=[ ('job_base', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='website.AsyncJob')), ], ), migrations.CreateModel( name='UploadedFile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('file', models.FileField(upload_to='uploads/')), ('filename', models.CharField(blank=True, default='', max_length=128)), ('type', models.IntegerField(choices=[(0, 'Public minutes document.'), (1, 'Public non-minutes document.'), (2, 'Private non-minutes document.')], default=0)), ('document_category', models.CharField(default='generic', max_length=128)), ('uploader', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='RecoveryRequestItem', fields=[ ('id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')), ('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileRecoveryRequest')), ('target_file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.UploadedFile')), ], ), migrations.CreateModel( name='DeletionRequestItem', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileDeletionRequest')), ('target_file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.UploadedFile')), ], ), migrations.CreateModel( name='ProcessingJob', fields=[ ('asyncjobtype_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.AsyncJobType')), ('file_name', models.CharField(max_length=128)), ], bases=('website.asyncjobtype',), ), migrations.CreateModel( name='FileRecoveryJob', fields=[ ('asyncjobtype_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.AsyncJobType')), ('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileRecoveryRequest')), ], bases=('website.asyncjobtype',), ), migrations.CreateModel( name='FileDeletionJob', fields=[ ('asyncjobtype_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='website.AsyncJobType')), ('scheduled_time', models.FloatField()), ('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.FileDeletionRequest')), ], bases=('website.asyncjobtype',), ), ]
[ "r.a.clydesdale+bb-ccp@gmail.com" ]
r.a.clydesdale+bb-ccp@gmail.com
a86eb97efcd2033e7ba2688689a2d35a96976693
48295cd5f8e7a1b1cfda8b9642012611488156ce
/users/migrations/0004_auto_20191123_1158.py
69f25c440fe48acfb912e16d8e4f514085e401e8
[]
no_license
mugglecoder/airbnb-clone
0c47445761e9f9fd82805299ddab46e382e9b5a4
6276cdeaa13b1a88697b62d322dcb871d9a5e25a
refs/heads/master
2022-12-10T14:00:47.409310
2020-01-05T14:14:39
2020-01-05T14:14:39
212,250,078
0
0
null
2022-12-10T11:01:26
2019-10-02T03:44:49
Python
UTF-8
Python
false
false
563
py
# Generated by Django 2.2.5 on 2019-11-23 02:58 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0003_auto_20191123_1051'), ] operations = [ migrations.RenameField( model_name='user', old_name='email_confirmed', new_name='email_verified', ), migrations.AddField( model_name='user', name='email_secret', field=models.CharField(blank=True, default='', max_length=20), ), ]
[ "winkknd@naver.com" ]
winkknd@naver.com
7d24324bd1f5837946c3a16a2bf594cd700afd24
9d53d831b631c5431d625848ca0dbd1e4a02eb78
/pybo/models.py
7f387f16dda1be32c0a6e106a2f4bc1f0512818a
[]
no_license
jghee/Django_pratice
2b918f730dc40cd6f0c9881ad1c176906e84de8f
859befa7b04df8dd119cd6c8985d0c13edd7521a
refs/heads/main
2023-06-20T19:56:37.231458
2021-07-17T02:43:25
2021-07-17T02:43:25
383,633,921
0
0
null
null
null
null
UTF-8
Python
false
false
1,344
py
from django.db import models from django.contrib.auth.models import User # Create your models here. class Question(models.Model): subject = models.CharField(max_length=200) content = models.TextField() create_date = models.DateTimeField() author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='author_question') modify_date = models.DateTimeField(null=True, blank=True) voter = models.ManyToManyField(User, related_name='voter_question') def __str__(self): return self.subject class Answer(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) content = models.TextField() create_date = models.DateTimeField() author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='author_answer') modify_date = models.DateTimeField(null=True, blank=True) voter = models.ManyToManyField(User, related_name='voter_answer') class Comment(models.Model): author = models.ForeignKey(User, on_delete=models.CASCADE) content = models.TextField() create_date = models.DateTimeField() modify_date = models.DateTimeField(null=True, blank=True) question = models.ForeignKey(Question, null=True, blank=True, on_delete=models.CASCADE) answer = models.ForeignKey(Answer, null=True, blank=True, on_delete=models.CASCADE)
[ "ghj171937@gmail.com" ]
ghj171937@gmail.com
181d7604566e31eea4b774b2ae9b3356926009e6
a40950330ea44c2721f35aeeab8f3a0a11846b68
/VTK/Actors/ThreeLine.py
e780418bfccbe2f4be8ca077eaf8f0c68c4225b5
[]
no_license
huang443765159/kai
7726bcad4e204629edb453aeabcc97242af7132b
0d66ae4da5a6973e24e1e512fd0df32335e710c5
refs/heads/master
2023-03-06T23:13:59.600011
2023-03-04T06:14:12
2023-03-04T06:14:12
233,500,005
3
1
null
null
null
null
UTF-8
Python
false
false
3,218
py
import vtk # Visualize colors = vtk.vtkNamedColors() # Create points p0 = [0.0, 0.0, 0.0] p1 = [1.0, 0.0, 0.0] p2 = [1.0, 1.0, 0.0] p3 = [0.0, 1.0, 0.0] p4 = [2.0, 0.0, 0.0] p5 = [2.0, 1.0, 0.0] # LineSource: draw a line with two points def createLine1(): lineSource = vtk.vtkLineSource() lineSource.SetPoint1(p1) lineSource.SetPoint2(p2) mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(lineSource.GetOutputPort()) return mapper # LineSource Multi-point continuous straight line def createLine2(): lineSource = vtk.vtkLineSource() points = vtk.vtkPoints() points.InsertNextPoint(p0) points.InsertNextPoint(p1) points.InsertNextPoint(p2) points.InsertNextPoint(p3) lineSource.SetPoints(points) mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(lineSource.GetOutputPort()) return mapper # LineSource multi-point set geometry + topology def createLine3(): # 多条线添加 一个points_actor添加多条线段 # Create a vtkPoints object and store the points in it points = vtk.vtkPoints() points.InsertNextPoint(p0) points.InsertNextPoint(p1) points.InsertNextPoint(p2) points.InsertNextPoint(p3) points.InsertNextPoint(p4) points.InsertNextPoint(p5) # Create a cell array to store the lines in and add the lines to it lines = vtk.vtkCellArray() # for i in range(0, 5, 2): # line = vtk.vtkLine() # line.GetPointIds().SetId(0, i) # line.GetPointIds().SetId(1, i + 1) # lines.InsertNextCell(line) line = vtk.vtkLine() # 默认为2个端点, # print(line.GetPointIds()) # line.GetPointIds().SetNumberOfIds(4) # 可以设置为N个端点 line.GetPointIds().SetId(0, 0) # SetId第一个参数为端点ID, 第二个参数为点的ID line.GetPointIds().SetId(1, 1) lines.InsertNextCell(line) line.GetPointIds().SetId(0, 1) line.GetPointIds().SetId(1, 4) # line.GetPointIds().SetId(2, 4) lines.InsertNextCell(line) # Create a polydata to store everything in linesPolyData = vtk.vtkPolyData() # Add the points to the dataset geometry linesPolyData.SetPoints(points) # Add the lines to the dataset topology linesPolyData.SetLines(lines) # Setup actor and mapper mapper = 
vtk.vtkPolyDataMapper() mapper.SetInputData(linesPolyData) return mapper def main(): renderer = vtk.vtkRenderer() renderWindow = vtk.vtkRenderWindow() renderWindow.SetWindowName("Line") renderWindow.AddRenderer(renderer) renderWindowInteractor = vtk.vtkRenderWindowInteractor() renderWindowInteractor.SetRenderWindow(renderWindow) # Visualize colors = vtk.vtkNamedColors() renderer.SetBackground(colors.GetColor3d("Silver")) actor = vtk.vtkActor() # The first way # actor.SetMapper(createLine1()) # The second way # actor.SetMapper(createLine2()) # The third way actor.SetMapper(createLine3()) actor.GetProperty().SetLineWidth(4) actor.GetProperty().SetColor(colors.GetColor3d("Peacock")) renderer.AddActor(actor) renderWindow.Render() renderWindowInteractor.Start() if __name__ == '__main__': main()
[ "443765159@qq.com" ]
443765159@qq.com
06a25a1b6196b3b4b67262bea39f8289fb2daa7e
c059ed04ed5f72d11dbe3b01e9395bacd28b6e8b
/문자열내p와y개수.py
fdb32a8a8483982f6580418362fe2487966dd8ad
[]
no_license
kimhyewon0/kimhyewon0.github.io
532b5feb214d686865b8e6169251de8dca7a2caf
eaac275ff5b933e477099c9b4c3a1b69e05fa521
refs/heads/master
2021-01-23T04:13:25.509101
2019-09-22T16:40:30
2019-09-22T16:40:30
33,710,735
0
0
null
null
null
null
UTF-8
Python
false
false
126
py
def solution(s): s=s.upper() if s.count('P') ==s.count('Y'): return True else: return False print(solution("Py"))
[ "coope0357@gmail.com" ]
coope0357@gmail.com