blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
616
content_id
stringlengths
40
40
detected_licenses
listlengths
0
69
license_type
stringclasses
2 values
repo_name
stringlengths
5
118
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
63
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
2.91k
686M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
23 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
213 values
src_encoding
stringclasses
30 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
2
10.3M
extension
stringclasses
246 values
content
stringlengths
2
10.3M
authors
listlengths
1
1
author_id
stringlengths
0
212
372e5d2a06935c31531ffc115e0579a28d541b6a
db61a87fd7952b04b59d8daac282c776423d7114
/app/models.py
2636d94536304cf8cbc4129fbd9c29a5e008a9c5
[]
no_license
Marshall-Diffey/Order_Up_SQLAlchemy
7df59ea6f9f51be920e49a12e168672b6ad8600e
e65588a042b43b7c2645f1843e374adbaca6541d
refs/heads/main
2023-04-12T09:39:25.040631
2021-05-12T23:34:27
2021-05-12T23:34:27
366,809,318
0
0
null
null
null
null
UTF-8
Python
false
false
1,623
py
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash

db = SQLAlchemy()


class Employee(db.Model, UserMixin):
    """Application user; UserMixin supplies the Flask-Login interface."""
    __tablename__ = 'employees'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    employee_number = db.Column(db.Integer, nullable=False, unique=True)
    hashed_password = db.Column(db.String(250), nullable=False)

    @property
    def password(self):
        """Expose the stored hash (the plain-text password is never kept)."""
        return self.hashed_password

    @password.setter
    def password(self, password):
        # Hash on assignment so plain text is never persisted.
        self.hashed_password = generate_password_hash(password)

    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password, password)


class Menu(db.Model):
    """A named menu grouping menu items."""
    __tablename__ = 'menus'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30), nullable=False)

    # BUGFIX: MenuItem's relationships used back_populates targets that named
    # *columns* ('menu_id' / 'menu_type_id') instead of relationship
    # attributes, which makes SQLAlchemy's mapper configuration fail at
    # first use. back_populates must name a relationship attribute on the
    # other class, so paired collections are declared here and below.
    menu_items = db.relationship('MenuItem', back_populates='menu')


class MenuItemType(db.Model):
    """A category for menu items."""
    __tablename__ = 'menu_item_types'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(20), nullable=False)

    menu_items = db.relationship('MenuItem', back_populates='type')


class MenuItem(db.Model):
    """A single item belonging to one menu and one item type."""
    __tablename__ = 'menu_items'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    price = db.Column(db.Float, nullable=False)
    menu_id = db.Column(db.Integer, db.ForeignKey('menus.id'), nullable=False)
    menu_type_id = db.Column(db.Integer, db.ForeignKey('menu_item_types.id'), nullable=False)

    type = db.relationship('MenuItemType', back_populates='menu_items')
    menu = db.relationship('Menu', back_populates='menu_items')
[ "marshalldiffey@yahoo.com" ]
marshalldiffey@yahoo.com
0621b89dd6b05e512d441b4979260367e3f6e5c3
d4f7d1b2eea81e42db32638d0fdaf35908786a95
/mshow/updateList.py
36ebd3a5e544f3ce88a75b99feda9783577f88ae
[]
no_license
gyuha/mshow_downloader
e47e1d1a26203b5dbba4b36e0ad2f01d87368235
f44f6f706bdc3742bb892f9c96557a327fe040c3
refs/heads/master
2022-08-24T03:53:51.697041
2022-08-13T15:30:28
2022-08-13T15:30:28
161,354,810
7
0
null
2022-08-13T15:30:10
2018-12-11T15:31:19
Python
UTF-8
Python
false
false
3,446
py
from bs4 import BeautifulSoup
from collections import OrderedDict
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from mshow.config import Config
from mshow.dataSave import loadJsonFile
from mshow.driver import driver_init, driver_close
from mshow.driver import retry_wait
from mshow.imagesDownload import pathName
import urllib.parse
import os
import re

LIST_URL = "%s/bbs/board.php?bo_table=manga&page=%d"


# List of ids that were downloaded previously.
def downloadFiles(folderList):
    """Collect the manga ids recorded in download/<dir>/data.json files."""
    downloadList = []
    for downloaded in folderList:
        data = loadJsonFile(os.path.join("download", downloaded, "data.json"))
        if not data:
            continue
        if "id" in data:
            downloadList.append(data["id"])
    return downloadList


# Brute-force check whether this id was downloaded before.
def existDownload(folerList, mangaId):
    """Return *mangaId* if it was already downloaded, else an empty string."""
    for downloaded in folerList:
        data = loadJsonFile(os.path.join("download", downloaded, "data.json"))
        if not data:
            continue
        if "id" in data:
            if mangaId == data["id"]:
                return data["id"]
    return ""


def parseList(folderList, driver):
    """Scrape the current listing page for previously-downloaded manga ids.

    Returns the ids that appear on the page AND were downloaded before
    (i.e. titles that likely have new chapters to update).
    """
    downloadedList = downloadFiles(folderList)
    updateList = []
    try:
        html = driver.page_source
        bs = BeautifulSoup(html, "html.parser")
        # BUGFIX: the original passed set literals {"class", "..."} where a
        # dict {"class": "..."} was intended for the attrs filter.
        subjects = bs.find(
            "div", {"class": "list-container"}).find_all("div", {"class": "post-info"})
        for subject in subjects:
            mangaId = subject.find("a")['href']
            mangaId = urllib.parse.unquote(mangaId)
            # Keep only the manga_id query value; '+' decodes to a space.
            mangaId = re.sub(r"^.*manga_id=", "", mangaId)
            mangaId = mangaId.replace("+", " ")
            if mangaId == "":
                continue
            if mangaId in downloadedList:
                if mangaId not in updateList:
                    updateList.append(mangaId)
    except Exception as e:
        # Best-effort scrape: report and return whatever was gathered.
        print(e)
        return updateList
    return updateList


# Build the update list from one listing page of the manga board.
def filterDownloadedList(folerList, driver, page):
    c = Config()
    wait = WebDriverWait(driver, 30)
    driver.get(LIST_URL % (c.getDomain(), page))
    wait.until(EC.presence_of_element_located(
        (By.CSS_SELECTOR, '#thema_wrapper')))
    # Stop any still-loading resources once the wrapper element exists.
    driver.execute_script("window.stop();")
    updateList = parseList(folerList, driver)
    return updateList


# Fetch the manga update list across the first *updateSize* pages.
def getUpdateList(driver, updateSize=3):
    folerList = os.listdir("download")
    updateList = []
    for i in range(1, updateSize + 1):
        print("[%d / %d] Download update list" % (i, updateSize), end="\r")
        downed = filterDownloadedList(folerList, driver, i)
        updateList = updateList + downed
    # De-duplicate ids collected across pages.
    updateList = list(set(updateList))
    num = 0
    print("")
    print("Updated : %d" % len(updateList))
    for l in updateList:
        num = num + 1
        print(" %d. %s" % (num, l))
    return updateList


def checkAllDownload():
    """Return every manga id recorded under the download directory."""
    folderList = os.listdir("download")
    updateList = []
    for downloaded in folderList:
        data = loadJsonFile(os.path.join("download", downloaded, "data.json"))
        if not data:
            continue
        if "id" in data:
            updateList.append(data["id"])
    return updateList
[ "nicegyuha@gmail.com" ]
nicegyuha@gmail.com
e548da9b76ae377c55faac5fcbf80779b1b12dcd
82326639459108ec750c724ec3118b0f210389ef
/configs/swa/swa_cascade_s50_rfp_normal.py
829753ee10abc4ae10851eee9c6f86b151cd8eba
[ "Apache-2.0" ]
permissive
hlcedu/TileDetection
e9fdf27e11455b04d30ddbcb82f9fb6004c4da91
77b5ef4bb4db29f5ffe6a6fa9f87b4bfe8516e4c
refs/heads/main
2023-03-25T07:25:34.862983
2021-03-23T12:52:56
2021-03-23T12:52:56
353,211,852
1
0
Apache-2.0
2021-03-31T03:21:43
2021-03-31T03:21:43
null
UTF-8
Python
false
false
798
py
# SWA (Stochastic Weight Averaging) fine-tuning configuration layered on top
# of the cascade s50 + RFP multi-scale-training base config.
_base_ = ['../tile_round2/cascade_s50_rfp_mstrain_with_normal.py', '../_base_/swa.py']

only_swa_training = True
# whether to perform swa training
swa_training = True
# load the best pre_trained model as the starting model for swa training
swa_load_from = 'work_dirs/round2/cascade_s50_rfp_mstrain_aug_with_normal/latest.pth'
swa_resume_from = None

# swa optimizer
swa_optimizer = dict(_delete_=True, type='Adam', lr=7e-5)
swa_optimizer_config = dict(grad_clip=None)

# swa learning policy: cyclic LR, one cycle per epoch over 12 epochs
swa_lr_config = dict(
    policy='cyclic',
    target_ratio=(1, 0.01),
    cyclic_times=12,
    step_ratio_up=0.0)
swa_total_epochs = 12

# swa checkpoint setting: save a snapshot every epoch for later averaging
swa_checkpoint_config = dict(interval=1, filename_tmpl='swa_epoch_{}.pth')

work_dir = 'work_dirs/round2/swa_cascade_s50_rfp_mstrain_with_normal'
[ "huangyifei@fabu.ai" ]
huangyifei@fabu.ai
ea7f3396c57626afd2225e916f31262d410b5840
6cfdab6c944528c02a7cc77228a3c336105d63c3
/benchmarks/ODT/scaling_magni.informatik.tu-cottbus.de_20-03-06_00:43/plot.py
37233d7d7a773c1a81486371daba8e775d2a3ace
[]
no_license
ManyThreads/mythos-applications
a07a4a992ee7a30f294238741f2612eb9304f071
79a47229146f665ddc2aa171770260267a3a5b32
refs/heads/master
2021-07-09T01:12:29.119193
2020-12-17T10:37:22
2020-12-17T10:37:22
216,578,126
0
0
null
null
null
null
UTF-8
Python
false
false
2,716
py
import numpy as np
import sys as sys


# function for reading the run times from ODTLES out files
def read_data(file_name):
    """Return the sorted 'Duration' values found in *file_name*.

    Each matching line starts with the word 'Duration' and carries the
    value in the fourth whitespace-separated field; the colon in that
    field is replaced by a decimal point before float conversion
    (e.g. '0:43' -> 0.43), as in the original script.
    """
    data = []
    with open(file_name) as fp:
        line = fp.readline()
        while line:
            words = line.split()
            if len(words) > 1:
                if words[0] == "Duration":
                    words[3] = words[3].replace(":", ".")
                    data.append(float(words[3]))
            line = fp.readline()
    return sorted(data)


# Thread counts the benchmark was run with.
THREAD_COUNTS = [1, 2, 4, 8, 16, 24]


def median_runtime(system, threads):
    """Median run time read from ODT<system>OMP<threads>.out."""
    return np.median(read_data("ODT%sOMP%d.out" % (system, threads)))


def main():
    """Plot ODTLES scalability on Linux vs. MyThOS and save ODTLES.pdf."""
    # Imported lazily so read_data/median_runtime are usable without a
    # matplotlib installation (e.g. from a test or another script).
    import matplotlib.pyplot as plt

    # Replaces 12 copy-pasted read_data/median lines with one loop each.
    data_to_plot_scalability_linux = [median_runtime("Linux", t) for t in THREAD_COUNTS]
    data_to_plot_scalability_mythos = [median_runtime("Mythos", t) for t in THREAD_COUNTS]

    fig1, ax1 = plt.subplots()
    ax1.set_title("Scalability of ODTLES on Linux and MyThOS")
    p1 = plt.plot(THREAD_COUNTS, data_to_plot_scalability_linux, 'k-o')
    p2 = plt.plot(THREAD_COUNTS, data_to_plot_scalability_mythos, 'r-d')
    ax1.legend([p1[0], p2[0]], ['Linux', 'MyThOS'], loc='upper left')
    ax1.set_ylabel("Runtime in seconds")
    ax1.set_xlabel("Number of threads")
    fig1.savefig("ODTLES.pdf")


if __name__ == '__main__':
    main()
[ "gypsephi@b-tu.de" ]
gypsephi@b-tu.de
711375a12f3cbbac96635936842be4bdd32bb861
6af2ec50d3afcf9b638096badaa19d7be80f7cb8
/captcha/tujian.py
a60fe278f1faeaa070e6779f457a36f627dd7e48
[ "MIT" ]
permissive
WYEEE/JDMemberCloseAccount
acc73ace420b948699c948e27629f33743c04a51
1471cbcc330725d9af713ddac2c18f543dc08591
refs/heads/main
2023-05-28T12:01:29.155311
2021-06-11T10:14:57
2021-06-11T10:14:57
368,819,680
0
0
MIT
2021-06-11T10:45:49
2021-05-19T09:48:43
Python
UTF-8
Python
false
false
1,318
py
import os
import sys
import base64
import json
import requests

sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))


class TuJian(object):
    """Client for the TuJian (ttshitu) captcha-solving service.

    Service home page: https://www.ttshitu.com
    """

    def __init__(self, username, password):
        # Imported here so the sys.path entry added above is in effect.
        from utils.logger import Log
        self.logger = Log().logger
        self.username = username
        self.password = password

    def post_pic(self, im, type_id):
        """Submit image bytes *im* for solving as captcha type *type_id*.

        Returns the service's result payload on success; on failure the
        error message is logged and the process exits with status 1.
        """
        encoded = base64.b64encode(im).decode()
        payload = {
            "username": self.username,
            "password": self.password,
            "typeid": type_id,
            "image": encoded,
        }
        response = requests.post("http://api.ttshitu.com/predict", json=payload)
        ret = json.loads(response.text)
        if not ret['success']:
            self.logger.error(ret["message"])
            sys.exit(1)
        return ret["data"]

    @staticmethod
    def report_error(pid):
        """Report a wrongly-solved captcha by its result id *pid*."""
        response = requests.post("http://api.ttshitu.com/reporterror.json", json={"id": pid})
        ret = json.loads(response.text)
        if ret['success']:
            return "报错成功"
        return ret["message"]
[ "yqchilde@gmail.com" ]
yqchilde@gmail.com
d7bbfd2760b81ed149e6934e5af021b63f9918e5
c609ee5dd092f571e120181408f6fc40b00ec9b1
/Judgement.py
7e6e105b673c4af28f52d78294eab231156fe8d6
[]
no_license
s1280068/DiceGame
9a138fee21ab9fb1e7151e1712906b773248fa22
8fde44cad5e821e454905e0d03c5b89d8df0e6d0
refs/heads/master
2023-06-11T06:33:04.085154
2021-07-02T03:25:50
2021-07-02T03:25:50
382,206,976
0
0
null
null
null
null
UTF-8
Python
false
false
283
py
import random

# Roll two six-sided dice; total above 7 wins.
print('Rolling the dice...')
Dice = []
Dice.append(random.randint(1, 6))
Dice.append(random.randint(1, 6))
print(f'Die 1: {Dice[0]}')
# BUGFIX: the second die was labelled 'Die 1' as well (copy-paste error).
print(f'Die 2: {Dice[1]}')
print(f'Total value: {sum(Dice)}')
if sum(Dice) > 7:
    print('You won!')
else:
    print('You lost!')
[ "s1280068@u-aizu.ac.jp" ]
s1280068@u-aizu.ac.jp
561487ce846747b6d7fb0034befaeceaa9bf589e
4ae6e54a01e25d370929b49bbaa91c51b003d41a
/wwwroot/app/cgi-bin/AutograderBackEnd.py
a5ec7da5cc2a9cd932b912fdd77e998cb02ccbfb
[]
no_license
rdasxy/programming-autograder
8197a827236dc5384f6f3ceeaf2fbadefdd5506c
f885c1cd37721e1cd0b3bf3b49cc44b9adb64d92
refs/heads/master
2021-01-22T05:33:28.971055
2012-12-27T21:53:24
2012-12-27T21:53:24
null
0
0
null
null
null
null
UTF-8
Python
false
false
22,367
py
# 4th major iteration - refactoring to deal with changed authentication procedures # and to deal with each problem in parallel. import os, sys import subprocess import random import string import winprocess import win32pipe import win32file import pickle import autograde_utilities import thread import Queue import time import datetime import smtplib import collections import zipfile import autograder def ArchiveResults(JobTriple): ''' Record this attempt in archive. Gets 3-tuple: Job (itself a named tuple), result (string), error (string, possibly empty) ''' D = dict() D['UserID'] = JobTriple[0].UserID D['CourseNum'] = JobTriple[0].CourseNum D['ProblemNum'] = JobTriple[0].ProblemNum D['ProblemID']= JobTriple[0].ProblemID D['Timestamp'] = JobTriple[0].Timestamp D['Files']= JobTriple[0].Files D['Result'] = JobTriple[1] Path = 'c:/users/public/archive' Fname = JobTriple[0].UserID + JobTriple[0].CourseNum + "%04d"%JobTriple[0].ProblemID + str(JobTriple[0].Timestamp).replace(' ', '').replace(':','') Fname = Fname +'.pkl' Fullname = os.path.join(Path, Fname) Zipname = os.path.join(Path, 'archive.zip') F = open(Fullname, 'wb') pickle.dump(D, F) F.close() Z = zipfile.ZipFile(Zipname, 'a', zipfile.ZIP_DEFLATED) Z.write(Fullname, os.path.basename(Fullname)) Z.close() os.remove(Fullname) def EmailResults(AJob, Result, Error): # includes code from: http://www.mkyong.com/python/how-do-send-email-in-python-via-smtplib/ # setup login information #print "Emailing results." prefix = AJob.UserID if prefix in ('hareb', 'spatzs'): suffix = '@umkc.edu' else: suffix = '@mail.umkc.edu' Addy = prefix + suffix gmail_acct = 'umkcautograder@gmail.com' gmail_pwd = 'SaulAndBrian' # build message Body = "\nThis is an automatically generated email from the autograder. Do not reply to this address. " Body += "Contact the course instructor if you have questions." 
Body += "\nHere are the results from your submission for problem %s, %s:\n" % (AJob.ProblemNum, AJob.CourseNum) Body += Result + '\n' + Error + '\n' header = 'To:' + Addy + '\n' + 'From: ' + gmail_acct + '\n' + 'Subject:Autograder results \n' msg = header + Body # Now deal with the smtp server smtpserver = smtplib.SMTP("smtp.gmail.com",587) smtpserver.ehlo() smtpserver.starttls() smtpserver.ehlo smtpserver.login(gmail_acct, gmail_pwd) #print header smtpserver.sendmail(gmail_acct, Addy, msg) #print 'done!' smtpserver.close() def PostResults(ResultQueue): ''' pull results from queue, deal w/ logging etc. This function is called as a separate thread. It blocks waiting for things to be added to the queue; if nothing is added, it blocks until the main process dies after a 30-sec or so timeout, taking this thread with it. Queue contains 3-tuples: Job (namedtuple), Result (str), ErrMsg (str, may be empty)''' # collections.namedtuple(JobType, ['UserID', 'CourseNum', 'ProblemNum', 'ProblemID', 'Timestamp', 'Files']) while not ResultQueue.empty(): #print "Posting results, line 90" # TODO: Add code to save student's submission in archive. NextJob = ResultQueue.get() # this blocks as long as necessary. ArchiveResults(NextJob) # NextJob[0].Files = None autograder.ReportGradeStatus(NextJob[0], NextJob[1]) EmailResults(NextJob[0], NextJob[1], NextJob[2]) def Grade(JobList): ''' called by chron job--gets a named tuple representing the list of pending jobs. Spins off new threads for dealing with each job. 
Snoozes a bit, then dies.''' ResultsQueue = Queue.Queue() SandboxList = list() while JobList: Settings = dict() ProblemDict = dict() Job = JobList.pop(0) if not Job.Files: # Student didn't turn anything in ResultsQueue.put( (Job, 'SubmissionError', 'No files submitted')) Settings, ProblemDict = SetUpSubmission(Job) SandboxList.append(ProblemDict['SandboxDir']) if not Settings: # Can't set up the problem ResultsQueue.put( (Job, 'SystemError', "Can't set up problem; see administrator")) return # and we're out of here. # Otherwise paths are set up & sandbox is ready. Settings['cs101'] = HandleSubmission Settings['cs282'] = HandleMARSSubmission try: #IOFiles = ProblemDict['IOPairs'] ProblemDict['FileToRun']=os.path.join(ProblemDict['SandboxDir'], ProblemDict['Run']) if 'ExtraFiles' in ProblemDict: Extras = ProblemDict['ExtraFiles'] else: Extras = [] except KeyError: os.rmdir(ProblemDict['SandboxDir']) ResultsQueue.put( (Job, 'SystemError', 'Misread configuration data; see administrator')) return #NextJob = JobList.pop(0) ## ReportGradeStatus(NextJob.UserID, NextJob.CourseNum, NextJob.ProblemNum, ## NextJob.Timestamp, 'Submitted') try: FuncToRun = Settings[Job.CourseNum] except KeyError: print "Course number not found, don't know which language to run." print "Grade, line 138" FuncToRun(Job, Settings, ProblemDict, ResultsQueue) #thread.start_new_thread(HandleSubmission, (Job, Settings, ProblemDict, ResultQueue)) # HandleSubmission will post results to queue. Start 1 thread to handle # results by pulling them off queue & dealing with them. PostResults(ResultsQueue) #thread.start_new_thread(PostResults, (ResultQueue,)) #time.sleep(15) # which should be more than enough for everything to finish. # When this function ends, all threads and the queue they're operating on # go away. In the vast majority of cases, they're long since done anyway; # the producer threads (HandleSubmission) are done and the consumer # (PostResults) is waiting for results that will never come. 
But just in case # something was left over & blocked, the end of function will clean them up. for Dir in SandboxList: try: autograde_utilities.Cleanup(Dir) os.rmdir(Dir) except Exception, e: # if anything goes wrong, ignore it; utility script will fix later. #print e os.chdir('..') try: os.rmdir(Dir) except Exception, e: pass #print "Still didn't work.", e def ReadSystemConfig(): try: F = open('c:/autograder.ini') Stuff = dict() for line in F: Setting = line.split('=') if Setting[0]: Key = Setting[0].strip() Val=Setting[1].strip() Stuff[Key] = Val F.close() except IOError: pass except KeyError: return None return Stuff def ReadProblemINI(ProblemPath): try: F=open(os.path.join(ProblemPath, 'template.txt')) except IOError: return False ProblemDict=dict() for line in F: if len(line) > 2: thingy = line.split(':') if thingy[0]: Key = thingy[0].strip() Val=thingy[1].strip() ProblemDict[Key]=Val F.close() # Note: Some things might be lists. Convert them. try: SubmitList=[F.lower().strip() for F in ProblemDict['SubmissionFiles'].split()] ProblemDict['SubmissionFiles']=SubmitList except KeyError: pass try: ExtraList=[F.lower().strip() for F in ProblemDict['ExtraFiles'].split()] ExtraPath=os.path.join(ProblemPath, 'ExtraFiles') Extras = [os.path.join(ExtraPath, F) for F in ExtraList] ProblemDict['ExtraFiles']=Extras except KeyError: pass try: SubmitList=[F.lower().strip() for F in ProblemDict['IOPairs'].split()] TupList = list() while SubmitList: try: (i, o) = SubmitList[0], SubmitList[1] SubmitList.pop(0) SubmitList.pop(0) except IndexError: pass else: TupList.append((i, o)) ProblemDict['IOPairs']=TupList except KeyError: pass try: IOPath=ProblemDict['IOPath'] except KeyError: IOPath='' ProblemDict['IOPath'] = os.path.join(ProblemPath, IOPath) return ProblemDict def SetUpSubmission(Job): Settings = ReadSystemConfig() if not Settings: return False, "Can't read system configuration" ProblemPath=os.path.join(Settings['ProblemPath'], Job.CourseNum, '%04d' % Job.ProblemID) if 
not os.path.isdir(ProblemPath): return False, "Can't find problem directory" else: Settings['ProblemPath'] = ProblemPath ProblemDict=ReadProblemINI(ProblemPath) if not ProblemDict: return False, "Can't read problem configuration" TimeStr = str(Job.Timestamp) # Sandbox dir looks something like: # Sandbox\abcxyz02072012-01-17120102030000\stuff goes here # for problem 0207 submitted by student 'abcxyz' on 2012-01-17 at 12:01:02.030000 PM # Timestamp is a datetime object, and the string version of it has characters # that can't be part of a directory path. So fix it. TempDir = Job.UserID + ('%04d' % Job.ProblemNum) + TimeStr for ch in ' :.,': TempDir = TempDir.replace(ch, '') ProblemDict['SandboxDir'] = os.path.join(Settings['SandboxDir'], TempDir) try: os.mkdir(ProblemDict['SandboxDir']) except WindowsError: ProblemDict['SandboxDir'] = None return False, "Can't configure problem." return Settings, ProblemDict def HandleSubmission(Job, Settings, ProblemDict, ResultsQueue): ''' handle the traffic-cop aspects of a submission. Parameters: Job : The job that we're about to process. a named tuple ResultsQueue: The queue that we should post results to for later processing. Actions: For this problem, retrieve the list of system supplied files (if any) and list of (input,output) tuples. Feed the HandleFile function the problem, submission, and single (i, o) pairs until either: All input cases have been handled successfully; or Any submission has returned anything other than 'Correct.' If any case returned anything other than 'Correct': Post this job, Status, ErrMsg to results queue. Example: job, 'SyntaxError', traceback or: job, 'OutputError', 'Excessive output detected.' otherwise: Post this job, 'Correct', '' to results queue Returns: Nothing ''' #InputDir = ProblemDict['IOPath'] # Now process each set of I/O files; continue until all done, or an error is hit. 
for IOTuple in ProblemDict['IOPairs']: if 'Extras' not in ProblemDict: ProblemDict['Extras'] = None Res, Err = HandleFile(Job, os.path.join(ProblemDict['IOPath'], IOTuple[0]), os.path.join(ProblemDict['IOPath'], IOTuple[1]), ProblemDict) if Res != 'Correct': ResultsQueue.put((Job, Res, Err)) # Post results & exit early #os.rmdir(ProblemDict['SandboxDir']) return # If we're here, then all files were processed correctly. #autograde_utilities.ReportGradeStatus(StudentID, ProblemID, Res) ResultsQueue.put( (Job, 'Correct', '')) #os.rmdir(ProblemDict['SandboxDir']) return def HandleMARSSubmission(Job, Settings, ProblemDict, ResultsQueue): ''' Process one student's submission on one set of input data using MARS. Parameters: Job: The named tuple containing, among other things, the files submitted by the student and their contents. InputFileName: The name (including path if needed) of the ONE file with sample input for this test. CorrectOutputFileName: The name (including path if needed) of the ONE file with correct output for the specified input. FileNameToRun: The name (excluding path) of the ONE file that is to run to test the student's code. This must be present in Job or SystemSuppliedFileList. SystemSuppliedFileList: The (possibly empty or missing) list of other files (including paths) which are needed to run this problem's code (class files, driver programs, etc) Returns: tuple of strings (Res, Err). Res is a brief description ('Correct', 'Runtime exceeded', etc), and Err is an error message (possibly empty string). ''' # set up some labels for later (exit codes) ExitMsg = {1:'Translation Error', 2:'Time Limit Exceeded', 3:'Windows Error', \ 4:'Excessive Output', 5:'Submission Error', 6:'Assembly Error',\ 7:'Runtime Error'} # Make sure we've got everything we're expecting; if we don't, skip all this. 
ExpectedFiles = [Filename for (Filename, contents) in Job.Files] try: ExpectedFiles += ProblemDict['Extras'] # SystemSuppliedFileList except (TypeError, KeyError): # if there was no list of other needed files. pass Expected = [os.path.basename(name).lower().strip() for name in ExpectedFiles] if os.path.basename(ProblemDict['Run']).lower().strip() not in Expected: Res = "File " + ProblemDict['Run'] + " was expected, but not found." Err = ExitMsg[5] return Err, Res # even if we're going ahead, we can free up some memory del(ExpectedFiles) del(Expected) # Create working (temporary) directory, copy files into it ProblemDict['WritePath'] = os.path.dirname(ProblemDict['FileToRun']) #FileNameToRun) try: for f in Job.Files: Fname = f[0] Code = f[1] open(ProblemDict['WritePath']+'/'+os.path.basename(Fname),'w').write(Code) try: if ProblemDict['Extras']: # SystemSuppliedFileList: for f in ProblemDict['Extras']: Code = open(f).read() open(ProblemDict['WritePath']+'/'+os.path.basename(f),'w').write(Code) except KeyError: pass except IOError: return ('SystemError', 'Contact Administrator or Instructor') # Setup I/O for program we're testing. Input = open(InputFileName).read() os.chdir(ProblemDict['WritePath']) open(os.path.join(ProblemDict['WritePath'], 'input.txt'),'w').write(Input) In = open('input.txt') Out = open('output.txt', 'w') #Err = open('error.txt', 'w') # Run that sucker! try: ExitCode = winprocess.run('java -jar c:\\Mars.jar nc p sm ae6 se7 %s' % ProblemDict['Run'], stdin=In, \ stdout=Out, mSec=5000, desktop='') except WindowsError, msg: if 'timeout exceeded' in str(msg): ExitCode = 2 # time out elif ExitCode not in (0, 6, 7): ExitCode = 3 # some other Windows error # Exit code of 0 indicates no error, as usual. # Exit code 6 indicates assembly error # Exit code 7 indicates runtime error #Done with files. 
In.close() Out.close() #Err.close() # Grab output if os.path.getsize('output.txt') < 5.0e6: Out = open('output.txt').read() else: # more than 5 megabytes output, something's wrong ExitCode = 4 # so set error flag Out = '' # & set Out to a safe value, but don't touch file. # grab error message if any. #Err = open('error.txt').read() # Cleanup temporary directory autograde_utilities.Cleanup(ProblemDict['WritePath']) #os.chdir(StartPath) # os.rmdir(WritePath) # Check output for validity. Correct = str(open(CorrectOutputFileName).read()) Out = Out.replace('\r', '') Correct = Correct.replace('\r', '') try: Result = ExitMsg[ExitCode] except KeyError: Result = autograde_utilities.CompareWithFormatting(Correct, Out) return Result, '' def HandleFile(Job, InputFileName, CorrectOutputFileName, ProblemDict): #FileNameToRun, SystemSuppliedFileList=None): ''' Process one student's submission on one set of input data. Parameters: Job: The named tuple containing, among other things, the files submitted by the student and their contents. InputFileName: The name (including path if needed) of the ONE file with sample input for this test. CorrectOutputFileName: The name (including path if needed) of the ONE file with correct output for the specified input. FileNameToRun: The name (excluding path) of the ONE file that is to run to test the student's code. This must be present in Job or SystemSuppliedFileList. SystemSuppliedFileList: The (possibly empty or missing) list of other files (including paths) which are needed to run this problem's code (class files, driver programs, etc) Returns: tuple of strings (Res, Err). Res is a brief description ('Correct', 'Runtime exceeded', etc), and Err is an error message (possibly empty string). ''' # set up some labels for later (exit codes) ExitMsg = {1:'Translation Error', 2:'Time Limit Exceeded', 3:'Windows Error', \ 4:'Excessive Output', 5:'Submission Error'} # Make sure we've got everything we're expecting; if we don't, skip all this. 
ExpectedFiles = [Filename for (Filename, contents) in Job.Files] try: ExpectedFiles += ProblemDict['Extras'] # SystemSuppliedFileList except (TypeError, KeyError): # if there was no list of other needed files. pass Expected = [os.path.basename(name).lower().strip() for name in ExpectedFiles] if os.path.basename(ProblemDict['Run']).lower().strip() not in Expected: Res = "File " + ProblemDict['Run'] + " was expected, but not found." Err = ExitMsg[5] return Err, Res # even if we're going ahead, we can free up some memory del(ExpectedFiles) del(Expected) # Create working (temporary) directory, copy files into it ProblemDict['WritePath'] = os.path.dirname(ProblemDict['FileToRun']) #FileNameToRun) try: for f in Job.Files: Fname = f[0] Code = f[1] open(ProblemDict['WritePath']+'/'+os.path.basename(Fname),'w').write(Code) if ProblemDict['Extras']: # SystemSuppliedFileList: for f in ProblemDict['Extras']: Code = open(f).read() open(ProblemDict['WritePath']+'/'+os.path.basename(f),'w').write(Code) except IOError: return ('SystemError', 'Contact Administrator or Instructor') # Setup I/O for program we're testing. Input = open(InputFileName).read() os.chdir(ProblemDict['WritePath']) open(os.path.join(ProblemDict['WritePath'], 'input.txt'),'w').write(Input) In = open('input.txt') Out = open('output.txt', 'w') Err = open('error.txt', 'w') # Run that sucker! try: ExitCode = winprocess.run('python %s' % ProblemDict['Run'], stdin=In, \ stdout=Out, stderr=Err, mSec=5000, desktop='') except WindowsError, msg: if 'timeout exceeded' in str(msg): ExitCode = 2 # time out else: ExitCode = 3 # some other Windows error # Exit code of 0 indicates no error, as usual. #Done with files. In.close() Out.close() Err.close() # Grab output if os.path.getsize('output.txt') < 5.0e6: Out = open('output.txt').read() else: # more than 5 megabytes output, something's wrong ExitCode = 4 # so set error flag Out = '' # & set Out to a safe value, but don't touch file. # grab error message if any. 
Err = open('error.txt').read() # Cleanup temporary directory autograde_utilities.Cleanup(ProblemDict['WritePath']) #os.chdir(StartPath) # os.rmdir(WritePath) # Check output for validity. Correct = str(open(CorrectOutputFileName).read()) Out = Out.replace('\r', '') Correct = Correct.replace('\r', '') try: Result = ExitMsg[ExitCode] except KeyError: Result = autograde_utilities.CompareWithFormatting(Correct, Out) return Result, Err def RunTest(): JobType = collections.namedtuple('JobType', ['UserID', 'CourseNum', 'ProblemNum', 'ProblemID', 'Timestamp', 'Files']) JobList = list() UserID = 'hareb' CourseNum="CS101" ProblemNum='1' ProblemID='0102' Timestamp=str(time.localtime()) f = open('c:/users/public/problems/cs101/0102/example0102.py').read() Files = list() Files.append( ('solution.py', f)) Job = JobType(UserID, CourseNum, ProblemNum, ProblemID, Timestamp, Files) JobList.append(Job) f = open('c:/users/public/problems/cs101/0103/example0103.py').read() Files = list() Files.append( ('example0103.py', f) ) Timestamp = str(time.localtime()) Job = JobType(UserID, CourseNum, '002', '0103', Timestamp, Files) JobList.append(Job) Grade( JobList ) # print "Done." if __name__ == '__main__': connection = autograder.getConnection() Cursor = connection.cursor() cmd = """UPDATE Jobs SET Status = 'pending' WHERE SequenceNumber = 21""" Cursor.execute(cmd) connection.commit() connection.close() Jobs = autograder.getJobs() Grade(Jobs) #RunTest() ## ## OK, Res, Err = HandleSubmission(1, '0102', ['example0102.py']) ## print "Your result:", Res ## if Err: ## print "Error message:\n", Err ## ## if OK: ## print '\tNeed to update database if this is first success on this problem.' ## else: ## print '\tNeed to update database if this is first attempt on this problem.' ##
[ "rdasxy@gmail.com" ]
rdasxy@gmail.com
cf74d4ec90b798fcd24c62ce780f63475b1edb49
5c66d5d3856d9da448464e19d19be28dae7033cd
/test.py
fce3aec46cf4e7143009ce4ac503e8a120b55d4b
[]
no_license
S0Easy/script
079892f8a2e0efb803f80e94e5d4dbaeaa3f57e9
c3e57cfb5ec96e1bff5f1fa3c853587b4bdaaafa
refs/heads/main
2023-07-19T04:02:22.198867
2021-09-03T12:01:08
2021-09-03T12:01:08
402,626,362
0
0
null
null
null
null
UTF-8
Python
false
false
209
py
import gitlab import datetime print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) gitlab_url = "http://10.10.10.22/" token = "-xxVsB6xy4ayF-YmsYh4" gitlab.Gitlab(gitlab_url, token).projects.get("2")
[ "java01@qq.com" ]
java01@qq.com
7f893bc5ede151e3ba8385f0ff5bff7a0cfe4beb
497ead1ee1e09a2530aa771ae059989e341684d7
/python/cuml/dask/preprocessing/LabelEncoder.py
4c731de842b1d1109949e96d1b4ad9f7128da6cf
[ "Apache-2.0" ]
permissive
xieliaing/cuml
193f5753696bbfd4de8e3eaef919c18da2fd1d1a
78092ddde28d5a810e45d6186f049c1309121408
refs/heads/master
2022-11-10T16:45:38.818055
2022-11-03T23:12:07
2022-11-03T23:12:07
159,592,316
0
0
Apache-2.0
2018-11-29T01:59:07
2018-11-29T01:59:07
null
UTF-8
Python
false
false
7,769
py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from cuml.dask.common.base import BaseEstimator from cuml.dask.common.base import DelayedTransformMixin from cuml.dask.common.base import DelayedInverseTransformMixin from toolz import first from collections.abc import Sequence from dask_cudf.core import DataFrame as dcDataFrame from dask_cudf.core import Series as daskSeries from cuml.common.exceptions import NotFittedError from cuml.preprocessing import LabelEncoder as LE class LabelEncoder(BaseEstimator, DelayedTransformMixin, DelayedInverseTransformMixin): """ An nvcategory based implementation of ordinal label encoding Parameters ---------- handle_unknown : {'error', 'ignore'}, default='error' Whether to raise an error or ignore if an unknown categorical feature is present during transform (default is to raise). When this parameter is set to 'ignore' and an unknown category is encountered during transform or inverse transform, the resulting encoding will be null. Examples -------- Converting a categorical implementation to a numerical one .. 
code-block:: python >>> from dask_cuda import LocalCUDACluster >>> from dask.distributed import Client >>> import cudf >>> import dask_cudf >>> from cuml.dask.preprocessing import LabelEncoder >>> import pandas as pd >>> pd.set_option('display.max_colwidth', 2000) >>> cluster = LocalCUDACluster(threads_per_worker=1) >>> client = Client(cluster) >>> df = cudf.DataFrame({'num_col':[10, 20, 30, 30, 30], ... 'cat_col':['a','b','c','a','a']}) >>> ddf = dask_cudf.from_cudf(df, npartitions=2) >>> # There are two functionally equivalent ways to do this >>> le = LabelEncoder() >>> le.fit(ddf.cat_col) # le = le.fit(data.category) also works <cuml.dask.preprocessing.LabelEncoder.LabelEncoder object at 0x...> >>> encoded = le.transform(ddf.cat_col) >>> print(encoded.compute()) 0 0 1 1 2 2 3 0 4 0 dtype: uint8 >>> # This method is preferred >>> le = LabelEncoder() >>> encoded = le.fit_transform(ddf.cat_col) >>> print(encoded.compute()) 0 0 1 1 2 2 3 0 4 0 dtype: uint8 >>> # We can assign this to a new column >>> ddf = ddf.assign(encoded=encoded.values) >>> print(ddf.compute()) num_col cat_col encoded 0 10 a 0 1 20 b 1 2 30 c 2 3 30 a 0 4 30 a 0 >>> # We can also encode more data >>> test_data = cudf.Series(['c', 'a']) >>> encoded = le.transform(dask_cudf.from_cudf(test_data, ... npartitions=2)) >>> print(encoded.compute()) 0 2 1 0 dtype: uint8 >>> # After train, ordinal label can be inverse_transform() back to >>> # string labels >>> ord_label = cudf.Series([0, 0, 1, 2, 1]) >>> ord_label = le.inverse_transform( ... dask_cudf.from_cudf(ord_label,npartitions=2)) >>> print(ord_label.compute()) 0 a 1 a 2 b 0 c 1 b dtype: object >>> client.close() >>> cluster.close() """ def __init__(self, *, client=None, verbose=False, **kwargs): super().__init__(client=client, verbose=verbose, **kwargs) def fit(self, y): """ Fit a LabelEncoder (nvcategory) instance to a set of categories Parameters ---------- y : dask_cudf.Series Series containing the categories to be encoded. 
Its elements may or may not be unique Returns ------- self : LabelEncoder A fitted instance of itself to allow method chaining Notes -------- Number of unique classes will be collected at the client. It'll consume memory proportional to the number of unique classes. """ _classes = y.unique().compute() el = first(y) if isinstance(y, Sequence) else y self.datatype = ('cudf' if isinstance(el, (dcDataFrame, daskSeries)) else 'cupy') self._set_internal_model(LE(**self.kwargs).fit(y, _classes=_classes)) return self def fit_transform(self, y, delayed=True): """ Simultaneously fit and transform an input This is functionally equivalent to (but faster than) LabelEncoder().fit(y).transform(y) """ return self.fit(y).transform(y, delayed=delayed) def transform(self, y, delayed=True): """ Transform an input into its categorical keys. This is intended for use with small inputs relative to the size of the dataset. For fitting and transforming an entire dataset, prefer `fit_transform`. Parameters ---------- y : dask_cudf.Series Input keys to be transformed. Its values should match the categories given to `fit` Returns ------- encoded : dask_cudf.Series The ordinally encoded input series Raises ------ KeyError if a category appears that was not seen in `fit` """ if self._get_internal_model() is not None: return self._transform(y, delayed=delayed, output_dtype='int32', output_collection_type='cudf') else: msg = ("This LabelEncoder instance is not fitted yet. Call 'fit' " "with appropriate arguments before using this estimator.") raise NotFittedError(msg) def inverse_transform(self, y, delayed=True): """ Convert the data back to the original representation. In case unknown categories are encountered (all zeros in the one-hot encoding), ``None`` is used to represent this category. Parameters ---------- X : dask_cudf Series The string representation of the categories. delayed : bool (default = True) Whether to execute as a delayed task or eager. 
Returns ------- X_tr : dask_cudf.Series Distributed object containing the inverse transformed array. """ if self._get_internal_model() is not None: return self._inverse_transform(y, delayed=delayed, output_collection_type='cudf') else: msg = ("This LabelEncoder instance is not fitted yet. Call 'fit' " "with appropriate arguments before using this estimator.") raise NotFittedError(msg)
[ "noreply@github.com" ]
xieliaing.noreply@github.com
b374191a7cf732d53d219ab1e5838ac5a74b3ab2
8fcc27160f8700be46296568260fa0017a0b3004
/client/cherrypy/test/test_virtualhost.py
e9b88bd297cb6047933124c32d619fd6c0d22cc0
[]
no_license
connoryang/dec-eve-serenity
5d867f4eedfa896a4ef60f92556356cafd632c96
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
refs/heads/master
2021-01-22T06:33:16.303760
2016-03-16T15:15:32
2016-03-16T15:15:32
56,389,750
1
0
null
2016-04-16T15:05:24
2016-04-16T15:05:24
null
UTF-8
Python
false
false
3,718
py
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\cherrypy\test\test_virtualhost.py import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import cherrypy from cherrypy.test import helper class VirtualHostTest(helper.CPWebCase): def setup_server(): class Root: def index(self): return 'Hello, world' index.exposed = True def dom4(self): return 'Under construction' dom4.exposed = True def method(self, value): return 'You sent %s' % repr(value) method.exposed = True class VHost: def __init__(self, sitename): self.sitename = sitename def index(self): return 'Welcome to %s' % self.sitename index.exposed = True def vmethod(self, value): return 'You sent %s' % repr(value) vmethod.exposed = True def url(self): return cherrypy.url('nextpage') url.exposed = True static = cherrypy.tools.staticdir.handler(section='/static', dir=curdir) root = Root() root.mydom2 = VHost('Domain 2') root.mydom3 = VHost('Domain 3') hostmap = {'www.mydom2.com': '/mydom2', 'www.mydom3.com': '/mydom3', 'www.mydom4.com': '/dom4'} cherrypy.tree.mount(root, config={'/': {'request.dispatch': cherrypy.dispatch.VirtualHost(**hostmap)}, '/mydom2/static2': {'tools.staticdir.on': True, 'tools.staticdir.root': curdir, 'tools.staticdir.dir': 'static', 'tools.staticdir.index': 'index.html'}}) setup_server = staticmethod(setup_server) def testVirtualHost(self): self.getPage('/', [('Host', 'www.mydom1.com')]) self.assertBody('Hello, world') self.getPage('/mydom2/', [('Host', 'www.mydom1.com')]) self.assertBody('Welcome to Domain 2') self.getPage('/', [('Host', 'www.mydom2.com')]) self.assertBody('Welcome to Domain 2') self.getPage('/', [('Host', 'www.mydom3.com')]) self.assertBody('Welcome to Domain 3') self.getPage('/', [('Host', 'www.mydom4.com')]) self.assertBody('Under construction') self.getPage('/method?value=root') self.assertBody("You sent u'root'") self.getPage('/vmethod?value=dom2+GET', [('Host', 'www.mydom2.com')]) self.assertBody("You 
sent u'dom2 GET'") self.getPage('/vmethod', [('Host', 'www.mydom3.com')], method='POST', body='value=dom3+POST') self.assertBody("You sent u'dom3 POST'") self.getPage('/vmethod/pos', [('Host', 'www.mydom3.com')]) self.assertBody("You sent 'pos'") self.getPage('/url', [('Host', 'www.mydom2.com')]) self.assertBody('%s://www.mydom2.com/nextpage' % self.scheme) def test_VHost_plus_Static(self): self.getPage('/static/style.css', [('Host', 'www.mydom2.com')]) self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/css;charset=utf-8') self.getPage('/static2/dirback.jpg', [('Host', 'www.mydom2.com')]) self.assertStatus('200 OK') self.assertHeader('Content-Type', 'image/jpeg') self.getPage('/static2/', [('Host', 'www.mydom2.com')]) self.assertStatus('200 OK') self.assertBody('Hello, world\r\n') self.getPage('/static2', [('Host', 'www.mydom2.com')]) self.assertStatus(301)
[ "masaho.shiro@gmail.com" ]
masaho.shiro@gmail.com
a0d3caee1fbf6c2afadd6139c75f0fb247dbe328
b24e45267a8d01b7d3584d062ac9441b01fd7b35
/Usuario/.history/views_20191102195546.py
879e6589a3c510e2404c8ff9b59bed87520c898f
[]
no_license
slalbertojesus/merixo-rest
1707b198f31293ced38930a31ab524c0f9a6696c
5c12790fd5bc7ec457baad07260ca26a8641785d
refs/heads/master
2022-12-10T18:56:36.346159
2020-05-02T00:42:39
2020-05-02T00:42:39
212,175,889
0
0
null
2022-12-08T07:00:07
2019-10-01T18:56:45
Python
UTF-8
Python
false
false
3,630
py
from django.shortcuts import render from rest_framework import status from rest_framework.response import Response from rest_framework.decorators import api_view, permission_classes from rest_framework.permissions import AllowAny from rest_framework_simplejwt.tokens import RefreshToken from .models import Usuario from .serializers import UsuarioSerializer SUCCESS = 'exito' ERROR = 'error' DELETE_SUCCESS = 'eliminado' UPDATE_SUCCESS = 'actualizado' CREATE_SUCCESS = 'creado' @api_view(['GET', ]) def api_detail_usuario_view(request, identificador): try: usuario = Usuario.objects.get(identificador = identificador) except usuario.DoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.method == 'GET': serializer = UsuarioSerializer(usuario) return Response(serializer.data) @api_view(['PUT',]) def api_update_usuario_view(request, identificador): try: usuario = Usuario.objects.get(identificador = identificador) except usuario.DoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.method == 'PUT': serializer = UsuarioSerializer(usuario, data=request.data) data = {} if serializer.is_valid(): serializer.save() data[SUCCESS] = UPDATE_SUCCESS return Response(data=data) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) @api_view(['DELETE',]) def api_delete_usuario_view(request, identificador): try: usuario = Usuario.objects.get(identificador=identificador) except usuario.DoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) if request.method == 'DELETE': operation = usuario.delete() data = {} if operation: data[SUCCESS] = DELETE_SUCCESS return Response(data=data) @api_view(['POST',]) @permission_classes([AllowAny,]) def api_create_usuario_view(request): if request.method == 'POST': serializer = UsuarioSerializer(data=request.data) data = {} if serializer.is_valid(): usuario = serializer.save() data['response'] = "se registró de forma exitosa" data['nombre'] = usuario.nombre data['usuario'] = usuario.usuario 
return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) @api_view(["POST"]) @permission_classes([AllowAny,]) def api_login(request): usuario = request.data.get("usuario") contraseña = request.data.get("contraseña") if usuario is None or contraseña is None: return Response({'error': 'No existen contraseña ni usuario'}, status=HTTP_400_BAD_REQUEST) usuario = authenticate(usuario=usuario, contraseña=contraseña) get_tokens_for_user(usuario) return { 'refresh': str(token), 'access': str(token.access_token), } def for_user(cls, user): """ Returns an authorization token for the given user that will be provided after authenticating the user's credentials. """ user_id = getattr(user, api_settings.USER_ID_FIELD) if not isinstance(user_id, int): user_id = str(user_id) token = cls() token[api_settings.USER_ID_CLAIM] = user_id return token refresh = RefreshToken.for_user(user) def authenticate(usuario, contraseña): usuario = Usuario.objects.get(usuario= usuario, contraseña=contraseña) if not usuario: raise serializers.ValidationError({'error': 'Usuario no existe'}, status=HTTP_404_NOT_FOUND) return usuario
[ "slalbertojesus@gmail.com" ]
slalbertojesus@gmail.com
fc94bfc8c1c5fa1df951fab64844fca2a07b5310
8cf593d60d02be4692bce873656c85466fc1d8e1
/cw7/Zad1.py
44641eb1ec798de123460d5e7fff7ebaa097f101
[]
no_license
michals-lab/Python
502ad5202c962808d499e16545aea5ca73c3fe21
09e2c76368215e3345ee5130bf56ca626ffbbe38
refs/heads/master
2021-01-25T23:55:35.358748
2020-05-20T07:26:42
2020-05-20T07:26:42
243,231,231
0
0
null
null
null
null
UTF-8
Python
false
false
91
py
import numpy as nu x = nu.arange(3).reshape(1,3) y = nu.arange(6,9).reshape(1,3) print(x*y)
[ "59738119+michals-lab@users.noreply.github.com" ]
59738119+michals-lab@users.noreply.github.com
ca94fb0f772c696f598a29b72de22c76e653eb65
67a60399e79144054a5cca8436218c1005423978
/frux_app_server/__init__.py
1bb9603cde757fa3455ec4109debe17e1626ebae
[ "Apache-2.0", "MIT" ]
permissive
camidvorkin/frux-app-server
e84616009b11bc354c46e4fa54f64d3d3aa6384b
21098234a7867908250022e3e1c0580417d1ca35
refs/heads/main
2023-06-28T00:50:27.991923
2021-07-30T19:43:51
2021-07-30T19:43:51
360,342,828
3
1
null
null
null
null
UTF-8
Python
false
false
199
py
try: import importlib.metadata as importlib_metadata except ModuleNotFoundError: import importlib_metadata # type: ignore __version__ = importlib_metadata.version(__name__) # type: ignore
[ "cdvorkin@medallia.com" ]
cdvorkin@medallia.com
152c01b7254082a6295aa8c64ce3f0600ca33d97
be134c181703b95aca1e48b6a31bcfdb7bcfcc76
/site/mezzanine_old/galleries/migrations/0001_initial.py
11f1937e16fbf9cff1135c9e2c992c658bbfd803
[]
permissive
aldenjenkins/ThiccGaming
0245955a797394bcfeedb2cfb385f633653ba55d
4790d2568b019438d1569d0fe4e9f9aba008b737
refs/heads/master
2022-12-16T02:43:36.532981
2021-11-17T04:15:21
2021-11-17T04:15:21
154,858,818
0
0
BSD-3-Clause
2022-12-08T02:58:44
2018-10-26T15:52:39
Python
UTF-8
Python
false
false
1,837
py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import mezzanine.core.fields class Migration(migrations.Migration): dependencies = [ ('pages', '__first__'), ] operations = [ migrations.CreateModel( name='Gallery', fields=[ ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='pages.Page')), ('content', mezzanine.core.fields.RichTextField(verbose_name='Content')), ('zip_import', models.FileField(help_text="Upload a zip file containing images, and they'll be imported into this gallery.", upload_to='galleries', verbose_name='Zip import', blank=True)), ], options={ 'ordering': ('_order',), 'verbose_name': 'Gallery', 'verbose_name_plural': 'Galleries', }, bases=('pages.page', models.Model), ), migrations.CreateModel( name='GalleryImage', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('_order', models.IntegerField(null=True, verbose_name='Order')), ('file', mezzanine.core.fields.FileField(max_length=200, verbose_name='File')), ('description', models.CharField(max_length=1000, verbose_name='Description', blank=True)), ('gallery', models.ForeignKey(related_name='images', to='galleries.Gallery')), ], options={ 'ordering': ('_order',), 'verbose_name': 'Image', 'verbose_name_plural': 'Images', }, bases=(models.Model,), ), ]
[ "alden@aldenjenkins.com" ]
alden@aldenjenkins.com
1b70bccd5370036cb4520982e27696d6b98d1e47
10729b1d8e2761e49c56a6a308ee6b2f486d4076
/PropertyScraper/__main__.py
f60db63a37a55ba97aa880ef3f66de8d9c0edd6f
[]
no_license
thebend/real
9042e4cd261bee306e3ffb81b26e55416851eb71
db2a32ac356fdd7d342aed138cac744e31e329a0
refs/heads/master
2021-06-11T00:10:48.589182
2021-03-04T07:08:34
2021-03-04T07:08:34
82,351,225
0
0
null
null
null
null
UTF-8
Python
false
false
647
py
#pylint: disable=C0103 ''' Execute scraper with default settings ''' import logging from datetime import datetime from PropertyScraper import Scraper log_filename = '{:%Y-%m-%d}.log'.format(datetime.now()) db_filename = '{:%Y-%m-%d}.db'.format(datetime.now()) logger = logging.getLogger('PropertyScraper') file_handler = logging.FileHandler(log_filename) logger.addHandler(file_handler) # scraper = Scraper.Scraper(db_filename, rebuild=True) # scraper.scrape_terramap() scraper = Scraper.Scraper(db_filename) # scraper.scrape_evalue_neighbours() # scraper.scrape_ev_by_tmaddress() # scraper.scrape_ev_by_tmcenter() scraper.ev_center2ev_geo()
[ "Benjamin Davidson" ]
Benjamin Davidson
3cd88a93ec624282caf04872b0d591e54a297d80
bd12b2b84643023ff65734bc187d0c05cc540c4c
/scripts/compare_comparators.py
fe07f218b50049761eee72939c20fa091c4412af
[ "Apache-2.0" ]
permissive
alexander-bzikadze/graph_diff
27bdc3c25c4b3a567bda8c2c967a74ccb8f412f9
c7d5510590d8f6999697a3e197d4e806c320e968
refs/heads/master
2021-05-16T13:03:25.026373
2018-05-01T18:16:35
2018-05-01T18:16:35
105,353,922
0
1
null
null
null
null
UTF-8
Python
false
false
1,676
py
import logging from graph_diff.graph import rnr_graph from graph_diff.graph.graph_generator import GraphGenerator from graph_diff.graph_comparison import generate_n_comparator_tests from graph_diff.graph_diff_algorithm import GraphMapComparatorByNodeNum, GraphMapComparatorByEdgeNum from graph_diff.graph_diff_algorithm.graph_map import * NUMBER_OF_TESTS = 100 DIRECTORY = "../comparator_png/" comparators = [ # GraphMapComparatorByEdgeNumAndThenNodeNum(), # GraphMapComparatorByEdgeNumAndNodeNumSum(), # GraphMapComparatorByNodeNumAndThenEdgeNum(), GraphMapComparatorByNodeNum(), GraphMapComparatorByEdgeNum(), # GraphMapComparatorByEdgeDiffAndThenNodeDiff() ] logging.info("Start comparator test with {0} tests".format(NUMBER_OF_TESTS)) generate_n_comparator_tests(n=NUMBER_OF_TESTS, comparators=comparators, directory=DIRECTORY) class GeneratorMock(GraphGenerator): i = 0 def generate_graph(self): if self.i == 0: graph = rnr_graph() graph.add_node(lr_node(1, 1)) graph.add_node(lr_node(1, 2)) graph.add_node(lr_node(1, 3)) graph.add_node(lr_node(2, 1)) graph.add_node(lr_node(2, 2)) elif self.i == 1: graph = rnr_graph() graph.add_node(lr_node(1, 1)) graph.add_node(lr_node(1, 2)) graph.add_node(lr_node(2, 3)) graph.add_node(lr_node(2, 1)) graph.add_node(lr_node(2, 2)) else: raise Exception("") self.i += 1 return graph # generate_n_comparator_tests(n=1, comparators=comparators, directory=DIRECTORY, graph_generator=GeneratorMock())
[ "alexander.bzikadze@gmail.com" ]
alexander.bzikadze@gmail.com
b08be16b6f55bbb29dd93651676a710322f99cdd
2fcb5da42f0aff62c88189bd36fc5f61a40eb604
/vardautomation/timeconv.py
3b84b24deda8187b48a85d3ae7948559d45a7404
[ "MIT" ]
permissive
tomato39/vardautomation
d45ec446a1cd06c2e7b7ec5378772953fa7b4caa
efa24d9420d6a6f732e8b0a846874a289a7cb095
refs/heads/master
2023-08-23T01:44:00.014196
2021-10-21T23:05:52
2021-10-21T23:09:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,647
py
"""Conversion time module""" from fractions import Fraction from .status import Status class Convert: """Collection of methods to perform time conversion""" @classmethod def ts2f(cls, ts: str, fps: Fraction, /) -> int: """ Convert a timestamp hh:mm:ss.xxxx in number of frames :param ts: Timestamp :param fps: Framerate Per Second :return: Frames """ s = cls.ts2seconds(ts) f = cls.seconds2f(s, fps) return f @classmethod def f2ts(cls, f: int, fps: Fraction, /, *, precision: int = 3) -> str: """ Convert frames in timestamp hh:mm:ss.xxxx :param f: Frames :param fps: Framerate Per Second :param precision: Precision number, defaults to 3 :return: Timestamp """ s = cls.f2seconds(f, fps) ts = cls.seconds2ts(s, precision=precision) return ts @classmethod def seconds2ts(cls, s: float, /, *, precision: int = 3) -> str: """ Convert seconds in timestamp hh:mm:ss.xxx :param s: Seconds :param precision: Precision number, defaults to 3 :return: Timestamp """ m = s // 60 s %= 60 h = m // 60 m %= 60 return cls.composets(h, m, s, precision=precision) @classmethod def f2assts(cls, f: int, fps: Fraction, /) -> str: """ Convert frames to .ass timestamp hh:mm:ss.xx properly by removing half of one frame per second of the specified framerate :param f: Frames :param fps: Framerate Per Second :return: ASS timestamp """ s = cls.f2seconds(f, fps) s -= fps ** -1 * 0.5 ts = cls.seconds2ts(max(0, s), precision=3) return ts[:-1] @classmethod def assts2f(cls, assts: str, fps: Fraction, /) -> int: """ Convert .ass timestamp hh:mm:ss.xx to frames properly by adding half of one frame per second of the specified framerate :param assts: ASS timestamp :param fps: Framerate Per Second :return: Frames """ s = cls.ts2seconds(assts) if s > 0: s += fps ** -1 * 0.5 return cls.seconds2f(s, fps) @staticmethod def f2seconds(f: int, fps: Fraction, /) -> float: """ Convert frames to seconds :param f: Frames :param fps: Framerate Per Second :return: Seconds """ if f == 0: return 0.0 t = round(float(10 ** 9 * f * fps 
** -1)) s = t / 10 ** 9 return s @staticmethod def ts2seconds(ts: str, /) -> float: """ Convert timestamp hh:mm:ss.xxxx to seconds :param ts: Timestamp :return: Seconds """ h, m, s = map(float, ts.split(':')) return h * 3600 + m * 60 + s @staticmethod def seconds2f(s: float, fps: Fraction, /) -> int: """ Convert seconds to frames :param s: Seconds :param fps: Framerate Per Second :return: Frames """ return round(s * fps) @staticmethod def samples2seconds(num_samples: int, sample_rate: int, /) -> float: """ Convert samples to seconds :param num_samples: Samples :param sample_rate: Playback sample rate :return: Seconds """ return num_samples / sample_rate @staticmethod def seconds2samples(s: float, sample_rate: int, /) -> int: """ Convert seconds to samples :param s: Seconds :param sample_rate: Playback sample rate :return: Samples """ return round(s * sample_rate) @classmethod def f2samples(cls, f: int, fps: Fraction, sample_rate: int) -> int: """ Convert frames to samples :param f: Frames :param fps: Framerate Per Second :param sample_rate: Playback sample rate :return: Samples """ s = cls.f2seconds(f, fps) return cls.seconds2samples(s, sample_rate) @classmethod def samples2f(cls, num_samples: int, sample_rate: int, fps: Fraction) -> int: """ Convert sample to frames :param num_samples: Samples :param sample_rate: Playback sample rate :param fps: Framerate Per Second :return: Frame """ s = cls.samples2seconds(num_samples, sample_rate) return cls.seconds2f(s, fps) @staticmethod def composets(h: float, m: float, s: float, /, *, precision: int = 3) -> str: """ Make a timestamp based on given hours, minutes and seconds :param h: Hours :param m: Minutes :param s: Seconds :param precision: Precision number, defaults to 3 :return: Timestamp """ if precision == 0: out = f"{h:02.0f}:{m:02.0f}:{round(s):02}" elif precision == 3: out = f"{h:02.0f}:{m:02.0f}:{s:06.3f}" elif precision == 6: out = f"{h:02.0f}:{m:02.0f}:{s:09.6f}" elif precision == 9: out = 
f"{h:02.0f}:{m:02.0f}:{s:012.9f}" else: Status.fail(f'composets: the precision {precision} must be a multiple of 3 (including 0)') return out
[ "ichunjo.le.terrible@gmail.com" ]
ichunjo.le.terrible@gmail.com
6e260c0f266108fca49853195f3c3f8c7721266f
5f2b7f5eef576dfa575bac913c39c30607f946a1
/Assignment1.py
ed4eb2bcf302203f7f796407791ca5a42df62013
[]
no_license
Kolokodess/switch_python_Ada
f9c3a6728b25ca3d1cb30ea20ffb77de6bd73cd8
42a42751cac75fab4fb01505294be6a3ef25268e
refs/heads/master
2021-06-18T11:07:38.416980
2017-04-04T08:41:46
2017-04-04T08:41:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
718
py
balance = float(raw_input ("Enter Balance:")) Annual_interest = float(raw_input("Enter Annual Interest rate:")) min_payment_rate = float(raw_input("Enter Minimum payment rate:")) total_payment = 0 months = 1 while months <= 12: #m_m_p = minimum monthly payment m_m_p = min_payment_rate * balance monthly_interest = Annual_interest/12.0 * balance principal = m_m_p - monthly_interest balance = balance - principal print "month:", months print "m_m_p:", m_m_p print "monthly_interest:", monthly_interest print "principal:", principal print "balance:", balance total_payment +=m_m_p months +=1 print "Result" print "total_payment: $",round(total_payment,2) print "Remaining balance: $", round(balance,2)
[ "ada.oyom@gmail.com" ]
ada.oyom@gmail.com
5e759521921a5fbee942af6ff03899975bbd0b35
84ab518741695c4cdaaaaad7aacd242a48542373
/practicePrograms2.py
2a9bca47df4a22c71dce1bbf0feda7d31fd35710
[]
no_license
Aakashgarg743/Learn-Python
755818988dc391dc9cdea7091a6488fdb39b0b3d
b5e832146845ed140e63f7f2151af70d21e44003
refs/heads/master
2023-08-11T08:14:00.474551
2021-09-29T13:52:33
2021-09-29T13:52:33
409,439,716
0
0
null
null
null
null
UTF-8
Python
false
false
2,188
py
# DICTIONARY user = input("Welcome To My Dictionary\n['python', 'pip', 'functions']\nEnter any word that are listed above to get the meaning....\n").lower() dic = {"python": "it is a programming language..", "pip":"it is used to install packages", "funcitons": "it is a block of code that only runs when it is called..."} if user in dic.keys(): print(dic[user]) else: print("You entered wrong input...") # FAULTY - CALCULATOR def add(num1, num2): if num1=="56" and num2=="9": print("77") else: user = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n")) if user == 1: val = float(num1) + float(num2) else: val = int(num1) + int(num2) return val def sub(num1, num2): user = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n")) if user == 1: val = float(num1) - float(num2) else: val = int(num1) - int(num2) return val def mul(num1, num2): if num1=="45" and num2=="3": print("555") else: user = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n")) if user == 1: val = float(num1) * float(num2) else: val = int(num1) * int(num2) return val def div(num1, num2): user = int(input("In which format you want to get your result???\nType- \n1. Decimal\n2. Integer\n")) if user == 1: val = float(num1) / float(num2) else: val = int(num1) // int(num2) return val if __name__=='__main__': n1 = input("Enter 1st number...\n") n2 = input("Enter 2nd number...\n") if n1.isdigit() and n2.isdigit(): inpu = int(input("What operation you want to perform\nType- \n1. Addition\n2. Subtraction\n3. Multiplication\n4. Division\n")) if inpu == 1: print(add(n1, n2)) elif inpu == 2: print(sub(n1, n2)) elif inpu == 3: print(mul(n1, n2)) elif inpu ==4: print(div(n1, n2)) else: print("Wrong Input...") else: print("You enter wrong input")
[ "91084902+Aakashgarg743@users.noreply.github.com" ]
91084902+Aakashgarg743@users.noreply.github.com
ecf24d7d6ee11295a51d53aedecf26dcf7c0a36a
f57a425d2bfe242f59bfccefb844c06f6a924bb9
/LinearRegression.py
d8742adbfb5eb3ca9eda275cfd5df8ec9b9b7d75
[]
no_license
MostafaZegoo/NLP_Project
a48208d4626e1b2254f96757714db77f14ae2880
363942bea9d297c9af47e28a5fd35fcf8860dda2
refs/heads/master
2020-03-12T23:19:17.713877
2018-08-21T08:29:59
2018-08-21T08:29:59
130,863,659
0
0
null
null
null
null
UTF-8
Python
false
false
1,706
py
import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.feature_extraction.text import CountVectorizer from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score,confusion_matrix,classification_report from sklearn.multiclass import OneVsRestClassifier from sklearn.linear_model import LinearRegression import string np.random.seed(123456) news_df = pd.read_csv("uci-news-aggregator.csv", sep = ",") news_df['CATEGORY'] = news_df.CATEGORY.map({ 'b': 1, 't': 2, 'e': 3, 'm': 4 }) news_df['TITLE'] = news_df.TITLE.map(lambda x: x.lower().translate(str.maketrans('','', string.punctuation))) vectorizer = CountVectorizer(stop_words='english') x = vectorizer.fit_transform(news_df['TITLE']) encoder = LabelEncoder() y = encoder.fit_transform(news_df['CATEGORY']) # split into train and test sets x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33,random_state=42) # Instantiate the classifier: clf clf = OneVsRestClassifier(LinearRegression()) # Fit the classifier to the training data clf.fit(x_train, y_train) #clf.score(x_test,y_test) y_pred=clf.predict(x_test) tn,fn,tp,fp=confusion_matrix(y_test,y_pred) print(confusion_matrix(y_test,y_pred)) print("===================================") print(classification_report(y_test,y_pred)) print("===================================") print("Accuracy score:",accuracy_score(y_test,y_pred)) print("===================================") print(clf.predict(vectorizer.transform(["nescafe is a product from nestle"]))) plt.plot(tn) plt.plot(tp) plt.plot(fn) plt.plot(fp) plt.show()
[ "noreply@github.com" ]
MostafaZegoo.noreply@github.com
bb86890b77d314e21ced1d0d6e8ca9908fe3952a
baccbb4478c01c4c102cc1cfae56103f179a50d8
/scripts/handle_mysql.py
83cd4c7631baa42559ce110f8b02604fc0a58aa8
[]
no_license
cheer1106/test_cheer
a09b5f11e85f06e0aa1c0fef69b6b30e069a38b1
6e29266daabdcf45a7a76da2a5653d6ef6b1c108
refs/heads/master
2020-09-29T08:27:03.935876
2019-12-10T10:44:30
2019-12-10T10:44:30
226,999,091
0
0
null
null
null
null
UTF-8
Python
false
false
4,061
py
""" ============================ Author:cheer # @time:2019-11-19 10:01 # @FileName: handle_mysql.py # @Software: PyCharm # @Cnblogs :https://www.cnblogs.com/***** ============================ """ import pymysql import random from scripts.handle_yaml import do_yaml ''' 类封装的原则:不同功能写不同的方法 ''' class HandleMysql(object): def __init__(self): # 1 建立连接 # self.conn = pymysql.connect(host='api.lemonban.com', # mysql服务器IP或者域名 # user='future', # 用户名 # password='123456', # 密码 # db='futureloan', # 要连接的数据库名 # port=3306, # 数据库端口号,默认为3306,也可以不写 # charset='utf8', # 数据库编码为utf8,不能写为utf-8 # cursorclass=pymysql.cursors.DictCursor # 添加游标类,取结果的时候返回的字典类型;不添加返回元组 # ) self.conn = pymysql.connect(host=do_yaml.read_yaml('mysql', 'host'), # mysql服务器IP或者域名 user=do_yaml.read_yaml('mysql', 'user'), # 用户名 password=do_yaml.read_yaml('mysql', 'password'), # 密码 db=do_yaml.read_yaml('mysql', 'db'), # 要连接的数据库名 port=do_yaml.read_yaml('mysql', 'port'), # 数据库端口号,默认为3306,也可以不写 charset='utf8', # 数据库编码为utf8,不能写为utf-8 cursorclass=pymysql.cursors.DictCursor # 添加游标类,取结果的时候返回的字典类型(结果不唯一的话返回嵌套字典的列表);不添加返回元组 ) # 2.创建游标对象 self.cursor = self.conn.cursor() def run(self, sql, args=None, is_more=True): # 3.使用游标对象,运行sql self.cursor.execute(sql, args) # 4.使用连接对象提交 self.conn.commit() # 5.返回结果 if is_more: return self.cursor.fetchall() else: return self.cursor.fetchone() # 官方推荐,一定要关闭 def close(self): self.cursor.close() self.conn.close() @staticmethod def create_mobile(): """ 随机生成11位手机号 :return: """ return '188' + ''.join(random.sample('0123456789', 8)) def is_existed_mobile(self, mobile): """ 判断手机号是否被注册 :param mobile: 待判断是否注册的手机号 :return: """ # sql = "select * from member where mobile_phone = %s;" sql = do_yaml.read_yaml('mysql', 'select_user_sql') # 已注册(run函数返回数据,即if表达式为真),返回True;查询不到结果(None),返回False if self.run(sql, args=[mobile], is_more=False): return True else: return False def create_not_exsited_mobile(self): """ 随机生成一个在数据库中不存在的手机号 :return: """ while True: # 随机生成一个手机号码 one_mobile = self.create_mobile() # 
如果找到了未注册的手机号,跳出循环 if not self.is_existed_mobile(one_mobile): break return one_mobile if __name__ == '__main__': do_mysql = HandleMysql() # 不建议放在main上面创建对象,因为有关闭 sql_1 = 'select * from member LIMIT 0,10;' # sql_2 = "select * from member where mobile_phone = '13888888889';" # # print(do_mysql.run(sql_1)) print(do_mysql.run(sql_1)) # print(do_mysql.create_not_exsited_mobile()) do_mysql.close()
[ "1498053436@qq.com" ]
1498053436@qq.com
9418bf8162cced953666e74e72750c54214a25e4
ca87c047f49a4aa893224466c4ea54e1801e0de2
/code/pywin32/excel/extract_excel_data.py
0659280bc294925a0cfacd0f9fb16e9b812f4540
[]
no_license
jpereiran/jpereiran-blog
a46504871dfbd1a007090d4a39fe51ddced0032c
08385e2e8b0a0440d1fda81293f8692c923174a1
refs/heads/master
2021-10-11T06:51:28.190808
2021-10-04T00:19:49
2021-10-04T00:19:49
192,151,831
1
1
null
null
null
null
UTF-8
Python
false
false
2,971
py
import glob
import io
import os
import sys

import win32com.client

# Start Excel via COM automation (visibility is optional; kept because it
# makes debugging a run much easier).
excel = win32com.client.Dispatch('Excel.Application')
excel.Visible = True

# All student workbooks live in this folder.
files = glob.glob("folder_path/*.xlsx")

# Redirect stdout to the report file.  The try/finally below guarantees the
# real stdout is restored and the report is closed even if a workbook fails
# half-way (the original left stdout redirected on any exception).
orig_stdout = sys.stdout
bk = io.open("Answers_Report.txt", mode="w", encoding="utf-8")
sys.stdout = bk
try:
    # Dump the answers of every workbook into the report.
    for file in files:
        # os.path.basename is robust to both / and \ separators, unlike the
        # original file.split('\\')[1].
        print(os.path.basename(file))
        wb_data = excel.Workbooks.Open(file)

        # Q1A: current mission and vision statements.
        mission = wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C6")
        vision = wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C7")
        print("Question 1A")
        print("Mission:", mission)
        print("Vision:", vision)
        print()

        # Q1B: two strategic objectives with their justifications.
        oe1 = wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C14")
        ju1 = wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("D14")
        oe2 = wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("C15")
        ju2 = wb_data.Worksheets("1ayb_MisiónyVisiónFutura").Range("D15")
        print("Question 1B")
        print("OEN1:", oe1, "- JUSTIF:", ju1)
        print("OEN2:", oe2, "- JUSTIF:", ju2)
        print()

        # Q2A: IS mission and vision.
        mision = wb_data.Worksheets("2a_MisionyVisionSI").Range("C6")
        vision = wb_data.Worksheets("2a_MisionyVisionSI").Range("C7")
        print("Question 2A")
        print("Mission SI:", mision)
        print("Vision SI:", vision)
        print()

        # Q3A: one group of cells (rows 5-12) per proposed IS project.
        print("Question 3A")
        for i in range(5, 13):
            proy = wb_data.Worksheets("3a_ProySI").Range("B" + str(i))
            desc = wb_data.Worksheets("3a_ProySI").Range("D" + str(i))
            mcfr = wb_data.Worksheets("3a_ProySI").Range("E" + str(i))
            tipo = wb_data.Worksheets("3a_ProySI").Range("F" + str(i))
            print("\tProyect:", proy)
            print("\tDesc:", desc)
            print("\tMacFarlan:", mcfr, "- Tipo", tipo)
        print()

        # Close the workbook, SAVING changes (SaveChanges=True).  The
        # original comment said "without saving", which contradicted the
        # code; behavior is kept, the comment is corrected.
        wb_data.Close(True)
finally:
    # Restore stdout and close the report.
    sys.stdout = orig_stdout
    bk.close()

# Build a fresh workbook that will serve as the grading template.
wb_template = excel.Workbooks.Add()

# Header row of the template.
wb_template.Worksheets(1).Range("A1").Value = 'File'
wb_template.Worksheets(1).Range("B1").Value = 'Q1A'
wb_template.Worksheets(1).Range("C1").Value = 'C1A'
wb_template.Worksheets(1).Range("D1").Value = 'Q1B'
wb_template.Worksheets(1).Range("E1").Value = 'C1A'
wb_template.Worksheets(1).Range("F1").Value = 'Q2A'
wb_template.Worksheets(1).Range("G1").Value = 'C2A'
wb_template.Worksheets(1).Range("H1").Value = 'Q3A'
wb_template.Worksheets(1).Range("I1").Value = 'C3A'

# One row per graded file (row 2 onward), with normalized separators.
for idx, arch in enumerate(files):
    wb_template.Worksheets(1).Range("A" + str(idx + 2)).Value = arch.replace('\\', '/')

# Save the grading template without the overwrite prompt.
excel.DisplayAlerts = False
wb_template.SaveAs(r'folder_path\Grades_Template.xlsx')

# Close the template and quit Excel.
wb_template.Close()
excel.DisplayAlerts = True
excel.Quit()
[ "noreply@github.com" ]
jpereiran.noreply@github.com
ba730b6a4b4982aa4ff13b5059b8122ad718b1b3
9d615b7174eecd4c8401513ca8cc21fc498fef5a
/api/views.py
457826ebef472c327e82113a59326e527f03c40c
[]
no_license
AlexeySub/prephack
ef13e637da181cd6e46d0ace20bd79c9438fba21
fe971421438d66f59cb2ce977dc736573c2e9ea3
refs/heads/master
2020-05-05T12:31:41.571292
2019-04-13T08:00:28
2019-04-13T08:00:28
180,032,825
0
0
null
2019-04-07T22:45:46
2019-04-07T22:45:46
null
UTF-8
Python
false
false
2,920
py
from api.models import User, UserAuthen, Message
from rest_framework import renderers, parsers
from django.views import View
from django.http import HttpResponse
from django.contrib.auth.hashers import make_password
from django.core import exceptions
from django import db
import jwt, time
from django.shortcuts import render


class UserRegister(View):
    """POST: register a new user from a JSON body."""

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        # SECURITY NOTE(review): a fixed salt ('123') defeats the purpose of
        # salting; it is kept only because existing rows were hashed with it
        # and changing it would invalidate all stored passwords.
        data['password'] = make_password(data['password'], salt='123')
        user = User(name=data['username'].lower(),
                    password=data['password'],
                    email=data['email'].lower(),
                    userType=data['usertype'])
        try:
            user.save()
        except db.IntegrityError:
            # Duplicate username/email.
            return HttpResponse('Conflict', status=409)
        return HttpResponse('OK', status=200)


class UserAuth(View):
    """POST: verify credentials and hand out a JWT auth token."""

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        print(data['login'].lower())
        try:
            user = User.objects.get(name=data['login'].lower())
        except exceptions.ObjectDoesNotExist:
            return HttpResponse('Unauthorized', status=401)
        # NOTE(review): comparing freshly computed hashes for equality only
        # works because the salt is constant; Django's check_password would
        # be the conventional approach.
        if user.password == make_password(data['password'], salt='123'):
            authtoken = jwt.encode(data, 'secret', algorithm='HS256').decode('UTF-8')
            userAuth = UserAuthen(user_id=user.id,
                                  token=authtoken,
                                  is_authenticated=True)
            userAuth.save()
            return HttpResponse(
                renderers.JSONRenderer().render({'auth_token': authtoken}))
        else:
            return HttpResponse('Unauthorized', status=401)


class UserLogout(View):
    """POST: invalidate a previously issued auth token."""

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        userAuth = UserAuthen.objects.get(token=data['auth_token'])
        userAuth.is_authenticated = False
        userAuth.save()
        return HttpResponse('Ok')


class Chat(View):
    """POST: store a chat message; GET: list a user's messages."""

    def post(self, request):
        data = parsers.JSONParser().parse(request)
        try:
            jwt.decode(data['auth_token'], 'secret', algorithm='HS256')
        except jwt.InvalidSignatureError:
            return HttpResponse('Unathorized', status=401)
        message = Message(
            user_id=UserAuthen.objects.get(token=data['auth_token']).user_id,
            text=data['text'])
        try:
            message.save()
        except db.IntegrityError:
            return HttpResponse('Conflict', status=409)
        return HttpResponse('OK', status=200)

    def get(self, request):
        data = parsers.JSONParser().parse(request)
        try:
            jwt.decode(data['auth_token'], 'secret', algorithm='HS256')
        except jwt.InvalidSignatureError:
            return HttpResponse('Unathorized', status=401)
        chat = Message.objects.all().filter(
            user_id=User.objects.get(name=data['login']).id)
        # Bug fix: a QuerySet has no `.text` attribute -- the original
        # `chat.text` raised AttributeError at runtime.  Serialize the
        # texts of the matching messages instead.
        return HttpResponse(
            renderers.JSONRenderer().render([m.text for m in chat]))


def index(request):
    """Serve the single-page front end."""
    return render(request, 'index.html')
[ "mr.317676@gmail.com" ]
mr.317676@gmail.com
4cf799ae31dfe4802a0d9299a2f9c9087c10afe6
0add969034a82912bc6e19abc427abe883ee65bb
/theta_en_time_polar.py
a9683111bde6bafb250a54492723f599975e5624
[]
no_license
Michael-Gong/New_LPI_python_script
eefd162fdbbc3c614c66e2b157ea5296e3bc8492
9de109c6f19aa60bdeaf102e9a1ec0baff5669ad
refs/heads/master
2020-03-28T16:06:09.631550
2020-02-01T08:21:17
2020-02-01T08:21:17
148,659,608
2
0
null
null
null
null
UTF-8
Python
false
false
7,511
py
#%matplotlib inline
#import sdf
import matplotlib
import matplotlib as mpl
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
# NOTE: `from matplotlib.mlab import bivariate_normal` was removed -- the
# symbol was never used here and no longer exists in matplotlib >= 3.1,
# so the import broke the whole script on modern installs.
from optparse import OptionParser
import os
from mpl_toolkits.mplot3d import Axes3D
import random
from mpl_toolkits import mplot3d
from matplotlib import rc
import matplotlib.transforms as mtransforms
import sys

# Font settings shared by all figures.
font = {'family': 'monospace',
        'color': 'black',
        'weight': 'normal',
        'size': 28,
        }
font2 = {'family': 'monospace',
         'color': 'black',
         'weight': 'normal',
         'size': 15,
         }
font_size = 28
font_size_2 = 15


def _white_low_end(base_cmap):
    """Return *base_cmap* with its lowest quarter faded linearly to white.

    Near-zero values then render as (near-)white instead of the colormap's
    dark end, which improves contrast on log-scaled intensity plots.
    (Factored out of three copy-pasted blocks in the original.)
    """
    upper = base_cmap(np.arange(256))
    lower = np.ones((int(256 / 4), 4))
    for i in range(3):
        lower[:, i] = np.linspace(1, upper[0, i], lower.shape[0])
    cmap = np.vstack((lower, upper))
    return matplotlib.colors.ListedColormap(cmap, name='myColorMap',
                                            N=cmap.shape[0])


mycolor_jet = _white_low_end(matplotlib.cm.jet)
mycolor_viridis = _white_low_end(matplotlib.cm.viridis)
mycolor_rainbow = _white_low_end(matplotlib.cm.rainbow)


def pxpy_to_energy(gamma, weight):
    """Histogram *weight* over *gamma* into 200 bins spanning [0, 20000].

    Returns (bin_centers, summed_weight_per_bin).  Bins are half-open
    [lo, hi), so values exactly equal to 20000 fall outside the last bin
    (kept identical to the original behaviour).
    """
    binsize = 200
    en_grid = np.linspace(50, 19950, 200)
    en_bin = np.linspace(0, 20000.0, 201)
    en_value = np.zeros_like(en_grid)
    for i in range(binsize):
        en_value[i] = sum(weight[(en_bin[i] <= gamma) & (gamma < en_bin[i + 1])])
    return (en_grid, en_value)


def theta_to_grid(theta, weight):
    """Histogram *weight* over *theta* (degrees) into 240 bins on [-120, 120].

    Returns (bin_centers, summed_weight_per_bin); bins are half-open
    [lo, hi), matching pxpy_to_energy.
    """
    binsize = 240
    theta_grid = np.linspace(-119.5, 119.5, 240)
    theta_bin = np.linspace(-120, 120, 241)
    theta_value = np.zeros_like(theta_grid)
    for i in range(binsize):
        theta_value[i] = sum(weight[(theta_bin[i] <= theta) & (theta < theta_bin[i + 1])])
    return (theta_grid, theta_value)


if __name__ == "__main__":
    part_number = 50000
    from_path = './p50000_no_T150/'
    # Count lines to recover the number of time steps per particle; `with`
    # closes the file handle (the original leaked it).
    with open(from_path + 't_tot_s.txt') as f:
        nsteps = int(sum(1 for line in f) / part_number)
    ntheta = 270
    ngg = 120
    from_path_list = ['./p50000_no_T150/', './p50000_rr_T150/', './p50000_qe_T150/']
    for i in range(np.size(from_path_list)):
        from_path = from_path_list[i]
        to_path = from_path
        # Time (in laser periods) and momenta, reshaped to (particle, step).
        t0 = np.loadtxt(from_path + 't_tot_s.txt') / 2 / np.pi
        px0 = np.loadtxt(from_path + 'px_tot_s.txt')
        py0 = np.loadtxt(from_path + 'py_tot_s.txt')
        t0 = np.reshape(t0, (part_number, nsteps))
        px0 = np.reshape(px0, (part_number, nsteps))
        py0 = np.reshape(py0, (part_number, nsteps))
        # Particle energy in GeV: gamma * m_e c^2, with m_e c^2 = 0.51e-3 GeV.
        gg0 = (px0**2 + py0**2 + 1)**0.5 * 0.51e-3
        # (The original assigned ww0 twice and never used it -- dead code
        # removed; the histogram below weights by gg0 directly.)
        theta0 = np.arctan2(py0, px0)
        theta_edges = np.linspace(-np.pi, np.pi, ntheta + 1)
        gg_edges = np.linspace(0.1, 6, ngg + 1)
        theta_edges_1 = np.linspace(-np.pi, np.pi, ntheta)
        gg_edges_1 = np.linspace(0.1, 6, ngg)
        for n in range(np.size(t0[0, :])):
            # 2-D histogram: energy (radial axis) x angle, weighted by energy.
            H, _, _ = np.histogram2d(gg0[:, n], theta0[:, n],
                                     [gg_edges, theta_edges],
                                     weights=gg0[:, n])
            print('Max H:', np.max(H))
            Theta, R = np.meshgrid(theta_edges_1, gg_edges_1)
            # Energy-weighted angular distribution, used to report <|theta|>.
            H_temp = np.sum(H[:, :] * R, 0)
            print('averaged |theta|=',
                  np.sum(H_temp * abs(theta_edges_1)) / np.sum(H_temp) / np.pi * 180)
            fig, ax = plt.subplots(1, 1, subplot_kw=dict(projection='polar'))
            ax.set_facecolor('whitesmoke')
            levels = np.logspace(1, 5, 101)
            # Blank out near-empty bins so they render as background.
            H[H < 0.01] = np.nan
            img = ax.pcolormesh(Theta, R, H,
                                norm=colors.LogNorm(vmin=0.01, vmax=1e3),
                                cmap='viridis')
            ax.tick_params(axis="x", pad=10)
            # Only the last case of each group of three keeps angle labels
            # (the unconditional set_xticklabels below re-labels all cases,
            # preserved from the original).
            if (i % 3 != 2):
                ax.set_xticklabels([])
            ax.set_xlabel(r'$\theta\ [^o]$', fontdict=font)
            l_r = np.array([0, 1, 2, 3])
            ax.set_rticks(l_r + 1)
            ax.set_yticklabels([])
            ax.set_rlim(0, 6)
            ax.set_rlabel_position(90)
            ax.set_xticklabels([0, 90, 180, 270])
            ax.tick_params(axis='x', labelsize=font_size)
            ax.tick_params(axis='y', labelsize=font_size_2)
            ax.grid(True, linestyle='--', linewidth=1.5, color='grey')
            plt.subplots_adjust(top=0.90, bottom=0.11, left=0.1, right=0.93,
                                hspace=0.10, wspace=0.05)
            fig = plt.gcf()
            fig.set_size_inches(6., 6.)
            fig.savefig(to_path + 'theta_en_dist_' + to_path[7:-1] + '_'
                        + str(n).zfill(4) + '.png', format='png', dpi=160)
            plt.close("all")
[ "noreply@github.com" ]
Michael-Gong.noreply@github.com
3ce6b9f20d08c14c582b9278fa91e5bb702c29b2
e3472add507c7fc16d013c2e318ca4e28158a13a
/tcc_tf/deterministic_alignment.py
4e8c87dc31aabe92912c8f4e9dd78be57da37059
[ "Apache-2.0" ]
permissive
JiaHeng-DLUT/tcc_Temporal_Cycle_Consistency_Loss.pytorch
ebd5a9eba26a2332d81743c95a460eef2c690cb4
61490f457b406366f847822962f607e4c3d3e1bd
refs/heads/main
2022-12-27T20:33:20.462667
2020-10-11T11:12:05
2020-10-11T11:12:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,375
py
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Deterministic alignment between all pairs of sequences in a batch."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v2 as tf

from .losses import classification_loss
from .losses import regression_loss


def pairwise_l2_distance(embs1, embs2):
    """Returns the [M, N] matrix of squared L2 distances between rows."""
    sq1 = tf.reshape(tf.reduce_sum(tf.square(embs1), 1), [-1, 1])
    sq2 = tf.reshape(tf.reduce_sum(tf.square(embs2), 1), [1, -1])
    cross = tf.matmul(embs1, embs2, False, True)
    # Clamp at zero: floating point error in the expansion
    # |a-b|^2 = |a|^2 + |b|^2 - 2ab can produce tiny negatives.
    return tf.maximum(sq1 + sq2 - 2.0 * cross, 0.0)


def get_scaled_similarity(embs1, embs2, similarity_type, temperature):
    """Similarity between every row of embs1 and every row of embs2.

    The raw similarity is divided by the embedding size (stabilizes
    optimization) and by the softmax temperature (controls how soft or
    hard the resulting alignment is).

    Args:
      embs1: Tensor of shape [M, D].
      embs2: Tensor of shape [N, D].
      similarity_type: 'l2' or 'cosine'.
      temperature: Float softmax temperature.

    Returns:
      [M, N] tensor of scaled similarities.
    """
    if similarity_type == 'cosine':
        sim = tf.matmul(embs1, embs2, transpose_b=True)
    elif similarity_type == 'l2':
        # Negate so that smaller distance means larger similarity.
        sim = -1.0 * pairwise_l2_distance(embs1, embs2)
    else:
        raise ValueError('similarity_type can either be l2 or cosine.')
    num_channels = tf.cast(tf.shape(embs1)[1], tf.float32)
    sim /= num_channels
    sim /= temperature
    return sim


def align_pair_of_sequences(embs1, embs2, similarity_type, temperature):
    """Cycle one embedding sequence through another and back.

    Soft-nearest neighbors of embs1 are computed in embs2; similarities
    from those neighbors back to embs1 form the logits.  A perfect cycle
    lands back on the starting index, which is what the one-hot labels
    encode.

    Args:
      embs1: Tensor of shape [M, D] (the starting sequence).
      embs2: Tensor of shape [N, D].
      similarity_type: 'l2' or 'cosine'.
      temperature: Float softmax temperature.

    Returns:
      logits: Pre-softmax similarity scores after cycling back.
      labels: One-hot ground truth marking each cycle's start index.
    """
    num_cycles = tf.shape(embs1)[0]
    # Forward pass: embs1 -> embs2.
    sim_forward = get_scaled_similarity(embs1, embs2, similarity_type,
                                        temperature)
    attention = tf.nn.softmax(sim_forward, axis=1)
    # Soft nearest neighbor of each embs1 row inside embs2.
    soft_neighbors = tf.matmul(attention, embs2)
    # Backward pass: neighbors -> embs1 gives the cycle-back logits.
    logits = get_scaled_similarity(soft_neighbors, embs1, similarity_type,
                                   temperature)
    labels = tf.one_hot(tf.range(num_cycles), num_cycles)
    return logits, labels


def compute_deterministic_alignment_loss(embs,
                                         steps,
                                         seq_lens,
                                         num_steps,
                                         batch_size,
                                         loss_type,
                                         similarity_type,
                                         temperature,
                                         label_smoothing,
                                         variance_lambda,
                                         huber_delta,
                                         normalize_indices):
    """Cycle-consistency loss over every ordered pair of sequences.

    Every sequence in the batch is aligned with every other one (never
    with itself), and the starting sequence matters, so a batch of N
    videos yields N * (N - 1) alignments.

    Args:
      embs: Tensor [N, T, D] of sequential embeddings.
      steps: Tensor [N, T] of frame/step indices for the embeddings.
      seq_lens: Tensor [N] of original sequence lengths.
      num_steps: Number of timesteps per embedded sequence.
      batch_size: Number of sequences in the batch.
      loss_type: 'classification', 'regression_mse', 'regression_mse_var'
        or 'regression_huber'.
      similarity_type: 'l2' or 'cosine'.
      temperature: Softmax temperature for the similarity distributions.
      label_smoothing: Smoothing factor for the classification loss
        (see https://arxiv.org/pdf/1701.06548.pdf).
      variance_lambda: Weight on the variance term of the regression loss;
        higher values prefer low-variance (peaky) alignments.
      huber_delta: Delta for the Huber regression loss.
      normalize_indices: If True, indices are normalized by sequence
        length, which keeps large frame indices numerically tame.

    Returns:
      Scalar loss tensor for the chosen cycle-consistency variant.
    """
    all_logits = []
    all_labels = []
    all_steps = []
    all_seq_lens = []
    for src in range(batch_size):
        for dst in range(batch_size):
            if src == dst:
                continue  # a sequence is never aligned with itself
            logits, labels = align_pair_of_sequences(embs[src], embs[dst],
                                                     similarity_type,
                                                     temperature)
            all_logits.append(logits)
            all_labels.append(labels)
            all_steps.append(tf.tile(steps[src:src + 1], [num_steps, 1]))
            all_seq_lens.append(tf.tile(seq_lens[src:src + 1], [num_steps]))

    logits = tf.concat(all_logits, axis=0)
    labels = tf.concat(all_labels, axis=0)
    steps = tf.concat(all_steps, axis=0)
    seq_lens = tf.concat(all_seq_lens, axis=0)

    if loss_type == 'classification':
        return classification_loss(logits, labels, label_smoothing)
    if 'regression' in loss_type:
        return regression_loss(logits, labels, num_steps, steps, seq_lens,
                               loss_type, normalize_indices, variance_lambda,
                               huber_delta)
    raise ValueError('Unidentified loss_type %s. Currently supported loss '
                     'types are: regression_mse, regression_huber, '
                     'classification.' % loss_type)
[ "noreply@github.com" ]
JiaHeng-DLUT.noreply@github.com
bb7be13aa1ae689ed05a4e1ef6b48ef41a63abf7
34edc8b21515817caa87aedeb07b87515c33ebd0
/shipping/serializers.py
c02da959ef30fc9d803c7ff4b5f9b8d0607690d0
[]
no_license
waelbeso/Ftrina
b20c277030132b195af621d9e739040d42943a9b
449868f8c095bb920a2aef2e2dc4cb80de8ec82a
refs/heads/master
2022-09-06T16:34:40.391965
2018-05-27T12:19:05
2018-05-27T12:19:05
134,336,376
0
0
null
null
null
null
UTF-8
Python
false
false
2,464
py
from shipping.models import Model, Zone
from rest_framework import serializers
from shop.models import WareHouse, Shop
from rest_framework.validators import UniqueTogetherValidator


class ModelSerializer(serializers.ModelSerializer):
    """Serializer for a shop's shipping models.

    The JSON `name` payload is validated by ShippingValidator, which
    enforces that a shop cannot own two shipping models with the same name.
    """

    def ShippingValidator(value):
        """Field-level validator for the JSON `name` payload.

        Expects a dict with keys: 'method' ('new'/'update'), 'name',
        'shop' and, for updates, 'pk'.  Raises ValidationError when the
        name is missing or duplicates another model of the same shop.
        """
        if "update" in value['method']:
            if not value['name']:
                raise serializers.ValidationError('Name is required.')
            try:
                # Single lookup -- the original issued this exact query
                # twice in a row.
                existing = Model.objects.get(name=value['name'],
                                             shop=value['shop'])
            except Model.DoesNotExist:
                return
            # A match is fine if it is the very record being updated.
            if str(existing.id) in value['pk']:
                return
            raise serializers.ValidationError('You have Shipping Model with this name.')
        if "new" in value['method']:
            if not value['name']:
                raise serializers.ValidationError('Name is required.')
            try:
                Model.objects.get(name=value['name'], shop=value['shop'])
            except Model.DoesNotExist:
                return
            raise serializers.ValidationError('You have Shipping Model with this name.')

    name = serializers.JSONField(required=True, validators=[ShippingValidator])
    shop = serializers.PrimaryKeyRelatedField(queryset=Shop.objects.filter(),
                                              read_only=False)
    ware_house = serializers.PrimaryKeyRelatedField(required=True,
                                                    queryset=WareHouse.objects.filter(),
                                                    read_only=False)

    class Meta:
        model = Model
        fields = ('id', 'name', 'shop', 'ware_house', 'zone')

    def update(self, instance, validated_data):
        """Update name/warehouse in place; the shop field is never changed."""
        # NOTE(review): PrimaryKeyRelatedField normally resolves the posted
        # pk to a WareHouse instance already, so re-fetching by pk looks
        # redundant -- confirm how callers invoke this before simplifying.
        ware_house = WareHouse.objects.get(pk=validated_data.pop('ware_house'))
        instance.name = validated_data.get('name', instance.name)
        instance.ware_house = ware_house
        instance.save()
        return instance


class ZoneSerializer(serializers.ModelSerializer):
    """Serializer for shipping zones.

    (model, country, province) must be unique, enforced by the
    UniqueTogetherValidator in Meta.
    """

    model = serializers.PrimaryKeyRelatedField(queryset=Model.objects.filter(),
                                               read_only=False)
    country = serializers.CharField(required=True)
    province = serializers.CharField(required=True)
    price = serializers.DecimalField(max_digits=19, decimal_places=2,
                                     coerce_to_string=None,
                                     max_value=None, min_value=None)
    price_currency = serializers.CharField(max_length=None, min_length=None,
                                           allow_blank=False)

    class Meta:
        model = Zone
        fields = ('id', 'model', 'country', 'province', 'price', 'price_currency')
        validators = [
            UniqueTogetherValidator(
                queryset=Zone.objects.all(),
                fields=('model', 'country', 'province')
            )
        ]
[ "waelabbas@live.com" ]
waelabbas@live.com
361e4d07975ca9bfa13fde5395e05cfab57a2474
847b39a71c85aeea7e3812f15f9bd5811edbec4d
/main2.py
b9ce4d61cef2890774a5093914c407f43e1f4fa5
[]
no_license
arsalansaad/webcrawler
559eee2c95e0e4e3699e7788958e57f57c6378ed
2c70b769ce572d010bb1314303ad786de2304bac
refs/heads/master
2021-01-19T15:02:52.725367
2017-08-21T10:00:27
2017-08-21T10:00:27
100,939,267
0
0
null
null
null
null
UTF-8
Python
false
false
443
py
import requests from bs4 import BeautifulSoup url = "http://www.hindustantimes.com/editorials/" sourcecode = requests.get(url).text soup = BeautifulSoup(sourcecode, "html.parser") for link in soup.findAll("div",{ "class": "media-heading headingfour"}): print(link.text) for item in link.findAll('a'): print(item.get('href')) # for link in soup.findAll("div",class_="media-heading headingfour"): # print(link.get('href'))
[ "arsalansaad.iitkgp@gmail.com" ]
arsalansaad.iitkgp@gmail.com
c4cc3eae8ce8dc40427cfc6263c0d8d9207e33ce
e2590e0a78046a22131b69c76ebde21bf042cdd1
/ABC201_300/ABC275/A.py
6bc5a95d16891d1502a3adf5fbd2ff8aa0b3a6a3
[]
no_license
masato-sso/AtCoderProblems
b8e23941d11881860dcf2942a5002a2b19b1f0c8
fbc02e6b7f8c6583e5a4e5187463e0001fc5f4d8
refs/heads/main
2023-01-22T23:57:58.509585
2023-01-21T14:07:47
2023-01-21T14:07:47
170,867,816
0
0
null
null
null
null
UTF-8
Python
false
false
179
py
N = int(input()) H = list(map(int, input().split())) maxValue = max(H) ans = 0 for idx,h in enumerate(H): if(h == maxValue): ans = idx + 1 break print(ans)
[ "masato@seijinnoMacBook-Pro-2.local" ]
masato@seijinnoMacBook-Pro-2.local
31e5d88aad90549955249b4cb57b003d157e5527
620ca56701bce0add202f3cbe7c62036e4b1e359
/Course_3/Week_1/validations2.py
77d004fd5a9ff1e3999c8772722fd59fa072d73d
[ "MIT" ]
permissive
gpastor3/Google-ITAutomation-Python
1f52dbff0b8f0832ab3fea4ac9c468c667363e1a
6027750a33e8df883d762223bb0c4a5a95395bc0
refs/heads/main
2023-04-05T00:29:10.902116
2021-02-04T02:08:06
2021-02-04T02:08:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
928
py
#!/usr/bin/env python3 """ This script is used for course notes. Author: Erick Marin Date: 01/06/2020 """ def validate_user(username, minlen): # An alternative to the raise keyword that we can use for situations where # we want to check that our code behaves the way it should particularly # when we want to avoid situations that should never happen. This is the # assert keyword. This keyword tries to verify that a conditional # expression is true, and if it's false it raises an assertion error with # the indicated message. if type(username) != str: raise TypeError("username must be a string") if minlen < 1: raise ValueError("minlen must be at least 1") if len(username) < minlen: return False if not username.isalnum(): return False # Usernames can't begin with a number if username[0].isnumeric(): return False return True
[ "emarin.iot@gmail.com" ]
emarin.iot@gmail.com
87542af4bb98ec1f4f2dd18363ced2a123b396b2
f0eb4d12fdac429d5620c0823af0b0be54d9ae3a
/KerasTracker/QualitativeResultsFigure.py
2c9149b99f691939d72647c667e16923e340162a
[ "Apache-2.0" ]
permissive
felixVil/LDASegment
817cf7a5b8d101c7879b293d464c0428e37a776b
25f59c9f43c76e64c0a1e4131fa3c12bab60b716
refs/heads/master
2023-03-28T03:23:20.419209
2021-03-30T23:10:35
2021-03-30T23:10:35
322,999,160
0
0
null
null
null
null
UTF-8
Python
false
false
3,118
py
from UtilFunctions import *
import os
import numpy as np


def find_result_per_sequence_tracker_ind(sequence, tracker, ind):
    """Return the polygon reported by *tracker* for frame *ind* of *sequence*.

    Reads line *ind* of the VOT 'baseline' results file and parses it as a
    comma-separated list of floats (4 values = axis-aligned rect, 8 = polygon,
    fewer = failure frame).
    """
    result_sequence_path = os.path.join(results_path, tracker, 'baseline')
    result_filename = '%s_001.txt' % sequence
    result_filepath = os.path.join(result_sequence_path, sequence, result_filename)
    # `with` guarantees the handle is closed even if parsing below raises.
    with open(result_filepath, 'r') as file_id:
        lines = file_id.readlines()
    # Bug fix: str.replace returns a new string and the original discarded
    # the result, so the trailing newline was never actually removed
    # (float() happened to tolerate it).
    polygon_line = lines[ind].strip()
    return np.array([float(element) for element in polygon_line.split(',')])


sequence_path = 'D:/Another_D/E_backup/my homework/BGU Computer Vision thesis/vot-toolkit-master-2019/vot-workspace/sequences'
results_path = 'D:/Another_D/E_backup/my homework/BGU Computer Vision thesis/results_on_tracker_qualitatively_evaluated'
overlay_images_path = 'overlay_images'
if not os.path.exists(overlay_images_path):
    os.makedirs(overlay_images_path)

# Frames of interest per sequence, plus the overlay line width to draw with.
sequences_dict = {'zebrafish1': {'inds': [14, 31, 57], 'width': 2},
                  'fish1': {'inds': [143, 278, 316], 'width': 2},
                  'gymnastics2': {'inds': [178, 194, 206], 'width': 9},
                  'book': {'inds': [43, 82, 104], 'width': 2},
                  'conduction1': {'inds': [42, 187], 'width': 2},
                  'dinosaur': {'inds': [220, 277], 'width': 9}}
# RGBA overlay color per tracker.
color_dict = {'SiamMask': (255, 255, 255, 128),
              'UPDT': (255, 0, 255, 128),
              'ATOM': (255, 0, 0, 128),
              'LADCF': (0, 0, 255, 128),
              'LDATrackerDenseNetDilate': (0, 255, 0, 128)}

for sequence in sequences_dict.keys():
    line_width = sequences_dict[sequence]['width']
    poi_inds = sequences_dict[sequence]['inds']
    frames_folder = os.path.join(sequence_path, sequence, 'color')
    for ind in poi_inds:
        poly_arrays = []
        frames_file = os.path.join(frames_folder, '%08d.jpg' % (ind + 1))
        overlay_image_file = os.path.join(overlay_images_path,
                                          '%s_%08d.jpg' % (sequence, ind + 1))
        # Draw every tracker's polygon onto the same overlay image,
        # feeding each draw's output back in as the next input.
        for tracker in color_dict.keys():
            poly_array = find_result_per_sequence_tracker_ind(sequence, tracker, ind)
            if len(poly_array) < 4:
                continue  # tracker is during failure.
            elif len(poly_array) == 4:
                # Standard axis-aligned rectangle -> 4-corner polygon.
                poly_array = convert_rect_to_real_poly(poly_array)
            poly_arrays.append(poly_array)
            # draw_beatiful_polygon / Image come from UtilFunctions
            # (star import) -- presumably PIL-based; verify there.
            draw_beatiful_polygon(poly_array, frames_file, overlay_image_file,
                                  color_dict[tracker], line_width)
            frames_file = overlay_image_file
        # Crop tightly around all drawn polygons and save as PNG.
        img_overlay = read_image(overlay_image_file)
        crop_rect = create_tight_rect_around_locations(poly_arrays, img_overlay.shape)
        img_overlay_cropped = img_overlay[crop_rect[0]:crop_rect[1],
                                          crop_rect[2]:crop_rect[3]]
        img_overlay_cropped_pil = Image.fromarray(img_overlay_cropped, 'RGB')
        overlay_cropped_filename = 'cropped_%s_%08d.png' % (sequence, ind + 1)
        overlay_cropped_filepath = os.path.join(overlay_images_path,
                                                overlay_cropped_filename)
        img_overlay_cropped_pil.save(overlay_cropped_filepath, "PNG")
[ "felixvil@post.bgu.ac.il" ]
felixvil@post.bgu.ac.il
16468fa7074d1375000d5ddc4377969b545f6089
2e9589362c3f53841c101de62e714a5bac3d8096
/dataset_generator/word_embeddings/document_featurizer.py
b2abcd83bcf17e7f6052f59c79b1425136fb9bf4
[]
no_license
nikhilsu/CitationRecommender
8b61bd44c3884de010d698dd013938d6cc13a6dc
44666e57664980ab6476182aa9a572b7ab68fa07
refs/heads/master
2023-04-11T01:07:05.379559
2021-05-21T14:31:53
2021-05-31T06:15:45
183,876,181
0
0
null
2023-03-25T01:08:58
2019-04-28T07:59:19
Python
UTF-8
Python
false
false
4,672
py
import numpy as np from keras.preprocessing.sequence import pad_sequences from sklearn.feature_extraction.text import CountVectorizer from tqdm import tqdm class DocumentFeaturizer(object): STOPWORDS = { 'abstract', 'about', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for', 'from', 'how', 'in', 'is', 'it', 'of', 'on', 'or', 'that', 'the', 'this', 'to', 'was', 'what', 'when', 'where', 'who', 'will', 'with', 'the', 'we', 'our', 'which' } def __init__(self, raw_dataset, opts): self.raw_dataset = raw_dataset self.max_abstract_len = opts.max_abstract_len self.max_title_len = opts.max_title_len title_abstract_of_training_data = self.raw_dataset.fetch_collated_training_text(opts.train_split) max_df_frac = 0.90 self.count_vectorizer = CountVectorizer( max_df=max_df_frac, max_features=opts.max_features, stop_words=self.STOPWORDS ) self.count_vectorizer.fit(tqdm(title_abstract_of_training_data, desc='Building Count-Vectorizer')) self.word_to_index = dict((word, index + 1) for index, word in enumerate(self.count_vectorizer.vocabulary_)) self.n_features = 1 + len(self.word_to_index) opts.n_features = self.n_features def __index_of_word(self, word): return self.word_to_index[word] if word in self.word_to_index else None def __word_to_index_features(self, document): x_indexes = [] for words in document: indexes = [] for word in words: index = self.__index_of_word(word) if index: indexes.append(index) x_indexes.append(indexes) return x_indexes def __extract_textual_features(self, text, max_len): return np.asarray(pad_sequences(self.__word_to_index_features([text]), max_len)[0], dtype=np.int32) @staticmethod def __extract_citation_features(documents): return np.log([max(doc['in_citation_count'] - 1, 0) + 1 for doc in documents]) @staticmethod def __extract_common_types_features(d_qs, candidates): common_types = [np.intersect1d(d_q, candidate) for (d_q, candidate) in zip(d_qs, candidates)] common_types_features = np.zeros_like(d_qs) for i, intersection in enumerate(common_types): 
common_types_features[i, :len(intersection)] = intersection return common_types_features @staticmethod def __extract_sim_scores(d_qs, candidates, candidate_selector): return np.asarray( [candidate_selector.cosine_similarity(d_q, candidate) for (d_q, candidate) in zip(d_qs, candidates)]) def featurize_documents(self, documents): features = { 'title': np.asarray([self.__extract_textual_features(doc['title'], self.max_title_len) for doc in documents]), 'abstract': np.asarray( [self.__extract_textual_features(doc['abstract'], self.max_abstract_len) for doc in documents]) } return features def extract_features(self, d_qs, candidates, candidate_selector=None): for_nn_rank = candidate_selector is not None d_q_features = self.featurize_documents(d_qs) candidate_features = self.featurize_documents(candidates) features = { 'query-title-text': d_q_features['title'], 'query-abstract-text': d_q_features['abstract'], 'candidate-title-text': candidate_features['title'], 'candidate-abstract-text': candidate_features['abstract'] } if for_nn_rank: citation_features = DocumentFeaturizer.__extract_citation_features(candidates) common_title = DocumentFeaturizer.__extract_common_types_features(d_q_features['title'], candidate_features['title']) common_abstract = DocumentFeaturizer.__extract_common_types_features(d_q_features['abstract'], candidate_features['abstract']) similarity_score_features = DocumentFeaturizer.__extract_sim_scores(d_qs, candidates, candidate_selector) features['query-candidate-common-title'] = common_title features['query-candidate-common-abstract'] = common_abstract features['candidate-citation-count'] = citation_features features['similarity-score'] = similarity_score_features return features
[ "nikhilsulegaon@gmail.com" ]
nikhilsulegaon@gmail.com
4c481d87668445176f6e0368afd5521ee3954e1e
395828af169b8d808057d16a399db7ef0f3bd11c
/first_django/blog/migrations/0003_auto_20200204_2042.py
29a281decf055703fc2480a376ed28e6059e50b4
[]
no_license
yeonghan/yozora
88103cea289dfd4cbad8a120d822db83bf52a1eb
58174a2a7d5dab04f5736243e0789d628d250fc7
refs/heads/master
2020-05-23T01:41:30.325966
2020-02-24T15:51:22
2020-02-24T15:51:22
47,525,950
0
0
null
null
null
null
UTF-8
Python
false
false
357
py
# Generated by Django 3.0.2 on 2020-02-04 11:42 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('blog', '0002_auto_20200128_2032'), ] operations = [ migrations.RenameField( model_name='comment', old_name='Post', new_name='post', ), ]
[ "dudgks29@naver.com" ]
dudgks29@naver.com
4122d8dfdf03cb8b82f3ada1eac86eba2d701a0f
72e76a8eeb3afbbd2d77eb79047410e3944947c5
/datasets.py
ac3d0e16e9df457d922d542baa0547f0f858d3d7
[]
no_license
Tirthraj93/Topic-Modelling-and-Clustering
f6a042141ed54f65ce00cd7c51dc138e72ba2f5a
4a7edaa3845cf18f6bbd57fee740a6bd40c9cbfe
refs/heads/master
2020-04-17T05:31:55.423827
2016-08-30T20:48:15
2016-08-30T20:48:15
66,976,601
0
0
null
null
null
null
UTF-8
Python
false
false
365
py
from lda.utils import ldac2dtm def load_ldac(file_path): return ldac2dtm(open(file_path), offset=0) def load_vocab(file_path): with open(file_path) as f: vocab = tuple(f.read().split()) return vocab def load_titles(file_path): with open(file_path) as f: titles = tuple(line.strip() for line in f.readlines()) return titles
[ "noreply@github.com" ]
Tirthraj93.noreply@github.com
ae8caa3e5755b5b934074980647e9b8a044a2e9a
2d930aadf19b2ad6ea49725099d2f37475cd57f8
/test/functional/wallet-dump.py
c3f723a19bbd46584fb33bce6dba37487abcdcbe
[ "MIT" ]
permissive
stratton-oakcoin/oakcoin
ea83774c9f6ea64adb8832770e6219ffb31edef6
fe53193a50bd3674211448f1dcc39c6f9f042bb2
refs/heads/master
2021-01-20T13:22:05.877005
2017-05-07T10:09:57
2017-05-07T10:09:57
90,477,972
1
2
null
2017-05-07T10:09:57
2017-05-06T16:58:05
C++
UTF-8
Python
false
false
4,770
py
#!/usr/bin/env python3 # Copyright (c) 2016 The Oakcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the dumpwallet RPC.""" from test_framework.test_framework import OakcoinTestFramework from test_framework.util import (start_nodes, start_node, assert_equal, oakcoind_processes) def read_dump(file_name, addrs, hd_master_addr_old): """ Read the given dump, count the addrs that match, count change and reserve. Also check that the old hd_master is inactive """ with open(file_name, encoding='utf8') as inputfile: found_addr = 0 found_addr_chg = 0 found_addr_rsv = 0 hd_master_addr_ret = None for line in inputfile: # only read non comment lines if line[0] != "#" and len(line) > 10: # split out some data key_label, comment = line.split("#") # key = key_label.split(" ")[0] keytype = key_label.split(" ")[2] if len(comment) > 1: addr_keypath = comment.split(" addr=")[1] addr = addr_keypath.split(" ")[0] keypath = None if keytype == "inactivehdmaster=1": # ensure the old master is still available assert(hd_master_addr_old == addr) elif keytype == "hdmaster=1": # ensure we have generated a new hd master key assert(hd_master_addr_old != addr) hd_master_addr_ret = addr else: keypath = addr_keypath.rstrip().split("hdkeypath=")[1] # count key types for addrObj in addrs: if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=": found_addr += 1 break elif keytype == "change=1": found_addr_chg += 1 break elif keytype == "reserve=1": found_addr_rsv += 1 break return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret class WalletDumpTest(OakcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = False self.num_nodes = 1 self.extra_args = [["-keypool=90"]] def setup_network(self, split=False): # Use 1 minute timeout because the initial getnewaddress RPC can take # longer than the default 30 seconds due 
to an expensive # CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in # the test often takes even longer. self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60) def run_test (self): tmpdir = self.options.tmpdir # generate 20 addresses to compare against the dump test_addr_count = 20 addrs = [] for i in range(0,test_addr_count): addr = self.nodes[0].getnewaddress() vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath addrs.append(vaddr) # Should be a no-op: self.nodes[0].keypoolrefill() # dump unencrypted wallet self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump") found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \ read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None) assert_equal(found_addr, test_addr_count) # all keys must be in the dump assert_equal(found_addr_chg, 50) # 50 blocks where mined assert_equal(found_addr_rsv, 90*2) # 90 keys plus 100% internal keys #encrypt wallet, restart, unlock and dump self.nodes[0].encryptwallet('test') oakcoind_processes[0].wait() self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0]) self.nodes[0].walletpassphrase('test', 10) # Should be a no-op: self.nodes[0].keypoolrefill() self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump") found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \ read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc) assert_equal(found_addr, test_addr_count) assert_equal(found_addr_chg, 90*2 + 50) # old reserve keys are marked as change now assert_equal(found_addr_rsv, 90*2) if __name__ == '__main__': WalletDumpTest().main ()
[ "s.matthew.english@gmail.com" ]
s.matthew.english@gmail.com
22ff9336b110cd98c8003d9035ac0470e51ce429
1cd3305944de3d5b76ed91c9e0ac7e26b82f47ff
/2019/src/j4_s1.py
c8c5708d08518a425e322647be8957ec162d4269
[ "Apache-2.0" ]
permissive
coachlivinglegend/CCC
4b0b6d4bbe031de88275a2834a12ae74fa7bc54e
6f98e81c7fef38bf70e68188db38863cc0cba2f4
refs/heads/master
2023-04-18T00:41:23.522774
2021-05-04T12:55:02
2021-05-04T12:55:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
772
py
import collections import itertools import functools import math import re import bisect import random rint = lambda: int(input()) rstr = lambda: input() rints = lambda: list(map(int, input().split())) rstrs = lambda: input().split() wmat = lambda mat, sep: '\n'.join(sep.join(map(str, row)) for row in mat) warr = lambda arr, sep: sep.join(map(str, arr)) wl = lambda sep, *arr: sep.join(map(str, arr)) ctoi = lambda x : ord(x) - ord('a') itoc = lambda x : chr(x + ord('a')) grid = [ [[[1,2],[3,4]], [[2,1],[4,3]]], [[[3,4],[1,2]], [[4,3],[2,1]]] ] def main(): s = rstr() h = v = 0 for ch in s: if ch == 'H': h = 1 - h else: v = 1 - v print(wmat(grid[h][v], ' ')) if __name__ == '__main__': main()
[ "kylexie186@gmail.com" ]
kylexie186@gmail.com
3753ecbbf592ed7d15df03a2549a45b42ac22766
224a906e91c7c1cc8778466ef785060871ede67d
/name_to_job.py
4d1e39731dce00448b3ebe524814d3cbaed30fb8
[]
no_license
Pavanisoma/Salary-Prediction-from-Name-Team-Competition-
353c7fda0df873e7e41d8283929c9ed49fdc97e9
1ddf618cbc4df41171a1b87698c0a7b45f0eb574
refs/heads/master
2021-08-08T21:10:57.862710
2020-06-14T19:28:51
2020-06-14T19:28:51
191,850,482
0
0
null
null
null
null
UTF-8
Python
false
false
12,272
py
import numpy as np from keras.utils import to_categorical import keras.backend as K import matplotlib.pyplot as plt import random from tqdm import tqdm import nltk import os import time import tensorflow as tf import csv from nltk.stem.wordnet import WordNetLemmatizer from nltk import word_tokenize tf.enable_eager_execution() import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split def pre_processed_job_title(title): title = title.lower() lmtzr = WordNetLemmatizer() lemmatized = [lmtzr.lemmatize(word) for word in word_tokenize(title)] title = '<start> ' + ' '.join(lemmatized) + ' <end>' return title def pre_processed_name(first_name, last_name): first_name = first_name.replace(',', '').replace('"', '').replace('\'', '').lower() last_name = last_name.replace(',', '').replace('"', '').replace('\'', '').lower() return '<start> ' + first_name + ' ' + last_name + ' <end>' def load_dataset(file="processed_bayarea.csv"): name_list = [] job_list = [] name_job_pairs = [] with open(file, 'r') as csvfile: csvreader = csv.reader(csvfile) next(csvreader) for row in csvreader: name = pre_processed_name(row[2], row[1]) job = pre_processed_job_title(row[0]) job_list.append(job) name_list.append(name) name_job_pairs.append([name, job]) return name_list, job_list, name_job_pairs class LanguageIndex(): def __init__(self, lang): self.lang = lang self.word2idx = {} self.idx2word = {} self.vocab = set() def create_index(self): for phrase in self.lang: self.vocab.update(phrase.split(' ')) self.vocab = sorted(self.vocab) self.word2idx['<pad>'] = 0 for index, word in enumerate(self.vocab): self.word2idx[word] = index + 1 for word, index in self.word2idx.items(): self.idx2word[index] = word def max_length(tensor): return max(len(t) for t in tensor) def load_sequence_data(): # creating cleaned input, output pairs name_list, job_list, pairs = load_dataset() # index language using the class defined above inp_lang = LanguageIndex([name for name, job in pairs]) 
targ_lang = LanguageIndex([job for name, job in pairs]) inp_lang.create_index() targ_lang.create_index() # Vectorize the input and target languages # name input_tensor = [[inp_lang.word2idx[s] for s in name.split(' ')] for name, job in pairs] # job_tite target_tensor = [[targ_lang.word2idx[s] for s in job.split(' ')] for name, job in pairs] # Calculate max_length of input and output tensor # Here, we'll set those to the longest sentence in the dataset max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor) # Padding the input and output tensor to the maximum length input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor, maxlen=max_length_inp, padding='post') target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor, maxlen=max_length_tar, padding='post') return input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_tar def gru(units): if tf.test.is_gpu_available(): return tf.keras.layers.CuDNNGRU(units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform') else: return tf.keras.layers.GRU(units, return_sequences=True, return_state=True, recurrent_activation='sigmoid', recurrent_initializer='glorot_uniform') class Encoder(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz): super(Encoder, self).__init__() self.batch_sz = batch_sz self.enc_units = enc_units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = gru(self.enc_units) def call(self, x, hidden): x = self.embedding(x) output, state = self.gru(x, initial_state = hidden) return output, state def initialize_hidden_state(self): return tf.zeros((self.batch_sz, self.enc_units)) class Decoder(tf.keras.Model): def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz): super(Decoder, self).__init__() self.batch_sz = batch_sz self.dec_units = dec_units self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim) self.gru = 
gru(self.dec_units) self.fc = tf.keras.layers.Dense(vocab_size) # used for attention self.W1 = tf.keras.layers.Dense(self.dec_units) self.W2 = tf.keras.layers.Dense(self.dec_units) self.V = tf.keras.layers.Dense(1) def call(self, x, hidden, enc_output): hidden_with_time_axis = tf.expand_dims(hidden, 1) score = self.V(tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis))) attention_weights = tf.nn.softmax(score, axis=1) context_vector = attention_weights * enc_output context_vector = tf.reduce_sum(context_vector, axis=1) x = self.embedding(x) x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1) # passing the concatenated vector to the GRU output, state = self.gru(x) output = tf.reshape(output, (-1, output.shape[2])) x = self.fc(output) return x, state, attention_weights def initialize_hidden_state(self): return tf.zeros((self.batch_sz, self.dec_units)) def loss_function(real, pred): mask = 1 - np.equal(real, 0) loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask return tf.reduce_mean(loss_) def evaluate(last_name, first_name, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ): attention_plot = np.zeros((max_length_targ, max_length_inp)) units = 1024 sentence = pre_processed_name(first_name, last_name) inputs = [] for i in sentence.split(' '): if i in inp_lang.word2idx: inputs.append(inp_lang.word2idx[i]) inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post') inputs = tf.convert_to_tensor(inputs) result = '' hidden = [tf.zeros((1, units))] enc_out, enc_hidden = encoder(inputs, hidden) dec_hidden = enc_hidden dec_input = tf.expand_dims([targ_lang.word2idx['<start>']], 0) for t in range(max_length_targ): predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out) # storing the attention weights to plot later on attention_weights = tf.reshape(attention_weights, (-1, )) attention_plot[t] = attention_weights.numpy() 
predicted_id = tf.argmax(predictions[0]).numpy() result += targ_lang.idx2word[predicted_id] + ' ' if targ_lang.idx2word[predicted_id] == '<end>': return result, sentence, attention_plot # the predicted ID is fed back into the model dec_input = tf.expand_dims([predicted_id], 0) return result, sentence, attention_plot def plot_attention(attention, sentence, predicted_sentence): fig = plt.figure(figsize=(10,10)) ax = fig.add_subplot(1, 1, 1) ax.matshow(attention, cmap='viridis') fontdict = {'fontsize': 14} ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90) ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict) plt.show() def translate(last_name, first_name, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ): result, sentence, attention_plot = evaluate(last_name, first_name, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ) # print('Input: {}'.format(sentence)) # print('Predicted translation: {}'.format(result)) # attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))] # plot_attention(attention_plot, sentence.split(' '), result.split(' ')) return result def main(): input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_targ = load_sequence_data() # Creating training and validation sets using an 80-20 split input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2) BUFFER_SIZE = len(input_tensor_train) BATCH_SIZE = 64 N_BATCH = BUFFER_SIZE//BATCH_SIZE embedding_dim = 256 units = 1024 vocab_inp_size = len(inp_lang.word2idx) vocab_tar_size = len(targ_lang.word2idx) dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE) dataset = dataset.batch(BATCH_SIZE, drop_remainder=True) encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE) decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE) optimizer = 
tf.train.AdamOptimizer() checkpoint_dir = './training_checkpoints' checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") checkpoint = tf.train.Checkpoint(optimizer=optimizer, encoder=encoder, decoder=decoder) EPOCHS = 10 for epoch in range(EPOCHS): start = time.time() hidden = encoder.initialize_hidden_state() total_loss = 0 for (batch, (inp, targ)) in enumerate(dataset): loss = 0 with tf.GradientTape() as tape: enc_output, enc_hidden = encoder(inp, hidden) dec_hidden = enc_hidden dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1) # Teacher forcing - feeding the target as the next input for t in range(1, targ.shape[1]): # passing enc_output to the decoder predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output) loss += loss_function(targ[:, t], predictions) # using teacher forcing dec_input = tf.expand_dims(targ[:, t], 1) batch_loss = (loss / int(targ.shape[1])) total_loss += batch_loss variables = encoder.variables + decoder.variables gradients = tape.gradient(loss, variables) optimizer.apply_gradients(zip(gradients, variables)) if batch % 100 == 0: print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1, batch, batch_loss.numpy())) # saving (checkpoint) the model every 2 epochs if (epoch + 1) % 2 == 0: checkpoint.save(file_prefix = checkpoint_prefix) print('Epoch {} Loss {:.4f}'.format(epoch + 1, total_loss / N_BATCH)) print('Time taken for 1 epoch {} sec\n'.format(time.time() - start)) print(tf.train.latest_checkpoint(checkpoint_dir)) checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir)) print(translate('chang', 'shih yu', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)) print(translate('bui', 'xuan loc', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)) if __name__ == '__main__': main()
[ "noreply@github.com" ]
Pavanisoma.noreply@github.com
cb1af45e6576cfaa0436075b85515ae1b2b235e3
bd5e4b1317e741e2c241a7285f632e2beefb8cf4
/bdd_example/settings.py
8e9ebdb9d57d1d0f441518deb6fff4b0a41d7f97
[]
no_license
asleao/bdd-django-tutorial-2
c7e5c19af55c31097d7f705568f7b18b8405acd3
fe79cf4dded6328290ce34a8b221b8276bc733cb
refs/heads/master
2020-12-05T17:43:59.062935
2016-08-23T14:44:35
2016-08-23T14:44:35
66,375,593
0
1
null
null
null
null
UTF-8
Python
false
false
3,143
py
""" Django settings for bdd_example project. Generated by 'django-admin startproject' using Django 1.10. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '_^o_m+ne4=ht(efkt$dpd40-%px!qs++w#g(x8$0%2aa_qj)2@' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'behave_django', 'login', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'bdd_example.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'bdd_example.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 
'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/'
[ "andre.sp.leao@gmail.com" ]
andre.sp.leao@gmail.com
b01d1738f1683a3b3cff5b6198a8926953464429
084916df5eff5e2a3f19ac5d86c401b467cb3969
/assignment1/bfs-wikipedia.py
484c57e34cfa312faa7ec9728cc7539fd575ab0e
[]
no_license
cvalenzuela/NOC_Intelligence-Learning
2850b87c2ead8be97be193375129eee2ea16c0e8
9074fae4f1e20d7f93ef6e1ef9f75b92374d2f51
refs/heads/master
2021-01-23T04:20:26.702674
2017-04-30T03:50:19
2017-04-30T03:50:19
86,189,486
0
0
null
null
null
null
UTF-8
Python
false
false
4,938
py
# coding: utf-8 # An implementation of bfs based on Grokking Algorithms chapter 6 and NOC-S17-2-Intelligence-Learning bfs examples. # Using a Breadth-First Algorithm (BFS), this script will search for the shortest way to get from one article in Wikipedia to a specific word or term used in another article. # Cristóbal Valenzuela # ------------- from collections import deque import wikipedia import time import sys wikipedia.set_lang("en") print ''' ----- This script will search for the most efficient way to get from one article in Wikipedia to a another article that has a specific term in it. ----- ''' begin = raw_input("Ready? (y/n): ") if begin != 'y': print 'Well....Hasta la vista' sys.exit() else: pass start_article = raw_input("Enter the name of a Wikipedia article from where to start (ie: 'Paul Cezanne'): ") search_term = raw_input("Enter a term or word of search for (ie: 'Gertrude Stein'): ") print(''' Awesome!, I will search for the most efficient way to start from the %s Wikipedia article and find the path to another article that talks about %s. ''' % (start_article, search_term)) raw_input("This may take me a while, so chill...Ok?") #start_article = "New York University" # starting point #search_term = 'Chile' # end point # define the graph and starting point graph = {} # create a Node class class Node: def __init__(self, name, parent): self.name = name self.parent = parent self.article = 'None' self.content = 'None' self.title = 'None' self.links = 'None' def get_article(self): try: self.article = wikipedia.page(self.name) self.content = self.article.content self.title = self.article.title self.links = self.article.links except: print 'Sorry! Something happend!' print 'Try again with another article!' 
#sys.exit() # initialize the root node root_node = Node(start_article, None) root_node.get_article() graph[root_node.title] = [] all_nodes = [] for article in root_node.links: graph[root_node.title].append(Node(article, root_node.title)) # add all of the first article links to the graph #print root_node.links def spinning_cursor(): while True: for cursor in '|/-\\': yield cursor # this is main loop def search(name): print(''' Searching how to get from %s to %s. ''' % (root_node.title, search_term)) spinner = spinning_cursor() search_queue = deque() # create a new queue (double-ended queue) search_queue += graph[root_node.title] # add all of the root node urls to the search queue searched = [] # this array keeps track of which article we have already search for, so we dont search twice. while search_queue: # while the queue isn't empty # fancy spinner to wait sys.stdout.write(spinner.next()) sys.stdout.flush() time.sleep(0.1) sys.stdout.write('\b') article = search_queue.popleft() # grabs the first article off the queue if not article.title in searched: # only search this article if we haven't already search for try: article.get_article() # get the content from wikipedia #print article.title if search_term in article.content: # check if article contains the search_term we are looking for all_nodes.append(article) print 'I found it!' print 'Here is the first shortest path I discovered:' current = article.title path = [] while current != root_node.title: for article in all_nodes: if article.title == current: path.append(article.title) current = article.parent path.append(start_article) print(''' %s ---> the %s wikipedia article talks about %s! ''' % (' ---> '.join(path[::-1]), path[0], search_term)) #print ' ---> '.join(path[::-1]) + ' ---> ' + path[0] + ' has ' + search_term + ' in it!' print 'cool!' 
return True else: graph[article.title] = [] all_nodes.append(article) for name in article.links: graph[article.title].append(Node(name, article.title)) # add all of the first article links to the graph search_queue += graph[article.title] searched.append(article.title) # mark this article as searched. except: pass print 'wops!' return False # if we reach here, the term was not find in the max of x iterations # start the search search(start_article)
[ "cvalenzuela@nyu.edu" ]
cvalenzuela@nyu.edu
eb160d82373fb3bb62f2083ae5cbdbcf702d1379
738ae0290d91596086810298eb3ced56967d45d2
/python-cmd/scrabble.py
6f1cbea6c0101940856302143274ae5de62a01cf
[]
no_license
kkredit/hs-projects
6e7a8732331a23eacd154b4c0c611adc8795a0a6
97edcedf8116db57791f6b8c4666329f694d13b5
refs/heads/master
2021-01-09T09:36:43.379173
2016-06-01T21:58:15
2016-06-01T21:58:15
60,213,161
0
0
null
null
null
null
UTF-8
Python
false
false
3,442
py
# scrabble.py # A program to propose possible words in scrabble situations # (mostly) Kevin Kredit from string import *#split,lower WORDLIST_FILENAME = "words.txt" ####################################NOT MINE################################## def load_words(): """ Returns a list of valid words. Words are strings of lowercase letters. Depending on the size of the word list, this function may take a while to finish. """ #from string import *#split,lower print ("Loading word list from file...") inFile = open(WORDLIST_FILENAME, 'r')#, 0) line = inFile.readline() wordlist = line.split()#split(line) print (" ", len(wordlist), "words loaded.") return wordlist dictionary = load_words() ###############################MINE############################################ alphabet = 'abcdefghijklmnopqrstuvwxyz' def search(word,start=0,finish=len(dictionary),x=1): #print x,start,finish,finish-start,(finish+start)/2,\ # dictionary[(finish+start)/2] if finish-start < 5: if word in dictionary[start:finish]: return True else: return False tword = dictionary[int((start+finish)/2)] if len(tword) < len(word): start = (start+finish)/2 elif len(tword) > len(word): finish = (start+finish)/2 else:##if length is correct unfound,n = True,0 while unfound: if alphabet.index(tword[n]) < alphabet.index(word[n]): start,unfound = int((start+finish)/2),False elif alphabet.index(tword[n]) > alphabet.index(word[n]): finish,unfound = int((start+finish)/2),False elif n+1==len(word): return True n += 1 return search(word,start,finish,x+1) #######################################NOT MINE######################### def anagram(word): if word == '': return [''] else: ans = [] for w in anagram(word[1:]): for pos in range(len(w)+1): ans.append(w[:pos]+word[0]+w[pos:]) return ans #######################################MINE################################ def combinations(word,wnum,numletters):#to allow not-all-letter-using words????????? 
if wnum == numletters: return [word] else: ans = [] for pos in range(len(word)): ans += combinations(word[:pos]+word[pos+1:],wnum-1,numletters) return ans def main(): #from string import lower,split letters = input('\nWhat are the scrambled letters: ').lower(); printed,numletters,more = [],len(letters),'yes' while more[0].lower() == 'y' and numletters > 1: found = 0 for word in combinations(letters,len(letters),numletters): for w in anagram(word): if (w not in printed) and search(w):##w in dictionary:#make my own search? if not found: print ('\nPossible',numletters,'letter words:\n') print (w) printed.append(w) found += 1 print ('\nTotal:',found) if numletters == 2: break numletters -= 1 more = input(str('\nWould you like '+str(numletters)+ ' letter combinations? (y/n) ')) if not found: print ('\nThose letters do not form any words.') if input('\nAgain? ')[0].lower() == 'y':main() if __name__=='__main__':main()
[ "k.kredit.us@ieee.org" ]
k.kredit.us@ieee.org
e3cc6b9117ff7d7c9fee0eba2bd19618379ed048
1ab7fff33be75efb4b725cd6c3ba5566c29bed93
/tutorial/tutorial/urls.py
1245d82edb7947719fca3d3ca4448a39e0087e7a
[]
no_license
Anjali-Del/Anj
5b0ea6b5bc2b9c17653014d830e2526ac215ce1b
c361bc29b3da6700c51967590cb5f3abeb66881c
refs/heads/master
2021-01-15T17:41:27.574646
2015-07-30T05:11:00
2015-07-30T05:11:00
38,672,846
0
0
null
null
null
null
UTF-8
Python
false
false
904
py
"""tutorial URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import include, url, patterns from django.contrib import admin urlpatterns = patterns('', url(r'^admin/', include(admin.site.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^', include('snippets.urls')), )
[ "anjali@delhivery.com" ]
anjali@delhivery.com
b17d3ad44bb4ae3b8a6f3fb5f6c5bbe92883ca46
9292bd4bd9589e08fa8277069b20abc0e6f9fd7d
/Clustering/app.py
585d9a1b10894213868e017694d0e98d33e6322b
[]
no_license
Leonidesguerra/final_project
53f416b872677a98ff823c6bddf1fb86ac8f3fc6
68c9f6b5e13aad89c6528b6b15c16261f33098d1
refs/heads/main
2023-06-15T21:29:28.365502
2021-06-29T03:03:28
2021-06-29T03:03:28
377,657,466
0
0
null
null
null
null
UTF-8
Python
false
false
3,139
py
import numpy as np from numpy.core.fromnumeric import reshape import pandas as pd import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session, session from sqlalchemy import create_engine, func # from config import DATABASE_URI from flask import Flask, jsonify, render_template, redirect ################################################# # Database Setup ################################################# app = Flask(__name__) # engine = sqlalchemy.create_engine(DATABASE_URI) rds_connection_string = "postgres:imadlefl@localhost:5432/Agriculture_JB" engine = create_engine(f'postgresql://{rds_connection_string}') @app.route("/") def home(): # go to home page to scrape info return render_template("index.html") @app.route("/visualizations") def visualizations(): # go to home page to scrape info return render_template("Agricultura_HTML.html") @app.route("/mexican_states") def perimeter(): # go to home page to scrape info return render_template("mexican_states.html") @app.route("/toppais") def toppais(): data = engine.execute( "SELECT cultivo, SUM(valorproduccion) FROM agr2017 GROUP BY cultivo LIMIT 20") #df = pd.read_sql_query(query, engine) # return df[['cultivo', 'sum']].to_dict() all_data = [] for record in data: data_dict = {} data_dict['cultivo'] = record[0] data_dict['sum'] = record[1] all_data.append(data_dict) return jsonify(all_data) @app.route("/estadocrop") def estadocrop(): data = engine.execute( "SELECT estado, cultivo, SUM(valorproduccion) FROM agr2017 GROUP BY estado, cultivo;") all_data = [] for record in data: data_dict = {} data_dict['estado'] = record[0] data_dict['cultivo'] = record[1] data_dict['sum'] = record[2] all_data.append(data_dict) return jsonify(all_data) @app.route("/mapa") def mapa(): data = engine.execute( "SELECT estado, municipio, cultivo, SUM(valorproduccion), AVG(latitud), AVG(longitud), MAX(altitud) FROM agr2017 GROUP BY estado, municipio , cultivo;") all_data = [] for record in data: data_dict = 
{} data_dict['estado'] = record[0] data_dict['municipio'] = record[1] data_dict['cultivo'] = record[2] data_dict['sum'] = record[3] data_dict['lat'] = record[4] data_dict['lng'] = record[5] data_dict['alt'] = record[6] all_data.append(data_dict) return jsonify(all_data) @app.route("/clustering_map") def clus_map(): data = engine.execute( "SELECT latitud, longitud, cultivo, estado, clusters ,rendimiento FROM clustering;") all_data = [] for record in data: data_dict = {} data_dict['latitud'] = record[0] data_dict['longitud'] = record[1] data_dict['cultivo'] = record[2] data_dict['estado'] = record[3] data_dict['clusters'] = record[4] data_dict['rendimiento'] = record[5] all_data.append(data_dict) return jsonify(all_data) if __name__ == '__main__': app.run(debug=True)
[ "leonidesguerra@gmail.com" ]
leonidesguerra@gmail.com
4ce257895e21ccb0c844c1e6aa51c30a9ac4fe4d
202f3112b74e0c46f906c95a3914e24a734aa5ea
/polls/models.py
d6bc4118f5fb516f0ed4046cca01b3b02017e094
[]
no_license
markadeev/djangoapp
36e19cfac9cff07bffec5f54a903b4ee2d64dded
ac94bfff12a47994f0d2af3a924ef76430d1bf80
refs/heads/master
2021-01-06T14:17:23.121078
2020-02-20T12:01:52
2020-02-20T12:01:52
241,357,222
0
0
null
2020-02-20T12:01:54
2020-02-18T12:37:04
Python
UTF-8
Python
false
false
643
py
import datetime from django.db import models from django.utils import timezone class Question(models.Model): question_text = models.CharField(max_length=200) pub_date = models.DateTimeField('date published') def __str__(self): return self.question_text class Choice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) choice_text = models.CharField(max_length=200) votes = models.IntegerField(default=0) def __str__(self): return self.question_text def was_published_recently(self): return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
[ "markadeev@ukr.net" ]
markadeev@ukr.net
2870cf1b425dae0d303dc3b910f2b3820bac2b3e
c1abf5c7dd599b25d84c2026f97eaccd03dc4e46
/movedown.py
1da89fb7214f214508b7e7d52b1b88c29c20d425
[ "MIT" ]
permissive
oknalv/linky
09768abe96d95f2dcb67ff91c22663a4a69356cb
78fba19946e2212b10f3d1a5b27c7d9329556290
refs/heads/master
2016-09-13T01:33:29.192646
2016-04-29T15:41:13
2016-04-29T15:41:13
57,393,703
0
0
null
null
null
null
UTF-8
Python
false
false
1,606
py
import webapp2 from base import BaseHandler from link import Link, Container from google.appengine.api import users import time class MoveDownHandler(BaseHandler): def __init__(self, request = None, response = None): self.initialize( request, response ) def get(self): if not self.request.get("id"): self.set_flash("danger", "forbidden-access") self.redirect("/") else: user = users.get_current_user() if user: containers = Container.query(Container.user == user) cont = None if not containers.iter().has_next(): cont = Container(user = user) cont.put() else: cont = containers.iter().next() actual = None for ind, link in enumerate(cont.links): if link.name == self.request.get("id"): actual = ind break if actual is not None and actual < len(cont.links): cont.links[actual], cont.links[actual + 1] = cont.links[actual + 1], cont.links[actual] cont.put() time.sleep(1) self.redirect("/") else: self.set_flash("danger", "not-logged-in") self.redirect("/") config = {} config['webapp2_extras.sessions'] = { 'secret_key': 'merely remarkable came line', } app = webapp2.WSGIApplication([ ('/movedown', MoveDownHandler) ], debug = True, config = config)
[ "thevlanko@gmail.com" ]
thevlanko@gmail.com
3f1b20e6325128b26f23eed22db51edb5211804d
e1d942fc4d0099c4a5fe7cf10fdf7e710da2a147
/11/JackCompiler.py
354594cea58f8aa55a8c2273f48dda940a0413fb
[]
no_license
AradAlon/Nand2Tetris
1c5856a6cf5734661b8e848a4e5fbea5381f4603
1ca9948b1495b0f16bfa5c89c4be50944fa2380e
refs/heads/master
2022-12-15T14:32:10.149762
2020-09-19T13:27:15
2020-09-19T13:27:15
296,870,095
0
0
null
null
null
null
UTF-8
Python
false
false
19,213
py
import sys import re import glob OUT_PATH = '' T_KEYWORD = 'keyword' T_SYM = 'symbol' T_NUM = 'integerConstant' T_STR = 'stringConstant' T_ID = 'identifier' class JackCompiler: def __init__(self, jacks): self.analyze(jacks) def analyze(self, jacks): for jack in jacks: CompilationEngine(jack) class JackTokenizer: def __init__(self, jack): reader = open(jack, 'r') one_liner = self.one_liner(reader) self.tokens = self.tokenize(one_liner) self.index = -1 reader.close() def one_liner(self, reader): content = [] line = reader.readline() while line: comment_index = line.find('//') if line.find('//') > -1 else len(line) line = line[:comment_index].strip() if not line: line = reader.readline() continue content.append(line) line = reader.readline() one_liner = ' '.join(content) one_liner = re.sub(r'/\*(.*?)\*/', '', one_liner).strip() return one_liner def tokenize(self, one_liner): keywords = ['class', 'method', 'function', 'constructor', 'int', 'boolean', 'char', 'void', 'var', 'static', 'field', 'let', 'do', 'if', 'else', 'while', 'return', 'true', 'false', 'null', 'this'] symbols = ['{','}','(',')','[',']','.',',',';','+','-','*','/','&','|','<','>','=','~'] convert_symbols = { "<": '&lt;', ">": '&gt;', '"': '&quot;', "&": '&amp;', } tokens = [] keyword_re = r'\b' + r'\b|\b'.join(keywords) + r'\b' sym_re = '['+re.escape(''.join(symbols))+']' num_re = r'\d+' str_re = r'"[^"\n]*"' id_re = r'[\w\-]+' word = re.compile(keyword_re+'|'+sym_re+'|'+num_re+'|'+str_re+'|'+id_re) types = { T_KEYWORD: keyword_re, T_SYM: sym_re, T_NUM: num_re, T_STR: str_re, T_ID: id_re, } split = word.findall(one_liner) for word in split: for typ, reg in types.items(): if re.match(reg, word) != None: if typ == T_STR: word = word.strip('"') # if typ == T_SYM: # word = convert_symbols.get(word, word) tokens.append((word,typ)) break return tokens @property def hasMoreTokens(self): return self.index < len(self.tokens) - 1 def advance(self): self.index += 1 if self.hasMoreTokens else self.index @property 
def currentToken(self): return self.tokens[self.index] if self.index > -1 else None def nextToken(self, LL): return self.tokens[self.index + LL] if self.hasMoreTokens else None class CompilationEngine: label_count = 0 convert_symbols = {'+':'add', '-':'sub', '*':'call Math.multiply 2', '/':'call Math.divide 2', '<':'lt', '>':'gt', '=':'eq', '&':'and', '|':'or'} unary_convert_symbols = {'-':'neg', '~':'not'} def __init__(self, jack): self.jackTokens = JackTokenizer(jack) self.vm = VMWriter(jack) self.symbols = SymbolTable() self.compileClass() self.vm.close() def process(self, expected_typ, *args): self.jackTokens.advance() val ,typ = self.jackTokens.currentToken if expected_typ != typ or ((expected_typ == T_KEYWORD or expected_typ == T_SYM) and val not in args): text = '{}, ({} {})'.format(expected_typ, typ, val) raise ValueError() return typ, val def peek(self, expected_typ, *args, LL=1): val, typ = self.jackTokens.nextToken(LL) if expected_typ != typ or ((expected_typ == T_KEYWORD or expected_typ == T_SYM) and val not in args): return False return True @property def label(self): self.label_count += 1 return 'label{}'.format(str(self.label_count)) def vm_variable(self, action, name): kind, type, index = self.symbols.kind_type_index_of(name) if action == 'push': self.vm.write_push(kind, index) if action == 'pop': self.vm.write_pop(kind, index) def compileClass(self): self.process(T_KEYWORD, 'class') _, self.current_class_name = self.process(T_ID) self.process(T_SYM, '{') self.compileClassVarDec() self.compileSubroutineDec() self.process(T_SYM, '}') def compileClassVarDec(self): while self.peek(T_KEYWORD, 'static', 'field'): _, kind = self.process(T_KEYWORD, 'static', 'field') _, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID) _, name = self.process(T_ID) self.symbols.append_class_table(name, type, kind) while self.peek(T_SYM, ','): self.process(T_SYM, ',') _, name = 
self.process(T_ID) self.symbols.append_class_table(name, type, kind) self.process(T_SYM, ';') def compileSubroutineDec(self): while self.peek(T_KEYWORD, 'constructor', 'function', 'method'): _, self.current_subroutine_type = self.process(T_KEYWORD, 'constructor', 'function', 'method') _, type = self.process(T_KEYWORD, 'void', 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'void', 'int', 'char', 'boolean') else self.process(T_ID) _, self.current_subroutine_name = self.process(T_ID) self.symbols.start_subroutine() if self.current_subroutine_type == 'method': self.symbols.append_subroutine_table('this', self.current_class_name, 'argument') self.compileParameterList() self.compileSubroutineBody() def compileParameterList(self): self.process(T_SYM, '(') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') or self.peek(T_ID): _, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID) _, name = self.process(T_ID) self.symbols.append_subroutine_table(name, type, 'argument') while self.peek(T_SYM, ','): self.process(T_SYM, ',') _, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID) _, name = self.process(T_ID) self.symbols.append_subroutine_table(name, type, 'argument') self.process(T_SYM, ')') def compileSubroutineBody(self): self.process(T_SYM, '{') self.compileVarDec() func_name = self.current_class_name+'.'+self.current_subroutine_name num_of_var = self.symbols.var_count('var') self.vm.write_function(func_name, num_of_var) self.this_pointer() self.compileStatements() self.process(T_SYM, '}') def this_pointer(self): if self.current_subroutine_type == 'method': self.vm.write_push('argument', 0) self.vm.write_pop('pointer', 0) elif self.current_subroutine_type == 'constructor': self.vm.write_push('constant', self.symbols.var_count('field')) self.vm.write_call('Memory.alloc', 1) self.vm.write_pop('pointer', 0) def 
compileVarDec(self): while self.peek(T_KEYWORD, 'var'): _, kind = self.process(T_KEYWORD, 'var') _, type = self.process(T_KEYWORD, 'int', 'char', 'boolean') if self.peek(T_KEYWORD, 'int', 'char', 'boolean') else self.process(T_ID) _, name = self.process(T_ID) self.symbols.append_subroutine_table(name, type, kind) while self.peek(T_SYM, ','): self.process(T_SYM, ',') _, name = self.process(T_ID) self.symbols.append_subroutine_table(name, type, kind) self.process(T_SYM, ';') def compileStatements(self): while self.peek(T_KEYWORD, 'let', 'if', 'while', 'do', 'return'): if self.peek(T_KEYWORD, 'let'): self.compileLet() elif self.peek(T_KEYWORD, 'if'): self.compileIf() elif self.peek(T_KEYWORD, 'while'): self.compileWhile() elif self.peek(T_KEYWORD, 'do'): self.compileDo() elif self.peek(T_KEYWORD, 'return'): self.compileReturn() def compileLet(self): self.process(T_KEYWORD, 'let') _, name = self.process(T_ID) if self.peek(T_SYM, '['): self.vm_variable('push', name) self.process(T_SYM, '[') self.compileExpression() self.process(T_SYM, ']') self.vm.write_arithmetic('add') self.process(T_SYM, '=') self.compileExpression() self.process(T_SYM, ';') self.vm.write_pop('temp', 1) self.vm.write_pop('pointer', 1) self.vm.write_push('temp', 1) self.vm.write_pop('that', 0) return self.process(T_SYM, '=') self.compileExpression() self.process(T_SYM, ';') self.vm_variable('pop', name) def compileIf(self): self.process(T_KEYWORD, 'if') label = self.label self.compileCondition(label) if self.peek(T_KEYWORD, 'else'): self.process(T_KEYWORD, 'else') self.process(T_SYM, '{') self.compileStatements() self.process(T_SYM, '}') self.vm.write_label(label) def compileWhile(self): self.process(T_KEYWORD, 'while') label = self.label self.vm.write_label(label) self.compileCondition(label) def compileCondition(self, label): self.process(T_SYM, '(') self.compileExpression() self.process(T_SYM, ')') self.vm.write_arithmetic('not') else_label = self.label self.vm.write_if(else_label) 
self.process(T_SYM, '{') self.compileStatements() self.process(T_SYM, '}') self.vm.write_goto(label) self.vm.write_label(else_label) def compileDo(self): self.process(T_KEYWORD, 'do') self.compileSubroutineCall() self.vm.write_pop('temp', 0) self.process(T_SYM, ';') def compileReturn(self): self.process(T_KEYWORD, 'return') if not self.peek(T_SYM, ';'): self.compileExpression() else: self.vm.write_push('constant', 0) self.process(T_SYM, ';') self.vm.write_return() def compileExpression(self): if not self.is_term(): return 0 self.compileTerm() while self.peek(T_SYM, '+', '-', '*', '/', '&', '|', '<', '>', '='): _, op = self.process(T_SYM, '+', '-', '*', '/', '&', '|', '<', '>', '=') self.compileTerm() self.vm.write_arithmetic(self.convert_symbols[op]) return 1 def compileTerm(self): if self.peek(T_NUM): _, val = self.process(T_NUM) self.vm.write_push('constant', val) elif self.peek(T_STR): _, string = self.process(T_STR) self.vm.write_push('constant', len(string)) self.vm.write_call('String.new', 1) for char in string: self.vm.write_push('constant', ord(char)) self.vm.write_call('String.appendChar', 2) elif self.peek(T_KEYWORD, 'true', 'false', 'null', 'this'): _, word = self.process(T_KEYWORD, 'true', 'false', 'null', 'this') if word == 'this': self.vm.write_push('pointer', 0) elif word == 'true': self.vm.write_push('constant', 1) self.vm.write_arithmetic('neg') else: self.vm.write_push('constant', 0) elif self.peek(T_SYM, '('): self.process(T_SYM, '(') self.compileExpression() self.process(T_SYM, ')') elif self.peek(T_SYM, '-', '~'): _, op = self.process(T_SYM, '-', '~') self.compileTerm() self.vm.write_arithmetic(self.unary_convert_symbols[op]) elif self.peek(T_ID): if self.peek(T_SYM, '[', LL=2): _, name = self.process(T_ID) self.vm_variable('push', name) self.process(T_SYM, '[') self.compileExpression() self.process(T_SYM, ']') self.vm.write_arithmetic('add') self.vm.write_pop('pointer', 1) self.vm.write_push('that', 0) elif self.peek(T_SYM, '(', '.', LL=2): 
self.compileSubroutineCall() else: _, name = self.process(T_ID) self.vm_variable('push', name) def is_term(self): return (self.peek(T_NUM) or self.peek(T_STR) or self.peek(T_KEYWORD, 'true', 'false', 'null', 'this') or self.peek(T_ID) or self.peek(T_SYM, '(', '-', '~')) def compileSubroutineCall(self): num_of_args = 0 _, obj_name = self.process(T_ID) if self.peek(T_SYM, '.'): self.process(T_SYM, '.') _, type, _ = self.symbols.kind_type_index_of(obj_name) if type: num_of_args += 1 self.vm_variable('push', obj_name) obj_name = type _, func_name = self.process(T_ID) name = '{}.{}'.format(obj_name, func_name) else: self.vm.write_push('pointer', 0) num_of_args += 1 name = '{}.{}'.format(self.current_class_name, obj_name) self.process(T_SYM, '(') num_of_args += self.compileExpressionList() self.process(T_SYM, ')') self.vm.write_call(name, num_of_args) def compileExpressionList(self): num_of_args = self.compileExpression() while self.peek(T_SYM, ','): self.process(T_SYM, ',') self.compileExpression() num_of_args += 1 return num_of_args class SymbolTable: def __init__(self): self.class_table = { "field": [ # { # 'name': 'x', # 'type': T_NUM, # }, ], "static": [ # { # 'name': 'x', # 'type': T_NUM, # } ] } self.subroutine_tables = [ # { # "argument": [ # { # 'name': 'x', # 'type': T_NUM, # }, # ], # "local": [ # { # 'name': 'x', # 'type': T_NUM, # } # ] # } ] def append_class_table(self, name, type, kind): raw = { 'name': name, 'type': type, } self.class_table[kind].append(raw) def append_subroutine_table(self, name, type, kind): raw = { 'name': name, 'type': type, } self.subroutine_tables[-1][kind].append(raw) def start_subroutine(self): element = { "argument": [], "var": [], # "local": [] } self.subroutine_tables.append(element) def var_count(self, kind): if kind in ['field', 'static']: count = len(self.class_table[kind]) else: count = len(self.subroutine_tables[-1][kind]) return count def kind_type_index_of(self, name): for kind, elements in self.class_table.items(): for 
element in elements: if element['name'] == name: return kind, element['type'], elements.index(element) for kind, elements in self.subroutine_tables[-1].items(): for element in elements: if element['name'] == name: return kind, element['type'], elements.index(element) return None, None, None class VMWriter: def __init__(self, jack): self.file = open(jack.replace('.jack','.vm'), 'w') def write(self, line): self.file.write(line + '\n') def write_push(self, segment, index): if segment == 'field': segment = 'this' if segment == 'var': segment = 'local' line = 'push {} {}'.format(segment, str(index)) self.write(line) def write_pop(self, segment, index): if segment == 'field': segment = 'this' if segment == 'var': segment = 'local' line = 'pop {} {}'.format(segment, str(index)) self.write(line) def write_arithmetic(self, command): line = '{}'.format(command) self.write(line) def write_label(self, label): line = 'label {}'.format(label) self.write(line) def write_goto(self, label): line = 'goto {}'.format(label) self.write(line) def write_if(self, label): line = 'if-goto {}'.format(label) self.write(line) def write_call(self, name, num_of_args): line = 'call {} {}'.format(name, str(num_of_args)) self.write(line) def write_function(self, name, num_of_locals): line = 'function {} {}'.format(name, str(num_of_locals)) self.write(line) def write_return(self): line = 'return' self.write(line) def close(self): self.file.close() if __name__ == "__main__": path_or_file = sys.argv[1] if not path_or_file.endswith('.jack'): name = path_or_file.split('\\')[-1] OUT_PATH = path_or_file num_of_arg = len(sys.argv) - 1 if num_of_arg != 1: print("expected 1 argument - file or folder, got {} argument/s".format(num_of_arg)) sys.exit() jacks = glob.glob(path_or_file+'/*.jack') or [path_or_file] if jacks == []: print("no jack files in folder") sys.exit() trans = JackCompiler(jacks)
[ "noreply@github.com" ]
AradAlon.noreply@github.com
3881ce11f6a9512b8d49fa4fb9fdd8eedf5e4ae6
954f9a154066c65374b475f925f2e5a138a14162
/bigdatamining/text_based/parser.py
548979216515cbe8f5a21eb422fa2f33c8bfa6f7
[]
no_license
reloadbrain/recommEngine
8263673945b5af1f73d26c22625a3090e071e952
9d3a4227916cb8583e1faef572824a54a067e7d1
refs/heads/master
2020-03-11T01:49:48.925712
2015-12-03T13:31:48
2015-12-03T13:31:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,998
py
import sys
import gzip
import json

######################################################
#
# Parser that takes a text and reduces it to a tokenized/stemmed
# string.
#
# To debug call: python parser.py
#
######################################################


def extract_terms(text):
    """Return a space-joined string of the stemmed nouns found in *text*."""
    # Imported locally so extract_reviews() stays usable when nltk is
    # not installed.
    import nltk
    # Split the text into tokens and tag each with its part of speech.
    tokens = nltk.word_tokenize(text)
    tagged = nltk.pos_tag(tokens)
    # Keep only nouns: every noun tag (NN, NNS, NNP, NNPS) contains 'NN'.
    nouns = [word for word, tag in tagged if 'NN' in tag]
    # Stem each noun and join the stems into a single string.
    stemmer = nltk.SnowballStemmer("english")
    return ' '.join(stemmer.stem(word) for word in nouns)


def extract_reviews(path):
    """Read a gzipped JSON-lines review file and return a list of
    {'asin': ..., 'text': ...} dicts, one entry per product.

    Reviews sharing an ASIN are concatenated (space-separated) into a
    single entry; records lacking 'asin' or 'reviewText' are skipped.
    """
    reviews = []
    # asin -> position in *reviews*; gives O(1) duplicate handling
    # instead of the original linear scan per repeated ASIN.
    index_by_asin = {}
    # `with` guarantees the file is closed (the original leaked the handle).
    with gzip.open(path, 'r') as dataset:
        for line in dataset:
            try:
                record = json.loads(line)
                if 'asin' in record and 'reviewText' in record:
                    asin = record['asin']
                    text = record['reviewText']
                    if asin in index_by_asin:
                        # Product already seen: append this review's text.
                        entry = reviews[index_by_asin[asin]]
                        entry['text'] = entry['text'] + " " + text
                    else:
                        index_by_asin[asin] = len(reviews)
                        reviews.append({'asin': asin, 'text': text})
            except (RuntimeError, TypeError, NameError) as err:
                # Bug fix: the original printed str(RuntimeError) — the
                # class itself — instead of the exception that occurred.
                print("EXCEPTION: error " + str(err))
    # print("Found " + str(len(reviews)))
    return reviews


if __name__ == '__main__':
    test_string = input("Please enter something: ")
    result = extract_terms(test_string)
    print(result)
[ "Martintoni@MacBook-Pro-di-Martintoni.local" ]
Martintoni@MacBook-Pro-di-Martintoni.local
a2ff8efb83a37d60e0d1299f437db3a37bd87b9a
1d943d6daf9c25a9737663091d81bb08a6de6ef6
/main.py
4ae6d7ee999c4d9cb970c7803bfe124add29fd61
[]
no_license
Steveineiter/A-Star_visualization
1b29cbdc0dd3dafbc69d467a9cd2446a04d25336
50c24bdcbc3c85650dbe2459648bc20b73da08a9
refs/heads/main
2023-01-01T00:34:41.195400
2020-10-29T15:58:18
2020-10-29T15:58:18
308,378,255
0
0
null
null
null
null
UTF-8
Python
false
false
2,684
py
# Grid editor intended as the front end of an A*-style path visualization:
# draws a 30x30 board and lets the user paint blocked cells with the mouse.
try:
    import pygame
    import sys
    import math
    from tkinter import *
    from tkinter import ttk
    from tkinter import messagebox
except:
    pass

# Game field
WIDTH, HEIGHT = (900, 900)
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))

# Colors (RGB)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
PURPLE = (148, 62, 143)

# Utility
NUMBER_OF_ROWS = 30
NUMBER_OF_COLUMNS = 30
PIXEL_PER_BOX_WIDTH = WIDTH / NUMBER_OF_ROWS    # float cell size in pixels
PIXEL_PER_BOX_HEIGHT = HEIGHT / NUMBER_OF_COLUMNS
BOX_SIZE = 3  # TODO poder if we need this
grid = []  # 2D list of BoxInGrid, filled by start_up()


# Translated from German: "With pygame we do the visualization, with
# tkinter the input etc."
def start_up():
    """Build the grid of BoxInGrid cells and mark the start/end nodes."""
    # Creating 2D Array
    global grid
    grid = [[0 for i in range(NUMBER_OF_ROWS)] for j in range(NUMBER_OF_COLUMNS)]  # same as the next few lines
    # Creating Spots
    for i in range(NUMBER_OF_ROWS):
        for j in range(NUMBER_OF_COLUMNS):
            grid[i][j] = BoxInGrid(i, j)
    # Set start and end node (purple, not editable by the user)
    start = grid[5][5]
    end = grid[NUMBER_OF_ROWS - 6][NUMBER_OF_COLUMNS - 6]
    start.color = end.color = PURPLE
    start.is_changeable = end.is_changeable = False


def add_box_neighbor():
    # Placeholder: neighbor wiring for the search was never implemented.
    pass


class BoxInGrid:
    """One cell of the grid: position, display color, and edit flags."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.color = WHITE
        self.is_blocked = False      # True once painted as a wall
        self.is_changeable = True    # False for start/end nodes

    def draw(self, window, box_with):
        # NOTE(review): the rect uses a hard-coded 10x10 size rather than
        # PIXEL_PER_BOX_WIDTH/HEIGHT — confirm whether that is intended.
        pygame.draw.rect(window, self.color, (self.x * PIXEL_PER_BOX_WIDTH, self.y * PIXEL_PER_BOX_HEIGHT, 10, 10), box_with)


def redraw_window():
    """Draw every cell and flip the display."""
    # print(grid)
    for row in grid:
        for box in row:
            box.draw(WINDOW, BOX_SIZE)
    pygame.display.update()


def handle_mouse_press(mouse_position):
    """Convert a pixel click to grid coordinates and paint the cell blocked."""
    x_axis, y_axis = mouse_position
    x_pos = x_axis // PIXEL_PER_BOX_WIDTH
    y_pos = y_axis // PIXEL_PER_BOX_HEIGHT
    access_point = grid[int(x_pos)][int(y_pos)]
    if not access_point.is_blocked and access_point.is_changeable:
        access_point.color = BLUE
        access_point.is_blocked = True


if __name__ == '__main__':
    run = True
    fps = 60
    clock = pygame.time.Clock()
    start_up()
    box_in_grid = BoxInGrid(100, 100)
    # Main loop: redraw, process quit events, then poll mouse/keyboard.
    # NOTE(review): placement of the mouse/keyboard polling relative to the
    # event loop was reconstructed from a formatting-damaged source —
    # confirm against the original repository.
    while(run):
        clock.tick(fps)
        redraw_window()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        if pygame.mouse.get_pressed()[0]:
            mouse_position = pygame.mouse.get_pos()
            handle_mouse_press(mouse_position)
        if pygame.key.get_pressed()[pygame.K_RETURN]:
            print("yes your majesti")
            run = False
            add_box_neighbor()
[ "noreply@github.com" ]
Steveineiter.noreply@github.com
023f6b987e1a2d0d2183da7b4e4d3ffb07f79497
ac0dc4a4c9960bbbdca2db0eaf7c839f552b0546
/nomdivertit.py
e0d0b5f3828c2a5978d6db4a5ee2855b81e97965
[]
no_license
HectorGarciaPY/primer1.py
b7d237b82d6e3ca2cd09ea771a6e152c34fb55ff
802aac5f442b4e1956cdd4f63a7767b94c30a775
refs/heads/master
2023-05-14T16:39:53.271639
2021-06-02T10:17:09
2021-06-02T10:17:09
297,622,319
0
0
null
null
null
null
UTF-8
Python
false
false
352
py
# Keep asking for a "funny" name: a name is funny when its first character
# equals its last and its second equals its second-to-last. Two boring
# answers end the game.
boring_answers = 0
while True:
    print("Escribe un nombre divertido:")
    name = input()
    if name[0] == name[-1] and name[1] == name[-2]:
        print("Es un nom divertit")
    else:
        print("Ets un avorrit, el nom no mola")
        boring_answers += 1
    if boring_answers == 2:
        print("No tens gens d'originalitat. No pots tenir gos, no pots sortir al carrer.\n"" Adéu!")
        break
[ "" ]
000950e05d418733d1aee53faa55ce0a11927353
87ef03b1ff43333361771976397908abeb56e496
/venv/Lib/site-packages/gunicorn/http/body.py
afde36854d1b6ce7e58bdb115b34e09dbed4eee6
[ "MIT" ]
permissive
pran01/AlgoVision
cba938db1f56c3b52e4868bcdda5283492b2902e
40e85f3c55266f43ee103dfa0852a63af306a8d4
refs/heads/master
2023-04-05T21:01:39.513718
2021-04-30T18:56:33
2021-04-30T18:56:33
281,875,751
33
9
MIT
2021-03-20T04:56:44
2020-07-23T06:58:41
Python
UTF-8
Python
false
false
7,297
py
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import io
import sys

from gunicorn.http.errors import (NoMoreData, ChunkMissingTerminator,
                                  InvalidChunkSize)


class ChunkedReader(object):
    """Reads a request body sent with Transfer-Encoding: chunked.

    Chunks are decoded lazily by the self.parser generator; decoded bytes
    are buffered in self.buf so read() can serve arbitrary sizes.
    """

    def __init__(self, req, unreader):
        self.req = req
        self.parser = self.parse_chunked(unreader)
        self.buf = io.BytesIO()

    def read(self, size):
        """Return up to *size* decoded body bytes (b"" when exhausted)."""
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""

        if self.parser:
            while self.buf.tell() < size:
                try:
                    self.buf.write(next(self.parser))
                except StopIteration:
                    # Body fully decoded; drop the generator.
                    self.parser = None
                    break

        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret

    def parse_trailers(self, unreader, data):
        """Consume optional trailer headers after the terminating 0-chunk.

        Parsed trailers are stored on self.req.trailers; bytes beyond the
        trailer block are pushed back into the unreader.
        """
        buf = io.BytesIO()
        buf.write(data)

        idx = buf.getvalue().find(b"\r\n\r\n")
        done = buf.getvalue()[:2] == b"\r\n"
        while idx < 0 and not done:
            self.get_data(unreader, buf)
            idx = buf.getvalue().find(b"\r\n\r\n")
            done = buf.getvalue()[:2] == b"\r\n"
        if done:
            # No trailers: the body ends right after the 0-chunk CRLF.
            unreader.unread(buf.getvalue()[2:])
            return b""
        self.req.trailers = self.req.parse_headers(buf.getvalue()[:idx])
        unreader.unread(buf.getvalue()[idx + 4:])

    def parse_chunked(self, unreader):
        """Generator yielding decoded chunk payloads until the 0-chunk."""
        (size, rest) = self.parse_chunk_size(unreader)
        while size > 0:
            # Drain the socket until this chunk's payload is fully yielded.
            while size > len(rest):
                size -= len(rest)
                yield rest
                rest = unreader.read()
                if not rest:
                    raise NoMoreData()
            yield rest[:size]
            # Remove \r\n after chunk
            rest = rest[size:]
            while len(rest) < 2:
                rest += unreader.read()
            if rest[:2] != b'\r\n':
                raise ChunkMissingTerminator(rest[:2])
            (size, rest) = self.parse_chunk_size(unreader, data=rest[2:])

    def parse_chunk_size(self, unreader, data=None):
        """Read one chunk-size line and return (size, leftover_bytes).

        A size of 0 marks end-of-body: trailers are parsed and (0, None)
        is returned. Raises InvalidChunkSize for a non-hex size field.
        """
        buf = io.BytesIO()
        if data is not None:
            buf.write(data)

        idx = buf.getvalue().find(b"\r\n")
        while idx < 0:
            self.get_data(unreader, buf)
            idx = buf.getvalue().find(b"\r\n")

        data = buf.getvalue()
        line, rest_chunk = data[:idx], data[idx + 2:]

        # Chunk extensions (";name=value") are ignored.
        chunk_size = line.split(b";", 1)[0].strip()
        try:
            chunk_size = int(chunk_size, 16)
        except ValueError:
            raise InvalidChunkSize(chunk_size)

        if chunk_size == 0:
            try:
                self.parse_trailers(unreader, rest_chunk)
            except NoMoreData:
                pass
            return (0, None)
        return (chunk_size, rest_chunk)

    def get_data(self, unreader, buf):
        # Pull one read from the socket into *buf*; empty read means the
        # peer closed before the body was complete.
        data = unreader.read()
        if not data:
            raise NoMoreData()
        buf.write(data)


class LengthReader(object):
    """Reads a body whose size is fixed by a Content-Length header."""

    def __init__(self, unreader, length):
        self.unreader = unreader
        self.length = length  # bytes of body still unread

    def read(self, size):
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")

        # Never hand out more than the declared body length.
        size = min(self.length, size)
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""

        buf = io.BytesIO()
        data = self.unreader.read()
        while data:
            buf.write(data)
            if buf.tell() >= size:
                break
            data = self.unreader.read()

        buf = buf.getvalue()
        ret, rest = buf[:size], buf[size:]
        # Over-read bytes belong to the next request: push them back.
        self.unreader.unread(rest)
        self.length -= size
        return ret


class EOFReader(object):
    """Reads a body delimited only by the connection closing
    (no Content-Length and no chunked encoding)."""

    def __init__(self, unreader):
        self.unreader = unreader
        self.buf = io.BytesIO()
        self.finished = False  # set once the peer has closed

    def read(self, size):
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""

        if self.finished:
            # Serve any remaining buffered bytes; no more socket reads.
            data = self.buf.getvalue()
            ret, rest = data[:size], data[size:]
            self.buf = io.BytesIO()
            self.buf.write(rest)
            return ret

        data = self.unreader.read()
        while data:
            self.buf.write(data)
            if self.buf.tell() > size:
                break
            data = self.unreader.read()

        if not data:
            self.finished = True

        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret


class Body(object):
    """File-like wrapper over one of the reader strategies above, adding
    line-oriented reads and iteration."""

    def __init__(self, reader):
        self.reader = reader
        self.buf = io.BytesIO()

    def __iter__(self):
        return self

    def __next__(self):
        ret = self.readline()
        if not ret:
            raise StopIteration()
        return ret
    next = __next__  # Python 2 iterator-protocol alias

    def getsize(self, size):
        """Normalize a caller-supplied size: None/negative mean unbounded."""
        if size is None:
            return sys.maxsize
        elif not isinstance(size, int):
            raise TypeError("size must be an integral type")
        elif size < 0:
            return sys.maxsize
        return size

    def read(self, size=None):
        size = self.getsize(size)
        if size == 0:
            return b""
        if size < self.buf.tell():
            # Enough already buffered; serve from the buffer alone.
            data = self.buf.getvalue()
            ret, rest = data[:size], data[size:]
            self.buf = io.BytesIO()
            self.buf.write(rest)
            return ret

        while size > self.buf.tell():
            data = self.reader.read(1024)
            if not data:
                break
            self.buf.write(data)

        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret

    def readline(self, size=None):
        size = self.getsize(size)
        if size == 0:
            return b""

        data = self.buf.getvalue()
        self.buf = io.BytesIO()

        ret = []
        while 1:
            idx = data.find(b"\n", 0, size)
            # idx becomes: one past the newline, or *size* when the limit
            # is hit, or 0 (falsy) meaning "keep reading".
            idx = idx + 1 if idx >= 0 else size if len(data) >= size else 0
            if idx:
                ret.append(data[:idx])
                self.buf.write(data[idx:])
                break

            ret.append(data)
            size -= len(data)
            data = self.reader.read(min(1024, size))
            if not data:
                break

        return b"".join(ret)

    def readlines(self, size=None):
        # NOTE(review): *size* is accepted but never applied here — the
        # whole remaining body is read regardless.
        ret = []
        data = self.read()
        while data:
            pos = data.find(b"\n")
            if pos < 0:
                ret.append(data)
                data = b""
            else:
                line, data = data[:pos + 1], data[pos + 1:]
                ret.append(line)
        return ret
[ "pran.sinha1.0@gmail.com" ]
pran.sinha1.0@gmail.com
1123236231c7d7542bb38bab826fbc2184d101e5
01b77be351755b7f2b49d40744751cf22f3953cf
/tools/json_schema_compiler/compiler.py
38235e07f9c9833705f99c341b718ad1db3fdb11
[ "LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause" ]
permissive
bwahn/Havana
4159876f98850fbfe873ccaaa3dc38739537e9f3
5e8bc991ea7e251e98efb6e54e0b8573e5503aa6
refs/heads/master
2020-05-31T21:40:08.597468
2013-09-03T15:40:14
2013-09-03T15:40:14
12,556,726
1
0
null
null
null
null
UTF-8
Python
false
false
6,190
py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# NOTE(review): this tool is Python 2 only (print statements below).

"""Generator for C++ structs from api json files.

The purpose of this tool is to remove the need for hand-written code that
converts to and from base::Value types when receiving javascript api calls.

Originally written for generating code for extension apis. Reference
schemas are in chrome/common/extensions/api.

Usage example:
  compiler.py --root /home/Work/src --namespace extensions windows.json tabs.json
  compiler.py --destdir gen --root /home/Work/src --namespace extensions
      windows.json tabs.json
"""

import cc_generator
import cpp_type_generator
import h_generator
import idl_schema
import json_schema
import model
import schema_bundle_generator

import optparse
import os.path
import sys


def load_schema(schema):
  # Dispatch on the file extension: .json and .idl schemas are supported.
  schema_filename, schema_extension = os.path.splitext(schema)

  if schema_extension == '.json':
    api_defs = json_schema.Load(schema)
  elif schema_extension == '.idl':
    api_defs = idl_schema.Load(schema)
  else:
    sys.exit("Did not recognize file extension %s for schema %s" %
             (schema_extension, schema))

  return api_defs


def handle_single_schema(filename, dest_dir, root, root_namespace):
  # Generates <name>.h/.cc for one schema file (writes them under
  # *dest_dir*, or prints them to stdout when no destdir is given).
  # NOTE(review): the body reads the module-level `opts` in several places
  # instead of the `root` parameter — they are the same value when invoked
  # from __main__, but the parameter is effectively unused.
  schema = os.path.normpath(filename)
  schema_filename, schema_extension = os.path.splitext(schema)
  api_defs = load_schema(schema)

  api_model = model.Model()

  for target_namespace in api_defs:
    referenced_schemas = target_namespace.get('dependencies', [])
    # Load type dependencies into the model.
    # TODO(miket): do we need this in IDL?
    for referenced_schema in referenced_schemas:
      referenced_schema_path = os.path.join(
          os.path.dirname(schema), referenced_schema + '.json')
      referenced_api_defs = json_schema.Load(referenced_schema_path)

      for namespace in referenced_api_defs:
        api_model.AddNamespace(namespace,
            os.path.relpath(referenced_schema_path, opts.root))

    # Gets the relative path from opts.root to the schema to correctly determine
    # the include path.
    relpath = os.path.relpath(schema, opts.root)
    namespace = api_model.AddNamespace(target_namespace, relpath)
    if not namespace:
      continue

    # The output filename must match the input filename for gyp to deal with it
    # properly.
    out_file = namespace.name
    type_generator = cpp_type_generator.CppTypeGenerator(
        root_namespace, namespace, namespace.unix_name)
    for referenced_namespace in api_model.namespaces.values():
      if referenced_namespace == namespace:
        continue
      type_generator.AddNamespace(
          referenced_namespace, referenced_namespace.unix_name)

    h_code = (h_generator.HGenerator(namespace, type_generator)
              .Generate().Render())
    cc_code = (cc_generator.CCGenerator(namespace, type_generator)
               .Generate().Render())

    if dest_dir:
      with open(
          os.path.join(dest_dir, namespace.source_file_dir, out_file + '.cc'),
          'w') as cc_file:
        cc_file.write(cc_code)
      with open(
          os.path.join(dest_dir, namespace.source_file_dir, out_file + '.h'),
          'w') as h_file:
        h_file.write(h_code)
    else:
      print '%s.h' % out_file
      print
      print h_code
      print
      print '%s.cc' % out_file
      print
      print cc_code


def handle_bundle_schema(filenames, dest_dir, root, root_namespace):
  # Generates the bundle files (generated_api.h / generated_schemas.*)
  # covering every schema passed on the command line.
  # Merge the source files into a single list of schemas.
  api_defs = []
  for filename in filenames:
    schema = os.path.normpath(filename)
    schema_filename, schema_extension = os.path.splitext(schema)
    api_defs.extend(load_schema(schema))

  api_model = model.Model()
  relpath = os.path.relpath(os.path.normpath(filenames[0]), root)

  for target_namespace in api_defs:
    api_model.AddNamespace(target_namespace, relpath)

  type_generator = cpp_type_generator.CppTypeGenerator(root_namespace)
  for referenced_namespace in api_model.namespaces.values():
    type_generator.AddNamespace(
        referenced_namespace, referenced_namespace.unix_name)

  generator = schema_bundle_generator.SchemaBundleGenerator(
      api_model, api_defs, type_generator)
  api_h_code = generator.GenerateAPIHeader().Render()
  schemas_h_code = generator.GenerateSchemasHeader().Render()
  schemas_cc_code = generator.GenerateSchemasCC().Render()

  if dest_dir:
    # Bundle output lands at a fixed location under dest_dir.
    basedir = os.path.join(dest_dir, 'chrome/common/extensions/api')
    with open(os.path.join(basedir, 'generated_api.h'), 'w') as h_file:
      h_file.write(api_h_code)
    with open(os.path.join(basedir, 'generated_schemas.h'), 'w') as h_file:
      h_file.write(schemas_h_code)
    with open(os.path.join(basedir, 'generated_schemas.cc'), 'w') as cc_file:
      cc_file.write(schemas_cc_code)
  else:
    print 'generated_api.h'
    print
    print api_h_code
    print
    print 'generated_schemas.h'
    print
    print schemas_h_code
    print
    print 'generated_schemas.cc'
    print
    print schemas_cc_code


if __name__ == '__main__':
  parser = optparse.OptionParser(
      description='Generates a C++ model of an API from JSON schema',
      usage='usage: %prog [option]... schema')
  parser.add_option('-r', '--root', default='.',
      help='logical include root directory. Path to schema files from specified'
      'dir will be the include path.')
  parser.add_option('-d', '--destdir',
      help='root directory to output generated files.')
  parser.add_option('-n', '--namespace', default='generated_api_schemas',
      help='C++ namespace for generated files. e.g extensions::api.')
  parser.add_option('-b', '--bundle', action="store_true",
      help='''if supplied, causes compiler to generate bundle files for the
given set of source files.''')

  (opts, args) = parser.parse_args()
  if not args:
    sys.exit(0) # This is OK as a no-op
  dest_dir = opts.destdir
  root_namespace = opts.namespace

  if opts.bundle:
    handle_bundle_schema(args, dest_dir, opts.root, root_namespace)
  else:
    handle_single_schema(args[0], dest_dir, opts.root, root_namespace)
[ "BW@BW-PC.(none)" ]
BW@BW-PC.(none)
32f142de985f427b2e3ecba10fa765f0c368c943
9460f8e795d65ff8667a9c1b0da7a141d2a9c849
/blog/views.py
ebff93e410cfe142a882386fce08dfa8d44d8c0c
[]
no_license
wadewow/myblog
3a06614872637d502a5b24c802429cfdb7b8e0a8
af498894b3379bc876a93142122a29d31119735c
refs/heads/master
2021-01-01T16:54:40.704270
2017-07-21T13:40:51
2017-07-21T13:40:51
97,951,222
0
0
null
null
null
null
UTF-8
Python
false
false
1,473
py
# coding:utf-8 from django.shortcuts import render # from django.http import HttpResponse import models # Create your views here. def home(request): ''' 下面的语句相当于 select * from article where id = 1 ''' articles = models.Article.objects.all() return render(request, 'blog/blog_home.html', {'articles': articles}) def content(request, article_id): article = models.Article.objects.get(pk = article_id) return render(request, 'blog/blog_content.html', {'article_content': article}) def edit(request,article_id): print 'id等于:', article_id if str(article_id) == '0': return render(request,'blog/blog_edit.html') article = models.Article.objects.get(pk = article_id) return render(request,'blog/blog_edit.html',{'article':article}) def form_action(request): title = request.POST.get('title') # 这里get('title')的title是根据input的name属性值title content = request.POST.get('content') article_id = request.POST.get('article_id', '0') if article_id == '0': models.Article.objects.create(title = title, content = content) articles = models.Article.objects.all() return render(request,'blog/blog_home.html',{'articles':articles}) article = models.Article.objects.get(pk = article_id) article.title = title article.content = content article.save() return render(request, 'blog/blog_content.html', {'article_content': article})
[ "949768106@qq.com" ]
949768106@qq.com
bd143b97ac92cf6eef6bbe7e91edb34eafbf4540
60e9be8297b98075afb304ebae929f9cac30cf42
/leetCode/Array/Easy/K-diff Pairs in an Array.py
95442868e44479b644d955b4d724e4873131c538
[]
no_license
sifact/Leet-Code-Problems
e4bbd0ab2d1349de32521650c9eeaa5ad3b8085f
eb62e8407dd0931841fbbb351aca5c415c226a07
refs/heads/main
2023-01-30T05:07:57.904604
2020-11-30T16:54:19
2020-11-30T16:54:19
317,285,734
0
0
null
null
null
null
UTF-8
Python
false
false
678
py
from collections import Counter


def findPairs(nums, k):
    """Return the number of unique k-diff pairs in *nums*.

    A k-diff pair is a pair of distinct indices (i, j) with
    |nums[i] - nums[j]| == k; pairs with the same pair of values count once.
    Counting over the distinct values:
      * k > 0: value v forms one pair iff v + k is also present;
      * k == 0: value v forms one pair iff it occurs more than once;
      * k < 0: no pair can exist, the result is 0.
    """
    hash_map = Counter(nums)
    count = 0
    for key in hash_map:
        # Bug fix: the original tested `k > 1`, silently missing every
        # valid pair when k == 1 (findPairs2 already used `k > 0`).
        if k > 0 and key + k in hash_map or k == 0 and hash_map[key] > 1:
            count += 1
    return count


def findPairs2(nums, k):
    """Same as findPairs, as a single generator expression
    (True/False count as 1/0 inside sum())."""
    hash_map = Counter(nums)
    return sum(k > 0 and key + k in hash_map or k == 0 and hash_map[key] > 1
               for key in hash_map)


if __name__ == '__main__':
    # Guarding the interactive part lets the functions be imported
    # without triggering the stdin reads.
    a = list(map(int, input().split()))
    num = int(input())
    print(findPairs2(a, num))
[ "noreply@github.com" ]
sifact.noreply@github.com
6fe7640c64822df4cca889a856f9099d33231595
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02860/s554783475.py
ba781c1a512917a311a200fc59b2e495d4dab5c5
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
201
py
def is_doubled(n, s):
    """Return "Yes" if s (of length n) is some string written twice
    (s == t + t), else "No".

    An odd-length string can never be a doubled string, so it is
    rejected immediately; otherwise the two halves are compared
    directly instead of the original flag-variable loop.
    """
    if n % 2 == 1:
        return "No"
    half = n // 2  # integer division replaces the original int(n/2)
    return "Yes" if s[:half] == s[half:] else "No"


if __name__ == '__main__':
    # Guarded so the judge function is importable and testable.
    n = int(input())
    s = input()
    print(is_doubled(n, s))
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
85c85d8ad12001b13683eb1cd155223c1da9f3cf
d69b96f8a2d4a0025b2513d49ad1726d53a9adcc
/sow/console.py
033266b0b1c668bcddc24cc81ce0afa56dd6f3d4
[]
no_license
mekhami/Sow
3ed4fa82f2016899924692c979d5e2ed0ca20166
aa55d69fa1d18ac9a35a24f67b126e39ca69b721
refs/heads/master
2021-01-10T19:16:59.841592
2015-06-26T15:50:00
2015-06-26T15:50:00
30,369,588
1
1
null
2015-12-31T20:00:36
2015-02-05T17:55:29
Python
UTF-8
Python
false
false
932
py
#/usr/bin/env python ################################### ## A Harvest Command Line App ## ################################### '''Harvest. Usage: sow [options] sow add [(<alias> <hours> <note>)] [-d|--date <date>] sow show (today|yesterday|week | --date <date>) sow reauth sow delete [-a|--all] [(-d|--date <date>)] Options: -h --help Show this screen. --version Show the version. ''' from docopt import docopt from commands import add, show, delete from utils import get_timesheet, get_config, reauth def _main(args, config, timesheet): if args['add']: add(args, config, timesheet) if args['show']: show(args, timesheet) if args['reauth']: reauth(config) if args['delete']: delete(args, timesheet) def main(): args = docopt(__doc__) config = get_config() timesheet = get_timesheet() _main(args, config, timesheet)
[ "Lawrence.vanderpool@gmail.com" ]
Lawrence.vanderpool@gmail.com
d481c060b21ebf733f9e03348fe8dbb008dcb1a0
0f8e3eb9c3405409418428148f97f93627a886a5
/gui.py
4e644ecf6068f86453d27bbbc2aea5287e181a51
[]
no_license
shikharsrivastava/Bot-Environment
dfd4a41ffc614f46eea129d102ba4441de39eae2
0afbfb61baae49ebe3bb22b3c257a251913c04e6
refs/heads/master
2020-06-12T15:44:21.965327
2018-01-30T08:01:53
2018-01-30T08:01:53
75,796,633
4
4
null
null
null
null
UTF-8
Python
false
false
7,020
py
# Pygame front end for a checkers engine: the board is kept as two 64-bit
# bitboards (white/black) and moves are validated/chosen by an external
# engine binary ('./a.out') driven through subprocess.
# NOTE(review): Python 2 only (print statements, integer `/` division).
import pygame,sys
from pygame.locals import *
import random
import time
from math import log
import subprocess

pygame.init()
DISPLAY = pygame.display.set_mode((800,800))
pygame.display.set_caption('Checkers')

# Color palette (RGB)
BLACK = (0,0,0)
WHITE = (255, 255, 255)
RED= (255,0,0)
GREEN = (0, 255,0)
BLUE = (0,0, 255)
AQUA=(0, 255, 255)
FUCHSIA=(255,0, 255)
GRAY=(128, 128, 128)
OLIVE=(128, 128,0)
PURPLE=(128,0, 128)
YELLOW=(255, 255,0)
TEAL=( 0, 128, 128)

# Board geometry
row=8
col=8
WIDTH=120
INITIAL_X=120   # pixel center of the top-left square
INITIAL_Y=80
SIDE=80         # square side in pixels
DISPLAY.fill(WHITE)
col1=WHITE
col2=BLACK
currentColor = col1
colboard=[]     # color of each square, filled by init()
bbw = 0         # white bitboard (module state)
bbb = 0         # black bitboard (module state)
side = 'W'      # side to move


def convertPos(pos):
    # Flip a 0..63 square index vertically (row 0 <-> row 7); the mapping
    # is its own inverse, so it converts both to and from bitboard order.
    row=pos/8
    col=pos%8
    return ((7-row)*8+col)


def buildSquare(i,j):
    # Draw one board square centered at pixel (i, j) in currentColor.
    pygame.draw.rect(DISPLAY,currentColor,(i-SIDE/2,j-SIDE/2,SIDE,SIDE))


def init():
    """Draw the empty 8x8 board with alternating square colors."""
    global currentColor;
    pygame.draw.rect(DISPLAY,BLACK,(INITIAL_X-SIDE/2,INITIAL_Y-SIDE/2,SIDE*8,SIDE*8),5)
    for i in range(0,8):
        # Alternate the starting color every row.
        if i%2==0:
            currentColor=col1
        else:
            currentColor=col2
        for j in range(0,8):
            x=INITIAL_X+j*SIDE
            y=INITIAL_Y+i*SIDE
            buildSquare(x,y)
            colboard.append(currentColor)
            currentColor=col2 if currentColor==col1 else col1


def first():
    """Return (white, black) bitboards for the initial checkers setup."""
    black = 0
    for i in range(1, 8, 2):
        row = i / 8
        col = i % 8
        pos = convertPos(i)
        black |= (1 << pos)
    for i in range(8, 15, 2):
        row = i / 8
        col = i % 8
        black |= (1 << convertPos(i))
    for i in range(17, 24, 2):
        row = i / 8
        col = i % 8
        black |= (1 << convertPos(i))
    white = 0;
    for i in range(40, 47, 2):
        row = i / 8
        col = i % 8
        white |= (1 << convertPos(i))
    for i in range(49, 56, 2):
        row = i / 8
        col = i % 8
        white |= (1 << convertPos(i))
    for i in range(56, 63, 2):
        row = i / 8
        col = i % 8
        white |= (1 << convertPos(i))
    return (white, black)


def genFen(white, black, side):
    """Serialize the bitboards into the FEN-like string the engine expects.

    'D' = white piece, 'd' = black piece, digits = runs of empty squares,
    '/' separates rows, and the side to move is appended at the end.
    """
    board = [[0 for _ in range(8)] for _ in range(8)]
    # Peel off one set bit at a time (x & -x isolates the lowest bit).
    while white > 0:
        pos = int(log(white & -white, 2))
        pos = convertPos(pos)
        row = pos / 8
        col = pos % 8
        board[row][col] = 1
        white -= white & -white
    while black > 0:
        pos = int(log(black & -black, 2))
        pos = convertPos(pos)
        row = pos / 8
        col = pos % 8
        board[row][col] = 2
        black -= black & -black
    fen = ''
    for i in range(8):
        row = ''
        count = 0
        for j in range(8):
            if board[i][j] == 0:
                count += 1
            elif board[i][j] == 1:
                if count > 0:
                    row = row + str(count)
                row = row + 'D'
                count = 0
            elif board[i][j] == 2:
                if count > 0:
                    row = row + str(count)
                row = row + 'd'
                count = 0
        if count > 0:
            row = row + str(count)
        row = row + '/'
        fen = fen + row
    fen = fen + side
    return fen


def makeBoard(white, black):
    """Redraw the board and place green (white) / red (black) pieces."""
    init()
    while white > 0:
        pos = int(log(white & - white, 2))
        pos = convertPos(pos)
        row = pos / 8
        col = pos % 8
        pygame.draw.circle(DISPLAY, GREEN, (INITIAL_X+col*SIDE, INITIAL_Y+row*SIDE), SIDE/3)
        white -= white & (-white)
    while black > 0:
        pos = int(log(black & - black, 2))
        pos = convertPos(pos)
        row = pos / 8
        col = pos % 8
        pygame.draw.circle(DISPLAY, RED, (INITIAL_X+col*SIDE, INITIAL_Y+row*SIDE), SIDE/3)
        black -= black & (-black)


def isValid(move, exe):
    """Ask the engine whether *move* is legal; on success update the
    module bitboards from the engine's reply.

    NOTE(review): bbw/bbb are assigned below without a `global`
    declaration, so Python treats them as locals and the genFen(bbw, bbb,
    ...) read above raises UnboundLocalError — this function cannot work
    as written; confirm the intended fix (a `global bbw, bbb` statement).
    """
    fen = genFen(bbw, bbb, side)
    out = subprocess.check_output([exe, 'fen', fen, 'isvalid', move])
    out = out.split()
    if out[0] == '0':
        return False
    else:
        bbw, bbb = int(out[1]), int(out[2])
        return True


def bestMove(bw, bb, side, exe):
    # Ask the engine for its best move; the reply is the new pair of
    # bitboards (Python 2: map() returns a list here).
    fen = genFen(bw, bb, side)
    out = subprocess.check_output([exe, 'fen', fen, 'best'])
    out = out.split()
    return map(int, out)


def botfight():
    """Engine vs engine: alternate sides until one has no move
    (the engine signals that with -1/-1)."""
    bw, bb = first()
    makeBoard(bw, bb)
    pygame.display.update()
    side = 'W'
    while True:
        bw, bb = bestMove(bw, bb, side, './a.out')
        if bw == -1 and bb == -1:
            print "Game over, {} loses".format(side)
            break
        else:
            makeBoard(bw, bb)
            pygame.display.update()
            time.sleep(0.5)
        side = 'B' if side == 'W' else 'W'


if __name__ == "__main__":
    botfight()

# Disabled interactive mode kept below for reference: human move entry by
# clicking squares, with the move packed into a bit field for the engine.
"""
white, black = first()
bbw = white
bbb = black
makeBoard(bbw, bbb)
print(genFen(bbw, bbb, 'W'))
prev = None
killed = 0
move = 0
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit(0)
        elif event.type==MOUSEBUTTONDOWN:
            mousePos=list(pygame.mouse.get_pos())
            mousePos[0]-=INITIAL_X-SIDE/2;
            mousePos[1]-=INITIAL_Y-SIDE/2;
            col=mousePos[0]/SIDE
            row=mousePos[1]/SIDE
            if 0<=row<=7 and 0<=col<=7:
                print row, col
                if prev == None:
                    pos = convertPos(row*8+col)
                    move |= pos
                    prev = pos
                    print "prev = ", prev
                else:
                    cur = convertPos(row*8+col)
                    print cur, prev
                    if cur - prev == 7 or cur - prev == 9:
                        print "hello"
                        move |= (cur << 6)
                    elif cur - prev == 14:
                        killed += 1
                        s = 0
                        for i in range(6, 16):
                            s += (1 << i)
                        move = move & (~s)
                        move |= (cur << 6)
                        move |= (killed << 12)
                    elif cur - prev == 18:
                        move |= (1 << (killed + 17))
                        killed += 1
                        s = 0
                        for i in range(6, 16):
                            s += (1 << i)
                        move = move & (~s)
                        move |= (cur << 6)
                        move |= (killed << 12)
                    else:
                        s = 0
                        for i in range(6, 12):
                            s += (1 << i)
                        move = move & (~s)
                        move |= (cur << 6)
                    prev = cur
                if (isValid(move, './a.out')):
                    makeBoard(bbw, bbb)
                    print((move & 0x3f), ((move >> 6) & 0x3f), ((move >> 12) & 0xf))
                    killed = ((move >> 12) & 0xf)
                    for i in range(killed):
                        print ((move >> (17+i)) & 1)
                else:
                    prev = None
                    killed = 0
                    move = 0
    pygame.display.update()
"""
[ "noreply@github.com" ]
shikharsrivastava.noreply@github.com
5e0f3c0a44b787914d3dce78b805204bdbc0bee6
45ff5b1fc0414693087050cc738010a39833a1c6
/backend/app/models/user_model.py
40b513eef4cb444ed6161121188fa11d2eab1dd3
[]
no_license
hanson190505/full-stack-fastapi-vue
8606971d86dddc341bd98fa8310c70e4aaf54560
37121a3ddc50bcabea69433ac1d8318f7c9d870e
refs/heads/main
2023-03-17T20:22:13.373644
2021-03-01T16:23:49
2021-03-01T16:23:49
327,338,913
1
0
null
null
null
null
UTF-8
Python
false
false
1,081
py
# SQLAlchemy ORM models for users, departments and frontend routes.
from app.db.base_class import Base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, String, JSON, Integer, ForeignKey


class DepartmentModel(Base):
    """A department node in a self-referencing tree (parent/children)."""
    name = Column(String(64), index=True)
    # Self-referencing FK: NULL means a top-level department.
    parent_department = Column(Integer, ForeignKey('departmentmodel.id'), nullable=True)
    # Eager-loads up to 3 levels of children in one joined query.
    sub_department = relationship('DepartmentModel', lazy='joined', join_depth=3)
    # users = relationship('UserModel', back_populates='department')


class UserModel(Base):
    """An application user; only the password hash is stored."""
    name = Column(String(128), index=True)
    hashed_password = Column(String(1024))
    # Optional contact fields, both indexed for lookup.
    mail = Column(String(64), nullable=True, index=True)
    phone = Column(String(32), nullable=True, index=True)
    # Free-form extra attributes.
    detail = Column(JSON, nullable=True)
    # department = relationship('DepartmentModel', back_populates='users')


class RouteModel(Base):
    """A frontend route entry, organized as a self-referencing tree."""
    name = Column(String(64))
    path = Column(String(64))
    # Parent route id; NULL means a root-level route.
    pid = Column(Integer, ForeignKey('routemodel.id'), nullable=True)
    title = Column(String(64), nullable=True)
    detail = Column(JSON, nullable=True)
    children = relationship('RouteModel')
[ "413506012@qq.com" ]
413506012@qq.com
d8863ebbb7cfbc6f46a2659c40eff9f0092bdcf6
eb8660d8a7c7557af0fd681a4cce305e1fc73ef9
/grpc/stt_client.py
3179370f360474849612723415585af7faaf5ca0
[ "Apache-2.0" ]
permissive
morfeusys/vosk-server
d4639eaaae7b2e171bd99618513100100d94e773
955517bfcc8a7ef3f93ed5ace50052234aa3bf74
refs/heads/master
2021-01-14T19:18:55.803416
2020-02-24T12:57:55
2020-02-24T12:57:55
242,727,733
1
0
Apache-2.0
2020-02-24T12:14:17
2020-02-24T12:14:16
null
UTF-8
Python
false
false
1,633
py
#!/usr/bin/python3 import argparse import grpc import stt_service_pb2 import stt_service_pb2_grpc CHUNK_SIZE = 4000 def gen(audio_file_name): specification = stt_service_pb2.RecognitionSpec( partial_results=True, audio_encoding='LINEAR16_PCM', sample_rate_hertz=8000 ) streaming_config = stt_service_pb2.RecognitionConfig(specification=specification) yield stt_service_pb2.StreamingRecognitionRequest(config=streaming_config) with open(audio_file_name, 'rb') as f: data = f.read(CHUNK_SIZE) while data != b'': yield stt_service_pb2.StreamingRecognitionRequest(audio_content=data) data = f.read(CHUNK_SIZE) def run(audio_file_name): channel = grpc.insecure_channel('localhost:5001') stub = stt_service_pb2_grpc.SttServiceStub(channel) it = stub.StreamingRecognize(gen(audio_file_name)) try: for r in it: try: print('Start chunk: ') for alternative in r.chunks[0].alternatives: print('alternative: ', alternative.text) print('words: ', alternative.words) print('Is final: ', r.chunks[0].final) print('') except LookupError: print('No available chunks') except grpc._channel._Rendezvous as err: print('Error code %s, message: %s' % (err._state.code, err._state.details)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--path', required=True, help='audio file path') args = parser.parse_args() run(args.path)
[ "nshmyrev@gmail.com" ]
nshmyrev@gmail.com
373f9f9cd537df8df9fb85fee9220607f78f2be6
de5adea6b67660bfc45150ee56b6cf4957c8c4e7
/main_app/migrations/0001_initial.py
f522eb7c2263895a61cc3153af186e867e0d5fdf
[]
no_license
arthuroe/treasure_gram
70049a25009318d947488dea28505f65816d9d84
5ce93ed21284fee17640b15546011848de3115ac
refs/heads/develop
2020-03-18T02:16:19.413381
2018-05-23T17:18:58
2018-05-23T17:24:16
134,182,468
0
0
null
2018-05-28T18:52:48
2018-05-20T20:02:49
Python
UTF-8
Python
false
false
824
py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-05-20 21:27 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Treasure', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('value', models.DecimalField(decimal_places=2, max_digits=10)), ('materials', models.CharField(max_length=100)), ('location', models.CharField(max_length=100)), ('img_url', models.CharField(max_length=100)), ], ), ]
[ "arthur.orache@gmail.com" ]
arthur.orache@gmail.com
639a8318adc71b502d3f0053794000dbc4d50a3c
6f8b9e95b2833de2a4f2c8413fe45133e540a5cf
/Sequences/tuples_examples.py
93edf2eb0dd93f374233d64740dad825f86671a4
[]
no_license
riteshelias/UMC
a658665d8653ef1ba72d65030b38da7462783ae7
e30d42192290905b0a878b66f7634500868b174d
refs/heads/master
2023-01-20T19:32:51.885534
2020-11-27T04:10:22
2020-11-27T04:10:22
316,398,737
0
0
null
null
null
null
UTF-8
Python
false
false
1,709
py
# Teaching script: tuple unpacking and nested-tuple indexing over a list
# of dish records. Each record is (name, cuisine, category, ingredients),
# where ingredients is a tuple of (number, ingredient_name) pairs.
dishes = [
    ("Kaju Katli", "Desi", "Dessert",
        (
            (1, "Cashew Nuts"),
            (2, "Mawa"),
            (3, "Sugar")
        )
     ),
    ("Machow Soup", "Chinese", "Soup",
        (
            (1, "Noodles"),
            (2, "Chicken"),
            (3, "Chopped Veggies"),
            (4, "Soya Sauce")
        )
     ),
    ("Hara bhara Kebab", "Desi", "Starters",
        (
            (1, "Spinach"),
            (2, "Corn"),
            (3, "Cheese"),
            (4, "Potatoes")
        )
     ),
    ("Tandoori Chicken", "Mughlai", "Starters",
        (
            (1, "Chicken"),
            (2, "Spices"),
            (3, "Butter")
        )
     ),
    ("Navratan Pulav", "Awadhi", "Main Course",
        (
            (1, "Mix Veggies"),
            (2, "Basmati Rice"),
            (3, "Dry Fruits")
        )
     ),
    ("Rogan Josh", "Kashmiri", "Main Course",
        (
            (1, "Mutton"),
            (2, "Spices"),
            (3, "Oil"),
            (4, "Onions")
        )
     ),
    ("Rosogolla", "Bengali", "Dessert",
        (
            (1, "Milk"),
            (2, "Sugar"),
            (3, "Water"),
            (4, "Rose essence")
        )
     ),
]

print(len(dishes))
print()

# for dish in dishes:
#     name, ingredients, category = dish

# Unpack each 4-tuple directly in the for statement.
for name, cuisine, category, ingredients in dishes:
    # print("Name: {}, Ingredients: {}, Category: {}".format(dish[0], dish[1], dish[2]))
    print("Name: {}, Cuisine: {}, Category: {}, Ingredients: {}".format(name, cuisine, category, ingredients))

# Index step by step into the second record ("Machow Soup").
dish = dishes[1]
print(dish)
print()

ingredient = dish[3]     # the ingredients tuple
print(ingredient)

item = ingredient[1]     # the (2, "Chicken") pair
print(item)
print()

spitem = item[1]         # "Chicken"
print(spitem)

# Same drill-down written as one chained index expression.
spitem1 = dishes[1][3][1][1]
print(spitem1)
print(dishes[1][3][1][1])

# for item in ingredient:
#     print(item)
[ "ritesh.elias@gmail.com" ]
ritesh.elias@gmail.com
6db6eac332058fd6e1c5a656fd107b838cd08767
2e2494148f19a2f51383a7eb8853c746a60b6db9
/MemoryBlock.py
d5ff3a06afac5bbd0652a8c34e08b6793ac6c744
[]
no_license
GrimaldoMike/Compiladores
a79614d77ac9baed3837d76ccfa70f664b62b3ee
2d01512b537f523d608d79e91ec163ee7e2ab529
refs/heads/master
2021-01-10T17:40:55.376425
2016-05-06T20:23:58
2016-05-06T20:23:58
53,536,738
0
0
null
null
null
null
UTF-8
Python
false
false
2,580
py
#NO FUE IMPLEMENTADO PARA EL PROYECTO class MemoryBlock: def __init__(self, start_dir, ints_start_dir, floats_start_dir, chars_start_dir, strings_start_dir, limit): '''All blocks have to be in ascending order and non overlapping on init. We leave that to the developer that uses this class''' self.bools = [ start_dir, 0 ] self.ints = [ ints_start_dir, 0 ] self.floats = [ floats_start_dir, 0 ] self.chars = [ chars_start_dir, 0 ] self.strings = [ strings_start_dir, 0 ] self.limit = limit def __str__(self): return "MemoryBlock ({start}-{end}): {boolno} bools, {intno} ints, {floatno} floats, {charno} chars, {stringno} strings".format( start=self.bools[0], end=self.limit, boolno=self.bools[1], intno=self.ints[1], floatno=self.floats[1], charno=self.chars[1], stringno=self.strings[1]) def add_bool(self, num=1): '''Adds a var to the memory block''' if ( self.bools[0] + self.bools[1] + num ) < self.ints[0]: self.bools[1] += num return ( self.bools[0] + self.bools[1] - num ) else: print ('Stackoverflow: Se intenta exceder el limite de memoria para el boolean.') def add_int(self, num=1): '''Adds a var to the memory block''' if ( self.ints[0] + self.ints[1] + num ) < self.floats[0]: self.ints[1] += num return ( self.ints[0] + self.ints[1] - num ) else: print ('Stackoverflow: Se intenta exceder el limite de memoria para el int.') def add_float(self, num=1): '''Adds a var to the memory block''' if ( self.floats[0] + self.floats[1] + num ) < self.chars[0]: self.floats[1] += num return ( self.floats[0] + self.floats[1] - num ) else: print ('Stackoverflow: Se intenta exceder el limite de memoria para el foat.') def add_char(self, num=1): '''Adds a var to the memory block''' if ( self.chars[0] + self.chars[1] + num ) < self.strings[0]: self.chars[1] += num return ( self.chars[0] + self.chars[1] - num ) else: print ('Stackoverflow: Se intenta exceder el limite de memoria para el char.') def add_string(self, num=1): '''Adds a var to the memory block''' if ( self.strings[0] + 
self.strings[1] + num ) < self.limit: self.strings[1] += num return ( self.strings[0] + self.strings[1] - num ) else: print ('Stackoverflow: Se intenta exceder el limite de memoria para el string.')
[ "grimaldo.mike@hotmail.com" ]
grimaldo.mike@hotmail.com
866fcd777ed57198ecc587fa85d3a71e6974ea99
9d1491368c5e87760131ba27d252ee2d10620433
/gammapy/spectrum/powerlaw.py
39edaeca1329962422682f6d153c6cf79d653ff1
[ "BSD-3-Clause" ]
permissive
cnachi/gammapy
f9295306a8e81d0b7f4d2111b3fa3679a78da3f7
3d3fc38c111d2f490d984082750f8003580fe06c
refs/heads/master
2021-01-20T23:37:59.409914
2016-06-09T08:36:33
2016-06-09T08:36:33
60,764,807
0
0
null
2016-06-09T09:55:54
2016-06-09T09:55:54
null
UTF-8
Python
false
false
6,540
py
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Power law spectrum helper functions. Convert differential and integral fluxes with error propagation. """ from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np __all__ = [ 'power_law_evaluate', 'power_law_pivot_energy', 'df_over_f', 'power_law_flux', 'power_law_integral_flux', 'g_from_f', 'g_from_points', 'I_from_points', 'f_from_points', 'f_with_err', 'I_with_err', 'compatibility', ] E_INF = 1e10 # practically infinitely high flux g_DEFAULT = 2 def power_law_evaluate(energy, norm, gamma, energy_ref): r"""Differential flux at a given energy. .. math:: f(energy) = N (E / E_0) ^ - \Gamma with norm ``N``, energy ``E``, reference energy ``E0`` and spectral index :math:`\Gamma`. Parameters ---------- energy : array_like Energy at which to compute the differential flux gamma : array_like Power law spectral index """ return norm * (energy / energy_ref) ** (-gamma) def power_law_pivot_energy(energy_ref, f0, d_gamma, cov): """Compute pivot (a.k.a. decorrelation) energy. Defined as smallest df / f. Reference: http://arxiv.org/pdf/0910.4881 """ pivot_energy = energy_ref * np.exp(cov / (f0 * d_gamma ** 2)) return pivot_energy def df_over_f(e, e0, f0, df0, dg, cov): """Compute relative flux error at any given energy. Used to draw butterflies. Reference: http://arxiv.org/pdf/0910.4881 Equation (1) """ term1 = (df0 / f0) ** 2 term2 = 2 * cov / f0 * np.log(e / e0) term3 = (dg * np.log(e / e0)) ** 2 return np.sqrt(term1 - term2 + term3) def _conversion_factor(g, e, e1, e2): """Conversion factor between differential and integral flux.""" # In gamma-ray astronomy only falling power-laws are used. # Here we force this, i.e. give "correct" input even if the # user gives a spectral index with an incorrect sign. 
g = np.abs(g) term1 = e / (-g + 1) term2 = (e2 / e) ** (-g + 1) - (e1 / e) ** (-g + 1) return term1 * term2 def power_law_flux(I=1, g=g_DEFAULT, e=1, e1=1, e2=E_INF): """Compute differential flux for a given integral flux. Parameters ---------- I : array_like Integral flux in ``energy_min``, ``energy_max`` band g : array_like Power law spectral index e : array_like Energy at which to compute the differential flux e1 : array_like Energy band minimum e2 : array_like Energy band maximum Returns ------- flux : `numpy.array` Differential flux at ``energy``. """ return I / _conversion_factor(g, e, e1, e2) def power_law_integral_flux(f=1, g=g_DEFAULT, e=1, e1=1, e2=E_INF): """Compute integral flux for a given differential flux. Parameters ---------- f : array_like Differential flux at ``energy`` g : array_like Power law spectral index e : array_like Energy at which the differential flux is given e1 : array_like Energy band minimum e2 : array_like Energy band maximum Returns ------- flux : `numpy.array` Integral flux in ``energy_min``, ``energy_max`` band """ return f * _conversion_factor(g, e, e1, e2) def g_from_f(e, f, de=1): """Spectral index at a given energy e for a given function f(e)""" e1, e2 = e, e + de f1, f2 = f(e1), f(e2) return g_from_points(e1, e2, f1, f2) def g_from_points(e1, e2, f1, f2): """Spectral index for two given differential flux points""" return -np.log(f2 / f1) / np.log(e2 / e1) def I_from_points(e1, e2, f1, f2): """Integral flux in energy bin for power law""" g = g_from_points(e1, e2, f1, f2) pl_int_flux = (f1 * e1 / (-g + 1) * ((e2 / e1) ** (-g + 1) - 1)) return pl_int_flux def f_from_points(e1, e2, f1, f2, e): """Linear interpolation""" e1 = np.asarray(e1, float) e2 = np.asarray(e2, float) f1 = np.asarray(f1, float) f2 = np.asarray(f2, float) e = np.asarray(e, float) logdy = np.log(f2 / f1) logdx = np.log(e2 / e1) logy = np.log(f1) + np.log(e / e1) * (logdy / logdx) return np.exp(logy) def f_with_err(I_val=1, I_err=0, g_val=g_DEFAULT, g_err=0, 
e=1, e1=1, e2=E_INF): """Wrapper for f so the user doesn't have to know about the uncertainties module""" from uncertainties import unumpy I = unumpy.uarray(I_val, I_err) g = unumpy.uarray(g_val, g_err) _f = power_law_flux(I, g, e, e1, e2) f_val = unumpy.nominal_values(_f) f_err = unumpy.std_devs(_f) return f_val, f_err def I_with_err(f_val=1, f_err=0, g_val=g_DEFAULT, g_err=0, e=1, e1=1, e2=E_INF): """Wrapper for f so the user doesn't have to know about the uncertainties module""" from uncertainties import unumpy f = unumpy.uarray(f_val, f_err) g = unumpy.uarray(g_val, g_err) _I = power_law_integral_flux(f, g, e, e1, e2) I_val = unumpy.nominal_values(_I) I_err = unumpy.std_devs(_I) return I_val, I_err def compatibility(par_low, par_high): """Quantify spectral compatibility of power-law measurements in two energy bands. Reference: 2008ApJ...679.1299F Equation (2) Compute spectral compatibility parameters for the situation where two power laws were measured in a low and a high spectral energy band. par_low and par_high are the measured parameters, which must be lists in the following order: e, f, f_err, g, g_err where e is the pivot energy, f is the flux density and g the spectral index """ # Unpack power-law paramters e_high, f_high, f_err_high, g_high, g_err_high = par_high e_low, f_low, f_err_low, g_low, g_err_low = par_low log_delta_e = np.log10(e_high) - np.log10(e_low) log_delta_f = np.log10(f_high) - np.log10(f_low) # g_match is the index obtained by connecting the two points # with a power law, i.e. a straight line in the log_e, log_f plot g_match = -log_delta_f / log_delta_e # sigma is the number of standar deviations the match index # is different from the measured index in one band. # (see Funk et al. (2008ApJ...679.1299F) eqn. 2) sigma_low = (g_match - g_low) / g_err_low sigma_high = (g_match - g_high) / g_err_high sigma_comb = np.sqrt(sigma_low ** 2 + sigma_high ** 2) return g_match, sigma_low, sigma_high, sigma_comb
[ "Deil.Christoph@gmail.com" ]
Deil.Christoph@gmail.com
c82dcbc9cc057d4d5d64d87082af0f1e59d0a74b
1eddf34d87d1c8fa06a71dd934bfdc4de8fd6752
/binary_files_generation/stdp_table_generator.py
c928969be2abe27e46d842b9ea2238768d31ef9e
[]
no_license
galluppf/spackage_conv
b6367f0cd93ef02891512733e83a70f984f9b2a3
902c6d3be1a4fb7692056814eafd4d94a75a59d6
refs/heads/master
2021-01-13T02:32:09.383591
2013-09-30T12:04:41
2013-09-30T12:04:41
12,670,028
0
1
null
null
null
null
UTF-8
Python
false
false
7,185
py
#!/usr/bin/python """ Utility to generate an STDP table for SpiNNaker __author__="francesco" __date__ ="$22-Mar-2011 18:01:14$" """ BINARY_DIRECTORY = '../binaries/' import ConfigParser, sys from numpy import arange, zeros from math import exp, log from struct import pack from pacman import * # packs an array with a given mask for every element. maybe there's a python function doing this? like [ out += pack(mask,i) for i in array ] def packArray(array, mask): out = "" for i in array: out += pack(mask,i) # h = 4bit words return out DEBUG = pacman_configuration.getboolean('stdp_table_generator', 'debug') p1 = 256 # packs an array with a given mask for every element def packArray(array, mask): out = "" for i in array: out += pack(mask,i) # h = 4bit words # print out return out def setHeaders(w_min, w_max, ltp_time_window, ltd_time_window, resolution, words): s = pack("<h", w_min*p1) s += pack("<h", w_max*p1) s += pack("<b", ltp_time_window) s += pack("<b", ltd_time_window) s += pack("<b", resolution) s += pack("<b", int( log(resolution, 2)) ) s += pack("<b", words) return s def calc_STDP_table(ltp_time_window, ltd_time_window, resolution, A_plus, A_minus, tau_plus, tau_minus, words, zero_value=0): # print ltd_time_window+ltp_time_window, resolution*words assert ltd_time_window+ltp_time_window < resolution*words*32*2, "Time window exceeds maxmimum size of %d msec. 
Decrease ltd/ltp time window or resolution" % (resolution*words*32*2+1) ltd = arange(resolution*32*4, 0, -resolution) ltp = arange(resolution, resolution*4*32+resolution, resolution) if DEBUG: print "[ stdp_table_generator ] :" ,ltd, ltp out = [] for l in ltd: out.append( (A_minus*exp(float(-l)/tau_minus) + A_minus*exp(float(-(l+1))/tau_minus))/2 ) if zero_value != 0: print "[ stdp_table_generator ] : setting value in dt = 0 to %f" % zero_value out.append(zero_value) for l in ltp: out.append( (A_plus*exp(float(-l)/tau_plus) + A_plus*exp(float(-(l+1))/tau_plus))/2 ) # Scaling out = [ int(i*p1) for i in out ] # words*32 is the size of the ltp and ltd window. The whole table is words*32 + 1 (value in 0) + words*32 = words*32*2+1 bytes long # left_bound = int(resolution*32*4-ltd_time_window/resolution) # right_bound = int(words*32+1+ltp_time_window/resolution) left_bound = 128-ltd_time_window right_bound = 129 + ltp_time_window # Truncating the time window with the one specified by ltd/ltp_time_window # print left_bound, right_bound out[:left_bound] = zeros(left_bound, 'int') out[right_bound:] = zeros(128-ltp_time_window, 'int') if DEBUG: print out return out def compile_stdp_table(cfg, out_filename): """ compiles an stdp table given dictionary cfg and an output file name cfg is in the format cfg['ltp_time_window'], cfg['ltd_time_window'], cfg['resolution'], cfg['A_plus'], cfg['A_minus'], cfg['tau_plus'], cfg['tau_minus'], cfg['words'], cfg['zero_value'] """ print "[ stdp_table_generator ] : Writing file", out_filename f = open(out_filename, mode='w+') print "[ stdp_table_generator ] : Writing headers" f.write(setHeaders(cfg['w_min'], cfg['w_max'], cfg['ltd_time_window'], cfg['ltp_time_window'], cfg['resolution'], cfg['words'])) s = calc_STDP_table(cfg['ltp_time_window'], cfg['ltd_time_window'], cfg['resolution'], cfg['A_plus'], cfg['A_minus'], cfg['tau_plus'], cfg['tau_minus'], cfg['words'], cfg['zero_value']) f.write(packArray(s,'<b')) f.close() print "[ 
stdp_table_generator ] : Done!" def compile_stdp_tts_table(cfg, out_filename): """ compiles an stdp table given dictionary cfg and an output file name cfg is in the format cfg['ltp_time_window'], cfg['ltd_time_window'], cfg['resolution'], cfg['A_plus'], cfg['A_minus'], cfg['tau_plus'], cfg['tau_minus'], cfg['words'], cfg['zero_value'] """ print "[ stdp_table_generator ] : Writing file", out_filename f = open(out_filename, mode='w+') print "Writing headers" f.write(setHeaders(cfg['w_min'], cfg['w_max'], cfg['ltd_time_window'], cfg['ltp_time_window'], cfg['resolution'], cfg['words'])) s = calc_STDP_table(cfg['ltp_time_window'], cfg['ltd_time_window'], cfg['resolution'], cfg['A_plus'], cfg['A_minus'], cfg['tau_plus'], cfg['tau_minus'], cfg['words'], cfg['zero_value']) f.write(packArray(s,'<b')) f.write(pack("<h", cfg['L_parameter'])) f.close() print "Done!" def compile_stdp_table_from_db(db): print "\n[ stdp_table_generator ] : calculating STDP tables" plasticity_parameters = db.get_plasticity_parameters() if len(plasticity_parameters) < 1: print "[ stdp_table_generator ] : Nothing to do...\n" return for p in plasticity_parameters: if DEBUG: print p out_file_name = BINARY_DIRECTORY + "stdp_table_" + str(p['x']) + "_" + str(p['y']) + "_" + str(p['p']) + ".dat" # FIXME read defaults from pacman cfg parameters = eval (p['parameters']) if DEBUG: print parameters if 'ltd_time_window' not in parameters.keys(): parameters['ltd_time_window'] = pacman_configuration.getint('stdp_table_generator', 'ltd_time_window') if 'ltp_time_window' not in parameters.keys(): parameters['ltp_time_window'] = pacman_configuration.getint('stdp_table_generator', 'ltp_time_window') if 'words' not in parameters.keys(): parameters['words'] = pacman_configuration.getint('stdp_table_generator', 'words') if 'zero_value' not in parameters.keys(): parameters['zero_value'] = eval(pacman_configuration.get('stdp_table_generator', 'zero_value')) if DEBUG: print "[ stdp_table_generator ] : parameters: ", 
parameters print "[ stdp_table_generator ] : p: ", p if p['method'] == 'FullWindow': print "[ stdp_table_generator ] : computing STDP table for FullWindow rule" compile_stdp_table(parameters, out_file_name) if p['method'] == 'SpikePairRule': print "[ stdp_table_generator ] : computing STDP table for SpikePair rule" compile_stdp_table(parameters, out_file_name) if p['method'] == 'TimeToSpike': print "[ stdp_table_generator ] : computing STDP table for TimeToSpike rule" compile_stdp_tts_table(parameters, out_file_name) if __name__ == "__main__": db = load_db(sys.argv[1]) # IMPORTS THE DB (it will also load the model libraray by default) compile_stdp_table_from_db(db)
[ "francesco@inspiron.local" ]
francesco@inspiron.local
79505b2c69220a3b4844e0e3ff6288faa3bd033b
006f73f4cc37dda59904a85d346186897f00834a
/sorteo/urls.py
3807308aa78b5b230eff8c9c0cdb1306d361e6ce
[]
no_license
nathanbernal/sorteo_django
4ecd2cd85f5d2dcf7d0b825c5d6327ff080c3380
b8c4ea6b674b3cdff3cd5aed002222955c592a6e
refs/heads/main
2023-04-26T02:39:50.711438
2021-05-19T03:01:33
2021-05-19T03:01:33
368,727,484
0
0
null
null
null
null
UTF-8
Python
false
false
580
py
from django.urls import include, path from rest_framework import routers from api import views router = routers.DefaultRouter() router.register(r'users', views.UserViewSet) router.register(r'groups', views.GroupViewSet) router.register(r'usuario', views.UsuarioViewSet) router.register(r'activacion', views.ActivacionViewSet) # Wire up our API using automatic URL routing. # Additionally, we include login URLs for the browsable API. urlpatterns = [ path('', include(router.urls)), path('api-auth/{email}', include('rest_framework.urls', namespace='rest_framework')), ]
[ "nathanbernal@gmail.com" ]
nathanbernal@gmail.com
66f7f0ea804830c3090d2f78537d4b535a84b454
0bcfdf3ba3a0083a5254388bd8bd1d24bdb70e2a
/app/models.py
c44d7700e6af934c5cdb1ecf709c97b00c275fe9
[]
no_license
Quinnan-Gill/microblog
1d216d6a6f49162080a13209e51d13c2c7169af7
c0b34a529b3434b1f29c139a90082c1b41b511ff
refs/heads/master
2022-12-10T15:29:17.055685
2019-09-12T03:01:58
2019-09-12T03:01:58
165,971,072
0
0
null
2022-12-08T01:33:25
2019-01-16T04:03:10
Python
UTF-8
Python
false
false
10,901
py
import json import redis import rq import base64 import os from time import time from datetime import datetime, timedelta from hashlib import md5 from time import time from flask import current_app, url_for from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash import jwt from app import db, login from app.search import add_to_index, remove_from_index, query_index class SearchableMixin(object): @classmethod def search(cls, expression, page, per_page): ids, total = query_index(cls.__tablename__, expression, page, per_page) if total == 0: return cls.query.filter_by(id=0), 0 when = [] for i in range(len(ids)): when.append((ids[i], i)) return cls.query.filter(cls.id.in_(ids)).order_by( db.case(when, value=cls.id)), total @classmethod def before_commit(cls, session): session._changes = { 'add': list(session.new), 'update': list(session.dirty), 'delete': list(session.deleted) } @classmethod def after_commit(cls, session): for obj in session._changes['add']: if isinstance(obj, SearchableMixin): add_to_index(obj.__tablename__, obj) for obj in session._changes['update']: if isinstance(obj, SearchableMixin): add_to_index(obj.__tablename__, obj) for obj in session._changes['delete']: if isinstance(obj, SearchableMixin): remove_from_index(obj.__tablename__, obj) @classmethod def reindex(cls): for obj in cls.query: add_to_index(cls.__tablename__, obj) class PaginatedAPIMixin(object): @staticmethod def to_collection_dict(query, page, per_page, endpoint, **kwargs): resources = query.paginate(page, per_page, False) data = { 'items': [item.to_dict() for item in resources.items], '_meta': { 'page': page, 'per_page': per_page, 'total_page': resources.pages, 'total_items': resources.total }, '_links': { 'self': url_for(endpoint, page=page, per_page=per_page, **kwargs), 'next': url_for(endpoint, page=page + 1, per_page=per_page, **kwargs) if resources.has_next else None, 'prev': url_for(endpoint, page=page - 1, per_page=per_page, 
**kwargs) if resources.has_prev else None } } return data db.event.listen(db.session, 'before_commit', SearchableMixin.before_commit) db.event.listen(db.session, 'after_commit', SearchableMixin.after_commit) followers = db.Table( 'followers', db.Column('follower_id', db.Integer, db.ForeignKey('user.id')), db.Column('followed_id', db.Integer, db.ForeignKey('user.id')) ) class User(PaginatedAPIMixin, UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(64), index=True, unique=True) email = db.Column(db.String(120), index=True, unique=True) password_hash = db.Column(db.String(128)) posts = db.relationship('Post', backref='author', lazy='dynamic') about_me = db.Column(db.String(140)) last_seen = db.Column(db.DateTime, default=datetime.utcnow) followed = db.relationship( 'User', secondary=followers, primaryjoin=(followers.c.follower_id == id), secondaryjoin=(followers.c.followed_id == id), backref=db.backref('followers', lazy='dynamic'), lazy='dynamic') messages_sent = db.relationship('Message', foreign_keys='Message.sender_id', backref='author', lazy='dynamic') messages_received = db.relationship('Message', foreign_keys='Message.recipient_id', backref='recipient', lazy='dynamic') last_message_read_time = db.Column(db.DateTime) notifications = db.relationship('Notification', backref='user', lazy='dynamic') tasks = db.relationship('Task', backref='user', lazy='dynamic') token = db.Column(db.String(32), index=True, unique=True) token_expiration = db.Column(db.DateTime) def __repr__(self): return '<User {}>'.format(self.username) def set_password(self, password): self.password_hash = generate_password_hash(password) def check_password(self, password): return check_password_hash(self.password_hash, password) def avatar(self, size): digest = md5(self.email.lower().encode('utf-8')).hexdigest() return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format( digest, size) def follow(self, user): if not self.is_following(user): 
self.followed.append(user) def unfollow(self, user): if self.is_following(user): self.followed.remove(user) def is_following(self, user): return self.followed.filter( followers.c.followed_id == user.id).count() > 0 def followed_posts(self): followed = Post.query.join( followers, (followers.c.followed_id == Post.user_id)).filter( followers.c.follower_id == self.id) own = Post.query.filter_by(user_id=self.id) return followed.union(own).order_by(Post.timestamp.desc()) def get_reset_password_token(self, expires_in=600): return jwt.encode( {'reset_password': self.id, 'exp': time() + expires_in}, app.config['SECRET_KEY'], algorithm='HS256').decode('utf-8') def new_messages(self): last_read_time = self.last_message_read_time or datetime(1900, 1, 1) return Message.query.filter_by(recipient=self).filter( Message.timestamp > last_read_time).count() def add_notification(self, name, data): self.notifications.filter_by(name=name).delete() n = Notification(name=name, payload_json=json.dumps(data), user=self) db.session.add(n) return n def launch_task(self, name, description, *args, **kwargs): rq_job = current_app.task_queue.enqueue('app.tasks.' 
+ name, self.id, *args, **kwargs) task = Task(id=rq_job.get_id(), name=name, description=description, user=self) db.session.add(task) return task def get_tasks_in_progress(self): return Task.query.filter_by(user=self, complete=False).all() def get_task_in_progress(self, name): return Task.query.filter_by(name=name, user=self, complete=False).first() def get_tasks_in_progress(self, name): return Task.query.filter_by(name=name, user=self, complete=False).first() def to_dict(self, include_email=False): data = { 'id': self.id, 'username': self.username, 'last_seen': self.last_seen.isoformat() + 'Z', 'about_me': self.about_me, 'post_count': self.posts.count(), 'follower_count': self.followers.count(), 'followed_count': self.followed.count(), '_links': { 'self': url_for('api.get_user', id=self.id), 'followers': url_for('api.get_followers', id=self.id), 'followed': url_for('api.get_followed', id=self.id), 'avatar': self.avatar(128) } } if include_email: data['email'] = self.email return data def from_dict(self, data, new_user=False): for field in ['username', 'email', 'about_me']: if field in data: setattr(self, field, data[field]) if new_user and 'password' in data: self.set_password(data['password']) def get_token(self, expires_in=3600): now = datetime.utcnow() if self.token and self.token_expiration > now + timedelta(seconds=60): return self.token self.token = base64.b64encode(os.urandom(24)).decode('utf-8') self.token_expiration = now + timedelta(seconds=expires_in) db.session.add(self) return self.token def revoke_token(self): self.token_expiration = datetime.utcnow() - timedelta(seconds=1) @staticmethod def verify_reset_password_token(token): try: id = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])['reset_password'] except: return return User.query.get(id) @staticmethod def check_token(token): user = User.query.filter_by(token=token).first() if user is None or user.token_expiration < datetime.utcnow(): return None return user @login.user_loader 
def load_user(id): return User.query.get(int(id)) class Post(SearchableMixin, db.Model): __searchable__ = ['body'] id = db.Column(db.Integer, primary_key=True) body = db.Column(db.String(140)) timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) language = db.Column(db.String(5)) def __repr__(self): return '<Post {}>'.format(self.body) class Message(db.Model): id = db.Column(db.Integer, primary_key=True) sender_id = db.Column(db.Integer, db.ForeignKey('user.id')) recipient_id = db.Column(db.Integer, db.ForeignKey('user.id')) body = db.Column(db.String(150)) timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow) def __repr__(self): return '<Message {}>'.format(self.body) class Notification(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(128), index=True) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) timestamp = db.Column(db.Float, index=True, default=time) payload_json = db.Column(db.Text) def get_data(self): return json.loads(str(self.payload_json)) class Task(db.Model): id = db.Column(db.String(36), primary_key=True) name = db.Column(db.String(128), index=True) description = db.Column(db.String(128)) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) complete = db.Column(db.Boolean, default=False) def get_rq_job(self): try: rq_job = rq.job.Job.fetch(self.id, connection=current_app.redis) except (redis.exceptions.RedisError, rq.exceptions.NoSuchJobError): return None return rq_job def get_process(self): job = self.get_rq_job() return job.meta.get('progress', 0) if job is not None else 100
[ "quinnan.gill@gmail.com" ]
quinnan.gill@gmail.com
3b1a469d9c82b2869b62462652c2a0c924e3bb31
470e0a9dc07edfe13ca68f2a1b6d60d0e395e095
/3-2.py
b67172d7abbc097ec46a4caa894c73eba80c02c4
[]
no_license
mj08021/ThisIsCodingTestforGetaJob
77ce8edab2bd855db9b96597982f58251d0bd31e
ad98b368956937065c6c396b2806351a4eaf12a2
refs/heads/main
2023-04-28T10:51:02.012344
2021-05-16T05:51:58
2021-05-16T05:51:58
316,853,768
0
0
null
null
null
null
UTF-8
Python
false
false
614
py
# N, M, K를 공백으로 구분하여 입력받기 n, m, k = map(int, input().split()) # N개의 수를 공백으로 구분하여 입력받기 data = list(map(int, input().split())) data.sort() # 입력받은 수 정렬 first = data[n - 1] # 가장 큰 수 second = data[n - 2] # 두 번째로 큰 수 # 가장 큰 수가 더해지는 횟수 계산 count = int(m / (k + 1)) * k count += m % (k + 1) result = 0 result += (count) * first # 가장 큰 수 더하기 result += (m - count) * second # 두 번째로 큰 수 더하기 print(result) # 최종 답안 출력 # ex) input # 5 8 3 # 2 4 5 4 6
[ "replituser@example.com" ]
replituser@example.com
f2a7ee60c707d01abd0cb97f85cf647ce9ebf4e3
a6df74bc7c139734bd9ce9f48d51e08fdc7d7efb
/article/migrations/0006_auto_20210311_1721.py
116d4f2900f9f0f393ad9eb58894d557a6c11b5c
[]
no_license
Erlan1998/python_group_7_homework_68_Erlan_Kurbanaliev
5a7f210e51f1998e5d52cdeb42538f2786af3f9f
fdc92be2c5187c78fecdc713f58e0e3e9fc62cb1
refs/heads/master
2023-05-03T17:01:59.066596
2021-05-26T13:28:41
2021-05-26T13:28:41
368,165,221
0
0
null
null
null
null
UTF-8
Python
false
false
364
py
# Generated by Django 3.1.6 on 2021-03-11 17:21 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('article', '0005_auto_20210311_1319'), ] operations = [ migrations.RenameField( model_name='article', old_name='tags', new_name='tags_old', ), ]
[ "kurbanalieverlan@gmail.com" ]
kurbanalieverlan@gmail.com
ab6ea8ec66229564a0cc2f4945f5415503dcfec8
1bb20fd77f973f23878c04b1784569ebe76ca645
/model/distrib_state.py
3138fe08510eea286b89c9cba72bb01bc01931c2
[]
no_license
keyofdeath/Tp-conceprion-objet
af25a838231547678e24aea7bd59533946a554e7
45e03ef694684c364f38f2592c5d6675fde04bdd
refs/heads/master
2020-04-23T10:26:19.035227
2019-02-17T09:32:46
2019-02-17T09:32:46
171,104,075
0
0
null
null
null
null
UTF-8
Python
false
false
3,344
py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import import logging.handlers import os PYTHON_LOGGER = logging.getLogger(__name__) if not os.path.exists("log"): os.mkdir("log") HDLR = logging.handlers.TimedRotatingFileHandler("log/DistribState.log", when="midnight", backupCount=60) STREAM_HDLR = logging.StreamHandler() FORMATTER = logging.Formatter("%(asctime)s %(filename)s [%(levelname)s] %(message)s") HDLR.setFormatter(FORMATTER) STREAM_HDLR.setFormatter(FORMATTER) PYTHON_LOGGER.addHandler(HDLR) PYTHON_LOGGER.addHandler(STREAM_HDLR) PYTHON_LOGGER.setLevel(logging.DEBUG) # Absolute path to the folder location of this python file FOLDER_ABSOLUTE_PATH = os.path.normpath(os.path.dirname(os.path.abspath(__file__))) class DistrbState: def __init__(self, distrib): """ :param distrib: (Distrib) distributeur """ self.distrib = distrib def inserer_carte(self, card_number): """ Méthode pour inserer une carte dans la machine. Puis met dans l'attribut carte_inseree la carte inserée. :param card_number: (string) Numéro de ma carte inserée :return: (bool) True la carte est trouvée. False la carte n'a pas été trouvée """ raise Exception("Inserer carte: Can call this function in this state") def saisire_code(self, code): """ Regarde si le code saisi est correct :param code: (string) code entré :return: (bool) True code correct sinon False """ raise Exception("Saisire Code: Can call this function in this state") def menu(self, action): """ Menu ou l'utilisateur choisie se qu'il veut faire :param action: (int) Utiliser les constantes dans la classe Distrib :return: (object) Retourn les infos en fonction de l'action choisie """ raise Exception("Menu: Can call this function in this state") def attente_compt_choisit(self, acount_number): """ Fonction pour obtenir les données d'un compte. 
:param acount_number: (string) numero du compte :return: (Dictionnaire) info sur le compte en dictionnaire [numéro, solde, operations] """ raise Exception("Attente compt Choisit: Can call this function in this state") def compt_afficher(self): """ Pour retourner aux menu une fois la consultation des compts fini """ raise Exception("Compt afficher: Can call this function in this state") def attente_information_transfer(self, acount_number, credit_to_transfer): """ Recupaire les informations entrée pas l'utilisateur est attend qu'il valide :param acount_number: (int) Numeros de compt a créditer :param credit_to_transfer: (float) Montant a transferer """ raise Exception("Attente information virement: Can call this function in this state") def confimer_le_virement(self, confirm_transfer): """ L'utilisateur valide les information entrée on effectue le transfer :param confirm_transfer: (bool) True confirm le transfer :return: (bool) True transfer effectuer """ raise Exception("Confirmer le virement: Can call this function in this state")
[ "swan.blanc.pro@gmail.com" ]
swan.blanc.pro@gmail.com
1ff773919aec1f3c3dc117cc8f3db600db5c9e89
ad5b4790cf04b65f93729c56961d2feb3c6194cb
/tools/cpplint/setup.py
030ea14ef2092c83e99ebe0ecf65808f32aac0ba
[ "BSD-3-Clause", "MIT" ]
permissive
BoogeeDoo/mt19937
5b795e1f7221ef5a331824e745dc89610ead1f7e
56f0f3f80cee8ec76d08c84a413b9dfc8928b8f7
refs/heads/master
2023-07-19T21:41:39.414715
2022-06-01T14:45:23
2022-06-01T14:45:23
117,514,446
4
1
MIT
2022-11-20T13:19:26
2018-01-15T07:53:45
C++
UTF-8
Python
false
false
2,955
py
#! /usr/bin/env python from setuptools import setup, Command from subprocess import check_call from distutils.spawn import find_executable import cpplint as cpplint class Cmd(Command): ''' Superclass for other commands to run via setup.py, declared in setup.cfg. These commands will auto-install setup_requires in a temporary folder. ''' user_options = [ ('executable', 'e', 'The executable to use for the command') ] def initialize_options(self): self.executable = find_executable(self.executable) def finalize_options(self): pass def execute(self, *k): check_call((self.executable,) + k) class Lint(Cmd): '''run with python setup.py lint''' description = 'Run linting of the code' user_options = Cmd.user_options + [ ('jobs', 'j', 'Use multiple processes to speed up the linting') ] executable = 'pylint' def run(self): self.execute('cpplint.py') # some pip versions bark on comments (e.g. on travis) def read_without_comments(filename): with open(filename) as f: return [line for line in f.read().splitlines() if not len(line) == 0 and not line.startswith('#')] test_required = read_without_comments('test-requirements') setup(name='cpplint', version=cpplint.__VERSION__, py_modules=['cpplint'], # generate platform specific start script entry_points={ 'console_scripts': [ 'cpplint = cpplint:main' ] }, install_requires=[], url='https://github.com/cpplint/cpplint', download_url='https://github.com/cpplint/cpplint', keywords=['lint', 'python', 'c++'], maintainer='cpplint Developers', maintainer_email='see_github@nospam.com', classifiers=['Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: C++', 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Topic :: Software Development :: Quality Assurance', 'License :: Freely 
Distributable'], description='Automated checker to ensure C++ files follow Google\'s style guide', long_description=open('README.rst').read(), license='BSD-3-Clause', setup_requires=[ "pytest-runner" ], tests_require=test_required, # extras_require allow pip install .[dev] extras_require={ 'test': test_required, 'dev': read_without_comments('dev-requirements') + test_required }, cmdclass={ 'lint': Lint })
[ "i@2333.moe" ]
i@2333.moe
e4d288a30baec61e2e198b96b3163e0cf87504db
b62b673d9ade27f3e924f822d5b075e38ae28aa1
/tag-generator.py
0b3a2f6604bfbf6b8e487b1159ac54c2965410c0
[ "MIT" ]
permissive
Bhupesh-V/Bhupesh-V.github.io
7cad5f3dac12ecab9613780713a18fd9fb466ac2
8efd2afe3a5e76df45caf796222a0e498e569ed6
refs/heads/master
2023-05-29T01:27:50.233594
2023-04-30T13:14:16
2023-04-30T13:14:16
182,211,988
3
2
MIT
2019-08-27T21:10:42
2019-04-19T06:13:58
CSS
UTF-8
Python
false
false
1,349
py
#!/usr/bin/env python ''' tag_generator.py Copyright 2017 Long Qian Contact: lqian8@jhu.edu Source: https://github.com/qian256/qian256.github.io/blob/master/tag_generator.py This script creates tags for your Jekyll blog hosted by Github page. No plugins required. ''' import glob import os post_dir = '_posts/' tag_dir = 'tag/' filenames = glob.glob(post_dir + '*md') total_tags = [] for filename in filenames: f = open(filename, 'r', encoding='utf8') crawl = False for line in f: if crawl: current_tags = line.strip().split() if current_tags[0] == 'tags:': total_tags.extend(current_tags[1:]) crawl = False break if line.strip() == '---': if not crawl: crawl = True else: crawl = False break f.close() total_tags = set(total_tags) old_tags = glob.glob(tag_dir + '*.md') for tag in old_tags: os.remove(tag) if not os.path.exists(tag_dir): os.makedirs(tag_dir) for tag in total_tags: tag_filename = tag_dir + tag + '.md' f = open(tag_filename, 'a') write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n' f.write(write_str) f.close() print("Tags generated, count", total_tags.__len__())
[ "varshneybhupesh@gmail.com" ]
varshneybhupesh@gmail.com
b2e9aef98ce8e65f58c90611607ae2f1481b8d51
3bbf917e4525d84f4c42752cda3b072d83fbd77d
/Labs/Persisting Data/MoviesItemOps06.py
88a449484d1f16322df20543e2fe93b17deb4983
[]
no_license
renan-suetsugu/WorkshopPythonOnAWS
2fe891efe779802bdf497ce57c9a042886fbe3a2
0dee38d6cb24f5a33c5ac48409c6112fb57bab0a
refs/heads/main
2023-08-03T07:45:07.785458
2021-09-08T19:21:37
2021-09-08T19:21:37
400,149,268
0
0
null
2021-09-08T19:17:40
2021-08-26T11:47:11
Python
UTF-8
Python
false
false
993
py
import boto3 from botocore.exceptions import ClientError import json import decimal # Helper class to convert a DynamoDB item to JSON. class DecimalEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, decimal.Decimal): if o % 1 > 0: return float(o) else: return int(o) return super(DecimalEncoder, self).default(o) dynamodb = boto3.resource('dynamodb', region_name='us-east-1') table = dynamodb.Table('Movies') title = "The Big New Movie" year = 2015 print("Tentando uma exclução condicional...") try: response = table.delete_item( Key={ 'year': year, 'title': title }, ) except ClientError as e: if e.response['Error']['Code'] == "ConditionalCheckFailedException": print(e.response['Error']['Message']) else: raise else: print("Item deletado com sucesso:") print(json.dumps(response, indent=4, cls=DecimalEncoder))
[ "noreply@github.com" ]
renan-suetsugu.noreply@github.com
781f1bed425ed743952b93b27a6dea0e2e1a1bad
a78e2aa069c38bb197a023df179d0c7e3f4c8469
/Button.py
8dd397c672b7d2b5da43f5e93177746ba2b64e14
[]
no_license
KouhouMohamed/pythonProject
74f6fa1051e109538d77904cc8a54da2c0a8d8ac
662e10887bc4272e28a56bc43e953cf1492da5e8
refs/heads/master
2023-01-06T04:18:20.244340
2020-11-07T09:47:09
2020-11-07T09:47:09
310,811,893
0
0
null
null
null
null
UTF-8
Python
false
false
679
py
from tkinter import * from tkinter import ttk #tkk is a class where buttons def main(): root = Tk() #create a root ( a window) but1 = ttk.Button(root, text="GetText") Ent1 = ttk.Entry(root, width=30) Ent1.pack() but1.pack() # Add button to root def ButtClick(): print(Ent1.get()) #get the contenant of Ent1 Ent1.delete(0,END) #clear the entery from begennin to end but1.config(command=ButtClick) Logo = PhotoImage(file='help.png') Logo_r=Logo.subsample(10,10) #resize the image Logo_r.zoom(15,20) but1.config(image=Logo_r,compound=LEFT) root.mainloop() #pour afficher root if __name__ == '__main__':main()
[ "m.kouhou-etu@enset-media.ac.ma" ]
m.kouhou-etu@enset-media.ac.ma
1a7193f2ab76638143eedaff1d1f49fa6a2291d0
b0504df295f3738827184f1aed86b48c0303e7ca
/data/pdbbind/example_conjoint_pdb/ecfp-pdb-refined2019-pocket.py
7e4cc1038cb051f05001a762b7f2bed7a465b735
[]
no_license
jank3/AlogP-DL
3c47098cff36551518eb23629ceca809b4e366ea
e4029cc76dce5b196c4eebe4a66a560b7fe14e0c
refs/heads/master
2023-03-18T17:47:40.055718
2021-02-28T07:49:46
2021-02-28T07:49:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
618
py
import pandas as pd import numpy as np from rdkit import Chem from rdkit.Chem import AllChem from rdkit.Chem import MACCSkeys ligand = Chem.MolFromPDBFile('11gs_ligand.pdb') pocketr1 = Chem.MolFromPDBFile('11gs_pocket_clean.pdb') featureL=AllChem.GetMorganFingerprintAsBitVect(ligand,2,nBits = 1024) featureL1=AllChem.GetMorganFingerprintAsBitVect(pocketr1, 2,nBits = 1024) features=[] features=['11gs'] features.extend(featureL.ToBitString()) features.extend(featureL1.ToBitString()) with open('ecfp-pocket-refined2019.txt', 'a') as f: f.write(','.join([str(x) for x in features])) f.write('\n') exit()
[ "xlxsdu@163.com" ]
xlxsdu@163.com
af415894f66167bbebd63ee550eeff6774fea102
c9837ea5229fce8a13dc28b8efe583e6b1f80f06
/tests/acceptance/test_async.py
13e29325278b60432ce81e30cf21f808f32fc48d
[ "MIT" ]
permissive
appetito/procrastinate
51de7e4e7e216514c4c417e0d496fdf968332092
5e47d99ede5fafc5717765ebde3e2782b131672a
refs/heads/master
2022-09-18T19:49:24.958649
2020-06-03T08:03:19
2020-06-03T08:03:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
835
py
import pytest import procrastinate pytestmark = pytest.mark.asyncio @pytest.fixture def pg_app(pg_connector): return procrastinate.App(connector=pg_connector) async def test_defer(pg_app): sum_results = [] product_results = [] @pg_app.task(queue="default", name="sum_task") def sum_task(a, b): sum_results.append(a + b) @pg_app.task(queue="default", name="product_task") async def product_task(a, b): product_results.append(a * b) await sum_task.defer_async(a=1, b=2) await sum_task.configure().defer_async(a=3, b=4) await pg_app.configure_task(name="sum_task").defer_async(a=5, b=6) await product_task.defer_async(a=3, b=4) await pg_app.run_worker_async(queues=["default"], wait=False) assert sum_results == [3, 7, 11] assert product_results == [12]
[ "joachim.jablon@people-doc.com" ]
joachim.jablon@people-doc.com
8303bfe6a087932e19cea98165604264c4a08b9a
f1244dd9a531a40f61c57acf7a7f11f9b2c9cb1f
/3-2/CSE 3210 (Artificial Intelligence)/Lab 6/3. Triangle by star.py
2f99de4ed6a484bc04e642627c9f8dfd1a0cd4cf
[]
no_license
SabirKhanAkash/RUET-Lab-Works
7e8be66e2d435108bed57b0335feb54d76ba23ef
3f094a5ca364d92ef42831e9f2dfb75c3baad506
refs/heads/master
2022-08-30T10:38:04.209187
2022-08-26T18:02:26
2022-08-26T18:02:26
240,317,974
2
3
null
2022-08-22T05:15:33
2020-02-13T17:17:38
Jupyter Notebook
UTF-8
Python
false
false
226
py
def main(): n = int(input("Enter the value of n: ")) k = 2*n - 2 for i in range(0, n): for j in range(0, k): print(end=" ") k = k - 1 for j in range(0, i+1): print("* ", end="") print("\r") main()
[ "39434260+SabirKhanAkash@users.noreply.github.com" ]
39434260+SabirKhanAkash@users.noreply.github.com
0e188befbac224d8224dc6e6649007c2d0ccc5b5
8b1dcac39acfcee0f573dc71d608671dea2062a2
/tools/hikyuu/interactive/draw/__init__.py
fcdb11396c845625805c5eebb3c406cd9deb7ab1
[ "MIT" ]
permissive
eightwind/hikyuu
4c876170b1e298105e7eaf9675b310ad378dd9a4
4dab98a93e2a9847f77d615d6900067fbf90b73d
refs/heads/master
2021-08-26T05:32:39.813080
2017-11-21T18:59:16
2017-11-21T18:59:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
33
py
__version__ = "Only for pip dist"
[ "fasiondog@163.com" ]
fasiondog@163.com
ddcaf6e28b533963df17ac8f9f13f4ce3c77631f
1581f1d66d6835b2c271295e3251c2dde239fec8
/payment_gateway/pg_utils.py
6036c701e7036016bef878326b20e168433fab8a
[]
no_license
abinash-kumar/pythod
527659e3bdd161f9abcaaa9182dfe58044b3ff66
1469dc0cd9d6d72b2fe2e69f99542e470bea807b
refs/heads/master
2023-01-30T02:54:10.729606
2020-02-24T07:18:51
2020-02-24T07:18:51
242,670,715
0
0
null
2023-01-25T13:57:52
2020-02-24T07:16:02
Python
UTF-8
Python
false
false
2,318
py
from motor_product import prod_utils as mpu from health_product import prod_utils as hpu HEALTH_INSURER_SLUG = { 'the-oriental-insurance-company-ltd': 'oriental' } def resolve_utils(transaction): if transaction.product_type == 'motor': return mpu elif transaction.product_type == 'health': return hpu else: return None def process_payment_response(request, response, transaction): if transaction.product_type == 'motor': return mpu.process_payment_response( request, mpu.VEHICLE_TYPE_SLUG[transaction.vehicle_type], get_insurer_slug(transaction), response, transaction.transaction_id ) elif transaction.product_type == 'health': return hpu.process_payment_response( transaction.slab.health_product.insurer.id, response, transaction ) else: return None def get_insurer_slug(transaction): if transaction.product_type == 'motor': return transaction.insurer.slug elif transaction.product_type == 'health': return HEALTH_INSURER_SLUG[transaction.slab.health_product.insurer.slug] else: return None def get_error_url(transaction): if transaction.product_type == 'motor': vehicle_type = mpu.VEHICLE_TYPE_SLUG[transaction.vehicle_type] return '/motor/' + vehicle_type + '/product/failure/' elif transaction.product_type == 'health': return '/health-plan/payment/transaction/%s/failure/' % transaction.transaction_id else: return None def todict(obj, classkey=None): if isinstance(obj, dict): data = {} for (k, v) in obj.items(): data[k] = todict(v, classkey) return data elif hasattr(obj, "_ast"): return todict(obj._ast()) elif hasattr(obj, "__iter__"): return [todict(v, classkey) for v in obj] elif hasattr(obj, "__dict__"): data = dict([(key, todict(value, classkey)) for key, value in obj.__dict__.iteritems() if not callable(value) and not key.startswith('_')]) if classkey is not None and hasattr(obj, "__class__"): data[classkey] = obj.__class__.__name__ return data else: return obj
[ "abinashlv@AbinashSymboMac.local" ]
abinashlv@AbinashSymboMac.local
6ae2804678615a3a1654175705d975799f861089
0b67530ca1ed53251c343b38332ea7f61c18c1c5
/cmd123.py
84d2c5a452f8cf1d8fddb2175c5cc96c0b61a40f
[]
no_license
NikhilChaudhari11/nik1
c4b27d305956e1560d333f962e0ee8db7760fa1a
fc129980c6f484e0c6f797fa3d20c4c0e095ef74
refs/heads/master
2020-12-11T09:07:47.073064
2018-01-12T17:21:49
2018-01-12T17:21:49
null
0
0
null
null
null
null
UTF-8
Python
false
false
588
py
import re import time import os def cmd1(line): line = re.split("\n+", line) def batch1(line): list11 = [] for i in line: #print(i) if(type(i) != 'None' and len(i) != 0): #print(i.split()[0][0]) e = i.split()[0][0] if( e != '#'): i=i.lstrip().rstrip() list11.append(i) return(list11) list2 = batch1(line) for i in list2: time.sleep(0.15) os.popen(i) time.sleep(0.15)
[ "nikhilc11@gmail.com" ]
nikhilc11@gmail.com
1d6007a5ebcba5fca71c8d3808860c34ac1f9ede
0f0f8b3b027f412930ca1890b0666538358a2807
/dotop/addons/base/ir/ir_filters.py
7e792068539ec5262791dfa23e1034b0a6500c7e
[]
no_license
konsoar/dotop_pos_v11
741bd5ca944dfd52eb886cab6f4b17b6d646e131
576c860917edd25661a72726d0729c769977f39a
refs/heads/master
2021-09-06T13:25:34.783729
2018-02-07T02:11:12
2018-02-07T02:11:12
111,168,355
0
0
null
null
null
null
UTF-8
Python
false
false
7,584
py
# -*- coding: utf-8 -*- # Part of dotop. See LICENSE file for full copyright and licensing details. import ast from dotop import api, fields, models, _ from dotop.exceptions import UserError class IrFilters(models.Model): _name = 'ir.filters' _description = 'Filters' _order = 'model_id, name, id desc' name = fields.Char(string='Filter Name', translate=True, required=True) user_id = fields.Many2one('res.users', string='User', ondelete='cascade', default=lambda self: self._uid, help="The user this filter is private to. When left empty the filter is public " "and available to all users.") domain = fields.Text(default='[]', required=True) context = fields.Text(default='{}', required=True) sort = fields.Text(default='[]', required=True) model_id = fields.Selection(selection='_list_all_models', string='Model', required=True) is_default = fields.Boolean(string='Default filter') action_id = fields.Many2one('ir.actions.actions', string='Action', ondelete='cascade', help="The menu action this filter applies to. " "When left empty the filter applies to all menus " "for this model.") active = fields.Boolean(default=True) @api.model def _list_all_models(self): self._cr.execute("SELECT model, name FROM ir_model ORDER BY name") return self._cr.fetchall() @api.multi def copy(self, default=None): self.ensure_one() default = dict(default or {}, name=_('%s (copy)') % self.name) return super(IrFilters, self).copy(default) @api.multi def _get_eval_domain(self): self.ensure_one() return ast.literal_eval(self.domain) @api.model def _get_action_domain(self, action_id=None): """Return a domain component for matching filters that are visible in the same context (menu/view) as the given action.""" if action_id: # filters specific to this menu + global ones return [('action_id', 'in', [action_id, False])] # only global ones return [('action_id', '=', False)] @api.model def get_filters(self, model, action_id=None): """Obtain the list of filters available for the user on the given model. 
:param action_id: optional ID of action to restrict filters to this action plus global filters. If missing only global filters are returned. The action does not have to correspond to the model, it may only be a contextual action. :return: list of :meth:`~osv.read`-like dicts containing the ``name``, ``is_default``, ``domain``, ``user_id`` (m2o tuple), ``action_id`` (m2o tuple) and ``context`` of the matching ``ir.filters``. """ # available filters: private filters (user_id=uid) and public filters (uid=NULL), # and filters for the action (action_id=action_id) or global (action_id=NULL) action_domain = self._get_action_domain(action_id) filters = self.search(action_domain + [('model_id', '=', model), ('user_id', 'in', [self._uid, False])]) user_context = self.env.user.context_get() return filters.with_context(user_context).read(['name', 'is_default', 'domain', 'context', 'user_id', 'sort']) @api.model def _check_global_default(self, vals, matching_filters): """ _check_global_default(dict, list(dict), dict) -> None Checks if there is a global default for the model_id requested. 
If there is, and the default is different than the record being written (-> we're not updating the current global default), raise an error to avoid users unknowingly overwriting existing global defaults (they have to explicitly remove the current default before setting a new one) This method should only be called if ``vals`` is trying to set ``is_default`` :raises dotop.exceptions.UserError: if there is an existing default and we're not updating it """ domain = self._get_action_domain(vals.get('action_id')) defaults = self.search(domain + [ ('model_id', '=', vals['model_id']), ('user_id', '=', False), ('is_default', '=', True), ]) if not defaults: return if matching_filters and (matching_filters[0]['id'] == defaults.id): return raise UserError(_("There is already a shared filter set as default for %(model)s, delete or change it before setting a new default") % {'model': vals.get('model_id')}) @api.model @api.returns('self', lambda value: value.id) def create_or_replace(self, vals): action_id = vals.get('action_id') current_filters = self.get_filters(vals['model_id'], action_id) matching_filters = [f for f in current_filters if f['name'].lower() == vals['name'].lower() # next line looks for matching user_ids (specific or global), i.e. 
# f.user_id is False and vals.user_id is False or missing, # or f.user_id.id == vals.user_id if (f['user_id'] and f['user_id'][0]) == vals.get('user_id')] if vals.get('is_default'): if vals.get('user_id'): # Setting new default: any other default that belongs to the user # should be turned off domain = self._get_action_domain(action_id) defaults = self.search(domain + [ ('model_id', '=', vals['model_id']), ('user_id', '=', vals['user_id']), ('is_default', '=', True), ]) if defaults: defaults.write({'is_default': False}) else: self._check_global_default(vals, matching_filters) # When a filter exists for the same (name, model, user) triple, we simply # replace its definition (considering action_id irrelevant here) if matching_filters: matching_filter = self.browse(matching_filters[0]['id']) matching_filter.write(vals) return matching_filter return self.create(vals) _sql_constraints = [ # Partial constraint, complemented by unique index (see below). Still # useful to keep because it provides a proper error message when a # violation occurs, as it shares the same prefix as the unique index. ('name_model_uid_unique', 'unique (name, model_id, user_id, action_id)', 'Filter names must be unique'), ] @api.model_cr_context def _auto_init(self): result = super(IrFilters, self)._auto_init() # Use unique index to implement unique constraint on the lowercase name (not possible using a constraint) self._cr.execute("DROP INDEX IF EXISTS ir_filters_name_model_uid_unique_index") # drop old index w/o action self._cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = 'ir_filters_name_model_uid_unique_action_index'") if not self._cr.fetchone(): self._cr.execute("""CREATE UNIQUE INDEX "ir_filters_name_model_uid_unique_action_index" ON ir_filters (lower(name), model_id, COALESCE(user_id,-1), COALESCE(action_id,-1))""") return result
[ "Administrator@20nuo003-PC" ]
Administrator@20nuo003-PC
df952844481362845f3f8fd712d4e353b5c9b969
cbe4c2c2d163d2e5c611a77258ec1eb2e92b6479
/api/migrations/0006_auto__add_field_configset_delta_name__del_unique_configset_hwtype_id_c.py
ca28f0e726f18e3e7cf855cddfb86694ed061bf4
[]
no_license
radhakrishnaa/DCP
20bcd6ce8143b5011310c42be858d139fb0cfa7a
c7970393811ef6686aafa4a49b96115b05ac86b6
refs/heads/main
2023-08-14T03:47:14.841160
2021-09-13T14:40:50
2021-09-13T14:40:50
406,011,002
0
0
null
null
null
null
UTF-8
Python
false
false
12,862
py
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Removing unique constraint on 'ConfigSet', fields ['hwtype_id', 'category_id', 'region_id', 'carrier_id'] db.delete_unique('config_set', ['hwtype_id', 'category_id', 'region_id', 'carrier_id']) # Adding field 'ConfigSet.delta_name' db.add_column('config_set', 'delta_name', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True), keep_default=False) # Adding unique constraint on 'ConfigSet', fields ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id'] db.create_unique('config_set', ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id']) def backwards(self, orm): # Removing unique constraint on 'ConfigSet', fields ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id'] db.delete_unique('config_set', ['hwtype_id', 'delta_name', 'category_id', 'region_id', 'carrier_id']) # Deleting field 'ConfigSet.delta_name' db.delete_column('config_set', 'delta_name') # Adding unique constraint on 'ConfigSet', fields ['hwtype_id', 'category_id', 'region_id', 'carrier_id'] db.create_unique('config_set', ['hwtype_id', 'category_id', 'region_id', 'carrier_id']) models = { u'api.carrier': { 'Meta': {'ordering': "['code']", 'object_name': 'Carrier', 'db_table': "'carrier'"}, 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}), 'old_code': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}) }, u'api.cloudenv': { 'Meta': {'ordering': "['order', 'short_name']", 'object_name': 'CloudEnv', 
'db_table': "'cloud_env'"}, 'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), 'display_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'env_type': ('django.db.models.fields.CharField', [], {'max_length': '8'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'network_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}) }, u'api.configset': { 'Meta': {'ordering': "['category_id', 'hwtype_id', 'carrier_id', 'region_id']", 'unique_together': "(('category_id', 'hwtype_id', 'carrier_id', 'region_id', 'delta_name'),)", 'object_name': 'ConfigSet', 'db_table': "'config_set'"}, 'carrier_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Carrier']", 'null': 'True', 'db_column': "'carrier_id'", 'blank': 'True'}), 'category_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingCategory']", 'db_column': "'category_id'"}), 'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), 'delta_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'fallback_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigSet']", 'null': 'True', 'db_column': "'fallback_id'", 'blank': 'True'}), 'hwtype_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Hwtype']", 'null': 'True', 'db_column': "'hwtype_id'", 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'region_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.Region']", 'null': 'True', 'db_column': "'region_id'", 'blank': 'True'}) }, 
u'api.configsetting': { 'Meta': {'object_name': 'ConfigSetting', 'db_table': "'config_setting'"}, 'config_version_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigVersion']", 'db_column': "'config_version_id'"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'setting_value_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingValue']", 'db_column': "'setting_value_id'"}) }, u'api.configversion': { 'Meta': {'object_name': 'ConfigVersion', 'db_table': "'config_version'"}, 'approved': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'approver_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_approver'", 'null': 'True', 'db_column': "'approver_id'", 'to': u"orm['api.User']"}), 'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), 'committed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'committer_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_committer'", 'null': 'True', 'db_column': "'committer_id'", 'to': u"orm['api.User']"}), 'config_set_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigSet']", 'db_column': "'config_set_id'"}), 'fallback_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.ConfigVersion']", 'null': 'True', 'db_column': "'fallback_id'", 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'last_editor_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_last_editor'", 'null': 'True', 'db_column': "'last_editor_id'", 'to': u"orm['api.User']"}), 'published': 
('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'publisher_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'config_version_publisher'", 'null': 'True', 'db_column': "'publisher_id'", 'to': u"orm['api.User']"}), 'setting_value': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['api.SettingValue']", 'null': 'True', 'through': u"orm['api.ConfigSetting']", 'blank': 'True'}), 'version_number': ('django.db.models.fields.IntegerField', [], {}) }, u'api.envtransform': { 'Meta': {'ordering': "['order', 'env_pat']", 'object_name': 'EnvTransform', 'db_table': "'env_transform'"}, 'carrier_region_pat': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), 'env_pat': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'hwtype_pat': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'setting_name_pat': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'value_pat': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'value_sub': ('django.db.models.fields.CharField', [], {'max_length': '8000', 'null': 'True', 'blank': 'True'}) }, u'api.hwtype': { 'Meta': {'ordering': "['code']", 'object_name': 'Hwtype', 'db_table': "'hwtype'"}, 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}), 'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'internal_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}), 'marketing_name': 
('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}), 'model_number': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}) }, u'api.region': { 'Meta': {'ordering': "['code']", 'object_name': 'Region', 'db_table': "'region'"}, 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}), 'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}) }, u'api.settingcategory': { 'Meta': {'ordering': "['name']", 'object_name': 'SettingCategory', 'db_table': "'setting_category'"}, 'comment': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}) }, u'api.settingdef': { 'Meta': {'ordering': "['group', 'order', 'display_name']", 'object_name': 'SettingDef', 'db_table': "'setting_def'"}, 'category_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingCategory']", 'db_column': "'category_id'"}), 'datatype': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'display_name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}), 'group': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'rules': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 
'blank': 'True'}), 'short_help': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}) }, u'api.settingvalue': { 'Meta': {'object_name': 'SettingValue', 'db_table': "'setting_value'"}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'setting_def_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['api.SettingDef']", 'db_column': "'setting_def_id'"}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '8000', 'null': 'True', 'blank': 'True'}) }, u'api.user': { 'Meta': {'ordering': "['username']", 'object_name': 'User', 'db_table': "'user'"}, 'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'approver': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'display_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}), 'editor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'email': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'publisher': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}) } } complete_apps = ['api']
[ "rtalluri@motorola.com" ]
rtalluri@motorola.com
265049dd5c7273612076608f805ee6f00e3f2430
82e0fb055637e3181c7b1c25b2c199213c130f1b
/Python/Funciones de Alto orden/Ejemplo4.py
eae624cd1e8e739a75e60ef4e2f55f361ea537d6
[]
no_license
DangerousCode/DAM-2-Definitivo
ffd7d99a385e9d9a821887676ecd81d3e2e1ddfc
6fcaad2342a68a6005e062bdd8603b900dcdf147
refs/heads/master
2021-01-10T17:58:44.570045
2015-12-16T15:19:25
2015-12-16T15:19:25
47,215,281
0
0
null
null
null
null
UTF-8
Python
false
false
1,256
py
__author__ = 'AlumnoT' '''Funcion dada una lista de numeros y un numero cota superior, queremos devolver aquellos elementos menores a dicha cota''' lista=list(range(-5,5)) '''1)Modificar la sintaxis anterior para que solo nos muestre los numeros negativos''' print filter(lambda x:x<0,lista) '''2)Crear funcion a la que le vamos a pasar una lista de los valores 0,1,2,3,4 y esa funcion tiene que devolvernos una lista formada por el cuadrado del primer valor con el cubo del primer valor (con todos los valores)''' print map(lambda x:[x*x,x*x*x],[0,1,2,3,4]) '''3)Generar dos listas una con valores numericos del 0 al 5 y otra con tres cadenas cuando ejecutemos la funcion queremos que nnos muestre la media de la lista que contiene los numeros y que las tres cadenas de la segunda lista aparezcan como una sola frase''' lista=list(range(0,6)) listacad=["hola","que","tal"] print (reduce(lambda x,z:x+z,lista))/len(lista) print reduce(lambda a,b:a+" "+b,listacad) '''4)Se nos va a facilitar una lista y una tupla con numeros debemos realizar una funcion que sume cada numero de la lista con el correspondiente numero de su misma posicion en la tupla todo ello usando map,reduce,filter, lambda''' lis=[1,2,3] tup=(3,2,1) print map(lambda x,y:x+y,lis,tup)
[ "asantosq1@gmail.com" ]
asantosq1@gmail.com
f0558330618b47efd52ea7dae4624354fe0c32ac
89b45e528f3d495f1dd6f5bcdd1a38ff96870e25
/pyneng/exercises/09_functions/task_9_2.py
e2a25f74f4ea48dd6a5f51879221d1048f8a5c94
[]
no_license
imatyukin/python
2ec6e712d4d988335fc815c7f8da049968cc1161
58e72e43c835fa96fb2e8e800fe1a370c7328a39
refs/heads/master
2023-07-21T13:00:31.433336
2022-08-24T13:34:32
2022-08-24T13:34:32
98,356,174
2
0
null
2023-07-16T02:31:48
2017-07-25T22:45:29
Python
UTF-8
Python
false
false
2,935
py
# -*- coding: utf-8 -*- """ Задание 9.2 Создать функцию generate_trunk_config, которая генерирует конфигурацию для trunk-портов. У функции должны быть такие параметры: - intf_vlan_mapping: ожидает как аргумент словарь с соответствием интерфейс-VLANы такого вида: {'FastEthernet0/1': [10, 20], 'FastEthernet0/2': [11, 30], 'FastEthernet0/4': [17]} - trunk_template: ожидает как аргумент шаблон конфигурации trunk-портов в виде списка команд (список trunk_mode_template) Функция должна возвращать список команд с конфигурацией на основе указанных портов и шаблона trunk_mode_template. В конце строк в списке не должно быть символа перевода строки. Проверить работу функции на примере словаря trunk_config и списка команд trunk_mode_template. Если предыдущая проверка прошла успешно, проверить работу функции еще раз на словаре trunk_config_2 и убедится, что в итоговом списке правильные номера интерфейсов и вланов. Пример итогового списка (перевод строки после каждого элемента сделан для удобства чтения): [ 'interface FastEthernet0/1', 'switchport mode trunk', 'switchport trunk native vlan 999', 'switchport trunk allowed vlan 10,20,30', 'interface FastEthernet0/2', 'switchport mode trunk', 'switchport trunk native vlan 999', 'switchport trunk allowed vlan 11,30', ...] Ограничение: Все задания надо выполнять используя только пройденные темы. 
""" from pprint import pprint trunk_mode_template = [ "switchport mode trunk", "switchport trunk native vlan 999", "switchport trunk allowed vlan", ] trunk_config = { "FastEthernet0/1": [10, 20, 30], "FastEthernet0/2": [11, 30], "FastEthernet0/4": [17], } trunk_config_2 = { "FastEthernet0/11": [120, 131], "FastEthernet0/15": [111, 130], "FastEthernet0/14": [117], } def generate_trunk_config(intf_vlan_mapping, trunk_template): cfg = [] for intf, vlans in intf_vlan_mapping.items(): cfg.append("interface " + intf) for s in trunk_template: if s.endswith('allowed vlan'): s = s + ' ' + str(vlans)[1:-1].replace(" ", "") cfg.append(s) return cfg pprint(generate_trunk_config(trunk_config, trunk_mode_template)) pprint(generate_trunk_config(trunk_config_2, trunk_mode_template))
[ "i.matyukin@gmail.com" ]
i.matyukin@gmail.com
1572ed7e2b86b6dc9bc339d9cf970e352a1bdfa1
f5a2059897f30a77244c0e8426f54ad5bf0db0e3
/resources/store.py
82c62a12820e1ed584f44fccfd130f9033054a21
[]
no_license
colemanGH319/stores-rest-api
c25a01199af1cfcfb3a899f7d4752c382c8870a8
44004f0f64a6bcae4fdc80245cb524b4d7958024
refs/heads/master
2020-04-01T15:04:55.172168
2018-10-17T19:14:47
2018-10-17T19:14:47
153,320,266
0
0
null
null
null
null
UTF-8
Python
false
false
936
py
from flask_restful import Resource

from models.store import StoreModel


class Store(Resource):
    """REST resource for a single store, addressed by name."""

    def get(self, name):
        """Return the store as JSON, or a 404 message if it does not exist."""
        store = StoreModel.find_by_name(name)
        if store:
            return store.json()
        return {'message': 'Store not found.'}, 404

    def post(self, name):
        """Create a store with the given name.

        Returns 400 if a store with that name already exists and 500 if
        persisting it fails.
        """
        if StoreModel.find_by_name(name):
            return {'message': "A store with the name '{}' already exists.".format(name)}, 400

        store = StoreModel(name)
        try:
            store.save_to_db()
        except Exception:  # narrowed from a bare `except:`; still reported as 500
            return {'message': 'An error occurred while creating the store.'}, 500

        return store.json()

    def delete(self, name):
        """Delete the store if it exists.

        BUG FIX: the previous version returned the 'Store deleted' message
        even when no store with that name existed; now it reports 404,
        consistent with get().
        """
        store = StoreModel.find_by_name(name)
        if store is None:
            return {'message': 'Store not found.'}, 404
        store.delete_from_db()
        return {'message': 'Store deleted'}


class StoreList(Resource):
    """REST resource listing every store."""

    def get(self):
        """Return all stores as JSON."""
        return {'stores': [store.json() for store in StoreModel.query.all()]}
[ "coleman.matt319@gmail.com" ]
coleman.matt319@gmail.com
c2a7eee1f1f4756acddc4b286a978d0c08f441ef
968970ca6a39c6cdc02cf8a79280630afa5ebc4f
/src/main/python/countTravelTime.py
d6476dd199a1c139cd1a6e86d4e433554f0a391c
[]
no_license
jdcc2/mbdtraffic
91a5bc4347062971057eb9ec27bc40601240117e
ad2167cf5af63a0089f69d70f35340f68040b6ab
refs/heads/master
2021-01-20T01:04:19.137668
2017-01-25T13:37:06
2017-01-25T13:37:06
79,126,607
0
0
null
null
null
null
UTF-8
Python
false
false
1,902
py
#!/usr/bin/env python
"""Spark job: filter NDW traffic CSV rows down to usable travel-time rows."""
import sys
import os
import json
import pyspark


def doJob(rdd):
    """Return the rows of *rdd* (CSV lines) that carry a travel time.

    Rows flagged with dataError == '1' or with too few columns are
    dropped, then rows with an empty avgTravelTime field are removed.
    """
    print('traffic job')
    # Map column names to column indices.
    # BUG FIX: 'ROADNAME,FIRST_NAME,SECND_NAME' was a single fused string,
    # which shifted the indices of every column after it by two; they are
    # now three separate entries.
    columns = [
        'measurementSiteReference', 'measurementSiteVersion', 'index',
        'periodStart', 'periodEnd', 'numberOfIncompleteInputs',
        'numberOfInputValuesused', 'minutesUsed', 'computationalMethod',
        'standardDeviation', 'supplierCalculatedDataQuality', 'sCDQ_Low',
        'sCDQ_SD', 'number_of_sCDQ', 'dataError', 'travelTimeType',
        'avgVehicleFlow', 'avgVehicleSpeed', 'avgTravelTime',
        'computationMethod', 'measurementEquipmentTypeUsed',
        'measurementSiteName1', 'measurementSiteName2',
        'measurementSiteNumberOfLanes', 'measurementSiteIdentification',
        'measurementSide', 'accuracy', 'period', 'specificLane',
        'specificVehicleCharacteristics', 'startLocatieForDisplayLat',
        'startLocatieForDisplayLong', 'LocationCountryCode',
        'LocationTableNumber', 'LocationTableVersion',
        'alertCDirectionCoded', 'specificLocation', 'offsetDistance',
        'LOC_TYPE', 'LOC_DES', 'ROADNUMBER',
        'ROADNAME', 'FIRST_NAME', 'SECND_NAME',
        'messageType', 'publicationTime', 'deducedNoTrafficMinutes',
        'carriageway',
    ]
    columnToIndex = {column: index for index, column in enumerate(columns)}
    # Filter out rows with data errors; len(row) > 18 guarantees the
    # avgTravelTime column (index 18) exists.
    clean = rdd.map(lambda line: line.split(',')).filter(
        lambda row: len(row) > 18 and row[columnToIndex['dataError']] != '1')
    usable = clean.filter(
        lambda row: row[columnToIndex['avgTravelTime']] != '')
    print("Row count with avgTravelTime: ", usable.count())
    return usable


def main():
    """Parse <in_dir> <out_dir> from argv and run the job on a local context."""
    in_dir, out_dir = sys.argv[1:]
    conf = pyspark.SparkConf().setAppName(
        "%s %s %s" % (os.path.basename(__file__), in_dir, out_dir))
    sc = pyspark.SparkContext(conf=conf)
    # invoke job and put into output directory
    doJob(sc.textFile(in_dir)).saveAsTextFile(out_dir)


if __name__ == '__main__':
    main()
[ "jd@leetbook" ]
jd@leetbook
a96020623f1f41176402c5c4583499aab4707dc0
7620448f67684c814121a6b772a824b792e43b5f
/utilities/annotate_from_genomic_features.py
f8ed6df46a00f868b17132d811bd70ae0311144d
[ "Apache-2.0" ]
permissive
Sisov/AlignQC
2c2dd952d0d864a8d84daa86260b8ac5e8d1d9eb
f0677876408371ced09ba15b586489b9139828f4
refs/heads/master
2021-01-11T07:52:07.209342
2016-09-02T19:19:11
2016-09-02T19:19:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,263
py
#!/usr/bin/python
"""Assign a genomic feature (exon/intron/intergenic) to each aligned read.

For every read, compute how much of it overlaps exon, intron and
intergenic regions derived from a reference annotation, then report the
dominant feature per read.  In the event of a tie, exon is prioritized
over intron and intron over intergenic.
"""
import sys, argparse, gzip, re, os, inspect

# bring in the folder to the path for our utilities
pythonfolder_loc = "../pylib"
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], pythonfolder_loc)))
if cmd_subfolder not in sys.path:
  sys.path.insert(0, cmd_subfolder)

from Bio.Format.GPD import GPDStream
from Bio.Range import merge_ranges, GenomicRange, subtract_ranges, BedArrayStream, sort_ranges
from Bio.Stream import MultiLocusStream


def main(args):
  """Build exon/intron/intergenic range sets and classify every read."""
  # --- Read chromosome lengths into whole-chromosome ranges ---
  inf = None
  chrlens = {}
  chrbed = []
  if re.search(r'\.gz$', args.chromosome_lengths):
    inf = gzip.open(args.chromosome_lengths)
  else:
    inf = open(args.chromosome_lengths)
  for line in inf:
    f = line.rstrip().split("\t")
    chrlens[f[0]] = f[1]
    chrbed.append(GenomicRange(f[0], 1, f[1]))
  inf.close()
  # --- Collect exon and transcript ranges from the annotation ---
  inf = None
  exonbed = []
  txbed = []
  sys.stderr.write("Reading Exons\n")
  if re.search(r'\.gz$', args.annotation_gpd):
    inf = gzip.open(args.annotation_gpd)
  else:
    inf = open(args.annotation_gpd)
  gs = GPDStream(inf)
  for gpd in gs:
    exonbed += [x.get_range() for x in gpd.exons]
    txbed.append(gpd.get_range())
  inf.close()
  sys.stderr.write("Merging " + str(len(txbed)) + " transcripts\n")
  txbed = merge_ranges(txbed)
  sys.stderr.write(str(len(txbed)) + " transcripts after merging\n")
  # Intergenic = genome minus transcripts.
  sys.stderr.write("Finding intergenic\n")
  intergenicbed = subtract_ranges(chrbed, txbed)
  sys.stderr.write("Found " + str(len(intergenicbed)) + " intergenic regions\n")
  intergenicbp = sum([x.length() for x in intergenicbed])
  sys.stderr.write("Intergenic size: " + str(intergenicbp) + "\n")
  sys.stderr.write("Merging " + str(len(exonbed)) + " exons\n")
  exonbed = merge_ranges(exonbed)
  sys.stderr.write(str(len(exonbed)) + " exons after merging\n")
  # Intron = transcript minus exons.
  sys.stderr.write("Finding introns\n")
  intronbed = subtract_ranges(txbed, exonbed)
  sys.stderr.write("Found " + str(len(intronbed)) + " introns\n")
  chrbp = sum([x.length() for x in chrbed])
  sys.stderr.write("Genome size: " + str(chrbp) + "\n")
  txbp = sum([x.length() for x in txbed])
  sys.stderr.write("Tx size: " + str(txbp) + "\n")
  exonbp = sum([x.length() for x in exonbed])
  sys.stderr.write("Exon size: " + str(exonbp) + "\n")
  intronbp = sum([x.length() for x in intronbed])
  sys.stderr.write("Intron size: " + str(intronbp) + "\n")
  # Optionally dump each feature set as a BED file.
  if args.output_beds:
    if not os.path.exists(args.output_beds):
      os.makedirs(args.output_beds)
    with open(args.output_beds + '/chrs.bed', 'w') as of1:
      for rng in chrbed:
        of1.write("\t".join([str(x) for x in rng.get_bed_array()]) + "\n")
    with open(args.output_beds + '/exon.bed', 'w') as of1:
      for rng in exonbed:
        of1.write("\t".join([str(x) for x in rng.get_bed_array()]) + "\n")
    with open(args.output_beds + '/intron.bed', 'w') as of1:
      for rng in intronbed:
        of1.write("\t".join([str(x) for x in rng.get_bed_array()]) + "\n")
    with open(args.output_beds + '/intergenic.bed', 'w') as of1:
      for rng in intergenicbed:
        of1.write("\t".join([str(x) for x in rng.get_bed_array()]) + "\n")
    with open(args.output_beds + '/tx.bed', 'w') as of1:
      for rng in txbed:
        of1.write("\t".join([str(x) for x in rng.get_bed_array()]) + "\n")
  # --- Register every aligned read by name ---
  inf = None
  if re.search(r'\.gz$', args.reads_gpd):
    inf = gzip.open(args.reads_gpd)
  else:
    inf = open(args.reads_gpd)
  reads = {}
  gs = GPDStream(inf)
  for gpd in gs:
    reads[gpd.get_gene_name()] = {}
  sys.stderr.write("Checking " + str(len(reads.keys())) + " Aligned Reads\n")
  # Now we know all features, so we can annotate reads against each set.
  sys.stderr.write("Read through our reads and bed entries\n")
  sys.stderr.write("Annotate exons\n")
  exons = annotate_gpds(args, exonbed)
  exonnames = set(exons.keys())
  sys.stderr.write("Annotate intron\n")
  intron = annotate_gpds(args, intronbed)
  intronnames = set(intron.keys())
  sys.stderr.write("Annotate intergenic\n")
  intergenic = annotate_gpds(args, intergenicbed)
  intergenicnames = set(intergenic.keys())
  allnames = exonnames | intronnames | intergenicnames
  sys.stderr.write(str(len(allnames)) + " reads attributed to a feature\n")
  vals = set(reads.keys()) - allnames
  if len(vals) > 0:
    sys.stderr.write("WARNING unable to ascribe annotation to " + str(len(vals)) + " reads\n")
  of = sys.stdout
  if args.output:
    if re.search(r'\.gz$', args.output):
      of = gzip.open(args.output, 'w')
    else:
      of = open(args.output, 'w')
  for name in allnames:
    # Fraction of the read overlapping each feature class.
    exonfrac = 0
    intronfrac = 0
    intergenicfrac = 0
    readlen = 0
    exoncount = 0
    if name in exons:
      exonfrac = float(exons[name][1]) / float(exons[name][0])
      readlen = exons[name][0]
      exoncount = exons[name][2]
    if name in intron:
      intronfrac = float(intron[name][1]) / float(intron[name][0])
      readlen = intron[name][0]
      exoncount = intron[name][2]
    if name in intergenic:
      intergenicfrac = float(intergenic[name][1]) / float(intergenic[name][0])
      readlen = intergenic[name][0]
      exoncount = intergenic[name][2]
    vals = {'exon': exonfrac, 'intron': intronfrac, 'intergenic': intergenicfrac}
    # Majority vote with the documented tie-break priority.
    type = None
    if exonfrac >= 0.5:
      type = 'exon'
    elif intronfrac >= 0.5:
      type = 'intron'
    elif intergenicfrac >= 0.5:
      type = 'intergenic'
    else:
      type = sorted(vals.keys(), key=lambda x: vals[x])[-1]
      if vals[type] == 0:
        sys.stderr.write("WARNING trouble setting type\n")
    if not type:
      continue
    of.write(name + "\t" + type + "\t" + str(exoncount) + "\t" + str(readlen) + "\n")
  if args.output:
    # BUG FIX: only close the handle we opened; never close sys.stdout.
    of.close()


def annotate_gpds(args, inputbed):
  """Overlap every read in args.reads_gpd against *inputbed* ranges.

  Returns {read_name: [read_length, overlapped_bases, exon_count]} for
  reads with a non-zero overlap.
  """
  bas = BedArrayStream(sort_ranges(inputbed))
  inf = None
  if re.search(r'\.gz$', args.reads_gpd):
    inf = gzip.open(args.reads_gpd)
  else:
    # BUG FIX: was open(args.args.reads_gpd), which raised AttributeError
    # for uncompressed reads files.
    inf = open(args.reads_gpd)
  gs = GPDStream(inf)
  mls = MultiLocusStream([gs, bas])
  results = {}
  for es in mls:
    [gpds, inbeds] = es.get_payload()
    if len(gpds) == 0 or len(inbeds) == 0:
      continue
    v = annotate_inner(gpds, inbeds)
    for res in v:
      results[res[0]] = res[1:]
  inf.close()
  return results


def annotate_inner(gpds, inbeds):
  """Return [name, read_length, overlap_bp, exon_count] per overlapping read."""
  results = []
  for gpd in gpds:
    orig = gpd.get_length()
    tot = 0
    for rng1 in [x.get_range() for x in gpd.exons]:
      tot += sum([y.overlap_size(rng1) for y in inbeds])
    if tot > 0:
      results.append([gpd.get_gene_name(), orig, tot, gpd.get_exon_count()])
  return results


def do_inputs():
  """Parse command-line arguments."""
  parser = argparse.ArgumentParser(
    description="Assign genomic features to reads based on where they majority of the read lies. In the event of a tie prioritize exon over intron and intron over intergenic.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument('reads_gpd', help="reads gpd")
  parser.add_argument('annotation_gpd', help="reference annotations gpd")
  parser.add_argument('chromosome_lengths', help="reference lengths table")
  parser.add_argument('--output_beds', help="save features")
  parser.add_argument('-o', '--output', help="output results")
  args = parser.parse_args()
  return args


def external_cmd(cmd):
  """Run main() as if invoked with the given command string."""
  cache_argv = sys.argv
  sys.argv = cmd.split()
  args = do_inputs()
  main(args)
  sys.argv = cache_argv


if __name__ == "__main__":
  args = do_inputs()
  main(args)
[ "jason.weirather@gmail.com" ]
jason.weirather@gmail.com
1109d37bf0366a1327a89aea8da48513a50ab171
62ccd6d2d3e10a4587c8e35a98879840656afe6a
/Bike.py
30cd82c283c91fe6fe4cddc05843116def60d616
[]
no_license
Swills2/python_OOp
b37bd5ee08f6f7e710219f1e6cb55f354f65bf5e
8642fb765f784412f9b9d085f19d5810eef7e941
refs/heads/master
2020-04-07T10:56:39.132877
2018-11-20T00:22:56
2018-11-20T00:22:56
158,306,351
0
0
null
null
null
null
UTF-8
Python
false
false
440
py
class Bike:
    """A bicycle with a price, a top speed, and an odometer (miles)."""

    def __init__(self, price, max_speed, miles=0):
        self.price = price
        # BUG FIX: was `self.max_speed = []`, which silently discarded the
        # constructor argument.
        self.max_speed = max_speed
        self.miles = miles

    def displayInfo(self, price=None, max_speed=None, miles=None):
        """Print price, max speed and miles; arguments default to the
        instance's own attributes (explicit values still print as before)."""
        print(price if price is not None else self.price,
              max_speed if max_speed is not None else self.max_speed,
              miles if miles is not None else self.miles)
        return self

    def ride(self, miles=None):
        """Ride the bike: add 10 miles to the odometer.

        BUG FIX: the original incremented a local parameter, so the
        instance's mileage never changed.  The parameter is kept (unused)
        for backward compatibility with existing callers.
        """
        print("Riding")
        self.miles += 10
        return self

    def reverse(self, miles=None):
        """Reverse the bike: subtract 5 miles, clamped at 0.

        BUG FIX: same local-parameter mutation bug as ride(); the odometer
        is now actually updated.
        """
        print("Reversing")
        self.miles -= 5
        if self.miles < 0:
            self.miles = 0
        return self
[ "swills0055@gmail.com" ]
swills0055@gmail.com
8224ec2ea7bc83f7d68a0df94cbee6f1ccdee3ae
6fa6288bd21694bb144798d63b77a8e2924603e5
/DataStructures/arrays/codility/cheap_letter_deletion.py
3a1e1b0a156b1406efd8ebc982701ecdfd622bd2
[]
no_license
akshatakulkarni98/ProblemSolving
649ecd47cec0a29ccff60edb60f3456bf982c4a1
6765dbbde41cfc5ee799193bbbdfb1565eb6a5f5
refs/heads/master
2023-01-03T19:03:49.249794
2020-10-27T06:28:02
2020-10-27T06:28:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,663
py
"""
You are given a string S. Deletion of the K-th letter of S costs C[K].
After deleting a letter, the costs of deleting other letters do not change.
For example, for S = "ab" and C = [1, 3], after deleting 'a', deletion of
'b' will still cost 3.

You want to delete some letters from S to obtain a string without two
identical letters next to each other.  What is the minimum total cost of
deletions to achieve such a string?

Write a function:

    def solution(S, C)

that, given string S and array C of integers, both of length N, returns
the minimum cost of all necessary deletions.

Examples:
1. Given S = "abccbd" and C = [0, 1, 2, 3, 4, 5], the function should
   return 2.  You can delete the first occurrence of 'c' to achieve
   "abcbd".
2. Given S = "aabbcc" and C = [1, 2, 1, 2, 1, 2], the function should
   return 3.  By deleting all letters with a cost of 1, you can achieve
   string "abc".
3. Given S = "aaaa" and C = [3, 4, 5, 6], the function should return 12.
   You need to delete all but one letter 'a', and the lowest cost of
   deletions is 3+4+5=12.
4. Given S = "ababa" and C = [10, 5, 10, 5, 10], the function should
   return 0.  There is no need to delete any letter.

Write an efficient algorithm for the following assumptions:
- string S and array C have length equal to N;
- N is an integer within the range [1..100,000];
- string S consists only of lowercase letters ('a'-'z');
- each element of array C is an integer within the range [0..1,000].
"""


def solution(S, C):
    """Return the minimum total deletion cost so that no two identical
    letters of S remain adjacent; returns -1 for empty input."""
    if not S or not C:
        return -1
    total_cost = 0
    keep = 0  # index of the letter retained from the current run of duplicates
    for cur in range(1, len(S)):
        if S[cur] != S[keep]:
            # A different letter starts a new run; nothing to delete.
            keep = cur
            continue
        # Duplicate pair: delete whichever of the two costs less.
        total_cost += min(C[cur], C[keep])
        if C[keep] < C[cur]:
            # The retained letter was the cheaper one and got deleted,
            # so the current letter becomes the retained one.
            keep = cur
    return total_cost
[ "noreply@github.com" ]
akshatakulkarni98.noreply@github.com
0106c4e95e4cb7a8b9b3ea1a99c3e6cf72e413fa
ee461003c4836dcc2e7c493e7b705841825cba52
/titanic/variable_builder.py
5e497b440f4638a3102f6f594e2c37429bcd89c0
[]
no_license
kenta-s/kaggle
5e05b10b2455f8e5744dc4aab99def3b15681063
b68ddfede3480214a163d4d8a778e4eb74d4f6f9
refs/heads/master
2021-06-24T03:37:41.385822
2018-10-27T00:36:04
2018-10-27T00:36:04
96,259,111
0
0
null
null
null
null
UTF-8
Python
false
false
2,148
py
import numpy as np
import pandas as pd
from IPython import embed


class VariableBuilder():
    """Builds float32 feature/label pairs from a Titanic-style CSV file."""

    def __init__(self, file):
        # Load the CSV once; every builder method reads from this frame.
        self.df = pd.read_csv(file)

    def __call__(self):
        # NOTE(review): `build_variable_x` is not defined anywhere in this
        # class, so calling an instance raises AttributeError — confirm
        # which builder method this was meant to delegate to.
        valid_data = self.build_variable_x()
        valid_data = np.array(valid_data).astype(np.float32).T
        return valid_data

    @staticmethod
    def convert_sex_to_int(str):
        # male -> 0, female -> 1, anything else -> 2
        return {'male': 0, 'female': 1}.get(str, 2)

    @staticmethod
    def convert_embarked_to_int(str):
        # S -> 0, C -> 1, Q -> 2, anything else -> 3
        return {'S': 0, 'C': 1, 'Q': 2}.get(str, 3)

    def build_train_variable(self):
        """Return a list of (7 features, int label) pairs from the frame."""
        sexes = [VariableBuilder.convert_sex_to_int(s) for s in self.df.Sex]
        ages = [0.0 if np.isnan(a) else a for a in self.df.Age]
        embarked = [VariableBuilder.convert_embarked_to_int(e)
                    for e in self.df.Embarked]
        valid_data = np.array([
            self.df.Pclass,
            sexes,
            ages,
            self.df.SibSp,
            self.df.Parch,
            self.df.Fare,
            embarked,
            self.df.Survived
        ]).astype(np.float32)
        # Split each row into the feature vector and the integer label.
        return [(np.array(row[0:7]), np.array(row[7]).astype(np.int32))
                for row in valid_data.T]

    def build_test_variable(self, file):
        """Like build_train_variable, but labels come from a second CSV."""
        sexes = [VariableBuilder.convert_sex_to_int(s) for s in self.df.Sex]
        ages = [0.0 if np.isnan(a) else a for a in self.df.Age]
        embarked = [VariableBuilder.convert_embarked_to_int(e)
                    for e in self.df.Embarked]
        survived = pd.read_csv(file).Survived
        valid_data = np.array([
            self.df.Pclass,
            sexes,
            ages,
            self.df.SibSp,
            self.df.Parch,
            self.df.Fare,
            embarked,
            survived
        ]).astype(np.float32)
        return [(np.array(row[0:7]), np.array(row[7]).astype(np.int32))
                for row in valid_data.T]
[ "knt01222@gmail.com" ]
knt01222@gmail.com
770f1a9f35b1bc5cf04b2acf5eb206b60f5e0aa8
f2851c0d6125fc93f6dcd9c731180484bcf3299e
/Simple_baidu_baike/baike_spider/test.py
62c20de996f7529ee4beed2a7108f60b418a465b
[]
no_license
FrankYang3110/Simple_baidu_baike_spider
da0b7a3478486d3fd36d46f04c01536d1751ecb8
a54161f20f002615d0454da4f6c63359b083fdee
refs/heads/master
2020-05-03T02:09:59.214644
2019-04-12T03:05:26
2019-04-12T03:05:26
178,360,610
0
0
null
null
null
null
UTF-8
Python
false
false
814
py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Scratch script: fetch a Baidu Baike page and collect its /item links."""
from fake_useragent import UserAgent
import re
import requests
from urllib.parse import urljoin
from lxml import etree

# Random browser User-Agent for each run.
headers = {'User-Agent': UserAgent().random}
url = 'https://baike.baidu.com/item/Python/407313?fr=aladdin'

response = requests.get(url, headers=headers)
# Let requests guess the page encoding from its content.
response.encoding = response.apparent_encoding
base_url = response.url
html = response.text

# Extract every anchor whose href contains "/item" via XPath.
tree = etree.HTML(html)
hrefs = tree.xpath('//a[contains(@href,"/item")]/@href')
[ "43460484+FrankYang3110@users.noreply.github.com" ]
43460484+FrankYang3110@users.noreply.github.com
f34674523abde90b1c1b6d237f4f30d76afc7788
ff9c5a10eea701b6b6be1ec7076b5dfab91b6a38
/ex08.py
6a748ab59f8096a77c05fccec2e922c387077bf3
[]
no_license
CFEsau/learnpython
21707fc15bcb09098e83b116bd77203158a65353
0616bf36c1a0e3b81cb6d0f5edc70c142904ca65
refs/heads/master
2021-01-18T15:51:13.948611
2017-08-15T15:09:24
2017-08-15T15:09:24
100,386,583
0
0
null
null
null
null
UTF-8
Python
false
false
449
py
#ex08: Printing, printing formatter = "%r %r %r %r" print formatter % (1, 2, 3, 4) print formatter % ("one", "two", "three", "four") print formatter % (True, False, False, True) print formatter % (formatter, formatter, formatter, formatter) print formatter % ( "I had this thing.", "That you could type up right.", "But it didn't sing.", # The above line contains an apostrophe which affects the output "So I said goodnight." )
[ "c.esau@shef.ac.uk" ]
c.esau@shef.ac.uk
17fa82a9093701e46b8648bd51b5684c11c5f8c9
5d6365f4cc81272f8c481ee31f1111e8eca6dca5
/alipay/aop/api/domain/BizActionLogDTO.py
bdaee8dcf4791f2ea8f5f6ac64c0cb3184f154de
[ "Apache-2.0" ]
permissive
barrybbb/alipay-sdk-python-all
9e99b56138e6ca9c0b236707c79899d396ac6f88
1b63620431d982d30d39ee0adc4b92463cbcee3c
refs/heads/master
2023-08-22T20:16:17.242701
2021-10-11T08:22:44
2021-10-11T08:22:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,378
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *


class BizActionLogDTO(object):
    """Data-transfer object for a business budget action log entry.

    Keeps the generated-SDK shape (private backing fields plus property
    accessors) but replaces ~120 lines of copy-pasted per-field
    serialisation boilerplate with a loop over a single field list.
    """

    # Single source of truth for the serialisable attribute names, used by
    # to_alipay_dict() and from_alipay_dict() below.
    _FIELDS = (
        'amount',
        'biz_budget_apply_code',
        'biz_budget_id',
        'biz_name',
        'biz_type',
        'biz_uk_id',
        'gmt_create',
        'gmt_modified',
        'id',
        'modify_type',
    )

    def __init__(self):
        self._amount = None
        self._biz_budget_apply_code = None
        self._biz_budget_id = None
        self._biz_name = None
        self._biz_type = None
        self._biz_uk_id = None
        self._gmt_create = None
        self._gmt_modified = None
        self._id = None
        self._modify_type = None

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def biz_budget_apply_code(self):
        return self._biz_budget_apply_code

    @biz_budget_apply_code.setter
    def biz_budget_apply_code(self, value):
        self._biz_budget_apply_code = value

    @property
    def biz_budget_id(self):
        return self._biz_budget_id

    @biz_budget_id.setter
    def biz_budget_id(self, value):
        self._biz_budget_id = value

    @property
    def biz_name(self):
        return self._biz_name

    @biz_name.setter
    def biz_name(self, value):
        self._biz_name = value

    @property
    def biz_type(self):
        return self._biz_type

    @biz_type.setter
    def biz_type(self, value):
        self._biz_type = value

    @property
    def biz_uk_id(self):
        return self._biz_uk_id

    @biz_uk_id.setter
    def biz_uk_id(self, value):
        self._biz_uk_id = value

    @property
    def gmt_create(self):
        return self._gmt_create

    @gmt_create.setter
    def gmt_create(self, value):
        self._gmt_create = value

    @property
    def gmt_modified(self):
        return self._gmt_modified

    @gmt_modified.setter
    def gmt_modified(self, value):
        self._gmt_modified = value

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def modify_type(self):
        return self._modify_type

    @modify_type.setter
    def modify_type(self, value):
        self._modify_type = value

    def to_alipay_dict(self):
        """Serialise every truthy field into a plain dict, recursing into
        values that themselves provide to_alipay_dict()."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                # Falsy fields (None, 0, '') are omitted — identical to the
                # behaviour of the original generated code.
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a BizActionLogDTO from a dict; returns None for empty input."""
        if not d:
            return None
        o = BizActionLogDTO()
        for name in BizActionLogDTO._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
[ "jiandong.jd@antfin.com" ]
jiandong.jd@antfin.com
46fd7987e76562876a9df13d571ec26da2089cf7
bcc90e2a3ef609caf24fa427061750cb7ed807ba
/Decorator/ConcreteComponent.py
5b6989308ff4c75e35df9b51e663518ce6ef0f15
[]
no_license
vudt93/DesignPattern
9140eb16544b1a02da1f889f5713b499166e9046
3f21df6be2b46fd4f5648b6d30b450699faabcbf
refs/heads/master
2021-03-24T01:19:56.332113
2020-03-17T09:36:26
2020-03-17T09:36:26
247,502,467
0
0
null
null
null
null
UTF-8
Python
false
false
134
py
from Decorator.Component import Component


class ConcreteComponent(Component):
    """Concrete Component of the Decorator pattern.

    Supplies the default behaviour that decorators can wrap with
    additional responsibilities.
    """

    def do_operation(self):
        print("Operation")
[ "vu.do@cj.net" ]
vu.do@cj.net
e5131ff29aa41698036707a61a86466d77e7d3b9
6c50bced6fb4474e4eb2e4f3c27a5ce38b0e6048
/manage.py
e1fbda688388d8db4449c6abeb1423356d40d79b
[]
no_license
NMShihab/WebChatApp
0d5651fe38baccfee186e59e32c2c79de2bb39a4
2dda4e750c370e74bbfbc42dce02432268194d46
refs/heads/master
2023-02-01T22:57:53.738222
2020-12-15T17:09:14
2020-12-15T17:09:14
319,082,634
0
0
null
null
null
null
UTF-8
Python
false
false
663
py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ChatApi.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        # Re-raise with a hint about the usual causes, keeping the
        # original ImportError chained as the cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
[ "nmshihabislam@gmail.com" ]
nmshihabislam@gmail.com
ac9c7f15ea1547bd32a8c41e2f64470813bf0d52
70054615f56be28373b00c9df96544ec822be683
/res/scripts/client/gui/scaleform/daapi/view/meta/questswindowmeta.py
66a92293420cda94a63d878facfa96ffceb268d2
[]
no_license
wanyancan/WOTDecompiled
c646ad700f5ec3fb81fb4e87862639ce0bdf0000
9ffb09007a61d723cdb28549e15db39c34c0ea1e
refs/heads/master
2020-04-17T23:13:15.649069
2013-11-15T16:37:10
2013-11-15T16:37:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
116
py
from gui.Scaleform.framework.entities.DAAPIModule import DAAPIModule


class QuestsWindowMeta(DAAPIModule):
    """Scaleform DAAPI meta class for the quests window; adds no members
    beyond what DAAPIModule provides."""
[ "james.sweet88@googlemail.com" ]
james.sweet88@googlemail.com
e0f9841169ca668d1ced55ab7a6441e600ad51af
064992da81d70b4df85fc192cddf93f2ded111a0
/analytic_scripts/code_reimplementation/Android/android_reimpl.py
cdba8d7572a2f6a951af93712bab41bee11b75b4
[]
no_license
maxxbw54/reuse_reimpl
07fd42ee1708337d009a88e505b2a157e4aeaef9
c88bc6671f791485dfd47a35c1c5d16415b5beac
refs/heads/master
2021-09-14T11:28:28.835579
2018-05-12T13:41:27
2018-05-12T13:41:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,450
py
import sys, subprocess, os, re from collections import OrderedDict import pandas as pd def shellCommand(command_str): cmd = subprocess.Popen(command_str, shell=True, stdout=subprocess.PIPE) cmd_out, cmd_err = cmd.communicate() return cmd_out def removeBracketsInQuotes(line): line = re.sub(r'\\\"', '', line) return re.sub(r'\".*?\"', '', line) def addedMethods(diff_list): added_method_list = list() in_block = False brackets = 0 method_name, param_cnt = None, None for line in diff_list: cleaned_line = removeBracketsInQuotes(line) if cleaned_line.startswith('+') and re.search(method_pattern, cleaned_line): method_sig = re.findall('(?:(?:public|private|protected|static|final|native|synchronized|abstract|transient)+\\s)+(?:[\\$_\\w\\<\\>\\[\\]]*)\\s+([\\$_\\w]+)\\(([^\\)]*)\\)?\\s*\\{?[^\\}]*\\}?', cleaned_line) method_name = method_sig[0][0] if len(method_sig[0][1].strip()) == 0: param_cnt = 0 else: param_cnt = method_sig[0][1].count(',') + 1 in_block = True if '{' in cleaned_line: brackets += 1 if '}' in cleaned_line: brackets -= 1 if brackets == 0: if DEBUG: print 'ENTIRE METHOD FOUND: %s %s\n\n' %(method_name, param_cnt) added_method_list.append([method_name, param_cnt]) in_block = False brackets = 0 elif in_block and cleaned_line.startswith('+'): if '{' in cleaned_line: brackets += 1 if '}' in cleaned_line: brackets -= 1 if brackets == 0: if DEBUG: print 'ENTIRE METHOD FOUND: %s %s\n\n' %(method_name, param_cnt) added_method_list.append([method_name, param_cnt]) in_block = False brackets = 0 elif in_block == True: in_block = False brackets = 0 return added_method_list def removedInvocations(diff_list): imported_classes = dict() instance_dict = dict() removed_invoc_dict = OrderedDict() i = 1 for line in diff_list: if not re.search(r'^(\+|\-)?\s*$', line): if line.startswith('-'): # collect removed library methods matched = re.findall(r'import\s+[\w\.]+\.(\w+)\s*\;', line) if matched: full_import_class = line[1:-1] class_name = matched[0] imported_classes[class_name] 
= full_import_class else: # instance of the removed library method instantiated = re.findall(r'(\w+)\s*\[?\s*\w*\s*\]?\s*\=\s*new\s+(\w+)\s*\<?\s*\w*\s*\>?\s*\(.*\)\s*\;', line) if len(instantiated): if instantiated[0][1] in imported_classes: instance_dict[instantiated[0][0]] = imported_classes[instantiated[0][1]] else: # remove redundant white space cleaned_line = re.sub(r'\s+', ' ', line) # whether an instance of a removed library method's invocation is also removed (instance method) for inst in instance_dict: # IS IT ALSO POSSIBLY TO GET AN ATTRIBUTE? invoc = re.findall(r'(\w+)\s*\[?\s*\w*\s*\]?\.\w+\s*\(.*\)\s*\;', cleaned_line) if len(invoc): if invoc[0] == inst: removed_invoc_dict[i] = instance_dict[inst] break # whether a removed library method's invocation is also removed (class method) for c in imported_classes: if not ('implements %s' %c in cleaned_line or 'extends %s' %c in cleaned_line): # IS IT ALSO POSSIBLY TO GET AN ATTRIBUTE? invoc = re.findall(r'(\w+)\.\w+\s*\(.*\)\s*\;', cleaned_line) if len(invoc): if invoc[0] == c: removed_invoc_dict[i] = imported_classes[c] break i += 1 if DEBUG: print removed_invoc_dict return removed_invoc_dict def addNearDelPosition(last_removed, removed_cnt, i, line): if last_removed: position_delta = i - last_removed - removed_cnt if DEBUG: print 'Pos delta:', position_delta, line if position_delta < 5 and position_delta > -5: return True return False def addedInvocations(diff_list, added_method_list, removed_invoc_list): refact_pairs = set() last_removed = None removed_cnt = 0 i = 1 for line in diff_list: if not re.search(r'^(\+|\-)?\s*$', line): if line.startswith('+'): for m in added_method_list: method_name = m[0] param_cnt = m[1] if (re.search(method_pattern, line)) == None and (method_name in line): matched = re.search(r'\((.+)\)', line) if matched: if len(matched.group(1).strip()) == 0: argument_cnt = 0 else: argument_cnt = matched.group(1).count(',')+1 if param_cnt == argument_cnt: addedNearby = 
addNearDelPosition(last_removed, removed_cnt, i, line) if addedNearby: if DEBUG: print last_removed, i, line refact_pairs.add((last_removed, i, last_library)) elif line.startswith('-'): if i in removed_invoc_list: last_removed = i removed_cnt = 0 last_library = removed_invoc_list[i] elif removed_cnt != None: removed_cnt += 1 i += 1 if DEBUG: print sorted(refact_pairs) return sorted(refact_pairs) # combine main funcitons to search refactoring from a client method implementation to an API call def searchRefactoring(diff_str): diff_list = diff_str.split('\n') # Detect entire added methods added_method_list = addedMethods(diff_list) # check whether a library method is removed near a deleted method call removed_invoc_list = removedInvocations(diff_list) # Check whether an added method's invocation is also added refact_res = addedInvocations(diff_list, added_method_list, removed_invoc_list) return refact_res def formatOutput(refact_res): formatted_list = list() for pair in refact_res: formatted_list.append('%s^%s' %(pair[0],pair[1])) return '-'.join(formatted_list) if __name__ == '__main__': DEBUG = False method_pattern = '((public|private|protected|static|final|native|synchronized|abstract|transient)+\\s)+[\\$_\\w\\<\\>\\[\\]]*\\s+[\\$_\\w]+\\([^\\)]*\\)?\\s*\\{?[^\\}]*\\}?' current_dir = os.getcwd() shellCommand('mkdir -p %s/converse_candidates' %current_dir) i = 1 app_names = os.listdir('fdroid_apps') for an_app in app_names: print 'Analyzing %s (%d) ...' %(an_app,i) output_list = list() # change to the subject system's directory os.chdir('%s/fdroid_apps/%s' %(current_dir,an_app)) # output commit list commit_logs = subprocess.check_output('git log --pretty=format:%h'.split()) for commit_id in commit_logs.split('\n'): if len(commit_id): diff_str = shellCommand('git show %s' %commit_id) # our current computational resources cannot allow to analyze super huge patches if sys.getsizeof(diff_str) > 1000000: print ' %s is too big!' 
%commit_id print ' ' + '-' * 50 else: refact_res = searchRefactoring(diff_str) if len(refact_res): # output locations print ' ', commit_id output_list.append(commit_id) for res in refact_res: print ' ', res[0], res[1], '\t', res[2] output_list.append(' (%s, %s)\t%s' %(res[0],res[1],res[2])) print ' ' + '-' * 50 output_list.append('-' * 50) # output the patch shellCommand('mkdir -p %s/converse_patches/%s' %(current_dir,an_app)) with open('%s/converse_patches/%s/%s.txt' %(current_dir,an_app,commit_id), 'w') as pf: pf.write(diff_str) if len(output_list): with open('%s/converse_candidates/%s_candidates.txt' %(current_dir,an_app), 'w') as wf: wf.write('\n'.join(output_list)) i += 1
[ "le.an@polymtl.ca" ]
le.an@polymtl.ca
b69ca6b786925c7020c263729f5d7bd1e74e3d05
35cf6fc79b8d6c335add8e55e0f4dca6f2816d1d
/Python_Study/第七模块学习/Day04/EdmureBlog/web/forms/base.py
ab198421829eb1b2c3ebc96a9c1743d571cc884e
[]
no_license
KongChan1988/51CTO-Treasure
08b4ca412ad8a09d67c1ea79c7149f8573309ca4
edb2e4bd11d39ac24cd240f3e815a88361867621
refs/heads/master
2021-07-04T15:57:56.164446
2019-07-24T15:28:36
2019-07-24T15:28:36
97,453,749
5
8
null
2019-10-30T22:05:12
2017-07-17T08:34:59
Python
UTF-8
Python
false
false
208
py
#!/usr/bin/env python # -*- coding:utf-8 -*- class BaseForm(object): def __init__(self, request, *args, **kwargs): self.request = request super(BaseForm, self).__init__(*args, **kwargs)
[ "wangwei_198811@163.com" ]
wangwei_198811@163.com
43730854b668cdc8e523b81d756b9615a915f5d5
2ff113af86a2cde69ccf114a98e3a2092f751993
/Aula18/B - Replacing Digits/b.py
12b37f10be0ef9aafdf7b37695fe1680e768444c
[]
no_license
Math-Gomes/ProgramacaoCompetitiva
7874e6a3cbcfadb7d4c2366f178d69b02909daf8
4ce79f5cb564ba7e07fdcee0995aa476b883c7a3
refs/heads/master
2023-08-13T10:26:43.481381
2021-09-22T12:46:07
2021-09-22T12:46:07
380,554,915
0
0
null
null
null
null
UTF-8
Python
false
false
300
py
if __name__ == '__main__': a = list(map(int, list(input()))) s = list(map(int, list(input()))) while True: max_s = max(s) min_a = min(a) if max_s < min_a: break s.remove(max_s) a[a.index(min_a)] = max_s print(*a, sep = '')
[ "mathjvmf@gmail.com" ]
mathjvmf@gmail.com
f602f55691918872b41f72f9e122627a0f538a6e
931841bd1de963e0dcfcf69114cec8c8c3f17323
/search/avltree.py
943eeeade92e526f2ae671c999a55416ecfaff7b
[]
no_license
sancheng/py-algos
6f2e1e700c60224963f77b01c706a399b9897c1d
1bb73d32a1cf4f95358df5d6e0aae2828098fd07
refs/heads/master
2021-08-21T21:07:33.199863
2017-11-29T03:10:26
2017-11-29T03:10:26
111,628,863
0
0
null
null
null
null
UTF-8
Python
false
false
4,048
py
class Node(object): def __init__(self,val,parent,isleft): self.val = val self.parent = parent self.leftchild,self.rightchild=None,None if parent is not None: if isleft: parent.setleftChild(self) else: parent.setrightChild(self) def setleftChild(self,left): self.leftchild = left if left is not None: left.parent = self def setrightChild(self,right): self.rightchild = right if right is not None: right.parent = self class AvlTree(object): def __init__(self,cmp_func,rootval): self.cmp = cmp_func self.root = Node(rootval,None,False) def height(self, node): if node is None: return 0 else: return 1 + max(self.height(node.leftchild),self.height(node.rightchild)) def insert(self,element): n = self.root #insert directly inode = None while True: if self.cmp(element,n.val) < 0: if n.leftchild is None: inode = Node(element,n,True) break else: n = n.leftchild else: if n.rightchild is None: inode = Node(element,n,False) break else: n = n.rightchild #find unbalanced subtree isleft_rotate = True path_directs = [] while inode is not None: lh = self.height(inode.leftchild) rh = self.height(inode.rightchild) hdiff = lh -rh if hdiff > 1: isleft_rotate = False break elif hdiff < -1: break if inode.parent is not None: if inode == inode.parent.leftchild: path_directs.append(0) else: path_directs.append(1) inode = inode.parent #rebalance if inode is not None: if path_directs[-2] == 1 and path_directs[-1] == 1: self.left_rotate(inode.rightchild,inode) elif path_directs[-2] == 0 and path_directs[-1] == 0: self.right_rotate(inode.leftchild,inode) elif path_directs[-2] == 1 and path_directs[-1] == 0: self.left_right_rotate(inode.leftchild, inode) elif path_directs[-2] == 0 and path_directs[-1] == 1: self.right_left_rotate(inode.leftchild,inode) def search(self,value): n = self.root while n is not None and n.val != value: if n.val > value: n = n.leftchild else: n = n.rightchild return n is not None def left_rotate(self,node,pnode): pp = pnode.parent if pp is None: self.root = node node.parent = 
None else: if pp.leftchild == pnode: pp.setleftChild(node) else: pp.setrightChild(node) pnode.setrightChild(node.leftchild) node.setleftChild(pnode) def right_rotate(self,node,pnode): pp = pnode.parent if pp is None: self.root = node node.parent = None else: if pp.leftchild == pnode: pp.setleftChild(node) else: pp.setrightChild(node) pnode.setleftChild(node.rightchild) node.setrightChild(pnode) def printTree(self): self.printNode(self.root) def printNode(self,node): print node.val if node.leftchild is not None: self.printNode(node.leftchild) if node.rightchild is not None: self.printNode(node.rightchild) #test right rotation tree = AvlTree(lambda x,y:x-y,1) tree.insert(5) tree.insert(8) tree.insert(10) tree.insert(11) tree.insert(12) tree.printTree() print tree.search(11) print tree.search(7)
[ "sancheng@cisco.com" ]
sancheng@cisco.com
0a7ff4211eaca98470e2742585ac72c1dbe492de
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02415/s303347384.py
f7612caa107b4023d41f174a9952151845dbb81a
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
41
py
word = input() print(str.swapcase(word))
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
2a132a7f304bf03097919aab6ebca25961224c39
4558f88bc7b48a692599aac4d2316201e6c95a02
/scud/plt/er_log/phil.py
0a245e28e09a42bb507e5403f76287bb172726c9
[]
no_license
kroon-lab/scud
bb3f7dc05c1000c0816d1b458d1c74bd74413053
b55423edb4b0e33110cf96fbd3828f86166924c9
refs/heads/master
2020-03-18T06:49:26.989684
2019-05-01T14:56:14
2019-05-01T14:56:14
134,412,663
0
0
null
null
null
null
UTF-8
Python
false
false
1,131
py
import libtbx.phil from scud.general.phil_methods import init_command_line_phil def phil_parse(args=None,log=None): ''' Contains default parameters, will process commandline params and changes them ''' # Default parameters master_phil = libtbx.phil.parse(""" er_log { input .help = "Input files" { log = None .type = path .help = 'File name of PDB containing ensemble to be converted to supercell' } params .help = "Control running" { title = None .type = str .help = 'Plot Title' show = False .type = bool .help = 'show plot or not' } output .help = "output files" { plot_out = plt.eps .type = path .help = 'Name of output plot' } } """) working_phil = init_command_line_phil(master_phil=master_phil, args=args, log=log) return working_phil.extract()
[ "l.m.j.kroon-batenburg@uu.nl" ]
l.m.j.kroon-batenburg@uu.nl
1989906ee223d14319cc93f1ef9c3f3bb7ce946e
addb8ac420db7328afd209639204b526edcf9a15
/W-Maze/Tabular-Q/env.py
4d3c5a70dc6ee69c0e79b15372fd8f60f18fcf08
[]
no_license
logic2code/DelayResolvedRL
ee704c8d4e9b1df2867dbe0ac77ab112ee4c0d89
a291875417a0e52fe09294d7f78ef9b3c9045b9c
refs/heads/main
2023-07-14T04:39:42.115756
2021-08-13T06:57:31
2021-08-13T06:57:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,162
py
import numpy as np from collections import deque class Environment: """Initialize Environment""" def __init__(self, seed, delay): np.random.seed(seed) self.breadth = 7 self.length = 11 self.state_space = np.empty([self.breadth, self.length], dtype='<U1') '''Environment Configuration''' self.state_space[:] = 'E' self.state_space[0] = 'X' self.state_space[1:4, self.length // 2 - 2] = 'X' self.state_space[1:4, self.length // 2 + 2] = 'X' self.state_space[0, self.length // 2 - 1:self.length // 2 + 2] = 'G' self.state_space[self.breadth - 1, 0] = 'P' '''Actions''' self.actions = [0, 1, 2, 3] # UP, DOWN, LEFT, RIGHT self.num_actions = len(self.actions) self.turn_limit = 300 self.delay = delay self.actions_in_buffer = deque(maxlen=self.delay) self.fill_up_buffer() self.delayed_action = 0 self.state = self.reset() def reset(self): x = np.random.randint(self.breadth) y = 0 starting_state = [x, y] self.state_space[x, y] = 'P' self.fill_up_buffer() return starting_state def fill_up_buffer(self): for _ in range(self.delay): action = np.random.choice(self.num_actions) self.actions_in_buffer.append(action) def step(self, state, action): done = False player_position = state reward = -1 """UP""" if action == 0: if player_position[0] - 1 >= 0 and self.state_space[player_position[0] - 1, player_position[1]] != 'X': self.state_space[player_position[0], player_position[1]] = 'E' if self.state_space[player_position[0] - 1, player_position[1]] == 'G': done = True self.state = self.reset() reward = 11 else: self.state = [player_position[0] - 1, player_position[1]] self.state_space[player_position[0] - 1, player_position[1]] = 'P' """DOWN""" if action == 1: if player_position[0] + 1 < self.breadth \ and self.state_space[player_position[0] + 1, player_position[1]] != 'X': self.state_space[player_position[0], player_position[1]] = 'E' if self.state_space[player_position[0] + 1, player_position[1]] == 'G': done = True self.state = self.reset() reward = 11 else: self.state = 
player_position[0] + 1, player_position[1] self.state_space[player_position[0] + 1, player_position[1]] = 'P' """LEFT""" if action == 2: if player_position[1] - 1 >= 0 and self.state_space[player_position[0], player_position[1] - 1] != 'X': self.state_space[player_position[0], player_position[1]] = 'E' if self.state_space[player_position[0], player_position[1] - 1] == 'G': done = True self.state = self.reset() reward = 11 else: self.state = player_position[0], player_position[1] - 1 self.state_space[player_position[0], player_position[1] - 1] = 'P' """RIGHT""" if action == 3: if player_position[1] + 1 < self.length \ and self.state_space[player_position[0], player_position[1] + 1] != 'X': self.state_space[player_position[0], player_position[1]] = 'E' if self.state_space[player_position[0], player_position[1] + 1] == 'G': done = True self.state = self.reset() reward = 11 else: self.state = [player_position[0], player_position[1] + 1] self.state_space[player_position[0], player_position[1] + 1] = 'P' return self.state, reward, done
[ "noreply@github.com" ]
logic2code.noreply@github.com
88881e340fd70a3969fd1822c2d1552ff989a8c7
c56fdac92304316ebe52796d619e5e72c564475b
/hw2/perceptron.py
f55a6de69ef7e861414fa307acc847777cedc155
[]
no_license
RamisesM/Learning-From-Data
0cf043b0474992441d746c2aff8ef6c7134bc7e6
dde2677773fea2bff48899371a58cebda75449ae
refs/heads/master
2020-03-07T11:30:45.894709
2018-05-14T20:14:20
2018-05-14T20:14:20
127,457,670
0
0
null
null
null
null
UTF-8
Python
false
false
1,740
py
import random import numpy class Point: def __init__(self): self.c = 1 self.x = random.uniform(-1,1) self.y = random.uniform(-1,1) self.vec = (self.c, self.x, self.y) class Function: def __init__(self): self.f = [0, 0, 0] def randomize(self): points = [Point(), Point()] self.f[2] = -1 self.f[1] = (points[1].y - points[0].y)/(points[1].x - points[0].x) self.f[0] = points[0].y - self.f[1]*points[0].x def classify(self, point): value = numpy.sign(numpy.inner(point.vec, self.f)) if value == 0: value = -1 return value def perceptron(target_function, hypothesis, data_set): f_set = [] for point in data_set: f_set += [target_function.classify(point)] h_set = [] for point in data_set: h_set += [hypothesis.classify(point)] # misclassified points misclassified_set = [] for index in range(len(data_set)): if h_set[index] != f_set[index]: misclassified_set += [index] number_of_iterations = 0 while len(misclassified_set) != 0: test_index = misclassified_set[random.randint(0, len(misclassified_set)-1)] test_point = data_set[test_index] hypothesis.f = [hypothesis.f[i] + f_set[test_index]*test_point.vec[i] for i in range(3)] # updating h_set h_set = [] for point in data_set: h_set += [hypothesis.classify(point)] # updating misclassified_set misclassified_set = [] for index in range(len(data_set)): if h_set[index] != f_set[index]: misclassified_set += [index] number_of_iterations += 1 return number_of_iterations
[ "ramises.martins@gmail.com" ]
ramises.martins@gmail.com