blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
281
content_id
stringlengths
40
40
detected_licenses
listlengths
0
57
license_type
stringclasses
2 values
repo_name
stringlengths
6
116
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
313 values
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
18.2k
668M
star_events_count
int64
0
102k
fork_events_count
int64
0
38.2k
gha_license_id
stringclasses
17 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
107 values
src_encoding
stringclasses
20 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.02M
extension
stringclasses
78 values
content
stringlengths
2
6.02M
authors
listlengths
1
1
author
stringlengths
0
175
17c5b2471577628fd0394914e3f06511ee0a6531
855e69a905c26d19eb2252515eeaef1c0379d9bb
/tests/app/settings.py
256875ff6b2f09e0803218febb668e4968b1638b
[ "BSD-2-Clause" ]
permissive
pnovusol/django-tables2
da630536628648491c9071c173643edf1ece2ded
48182aa120715b8c55c4bc8ccbdc3f53f0df1b0e
refs/heads/master
2021-01-18T03:09:01.008769
2013-08-12T15:07:38
2013-08-12T15:07:38
null
0
0
null
null
null
null
UTF-8
Python
false
false
672
py
from django.conf import global_settings import six DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:', } } INSTALLED_APPS = [ 'tests.app', 'django_tables2', ] ROOT_URLCONF = 'tests.app.urls' SECRET_KEY = "this is super secret" TEMPLATE_CONTEXT_PROCESSORS = [ 'django.core.context_processors.request' ] + list(global_settings.TEMPLATE_CONTEXT_PROCESSORS) TIME_ZONE = "Australia/Brisbane" USE_TZ = True if not six.PY3: # Haystack isn't compatible with Python 3 INSTALLED_APPS += [ 'haystack', ] HAYSTACK_SEARCH_ENGINE = 'simple', HAYSTACK_SITECONF = 'tests.app.models'
[ "bradley.ayers@gmail.com" ]
bradley.ayers@gmail.com
5e8d13dc417ced950898ac72b0618cc3c3a2fec2
055789729e069b1244bcf096cfa16d11d9596260
/Online-Examination-master/online_examination/exam/models.py
b13eda80f7c4c0f96de1a117ee98fd26c6d27141
[]
no_license
kishorekdty/netz-code
d877f8d82fcccdc197508265d3895d39eb3995b1
bf5c6d166690b400fd109070401d36f77f56b671
refs/heads/master
2021-01-10T08:25:51.199137
2015-06-05T14:26:22
2015-06-05T14:26:22
36,936,978
0
0
null
null
null
null
UTF-8
Python
false
false
10,915
py
from django.db import models from college.models import Course, Semester from academic.models import Student class Subject(models.Model): subject_name = models.CharField('Subject Name', null=True, blank=True, max_length=200) duration = models.CharField('Duration', null=True, blank=True, max_length=200) duration_parameter = models.CharField('Duration Parameter', null=True, blank=True, max_length=200) total_mark = models.CharField('Total Mark', null=True, blank=True, max_length=200) pass_mark = models.DecimalField('Pass Mark', max_digits=14, decimal_places=2, default=0) def __unicode__(self): return str(self.subject_name) def get_json_data(self): subject_data = { 'subject_id': self.id if self.id else '', 'subject': self.subject_name if self.subject_name else '', 'subject_name' : self.subject_name if self.subject_name else '', 'Duration': self.duration + '-' +self.duration_parameter, 'duration': self.duration, 'duration_parameter': self.duration_parameter, 'duration_no': self.duration, 'total_mark': self.total_mark if self.total_mark else '', 'pass_mark': self.pass_mark if self.pass_mark else '', } return subject_data class Meta: verbose_name = 'Subject' verbose_name_plural = 'Subject' class Exam(models.Model): student = models.ForeignKey(Student, null=True, blank=True) exam_name = models.CharField('Exam Name', null=True, blank=True, max_length=200) course = models.ForeignKey(Course, null=True, blank=True) semester = models.ForeignKey(Semester, null=True, blank=True) start_date = models.DateField('Start Date', null=True, blank=True) end_date = models.DateField('End Date', null=True, blank=True) no_subjects= models.IntegerField('Number of Subjects', default=0) exam_total= models.IntegerField('Exam Total', default=0) subjects = models.ManyToManyField(Subject, null=True, blank=True) def __unicode__(self): return str(self.exam_name) class Meta: verbose_name = 'Exam' verbose_name_plural = 'Exam' def get_json_data(self, x=None): subjects_data = [] for subject in 
self.subjects.all(): questions = Question.objects.filter(subject=subject) # if questions.count() == 0: subject_dict = { 'subject_id': subject.id if subject.id else '', 'subject': subject.subject_name if subject.subject_name else '', 'subject_name' : subject.subject_name if subject.subject_name else '', 'Duration': subject.duration + '-' +subject.duration_parameter, 'duration': subject.duration, 'duration_parameter': subject.duration_parameter, 'duration_no': subject.duration, 'total_mark': subject.total_mark if subject.total_mark else '', 'pass_mark': subject.pass_mark if subject.pass_mark else '', } question = Question.objects.filter(exam__id=self.id,subject__id=subject.id) question = question[0] if question.count() > 0 else None if x: if question: subjects_data.append(subject_dict) else: if question == None: subjects_data.append(subject_dict) exam_data = { 'exam_name':self.exam_name, 'exam': self.id, 'course': self.course.id, 'semester': self.semester.id, 'student': self.student.id if self.student else '', 'course_name': self.course.course, 'semester_name': self.semester.semester, 'student_name' : self.student.student_name if self.student else '', 'start_date': self.start_date.strftime('%d/%m/%Y') , 'end_date': self.end_date.strftime('%d/%m/%Y') , 'no_subjects': self.no_subjects, 'exam_total': self.exam_total, 'subjects_data': subjects_data, } return exam_data class Choice(models.Model): choice = models.CharField('Choice', null=True, blank=True, max_length=200) correct_answer = models.BooleanField('Correct Answer', default=False) def __unicode__(self): return str(self.choice) if self.choice else 'Choice' class Meta: verbose_name = 'Choice' verbose_name_plural = 'Choice' class Question(models.Model): exam = models.ForeignKey(Exam, null=True, blank=True) question = models.TextField('Question', null=True, blank=True) subject = models.ForeignKey(Subject, null=True, blank=True) choices = models.ManyToManyField(Choice, null=True, blank=True) mark = 
models.DecimalField(' Mark ',max_digits=14, decimal_places=2, default=0) def __unicode__(self): return str(self.question) if self.question else 'Question' class Meta: verbose_name = 'Question' verbose_name_plural = 'Question' def set_attributes(self, question_data): print question_data self.question = question_data['question'] self.mark = question_data['mark'] choices = question_data['choices'] print choices for choice_data in choices: choice = Choice.objects.create(choice=choice_data['choice']) if choice_data['correct_answer'] == 'true': choice.correct_answer = True else: choice.correct_answer = False choice.save() self.choices.add(choice) self.save() return self def get_json_data(self): choices = [] if self.choices: if self.choices.all().count() > 0: for choice in self.choices.all().order_by('-id'): choices.append({ 'id': choice.id, 'choice': choice.choice, }) question_data = { 'question': self.question , 'mark': self.mark, 'id': self.id, 'choices': choices, 'chosen_answer': '', } return question_data class StudentAnswer(models.Model): question = models.ForeignKey(Question, null=True, blank=True) choosen_choice = models.ForeignKey(Choice, null=True, blank=True) is_correct = models.BooleanField('Is Answer Correct', default=False) mark = models.DecimalField('Mark', max_digits=14, decimal_places=2, default=0 ) def __unicode__(self): return str(self.question.question) if self.question else 'Student' class Meta: verbose_name = 'StudentAnswer' verbose_name_plural = 'StudentAnswer' class AnswerSheet(models.Model): is_attempted = models.BooleanField('Is attempted',default=False) student = models.ForeignKey(Student, null=True, blank=True) exam = models.ForeignKey(Exam, null=True, blank=True) subject = models.ForeignKey(Subject, null=True, blank=True) student_answers = models.ManyToManyField(StudentAnswer, null=True, blank=True) is_completed = models.BooleanField('Is Completed',default=False) total_mark = models.DecimalField('Total Mark Obtained',max_digits=14, 
decimal_places=2, default=0) status = models.CharField('Status ', null=True, blank=True, max_length=200) def __unicode__(self): return str(self.student.student_name) if self.student else 'Student' class Meta: verbose_name = 'AnswerSheet' verbose_name_plural = 'AnswerSheet' def set_attributes(self, answer_data): questions = answer_data['questions'] total = 0 for question_data in questions: student_answer = StudentAnswer() if question_data['id']: question = Question.objects.get(id=question_data['id']) if question_data['chosen_answer']: choosen_choice = Choice.objects.get(id=question_data['chosen_answer']) student_answer.choosen_choice = choosen_choice for correct_choice in question.choices.all().order_by('id'): if correct_choice.choice == choosen_choice.choice: if correct_choice.correct_answer == True: student_answer.mark = question.mark student_answer.is_correct = True total = float(total) + float(student_answer.mark) student_answer.save() student_answer.question = question student_answer.save() self.student_answers.add(student_answer) self.total_mark = total self.save() if self.total_mark >= self.subject.pass_mark: self.status = 'Pass' else: self.status = 'Fail' self.save() def get_json_data(self): student_answers = [] if self.student_answers: if self.student_answers.all().count() > 0: for student_answer in self.student_answers.all().order_by('-id'): print(student_answer) student_answers.append({ 'id': student_answer.id if student_answer.id else '', 'question': student_answer.question.id if student_answer.question else'', 'choosen_choice': student_answer.choosen_choice.id if student_answer.choosen_choice else '', 'is_correct':student_answer.is_correct if student_answer.is_correct else '', 'mark': student_answer.mark if student_answer.mark else '', }) print("oo") print(student_answers) answer_sheet_data = { 'student': self.student.id, 'student_name': self.student.student_name, 'fathers_name': self.student.father_name if self.student.father_name else '', 
'specialization': self.student.specialization if self.student.specialization else '', 'exam': self.exam.id, 'exam_name':self.exam.exam_name, 'subject_name': self.subject.subject_name, 'status':self.status if self.status else 'Fail', 'total_mark': self.total_mark if self.total_mark else 0, 'subject' : self.subject.id, 'subject_total_mark':self.subject.total_mark, 'student_answers': student_answers, 'is_completed': self.is_completed if self.is_completed else '', 'is_attempted': self.is_attempted if self.is_attempted else '', } return answer_sheet_data
[ "kkishore@integretz.com" ]
kkishore@integretz.com
d3e5f7765718f82b66adb5745ca36ba8ba9e4adf
0d4a603ea0ffca1d6503b6f03f2d8bd9718adb35
/code/playerInterface.py
a0e181872f3029e2af3be4e6578ddc7a1414c8ce
[]
no_license
RobinMontferme/ProjetReversi2019
3dedbb0423153df1d0effb60455c6b6cc7d77e1f
576fb93de4a7e04687ee08b5b666d082f4763943
refs/heads/master
2022-03-24T17:28:36.137712
2019-12-22T20:45:25
2019-12-22T20:45:25
221,645,058
1
1
null
null
null
null
UTF-8
Python
false
false
1,126
py
class PlayerInterface(): # Returns your player name, as to be displayed during the game def getPlayerName(self): return "Not Defined" # Returns your move. The move must be a couple of two integers, # Which are the coordinates of where you want to put your piece # on the board. Coordinates are the coordinates given by the Reversy.py # methods (e.g. validMove(board, x, y) must be true of you play '(x,y)') # You can also answer (-1,-1) as "pass". Note: the referee will never # call your function if the game is over def getPlayerMove(self): return (-1,-1) # Inform you that the oponent has played this move. You must play it # with no search (just update your local variables to take it into account) def playOpponentMove(self, x,y): pass # Starts a new game, and give you your color. # As defined in Reversi.py : color=1 for BLACK, and color=2 for WHITE def newGame(self, color): pass # You can get a feedback on the winner # This function gives you the color of the winner def endGame(self, color): pass
[ "robin.montferme@gmail.com" ]
robin.montferme@gmail.com
b34b6eedf808f6a26a2cf64fcd353839b742dfa3
c36f46d8d8afd0ddf27d8ebe3afbdd03a8a8dddb
/Include/DBConnection.py
2681e264bcaa8e66137f3e6822b5d968708085d4
[]
no_license
NazarNintendo/python-app
a71a538622547be25128df2666914a99cba6d2d0
b611430cb140d8d0e6b918450296ee5a02e237bd
refs/heads/master
2022-12-10T07:55:08.604274
2020-09-13T21:34:26
2020-09-13T21:34:26
295,242,219
0
0
null
null
null
null
UTF-8
Python
false
false
419
py
import psycopg2 as psc from Include.config import config class DbConnection: records = [] def enable_connection(self): conn = psc.connect(f"dbname={config['database']} user={config['user']} password={config['pass']}") cur = conn.cursor() cur.execute("SELECT * FROM customer;") self.records = cur.fetchall() cur.close() conn.close() print(self.records)
[ "nazarchik1701@gmail.com" ]
nazarchik1701@gmail.com
50f5477a0bbb10e0d356fbe8aa777cae29d9dffa
6ec91b363b077bffd33f15300a0935124e9fb915
/Cracking_the_Code_Interview/Leetcode/14.DP/120.Triangle.py
a7e2879f610fe04793e1b1f2c35318dc4b3ff0fc
[]
no_license
lzxyzq/Cracking_the_Coding_Interview
03232515ae8eb50394d46322d36b230d1a626fcf
79dee7dab41830c4ff9e38858dad229815c719a0
refs/heads/master
2023-06-05T19:52:15.595289
2021-06-23T22:46:02
2021-06-23T22:46:02
238,068,000
0
1
null
null
null
null
UTF-8
Python
false
false
805
py
''' @Author: your name @Date: 2020-06-30 18:43:37 @LastEditTime: 2020-06-30 19:39:19 @LastEditors: Please set LastEditors @Description: In User Settings Edit @FilePath: /Cracking_the_Code_Interview/Leetcode/14.DP/120.Triangle.py ''' # Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below. # For example, given the following triangle # [ # [2], # [3,4], # [6,5,7], # [4,1,8,3] # ] # The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11). class Solution: def minimumTotal(self, triangle: List[List[int]]) -> int: for i in range(len(triangle)-2,-1,-1): for j in range(i+1): triangle[i][j] += min(triangle[i+1][j],triangle[i+1][j+1]) return triangle[0][0]
[ "lzxyzq@gmail.com" ]
lzxyzq@gmail.com
41b2488c525f79030de81dd82d710a0cd7c146a4
184245bcb1c338dfccb0c1cfa7bda86211a4ff81
/python_requests_essentials_practice/Chapter-1/client/make_request.py
7a7ce0abd89761eb91bf733114bc8aeb0bfb69a2
[]
no_license
bkzzshzz/python_intermediate
a24ed98eea47e06cfdb1226e7355885bfab7fcf4
9e9ddf6cb680d436c866d1041b446fe2eaa2d93e
refs/heads/master
2023-06-21T03:12:04.082582
2021-07-23T18:52:35
2021-07-23T18:52:35
375,579,916
0
0
null
null
null
null
UTF-8
Python
false
false
314
py
import requests import json def request1(): url = 'http://localhost:8001' payload = {'some' : 'data'} request_header = {'user-agent' : 'bkess browser'} response = requests.get(url, headers=request_header) print((response.content.decode('utf-8'))) if __name__ == '__main__' : request1()
[ "bkzz_shzz@yahoo.com" ]
bkzz_shzz@yahoo.com
9112fe8e2731f2eecbd1a30f14bdddf2f1e47068
15b2de45066828b113ded3cbb8b0296234cca713
/algorithm/bubble_sort.py
28da815f3684515116d11cfb7c5e188ae6061404
[]
no_license
linhuiyangcdns/leetcodepython
38ab91aea95c46ea183cc6072f1d71c3fe7d0007
ad9089d29ec8ef484fedece884f7e5702e6a3ad1
refs/heads/master
2020-03-21T18:29:07.276732
2018-08-15T15:09:22
2018-08-15T15:09:22
138,894,437
0
0
null
null
null
null
UTF-8
Python
false
false
308
py
def bublle_sort(list1): for i in range(len(list1)-1,0,-1): for j in range(i): if list1[j] > list1[j+1]: list1[j],list1[j+1] = list1[j+1],list1[j] return list1 if __name__ == '__main__': li = [54, 26, 93, 17, 77, 31, 44, 55, 20] print(bublle_sort(li))
[ "804268450@qq.com" ]
804268450@qq.com
9cb8af564787f969643454e662a6a84c037be982
a20db420b58321756676ddf41a2833f0283c6f66
/src/Repositories/TVDB.py
7906c18edb742e0e517a7cf05a381e20bda76f04
[]
no_license
CPSibo/Chrysalis
5a3194cfb0be8c24543ffb51dd52643afea9c2b1
a2cfaaf4aeb4ad7adb48f1229ba291a9af6dc263
refs/heads/master
2020-04-15T07:54:00.249188
2019-10-01T01:12:06
2019-10-01T01:12:06
164,506,527
0
0
null
null
null
null
UTF-8
Python
false
false
4,339
py
import os import json from Repositories.Repository import Repository, RepositoryTypes from Utilities.Logger import Logger from .Repository import RegisteredRepository @RegisteredRepository class TVDB(Repository): """ API wrapper for TheTVDB.com. Attributes: base_url (str): Minimal URL for the API. username (str): API username. user_key (str): Secret key for the user. api_key (str): Secret API key for the user. jwt (str): JWT returned from login. """ source: str = 'tvdb' type: RepositoryTypes = RepositoryTypes.EPISODE | RepositoryTypes.SERIES def __init__(self): self.base_url: str = 'https://api.thetvdb.com/' self.api_key: str = None self.jwt: str = None self.username = os.getenv("tvdb_username") self.user_key = os.getenv("tvdb_userkey") self.api_key = os.getenv("tvdb_apikey") def login(self): """ Pass the user credentials to receive a JWT. """ Logger.log(r'API', r'Logging in...') request_data = { 'username': self.username, 'userkey': self.user_key, 'apikey': self.api_key, } encoded_data = json.dumps(request_data).encode('utf-8') response = self.http.request( 'POST', self.base_url + 'login', body=encoded_data, headers={'Content-Type': 'application/json'} ) response_data = json.loads(response.data.decode('utf-8')) self.jwt = response_data['token'] def get_series(self, series_id: int): """ Query series information. Args: series_id (int): Unique ID of the series. Returns: dict: Series infomation. """ Logger.log(r'API', r'Querying series...') response = self.http.request( 'GET', self.base_url + 'series/' + str(series_id), headers={ 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.jwt } ) response_data = json.loads(response.data.decode('utf-8')) return response_data['data'] def get_series_episodes(self, series_id: int): """ Query episode information for series. Automatically retrieves all pages. Args: series_id (int): Unique ID of the series. Returns: list: List of dicts for episodes. 
""" Logger.log(r'API', r'Querying episodes...') page = 1 episodes = [] while True: response = self.http.request( 'GET', self.base_url + 'series/' + str(series_id) + '/episodes?page=' + str(page), headers={ 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.jwt } ) response_data = json.loads(response.data.decode('utf-8')) episodes.extend(response_data['data']) if response_data['links']['next'] is None or page == response_data['links']['last']: break page += 1 episodes = list({v['episodeName']:v for v in episodes if v['episodeName'] is not None and v['episodeName'] != ''}.values()) return episodes def match_episode(self, episodes: list, title: str): """ Attempts to find the series' episode that matches the given title from youtube-dl. Args: episodes (list): List of dicts of episode information. title (str): The title to match against. Returns: dict: The matched episode. """ import pylev for episode in episodes: episode['__distance'] = pylev.recursive_levenshtein(episode['episodeName'], title) episode['__ratio'] = 1 - (episode['__distance'] / len(episode['episodeName'])) threshold = 0.8 filtered_episodes = [item for item in episodes if item['__ratio'] >= threshold] sorted_episodes = sorted( filtered_episodes, key=lambda x: x['__distance'] ) return sorted_episodes[0]
[ "cpsibo@gmail.com" ]
cpsibo@gmail.com
3e7c131b1322a82cf9d29b9063a13e6984219f58
3f4f03b37d908d8dba0f360dec8ce607aefefc6a
/src/intranet3/utils/mail.py
68efe0d47e1df65cf586f9c57d1a12f53878b982
[ "MIT" ]
permissive
rutral/intranet
47a5bd33e5e1e82c2ceafef7dc8af886176835a6
e673ceb9a054910f6c858779cc783c6c3377e664
refs/heads/master
2021-01-20T23:51:07.545975
2013-07-01T17:14:47
2013-07-01T17:14:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
18,240
py
# -*- coding: utf-8 -*- """ Sending emails """ import re import email import quopri import datetime import time from base64 import b64decode from functools import partial from pprint import pformat from email.header import decode_header from email.utils import parsedate, formataddr from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email import Encoders from twisted.internet import ssl, reactor from twisted.internet.defer import Deferred, DeferredList from twisted.mail.smtp import ESMTPSenderFactory from twisted.mail.pop3client import POP3Client from twisted.internet.protocol import ClientFactory import transaction from intranet3.models import ApplicationConfig, TimeEntry, Project, Tracker, User, DBSession, TrackerCredentials from intranet3.models.project import SelectorMapping from intranet3.log import DEBUG_LOG, WARN_LOG, EXCEPTION_LOG, INFO_LOG from intranet3.utils.smtp import ESMTP_XOUATH2_SenderFactory try: from cStringIO import StringIO except ImportError: from StringIO import StringIO from email.mime.text import MIMEText LOG = INFO_LOG(__name__) EXCEPTION = EXCEPTION_LOG(__name__) WARN = WARN_LOG(__name__) DEBUG = DEBUG_LOG(__name__) MIN_HOURS = 6.995 #record hours class EmailSender(object): SMTP_SERVER = 'smtp.gmail.com' SMTP_PORT = 587 contextFactory = ssl.ClientContextFactory() @classmethod def send(cls, to, topic, message, sender_name=None, cc=None, replay_to=None): """ Send an email with message to given address. This is an asynchronous call. 
@return: deferred """ config = ApplicationConfig.get_current_config() user = config.google_user_email email_addr = user if sender_name: email_addr = formataddr((sender_name, email_addr)) secret = config.google_user_password SenderFactory = ESMTPSenderFactory email = MIMEText(message, _charset='utf-8') email['Subject'] = topic email['From'] = email_addr email['To'] = to if cc: email['Cc'] = cc if replay_to: email['Reply-To'] = replay_to formatted_mail = email.as_string() messageFile = StringIO(formatted_mail) resultDeferred = Deferred() senderFactory = SenderFactory( user, # user secret, # secret user, # from to, # to messageFile, # message resultDeferred, # deferred contextFactory=cls.contextFactory) reactor.connectTCP(cls.SMTP_SERVER, cls.SMTP_PORT, senderFactory) return resultDeferred @classmethod def send_html(cls, to, topic, message): config = ApplicationConfig.get_current_config() email = MIMEMultipart('alternative') email['Subject'] = topic email['From'] = config.google_user_email email['To'] = to email.attach(MIMEText(message,'html', 'utf-8')) formatted_mail = email.as_string() messageFile = StringIO(formatted_mail) resultDeferred = Deferred() senderFactory = ESMTPSenderFactory( config.google_user_email, # user config.google_user_password, # secret config.google_user_email, # from to, # to messageFile, # message resultDeferred, # deferred contextFactory=cls.contextFactory) reactor.connectTCP(cls.SMTP_SERVER, cls.SMTP_PORT, senderFactory) return resultDeferred @classmethod def send_with_file(cls, to, topic, message, file_path): config = ApplicationConfig.get_current_config() email = MIMEMultipart() email['Subject'] = topic email['From'] = config.google_user_email email['To'] = to part = MIMEBase('application', "octet-stream") part.set_payload(open(file_path, "rb").read()) Encoders.encode_base64(part) part.add_header('Content-Disposition', 'attachment; filename="%s"' % file_path.split('/')[-1]) email.attach(part) email.attach(MIMEText(message)) formatted_mail 
= email.as_string() messageFile = StringIO(formatted_mail) resultDeferred = Deferred() senderFactory = ESMTPSenderFactory( config.google_user_email, # user config.google_user_password, # secret config.google_user_email, # from to, # to messageFile, # message resultDeferred, # deferred contextFactory=cls.contextFactory) reactor.connectTCP(cls.SMTP_SERVER, cls.SMTP_PORT, senderFactory) return resultDeferred decode = lambda header: u''.join( val.decode('utf-8' if not encoding else encoding) for val, encoding in decode_header(header) ).strip() Q_ENCODING_REGEXP = re.compile(r'(\=\?[^\?]+\?[QB]\?[^\?]+\?\=)') def decode_subject(val): for value in Q_ENCODING_REGEXP.findall(val): val = val.replace(value, decode(value)) return val.strip() def get_msg_payload(msg): encoding = msg.get('Content-Transfer-Encoding') payload = msg.get_payload() if type(payload) == list: a_msg = payload[0] # first is plaintext, second - html encoding = a_msg.get('Content-Transfer-Encoding') payload = a_msg.get_payload() DEBUG(u'Extracted email msg %r with encoding %r' % (payload, encoding)) if encoding == 'quoted-printable': payload = quopri.decodestring(payload) elif encoding == 'base64': payload = b64decode(payload) return payload class MailerPOP3Client(POP3Client): MAX_EMAILS = 100 SUBJECT_REGEXP = re.compile(r'^\[Bug (\d+)\](.*)') HOURS_REGEXP = re.compile(r'^\s*Hours Worked\|\s*\|(\d+(\.\d+)?)$') HOURS_NEW_BUG_REGEXP = re.compile(r'^\s*Hours Worked: (\d+(\.\d+)?)$') TRAC_SUBJECT_REGEXP = re.compile(r'^(Re\:\ +)?\[.+\] \#\d+\: (.*)') TRAC_HOURS_REGEXP = re.compile(r'.*Add Hours to Ticket:\ *(\d+(\.\d+)?)') TRAC_AUTHOR_REGEXP = re.compile(r'^Changes \(by (.*)\)\:') TRAC_COMPONENT_REGEXP = re.compile(r'.*Component:\ *([^|]*)') timeout = 10 def handle_trac_email(self, msg, tracker): date = decode(msg['Date']) subject = msg['Subject'] DEBUG(u'Message with subject %r retrieved from date %r' % (subject, date)) date = datetime.datetime.fromtimestamp(time.mktime(parsedate(date))) bug_id = 
decode(msg['X-Trac-Ticket-ID']) subject = decode(subject.replace('\n', u'')) match = self.TRAC_SUBJECT_REGEXP.match(subject) if not match: WARN(u"Trac subject not matched %r" % (subject, )) return subject = match.group(2) hours = 0.0 who = '' component = '' payload = get_msg_payload(msg) for line in payload.split('\n'): match = self.TRAC_HOURS_REGEXP.match(line) if match: hours = float(match.group(1)) continue match = self.TRAC_AUTHOR_REGEXP.match(line) if match: who = match.group(1) continue match = self.TRAC_COMPONENT_REGEXP.match(line) if match: component = match.group(1).strip() continue DEBUG(u'Found bug title %(subject)s component %(component)s, by %(who)s from %(date)s, hours %(hours)s' % locals()) if hours <= 0.0: DEBUG(u"Ignoring bug with no hours") return who = who.lower() if not who in self.factory.logins_mappings[tracker.id]: DEBUG(u'User %s not in logins mapping' % (who, )) return user = self.factory.logins_mappings[tracker.id][who] DEBUG(u'Found user %s' % (user.name, )) mapping = self.factory.selector_mappings[tracker.id] project_id = mapping.match(bug_id, 'none', component) if project_id is None: DEBUG(u'Project not found for component %s' % (component, )) return project = self.factory.projects[project_id] LOG(u"Will add entry for user %s project %s bug #%s hours %s title %s" % ( user.name, project.name, bug_id, hours, subject )) return user.id, date, bug_id, project_id, hours, subject def handle_bugzilla_email(self, msg, tracker): date = decode(msg['Date']) component = decode(msg['X-Bugzilla-Component']) product = decode(msg['X-Bugzilla-Product']) who = decode(msg['X-Bugzilla-Who']) subject = msg['Subject'] DEBUG(u'Message with subject %r retrieved from date %r' % (subject, date)) date = datetime.datetime.fromtimestamp(time.mktime(parsedate(date))) subject = decode_subject(subject.replace('\n', u'').replace(u':', u' ')) match = self.SUBJECT_REGEXP.match(subject) if not match: DEBUG(u"Subject doesn't match regexp: %r" % subject) return bug_id, 
subject = match.groups() subject = subject.strip() is_new_bug = subject.startswith('New ') payload = get_msg_payload(msg) username = who.lower() if username not in self.factory.logins_mappings[tracker.id]: DEBUG(u'User %s not in logins mapping' % (who, )) return DEBUG(u'Found bug title %(subject)s product %(product)s, component %(component)s, by %(who)s from %(date)s' % locals()) bug_id = int(bug_id) newline = '\n' # some emails have \r\n insted of \n if '\r\n' in payload: DEBUG(u'Using CRLF istead of LF') newline = '\r\n' for line in payload.split(newline): if is_new_bug: match = self.HOURS_NEW_BUG_REGEXP.match(line) else: match = self.HOURS_REGEXP.match(line) if match: hours = float(match.groups()[0]) break else: hours = 0.0 DEBUG(u'Found bug #%(bug_id)s with title %(subject)s product %(product)s, component %(component)s, by %(who)s, hours %(hours)f %(date)s' % locals()) if is_new_bug: # new bug - create with 0 h, first strip title subject = subject[4:].strip() DEBUG(u'Bug creation found %s' % (subject, )) elif hours == 0.0: DEBUG(u'Ignoring non-new bug without hours') return user = self.factory.logins_mappings[tracker.id][username] DEBUG(u'Found user %s' % (user.name, )) # selector_mapping given explicitly to avoid cache lookups mapping = self.factory.selector_mappings[tracker.id] project_id = mapping.match(bug_id, product, component) project = self.factory.projects[project_id] LOG(u"Will add entry for user %s project %s bug #%s hours %s title %s" % ( user.name, project.name, bug_id, hours, subject )) return user.id, date, bug_id, project_id, hours, subject handle_cookie_trac_email = handle_trac_email handle_igozilla_email = handle_bugzilla_email handle_rockzilla_email = handle_bugzilla_email def serverGreeting(self, greeting): """ When connected to server """ DEBUG(u'Server greeting received %s' % (pformat(greeting, ))) self.login(self.factory.login, self.factory.password)\ .addCallbacks(self.on_login, partial(self.fail, u'login')) def on_login(self, welcome): 
""" When login succeeded """ DEBUG(u'Logged in: %s' % (welcome, )) self.stat().addCallbacks(self.on_stat, partial(self.fail, u'stat')) def prepare(self): """ Prepare structures for bugs fetching """ self.times = [] def on_stat(self, stats): """ When number of messages was provided """ LOG(u'Emails: %s' % (pformat(stats))) mails, sizes = stats if mails > self.MAX_EMAILS: mails = self.MAX_EMAILS if mails: self.prepare() retrievers = [] for i in xrange(mails): d = self.retrieve(i) d.addCallbacks(self.on_retrieve, partial(self.fail, u'retrive %s' % (i, ))) retrievers.append(d) DeferredList(retrievers).addCallback(self.on_finish) else: DEBUG(u'No new messages') self.quit().addCallbacks(self.on_quit, partial(self.fail, u'empty quit')) def match_tracker(self, msg): sender = decode(msg['From']) for email in self.factory.trackers: if email in sender: return self.factory.trackers[email] else: return None def on_retrieve(self, lines): """ When single message was retrieved """ msg = email.message_from_string('\n'.join(lines)) sender = decode(msg['From']) tracker = self.match_tracker(msg) if tracker is None: DEBUG(u'Email from %s ignored, no tracker matched' % (sender, )) return # find appopriate handler handler = getattr(self, 'handle_%s_email' % tracker.type) # handler should parse the response and return essential info or None data = handler(msg, tracker) if data is None: # email should be ignored return user_id, date, bug_id, project_id, hours, subject = data # try finding existing entry for this bug session = DBSession() bug_id = str(bug_id) entry = TimeEntry.query.filter(TimeEntry.user_id==user_id)\ .filter(TimeEntry.date==date.date())\ .filter(TimeEntry.ticket_id==bug_id)\ .filter(TimeEntry.project_id==project_id)\ .first() if not entry: # create new entry entry = TimeEntry( user_id=user_id, date=date.date(), time=hours, description=subject, ticket_id=bug_id, project_id = project_id, modified_ts=date ) session.add(entry) LOG(u'Adding new entry') else: # update existing 
entry if not entry.frozen: entry.time += hours entry.modified_ts = date # TODO: this might remove an already existing lateness session.add(entry) LOG(u'Updating existing entry') else: LOG(u'Omission of an existing entry because it is frozen') transaction.commit() def on_finish(self, results): """ When all messages have been retrieved """ self.quit().addCallbacks(self.on_quit, partial(self.fail, u'quit')) def on_quit(self, bye): """ When QUIT finishes """ DEBUG(u'POP3 Quit: %s' % bye) self.factory.done_callback() def fail(self, during, resp): """ Something went wrong """ EXCEPTION(u'POP3 Client failed during %s: %s' % (during, pformat(resp))) self.factory.done_callback() class CustomClientFactory(ClientFactory): protocol = MailerPOP3Client def __init__(self, login, password, done_callback, trackers, logins_mappings, projects, selector_mappings): self.login = login self.password = password self.done_callback = done_callback self.trackers = trackers self.logins_mappings = logins_mappings self.projects = projects self.selector_mappings = selector_mappings class MailCheckerTask(object): MAX_BUSY_CALLS = 3 POP3_SERVER = 'pop.gmail.com' POP3_PORT = 995 context_factory = ssl.ClientContextFactory() def __init__(self): self.busy = False self.busy_calls = 0 def __call__(self): if self.busy: self.busy_calls += 1 if self.busy_calls > self.MAX_BUSY_CALLS: self.busy_calls = 0 WARN(u'Will override a busy Mail Checker') self.run() else: WARN(u'Mail Checker is already running, ignoring (%s/%s)' % (self.busy_calls, self.MAX_BUSY_CALLS)) else: self.busy_calls = 0 self.busy = True LOG(u'Will start Mail Checker') self.run() def mark_not_busy(self): if not self.busy: WARN(u'Tried to unmark an already unmarked Mail Checker') else: self.busy = False LOG(u'Marked Mail Check as not busy anymore') def run(self): self._run() def _run(self): config = ApplicationConfig.get_current_config(allow_empty=True) if config is None: WARN(u'Application config not found, emails cannot be checked') return 
self.mark_not_busy() trackers = dict( (tracker.mailer, tracker) for tracker in Tracker.query.filter(Tracker.mailer != None).filter(Tracker.mailer != '') ) if not len(trackers): WARN(u'No trackers have mailers configured, email will not be checked') return self.mark_not_busy() username = config.google_user_email.encode('utf-8') password = config.google_user_password.encode('utf-8') # TODO logins_mappings = dict( (tracker.id, TrackerCredentials.get_logins_mapping(tracker)) for tracker in trackers.itervalues() ) selector_mappings = dict( (tracker.id, SelectorMapping(tracker)) for tracker in trackers.itervalues() ) # find all projects connected to the tracker projects = dict( (project.id, project) for project in Project.query.all() ) # all pre-conditions should be checked by now # start fetching f = CustomClientFactory(username, password, self.mark_not_busy, trackers, logins_mappings, projects, selector_mappings) f.protocol = MailerPOP3Client reactor.connectSSL(self.POP3_SERVER, self.POP3_PORT, f, self.context_factory)
[ "konrad.rotkiewicz@stxnext.pl" ]
konrad.rotkiewicz@stxnext.pl
dc29b58c92f8d705a0d55b2e25bf2a720992ce86
4584ecfc572567e62dab233c60824955315b7c4a
/examples/s5b_transfer/s5b_sender.py
70a9704fc8c42c8cc31d8f3ab6debff09da80b05
[ "BSD-3-Clause", "MIT", "BSD-2-Clause" ]
permissive
louiz/slixmpp
9bc8964d463672b28d8ccf3e324600e77fca4e8a
7b69ae373838c1adcbff223b7c2d8518ff6d8f85
refs/heads/master
2021-01-16T22:00:28.105481
2016-08-23T22:33:07
2016-08-23T22:33:07
988,500
2
3
null
2016-01-07T08:36:46
2010-10-14T23:03:43
Python
UTF-8
Python
false
false
4,054
py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Slixmpp: The Slick XMPP Library Copyright (C) 2015 Emmanuel Gil Peyrot This file is part of Slixmpp. See the file LICENSE for copying permission. """ import asyncio import logging from getpass import getpass from argparse import ArgumentParser import slixmpp from slixmpp.exceptions import IqError, IqTimeout class S5BSender(slixmpp.ClientXMPP): """ A basic example of creating and using a SOCKS5 bytestream. """ def __init__(self, jid, password, receiver, filename): slixmpp.ClientXMPP.__init__(self, jid, password) self.receiver = receiver self.file = open(filename, 'rb') # The session_start event will be triggered when # the bot establishes its connection with the server # and the XML streams are ready for use. self.add_event_handler("session_start", self.start) @asyncio.coroutine def start(self, event): """ Process the session_start event. Typical actions for the session_start event are requesting the roster and broadcasting an initial presence stanza. Arguments: event -- An empty dictionary. The session_start event does not provide any additional data. """ try: # Open the S5B stream in which to write to. proxy = yield from self['xep_0065'].handshake(self.receiver) # Send the entire file. while True: data = self.file.read(1048576) if not data: break yield from proxy.write(data) # And finally close the stream. proxy.transport.write_eof() except (IqError, IqTimeout): print('File transfer errored') else: print('File transfer finished') finally: self.file.close() self.disconnect() if __name__ == '__main__': # Setup the command line arguments. parser = ArgumentParser() # Output verbosity options. parser.add_argument("-q", "--quiet", help="set logging to ERROR", action="store_const", dest="loglevel", const=logging.ERROR, default=logging.INFO) parser.add_argument("-d", "--debug", help="set logging to DEBUG", action="store_const", dest="loglevel", const=logging.DEBUG, default=logging.INFO) # JID and password options. 
parser.add_argument("-j", "--jid", dest="jid", help="JID to use") parser.add_argument("-p", "--password", dest="password", help="password to use") parser.add_argument("-r", "--receiver", dest="receiver", help="JID of the receiver") parser.add_argument("-f", "--file", dest="filename", help="file to send") parser.add_argument("-m", "--use-messages", action="store_true", help="use messages instead of iqs for file transfer") args = parser.parse_args() # Setup logging. logging.basicConfig(level=args.loglevel, format='%(levelname)-8s %(message)s') if args.jid is None: args.jid = input("Username: ") if args.password is None: args.password = getpass("Password: ") if args.receiver is None: args.receiver = input("Receiver: ") if args.filename is None: args.filename = input("File path: ") # Setup the S5BSender and register plugins. Note that while plugins may # have interdependencies, the order in which you register them does # not matter. xmpp = S5BSender(args.jid, args.password, args.receiver, args.filename) xmpp.register_plugin('xep_0030') # Service Discovery xmpp.register_plugin('xep_0065') # SOCKS5 Bytestreams # Connect to the XMPP server and start processing XMPP stanzas. xmpp.connect() xmpp.process(forever=False)
[ "emmanuel.peyrot@collabora.com" ]
emmanuel.peyrot@collabora.com
148bcc649884e56f3eabd4f23b4769ac9a8402b6
3a937a74d17559cee02bb1b64f8bf0db83371d9f
/optim/opt-talk/many.py
eed29d37ba76a6eeb79caf0d2c4f6979dc2fff31
[]
no_license
amey-joshi/am
11d95229e6d7574189ab9fd8afe7d5088d3a9ce8
f546b13b741e2746a4eb1e45943c9686479bb896
refs/heads/master
2021-07-17T19:17:33.852098
2021-05-08T17:13:49
2021-05-08T17:13:49
247,212,889
1
0
null
null
null
null
UTF-8
Python
false
false
244
py
#!/bin/python import numpy as np import matplotlib.pyplot as plt x = np.linspace(0, 25, 100) y = np.sqrt(x) * np.cos(x) + x/3 plt.plot(x, y) plt.xlabel("x") plt.ylabel("y") plt.title("A function with several extrema") plt.savefig("many.png")
[ "ajoshi@localhost.localdomain" ]
ajoshi@localhost.localdomain
b6a660a4d40988c152dd9f46172b80b9eb8a1ebe
5c6fec629b7e78987a23ed554bae3a2e444212d4
/test.py
fad7b682e78fb1a18f58cb52ef70e14af46f172f
[ "MIT" ]
permissive
fz-lyu/FaceRecoCamera
2ee755ab56bd2b174909627e2009274d57675a33
3a5b5018e26e07a98036ca12e79d263e23e154f8
refs/heads/master
2022-06-16T00:39:09.683879
2017-10-16T15:20:01
2017-10-16T15:20:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,073
py
import face_recognition import cv2 import main # This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the # other example, but it includes some basic performance tweaks to make things run a lot faster: # 1. Process each video frame at 1/4 resolution (though still display it at full resolution) # 2. Only detect faces in every other frame of video. # PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam. # OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this # specific demo. If you have trouble installing it, try any of the other demos that don't require it instead. # Get a reference to webcam #0 (the default one) video_capture = cv2.VideoCapture(0) # Load a sample picture and learn how to recognize it. obama_image = face_recognition.load_image_file("obama.jpg") obama_face_encoding = face_recognition.face_encodings(obama_image)[0] # Initialize some variables face_locations = [] face_encodings = [] face_names = [] process_this_frame = True while True: # Grab a single frame of video # ret, frame = video_capture.read() frame = main.get_video() frame = cv2.resize(frame, (640, 480)) # Resize frame of video to 1/4 size for faster face recognition processing small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25) # Only process every other frame of video to save time if process_this_frame: # Find all the faces and face encodings in the current frame of video face_locations = face_recognition.face_locations(small_frame) face_encodings = face_recognition.face_encodings(small_frame, face_locations) face_names = [] for face_encoding in face_encodings: # See if the face is a match for the known face(s) match = face_recognition.compare_faces([obama_face_encoding], face_encoding) name = "Unknown" if match[0]: name = "Barack" face_names.append(name) process_this_frame = not process_this_frame # Display the results for 
(top, right, bottom, left), name in zip(face_locations, face_names): # Scale back up face locations since the frame we detected in was scaled to 1/4 size top *= 4 right *= 4 bottom *= 4 left *= 4 print([top, right, bottom, left]) # Draw a box around the face cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2) # # Draw a label with a name below the face cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), 2) font = cv2.FONT_HERSHEY_DUPLEX cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1) # Display the resulting image cv2.imshow('Video', frame) # Hit 'q' on the keyboard to quit! if cv2.waitKey(1) & 0xFF == ord('q'): break # Release handle to the webcam video_capture.release() cv2.destroyAllWindows()
[ "fanzhe@gatech.edu" ]
fanzhe@gatech.edu
bf3c76a5c4aad512d0d65ca40066f8ef02a57b78
3677fed8c91a284c8ce24afd8f853604e799258a
/configs/__init__.py
b498ffb392b2e7931868beb34e155acd8f332073
[]
no_license
iseekwonderful/HPA-singlecell-2nd-dual-head-pipeline
945e683ad8061cecb42b404234b404016d558bfa
8033fb317fa59694db44c9823bdf06752cf0a1bc
refs/heads/master
2023-05-11T07:59:31.020497
2021-05-25T03:21:31
2021-05-25T03:21:31
370,543,923
10
3
null
null
null
null
UTF-8
Python
false
false
7,805
py
import yaml import json import os import glob class Element: def __repr__(self): return ', '.join(['{}: {}'.format(k, v) for k, v in self.__dict__.items()]) class DPP(Element): def __init__(self, dict): self.nodes = 1 self.gpus = 4 self.rank = 0 self.sb = True self.mode = 'train' self.checkpoint = None class Basic(Element): def __init__(self, dict): self.seed = dict.get('seed', '233') self.GPU = str(dict.get('GPU', '0')) self.id = dict.get('id', 'unnamed') self.debug = dict.get('debug', False) self.mode = dict.get('mode', 'train') self.search = dict.get('search', False) self.amp = dict.get('amp', 'None') if len(self.GPU) > 1: self.GPU = [int(x) for x in self.GPU] class Experiment(Element): def __init__(self, dict): self.name = dict.get('name', 'KFold') self.random_state = dict.get('random_state', '2333') self.fold = dict.get('fold', 5) self.run_fold = dict.get('run_fold', 0) self.weight = dict.get('weight', False) self.method = dict.get('method', 'none') self.tile = dict.get('tile', 12) self.count = dict.get('count', 16) self.regression = dict.get('regression', False) self.scale = dict.get('scale', 1) self.level = int(dict.get('level', 1)) self.public = dict.get('public', True) self.merge = dict.get('merge', True) self.n = dict.get('N', True) self.batch_sampler = dict.get('batch_sampler', False) # batch sampler # initial_miu: 6 # miu_factor: 6 self.pos_ratio = dict.get('pos_ratio', 16) self.externals = dict.get('externals', []) self.initial_miu = dict.get('initial_miu', -1) self.miu_factor = dict.get('miu_factor', -1) self.full = dict.get('full', False) self.preprocess = dict.get('preprocess', 'train') self.image_only = dict.get('image_only', True) self.skip_outlier = dict.get('skip_outlier', False) self.outlier = dict.get('outlier', 'train') self.outlier_method = dict.get('outlier_method', 'drop') self.file = dict.get('csv_file', 'none') self.smoothing = dict.get('smoothing', 0) class Data(Element): def __init__(self, dict): self.cell = dict.get('cell', 'none') 
self.name = dict.get('name', 'CouldDataset') if os.name == 'nt': self.data_root = dict.get('dir_nt', '/') else: self.data_root = dict.get('dir_sv', '/') # for aws, # /home/sheep/Bengali/data # to any user try: self.data_root = glob.glob('/' + self.data_root.split('/')[1] + '/*/' + '/'.join(self.data_root.split('/')[3:]))[0] except: self.data_root = 'REPLACE ME PLZ!' class Model(Element): def __init__(self, dict): self.name = dict.get('name', 'resnet50') self.param = dict.get('params', {}) # add default true if 'dropout' not in self.param: self.param['dropout'] = True self.from_checkpoint = dict.get('from_checkpoint', 'none') self.out_feature = dict.get('out_feature', 1) class Train(Element): ''' freeze_backbond: 1 freeze_top_layer_groups: 0 freeze_start_epoch: 1 :param dict: ''' def __init__(self, dict): self.dir = dict.get('dir', None) if not self.dir: raise Exception('Training dir must assigned') self.batch_size = dict.get('batch_size', 8) self.num_epochs = dict.get('num_epochs', 100) self.cutmix = dict.get('cutmix', False) self.mixup = dict.get('mixup', False) self.beta = dict.get('beta', 1) self.cutmix_prob = dict.get('cutmix_prob', 0.5) self.cutmix_prob_increase = dict.get('cutmix_prob_increase', 0) self.validations_round = dict.get('validations_round', 1) self.freeze_backbond = dict.get('freeze_backbond', 0) self.freeze_top_layer_groups = dict.get('freeze_top_layer_groups', 0) self.freeze_start_epoch = dict.get('freeze_start_epoch', 1) self.clip = dict.get('clip_grad', None) self.combine_mix = dict.get('combine_mix', False) self.combine_list = dict.get('combine_list', []) self.combine_p = dict.get('combine_p', []) class Eval(Element): def __init__(self, dict): self.batch_size = dict.get('batch_size', 32) class Loss(Element): def __init__(self, dict): self.name = dict.get('name') self.param = dict.get('params', {}) # if 'class_balanced' not in self.param: # self.param['class_balanced'] = False self.weight_type = dict.get('weight_type', 'None') 
self.weight_value = dict.get('weight_value', None) self.cellweight = dict.get('cellweight', 0.1) self.pos_weight = dict.get('pos_weight', 10) class Optimizer(Element): def __init__(self, dict): self.name = dict.get('name') self.param = dict.get('params', {}) self.step = dict.get('step', 1) class Scheduler(Element): def __init__(self, dict): self.name = dict.get('name') self.param = dict.get('params', {}) self.warm_up = dict.get('warm_up', False) class Transform(Element): def __init__(self, dict): self.name = dict.get('name') self.val_name = dict.get('val_name', 'None') self.param = dict.get('params', {}) self.num_preprocessor = dict.get('num_preprocessor', 0) self.size = dict.get('size', (137, 236)) self.half = dict.get('half', False) self.tiny = dict.get('tiny', False) self.smaller = dict.get('smaller', False) self.larger = dict.get('larger', False) self.random_scale = dict.get('random_scale', False) self.random_margin = dict.get('random_margin', False) self.random_choice = dict.get('random_choice', False) self.shuffle = dict.get('shuffle', False) self.scale = dict.get('scale', []) self.gray = dict.get('gray', False) class Config: def __init__(self, dict): self.param = dict self.basic = Basic(dict.get('basic', {})) self.experiment = Experiment(dict.get('experiment', {})) self.data = Data(dict.get('data', {})) self.model = Model(dict.get('model', {})) self.train = Train(dict.get('train', {})) self.eval = Eval(dict.get('eval', {})) self.loss = Loss(dict.get('loss', {})) self.optimizer = Optimizer(dict.get('optimizer', {})) self.scheduler = Scheduler(dict.get('scheduler', {})) self.transform = Transform(dict.get('transform', {})) self.dpp = DPP({}) def __repr__(self): return '\t\n'.join(['{}: {}'.format(k, v) for k, v in self.__dict__.items()]) def dump_json(self, file_path): with open(file_path, 'w') as fp: json.dump(self.param, fp, indent=4) def to_flatten_dict(self): ft = {} for k, v in self.param.items(): for kk, vv in v.items(): if type(vv) in [dict, list]: vv = 
str(vv) ft[f'{k}.{kk}'] = vv return ft @staticmethod def load_json(file_path): with open(file_path) as fp: data = json.load(fp) return Config(data) @staticmethod def load(file_path): with open(file_path) as fp: data = yaml.load(fp) return Config(data) def get_config(name): return Config.load(os.path.dirname(os.path.realpath(__file__)) + '/' + name) if __name__ == '__main__': args = get_config('example.yaml') ft = args.to_flatten_dict() a = 0
[ "sss3barry@gmail.com" ]
sss3barry@gmail.com
bf73bd5eda0d1303716e539c0d40f57d6ab13de8
22fe6ed51715486ebbc09e404504ed4d7a28c37d
/python-katas/57_CountHi.py
6ef69b2da8a4251f4d619f0a62ab8c3d5042d32a
[]
no_license
Jethet/Practice-more
1dd3ff19dcb3342a543ea1553a1a6fb0264b9c38
8488a679730e3406329ef30b4f438d41dd3167d6
refs/heads/master
2023-01-28T14:51:39.283741
2023-01-06T10:14:41
2023-01-06T10:14:41
160,946,017
1
1
null
null
null
null
UTF-8
Python
false
false
378
py
# Return the number of times the string 'hi' appears in a given string. def count_hi(str): return str.count('hi') # CodingBat solution: def count_hi(str): sum = 0 for i in range(len(str)-1): if str[i:i+2] == 'hi': sum += 1 #or: sum = sum + 1 return sum print(count_hi('abc hi ho')) print(count_hi('ABChi hi')) print(count_hi('hihi'))
[ "henriette.hettinga@gmail.com" ]
henriette.hettinga@gmail.com
24f8d7f705edfcfdc557a42ef15fc94321307445
e4a3d6ae2e207af7b1f933b4f24dfe16a10fecb9
/magdysite/magdysite/wsgi.py
350f77a973606b8504f602eccece7c0defb0cc7e
[]
no_license
magdymaher129/django
69b8663605a5b6b94df10fbe0180620cab713ef4
8b07ae031d26b1260ae1a5326512e376bcf34632
refs/heads/master
2020-06-19T11:03:55.014955
2019-07-13T06:57:24
2019-07-13T06:57:24
196,686,429
1
0
null
null
null
null
UTF-8
Python
false
false
411
py
""" WSGI config for magdysite project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'magdysite.settings') application = get_wsgi_application()
[ "noreply@github.com" ]
noreply@github.com
a027d72c06e32069f439b2351506e2f825bda09b
dc5208c53aff4f7aba38424263ec9e62e5071def
/shop_sale.py
2dc550a0bbc3acdc3ae7ebda9fec7a25a520698e
[]
no_license
GeoLibra/shop
29653e4e1935cfe778891003a5de57954df73464
85d29899518672ceb17250e888f9184e8af0ece0
refs/heads/master
2020-04-21T17:48:27.150774
2019-02-24T06:00:30
2019-02-24T06:00:30
169,748,044
2
0
null
null
null
null
UTF-8
Python
false
false
7,094
py
from PyQt5.QtCore import * from PyQt5.QtGui import * from PyQt5.QtWidgets import * class ShopSale_UI(object): # def __init__(self): # super(ShopSale_UI, self).__init__() # self.setupUi() def setupUi(self,MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(1000,600) self.widget = QWidget(MainWindow) self.widget.setGeometry(10, 10, 1000, 600) self.widget.setObjectName("widget") # self.setWindowFlags(Qt.FramelessWindowHint) label_sell_title = QLabel("销售") label_sell_title.setFont(QFont("华文行楷", 20)) # label_code = QLabel("条码:") # label_name = QLabel("名称:") self.code_radio = QRadioButton("条码") self.name_radio = QRadioButton("名称") self.code_radio.setChecked(True) label_sum = QLabel("总计") label_sale = QLabel("应收") label_cost = QLabel("实收") label_change = QLabel("找零") # 定义条形码输入框,并设置只允许输入整数 self.line_code = QLineEdit() # 设置输入框的大小 self.line_code.setFixedSize(350, 30) # self.line_code.textChanged.connect(self.searchByCode) self.line_code.setFocus() # self.line_txm.setValidator(QIntValidator()) # self.line_name = QLineEdit() # self.line_name.setFixedSize(150, 30) # self.line_xssl.setValidator(QIntValidator()) # 定义多个list用来暂存预售货信息 self.name = [] # 名称 self.price = [] # 单价 self.count = [] # 数量 self.sum = [] # 总计 # 结算窗口 self.line_sell1 = QLineEdit() self.line_sell2 = QLineEdit() self.line_sell3 = QLineEdit() self.line_sell4 = QLineEdit() self.line_sell1.setText("0.0") self.line_sell2.setText("0.0") self.line_sell3.setText("0.0") # 实收框发生改变时 self.line_sell3.textChanged.connect(self.jiesuan) self.line_sell3.selectionChanged.connect(self.jiesuan0) self.line_sell4.setText("0.0") self.line_sell1.setReadOnly(True) self.line_sell2.setReadOnly(True) self.line_sell4.setReadOnly(True) self.line_sell1.setFixedSize(150, 30) self.line_sell2.setFixedSize(150, 30) self.line_sell3.setFixedSize(150, 30) self.line_sell4.setFixedSize(150, 30) # 录入按钮,绑定事件 btn_sell_lr = QPushButton("查找") btn_sell_lr.clicked.connect(self.event_lr) # 确认按钮,绑定事件 btn_sell_qr = QPushButton("确认") 
btn_sell_qr.clicked.connect(self.event_qr) # 清零按钮,绑定事件 btn_sell_ql = QPushButton("清零") btn_sell_ql.clicked.connect(self.event_ql) btn_sell_qr.setFixedSize(150, 30) btn_sell_ql.setFixedSize(150, 30) self.tabel_sell = QTableWidget() self.tabel_sell.setObjectName("tableWidget") self.tabel_sell.setRowCount(1) self.tabel_sell.setColumnCount(8) self.tabel_sell.setHorizontalHeaderLabels(["条形码","名称","生产厂家","批号","有效期", "零售价", "数量", "总计"]) # self.tabel_sell.setColumnHidden(4, True) # self.tabel_sell.setColumnWidth(4, 200) # 不可编辑 self.tabel_sell.setEditTriggers(QAbstractItemView.DoubleClicked) # 隔行改变颜色 self.tabel_sell.setAlternatingRowColors(True) self.tabel_sell.horizontalHeader().setStretchLastSection(True) # 水平方向,表格大小拓展到适当的尺寸 self.tabel_sell.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch) # 布局 layout = QVBoxLayout(self.widget) v1 = QVBoxLayout() h1 = QHBoxLayout() h2 = QHBoxLayout() h3 = QHBoxLayout() # 控制台 v2 = QVBoxLayout() h4 = QHBoxLayout() f = QFormLayout() w_title = QWidget() w_21 = QWidget() w_22 = QWidget() w_31 = QWidget() w_321 = QWidget() w_321.setFixedSize(235, 330) w_322 = QWidget() w_console = QWidget() v1.addWidget(label_sell_title, 0, Qt.AlignCenter) h1.addWidget(self.code_radio, 0, Qt.AlignLeft) h1.addWidget(self.name_radio, 0, Qt.AlignLeft) h1.addWidget(self.line_code, 0, Qt.AlignLeft) # h1.addWidget(label_name) # h1.addWidget(self.line_name) h2.addWidget(btn_sell_lr) h3.addWidget(self.tabel_sell) self.textEdit =QTextEdit() self.textEdit.setGeometry(QRect(0, 0, 200, 200)) self.textEdit.setObjectName("textEdit") self.textEdit.setReadOnly(True) h4.addWidget(self.textEdit, 0, Qt.AlignBottom) f.addRow(label_sum, self.line_sell1) f.addRow(label_sale, self.line_sell2) f.addRow(label_cost, self.line_sell3) f.addRow(label_change, self.line_sell4) v2.addWidget(btn_sell_qr, Qt.AlignCenter | Qt.AlignVCenter) v2.addWidget(btn_sell_ql, Qt.AlignCenter | Qt.AlignVCenter) w_title.setLayout(v1) w_21.setLayout(h1) w_22.setLayout(h2) w_31.setLayout(h3) 
w_321.setLayout(f) w_322.setLayout(v2) w_console.setLayout(h4) splitter_sell1 = QSplitter(Qt.Horizontal) splitter_sell1.setSizes([800, 60]) splitter_sell1.addWidget(w_title) splitter_sell2 = QSplitter(Qt.Horizontal) splitter_sell2.setSizes([150, 60]) splitter_sell2.addWidget(w_22) splitter_sell3 = QSplitter(Qt.Horizontal) # splitter3.setSizes([800, 60]) splitter_sell3.addWidget(w_21) splitter_sell3.addWidget(splitter_sell2) splitter_sell4 = QSplitter(Qt.Vertical) splitter_sell4.setSizes([800, 140]) splitter_sell4.addWidget(splitter_sell1) splitter_sell4.addWidget(splitter_sell3) splitter_sell5 = QSplitter(Qt.Horizontal) splitter_sell5.setSizes([150, 60]) splitter_sell5.addWidget(w_322) splitter_sell6 = QSplitter(Qt.Vertical) splitter_sell6.addWidget(w_321) splitter_sell6.addWidget(splitter_sell5) splitter_sell7 = QSplitter(Qt.Horizontal) splitter_sell7.setSizes([700, 390]) splitter_sell7.addWidget(self.tabel_sell) splitter_sell8 = QSplitter(Qt.Horizontal) splitter_sell8.addWidget(splitter_sell7) splitter_sell8.addWidget(splitter_sell6) # splitter_sell10 = QSplitter(Qt.Horizontal) # splitter_sell10.addWidget(w_console) splitter_sell9 = QSplitter(Qt.Vertical) splitter_sell9.addWidget(splitter_sell4) splitter_sell9.addWidget(splitter_sell8) # splitter_sell9.addWidget(splitter_sell10) # splitter_sell9.addWidget(w_low) layout.addWidget(splitter_sell9) self.setLayout(layout) # 临时变量 self.Row = 0 QMetaObject.connectSlotsByName(MainWindow)
[ "674530915@qq.com" ]
674530915@qq.com
4462ff9f0be2b2a4bc6388beb3c3bb7e86b6f68f
dca1b84e1f59a7218487a921aec7967001780001
/3rdStage/train.py
8bcd97bf3cb7a801a2a602b23135d36c1aa2b88d
[]
no_license
deeChyz/smomi4-5
1f5380ddee8499612fdf1fed336dbd71029685ca
f8ececb2fe65854c6736c01b3142dd2bdfaa79f8
refs/heads/master
2022-06-30T04:57:26.800232
2020-05-11T23:29:49
2020-05-11T23:29:49
257,396,814
0
0
null
null
null
null
UTF-8
Python
false
false
5,029
py
"""This module implements data feeding and training loop to create model to classify X-Ray chest images as a lab example for BSU students. """ __author__ = 'Alexander Soroka, soroka.a.m@gmail.com' __copyright__ = """Copyright 2020 Alexander Soroka""" import argparse import glob import numpy as np import tensorflow as tf import time from tensorflow.python import keras as keras from tensorflow.python.keras.callbacks import LearningRateScheduler LOG_DIR = 'logs' SHUFFLE_BUFFER = 10 BATCH_SIZE = 8 NUM_CLASSES = 2 PARALLEL_CALLS=4 RESIZE_TO = 224 TRAINSET_SIZE = 5216 VALSET_SIZE=624 def parse_proto_example(proto): keys_to_features = { 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/class/label': tf.FixedLenFeature([], tf.int64, default_value=tf.zeros([], dtype=tf.int64)) } example = tf.parse_single_example(proto, keys_to_features) example['image'] = tf.image.decode_jpeg(example['image/encoded'], channels=3) example['image'] = tf.image.convert_image_dtype(example['image'], dtype=tf.float32) example['image'] = tf.image.resize_images(example['image'], tf.constant([RESIZE_TO, RESIZE_TO])) return example['image'], example['image/class/label'] def normalize(image, label): return tf.image.per_image_standardization(image), label def resize(image, label): return tf.image.resize_images(image, tf.constant([RESIZE_TO, RESIZE_TO])), label def create_dataset(filenames, batch_size): """Create dataset from tfrecords file :tfrecords_files: Mask to collect tfrecords file of dataset :returns: tf.data.Dataset """ return tf.data.TFRecordDataset(filenames)\ .map(parse_proto_example)\ .map(resize)\ .map(normalize)\ .shuffle(buffer_size=5 * batch_size)\ .repeat()\ .batch(batch_size)\ .prefetch(2 * batch_size) def create_augmented_dataset(filenames, batch_size): """Create dataset from tfrecords file :tfrecords_files: Mask to collect tfrecords file of dataset :returns: tf.data.Dataset """ return tf.data.TFRecordDataset(filenames)\ .map(parse_proto_example)\ 
.map(resize)\ .map(normalize)\ .map(augmented_train)\ .shuffle(buffer_size=5 * batch_size)\ .repeat()\ .batch(batch_size)\ .prefetch(2 * batch_size) def augmented_train(image, label): image = tf.image.convert_image_dtype(image, tf.float32) image = tf.image.random_brightness(image, 0.5, seed=None) image = tf.image.random_contrast(image, 0.2, 1.2, seed=None) return image,label class Validation(tf.keras.callbacks.Callback): def __init__(self, log_dir, validation_files, batch_size): self.log_dir = log_dir self.validation_files = validation_files self.batch_size = batch_size def on_epoch_end(self, epoch, logs=None): print('The average loss for epoch {} is {:7.2f} '.format( epoch, logs['loss'] )) validation_dataset = create_dataset(self.validation_files, self.batch_size) validation_images, validation_labels = validation_dataset.make_one_shot_iterator().get_next() validation_labels = tf.one_hot(validation_labels, NUM_CLASSES) result = self.model.evaluate( validation_images, validation_labels, steps=int(np.ceil(VALSET_SIZE / float(BATCH_SIZE))) ) callback = tf.keras.callbacks.TensorBoard(log_dir=self.log_dir, update_freq='epoch', batch_size=self.batch_size) callback.set_model(self.model) callback.on_epoch_end(epoch, { 'val_' + self.model.metrics_names[i]: v for i, v in enumerate(result) }) def build_model(): model = keras.models.load_model('model.h5') model.trainable = True return model def main(): args = argparse.ArgumentParser() args.add_argument('--train', type=str, help='Glob pattern to collect train tfrecord files') args.add_argument('--test', type=str, help='Glob pattern to collect test tfrecord files') args = args.parse_args() train_dataset = create_augmented_dataset(glob.glob(args.train), BATCH_SIZE) train_images, train_labels = train_dataset.make_one_shot_iterator().get_next() train_labels = tf.one_hot(train_labels, NUM_CLASSES) model = build_model() model.compile( optimizer=keras.optimizers.sgd(lr=0.00000000001, momentum=0.9), 
loss=tf.keras.losses.categorical_crossentropy, metrics=[tf.keras.metrics.categorical_accuracy], target_tensors=[train_labels] ) log_dir='{}/xray-{}'.format(LOG_DIR, time.time()) model.fit( (train_images, train_labels), epochs=90, steps_per_epoch=int(np.ceil(TRAINSET_SIZE / float(BATCH_SIZE))), callbacks=[ tf.keras.callbacks.TensorBoard(log_dir), Validation(log_dir, validation_files=glob.glob(args.test), batch_size=BATCH_SIZE) ] ) if __name__ == '__main__': main()
[ "rogrumb@gmail.com" ]
rogrumb@gmail.com
8a6b6c6fe1b3b2af79f8f844858d2002dd0dd07b
6d68911c2a223577f34be94ca85465434758e7a6
/cameo.py
9951c87c0568a993118aca94d9e91a2dc780d380
[]
no_license
qzlshy/face
155c31dec4df69c76702c755da23ac90f93b2078
d126f4bacf0752baeb754b6795c8bdd8695ea788
refs/heads/master
2020-12-02T00:40:19.403301
2019-12-30T02:24:56
2019-12-30T02:24:56
230,832,617
0
0
null
null
null
null
UTF-8
Python
false
false
1,331
py
#!/usr/bin/python3 # coding=utf-8 import cv2 from managers import WindowsManager, CaptureManager import face_recognition import numpy as np class Cameo(object): def __init__(self): self._windowManager = WindowsManager('Cameo', self.onkeypress) self._capturemanager = CaptureManager(cv2.VideoCapture(0), self._windowManager, True) def run(self): """run the main loop""" face_cascade=cv2.CascadeClassifier('./haarcascade_frontalface_default.xml') self._windowManager.create_window() while self._windowManager.is_window_created: self._capturemanager.enterframe() frame = self._capturemanager.frame # 这里插入滤波代码 self._capturemanager.exitframe() self._windowManager.process_events() def onkeypress(self, keycode): """处理按键操作 空格 表示 截图 tab 表示 开始/停止 记录 screencast escape 表示退出 """ if keycode == 32: # 空格 self._capturemanager.face_code() elif keycode == 27: # escape 键 self._capturemanager.save_face_code('./face_code.npy') self._windowManager.destroy_window() print("正在退出") if __name__ == "__main__": Cameo().run()
[ "qzlshy@126.com" ]
qzlshy@126.com
a7f06fa5e0370c086f018f7bef4e557808384e41
27791ad050c906b956919dbbe475391edccdfd4f
/nanob_tests.py
7eb7fa7bad38b0be98ec981e0af46b684411cb5e
[]
no_license
liamdillon/nanobulletin
188d2da5cd58a7167c88a29d191be1b1c3d3ae29
056a0bee73fd39b55f0f385eec307c8893a7b8a6
refs/heads/master
2020-12-25T17:26:22.783856
2016-08-15T17:41:03
2016-08-15T17:41:03
12,597,942
0
0
null
null
null
null
UTF-8
Python
false
false
1,763
py
import os import nanob import unittest import tempfile class NanobTestCase(unittest.TestCase): def setUp(self): #create an empty database for testing and store its file descriptor #use the flask test client self.db_fd, nanob.app.config['DATABASE'] = tempfile.mkstemp() nanob.app.config['TESTING'] = True self.app = nanob.app.test_client() nanob.init_db() def tearDown(self): #destroy the test database os.close(self.db_fd) os.unlink(nanob.app.config['DATABASE']) def test_black_database(self): #test the app's default state assert 'No posts found' in self.app.get('/').data def test_make_post(self): #test that you can make a post page = self.app.post('/make', data = {'title':'Test title', 'content':'Test content'}, follow_redirects=True) assert 'No posts found' not in page.data assert 'Test title' in page.data assert 'Test content' in page.data def test_no_title(self): #make sure you can't make a post without a title page = self.app.post('/make', data = {'title':'', 'content':'whatever'},follow_redirects=True) assert 'No posts found' in page.data assert 'Your post has no title' in page.data assert 'whatever' not in page.data def test_delete_post(self): #tests post deletion page = self.app.post('/make', data = {'title':'test_title', 'content':'test_content'},follow_redirects=True) assert 'test_title' in page.data self.app.post('/delete_post', data = {'id':1}) assert 'Post successfully deleted' in page.data assert 'test_title' not in page.data if __name__ == '__main__': unittest.main()
[ "liamhdillon@gmail.com" ]
liamhdillon@gmail.com
43bbdb90a3582369da02ca0e61c79d2650509477
94deb6a8650e23a1f77e486995db8d7fc27d30b4
/ComputerObject.py
bdefc3fe96318e61f26b8f09d683ff050f0eac0f
[]
no_license
daniellande/battleships
f15fc5bcd1553a2cf17b0265d65c73295193106f
53aacfc8f1e7c8b39d5228daf2964b21af66f1ac
refs/heads/master
2020-05-27T07:26:02.127492
2014-08-22T15:53:17
2014-08-22T15:53:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,363
py
import random from ShipsObject import * from StringConversions import * class Computer(object): def __init__(self): self.mode = "Seek" self.hits = [] self.letters = ["A","B","C","D","E","F","G","H","I","J"] self.numbers = ["1","2","3","4","5","6","7","8","9","10"] self.sunkships = 0 self.indicator = 0 def choose_square(self): self.row = random.choice(["A","B","C","D","E","F","G","H","I","J"]) self.col = random.choice(["1","2","3","4","5","6","7","8","9","10"]) return self.row, self.col def change_mode(self): if self.mode == "Seek": self.mode = "Destroy" else: self.mode = "Seek" def choose_square_AI(self): if self.mode == "Seek": #self.row = random.choice(["A","B","C","D","E","F","G","H","I","J"]) #self.col = random.choice(["1","2","3","4","5","6","7","8","9","10"]) blah = random.randint(0,4) if blah == 1 or blah == 3: self.row = random.choice(["A","C","E","G","I"]) self.col = random.choice(["1","3","5","7","9"]) elif blah == 0 or blah == 2: self.row = random.choice(["B","D","F","H","J"]) self.col = random.choice(["2","4","6","8","10"]) else: self.row = random.choice(["A","B","C","D","E","F","G","H","I","J"]) self.col = random.choice(["1","2","3","4","5","6","7","8","9","10"]) else: (arg1, arg2) = random.choice(self.hits) # ensure that it doesn't check the other side of the board whilst seeking to destroy if arg1 == 9: vh = random.randint(0,1) if vh == 1: matrix1 = [self.letters[arg1]] matrix2 = [self.numbers[arg2 + 1], self.numbers[arg2 - 1]] else: matrix1 = [self.letters[arg1 - 1]] matrix2 = [self.numbers[arg2]] elif arg1 == 0: vh = random.randint(0,1) if vh == 1: matrix1 = [self.letters[arg1]] matrix2 = [self.numbers[arg2 + 1], self.numbers[arg2 - 1]] else: matrix1 = [self.letters[arg1 + 1]] matrix2 = [self.numbers[arg2]] elif arg2 == 0: vh = random.randint(0,1) if vh == 1: matrix1 = [self.letters[arg1]] matrix2 = [self.numbers[arg2 + 1]] else: matrix1 = [self.letters[arg1 + 1], self.letters[arg1 - 1]] matrix2 = [self.numbers[arg2]] elif arg2 == 9: vh = 
random.randint(0,1) if vh == 1: matrix1 = [self.letters[arg1]] matrix2 = [self.numbers[arg2 + 1]] else: matrix1 = [self.letters[arg1 + 1], self.letters[arg1 - 1]] matrix2 = [self.numbers[arg2]] else: vh = random.randint(0,1) if vh == 1: matrix1 = [self.letters[arg1]] matrix2 = [self.numbers[arg2 + 1], self.numbers[arg2 - 1]] else: matrix1 = [self.letters[arg1 + 1], self.letters[arg1 - 1]] matrix2 = [self.numbers[arg2]] self.row = random.choice(matrix1) self.col = random.choice(matrix2) return self.row, self.col def choose_orientation(self): rand = random.randint(0,1) if rand == 0: self.orientation = "v" else: self.orientation = "h" return self.orientation def enter_ship_user(self, ship): self.ship = ship [self.row, self.col] = self.choose_square() self.orientation = self.choose_orientation() return self.row, self.col, self.orientation, self.ship def count_sunk(self): self.count = 0 self.indicator = 0 for ship in all_ships: if ship.sunk == True: self.count += 1 if self.sunkships < self.count: self.indicator = 1 self.sunkships = self.count my_computer = Computer()
[ "mtt@MTTs-MacBook-Pro-2.local" ]
mtt@MTTs-MacBook-Pro-2.local
5f46ec6485ed4ae0fa2df0dde33d8e79a69ee1a4
ef68f5e2e1ede8a91d42d7f1c61cfab7ae3efbe0
/scripts/train_test_different_k.py
f4e5388f6e07760c5dd2a0c2a9e62f9f703d4c89
[]
no_license
abhiagwl/Improving_Inference_in_VAE
c09ba89dd757c818589490260653a318dd0038cb
21f03a1ff5fcb2374752395ff59e931599e5aa2b
refs/heads/master
2021-08-27T21:28:52.901662
2017-12-10T11:51:32
2017-12-10T11:51:32
113,743,501
2
0
null
null
null
null
UTF-8
Python
false
false
10,710
py
import tensorflow as tf import numpy as np import datetime import socket import os import argparse #-------------------------------- Hyper Parameters ----------------------------- batch_size = 100 hidden_dim = 30 learning_rate = 0.0005 max_epochs = 50 k=10 scale_param = 10000 sample_param = 100 #-------------------------------- Loading data ----------------------------- train = np.array([[int(i) for i in line.split()] for line in open('../data/binarized_mnist_train.amat').readlines()]).astype('float32') valid = np.array([[int(i) for i in line.split()] for line in open('../data/binarized_mnist_valid.amat').readlines()]).astype('float32') test = np.array([[int(i) for i in line.split()] for line in open('../data/binarized_mnist_test.amat').readlines()]).astype('float32') parser = argparse.ArgumentParser() parser.add_argument('-g', '--gpu', type=int, required=True) parser.add_argument('-k', '--num_of_transforms', type=int, required=True) parser.add_argument('-e', '--num_of_epoch', type=int, required=True) parser.add_argument('-n', '--name', type=str, required=False) parser.add_argument('-s', '--sparam', type=int, required=True) args = parser.parse_args() gpu = args.gpu max_epochs = args.num_of_epoch k = args.num_of_transforms name_dir = args.name sample_param = args.sparam os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu) #-------------------------------- Placeholders ----------------------------- # summaries_dir = "../logs/norm_test" if name_dir is not None: summaries_dir = os.path.join('/extra_data/abhiagwl/pml/logs/norm_tests'+'_'+str(name_dir)+"_"+str(k)+"_"+str(scale_param)+"_"+str(max_epochs), datetime.datetime.now().strftime('%b%d_%H-%M-%S')+'_'+socket.gethostname()) else: summaries_dir = os.path.join('/extra_data/abhiagwl/pml/logs/norm_tests'+"_"+str(k)+"_"+str(scale_param)+"_"+str(max_epochs), datetime.datetime.now().strftime('%b%d_%H-%M-%S')+'_'+socket.gethostname()) x = tf.placeholder(tf.float32, shape=[None, 28*28]) phase_train = tf.placeholder(tf.bool) t = 
tf.placeholder(tf.float32) # z = tf.placeholder(tf.float32 , shape=[None,hidden_dim]) # gen_image = tf.placeholder(tf.bool) #-------------------------------- Utility functions ----------------------------- def variable_summaries(var): """Attach a lot of summaries to a Tensor (for TensorBoard visualization).""" with tf.name_scope('summaries'): # mean = tf.reduce_mean(var) tf.summary.scalar('value',var) # with tf.name_scope('stddev'): # stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) # tf.summary.scalar('stddev', stddev) # tf.summary.scalar('max', tf.reduce_max(var)) # tf.summary.scalar('min', tf.reduce_min(var)) # tf.summary.histogram('histogram', var) def weight_variable(shape, name=None): if name: w = tf.truncated_normal(shape, stddev=0.1, name=name) else: w = tf.truncated_normal(shape, stddev=0.1) return w def bias_variable(shape, name=None): # avoid dead neurons if name: b = tf.constant(0.1, shape=shape, name=name) else: b = tf.constant(0.1, shape=shape) return tf.Variable(b) #-------------------------------- norm layer ----------------------------- def norm_layer(z): w = weight_variable([hidden_dim,1]) b = bias_variable([1,]) u = weight_variable([hidden_dim,1]) m = tf.matmul(z,w) + b h = tf.nn.tanh(m) out1 = z + tf.matmul(h,u,transpose_b = True) h_ = tf.gradients(h,m)[0] phi = tf.matmul(h_,w,transpose_b = True) out2 = tf.log(tf.abs(1 + tf.matmul(phi,u))) return out1, out2 #---------------------------------------- Encoder ------------------------------------------- with tf.name_scope("encoder"): conv1 = tf.layers.conv2d(inputs=tf.reshape(x, [-1, 28, 28, 1]), filters=32, kernel_size=[5, 5], kernel_initializer = tf.contrib.layers.xavier_initializer(), padding="same", activation=None) conv1 = tf.nn.relu(tf.layers.batch_normalization(conv1, training=phase_train)) pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2,2], strides=(2, 2)) conv2 = tf.layers.conv2d(inputs=pool1, filters=32, kernel_initializer = tf.contrib.layers.xavier_initializer(), 
kernel_size=[5, 5], padding="same", activation=None) conv2 = tf.nn.relu(tf.layers.batch_normalization(conv2, training=phase_train)) pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2,2], strides=(2, 2)) flat = tf.reshape(pool2, [-1, 32 * 7 * 7]) mu = tf.layers.dense(flat, units=hidden_dim, activation=None, kernel_initializer = tf.contrib.layers.xavier_initializer()) sd = tf.layers.dense(flat, units=hidden_dim, activation=tf.nn.softplus, kernel_initializer = tf.contrib.layers.xavier_initializer()) eps = tf.contrib.distributions.MultivariateNormalDiag(loc=[0.]*hidden_dim, scale_diag=[1.]*hidden_dim) samples = mu + eps.sample(tf.shape(sd)[0]) * sd #-------------------------------------- Normalizing Flow ---------------------- input_z_nf = samples with tf.name_scope("norm_flow"): with tf.name_scope("norm_flow_1"): z_nf, pz_nf = norm_layer(input_z_nf) input_z_nf = z_nf sum_p_nf = pz_nf for i in range(k): with tf.name_scope("norm_flow_"+str(i+2)): z_nf, pz_nf = norm_layer(input_z_nf) input_z_nf = z_nf sum_p_nf = sum_p_nf + pz_nf #-------------------------------------- Decoder ----------------------------------- with tf.name_scope("decoder"): # input_z = tf.where(gen_image,z,samples) # input_z = samples input_z = z_nf flat = tf.layers.dense(input_z, units=32 * 7 * 7, activation=None, kernel_initializer = tf.contrib.layers.xavier_initializer()) flat = tf.nn.relu(tf.layers.batch_normalization(flat, training=phase_train)) deconv1 = tf.layers.conv2d_transpose(tf.reshape(flat, [-1, 7, 7, 32]), filters=32, kernel_size=[5,5], strides=(2, 2), padding='same', kernel_initializer = tf.contrib.layers.xavier_initializer(), activation=None) deconv1 = tf.nn.relu(tf.layers.batch_normalization(deconv1, training=phase_train)) deconv2 = tf.layers.conv2d_transpose(deconv1, filters=32, kernel_size=[5,5], strides=(2, 2), padding='same', kernel_initializer = tf.contrib.layers.xavier_initializer(), activation=None) deconv2 = tf.nn.relu(tf.layers.batch_normalization(deconv2, 
training=phase_train)) out = tf.layers.conv2d_transpose(deconv2, filters=1, kernel_size=[5,5], strides=(1, 1), padding='same', kernel_initializer = tf.contrib.layers.xavier_initializer(), activation=None) #-------------------------------------- Reconstruction and losses ----------------------------------- reconstructed_image = tf.nn.sigmoid(out) > 0.5 likelihood = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.reshape(x, [-1, 28, 28, 1]), logits=out),axis=[1,2,3]) with tf.name_scope("likelihood"): likelihood_ = tf.reduce_mean(likelihood) variable_summaries(likelihood_) # loss = tf.reduce_mean(likelihood - tf.reduce_sum(0.5 * (1.+ tf.log(sd ** 2) - mu ** 2 - sd ** 2), axis=1)) # aplha_t = tf.constant(0.01,dtype= tf.float32) beta_t = tf.minimum(1.,0.01 + t/scale_param) # beta_t =1.0 with tf.name_scope("loss"): loss = beta_t*tf.reduce_mean(likelihood) -0.5*tf.reduce_mean(tf.reduce_sum(tf.log(sd),axis =1)) + tf.reduce_mean(tf.reduce_sum(0.5*input_z**2 ,axis=1))-tf.reduce_mean(sum_p_nf) variable_summaries(loss) #-------------------------------------- Updates for batch norm and other layers ----------------------------------- update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss) #-------------------------------------- Summary & Clearning GPU space after script is done ----------------------------------- tf.summary.scalar('K',k) tf.summary.scalar('scale_param',scale_param) merged = tf.summary.merge_all() gpu_options = tf.GPUOptions()#per_process_gpu_memory_fraction=0.5) gpu_options.allow_growth=True sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) train_writer = tf.summary.FileWriter(summaries_dir + '/train', sess.graph) test_writer = tf.summary.FileWriter(summaries_dir + '/test') val_writer = tf.summary.FileWriter(summaries_dir + '/val') # sess = tf.Session(config=config) # sess = tf.Session() 
sess.run(tf.global_variables_initializer()) #-------------------------------------- Training ----------------------------------- iteration = 0 for _ in range(max_epochs): np.random.shuffle(train) for i in range(train.shape[0]/batch_size): iteration+=1 batch = train[i*batch_size : (i+1)*batch_size, :] if iteration%50==0: tr_lik, _,summary = sess.run([likelihood_, train_step,merged],feed_dict = {x:batch, phase_train:True, t:iteration}) train_writer.add_summary(summary, iteration) else: tr_lik, _ = sess.run([likelihood_, train_step], feed_dict={x:batch, phase_train:True,t:(iteration)}) #, z:np.zeros((batch_size,hidden_dim), gen_image:False) }) # print tr_lik test_marg = 0.0 val_marg = 0.0 test_liki = 0.0 val_liki = 0.0 for i in range(sample_param): test_lik,test_loss,summary_ = sess.run([likelihood_,loss, merged], feed_dict={x:test, phase_train:False,t:scale_param}) test_liki+=test_lik test_marg+=test_loss val_lik,val_loss,summary_ = sess.run([likelihood_,loss,merged], feed_dict={x:valid, phase_train:False,t:scale_param}) val_liki+=val_lik val_marg+=val_loss print "validation marginal likelihood : " + str(val_marg/sample_param) print "test marginal likelihood: " + str(test_marg/sample_param) print "validation likelihood : " + str(val_liki/sample_param) print "test likelihood: " + str(test_liki/sample_param) # for i in range(sample_param): # test_lik,test_loss,summary_ = sess.run([likelihood_,loss, merged], feed_dict={x:test, phase_train:False,t:100000}) # print "test likelihood: " + str(test_lik) # print "test loss: " + str(test_loss) # # test_writer.add_summary(summary_,iteration) # val_lik,val_loss,summary_ = sess.run([likelihood_,loss,merged], feed_dict={x:valid, phase_train:False,t:100000}) # print "validation likelihood : " + str(val_lik) # print "validation loss: " + str(val_loss) # # val_writer.add_summary(summary_,iteration)
[ "abhiagwl@iitk.ac.in" ]
abhiagwl@iitk.ac.in
10019e15fb119b667af4d903e1694dad44c55d81
9b4520e6874e7cb03361558075a66ac8b5e6a548
/luckdsun/slang/migrations/0002_remove_member_m_pass.py
ca12f26fc20817d9b445c1dbe51e8b07179ae6f3
[]
no_license
mkh9293/slang
de41ae269107aa297ce2389781783b871d5607da
f3a635759615709f861f4e6985af3c3f041e2fab
refs/heads/master
2021-01-10T06:46:13.344350
2016-01-30T09:52:19
2016-01-30T09:52:19
50,423,684
0
0
null
2016-01-26T12:34:33
2016-01-26T11:13:13
Python
UTF-8
Python
false
false
377
py
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-01-30 08:34 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('slang', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='member', name='m_pass', ), ]
[ "mkh9209@naver.com" ]
mkh9209@naver.com
cb3e813088259f958d239a41e10ad6aaef5883f9
6691c8eb872615d1cf487b1d826191500f0eecc3
/unused/dialog_lumens_pur_prepareplanningunit.py
4bed8dac2f75d852cdb88181c3758bec001ac0d2
[]
no_license
geoenvo/lumens
466198b0e137d1210b2a6a3e63b42dc9e9e6c993
4d09083e6ecc6783132bc2059306c5f35ec4d1ae
refs/heads/master
2020-04-06T07:04:00.516004
2016-06-01T09:57:26
2016-06-01T09:57:26
43,800,954
0
1
null
null
null
null
UTF-8
Python
false
false
8,542
py
#!/usr/bin/env python #-*- coding:utf-8 -*- import os, logging from qgis.core import * from PyQt4 import QtCore, QtGui from processing.tools import * from dialog_lumens_base import DialogLumensBase class DialogLumensPURPreparePlanningUnit(DialogLumensBase): """ """ def __init__(self, parent): super(DialogLumensPURPreparePlanningUnit, self).__init__(parent) self.dialogTitle = 'LUMENS PUR Prepare Planning Unit' self.tableRowCount = 0 self.tableData = [] self.setupUi(self) self.buttonAddRow.clicked.connect(self.handlerButtonAddRow) self.buttonClearAll.clicked.connect(self.handlerButtonClearAll) self.buttonLumensDialogSubmit.clicked.connect(self.handlerLumensDialogSubmit) def setupUi(self, parent): super(DialogLumensPURPreparePlanningUnit, self).setupUi(self) self.layoutTable = QtGui.QVBoxLayout() self.dialogLayout.addLayout(self.layoutTable) layoutButton = QtGui.QHBoxLayout() self.buttonAddRow = QtGui.QPushButton(parent) self.buttonAddRow.setText('Add Row') layoutButton.addWidget(self.buttonAddRow) self.buttonClearAll = QtGui.QPushButton(parent) self.buttonClearAll.setText('Clear All') layoutButton.addWidget(self.buttonClearAll) self.buttonLumensDialogSubmit = QtGui.QPushButton(parent) self.buttonLumensDialogSubmit.setText(self.dialogTitle) layoutButton.addWidget(self.buttonLumensDialogSubmit) self.dialogLayout.addLayout(layoutButton) self.setLayout(self.dialogLayout) # add 3 planning unit rows self.addRow() self.addRow() self.addRow() self.setWindowTitle(self.dialogTitle) self.setMinimumSize(800, 200) self.resize(parent.sizeHint()) def addRow(self): """Add a planning unit row """ self.tableRowCount = self.tableRowCount + 1 layoutRow = QtGui.QHBoxLayout() lineEditShapefile = QtGui.QLineEdit() lineEditShapefile.setReadOnly(True) lineEditShapefile.setObjectName('lineEditShapefile_{0}'.format(str(self.tableRowCount))) layoutRow.addWidget(lineEditShapefile) buttonSelectShapefile = QtGui.QPushButton() buttonSelectShapefile.setText('Select Shapefile') 
buttonSelectShapefile.setObjectName('buttonSelectShapefile_{0}'.format(str(self.tableRowCount))) layoutRow.addWidget(buttonSelectShapefile) comboBoxShapefileAttr = QtGui.QComboBox() comboBoxShapefileAttr.setDisabled(True) comboBoxShapefileAttr.setObjectName('comboBoxShapefileAttr_{0}'.format(str(self.tableRowCount))) layoutRow.addWidget(comboBoxShapefileAttr) lineEditPlanningUnitTitle = QtGui.QLineEdit() lineEditPlanningUnitTitle.setObjectName('lineEditPlanningUnitTitle_{0}'.format(str(self.tableRowCount))) layoutRow.addWidget(lineEditPlanningUnitTitle) comboBoxPlanningUnitType = QtGui.QComboBox() comboBoxPlanningUnitType.addItems(['Reconciliation', 'Additional']) comboBoxPlanningUnitType.setObjectName('comboBoxPlanningUnitType_{0}'.format(str(self.tableRowCount))) layoutRow.addWidget(comboBoxPlanningUnitType) self.layoutTable.addLayout(layoutRow) buttonSelectShapefile.clicked.connect(self.handlerSelectShapefile) def clearRows(self): """ """ for i in reversed(range(self.layoutTable.count())): layoutRow = self.layoutTable.itemAt(i).layout() self.clearLayout(layoutRow) self.tableRowCount = 0 self.addRow() self.addRow() self.addRow() def handlerButtonAddRow(self): """ """ self.addRow() def handlerButtonClearAll(self): """ """ self.clearRows() def handlerSelectShapefile(self): """Select a shp file and load the attributes in the shapefile attribute combobox """ shapefile = unicode(QtGui.QFileDialog.getOpenFileName( self, 'Select Shapefile', QtCore.QDir.homePath(), 'Shapefile (*{0})'.format(self.main.appSettings['selectShapefileExt']))) if shapefile: buttonSender = self.sender() objectName = buttonSender.objectName() tableRow = objectName.split('_')[1] lineEditShapefile = self.findChild(QtGui.QLineEdit, 'lineEditShapefile_' + tableRow) lineEditShapefile.setText(shapefile) registry = QgsProviderRegistry.instance() provider = registry.provider('ogr', shapefile) if not provider.isValid(): logging.getLogger(type(self).__name__).error('invalid shapefile') return attributes = 
[] for field in provider.fields(): attributes.append(field.name()) comboBoxShapefileAttr = self.findChild(QtGui.QComboBox, 'comboBoxShapefileAttr_' + tableRow) comboBoxShapefileAttr.clear() comboBoxShapefileAttr.addItems(sorted(attributes)) comboBoxShapefileAttr.setEnabled(True) logging.getLogger(type(self).__name__).info('select shapefile: %s', shapefile) def setAppSettings(self): """Set the required values from the form widgets """ if self.tableRowCount > 0: self.tableData = [] for tableRow in range (1, self.tableRowCount + 1): widgetShapefile = self.findChild(QtGui.QLineEdit, 'lineEditShapefile_' + str(tableRow)) widgetShapefileAttr = self.findChild(QtGui.QComboBox, 'comboBoxShapefileAttr_' + str(tableRow)) widgetPlanningUnitTitle = self.findChild(QtGui.QLineEdit, 'lineEditPlanningUnitTitle_' + str(tableRow)) widgetPlanningUnitType = self.findChild(QtGui.QComboBox, 'comboBoxPlanningUnitType_' + str(tableRow)) shapefile = unicode(widgetShapefile.text()) shapefileAttr = unicode(widgetShapefileAttr.currentText()) planningUnitTitle = unicode(widgetPlanningUnitTitle.text()) planningUnitType = unicode(widgetPlanningUnitType.currentText()) if shapefile and shapefileAttr and planningUnitTitle and planningUnitType: if unicode(widgetPlanningUnitType.currentText()) == 'Reconciliation': planningUnitType = 0 else: planningUnitType = 1 tableRowData = { 'shapefile': shapefile, 'shapefileAttr': shapefileAttr, 'planningUnitTitle': planningUnitTitle, 'planningUnitType': planningUnitType, } self.tableData.append(tableRowData) if self.tableData: return True else: QtGui.QMessageBox.critical(self, 'Error', 'Please complete the fields.') return False else: QtGui.QMessageBox.critical(self, 'Error', 'No planning units set.') return False def handlerLumensDialogSubmit(self): """ """ if self.setAppSettings(): logging.getLogger(type(self).__name__).info('start: %s' % self.dialogTitle) self.buttonLumensDialogSubmit.setDisabled(True) for tableRowData in self.tableData: outputs = 
general.runalg( 'r:purstep2prepareplanningunit', tableRowData['shapefile'], tableRowData['shapefileAttr'], tableRowData['planningUnitTitle'], tableRowData['planningUnitType'], ) ##print self.tableData self.buttonLumensDialogSubmit.setEnabled(True) logging.getLogger(type(self).__name__).info('end: %s' % self.dialogTitle) self.close()
[ "seno@geo.co.id" ]
seno@geo.co.id
55b52764902ce153ec4c19dc6da9439dee543669
9a0eb3e292d57b59198c7c66a994372ced9cfa5b
/nodes/1.x/python/String.ReplaceIllegalFilenameCharacters.py
a922b676f1485306810fd884001c9016638051ed
[ "MIT" ]
permissive
andydandy74/ClockworkForDynamo
544ddf0893f5c0072fca7934f4e128001771f767
528400c667c4c3f2b51814af84e85c8fab8a8059
refs/heads/master
2023-08-19T03:07:33.489926
2023-08-13T04:31:17
2023-08-13T04:31:17
15,043,988
184
100
MIT
2023-09-04T18:47:40
2013-12-09T10:11:01
Python
UTF-8
Python
false
false
430
py
# Dynamo node: replace characters that are illegal in Windows filenames.
# IN[0]: list of strings to sanitize.
# IN[1]: replacement string substituted for every illegal character.
# OUT:   list of sanitized strings, same order as IN[0].
ILLEGAL_CHARS = '/?<>\\:*|"^'

strings = IN[0]
replacement = IN[1]

sanitized = []
for s in strings:
    # Loop over the character set instead of ten near-identical
    # replace() calls; also stops shadowing the builtin `str`.
    for ch in ILLEGAL_CHARS:
        s = s.replace(ch, replacement)
    sanitized.append(s)
OUT = sanitized
[ "dieckmann@caad.arch.rwth-aachen.de" ]
dieckmann@caad.arch.rwth-aachen.de
c4b714343b63868a77deb87345fae0189c00aefc
caac62c43addc19741e6b73cbbd5763f50b68279
/2program3.py
34fe551a578404acfb5ac0de6435f228d83fb72a
[]
no_license
ansaari/codekata
d17cfcbff59008dfc3c6a5af4ec179d382e42ec1
d9039e81ee04f379f55f1d74b4dbbedf01a9c4aa
refs/heads/master
2020-04-21T16:04:42.925304
2019-02-21T07:04:05
2019-02-21T07:04:05
169,688,913
0
0
null
null
null
null
UTF-8
Python
false
false
127
py
# Read an integer and print "yes" when it has no divisor in [2, n//3),
# otherwise "no".  (Python 2 script: raw_input.)
n = int(raw_input())
has_divisor = any(n % d == 0 for d in range(2, n // 3))
print("no" if has_divisor else "yes")
[ "noreply@github.com" ]
noreply@github.com
83852e477286aff2176a0246871748edca6bcef8
c733e6b433914a8faba256c7853f5cf2cd39c62a
/Python/Leetcode Daily Practice/Heap/692. Top K Frequent Words.py
db9a25d3ab733cd3cdd4dd640983c8602e54fffe
[]
no_license
YaqianQi/Algorithm-and-Data-Structure
3016bebcc1f1356b6e5f3c3e588f3d46c276a805
2e1751263f484709102f7f2caf18776a004c8230
refs/heads/master
2021-10-27T16:29:18.409235
2021-10-14T13:57:36
2021-10-14T13:57:36
178,946,803
1
0
null
null
null
null
UTF-8
Python
false
false
385
py
class Solution(object):
    def topKFrequent(self, words, k):
        """Return the k most frequent words, most frequent first.

        Ties are broken alphabetically: the tuple (-freq, word) compares
        on the word when frequencies are equal.

        Complexity: O(n) to count and heapify, O(k log n) to pop.
        """
        from collections import Counter
        import heapq
        counts = Counter(words)  # O(n)
        heap = [(-freq, word) for word, freq in counts.items()]  # O(n)
        # BUG FIX: the list must be heapified before heappop(); popping an
        # arbitrary list violates the heap invariant and returns wrong
        # words (the original only worked on inputs that happened to
        # already satisfy the invariant).
        heapq.heapify(heap)  # O(n)
        return [heapq.heappop(heap)[1] for _ in range(k)]  # O(k log n)

print(Solution().topKFrequent(words=["i", "love", "leetcode", "i", "love", "coding"], k=2))
[ "alicia.qyq@gmail.com" ]
alicia.qyq@gmail.com
7d1a23e8f82b1ebe56ddbdaf7010cc53fbea9849
2a7a1f8f2b9af145985a6946c7524a98678ad6a5
/dashboard/migrations/0020_auto_20170511_0617.py
941ee4a45d14c4799b5eec1c4848470c896bfd1f
[]
no_license
Jiker4836/opsweb
ad620398e6c53632b689ceab73d02a750fbb1112
c91140d100173823e0e35972675f499c13395273
refs/heads/master
2021-01-19T20:49:00.082649
2017-05-11T09:00:59
2017-05-11T09:00:59
88,560,509
0
0
null
null
null
null
UTF-8
Python
false
false
774
py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('dashboard', '0019_product'), ] operations = [ migrations.AlterField( model_name='product', name='dev_interface', field=models.CharField(max_length=255, verbose_name=b'\xe5\xbc\x80\xe5\x8f\x91\xe8\xb4\x9f\xe8\xb4\xa3\xe4\xba\xba\xef\xbc\x9ausername1,username2'), ), migrations.AlterField( model_name='product', name='module_letter', field=models.CharField(max_length=10, verbose_name=b'\xe4\xb8\x9a\xe5\x8a\xa1\xe7\xba\xbf\xe5\xad\x97\xe6\xaf\x8d\xe7\xae\x80\xe7\xa7\xb0'), ), ]
[ "jiker4836@163.com" ]
jiker4836@163.com
9f5dc12109b39109052f0c7accaacf0412324b6c
20698396bce1604e7b36ad7350030f040f5f4893
/mongoNYT.py
ec7d83e2cf49ebb5f430773200e187a0a06bd38a
[]
no_license
seungwonyang/public
588058bdeb3123f949ad65e0d0a3f5ac1dce4c1d
86019e849f29d4f54dedb2b209560c1d44ec7c64
refs/heads/master
2021-01-25T09:53:45.839622
2013-08-08T15:50:59
2013-08-08T15:50:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,944
py
#------------------------------------------------------------------------------------------ # Seungwon Yang <seungwon@vt.edu> # PARC Internship, Dept. of CS, Virginia Tech # Description: this script adds a list of JSON records from an input file # (generated by mongodb) to mysql db # Usage: >>python mongoNYT.py #------------------------------------------------------------------------------------------ import math import json import MySQLdb from StringIO import StringIO import re import urllib2 from pymongo import Connection def json2mongo(jsonfile): connection = Connection() # db = connection.provisdb db = connection.nyt # collection = db.opencalais_test # collection = db.opencalais_full collection = db.nyttest jsonRecords = open(jsonfile).read() jsonLoaded = json.loads(jsonRecords) jsonList = jsonLoaded["response"]["docs"] index = 1 for item in jsonList: # if (index > 400): # break print "Index: %d ----------\n" % (index) # print item try: if (item["source"] == "NYT"): collection.insert(item) # if (item["source"] == "NYT") and (item["opencalais"]): # collection.insert(item) # index += 1 except: pass index += 1 def extIDs(collection): id_li = [item["id"] for item in collection.find().limit(10)] for i in id_li: print i # jsonRecords = open(jsonfile).read() # jsonLoaded = json.loads(jsonRecords) # jsonList = jsonLoaded["response"]["docs"] # index = 1 # for item in jsonList: # # if (index > 400): # # break # print "Index: %d ----------\n" % (index) # # print item # try: # if (item["source"] == "NYT"): # collection.insert(item) # # if (item["source"] == "NYT") and (item["opencalais"]): # # collection.insert(item) # # index += 1 # except: # pass # index += 1 def extText(collection, idList): # for each in idList[:10]: for each in idList: print each each = str(each) # grab a single record with the id singleRec = collection.find_one({"id":each}) uni_title = u""+singleRec["titleText"] uni_text = u""+singleRec["text"] title_and_text = singleRec["titleText"] + " " + 
singleRec["text"] # title_and_text = singleRec["titleText"] + " " + singleRec["text"] print "\n\n\n" # print title_and_text out_filename = each + ".txt" out_path = "../NYT_3000/" + out_filename fo = open(out_path, "w") fo.write(title_and_text.encode("utf-8")) fo.close() # id_li = [item["text"] for item in collection.find().limit(1)] # for i in id_li: # print i def extMetadata(collection, idList, cur): i = 1 for doc_id in idList: # for each in idList[1596:]: print "Index: %d Doc_id: %s" % (i, doc_id) doc_id = str(doc_id) # grab a single record with the id singleRec = collection.find_one({"id":doc_id}) # uni_title = u""+singleRec["titleText"] # uni_text = u""+singleRec["text"] # title_and_text = singleRec["titleText"] + " " + singleRec["text"] opencalais_li = singleRec["opencalais"][1:-1].lower().split('", "') organizations_li = singleRec["organizations"][1:-1].lower().split('","') people_li = singleRec["people"][1:-1].lower().split('","') locations_li = singleRec["locations"][1:-1].lower().split('","') descriptors_li = singleRec["descriptors"][1:-1].lower().split('","') names_li = singleRec["names"][1:-1].lower().split('","') opencalais = ";".join([re.sub(r'[^\w]', ' ', item) for item in opencalais_li]) organizations = ";".join([re.sub(r'[^\w]', ' ', item) for item in organizations_li]) people = ";".join([re.sub(r'[^\w]', ' ', item) for item in people_li]) locations = ";".join([re.sub(r'[^\w]', ' ', item) for item in locations_li]) descriptors = ";".join([re.sub(r'[^\w]', ' ', item) for item in descriptors_li]) names = ";".join([re.sub(r'[^\w]', ' ', item) for item in names_li]) nyt_manual_topics = organizations + ";" + people + ";" + locations + ";" + descriptors + ";" + names # print "\n\n\n" # # print title_and_text print "opencalais: \n%s" % opencalais print "organizations: \n%s" % organizations print "people: \n%s" % people print "locations: \n%s" % locations print "descriptors: \n%s" % descriptors print "names: \n%s" % names print "nyt_manual_topics: \n%s" % 
nyt_manual_topics # write extracted metadata into mysql table 'nyt_3000' # query = "update nyt_3000 set opencalais='" + opencalais + "' where doc_id=" + str(doc_id) query = "update nyt_3000 set opencalais='" + opencalais + "', organizations='" + organizations + "', people='" + people + "', locations='" + locations + "', descriptors='" + descriptors + "', names='" + names + "', nyt_manual_topics='" + nyt_manual_topics + "' where doc_id=" + str(doc_id) cur.execute(query) i += 1 # out_filename = each + ".txt" # out_path = "../NYT_3000/" + out_filename # fo = open(out_path, "w") # fo.write(title_and_text.encode("utf-8")) # fo.close() def main(): fi = open("NYT_IDs.txt", "r") li = fi.read().split() fi.close() # connection to mongodb --------------------------// connection = Connection() mongodb = connection.nyt collection = mongodb.nyttest # connection to mysqldb --------------------------// dbuser = "db_user_name" dbpasswd = "db_passwd" hostname = "db_hostname" dbname = "db_name" dbtable = "db_table" mysqldb = MySQLdb.connect(host=hostname, user=dbuser, passwd=dbpasswd, db=dbname) cur = mysqldb.cursor() # # extract ids from 83,000 records # extIDs(collection) # extText(collection, li) # print li extMetadata(collection, li, cur) # # create multiple json files, where each contains 1000 doc # for page in range(1,83): # process 83,741 records (each iteration for 1000 rec.) # # rows = 1000 # filename = "opencalais_" + str(page) + ".json" # # fout = open(filename, "w") # # url = "http://solr_server_hostname:solr_server_port/solr/select/?wt=json&q=opencalais:*%20AND%20source:NYT&fl=score&rows=" + str(rows) + "&start=" + str(rows * page) # # fi = urllib2.urlopen(url) # # ffi = fi.read() # # fout.write(ffi) # # fout.close() # print "File number: %d --------------" % page # json2mongo(filename) if __name__ == "__main__": main()
[ "seungwonyang@gmail.com" ]
seungwonyang@gmail.com
b8efe4de969fc3e054fbfc7785285133181cc58c
44125ac22788ed99389249e04c1bda87e0b0e6c3
/sharifdaily/kernel/views.py
5a4098db293bac38b3c32bd534418fa63ed43788
[]
no_license
mmbrian/sharifdaily
c2985d34fd33dc60d77e7216010ebc72c3ec72dd
5d15642859e13e956bfb9e55e8b4d667d1ea0613
refs/heads/master
2021-01-02T09:33:52.373058
2014-01-19T07:07:48
2014-01-19T07:07:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
520
py
from django.http import HttpResponse from django.core.serializers.json import DjangoJSONEncoder from .models import AppVersion try: import json except ImportError: from django.utils import simplejson as json def get_latest_version(request): try: last_version = AppVersion.objects.filter(published=True).values('version_code', 'version_name', 'apk').order_by('-date')[0] return HttpResponse(json.dumps(last_version, cls=DjangoJSONEncoder)) except: return HttpResponse('-1')
[ "mohsen.brian@gmail.com" ]
mohsen.brian@gmail.com
13f4acd3b7b06c62449a3ff575618e203428cc3d
c7066d3b72a54665d81de1d77d7bdcfd0ece7b42
/python/ccxt/ascendex.py
c2951759419e5046421f16fd8bd991a4af0f59cb
[ "MIT" ]
permissive
blair/ccxt
cf09b7a604586c230e8cea2b6e4dbf6c3c3497ea
3a6bd4efb78d01391f9a4ea43ec228b75ca24695
refs/heads/master
2023-09-03T21:09:44.447194
2023-08-26T19:01:14
2023-08-26T19:01:14
126,121,401
0
2
MIT
2018-03-21T04:02:57
2018-03-21T04:02:56
null
UTF-8
Python
false
false
132,974
py
# -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code from ccxt.base.exchange import Exchange from ccxt.abstract.ascendex import ImplicitAPI import hashlib from ccxt.base.types import OrderSide from ccxt.base.types import OrderType from typing import Optional from typing import List from ccxt.base.errors import ExchangeError from ccxt.base.errors import PermissionDenied from ccxt.base.errors import ArgumentsRequired from ccxt.base.errors import BadRequest from ccxt.base.errors import BadSymbol from ccxt.base.errors import InsufficientFunds from ccxt.base.errors import InvalidOrder from ccxt.base.errors import AuthenticationError from ccxt.base.decimal_to_precision import TICK_SIZE from ccxt.base.precise import Precise class ascendex(Exchange, ImplicitAPI): def describe(self): return self.deep_extend(super(ascendex, self).describe(), { 'id': 'ascendex', 'name': 'AscendEX', 'countries': ['SG'], # Singapore # 8 requests per minute = 0.13333 per second => rateLimit = 750 # testing 400 works 'rateLimit': 400, 'certified': False, 'pro': True, # new metainfo interface 'has': { 'CORS': None, 'spot': True, 'margin': True, 'swap': True, 'future': True, 'option': False, 'addMargin': True, 'cancelAllOrders': True, 'cancelOrder': True, 'createOrder': True, 'createPostOnlyOrder': True, 'createReduceOnlyOrder': True, 'createStopLimitOrder': True, 'createStopMarketOrder': True, 'createStopOrder': True, 'fetchAccounts': True, 'fetchBalance': True, 'fetchClosedOrders': True, 'fetchCurrencies': True, 'fetchDepositAddress': True, 'fetchDepositAddresses': False, 'fetchDepositAddressesByNetwork': False, 'fetchDeposits': True, 'fetchDepositsWithdrawals': True, 'fetchDepositWithdrawFee': 'emulated', 'fetchDepositWithdrawFees': True, 'fetchFundingHistory': False, 'fetchFundingRate': 'emulated', 'fetchFundingRateHistory': False, 'fetchFundingRates': True, 
'fetchIndexOHLCV': False, 'fetchLeverage': False, 'fetchLeverageTiers': True, 'fetchMarginMode': False, 'fetchMarketLeverageTiers': 'emulated', 'fetchMarkets': True, 'fetchMarkOHLCV': False, 'fetchOHLCV': True, 'fetchOpenOrders': True, 'fetchOrder': True, 'fetchOrderBook': True, 'fetchOrders': False, 'fetchPosition': False, 'fetchPositionMode': False, 'fetchPositions': True, 'fetchPositionsRisk': False, 'fetchPremiumIndexOHLCV': False, 'fetchTicker': True, 'fetchTickers': True, 'fetchTime': True, 'fetchTrades': True, 'fetchTradingFee': False, 'fetchTradingFees': True, 'fetchTransactionFee': False, 'fetchTransactionFees': False, 'fetchTransactions': 'emulated', 'fetchTransfer': False, 'fetchTransfers': False, 'fetchWithdrawal': False, 'fetchWithdrawals': True, 'reduceMargin': True, 'setLeverage': True, 'setMarginMode': True, 'setPositionMode': False, 'transfer': True, }, 'timeframes': { '1m': '1', '5m': '5', '15m': '15', '30m': '30', '1h': '60', '2h': '120', '4h': '240', '6h': '360', '12h': '720', '1d': '1d', '1w': '1w', '1M': '1m', }, 'version': 'v2', 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/112027508-47984600-8b48-11eb-9e17-d26459cc36c6.jpg', 'api': { 'rest': 'https://ascendex.com', }, 'test': { 'rest': 'https://api-test.ascendex-sandbox.com', }, 'www': 'https://ascendex.com', 'doc': [ 'https://ascendex.github.io/ascendex-pro-api/#ascendex-pro-api-documentation', ], 'fees': 'https://ascendex.com/en/feerate/transactionfee-traderate', 'referral': { 'url': 'https://ascendex.com/en-us/register?inviteCode=EL6BXBQM', 'discount': 0.25, }, }, 'api': { 'v1': { 'public': { 'get': { 'assets': 1, 'products': 1, 'ticker': 1, 'barhist/info': 1, 'barhist': 1, 'depth': 1, 'trades': 1, 'cash/assets': 1, # not documented 'cash/products': 1, # not documented 'margin/assets': 1, # not documented 'margin/products': 1, # not documented 'futures/collateral': 1, 'futures/contracts': 1, 'futures/ref-px': 1, 'futures/market-data': 1, 'futures/funding-rates': 1, 
'risk-limit-info': 1, 'exchange-info': 1, }, }, 'private': { 'get': { 'info': 1, 'wallet/transactions': 1, 'wallet/deposit/address': 1, # not documented 'data/balance/snapshot': 1, 'data/balance/history': 1, }, 'accountCategory': { 'get': { 'balance': 1, 'order/open': 1, 'order/status': 1, 'order/hist/current': 1, 'risk': 1, }, 'post': { 'order': 1, 'order/batch': 1, }, 'delete': { 'order': 1, 'order/all': 1, 'order/batch': 1, }, }, 'accountGroup': { 'get': { 'cash/balance': 1, 'margin/balance': 1, 'margin/risk': 1, 'futures/collateral-balance': 1, 'futures/position': 1, 'futures/risk': 1, 'futures/funding-payments': 1, 'order/hist': 1, 'spot/fee': 1, }, 'post': { 'transfer': 1, 'futures/transfer/deposit': 1, 'futures/transfer/withdraw': 1, }, }, }, }, 'v2': { 'public': { 'get': { 'assets': 1, 'futures/contract': 1, 'futures/collateral': 1, 'futures/pricing-data': 1, 'futures/ticker': 1, 'risk-limit-info': 1, }, }, 'private': { 'data': { 'get': { 'order/hist': 1, }, }, 'get': { 'account/info': 1, }, 'accountGroup': { 'get': { 'order/hist': 1, 'futures/position': 1, 'futures/free-margin': 1, 'futures/order/hist/current': 1, 'futures/order/open': 1, 'futures/order/status': 1, }, 'post': { 'futures/isolated-position-margin': 1, 'futures/margin-type': 1, 'futures/leverage': 1, 'futures/transfer/deposit': 1, 'futures/transfer/withdraw': 1, 'futures/order': 1, 'futures/order/batch': 1, 'futures/order/open': 1, 'subuser/subuser-transfer': 1, 'subuser/subuser-transfer-hist': 1, }, 'delete': { 'futures/order': 1, 'futures/order/batch': 1, 'futures/order/all': 1, }, }, }, }, }, 'fees': { 'trading': { 'feeSide': 'get', 'tierBased': True, 'percentage': True, 'taker': self.parse_number('0.002'), 'maker': self.parse_number('0.002'), }, }, 'precisionMode': TICK_SIZE, 'options': { 'account-category': 'cash', # 'cash', 'margin', 'futures' # obsolete 'account-group': None, 'fetchClosedOrders': { 'method': 'v2PrivateDataGetOrderHist', # 
'v1PrivateAccountGroupGetAccountCategoryOrderHistCurrent' }, 'defaultType': 'spot', # 'spot', 'margin', 'swap' 'accountsByType': { 'spot': 'cash', 'swap': 'futures', 'future': 'futures', 'margin': 'margin', }, 'transfer': { 'fillResponseFromRequest': True, }, 'networks': { 'BSC': 'BEP20(BSC)', 'ARB': 'arbitrum', 'SOL': 'Solana', 'AVAX': 'avalanche C chain', 'OMNI': 'Omni', }, 'networksById': { 'BEP20(BSC)': 'BSC', 'arbitrum': 'ARB', 'Solana': 'SOL', 'avalanche C chain': 'AVAX', 'Omni': 'OMNI', }, }, 'exceptions': { 'exact': { # not documented '1900': BadRequest, # {"code":1900,"message":"Invalid Http Request Input"} '2100': AuthenticationError, # {"code":2100,"message":"ApiKeyFailure"} '5002': BadSymbol, # {"code":5002,"message":"Invalid Symbol"} '6001': BadSymbol, # {"code":6001,"message":"Trading is disabled on symbol."} '6010': InsufficientFunds, # {'code': 6010, 'message': 'Not enough balance.'} '60060': InvalidOrder, # {'code': 60060, 'message': 'The order is already filled or canceled.'} '600503': InvalidOrder, # {"code":600503,"message":"Notional is too small."} # documented '100001': BadRequest, # INVALID_HTTP_INPUT Http request is invalid '100002': BadRequest, # DATA_NOT_AVAILABLE Some required data is missing '100003': BadRequest, # KEY_CONFLICT The same key exists already '100004': BadRequest, # INVALID_REQUEST_DATA The HTTP request contains invalid field or argument '100005': BadRequest, # INVALID_WS_REQUEST_DATA Websocket request contains invalid field or argument '100006': BadRequest, # INVALID_ARGUMENT The arugment is invalid '100007': BadRequest, # ENCRYPTION_ERROR Something wrong with data encryption '100008': BadSymbol, # SYMBOL_ERROR Symbol does not exist or not valid for the request '100009': AuthenticationError, # AUTHORIZATION_NEEDED Authorization is require for the API access or request '100010': BadRequest, # INVALID_OPERATION The action is invalid or not allowed for the account '100011': BadRequest, # INVALID_TIMESTAMP Not a valid timestamp 
'100012': BadRequest, # INVALID_STR_FORMAT str format does not '100013': BadRequest, # INVALID_NUM_FORMAT Invalid number input '100101': ExchangeError, # UNKNOWN_ERROR Some unknown error '150001': BadRequest, # INVALID_JSON_FORMAT Require a valid json object '200001': AuthenticationError, # AUTHENTICATION_FAILED Authorization failed '200002': ExchangeError, # TOO_MANY_ATTEMPTS Tried and failed too many times '200003': ExchangeError, # ACCOUNT_NOT_FOUND Account not exist '200004': ExchangeError, # ACCOUNT_NOT_SETUP Account not setup properly '200005': ExchangeError, # ACCOUNT_ALREADY_EXIST Account already exist '200006': ExchangeError, # ACCOUNT_ERROR Some error related with error '200007': ExchangeError, # CODE_NOT_FOUND '200008': ExchangeError, # CODE_EXPIRED Code expired '200009': ExchangeError, # CODE_MISMATCH Code does not match '200010': AuthenticationError, # PASSWORD_ERROR Wrong assword '200011': ExchangeError, # CODE_GEN_FAILED Do not generate required code promptly '200012': ExchangeError, # FAKE_COKE_VERIFY '200013': ExchangeError, # SECURITY_ALERT Provide security alert message '200014': PermissionDenied, # RESTRICTED_ACCOUNT Account is restricted for certain activity, such, or withdraw. 
'200015': PermissionDenied, # PERMISSION_DENIED No enough permission for the operation '300001': InvalidOrder, # INVALID_PRICE Order price is invalid '300002': InvalidOrder, # INVALID_QTY Order size is invalid '300003': InvalidOrder, # INVALID_SIDE Order side is invalid '300004': InvalidOrder, # INVALID_NOTIONAL Notional is too small or too large '300005': InvalidOrder, # INVALID_TYPE Order typs is invalid '300006': InvalidOrder, # INVALID_ORDER_ID Order id is invalid '300007': InvalidOrder, # INVALID_TIME_IN_FORCE Time In Force in order request is invalid '300008': InvalidOrder, # INVALID_ORDER_PARAMETER Some order parameter is invalid '300009': InvalidOrder, # TRADING_VIOLATION Trading violation on account or asset '300011': InsufficientFunds, # INVALID_BALANCE No enough account or asset balance for the trading '300012': BadSymbol, # INVALID_PRODUCT Not a valid product supported by exchange '300013': InvalidOrder, # INVALID_BATCH_ORDER Some or all orders are invalid in batch order request '300014': InvalidOrder, # {"code":300014,"message":"Order price doesn't conform to the required tick size: 0.1","reason":"TICK_SIZE_VIOLATION"} '300020': InvalidOrder, # TRADING_RESTRICTED There is some trading restriction on account or asset '300021': InvalidOrder, # TRADING_DISABLED Trading is disabled on account or asset '300031': InvalidOrder, # NO_MARKET_PRICE No market price for market type order trading '310001': InsufficientFunds, # INVALID_MARGIN_BALANCE No enough margin balance '310002': InvalidOrder, # INVALID_MARGIN_ACCOUNT Not a valid account for margin trading '310003': InvalidOrder, # MARGIN_TOO_RISKY Leverage is too high '310004': BadSymbol, # INVALID_MARGIN_ASSET This asset does not support margin trading '310005': InvalidOrder, # INVALID_REFERENCE_PRICE There is no valid reference price '510001': ExchangeError, # SERVER_ERROR Something wrong with server. 
'900001': ExchangeError, # HUMAN_CHALLENGE Human change do not pass }, 'broad': {}, }, 'commonCurrencies': { 'BOND': 'BONDED', 'BTCBEAR': 'BEAR', 'BTCBULL': 'BULL', 'BYN': 'BeyondFi', 'PLN': 'Pollen', }, }) def get_account(self, params={}): # get current or provided bitmax sub-account account = self.safe_value(params, 'account', self.options['account']) lowercaseAccount = account.lower() return self.capitalize(lowercaseAccount) def fetch_currencies(self, params={}): """ fetches all available currencies on an exchange :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: an associative dictionary of currencies """ assets = self.v1PublicGetAssets(params) # # { # "code":0, # "data":[ # { # "assetCode" : "LTCBULL", # "assetName" : "3X Long LTC Token", # "precisionScale" : 9, # "nativeScale" : 4, # "withdrawalFee" : "0.2", # "minWithdrawalAmt" : "1.0", # "status" : "Normal" # }, # ] # } # margin = self.v1PublicGetMarginAssets(params) # # { # "code":0, # "data":[ # { # "assetCode":"BTT", # "borrowAssetCode":"BTT-B", # "interestAssetCode":"BTT-I", # "nativeScale":0, # "numConfirmations":1, # "withdrawFee":"100.0", # "minWithdrawalAmt":"1000.0", # "statusCode":"Normal", # "statusMessage":"", # "interestRate":"0.001" # } # ] # } # cash = self.v1PublicGetCashAssets(params) # # { # "code":0, # "data":[ # { # "assetCode":"LTCBULL", # "nativeScale":4, # "numConfirmations":20, # "withdrawFee":"0.2", # "minWithdrawalAmt":"1.0", # "statusCode":"Normal", # "statusMessage":"" # } # ] # } # assetsData = self.safe_value(assets, 'data', []) marginData = self.safe_value(margin, 'data', []) cashData = self.safe_value(cash, 'data', []) assetsById = self.index_by(assetsData, 'assetCode') marginById = self.index_by(marginData, 'assetCode') cashById = self.index_by(cashData, 'assetCode') dataById = self.deep_extend(assetsById, marginById, cashById) ids = list(dataById.keys()) result = {} for i in range(0, len(ids)): id = ids[i] currency = dataById[id] 
code = self.safe_currency_code(id) scale = self.safe_string_2(currency, 'precisionScale', 'nativeScale') precision = self.parse_number(self.parse_precision(scale)) fee = self.safe_number_2(currency, 'withdrawFee', 'withdrawalFee') status = self.safe_string_2(currency, 'status', 'statusCode') active = (status == 'Normal') marginInside = ('borrowAssetCode' in currency) result[code] = { 'id': id, 'code': code, 'info': currency, 'type': None, 'margin': marginInside, 'name': self.safe_string(currency, 'assetName'), 'active': active, 'deposit': None, 'withdraw': None, 'fee': fee, 'precision': precision, 'limits': { 'amount': { 'min': precision, 'max': None, }, 'withdraw': { 'min': self.safe_number(currency, 'minWithdrawalAmt'), 'max': None, }, }, 'networks': {}, } return result def fetch_markets(self, params={}): """ retrieves data on all markets for ascendex :param dict [params]: extra parameters specific to the exchange api endpoint :returns dict[]: an array of objects representing market data """ products = self.v1PublicGetProducts(params) # # { # "code": 0, # "data": [ # { # "symbol": "LBA/BTC", # "baseAsset": "LBA", # "quoteAsset": "BTC", # "status": "Normal", # "minNotional": "0.000625", # "maxNotional": "6.25", # "marginTradable": False, # "commissionType": "Quote", # "commissionReserveRate": "0.001", # "tickSize": "0.000000001", # "lotSize": "1" # }, # ] # } # cash = self.v1PublicGetCashProducts(params) # # { # "code": 0, # "data": [ # { # "symbol": "QTUM/BTC", # "displayName": "QTUM/BTC", # "domain": "BTC", # "tradingStartTime": 1569506400000, # "collapseDecimals": "0.0001,0.000001,0.00000001", # "minQty": "0.000000001", # "maxQty": "1000000000", # "minNotional": "0.000625", # "maxNotional": "12.5", # "statusCode": "Normal", # "statusMessage": "", # "tickSize": "0.00000001", # "useTick": False, # "lotSize": "0.1", # "useLot": False, # "commissionType": "Quote", # "commissionReserveRate": "0.001", # "qtyScale": 1, # "priceScale": 8, # "notionalScale": 4 # } # ] # 
} # perpetuals = self.v2PublicGetFuturesContract(params) # # { # "code": 0, # "data": [ # { # "symbol": "BTC-PERP", # "status": "Normal", # "displayName": "BTCUSDT", # "settlementAsset": "USDT", # "underlying": "BTC/USDT", # "tradingStartTime": 1579701600000, # "priceFilter": { # "minPrice": "1", # "maxPrice": "1000000", # "tickSize": "1" # }, # "lotSizeFilter": { # "minQty": "0.0001", # "maxQty": "1000000000", # "lotSize": "0.0001" # }, # "commissionType": "Quote", # "commissionReserveRate": "0.001", # "marketOrderPriceMarkup": "0.03", # "marginRequirements": [ # { # "positionNotionalLowerBound": "0", # "positionNotionalUpperBound": "50000", # "initialMarginRate": "0.01", # "maintenanceMarginRate": "0.006" # }, # ... # ] # } # ] # } # productsData = self.safe_value(products, 'data', []) productsById = self.index_by(productsData, 'symbol') cashData = self.safe_value(cash, 'data', []) perpetualsData = self.safe_value(perpetuals, 'data', []) cashAndPerpetualsData = self.array_concat(cashData, perpetualsData) cashAndPerpetualsById = self.index_by(cashAndPerpetualsData, 'symbol') dataById = self.deep_extend(productsById, cashAndPerpetualsById) ids = list(dataById.keys()) result = [] for i in range(0, len(ids)): id = ids[i] market = dataById[id] settleId = self.safe_value(market, 'settlementAsset') settle = self.safe_currency_code(settleId) status = self.safe_string(market, 'status') domain = self.safe_string(market, 'domain') active = False if ((status == 'Normal') or (status == 'InternalTrading')) and (domain != 'LeveragedETF'): active = True spot = settle is None swap = not spot linear = True if swap else None minQty = self.safe_number(market, 'minQty') maxQty = self.safe_number(market, 'maxQty') minPrice = self.safe_number(market, 'tickSize') maxPrice = None underlying = self.safe_string_2(market, 'underlying', 'symbol') parts = underlying.split('/') baseId = self.safe_string(parts, 0) quoteId = self.safe_string(parts, 1) base = self.safe_currency_code(baseId) quote 
= self.safe_currency_code(quoteId) symbol = base + '/' + quote if swap: lotSizeFilter = self.safe_value(market, 'lotSizeFilter') minQty = self.safe_number(lotSizeFilter, 'minQty') maxQty = self.safe_number(lotSizeFilter, 'maxQty') priceFilter = self.safe_value(market, 'priceFilter') minPrice = self.safe_number(priceFilter, 'minPrice') maxPrice = self.safe_number(priceFilter, 'maxPrice') symbol = base + '/' + quote + ':' + settle fee = self.safe_number(market, 'commissionReserveRate') marginTradable = self.safe_value(market, 'marginTradable', False) result.append({ 'id': id, 'symbol': symbol, 'base': base, 'quote': quote, 'settle': settle, 'baseId': baseId, 'quoteId': quoteId, 'settleId': settleId, 'type': 'swap' if swap else 'spot', 'spot': spot, 'margin': marginTradable if spot else None, 'swap': swap, 'future': False, 'option': False, 'active': active, 'contract': swap, 'linear': linear, 'inverse': not linear if swap else None, 'taker': fee, 'maker': fee, 'contractSize': self.parse_number('1') if swap else None, 'expiry': None, 'expiryDatetime': None, 'strike': None, 'optionType': None, 'precision': { 'amount': self.safe_number(market, 'lotSize'), 'price': self.safe_number(market, 'tickSize'), }, 'limits': { 'leverage': { 'min': None, 'max': None, }, 'amount': { 'min': minQty, 'max': maxQty, }, 'price': { 'min': minPrice, 'max': maxPrice, }, 'cost': { 'min': self.safe_number(market, 'minNotional'), 'max': self.safe_number(market, 'maxNotional'), }, }, 'info': market, }) return result def fetch_time(self, params={}): """ fetches the current integer timestamp in milliseconds from the ascendex server :param dict [params]: extra parameters specific to the ascendex api endpoint :returns int: the current integer timestamp in milliseconds from the ascendex server """ request = { 'requestTime': self.milliseconds(), } response = self.v1PublicGetExchangeInfo(self.extend(request, params)) # # { # "code": 0, # "data": { # "requestTimeEcho": 1656560463601, # 
"requestReceiveAt": 1656560464331, # "latency": 730 # } # } # data = self.safe_value(response, 'data') return self.safe_integer(data, 'requestReceiveAt') def fetch_accounts(self, params={}): """ fetch all the accounts associated with a profile :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a dictionary of `account structures <https://github.com/ccxt/ccxt/wiki/Manual#account-structure>` indexed by the account type """ accountGroup = self.safe_string(self.options, 'account-group') response = None if accountGroup is None: response = self.v1PrivateGetInfo(params) # # { # "code":0, # "data":{ # "email":"igor.kroitor@gmail.com", # "accountGroup":8, # "viewPermission":true, # "tradePermission":true, # "transferPermission":true, # "cashAccount":["cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda"], # "marginAccount":["martXoh1v1N3EMQC5FDtSj5VHso8aI2Z"], # "futuresAccount":["futc9r7UmFJAyBY2rE3beA2JFxav2XFF"], # "userUID":"U6491137460" # } # } # data = self.safe_value(response, 'data', {}) accountGroup = self.safe_string(data, 'accountGroup') self.options['account-group'] = accountGroup return [ { 'id': accountGroup, 'type': None, 'currency': None, 'info': response, }, ] def parse_balance(self, response): timestamp = self.milliseconds() result = { 'info': response, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), } balances = self.safe_value(response, 'data', []) for i in range(0, len(balances)): balance = balances[i] code = self.safe_currency_code(self.safe_string(balance, 'asset')) account = self.account() account['free'] = self.safe_string(balance, 'availableBalance') account['total'] = self.safe_string(balance, 'totalBalance') result[code] = account return self.safe_balance(result) def parse_margin_balance(self, response): timestamp = self.milliseconds() result = { 'info': response, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), } balances = self.safe_value(response, 'data', []) for i in range(0, len(balances)): 
balance = balances[i] code = self.safe_currency_code(self.safe_string(balance, 'asset')) account = self.account() account['free'] = self.safe_string(balance, 'availableBalance') account['total'] = self.safe_string(balance, 'totalBalance') debt = self.safe_string(balance, 'borrowed') interest = self.safe_string(balance, 'interest') account['debt'] = Precise.string_add(debt, interest) result[code] = account return self.safe_balance(result) def parse_swap_balance(self, response): timestamp = self.milliseconds() result = { 'info': response, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), } data = self.safe_value(response, 'data', {}) collaterals = self.safe_value(data, 'collaterals', []) for i in range(0, len(collaterals)): balance = collaterals[i] code = self.safe_currency_code(self.safe_string(balance, 'asset')) account = self.account() account['total'] = self.safe_string(balance, 'balance') result[code] = account return self.safe_balance(result) def fetch_balance(self, params={}): """ query for balance and get the amount of funds available for trading or funds locked in orders :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>` """ self.load_markets() self.load_accounts() query = None marketType = None marketType, query = self.handle_market_type_and_params('fetchBalance', None, params) isMargin = self.safe_value(params, 'margin', False) marketType = 'margin' if isMargin else marketType params = self.omit(params, 'margin') options = self.safe_value(self.options, 'fetchBalance', {}) accountsByType = self.safe_value(self.options, 'accountsByType', {}) accountCategory = self.safe_string(accountsByType, marketType, 'cash') account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_string(account, 'id') request = { 'account-group': accountGroup, } defaultMethod = self.safe_string(options, 'method', 
'v1PrivateAccountCategoryGetBalance') method = self.get_supported_mapping(marketType, { 'spot': defaultMethod, 'margin': defaultMethod, 'swap': 'v2PrivateAccountGroupGetFuturesPosition', }) if (accountCategory == 'cash') or (accountCategory == 'margin'): request['account-category'] = accountCategory response = getattr(self, method)(self.extend(request, query)) # # cash # # { # 'code': 0, # 'data': [ # { # 'asset': 'BCHSV', # 'totalBalance': '64.298000048', # 'availableBalance': '64.298000048', # }, # ] # } # # margin # # { # 'code': 0, # 'data': [ # { # 'asset': 'BCHSV', # 'totalBalance': '64.298000048', # 'availableBalance': '64.298000048', # 'borrowed': '0', # 'interest': '0', # }, # ] # } # # swap # # { # "code": 0, # "data": { # "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn", # "ac": "FUTURES", # "collaterals": [ # {"asset":"ADA","balance":"0.355803","referencePrice":"1.05095","discountFactor":"0.9"}, # {"asset":"USDT","balance":"0.000014519","referencePrice":"1","discountFactor":"1"} # ], # }j # } # if marketType == 'swap': return self.parse_swap_balance(response) elif marketType == 'margin': return self.parse_margin_balance(response) else: return self.parse_balance(response) def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}): """ fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data :param str symbol: unified symbol of the market to fetch the order book for :param int [limit]: the maximum amount of order book entries to return :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols """ self.load_markets() market = self.market(symbol) request = { 'symbol': market['id'], } response = self.v1PublicGetDepth(self.extend(request, params)) # # { # "code":0, # "data":{ # "m":"depth-snapshot", # "symbol":"BTC-PERP", # "data":{ # 
"ts":1590223998202, # "seqnum":115444921, # "asks":[ # ["9207.5","18.2383"], # ["9207.75","18.8235"], # ["9208","10.7873"], # ], # "bids":[ # ["9207.25","0.4009"], # ["9207","0.003"], # ["9206.5","0.003"], # ] # } # } # } # data = self.safe_value(response, 'data', {}) orderbook = self.safe_value(data, 'data', {}) timestamp = self.safe_integer(orderbook, 'ts') result = self.parse_order_book(orderbook, symbol, timestamp) result['nonce'] = self.safe_integer(orderbook, 'seqnum') return result def parse_ticker(self, ticker, market=None): # # { # "symbol":"QTUM/BTC", # "open":"0.00016537", # "close":"0.00019077", # "high":"0.000192", # "low":"0.00016537", # "volume":"846.6", # "ask":["0.00018698","26.2"], # "bid":["0.00018408","503.7"], # "type":"spot" # } # timestamp = None marketId = self.safe_string(ticker, 'symbol') type = self.safe_string(ticker, 'type') delimiter = '/' if (type == 'spot') else None symbol = self.safe_symbol(marketId, market, delimiter) close = self.safe_string(ticker, 'close') bid = self.safe_value(ticker, 'bid', []) ask = self.safe_value(ticker, 'ask', []) open = self.safe_string(ticker, 'open') return self.safe_ticker({ 'symbol': symbol, 'timestamp': timestamp, 'datetime': None, 'high': self.safe_string(ticker, 'high'), 'low': self.safe_string(ticker, 'low'), 'bid': self.safe_string(bid, 0), 'bidVolume': self.safe_string(bid, 1), 'ask': self.safe_string(ask, 0), 'askVolume': self.safe_string(ask, 1), 'vwap': None, 'open': open, 'close': close, 'last': close, 'previousClose': None, # previous day close 'change': None, 'percentage': None, 'average': None, 'baseVolume': self.safe_string(ticker, 'volume'), 'quoteVolume': None, 'info': ticker, }, market) def fetch_ticker(self, symbol: str, params={}): """ fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market :param str symbol: unified symbol of the market to fetch the ticker for :param dict [params]: extra parameters specific to 
the ascendex api endpoint :returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>` """ self.load_markets() market = self.market(symbol) request = { 'symbol': market['id'], } response = self.v1PublicGetTicker(self.extend(request, params)) # # { # "code":0, # "data":{ # "symbol":"BTC-PERP", # or "BTC/USDT" # "open":"9073", # "close":"9185.75", # "high":"9185.75", # "low":"9185.75", # "volume":"576.8334", # "ask":["9185.75","15.5863"], # "bid":["9185.5","0.003"], # "type":"derivatives", # or "spot" # } # } # data = self.safe_value(response, 'data', {}) return self.parse_ticker(data, market) def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}): """ fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market see https://ascendex.github.io/ascendex-pro-api/#ticker see https://ascendex.github.io/ascendex-futures-pro-api-v2/#ticker :param str[]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a dictionary of `ticker structures <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>` """ self.load_markets() request = {} market = None if symbols is not None: symbol = self.safe_value(symbols, 0) market = self.market(symbol) marketIds = self.market_ids(symbols) request['symbol'] = ','.join(marketIds) type = None type, params = self.handle_market_type_and_params('fetchTickers', market, params) response = None if type == 'spot': response = self.v1PublicGetTicker(self.extend(request, params)) else: response = self.v2PublicGetFuturesTicker(self.extend(request, params)) # # { # "code":0, # "data":[ # { # "symbol":"QTUM/BTC", # "open":"0.00016537", # "close":"0.00019077", # "high":"0.000192", # "low":"0.00016537", # "volume":"846.6", # "ask":["0.00018698","26.2"], # 
"bid":["0.00018408","503.7"], # "type":"spot" # } # ] # } # data = self.safe_value(response, 'data', []) if not isinstance(data, list): return self.parse_tickers([data], symbols) return self.parse_tickers(data, symbols) def parse_ohlcv(self, ohlcv, market=None): # # { # "m":"bar", # "s":"BTC/USDT", # "data":{ # "i":"1", # "ts":1590228000000, # "o":"9139.59", # "c":"9131.94", # "h":"9139.99", # "l":"9121.71", # "v":"25.20648" # } # } # data = self.safe_value(ohlcv, 'data', {}) return [ self.safe_integer(data, 'ts'), self.safe_number(data, 'o'), self.safe_number(data, 'h'), self.safe_number(data, 'l'), self.safe_number(data, 'c'), self.safe_number(data, 'v'), ] def fetch_ohlcv(self, symbol: str, timeframe='1m', since: Optional[int] = None, limit: Optional[int] = None, params={}): """ fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market :param str symbol: unified symbol of the market to fetch OHLCV data for :param str timeframe: the length of time each candle represents :param int [since]: timestamp in ms of the earliest candle to fetch :param int [limit]: the maximum amount of candles to fetch :param dict [params]: extra parameters specific to the ascendex api endpoint :returns int[][]: A list of candles ordered, open, high, low, close, volume """ self.load_markets() market = self.market(symbol) request = { 'symbol': market['id'], 'interval': self.safe_string(self.timeframes, timeframe, timeframe), } # if since and limit are not specified # the exchange will return just 1 last candle by default duration = self.parse_timeframe(timeframe) options = self.safe_value(self.options, 'fetchOHLCV', {}) defaultLimit = self.safe_integer(options, 'limit', 500) if since is not None: request['from'] = since if limit is None: limit = defaultLimit else: limit = min(limit, defaultLimit) request['to'] = self.sum(since, limit * duration * 1000, 1) elif limit is not None: request['n'] = limit # max 500 response = 
self.v1PublicGetBarhist(self.extend(request, params)) # # { # "code":0, # "data":[ # { # "m":"bar", # "s":"BTC/USDT", # "data":{ # "i":"1", # "ts":1590228000000, # "o":"9139.59", # "c":"9131.94", # "h":"9139.99", # "l":"9121.71", # "v":"25.20648" # } # } # ] # } # data = self.safe_value(response, 'data', []) return self.parse_ohlcvs(data, market, timeframe, since, limit) def parse_trade(self, trade, market=None): # # public fetchTrades # # { # "p":"9128.5", # price # "q":"0.0030", # quantity # "ts":1590229002385, # timestamp # "bm":false, # if True, the buyer is the market maker, we only use self field to "define the side" of a public trade # "seqnum":180143985289898554 # } # timestamp = self.safe_integer(trade, 'ts') priceString = self.safe_string_2(trade, 'price', 'p') amountString = self.safe_string(trade, 'q') buyerIsMaker = self.safe_value(trade, 'bm', False) side = 'sell' if buyerIsMaker else 'buy' market = self.safe_market(None, market) return self.safe_trade({ 'info': trade, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'symbol': market['symbol'], 'id': None, 'order': None, 'type': None, 'takerOrMaker': None, 'side': side, 'price': priceString, 'amount': amountString, 'cost': None, 'fee': None, }, market) def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}): """ get the list of most recent trades for a particular symbol see https://ascendex.github.io/ascendex-pro-api/#market-trades :param str symbol: unified symbol of the market to fetch trades for :param int [since]: timestamp in ms of the earliest trade to fetch :param int [limit]: the maximum amount of trades to fetch :param dict [params]: extra parameters specific to the ascendex api endpoint :returns Trade[]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>` """ self.load_markets() market = self.market(symbol) request = { 'symbol': market['id'], } if limit is not None: request['n'] = limit # max 
100 response = self.v1PublicGetTrades(self.extend(request, params)) # # { # "code":0, # "data":{ # "m":"trades", # "symbol":"BTC-PERP", # "data":[ # {"p":"9128.5","q":"0.0030","ts":1590229002385,"bm":false,"seqnum":180143985289898554}, # {"p":"9129","q":"0.0030","ts":1590229002642,"bm":false,"seqnum":180143985289898587}, # {"p":"9129.5","q":"0.0030","ts":1590229021306,"bm":false,"seqnum":180143985289899043} # ] # } # } # records = self.safe_value(response, 'data', []) trades = self.safe_value(records, 'data', []) return self.parse_trades(trades, market, since, limit) def parse_order_status(self, status): statuses = { 'PendingNew': 'open', 'New': 'open', 'PartiallyFilled': 'open', 'Filled': 'closed', 'Canceled': 'canceled', 'Rejected': 'rejected', } return self.safe_string(statuses, status, status) def parse_order(self, order, market=None): # # createOrder # # { # "id": "16e607e2b83a8bXHbAwwoqDo55c166fa", # "orderId": "16e85b4d9b9a8bXHbAwwoqDoc3d66830", # "orderType": "Market", # "symbol": "BTC/USDT", # "timestamp": 1573576916201 # } # # { # "ac": "FUTURES", # "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn", # "time": 1640819389454, # "orderId": "a17e0874ecbdU0711043490bbtcpDU5X", # "seqNum": -1, # "orderType": "Limit", # "execInst": "NULL_VAL", # "side": "Buy", # "symbol": "BTC-PERP", # "price": "30000", # "orderQty": "0.002", # "stopPrice": "0", # "stopBy": "ref-px", # "status": "Ack", # "lastExecTime": 1640819389454, # "lastQty": "0", # "lastPx": "0", # "avgFilledPx": "0", # "cumFilledQty": "0", # "fee": "0", # "cumFee": "0", # "feeAsset": "", # "errorCode": "", # "posStopLossPrice": "0", # "posStopLossTrigger": "market", # "posTakeProfitPrice": "0", # "posTakeProfitTrigger": "market", # "liquidityInd": "n" # } # # fetchOrder, fetchOpenOrders, fetchClosedOrders # # { # "symbol": "BTC/USDT", # "price": "8131.22", # "orderQty": "0.00082", # "orderType": "Market", # "avgPx": "7392.02", # "cumFee": "0.005152238", # "cumFilledQty": "0.00082", # "errorCode": "", # 
"feeAsset": "USDT", # "lastExecTime": 1575953151764, # "orderId": "a16eee20b6750866943712zWEDdAjt3", # "seqNum": 2623469, # "side": "Buy", # "status": "Filled", # "stopPrice": "", # "execInst": "NULL_VAL" # "Post"(for postOnly orders), "reduceOnly"(for reduceOnly orders) # } # # { # "orderId": "a173ad938fc3U22666567717788c3b66", # orderId # "seqNum": 18777366360, # sequence number # "accountId": "cshwSjbpPjSwHmxPdz2CPQVU9mnbzPpt", # accountId # "symbol": "BTC/USDT", # symbol # "orderType": "Limit", # order type(Limit/Market/StopMarket/StopLimit) # "side": "Sell", # order side(Buy/Sell) # "price": "11346.77", # order price # "stopPrice": "0", # stop price(0 by default) # "orderQty": "0.01", # order quantity(in base asset) # "status": "Canceled", # order status(Filled/Canceled/Rejected) # "createTime": 1596344995793, # order creation time # "lastExecTime": 1596344996053, # last execution time # "avgFillPrice": "11346.77", # average filled price # "fillQty": "0.01", # filled quantity(in base asset) # "fee": "-0.004992579", # cummulative fee. if negative, self value is the commission charged; if possitive, self value is the rebate received. 
# "feeAsset": "USDT" # fee asset # } # # { # "ac": "FUTURES", # "accountId": "testabcdefg", # "avgPx": "0", # "cumFee": "0", # "cumQty": "0", # "errorCode": "NULL_VAL", # "execInst": "NULL_VAL", # "feeAsset": "USDT", # "lastExecTime": 1584072844085, # "orderId": "r170d21956dd5450276356bbtcpKa74", # "orderQty": "1.1499", # "orderType": "Limit", # "price": "4000", # "sendingTime": 1584072841033, # "seqNum": 24105338, # "side": "Buy", # "status": "Canceled", # "stopPrice": "", # "symbol": "BTC-PERP" # }, # status = self.parse_order_status(self.safe_string(order, 'status')) marketId = self.safe_string(order, 'symbol') symbol = self.safe_symbol(marketId, market, '/') timestamp = self.safe_integer_2(order, 'timestamp', 'sendingTime') lastTradeTimestamp = self.safe_integer(order, 'lastExecTime') if timestamp is None: timestamp = lastTradeTimestamp price = self.safe_string(order, 'price') amount = self.safe_string(order, 'orderQty') average = self.safe_string(order, 'avgPx') filled = self.safe_string_n(order, ['cumFilledQty', 'cumQty', 'fillQty']) id = self.safe_string(order, 'orderId') clientOrderId = self.safe_string(order, 'id') if clientOrderId is not None: if len(clientOrderId) < 1: clientOrderId = None rawTypeLower = self.safe_string_lower(order, 'orderType') type = rawTypeLower if rawTypeLower is not None: if rawTypeLower == 'stoplimit': type = 'limit' if rawTypeLower == 'stopmarket': type = 'market' side = self.safe_string_lower(order, 'side') feeCost = self.safe_number_2(order, 'cumFee', 'fee') fee = None if feeCost is not None: feeCurrencyId = self.safe_string(order, 'feeAsset') feeCurrencyCode = self.safe_currency_code(feeCurrencyId) fee = { 'cost': feeCost, 'currency': feeCurrencyCode, } stopPrice = self.safe_number(order, 'stopPrice') reduceOnly = None execInst = self.safe_string(order, 'execInst') if execInst == 'reduceOnly': reduceOnly = True postOnly = None if execInst == 'Post': postOnly = True return self.safe_order({ 'info': order, 'id': id, 
'clientOrderId': clientOrderId, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'lastTradeTimestamp': lastTradeTimestamp, 'symbol': symbol, 'type': type, 'timeInForce': None, 'postOnly': postOnly, 'reduceOnly': reduceOnly, 'side': side, 'price': price, 'stopPrice': stopPrice, 'triggerPrice': stopPrice, 'amount': amount, 'cost': None, 'average': average, 'filled': filled, 'remaining': None, 'status': status, 'fee': fee, 'trades': None, }, market) def fetch_trading_fees(self, params={}): """ fetch the trading fees for multiple markets :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a dictionary of `fee structures <https://github.com/ccxt/ccxt/wiki/Manual#fee-structure>` indexed by market symbols """ self.load_markets() self.load_accounts() account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_string(account, 'id') request = { 'account-group': accountGroup, } response = self.v1PrivateAccountGroupGetSpotFee(self.extend(request, params)) # # { # code: '0', # data: { # domain: 'spot', # userUID: 'U1479576458', # vipLevel: '0', # fees: [ # {symbol: 'HT/USDT', fee: {taker: '0.001', maker: '0.001'}}, # {symbol: 'LAMB/BTC', fee: {taker: '0.002', maker: '0.002'}}, # {symbol: 'STOS/USDT', fee: {taker: '0.002', maker: '0.002'}}, # ... 
# ] # } # } # data = self.safe_value(response, 'data', {}) fees = self.safe_value(data, 'fees', []) result = {} for i in range(0, len(fees)): fee = fees[i] marketId = self.safe_string(fee, 'symbol') symbol = self.safe_symbol(marketId, None, '/') takerMaker = self.safe_value(fee, 'fee', {}) result[symbol] = { 'info': fee, 'symbol': symbol, 'maker': self.safe_number(takerMaker, 'maker'), 'taker': self.safe_number(takerMaker, 'taker'), } return result def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}): """ Create an order on the exchange :param str symbol: Unified CCXT market symbol :param str type: "limit" or "market" :param str side: "buy" or "sell" :param float amount: the amount of currency to trade :param float [price]: *ignored in "market" orders* the price at which the order is to be fullfilled at in units of the quote currency :param dict [params]: Extra parameters specific to the exchange API endpoint :param str [params.timeInForce]: "GTC", "IOC", "FOK", or "PO" :param bool [params.postOnly]: True or False :param float [params.stopPrice]: The price at which a trigger order is triggered at :returns: `An order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>` """ self.load_markets() self.load_accounts() market = self.market(symbol) marketType = None marketType, params = self.handle_market_type_and_params('createOrder', market, params) options = self.safe_value(self.options, 'createOrder', {}) accountsByType = self.safe_value(self.options, 'accountsByType', {}) accountCategory = self.safe_string(accountsByType, marketType, 'cash') account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_value(account, 'id') clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id') request = { 'account-group': accountGroup, 'account-category': accountCategory, 'symbol': market['id'], 'time': self.milliseconds(), 'orderQty': self.amount_to_precision(symbol, amount), 'orderType': type, # 
    def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
        """
        Create an order on the exchange
        :param str symbol: Unified CCXT market symbol
        :param str type: "limit" or "market"
        :param str side: "buy" or "sell"
        :param float amount: the amount of currency to trade
        :param float [price]: *ignored in "market" orders* the price at which the order is to be fullfilled at in units of the quote currency
        :param dict [params]: Extra parameters specific to the exchange API endpoint
        :param str [params.timeInForce]: "GTC", "IOC", "FOK", or "PO"
        :param bool [params.postOnly]: True or False
        :param float [params.stopPrice]: The price at which a trigger order is triggered at
        :returns: `An order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
        """
        self.load_markets()
        self.load_accounts()
        market = self.market(symbol)
        marketType = None
        marketType, params = self.handle_market_type_and_params('createOrder', market, params)
        options = self.safe_value(self.options, 'createOrder', {})
        # spot/margin orders go to the "cash"/"margin" account category,
        # swap orders go through the futures account group endpoint
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        accountCategory = self.safe_string(accountsByType, marketType, 'cash')
        account = self.safe_value(self.accounts, 0, {})
        accountGroup = self.safe_value(account, 'id')
        clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id')
        request = {
            'account-group': accountGroup,
            'account-category': accountCategory,
            'symbol': market['id'],
            'time': self.milliseconds(),
            'orderQty': self.amount_to_precision(symbol, amount),
            'orderType': type,  # limit, market, stop_market, stop_limit
            'side': side,  # buy or sell,
            # 'execInst':  # Post for postOnly, ReduceOnly for reduceOnly
            # 'respInst': 'ACK',  # ACK, 'ACCEPT, DONE
        }
        isMarketOrder = ((type == 'market') or (type == 'stop_market'))
        isLimitOrder = ((type == 'limit') or (type == 'stop_limit'))
        timeInForce = self.safe_string(params, 'timeInForce')
        postOnly = self.is_post_only(isMarketOrder, False, params)
        reduceOnly = self.safe_value(params, 'reduceOnly', False)
        stopPrice = self.safe_value_2(params, 'triggerPrice', 'stopPrice')
        params = self.omit(params, ['timeInForce', 'postOnly', 'reduceOnly', 'stopPrice', 'triggerPrice'])
        if reduceOnly:
            # reduceOnly is only meaningful for derivatives positions
            if marketType != 'swap':
                raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + marketType + ' orders, reduceOnly orders are supported for perpetuals only')
            request['execInst'] = 'ReduceOnly'
        if isLimitOrder:
            request['orderPrice'] = self.price_to_precision(symbol, price)
        if timeInForce == 'IOC':
            request['timeInForce'] = 'IOC'
        if timeInForce == 'FOK':
            request['timeInForce'] = 'FOK'
        if postOnly:
            request['postOnly'] = True
        if stopPrice is not None:
            # a trigger price upgrades the order type to its stop_* variant
            request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
            if isLimitOrder:
                request['orderType'] = 'stop_limit'
            elif isMarketOrder:
                request['orderType'] = 'stop_market'
        if clientOrderId is not None:
            request['id'] = clientOrderId
        defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryPostOrder')
        method = self.get_supported_mapping(marketType, {
            'spot': defaultMethod,
            'margin': defaultMethod,
            'swap': 'v2PrivateAccountGroupPostFuturesOrder',
        })
        if method == 'v1PrivateAccountCategoryPostOrder':
            # the cash endpoint takes the category in the URL path as "category"
            if accountCategory is not None:
                request['category'] = accountCategory
        else:
            request['account-category'] = accountCategory
        response = getattr(self, method)(self.extend(request, params))
        #
        # spot response nests the order under data.info,
        # swap response nests it under data.order
        #
        data = self.safe_value(response, 'data', {})
        order = self.safe_value_2(data, 'order', 'info', {})
        return self.parse_order(order, market)
"symbol":"TRX/USDT", # "orderType":"StopLimit", # "timestamp":1654290662172, # "id":"", # "orderId":"a1812b6840ddU8191168955av0k6Eyhj" # } # } # } # # # swap # # { # "code":0, # "data": { # "meta": { # "id":"", # "action":"place-order", # "respInst":"ACK" # }, # "order": { # "ac":"FUTURES", # "accountId":"futwT8RKojkT1HoaA5UdeimR2SrmHG2I", # "time":1654290969965, # "orderId":"a1812b6cf322U8191168955oJamfTh7b", # "seqNum":-1, # "orderType":"StopLimit", # "execInst":"NULL_VAL", # "side":"Buy", # "symbol":"TRX-PERP", # "price":"0.083", # "orderQty":"1", # "stopPrice":"0.082", # "stopBy":"ref-px", # "status":"Ack", # "lastExecTime":1654290969965, # "lastQty":"0", # "lastPx":"0", # "avgFilledPx":"0", # "cumFilledQty":"0", # "fee":"0", # "cumFee":"0", # "feeAsset":"", # "errorCode":"", # "posStopLossPrice":"0", # "posStopLossTrigger":"market", # "posTakeProfitPrice":"0", # "posTakeProfitTrigger":"market", # "liquidityInd":"n" # } # } # } # data = self.safe_value(response, 'data', {}) order = self.safe_value_2(data, 'order', 'info', {}) return self.parse_order(order, market) def fetch_order(self, id: str, symbol: Optional[str] = None, params={}): """ fetches information on an order made by the user :param str symbol: unified symbol of the market the order was made in :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>` """ self.load_markets() self.load_accounts() market = None if symbol is not None: market = self.market(symbol) type, query = self.handle_market_type_and_params('fetchOrder', market, params) options = self.safe_value(self.options, 'fetchOrder', {}) accountsByType = self.safe_value(self.options, 'accountsByType', {}) accountCategory = self.safe_string(accountsByType, type, 'cash') account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_value(account, 'id') request = { 'account-group': accountGroup, 'account-category': 
accountCategory, 'orderId': id, } defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderStatus') method = self.get_supported_mapping(type, { 'spot': defaultMethod, 'margin': defaultMethod, 'swap': 'v2PrivateAccountGroupGetFuturesOrderStatus', }) if method == 'v1PrivateAccountCategoryGetOrderStatus': if accountCategory is not None: request['category'] = accountCategory else: request['account-category'] = accountCategory response = getattr(self, method)(self.extend(request, query)) # # AccountCategoryGetOrderStatus # # { # "code": 0, # "accountCategory": "CASH", # "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo", # "data": [ # { # "symbol": "BTC/USDT", # "price": "8131.22", # "orderQty": "0.00082", # "orderType": "Market", # "avgPx": "7392.02", # "cumFee": "0.005152238", # "cumFilledQty": "0.00082", # "errorCode": "", # "feeAsset": "USDT", # "lastExecTime": 1575953151764, # "orderId": "a16eee20b6750866943712zWEDdAjt3", # "seqNum": 2623469, # "side": "Buy", # "status": "Filled", # "stopPrice": "", # "execInst": "NULL_VAL" # } # ] # } # # AccountGroupGetFuturesOrderStatus # # { # "code": 0, # "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn", # "ac": "FUTURES", # "data": { # "ac": "FUTURES", # "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn", # "time": 1640247020217, # "orderId": "r17de65747aeU0711043490bbtcp0cmt", # "seqNum": 28796162908, # "orderType": "Limit", # "execInst": "NULL_VAL", # "side": "Buy", # "symbol": "BTC-PERP", # "price": "30000", # "orderQty": "0.0021", # "stopPrice": "0", # "stopBy": "market", # "status": "New", # "lastExecTime": 1640247020232, # "lastQty": "0", # "lastPx": "0", # "avgFilledPx": "0", # "cumFilledQty": "0", # "fee": "0", # "cumFee": "0", # "feeAsset": "USDT", # "errorCode": "", # "posStopLossPrice": "0", # "posStopLossTrigger": "market", # "posTakeProfitPrice": "0", # "posTakeProfitTrigger": "market", # "liquidityInd": "n" # } # } # data = self.safe_value(response, 'data', {}) return self.parse_order(data, 
    def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
        """
        fetch all unfilled currently open orders
        :param str symbol: unified market symbol
        :param int [since]: the earliest time in ms to fetch open orders for
        :param int [limit]: the maximum number of open orders structures to retrieve
        :param dict [params]: extra parameters specific to the ascendex api endpoint
        :returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
        """
        self.load_markets()
        self.load_accounts()
        market = None
        if symbol is not None:
            market = self.market(symbol)
            symbol = market['symbol']
        account = self.safe_value(self.accounts, 0, {})
        accountGroup = self.safe_value(account, 'id')
        type, query = self.handle_market_type_and_params('fetchOpenOrders', market, params)
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        accountCategory = self.safe_string(accountsByType, type, 'cash')
        request = {
            'account-group': accountGroup,
            'account-category': accountCategory,
        }
        options = self.safe_value(self.options, 'fetchOpenOrders', {})
        defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderOpen')
        method = self.get_supported_mapping(type, {
            'spot': defaultMethod,
            'margin': defaultMethod,
            'swap': 'v2PrivateAccountGroupGetFuturesOrderOpen',
        })
        if method == 'v1PrivateAccountCategoryGetOrderOpen':
            # the cash endpoint takes the category in the URL path as "category"
            if accountCategory is not None:
                request['category'] = accountCategory
        else:
            request['account-category'] = accountCategory
        response = getattr(self, method)(self.extend(request, query))
        #
        # both endpoints return {"code":0, "data":[{...order...}, ...]}
        #
        data = self.safe_value(response, 'data', [])
        if accountCategory == 'futures':
            return self.parse_orders(data, market, since, limit)
        # a workaround for https://github.com/ccxt/ccxt/issues/7187
        orders = []
        for i in range(0, len(data)):
            order = self.parse_order(data[i], market)
            orders.append(order)
        return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
market symbol of the market orders were made in :param int [since]: the earliest time in ms to fetch orders for :param int [limit]: the maximum number of orde structures to retrieve :param dict [params]: extra parameters specific to the ascendex api endpoint :param int [params.until]: the latest time in ms to fetch orders for :returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>` """ self.load_markets() self.load_accounts() account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_value(account, 'id') request = { 'account-group': accountGroup, # 'category': accountCategory, # 'symbol': market['id'], # 'orderType': 'market', # optional, string # 'side': 'buy', # or 'sell', optional, case insensitive. # 'status': 'Filled', # "Filled", "Canceled", or "Rejected" # 'startTime': exchange.milliseconds(), # 'endTime': exchange.milliseconds(), # 'page': 1, # 'pageSize': 100, } market = None if symbol is not None: market = self.market(symbol) request['symbol'] = market['id'] type, query = self.handle_market_type_and_params('fetchClosedOrders', market, params) options = self.safe_value(self.options, 'fetchClosedOrders', {}) defaultMethod = self.safe_string(options, 'method', 'v2PrivateDataGetOrderHist') method = self.get_supported_mapping(type, { 'spot': defaultMethod, 'margin': defaultMethod, 'swap': 'v2PrivateAccountGroupGetFuturesOrderHistCurrent', }) accountsByType = self.safe_value(self.options, 'accountsByType', {}) accountCategory = self.safe_string(accountsByType, type, 'cash') # margin, futures if method == 'v2PrivateDataGetOrderHist': request['account'] = accountCategory if limit is not None: request['limit'] = limit else: request['account-category'] = accountCategory if limit is not None: request['pageSize'] = limit if since is not None: request['startTime'] = since until = self.safe_string(params, 'until') if until is not None: request['endTime'] = until response = getattr(self, 
method)(self.extend(request, query)) # # accountCategoryGetOrderHistCurrent # # { # "code":0, # "accountId":"cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda", # "ac":"CASH", # "data":[ # { # "seqNum":15561826728, # "orderId":"a17294d305c0U6491137460bethu7kw9", # "symbol":"ETH/USDT", # "orderType":"Limit", # "lastExecTime":1591635618200, # "price":"200", # "orderQty":"0.1", # "side":"Buy", # "status":"Canceled", # "avgPx":"0", # "cumFilledQty":"0", # "stopPrice":"", # "errorCode":"", # "cumFee":"0", # "feeAsset":"USDT", # "execInst":"NULL_VAL" # } # ] # } # # { # "code": 0, # "data": [ # { # "orderId" : "a173ad938fc3U22666567717788c3b66", # orderId # "seqNum" : 18777366360, # sequence number # "accountId" : "cshwSjbpPjSwHmxPdz2CPQVU9mnbzPpt", # accountId # "symbol" : "BTC/USDT", # symbol # "orderType" : "Limit", # order type(Limit/Market/StopMarket/StopLimit) # "side" : "Sell", # order side(Buy/Sell) # "price" : "11346.77", # order price # "stopPrice" : "0", # stop price(0 by default) # "orderQty" : "0.01", # order quantity(in base asset) # "status" : "Canceled", # order status(Filled/Canceled/Rejected) # "createTime" : 1596344995793, # order creation time # "lastExecTime": 1596344996053, # last execution time # "avgFillPrice": "11346.77", # average filled price # "fillQty" : "0.01", # filled quantity(in base asset) # "fee" : "-0.004992579", # cummulative fee. if negative, self value is the commission charged; if possitive, self value is the rebate received. 
# "feeAsset" : "USDT" # fee asset # } # ] # } # # accountGroupGetFuturesOrderHistCurrent # # { # "code": 0, # "data": [ # { # "ac": "FUTURES", # "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn", # "time": 1640245777002, # "orderId": "r17de6444fa6U0711043490bbtcpJ2lI", # "seqNum": 28796124902, # "orderType": "Limit", # "execInst": "NULL_VAL", # "side": "Buy", # "symbol": "BTC-PERP", # "price": "30000", # "orderQty": "0.0021", # "stopPrice": "0", # "stopBy": "market", # "status": "Canceled", # "lastExecTime": 1640246574886, # "lastQty": "0", # "lastPx": "0", # "avgFilledPx": "0", # "cumFilledQty": "0", # "fee": "0", # "cumFee": "0", # "feeAsset": "USDT", # "errorCode": "", # "posStopLossPrice": "0", # "posStopLossTrigger": "market", # "posTakeProfitPrice": "0", # "posTakeProfitTrigger": "market", # "liquidityInd": "n" # } # ] # } # data = self.safe_value(response, 'data') isArray = isinstance(data, list) if not isArray: data = self.safe_value(data, 'data', []) return self.parse_orders(data, market, since, limit) def cancel_order(self, id: str, symbol: Optional[str] = None, params={}): """ cancels an open order :param str id: order id :param str symbol: unified symbol of the market the order was made in :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>` """ if symbol is None: raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument') self.load_markets() self.load_accounts() market = self.market(symbol) type, query = self.handle_market_type_and_params('cancelOrder', market, params) options = self.safe_value(self.options, 'cancelOrder', {}) accountsByType = self.safe_value(self.options, 'accountsByType', {}) accountCategory = self.safe_string(accountsByType, type, 'cash') account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_value(account, 'id') request = { 'account-group': accountGroup, 'account-category': 
accountCategory, 'symbol': market['id'], 'time': self.milliseconds(), 'id': 'foobar', } defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryDeleteOrder') method = self.get_supported_mapping(type, { 'spot': defaultMethod, 'margin': defaultMethod, 'swap': 'v2PrivateAccountGroupDeleteFuturesOrder', }) if method == 'v1PrivateAccountCategoryDeleteOrder': if accountCategory is not None: request['category'] = accountCategory else: request['account-category'] = accountCategory clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id') if clientOrderId is None: request['orderId'] = id else: request['id'] = clientOrderId params = self.omit(params, ['clientOrderId', 'id']) response = getattr(self, method)(self.extend(request, query)) # # AccountCategoryDeleteOrder # # { # "code": 0, # "data": { # "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo", # "ac": "CASH", # "action": "cancel-order", # "status": "Ack", # "info": { # "id": "wv8QGquoeamhssvQBeHOHGQCGlcBjj23", # "orderId": "16e6198afb4s8bXHbAwwoqDo2ebc19dc", # "orderType": "", # could be empty # "symbol": "ETH/USDT", # "timestamp": 1573594877822 # } # } # } # # AccountGroupDeleteFuturesOrder # # { # "code": 0, # "data": { # "meta": { # "id": "foobar", # "action": "cancel-order", # "respInst": "ACK" # }, # "order": { # "ac": "FUTURES", # "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn", # "time": 1640244480476, # "orderId": "r17de63086f4U0711043490bbtcpPUF4", # "seqNum": 28795959269, # "orderType": "Limit", # "execInst": "NULL_VAL", # "side": "Buy", # "symbol": "BTC-PERP", # "price": "30000", # "orderQty": "0.0021", # "stopPrice": "0", # "stopBy": "market", # "status": "New", # "lastExecTime": 1640244480491, # "lastQty": "0", # "lastPx": "0", # "avgFilledPx": "0", # "cumFilledQty": "0", # "fee": "0", # "cumFee": "0", # "feeAsset": "BTCPC", # "errorCode": "", # "posStopLossPrice": "0", # "posStopLossTrigger": "market", # "posTakeProfitPrice": "0", # "posTakeProfitTrigger": "market", # 
"liquidityInd": "n" # } # } # } # data = self.safe_value(response, 'data', {}) order = self.safe_value_2(data, 'order', 'info', {}) return self.parse_order(order, market) def cancel_all_orders(self, symbol: Optional[str] = None, params={}): """ cancel all open orders :param str symbol: unified market symbol, only orders in the market of self symbol are cancelled when symbol is not None :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>` """ self.load_markets() self.load_accounts() market = None if symbol is not None: market = self.market(symbol) type, query = self.handle_market_type_and_params('cancelAllOrders', market, params) options = self.safe_value(self.options, 'cancelAllOrders', {}) accountsByType = self.safe_value(self.options, 'accountsByType', {}) accountCategory = self.safe_string(accountsByType, type, 'cash') account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_value(account, 'id') request = { 'account-group': accountGroup, 'account-category': accountCategory, 'time': self.milliseconds(), } if symbol is not None: request['symbol'] = market['id'] defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryDeleteOrderAll') method = self.get_supported_mapping(type, { 'spot': defaultMethod, 'margin': defaultMethod, 'swap': 'v2PrivateAccountGroupDeleteFuturesOrderAll', }) if method == 'v1PrivateAccountCategoryDeleteOrderAll': if accountCategory is not None: request['category'] = accountCategory else: request['account-category'] = accountCategory response = getattr(self, method)(self.extend(request, query)) # # AccountCategoryDeleteOrderAll # # { # "code": 0, # "data": { # "ac": "CASH", # "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo", # "action": "cancel-all", # "info": { # "id": "2bmYvi7lyTrneMzpcJcf2D7Pe9V1P9wy", # "orderId": "", # "orderType": "NULL_VAL", # "symbol": "", # "timestamp": 
1574118495462 # }, # "status": "Ack" # } # } # # AccountGroupDeleteFuturesOrderAll # # { # "code": 0, # "data": { # "ac": "FUTURES", # "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn", # "action": "cancel-all", # "info": { # "symbol":"BTC-PERP" # } # } # } # return response def parse_deposit_address(self, depositAddress, currency=None): # # { # address: "0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722", # destTag: "", # tagType: "", # tagId: "", # chainName: "ERC20", # numConfirmations: 20, # withdrawalFee: 1, # nativeScale: 4, # tips: [] # } # address = self.safe_string(depositAddress, 'address') tagId = self.safe_string(depositAddress, 'tagId') tag = self.safe_string(depositAddress, tagId) self.check_address(address) code = None if (currency is None) else currency['code'] chainName = self.safe_string(depositAddress, 'chainName') network = self.safe_network(chainName) return { 'currency': code, 'address': address, 'tag': tag, 'network': network, 'info': depositAddress, } def safe_network(self, networkId): networksById = { 'TRC20': 'TRC20', 'ERC20': 'ERC20', 'GO20': 'GO20', 'BEP2': 'BEP2', 'BEP20(BSC)': 'BEP20', 'Bitcoin': 'BTC', 'Bitcoin ABC': 'BCH', 'Litecoin': 'LTC', 'Matic Network': 'MATIC', 'Solana': 'SOL', 'xDai': 'STAKE', 'Akash': 'AKT', } return self.safe_string(networksById, networkId, networkId) def fetch_deposit_address(self, code: str, params={}): """ fetch the deposit address for a currency associated with self account :param str code: unified currency code :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: an `address structure <https://github.com/ccxt/ccxt/wiki/Manual#address-structure>` """ self.load_markets() currency = self.currency(code) chainName = self.safe_string(params, 'chainName') params = self.omit(params, 'chainName') request = { 'asset': currency['id'], } response = self.v1PrivateGetWalletDepositAddress(self.extend(request, params)) # # { # "code":0, # "data":{ # "asset":"USDT", # "assetName":"Tether", 
# "address":[ # { # "address":"1N22odLHXnLPCjC8kwBJPTayarr9RtPod6", # "destTag":"", # "tagType":"", # "tagId":"", # "chainName":"Omni", # "numConfirmations":3, # "withdrawalFee":4.7, # "nativeScale":4, # "tips":[] # }, # { # "address":"0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722", # "destTag":"", # "tagType":"", # "tagId":"", # "chainName":"ERC20", # "numConfirmations":20, # "withdrawalFee":1.0, # "nativeScale":4, # "tips":[] # } # ] # } # } # data = self.safe_value(response, 'data', {}) addresses = self.safe_value(data, 'address', []) numAddresses = len(addresses) address = None if numAddresses > 1: addressesByChainName = self.index_by(addresses, 'chainName') if chainName is None: chainNames = list(addressesByChainName.keys()) chains = ', '.join(chainNames) raise ArgumentsRequired(self.id + ' fetchDepositAddress() returned more than one address, a chainName parameter is required, one of ' + chains) address = self.safe_value(addressesByChainName, chainName, {}) else: # first address address = self.safe_value(addresses, 0, {}) result = self.parse_deposit_address(address, currency) return self.extend(result, { 'info': response, }) def fetch_deposits(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}): """ fetch all deposits made to an account :param str code: unified currency code :param int [since]: the earliest time in ms to fetch deposits for :param int [limit]: the maximum number of deposits structures to retrieve :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>` """ request = { 'txType': 'deposit', } return self.fetch_transactions(code, since, limit, self.extend(request, params)) def fetch_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}): """ fetch all withdrawals made from an account :param str code: 
unified currency code :param int [since]: the earliest time in ms to fetch withdrawals for :param int [limit]: the maximum number of withdrawals structures to retrieve :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>` """ request = { 'txType': 'withdrawal', } return self.fetch_transactions(code, since, limit, self.extend(request, params)) def fetch_deposits_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}): """ fetch history of deposits and withdrawals :param str [code]: unified currency code for the currency of the deposit/withdrawals, default is None :param int [since]: timestamp in ms of the earliest deposit/withdrawal, default is None :param int [limit]: max number of deposit/withdrawals to return, default is None :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a list of `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>` """ self.load_markets() request = { # 'asset': currency['id'], # 'page': 1, # 'pageSize': 20, # 'startTs': self.milliseconds(), # 'endTs': self.milliseconds(), # 'txType': undefned, # deposit, withdrawal } currency = None if code is not None: currency = self.currency(code) request['asset'] = currency['id'] if since is not None: request['startTs'] = since if limit is not None: request['pageSize'] = limit response = self.v1PrivateGetWalletTransactions(self.extend(request, params)) # # { # code: 0, # data: { # data: [ # { # requestId: "wuzd1Ojsqtz4bCA3UXwtUnnJDmU8PiyB", # time: 1591606166000, # asset: "USDT", # transactionType: "deposit", # amount: "25", # commission: "0", # networkTransactionId: "0xbc4eabdce92f14dbcc01d799a5f8ca1f02f4a3a804b6350ea202be4d3c738fce", # status: "pending", # numConfirmed: 8, # numConfirmations: 20, # destAddress: {address: 
"0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722"} # } # ], # page: 1, # pageSize: 20, # hasNext: False # } # } # data = self.safe_value(response, 'data', {}) transactions = self.safe_value(data, 'data', []) return self.parse_transactions(transactions, currency, since, limit) def parse_transaction_status(self, status): statuses = { 'reviewing': 'pending', 'pending': 'pending', 'confirmed': 'ok', 'rejected': 'rejected', } return self.safe_string(statuses, status, status) def parse_transaction(self, transaction, currency=None): # # { # requestId: "wuzd1Ojsqtz4bCA3UXwtUnnJDmU8PiyB", # time: 1591606166000, # asset: "USDT", # transactionType: "deposit", # amount: "25", # commission: "0", # networkTransactionId: "0xbc4eabdce92f14dbcc01d799a5f8ca1f02f4a3a804b6350ea202be4d3c738fce", # status: "pending", # numConfirmed: 8, # numConfirmations: 20, # destAddress: { # address: "0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722", # destTag: "..." # for currencies that have it # } # } # destAddress = self.safe_value(transaction, 'destAddress', {}) address = self.safe_string(destAddress, 'address') tag = self.safe_string(destAddress, 'destTag') timestamp = self.safe_integer(transaction, 'time') currencyId = self.safe_string(transaction, 'asset') amountString = self.safe_string(transaction, 'amount') feeCostString = self.safe_string(transaction, 'commission') amountString = Precise.string_sub(amountString, feeCostString) code = self.safe_currency_code(currencyId, currency) return { 'info': transaction, 'id': self.safe_string(transaction, 'requestId'), 'txid': self.safe_string(transaction, 'networkTransactionId'), 'type': self.safe_string(transaction, 'transactionType'), 'currency': code, 'network': None, 'amount': self.parse_number(amountString), 'status': self.parse_transaction_status(self.safe_string(transaction, 'status')), 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'address': address, 'addressFrom': None, 'addressTo': address, 'tag': tag, 'tagFrom': None, 'tagTo': tag, 
'updated': None, 'comment': None, 'fee': { 'currency': code, 'cost': self.parse_number(feeCostString), 'rate': None, }, } def fetch_positions(self, symbols: Optional[List[str]] = None, params={}): """ fetch all open positions :param str[]|None symbols: list of unified market symbols :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict[]: a list of `position structure <https://github.com/ccxt/ccxt/wiki/Manual#position-structure>` """ self.load_markets() self.load_accounts() account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_string(account, 'id') request = { 'account-group': accountGroup, } response = self.v2PrivateAccountGroupGetFuturesPosition(self.extend(request, params)) # # { # "code": 0, # "data": { # "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn", # "ac": "FUTURES", # "collaterals": [ # { # "asset": "USDT", # "balance": "44.570287262", # "referencePrice": "1", # "discountFactor": "1" # } # ], # "contracts": [ # { # "symbol": "BTC-PERP", # "side": "LONG", # "position": "0.0001", # "referenceCost": "-3.12277254", # "unrealizedPnl": "-0.001700233", # "realizedPnl": "0", # "avgOpenPrice": "31209", # "marginType": "isolated", # "isolatedMargin": "1.654972977", # "leverage": "2", # "takeProfitPrice": "0", # "takeProfitTrigger": "market", # "stopLossPrice": "0", # "stopLossTrigger": "market", # "buyOpenOrderNotional": "0", # "sellOpenOrderNotional": "0", # "markPrice": "31210.723063672", # "indexPrice": "31223.148857925" # }, # ] # } # } # data = self.safe_value(response, 'data', {}) position = self.safe_value(data, 'contracts', []) result = [] for i in range(0, len(position)): result.append(self.parse_position(position[i])) symbols = self.market_symbols(symbols) return self.filter_by_array_positions(result, 'symbol', symbols, False) def parse_position(self, position, market=None): # # { # "symbol": "BTC-PERP", # "side": "LONG", # "position": "0.0001", # "referenceCost": "-3.12277254", # "unrealizedPnl": 
"-0.001700233", # "realizedPnl": "0", # "avgOpenPrice": "31209", # "marginType": "isolated", # "isolatedMargin": "1.654972977", # "leverage": "2", # "takeProfitPrice": "0", # "takeProfitTrigger": "market", # "stopLossPrice": "0", # "stopLossTrigger": "market", # "buyOpenOrderNotional": "0", # "sellOpenOrderNotional": "0", # "markPrice": "31210.723063672", # "indexPrice": "31223.148857925" # }, # marketId = self.safe_string(position, 'symbol') market = self.safe_market(marketId, market) notional = self.safe_string(position, 'buyOpenOrderNotional') if Precise.string_eq(notional, '0'): notional = self.safe_string(position, 'sellOpenOrderNotional') marginMode = self.safe_string(position, 'marginType') collateral = None if marginMode == 'isolated': collateral = self.safe_string(position, 'isolatedMargin') return self.safe_position({ 'info': position, 'id': None, 'symbol': market['symbol'], 'notional': self.parse_number(notional), 'marginMode': marginMode, 'liquidationPrice': None, 'entryPrice': self.safe_number(position, 'avgOpenPrice'), 'unrealizedPnl': self.safe_number(position, 'unrealizedPnl'), 'percentage': None, 'contracts': self.safe_number(position, 'position'), 'contractSize': self.safe_number(market, 'contractSize'), 'markPrice': self.safe_number(position, 'markPrice'), 'lastPrice': None, 'side': self.safe_string_lower(position, 'side'), 'hedged': None, 'timestamp': None, 'datetime': None, 'lastUpdateTimestamp': None, 'maintenanceMargin': None, 'maintenanceMarginPercentage': None, 'collateral': collateral, 'initialMargin': None, 'initialMarginPercentage': None, 'leverage': self.safe_integer(position, 'leverage'), 'marginRatio': None, 'stopLossPrice': self.safe_number(position, 'stopLossPrice'), 'takeProfitPrice': self.safe_number(position, 'takeProfitPrice'), }) def parse_funding_rate(self, contract, market=None): # # { # "time": 1640061364830, # "symbol": "EOS-PERP", # "markPrice": "3.353854865", # "indexPrice": "3.3542", # "openInterest": "14242", # 
"fundingRate": "-0.000073026", # "nextFundingTime": 1640073600000 # } # marketId = self.safe_string(contract, 'symbol') symbol = self.safe_symbol(marketId, market) currentTime = self.safe_integer(contract, 'time') nextFundingRate = self.safe_number(contract, 'fundingRate') nextFundingRateTimestamp = self.safe_integer(contract, 'nextFundingTime') return { 'info': contract, 'symbol': symbol, 'markPrice': self.safe_number(contract, 'markPrice'), 'indexPrice': self.safe_number(contract, 'indexPrice'), 'interestRate': self.parse_number('0'), 'estimatedSettlePrice': None, 'timestamp': currentTime, 'datetime': self.iso8601(currentTime), 'previousFundingRate': None, 'nextFundingRate': None, 'previousFundingTimestamp': None, 'nextFundingTimestamp': None, 'previousFundingDatetime': None, 'nextFundingDatetime': None, 'fundingRate': nextFundingRate, 'fundingTimestamp': nextFundingRateTimestamp, 'fundingDatetime': self.iso8601(nextFundingRateTimestamp), } def fetch_funding_rates(self, symbols: Optional[List[str]] = None, params={}): """ fetch the funding rate for multiple markets :param str[]|None symbols: list of unified market symbols :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a dictionary of `funding rates structures <https://github.com/ccxt/ccxt/wiki/Manual#funding-rates-structure>`, indexe by market symbols """ self.load_markets() symbols = self.market_symbols(symbols) response = self.v2PublicGetFuturesPricingData(params) # # { # "code": 0, # "data": { # "contracts": [ # { # "time": 1640061364830, # "symbol": "EOS-PERP", # "markPrice": "3.353854865", # "indexPrice": "3.3542", # "openInterest": "14242", # "fundingRate": "-0.000073026", # "nextFundingTime": 1640073600000 # }, # ], # "collaterals": [ # { # "asset": "USDTR", # "referencePrice": "1" # }, # ] # } # } # data = self.safe_value(response, 'data', {}) contracts = self.safe_value(data, 'contracts', []) result = self.parse_funding_rates(contracts) return 
self.filter_by_array(result, 'symbol', symbols) def modify_margin_helper(self, symbol: str, amount, type, params={}): self.load_markets() self.load_accounts() market = self.market(symbol) account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_string(account, 'id') amount = self.amount_to_precision(symbol, amount) request = { 'account-group': accountGroup, 'symbol': market['id'], 'amount': amount, # positive value for adding margin, negative for reducing } response = self.v2PrivateAccountGroupPostFuturesIsolatedPositionMargin(self.extend(request, params)) # # Can only change margin for perpetual futures isolated margin positions # # { # "code": 0 # } # if type == 'reduce': amount = Precise.string_abs(amount) return self.extend(self.parse_margin_modification(response, market), { 'amount': self.parse_number(amount), 'type': type, }) def parse_margin_modification(self, data, market=None): errorCode = self.safe_string(data, 'code') status = 'ok' if (errorCode == '0') else 'failed' return { 'info': data, 'type': None, 'amount': None, 'code': market['quote'], 'symbol': market['symbol'], 'status': status, } def reduce_margin(self, symbol: str, amount, params={}): """ remove margin from a position :param str symbol: unified market symbol :param float amount: the amount of margin to remove :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a `margin structure <https://github.com/ccxt/ccxt/wiki/Manual#reduce-margin-structure>` """ return self.modify_margin_helper(symbol, amount, 'reduce', params) def add_margin(self, symbol: str, amount, params={}): """ add margin :param str symbol: unified market symbol :param float amount: amount of margin to add :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a `margin structure <https://github.com/ccxt/ccxt/wiki/Manual#add-margin-structure>` """ return self.modify_margin_helper(symbol, amount, 'add', params) def set_leverage(self, 
leverage, symbol: Optional[str] = None, params={}): """ set the level of leverage for a market :param float leverage: the rate of leverage :param str symbol: unified market symbol :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: response from the exchange """ if symbol is None: raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument') if (leverage < 1) or (leverage > 100): raise BadRequest(self.id + ' leverage should be between 1 and 100') self.load_markets() self.load_accounts() market = self.market(symbol) if market['type'] != 'future': raise BadSymbol(self.id + ' setLeverage() supports futures contracts only') account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_string(account, 'id') request = { 'account-group': accountGroup, 'symbol': market['id'], 'leverage': leverage, } return self.v2PrivateAccountGroupPostFuturesLeverage(self.extend(request, params)) def set_margin_mode(self, marginMode, symbol: Optional[str] = None, params={}): """ set margin mode to 'cross' or 'isolated' :param str marginMode: 'cross' or 'isolated' :param str symbol: unified market symbol :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: response from the exchange """ marginMode = marginMode.lower() if marginMode == 'cross': marginMode = 'crossed' if marginMode != 'isolated' and marginMode != 'crossed': raise BadRequest(self.id + ' setMarginMode() marginMode argument should be isolated or cross') self.load_markets() self.load_accounts() market = self.market(symbol) account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_string(account, 'id') request = { 'account-group': accountGroup, 'symbol': market['id'], 'marginMode': marginMode, } if market['type'] != 'future': raise BadSymbol(self.id + ' setMarginMode() supports futures contracts only') return self.v2PrivateAccountGroupPostFuturesMarginType(self.extend(request, params)) def 
fetch_leverage_tiers(self, symbols: Optional[List[str]] = None, params={}): """ retrieve information on the maximum leverage, and maintenance margin for trades of varying trade sizes :param str[]|None symbols: list of unified market symbols :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a dictionary of `leverage tiers structures <https://github.com/ccxt/ccxt/wiki/Manual#leverage-tiers-structure>`, indexed by market symbols """ self.load_markets() response = self.v2PublicGetFuturesContract(params) # # { # "code":0, # "data":[ # { # "symbol":"BTC-PERP", # "status":"Normal", # "displayName":"BTCUSDT", # "settlementAsset":"USDT", # "underlying":"BTC/USDT", # "tradingStartTime":1579701600000, # "priceFilter":{"minPrice":"1","maxPrice":"1000000","tickSize":"1"}, # "lotSizeFilter":{"minQty":"0.0001","maxQty":"1000000000","lotSize":"0.0001"}, # "commissionType":"Quote", # "commissionReserveRate":"0.001", # "marketOrderPriceMarkup":"0.03", # "marginRequirements":[ # {"positionNotionalLowerBound":"0","positionNotionalUpperBound":"50000","initialMarginRate":"0.01","maintenanceMarginRate":"0.006"}, # {"positionNotionalLowerBound":"50000","positionNotionalUpperBound":"200000","initialMarginRate":"0.02","maintenanceMarginRate":"0.012"}, # {"positionNotionalLowerBound":"200000","positionNotionalUpperBound":"2000000","initialMarginRate":"0.04","maintenanceMarginRate":"0.024"}, # {"positionNotionalLowerBound":"2000000","positionNotionalUpperBound":"20000000","initialMarginRate":"0.1","maintenanceMarginRate":"0.06"}, # {"positionNotionalLowerBound":"20000000","positionNotionalUpperBound":"40000000","initialMarginRate":"0.2","maintenanceMarginRate":"0.12"}, # {"positionNotionalLowerBound":"40000000","positionNotionalUpperBound":"1000000000","initialMarginRate":"0.333333","maintenanceMarginRate":"0.2"} # ] # } # ] # } # data = self.safe_value(response, 'data') symbols = self.market_symbols(symbols) return self.parse_leverage_tiers(data, 
symbols, 'symbol') def parse_market_leverage_tiers(self, info, market=None): """ :param dict info: Exchange market response for 1 market :param dict market: CCXT market """ # # { # "symbol":"BTC-PERP", # "status":"Normal", # "displayName":"BTCUSDT", # "settlementAsset":"USDT", # "underlying":"BTC/USDT", # "tradingStartTime":1579701600000, # "priceFilter":{"minPrice":"1","maxPrice":"1000000","tickSize":"1"}, # "lotSizeFilter":{"minQty":"0.0001","maxQty":"1000000000","lotSize":"0.0001"}, # "commissionType":"Quote", # "commissionReserveRate":"0.001", # "marketOrderPriceMarkup":"0.03", # "marginRequirements":[ # {"positionNotionalLowerBound":"0","positionNotionalUpperBound":"50000","initialMarginRate":"0.01","maintenanceMarginRate":"0.006"}, # {"positionNotionalLowerBound":"50000","positionNotionalUpperBound":"200000","initialMarginRate":"0.02","maintenanceMarginRate":"0.012"}, # {"positionNotionalLowerBound":"200000","positionNotionalUpperBound":"2000000","initialMarginRate":"0.04","maintenanceMarginRate":"0.024"}, # {"positionNotionalLowerBound":"2000000","positionNotionalUpperBound":"20000000","initialMarginRate":"0.1","maintenanceMarginRate":"0.06"}, # {"positionNotionalLowerBound":"20000000","positionNotionalUpperBound":"40000000","initialMarginRate":"0.2","maintenanceMarginRate":"0.12"}, # {"positionNotionalLowerBound":"40000000","positionNotionalUpperBound":"1000000000","initialMarginRate":"0.333333","maintenanceMarginRate":"0.2"} # ] # } # marginRequirements = self.safe_value(info, 'marginRequirements', []) id = self.safe_string(info, 'symbol') market = self.safe_market(id, market) tiers = [] for i in range(0, len(marginRequirements)): tier = marginRequirements[i] initialMarginRate = self.safe_string(tier, 'initialMarginRate') tiers.append({ 'tier': self.sum(i, 1), 'currency': market['quote'], 'minNotional': self.safe_number(tier, 'positionNotionalLowerBound'), 'maxNotional': self.safe_number(tier, 'positionNotionalUpperBound'), 'maintenanceMarginRate': 
self.safe_number(tier, 'maintenanceMarginRate'), 'maxLeverage': self.parse_number(Precise.string_div('1', initialMarginRate)), 'info': tier, }) return tiers def parse_deposit_withdraw_fee(self, fee, currency=None): # # { # "assetCode": "USDT", # "assetName": "Tether", # "precisionScale": 9, # "nativeScale": 4, # "blockChain": [ # { # "chainName": "Omni", # "withdrawFee": "30.0", # "allowDeposit": True, # "allowWithdraw": True, # "minDepositAmt": "0.0", # "minWithdrawal": "50.0", # "numConfirmations": 3 # }, # ] # } # blockChains = self.safe_value(fee, 'blockChain', []) blockChainsLength = len(blockChains) result = { 'info': fee, 'withdraw': { 'fee': None, 'percentage': None, }, 'deposit': { 'fee': None, 'percentage': None, }, 'networks': {}, } for i in range(0, blockChainsLength): blockChain = blockChains[i] networkId = self.safe_string(blockChain, 'chainName') currencyCode = self.safe_string(currency, 'code') networkCode = self.network_id_to_code(networkId, currencyCode) result['networks'][networkCode] = { 'deposit': {'fee': None, 'percentage': None}, 'withdraw': {'fee': self.safe_number(blockChain, 'withdrawFee'), 'percentage': False}, } if blockChainsLength == 1: result['withdraw']['fee'] = self.safe_number(blockChain, 'withdrawFee') result['withdraw']['percentage'] = False return result def fetch_deposit_withdraw_fees(self, codes: Optional[List[str]] = None, params={}): """ fetch deposit and withdraw fees see https://ascendex.github.io/ascendex-pro-api/#list-all-assets :param str[]|None codes: list of unified currency codes :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a list of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` """ self.load_markets() response = self.v2PublicGetAssets(params) data = self.safe_value(response, 'data') return self.parse_deposit_withdraw_fees(data, codes, 'assetCode') def transfer(self, code: str, amount, fromAccount, toAccount, params={}): """ transfer 
currency internally between wallets on the same account :param str code: unified currency code :param float amount: amount to transfer :param str fromAccount: account to transfer from :param str toAccount: account to transfer to :param dict [params]: extra parameters specific to the ascendex api endpoint :returns dict: a `transfer structure <https://github.com/ccxt/ccxt/wiki/Manual#transfer-structure>` """ self.load_markets() self.load_accounts() account = self.safe_value(self.accounts, 0, {}) accountGroup = self.safe_string(account, 'id') currency = self.currency(code) amount = self.currency_to_precision(code, amount) accountsByType = self.safe_value(self.options, 'accountsByType', {}) fromId = self.safe_string(accountsByType, fromAccount, fromAccount) toId = self.safe_string(accountsByType, toAccount, toAccount) if fromId != 'cash' and toId != 'cash': raise ExchangeError(self.id + ' transfer() only supports direct balance transfer between spot and future, spot and margin') request = { 'account-group': accountGroup, 'amount': amount, 'asset': currency['id'], 'fromAccount': fromId, 'toAccount': toId, } response = self.v1PrivateAccountGroupPostTransfer(self.extend(request, params)) # # {code: '0'} # transferOptions = self.safe_value(self.options, 'transfer', {}) fillResponseFromRequest = self.safe_value(transferOptions, 'fillResponseFromRequest', True) transfer = self.parse_transfer(response, currency) if fillResponseFromRequest: transfer['fromAccount'] = fromAccount transfer['toAccount'] = toAccount transfer['amount'] = amount transfer['currency'] = code return transfer def parse_transfer(self, transfer, currency=None): # # {code: '0'} # status = self.safe_integer(transfer, 'code') currencyCode = self.safe_currency_code(None, currency) timestamp = self.milliseconds() return { 'info': transfer, 'id': None, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'currency': currencyCode, 'amount': None, 'fromAccount': None, 'toAccount': None, 'status': 
self.parse_transfer_status(status), } def parse_transfer_status(self, status): if status == 0: return 'ok' return 'failed' def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): version = api[0] access = api[1] type = self.safe_string(api, 2) url = '' accountCategory = (type == 'accountCategory') if accountCategory or (type == 'accountGroup'): url += self.implode_params('/{account-group}', params) params = self.omit(params, 'account-group') request = self.implode_params(path, params) url += '/api/pro/' if version == 'v2': if type == 'data': request = 'data/' + version + '/' + request else: request = version + '/' + request else: url += version + '/' if accountCategory: url += self.implode_params('{account-category}/', params) params = self.omit(params, 'account-category') url += request if (version == 'v1') and (request == 'cash/balance') or (request == 'margin/balance'): request = 'balance' if (version == 'v1') and (request == 'spot/fee'): request = 'fee' if request.find('subuser') >= 0: parts = request.split('/') request = parts[2] params = self.omit(params, self.extract_params(path)) if access == 'public': if params: url += '?' + self.urlencode(params) else: self.check_required_credentials() timestamp = str(self.milliseconds()) payload = timestamp + '+' + request hmac = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64') headers = { 'x-auth-key': self.apiKey, 'x-auth-timestamp': timestamp, 'x-auth-signature': hmac, } if method == 'GET': if params: url += '?' 
+ self.urlencode(params) else: headers['Content-Type'] = 'application/json' body = self.json(params) url = self.urls['api']['rest'] + url return {'url': url, 'method': method, 'body': body, 'headers': headers} def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody): if response is None: return None # fallback to default error handler # # {'code': 6010, 'message': 'Not enough balance.'} # {'code': 60060, 'message': 'The order is already filled or canceled.'} # {"code":2100,"message":"ApiKeyFailure"} # {"code":300001,"message":"Price is too low from market price.","reason":"INVALID_PRICE","accountId":"cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda","ac":"CASH","action":"place-order","status":"Err","info":{"symbol":"BTC/USDT"}} # code = self.safe_string(response, 'code') message = self.safe_string(response, 'message') error = (code is not None) and (code != '0') if error or (message is not None): feedback = self.id + ' ' + body self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback) self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback) self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback) raise ExchangeError(feedback) # unknown message return None
[ "travis@travis-ci.org" ]
travis@travis-ci.org
13ab0721b3a33f3abbaaf46d0378e8b4649ba27f
d1f15554df2d5c0f74ddbcba6e870359841f682b
/wagtail/migrations/0057_page_locale_fields_notnull.py
8f18589b5c9f794cba254c26312dd2d73645c5f1
[ "BSD-3-Clause", "LicenseRef-scancode-proprietary-license" ]
permissive
wagtail/wagtail
bd405f89b86e0c625fef0685fd6bfba41cf5cbfc
06a7bc6124bf62675c09fbe0a4ed9bbac183e025
refs/heads/main
2023-09-04T06:22:51.601208
2023-09-01T15:22:00
2023-09-01T15:22:00
16,479,108
12,974
3,580
BSD-3-Clause
2023-09-14T10:45:04
2014-02-03T12:41:59
Python
UTF-8
Python
false
false
793
py
# Generated by Django 2.2.10 on 2020-07-13 10:17 from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): dependencies = [ ("wagtailcore", "0056_page_locale_fields_populate"), ] operations = [ migrations.AlterField( model_name="page", name="locale", field=models.ForeignKey( editable=False, on_delete=django.db.models.deletion.PROTECT, related_name="+", to="wagtailcore.Locale", ), ), migrations.AlterField( model_name="page", name="translation_key", field=models.UUIDField(default=uuid.uuid4, editable=False), ), ]
[ "matt@west.co.tt" ]
matt@west.co.tt
7825bb5c5c917cbc130aad94579c3f872c27d21d
c62e851ae0105122743e81228645a26a0f4d99cb
/venv/Scripts/mailmail-script.py
1aa1fa55f21ca5a7987e9cb10beba738f6c43f22
[]
no_license
prego123/covid19
7e0fce59cf1a1cd738538cc3f3ca5e25892eb1c9
0b8bbc448b5ea7e80b7fc4d9aa707b4090c92562
refs/heads/master
2022-12-11T12:29:22.203537
2020-09-07T13:56:31
2020-09-07T13:56:31
263,549,955
0
0
null
null
null
null
UTF-8
Python
false
false
442
py
#!"C:\Users\PRAGATI RANA\PycharmProjects\covid19\venv\Scripts\python.exe" # EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==20.3.0','console_scripts','mailmail' __requires__ = 'Twisted==20.3.0' import re import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit( load_entry_point('Twisted==20.3.0', 'console_scripts', 'mailmail')() )
[ "shalu.pragati11@gmail.com" ]
shalu.pragati11@gmail.com
11a0c03c4815f3950a7dfa818e49110b487ad3b3
6aceb7ec64fc30ab283adb5d365dc444604c7b8e
/day03/ex08/test.py
74aa98fb8d4b09045dbab9c1497a028e67556b4c
[]
no_license
tillderoquefeuil-42-ai/bootcamp-ml
727f2839c14ad8377291bd36875be98cd915e589
c16f27bcd948a3e4477e8905f8f2c7449461d444
refs/heads/master
2022-06-06T03:25:36.563108
2020-05-05T14:56:15
2020-05-05T14:56:15
null
0
0
null
null
null
null
UTF-8
Python
false
false
764
py
import numpy as np from vec_log_gradient import vec_log_gradient # Example 1: y1 = np.array([1]) x1 = np.array([4]) theta1 = np.array([[2], [0.5]]) print(vec_log_gradient(x1, y1, theta1)) # Output: # array([[-0.01798621], [-0.07194484]]) # Example 2: y2 = np.array([[1], [0], [1], [0], [1]]) x2 = np.array([[4], [7.16], [3.2], [9.37], [0.56]]) theta2 = np.array([[2], [0.5]]) print(vec_log_gradient(x2, y2, theta2)) # Output: # array([[0.3715235 ], [3.25647547]]) # Example 3: y3 = np.array([[0], [1], [1]]) x3 = np.array([[0, 2, 3, 4], [2, 4, 5, 5], [1, 3, 2, 7]]) theta3 = np.array([[-2.4], [-1.5], [0.3], [-1.4], [0.7]]) print(vec_log_gradient(x3, y3, theta3)) # Output: # array([[-0.55711039], [-0.90334809], [-2.01756886], [-2.10071291], [-3.27257351]])
[ "tillderoquefeuil@MacBook-Pro-de-Till.local" ]
tillderoquefeuil@MacBook-Pro-de-Till.local
36a9049f82e24664d1a0b62af74fe1929ec91228
8571ed40a006e8b5917a9b3effa4c9f0450370e2
/product/migrations/0012_auto_20200516_0529.py
220051405f2a7a246188478847a8542b618397f2
[]
no_license
suleyiilmaz/DjangoProject
c3641e5f9c65ea4414ed2ea446557386fc7c018d
ab117cc12695270d01cbe884719f59e8de5b6748
refs/heads/master
2022-11-02T02:59:56.146833
2020-06-16T16:12:49
2020-06-16T16:12:49
251,582,358
0
0
null
null
null
null
UTF-8
Python
false
false
402
py
# Generated by Django 3.0.4 on 2020-05-16 02:29 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('product', '0011_remove_comment_rate'), ] operations = [ migrations.AlterField( model_name='comment', name='comment', field=models.TextField(blank=True, max_length=255), ), ]
[ "suleyilmaz10@gmail.com" ]
suleyilmaz10@gmail.com
bd7a98f7bf0dd46bd79a0c4e1cc4f7e81129c5f8
186ebaa705c6348c72f5a4f3939fead2d01b6193
/Manifold_Learning.py
3f90db27ccb097edce55e3b26dd3446709cd2798
[]
no_license
seanco-hash/Manifold_Learning
d51322238d94eed5e104bff2591f836f429e9b91
eb77be236b5bbb0eb1073683101b2cf2a448b01c
refs/heads/main
2023-01-22T15:09:53.199064
2020-12-05T22:17:55
2020-12-05T22:17:55
317,980,530
0
0
null
null
null
null
UTF-8
Python
false
false
4,259
py
import pickle import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from mpl_toolkits.mplot3d import Axes3D def digits_example(): ''' Example code to show you how to load the MNIST data and plot it. ''' # load the MNIST data: digits = datasets.load_digits() data = digits.data / 255. labels = digits.target # plot examples: plt.gray() for i in range(10): plt.subplot(2, 5, i+1) plt.axis('off') plt.imshow(np.reshape(data[i, :], (8, 8))) plt.title("Digit " + str(labels[i])) plt.show() def swiss_roll_example(): ''' Example code to show you how to load the swiss roll data and plot it. ''' # load the dataset: X, color = datasets.samples_generator.make_swiss_roll(n_samples=2000) # plot the data: fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral) plt.show() def faces_example(path): ''' Example code to show you how to load the faces data. ''' with open(path, 'rb') as f: X = pickle.load(f) num_images, num_pixels = np.shape(X) d = int(num_pixels**0.5) print("The number of images in the data set is " + str(num_images)) print("The image size is " + str(d) + " by " + str(d)) # plot some examples of faces: plt.gray() for i in range(4): plt.subplot(2, 2, i+1) plt.imshow(np.reshape(X[i, :], (d, d))) plt.show() def plot_with_images(X, images, title, image_num=25): ''' A plot function for viewing images in their embedded locations. The function receives the embedding (X) and the original images (images) and plots the images along with the embeddings. :param X: Nxd embedding matrix (after dimensionality reduction). :param images: NxD original data matrix of images. :param title: The title of the plot. :param num_to_plot: Number of images to plot along with the scatter plot. :return: the figure object. 
''' n, pixels = np.shape(images) img_size = int(pixels**0.5) fig = plt.figure() ax = fig.add_subplot(111) ax.set_title(title) # get the size of the embedded images for plotting: x_size = (max(X[:, 0]) - min(X[:, 0])) * 0.08 y_size = (max(X[:, 1]) - min(X[:, 1])) * 0.08 # draw random images and plot them in their relevant place: for i in range(image_num): img_num = np.random.choice(n) x0, y0 = X[img_num, 0] - x_size / 2., X[img_num, 1] - y_size / 2. x1, y1 = X[img_num, 0] + x_size / 2., X[img_num, 1] + y_size / 2. img = images[img_num, :].reshape(img_size, img_size) ax.imshow(img, aspect='auto', cmap=plt.cm.gray, zorder=100000, extent=(x0, x1, y0, y1)) # draw the scatter plot of the embedded data points: ax.scatter(X[:, 0], X[:, 1], marker='.', alpha=0.7) return fig def MDS(X, d): ''' Given a NxN pairwise distance matrix and the number of desired dimensions, return the dimensionally reduced data points matrix after using MDS. :param X: NxN distance matrix. :param d: the dimension. :return: Nxd reduced data point matrix. ''' # TODO: YOUR CODE HERE pass def LLE(X, d, k): ''' Given a NxD data matrix, return the dimensionally reduced data matrix after using the LLE algorithm. :param X: NxD data matrix. :param d: the dimension. :param k: the number of neighbors for the weight extraction. :return: Nxd reduced data matrix. ''' # TODO: YOUR CODE HERE pass def DiffusionMap(X, d, sigma, t): ''' Given a NxD data matrix, return the dimensionally reduced data matrix after using the Diffusion Map algorithm. The k parameter allows restricting the kernel matrix to only the k nearest neighbor of each data point. :param X: NxD data matrix. :param d: the dimension. :param sigma: the sigma of the gaussian for the kernel matrix transformation. :param t: the scale of the diffusion (amount of time steps). :return: Nxd reduced data matrix. ''' # TODO: YOUR CODE HERE pass if __name__ == '__main__': # TODO: YOUR CODE HERE pass
[ "shon.cohen@cs.huji.ac.il" ]
shon.cohen@cs.huji.ac.il
a00510b3a26014c9a90a8a21e7190913418962e0
79d4af804b4667c36d040a244644874739673658
/notebook/day10/polymorphism/monster.py
a3b0e83035ac6e20ebbef13d013c40b426be82f7
[]
no_license
thenry0401/css-python
452573cee63da7bea3cd6ce0cd85c80d52606be1
b9a8bd0fe2e7640d558677ed81759ec83ea8bc76
refs/heads/master
2021-01-23T07:20:54.544842
2017-05-01T06:52:57
2017-05-01T06:52:57
86,420,296
2
0
null
null
null
null
UTF-8
Python
false
false
647
py
# coding: utf-8 # In[1]: from character import Character # In[ ]: class IceMonster(Character): def get_damaged(self, attack_power, attack_kind): if attack_kind == "ICE": self.hp += attack_power else: self.hp -= attack_power def __str__(self): return "Ice Monster's HP : {}".format(self.hp) class FireMonster(Character): def get_damaged(self, attack_power, attack_kind): if attack_kind == "FIRE": self.hp += attack_power else: self.hp -= attack_power def __str__(self): return "Fire Monster's HP : {}".format(self.hp)
[ "thenry0401@gmail.com" ]
thenry0401@gmail.com
ba973d04d86b07a1aab5e135cd94a7c135c128c7
f6fd01eaa74ace15ffc085065a51681565bf2740
/env/bin/odfmeta
3e4ef655ec6b0eda94a09a55ca27f27010333967
[]
no_license
emmashen6786/api_test
d05c89a718b7441bb7f4d099792f873afa782056
51486a4c05548b410e360777c2e93d1c954bfa06
refs/heads/master
2020-05-15T03:10:40.762906
2019-04-19T02:25:08
2019-04-19T02:25:08
182,062,758
0
0
null
null
null
null
UTF-8
Python
false
false
8,482
#!/Users/shuiguowei/Practice/api_test/env/bin/python3.6 # -*- coding: utf-8 -*- # Copyright (C) 2006-2009 Søren Roug, European Environment Agency # # This is free software. You may redistribute it under the terms # of the Apache license and the GNU General Public License Version # 2 or at your option any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Contributor(s): # import zipfile, time, sys, getopt, re import xml.sax, xml.sax.saxutils from odf.namespaces import TOOLSVERSION, OFFICENS, XLINKNS, DCNS, METANS from io import BytesIO OUTENCODING="utf-8" whitespace = re.compile(r'\s+') fields = { 'title': (DCNS,u'title'), 'description': (DCNS,u'description'), 'subject': (DCNS,u'subject'), 'creator': (DCNS,u'creator'), 'date': (DCNS,u'date'), 'language': (DCNS,u'language'), 'generator': (METANS,u'generator'), 'initial-creator': (METANS,u'initial-creator'), 'keyword': (METANS,u'keyword'), 'editing-duration': (METANS,u'editing-duration'), 'editing-cycles': (METANS,u'editing-cycles'), 'printed-by': (METANS,u'printed-by'), 'print-date': (METANS,u'print-date'), 'creation-date': (METANS,u'creation-date'), 'user-defined': (METANS,u'user-defined'), #'template': (METANS,u'template'), } xfields = [] Xfields = [] addfields = {} deletefields = {} yieldfields = {} showversion = None def exitwithusage(exitcode=2): """ print out usage information """ sys.stderr.write("Usage: %s [-cdlvV] [-xXaAI metafield]... 
[-o output] [inputfile]\n" % sys.argv[0]) sys.stderr.write("\tInputfile must be OpenDocument format\n") sys.exit(exitcode) def normalize(str): """ The normalize-space function returns the argument string with whitespace normalized by stripping leading and trailing whitespace and replacing sequences of whitespace characters by a single space. """ return whitespace.sub(' ', str).strip() class MetaCollector: """ The MetaCollector is a pseudo file object, that can temporarily ignore write-calls It could probably be replaced with a StringIO object. """ def __init__(self): self._content = [] self.dowrite = True def write(self, str): if self.dowrite: self._content.append(str) def content(self): return ''.join(self._content) base = xml.sax.saxutils.XMLGenerator class odfmetaparser(base): """ Parse a meta.xml file with an event-driven parser and replace elements. It would probably be a cleaner approach to use a DOM based parser and then manipulate in memory. Small issue: Reorders elements """ version = 'Unknown' def __init__(self): self._mimetype = '' self.output = MetaCollector() self._data = [] self.seenfields = {} base.__init__(self, self.output, OUTENCODING) def startElementNS(self, name, qname, attrs): self._data = [] field = name # I can't modify the template until the tool replaces elements at the same # location and not at the end # if name == (METANS,u'template'): # self._data = [attrs.get((XLINKNS,u'title'),'')] if showversion and name == (OFFICENS,u'document-meta'): if showversion == '-V': print ("version:%s" % attrs.get((OFFICENS,u'version'),'Unknown').decode('utf-8')) else: print ("%s" % attrs.get((OFFICENS,u'version'),'Unknown').decode('utf-8')) if name == (METANS,u'user-defined'): field = attrs.get((METANS,u'name')) if field in deletefields: self.output.dowrite = False elif field in yieldfields: del addfields[field] base.startElementNS(self, name, qname, attrs) else: base.startElementNS(self, name, qname, attrs) self._tag = field def endElementNS(self, name, 
qname): field = name if name == (METANS,u'user-defined'): field = self._tag if name == (OFFICENS,u'meta'): for k,v in addfields.items(): if len(v) > 0: if type(k) == type(''): base.startElementNS(self,(METANS,u'user-defined'),None,{(METANS,u'name'):k}) base.characters(self, v) base.endElementNS(self, (METANS,u'user-defined'),None) else: base.startElementNS(self, k, None, {}) base.characters(self, v) base.endElementNS(self, k, None) if name in xfields: print ("%s" % self.data()) if name in Xfields: if isinstance(self._tag, tuple): texttag = self._tag[1] else: texttag = self._tag print ("%s:%s" % (texttag, self.data())) if field in deletefields: self.output.dowrite = True else: base.endElementNS(self, name, qname) def characters(self, content): base.characters(self, content) self._data.append(content) def meta(self): return self.output.content() def data(self): if usenormalize: return normalize(''.join(self._data)) else: return ''.join(self._data) now = time.localtime()[:6] outputfile = "-" writemeta = False # Do we change any meta data? usenormalize = False try: opts, args = getopt.getopt(sys.argv[1:], "cdlvVI:A:a:o:x:X:") except getopt.GetoptError: exitwithusage() if len(opts) == 0: opts = [ ('-l','') ] for o, a in opts: if o in ('-a','-A','-I'): writemeta = True if a.find(":") >= 0: k,v = a.split(":",1) else: k,v = (a, "") if len(k) == 0: exitwithusage() k = fields.get(k,k) addfields[k] = unicode(v,'utf-8') if o == '-a': yieldfields[k] = True if o == '-I': deletefields[k] = True if o == '-d': writemeta = True addfields[(DCNS,u'date')] = "%04d-%02d-%02dT%02d:%02d:%02d" % now deletefields[(DCNS,u'date')] = True if o == '-c': usenormalize = True if o in ('-v', '-V'): showversion = o if o == '-l': Xfields = fields.values() if o == "-x": xfields.append(fields.get(a,a)) if o == "-X": Xfields.append(fields.get(a,a)) if o == "-o": outputfile = a # The specification says we should change the element to our own, # and must not export the original identifier. 
if writemeta: addfields[(METANS,u'generator')] = TOOLSVERSION deletefields[(METANS,u'generator')] = True odfs = odfmetaparser() parser = xml.sax.make_parser() parser.setFeature(xml.sax.handler.feature_namespaces, 1) parser.setContentHandler(odfs) if len(args) == 0: zin = zipfile.ZipFile(sys.stdin,'r') else: if not zipfile.is_zipfile(args[0]): exitwithusage() zin = zipfile.ZipFile(args[0], 'r') try: content = zin.read('meta.xml').decode('utf-8') except: sys.stderr.write("File has no meta data\n") sys.exit(1) parser.parse(BytesIO(content.encode('utf-8'))) if writemeta: if outputfile == '-': if sys.stdout.isatty(): sys.stderr.write("Won't write ODF file to terminal\n") sys.exit(1) zout = zipfile.ZipFile(sys.stdout,"w") else: zout = zipfile.ZipFile(outputfile,"w") # Loop through the input zipfile and copy the content to the output until we # get to the meta.xml. Then substitute. for zinfo in zin.infolist(): if zinfo.filename == "meta.xml": # Write meta zi = zipfile.ZipInfo("meta.xml", now) zi.compress_type = zipfile.ZIP_DEFLATED zout.writestr(zi,odfs.meta() ) else: payload = zin.read(zinfo.filename) zout.writestr(zinfo, payload) zout.close() zin.close() # Local Variables: *** # mode: python *** # End: ***
[ "shanshan.shen@dianrong.com" ]
shanshan.shen@dianrong.com
b9fa106f81e4698d993fcb57cb93fae628641352
206f8e915a4f3964605ceb62f6c1911534bfd286
/negentweeapi/status.py
8eca5b2b8088cc05ea378b275bfc8dd2f473d7d6
[ "Apache-2.0" ]
permissive
PythonWrappers/9292API
6c37d61fe6e1b00c32a5c8acd163000a62a6b9f4
50034c9ebc0bf51d8b06bb9d44267c7c602418f1
refs/heads/master
2020-03-19T15:23:02.814336
2018-06-10T12:26:39
2018-06-10T12:26:39
136,668,559
0
0
null
null
null
null
UTF-8
Python
false
false
843
py
import requests from .settings import * from datetime import datetime from typing import Tuple def get_data_date_range() -> Tuple[datetime, datetime]: """ Gets the current from/to date available in the 9292 API :return: a tuple from a from and to date """ url = "{0}/{1}/status?lang={2}".format(URL, APIVERSION, LANG) data = requests.get(url).json() from_date = datetime.strptime(data["dateRange"]["from"], "%Y-%m-%d") to_date = datetime.strptime(data["dateRange"]["to"], "%Y-%m-%d") return from_date, to_date def get_api_version() -> str: """ Gets the current version of the 9292 API :return: A string which contains the current version of the 9292 API """ url = "{0}/{1}/status?lang={2}".format(URL, APIVERSION, LANG) data = requests.get(url).json() return data["version"]
[ "hylcos@gmail.com" ]
hylcos@gmail.com
116f88df4dcd50e7ef212af6460ce4d833d8a457
d3d40ddac839ea831a09dc380287f1278eac229d
/er_graphs/100/0.05/central2b.py
458ab03d09fa58e42fc54ea80f2d2002c3363803
[]
no_license
AkshayViru/Anchor-Node-Finder
c0b0400c4b95fe82266bf779ab2747f1f42949dc
ed569812b3313560fa10759f9b544a105f39d206
refs/heads/main
2023-07-03T11:21:00.178236
2021-08-14T16:15:14
2021-08-14T16:15:14
395,999,740
0
0
null
null
null
null
UTF-8
Python
false
false
1,389
py
from __future__ import division import networkx as nx graph_num=1 probab=0.05 f_out= open("central2b.txt" ,"w+") while graph_num<=10: G= nx.Graph() f = open('100_%s_%s.txt' %(probab,graph_num)) edgelist = [] for line in f: coms= line.split() v1= int(coms[0]) v2= int(coms[1]) edgelist.append((v1,v2)) G.add_edges_from(edgelist) #removes duplicate list values def remove_duplicates(x): z = [x[0]] for i in range(1,len(x)): for y in range(0, i): if x[i] == x[y]: break else: z.append(x[i]) return z highest_dcentrality = 0 dselected_nodes = [] final_nodes=[] for i in range(100): print graph_num,i total_dcentrality=0 selected_nodes = [i] for j in G[i]: selected_nodes.append(j) for k in G[j]: selected_nodes.append(k) selected_nodes=remove_duplicates(selected_nodes) for j in selected_nodes: ltotal_dcentrality=[v for k, v in (nx.betweenness_centrality(G)).items() if k == j] total_dcentrality+=ltotal_dcentrality[0] avg_dcentrality=total_dcentrality/len(selected_nodes) if avg_dcentrality>=highest_dcentrality: highest_dcentrality=avg_dcentrality dselected_nodes.append((i,highest_dcentrality)) for x,y in dselected_nodes: if y==highest_dcentrality: final_nodes.append(x) graph_num+=1 degcen = (final_nodes, highest_dcentrality) f_out.write(str(degcen)+"\n")
[ "akshayknr7@gmail.com" ]
akshayknr7@gmail.com
7488e911c273b0557d8a935aec55fb2a6143dc75
123910a155522dc2c1fadc00eebfb0bc66b9067b
/day2.py
6a76091c1358b6d061f8488f57405db6775ab121
[]
no_license
cdjnzpy/mlearn
99860f87ed58dfeb06a7ab71f281189c129fa606
d791e74d54f4c0540ad5ab459b1491de8efc2392
refs/heads/master
2021-01-03T19:50:06.463279
2020-02-14T13:34:56
2020-02-14T13:34:56
240,214,401
1
0
null
null
null
null
UTF-8
Python
false
false
2,785
py
#encding:utf-8 #文本预处理部分 #读入文本 import collections import re import os path = os.path.dirname(__file__) def read_book(): with open(path+'\\JaneEyre.txt', 'r') as f: lines = [re.sub('[^a-z]+', ' ', line.strip().lower()) for line in f]#去除掉所有符号,只保留英文 return lines#输出为句子,列表 #随后进行分词 def char_s(lines,token='word'): if token == 'word': return [sentence.split(' ') for sentence in lines]#如果按照词语分,就按照空格分列 elif token == 'char': return [list(sentence) for sentence in lines]#按照字母分,直接返回各个字母(list(str)) else: print('ERROR: unkown token type '+token)#错误的选择 #建立字典储存 class Vocab(object): def __init__(self, tokens, min_freq=0, use_special_tokens=False): counter = count_corpus(tokens) # : self.token_freqs = list(counter.items()) self.idx_to_token = [] if use_special_tokens: # padding, begin of sentence, end of sentence, unknown self.pad, self.bos, self.eos, self.unk = (0, 1, 2, 3) self.idx_to_token += ['', '', '', ''] else: self.unk = 0 self.idx_to_token += [''] self.idx_to_token += [token for token, freq in self.token_freqs if freq >= min_freq and token not in self.idx_to_token] self.token_to_idx = dict() for idx, token in enumerate(self.idx_to_token): self.token_to_idx[token] = idx def __len__(self): return len(self.idx_to_token) def __getitem__(self, tokens): if not isinstance(tokens, (list, tuple)): return self.token_to_idx.get(tokens, self.unk) return [self.__getitem__(token) for token in tokens] def to_tokens(self, indices): if not isinstance(indices, (list, tuple)): return self.idx_to_token[indices] return [self.idx_to_token[index] for index in indices] def count_corpus(sentences): tokens = [tk for st in sentences for tk in st] return collections.Counter(tokens) # 返回一个字典,记录每个词的出现次数 #另外的建立字典方式 lines = read_book() words = (char_s(lines,token='word')) dirs={} for sen in words: for cha in sen: if cha == "": pass else: if cha in dirs: dirs[cha]+=1 else: dirs[cha]=1 print(dirs) #将单词转换为索引 ##现有的工具的分词 #中文jieba,snownlp #英文spaCY,NLTK import spacy #先变为spacy形式,然后采用x.text模式分词,类似BS中先soup再标签
[ "noreply@github.com" ]
noreply@github.com
bdbe6ea6340e8f88e7e25d5b64882ed8bd313b28
488825f206180a276a4dbf61ed85227d6eb791cf
/src/config/asgi.py
0b57e107471310680a0c77fa63612bdaeef0f579
[ "Unlicense" ]
permissive
MTES-MCT/trackdechets-cockpit
53acf96e79bcdb2a834f2c28114bbb1866a766e6
3624caf22882bd499dc7b22900e297adc6ca62d3
refs/heads/main
2023-08-04T10:15:17.613615
2022-01-03T22:16:07
2022-01-03T22:16:07
441,255,835
0
0
null
null
null
null
UTF-8
Python
false
false
391
py
""" ASGI config for cockpit project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cockpit.settings") application = get_asgi_application()
[ "lp@providenz.fr" ]
lp@providenz.fr
2c85a1f8ffc44e63f9aa0d6425da3c4b5f07c9c7
75a4767735a91dfd6e5f51964ddf3819a1cf0ab2
/Snake/snake.py
804c7ff68d6afd6edaca1a8e2acc6732676413cb
[]
no_license
louisguan/super-saiyan
8b71d937f7e002df4412554a4ba9b0b8418dd083
21da4f99defb5f32b32c052856ea682d807b81d6
refs/heads/master
2021-01-16T21:37:38.946000
2014-03-24T13:58:20
2014-03-24T13:58:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
10,081
py
import random import sys from Tkinter import * def keyPress(event): canvas = event.widget.canvas if (event.char == "q"): canvas.data['quit']=True elif (event.char == "r"): canvas.data['start']=False init(canvas) elif (event.keysym == 'space'): canvas.data['start']= not canvas.data['start'] timerFired(canvas) if (canvas.data["isGameOver"] == False) and (canvas.data['start']==True): if (event.keysym == "Up" or event.keysym =='w') and (canvas.data['snakeDrow']!=1): moveSnake(canvas, -1, 0) elif (event.keysym == "Down" or event.keysym =='s') and (canvas.data['snakeDrow']!=-1): moveSnake(canvas, +1, 0) elif (event.keysym == "Left" or event.keysym =='a') and (canvas.data['snakeDcol']!=1): moveSnake(canvas, 0,-1) elif (event.keysym == "Right" or event.keysym =='d') and (canvas.data['snakeDcol']!=-1): moveSnake(canvas, 0,+1) canvas.data['ignoreNextTimerEvent']=True redrawAll(canvas) def gameOver(canvas): saveHighscore(canvas) canvas.data["isGameOver"] = True def saveHighscore(canvas): f=open('snakesave.txt','r') previousscore=0 for x in f: previousscore=int(x) f.close() if previousscore<=canvas.data['highscore']: f=open('snakesave.txt','w') f.write(str(canvas.data['highscore'])) f.close() def moveSnake(canvas, drow, dcol): snakeBoard = canvas.data["snakeBoard"] rows = len(snakeBoard) cols = len(snakeBoard[0]) headRow= canvas.data["headRow"] headCol= canvas.data["headCol"] newHeadRow = headRow + drow newHeadCol = headCol + dcol canvas.data["snakeDrow"] = drow canvas.data["snakeDcol"] = dcol if newHeadRow<0 or newHeadCol<0 or newHeadRow>=rows or newHeadCol>=cols: gameOver(canvas) elif snakeBoard[newHeadRow][newHeadCol]>0: gameOver(canvas) elif snakeBoard[newHeadRow][newHeadCol]==-1: canvas.bell() canvas.data['snakelength']+=1 canvas.data['snakecolor'][canvas.data['snakelength']]=canvas.data['lastfoodcolor'] snakeBoard[newHeadRow][newHeadCol] = 1 + snakeBoard[headRow][headCol] canvas.data["headRow"] = newHeadRow canvas.data["headCol"] = newHeadCol placeFood(canvas) else: 
snakeBoard[newHeadRow][newHeadCol] = 1 + snakeBoard[headRow][headCol] canvas.data["headRow"] = newHeadRow canvas.data["headCol"] = newHeadCol removeTail(canvas) def removeTail(canvas): snakeBoard=canvas.data['snakeBoard'] rows = len(snakeBoard) cols = len(snakeBoard[0]) for row in range(rows): for col in range(cols): if snakeBoard[row][col]>0: snakeBoard[row][col]-=1 def timerFired(canvas): if canvas.data['ignoreNextTimerEvent']==False and canvas.data["isGameOver"] == False: moveSnake(canvas, canvas.data["snakeDrow"], canvas.data["snakeDcol"]) else: canvas.data['ignoreNextTimerEvent']=False redrawAll(canvas) delay = 200-canvas.data['snakelength']*3 # ms if canvas.data['start']==True: canvas.after(delay, timerFired, canvas) def redrawAll(canvas): canvas.delete(ALL) drawSnakeBoard(canvas) if (canvas.data["isGameOver"] == True): canvas.create_text(canvas.data['canvaswidth']/2, canvas.data['canvasheight']/2, text="Game Over", font=("Helvetica", 32, "bold"),fill='white') def drawSnakeBoard(canvas): #First the status bar snakeBoard = canvas.data["snakeBoard"] margin=canvas.data["margin"] sleft=0+margin sright=canvas.data['canvaswidth']-margin stop=canvas.data['canvasheight']-canvas.data['statusbarSize']-margin sbottom=canvas.data['canvasheight']-margin canvas.create_rectangle(sleft,stop,sright,sbottom, fill="gray") #canvas.create_rectangle() score=(canvas.data['snakelength']-1)*2*(canvas.data['snakelength']<=11)+(20+5*(canvas.data['snakelength']-11))*(canvas.data['snakelength']>11) canvas.data['score']=score if canvas.data['highscore']<canvas.data['score']: canvas.data['highscore']=canvas.data['score'] highscore=canvas.data['highscore'] canvas.create_text((sleft+sright+206)/2, (stop+sbottom-20)/2, text="Sore:%d" % (score), font=("Helvetica", 18, "bold"),fill='black') canvas.create_text((sleft+sright+160)/2, (stop+sbottom+20)/2, text="High Sore:%d" % (highscore), font=("Helvetica", 18, "bold"),fill='black') canvas.create_text((sleft+sright-100)/2, (stop+sbottom-30)/2, 
text='Arrow Keys - move the snake', font=("Helvetica", 13, "bold"),fill='black') canvas.create_text((sleft+sright-141)/2, (stop+sbottom)/2, text='Spacebar - start/pause', font=("Helvetica", 13, "bold"),fill='black') canvas.create_text((sleft+sright-230)/2, (stop+sbottom+30)/2, text='R - reset', font=("Helvetica", 13, "bold"),fill='black') #Then the cells rows = len(snakeBoard) cols = len(snakeBoard[0]) for row in range(rows): for col in range(cols): drawSnakeCell(canvas, snakeBoard, row, col) def drawSnakeCell(canvas, snakeBoard, row, col): margin = canvas.data['margin'] cellSize = canvas.data['cellSize'] left = margin + col * cellSize right = left + cellSize top = margin + row * cellSize bottom = top + cellSize canvas.create_rectangle(left, top, right, bottom, fill="black") A=[left*3/4+right*1/4, top] B=[left*1/4+right*3/4, top] C=[right, top*3/4+bottom*1/4] D=[right, top*1/4+bottom*3/4] E=[left*1/4+right*3/4, bottom] F=[left*3/4+right*1/4, bottom] G=[left, top*1/4+bottom*3/4] H=[left, top*3/4+bottom*1/4] if (snakeBoard[row][col] > 0): canvas.create_polygon([A,B,C,D,E,F,G,H], fill=canvas.data['snakecolor'][canvas.data['snakelength']]) elif (snakeBoard[row][col] == -1): if [row,col]==canvas.data['foodposition']: canvas.create_polygon([A,B,C,D,E,F,G,H], fill=canvas.data['lastfoodcolor']) else: while True: foodcolor=random.choice(['lavender','LemonChiffon','coral','DarkBlue','ForestGreen','gold','GreenYellow','HotPink','BlueViolet','beige','azure','brown','red','green','purple','cyan','magenta','orange']) if foodcolor!=canvas.data['lastfoodcolor']: break canvas.data['lastfoodcolor']=foodcolor canvas.create_polygon([A,B,C,D,E,F,G,H], fill=foodcolor) canvas.data['foodposition']=[row,col] return def placeFood(canvas): snakeBoard=canvas.data['snakeBoard'] rows = canvas.data["rows"] cols = canvas.data["cols"] while True: randrow=random.randint(0,rows-1) randcol=random.randint(0,cols-1) if snakeBoard[randrow][randcol]==0: snakeBoard[randrow][randcol]=-1 break def 
loadSnakeBoard(canvas): rows = canvas.data["rows"] cols = canvas.data["cols"] snakeBoard = [ ] for row in range(rows): snakeBoard += [[0] * cols] snakeBoard[rows/2][cols/2]=1 canvas.data["snakeBoard"] = snakeBoard findSnakeHead(canvas) placeFood(canvas) def findSnakeHead(canvas): snakeBoard = canvas.data["snakeBoard"] rows = len(snakeBoard) cols = len(snakeBoard[0]) headRow = 0 headCol = 0 for row in range(rows): for col in range(cols): if (snakeBoard[row][col] > snakeBoard[headRow][headCol]): headRow = row headCol = col canvas.data["headRow"] = headRow canvas.data["headCol"] = headCol def init(canvas): loadSnakeBoard(canvas) canvas.data["isGameOver"] = False canvas.data["ignoreNextTimerEvent"] = False canvas.data['snakelength']=1 initialDirection=random.randint(1,4) if initialDirection==1: canvas.data["snakeDrow"]=0 canvas.data["snakeDcol"]=1 elif initialDirection==2: canvas.data["snakeDrow"]=0 canvas.data["snakeDcol"]=-1 elif initialDirection==3: canvas.data["snakeDrow"]=1 canvas.data["snakeDcol"]=0 else: canvas.data["snakeDrow"]=-1 canvas.data["snakeDcol"]=0 redrawAll(canvas) def run(rows,cols): root = Tk() root.title('This is not Snake!') margin = 8 cellSize = 20 statusbarSize=50 canvasWidth = 2*margin + cols*cellSize canvasHeight = 2*margin + rows*cellSize+statusbarSize canvas = Canvas(root, width=canvasWidth, height=canvasHeight) canvas.pack() # Store canvas in root and in canvas itself for callbacks root.canvas = canvas.canvas = canvas canvas.data = { } canvas.data['highscore']=0 f=open('snakesave.txt','r') for x in f: canvas.data['highscore']=int(x) f.close() canvas.data['score']=0 canvas.data['snakecolor']={} canvas.data['lastfoodcolor']='blue' canvas.data['foodposition']=[0,0] canvas.data['snakelength']=1 canvas.data['snakecolor'][1]='blue' canvas.data['canvaswidth']=canvasWidth canvas.data['canvasheight']=canvasHeight canvas.data['quit']=False canvas.data['start']=False canvas.data['ignoreNextTimerEvent']=False canvas.data["isGameOver"] = False 
canvas.data["margin"] = margin canvas.data["cellSize"] = cellSize canvas.data['statusbarSize']=statusbarSize canvas.data["rows"] = rows canvas.data["cols"] = cols init(canvas) canvas.create_text(canvas.data['canvaswidth']/2, canvas.data['canvasheight']/2+20, text="Press Spacebar to Start", font=("Helvetica", 20, "bold"),fill='white') # set up events #root.bind("<Button-1>", mousePress) root.bind("<Key>", keyPress) # and launch the app root.mainloop() # This call BLOCKS (so your program waits until you close the window!) run(20,16)
[ "ljn07050@gmail.com" ]
ljn07050@gmail.com
9814a392ad08db3420d2e996e257bedf1e726fa2
b47115934ac89129f4f4f7655b36574c4ef32fea
/old/V3/app1_3.py
e5d076579cdc136e005413c65471bf39cfec229f
[]
no_license
jct3000/sqlalchemy
41ce66142c3146d0bca7058876efe7aad908ff57
937bfe990768d8f40111465029c999ca8049f010
refs/heads/master
2021-07-14T10:47:47.496246
2019-08-24T17:19:31
2019-08-24T17:19:31
187,557,262
1
0
null
null
null
null
UTF-8
Python
false
false
7,732
py
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, orm, DateTime from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, relationship from datetime import datetime, timedelta # inclusao de classe geral de personal data from sqlalchemy.ext.declarative import declared_attr from mymodel import * engine = create_engine('sqlite:///user.db', echo=True) # ('sqlite:///:memory:', echo=True) --- coloca a BD em memoria se mudar para algo tipo user.db cria em file na dir Base.metadata.create_all(bind=engine) class PersonalData ( object ): @declared_attr def __tablename__ ( cls ): return cls . __name__ . lower () # # __table_args__ = { 'mysql_engine' : 'InnoDB' } # __mapper_args__ = { 'always_refresh' : True } personal_tag= Column ( Integer ) created_date= Column(DateTime) #meter na metatabela lista=set() validade= Column ( Integer ) def __init__(self, *args, **kwargs): self.personal_tag=1 print("\nPersonal_Data\n") PersonalData.lista.add(self.__tablename__) print("\n lista de classes privadas\n") uniadder(self.__tablename__) print(self.lista) print("\n") #Inicializacoes self.validade=180 #validade em dias 6 meses self.created_date=datetime(datetime.today().year,datetime.today().month, datetime.today().day,datetime.today().hour,datetime.today().minute,datetime.today().second) print("\n DATA\n") print(self.created_date) print(self.created_date+timedelta(days=self.validade)) print("\n FIM\n") #Base.__init__(self, *args, **kwargs) @orm.reconstructor def init_on_load(self): #printa sempre que for buscar algo a BD print("\n\nCarregado da DB\n\n") # def __getattribute__(self, name): #printa todos os getters # print ("getting attribute %s" %name) # return object.__getattribute__(self, name) # # def __setattr__(self, name, val): #printa todos os getters # print (" setting attribute %s to %r" %(name, val)) # return object.__setattr__(self, name, val) Base=declarative_base() class Metatable (Base): __tablename__ = 
'metatable' id_sec= Column('id_sec', Integer, primary_key=True, unique=True) l_pessoal= Column('pessoal', String, unique=True ) goal= Column('goal', String, nullable=True ) data_owner= Column('data_owner', String) categorie= Column('categorie', String) data_source = Column('data_source', String) validade=Column('validade', Integer) def __init__(self, value): self.l_pessoal= value self.goal="statistic" self.categorie="External" self.data_owner="DONO" self.data_source="client" self.validade=180 class Person (Base, PersonalData ): __tablename__ = 'person' id = Column('id', Integer, primary_key=True) name = Column('name', String) email = Column ('email', String, unique=True) chekin_p=relationship("Checkin") def __repr__(self): return "<Person(name='%s', email='%s')>" % (self.name, self.email) def __init__(self, id, name, email): PersonalData.__init__(self) #tirar isto daqui???? self.id=id self.name=name self.email=email class Restaurant (Base): __tablename__ = 'restaurant' id_r= Column('id_r', Integer, primary_key=True) name = Column('name', String) adress = Column ('adress', String, unique=True) chekin_r=relationship("Checkin") def __repr__(self): return "<Restaurant(name='%s', adress='%s')>" % (self.name, self.adress) def __init__(self, id, name, adress): self.id_r=id self.name=name self.adress=adress class Checkin(Base): __tablename__ = 'checkin' id_c=Column('id_c', Integer, primary_key=True) id=Column(Integer, ForeignKey('person.id')) id_r= Column(Integer, ForeignKey('restaurant.id_r')) description = Column('description', String) rating = Column ('rating', Integer) def __repr__(self): return "<Checkin(description='%s', rating='%d')>" % (self.description, self.rating) def __init__(self, id_c, id, id_r, description, rating): self.id_c=id_c self.id=id self.id_r=id_r self.description=description self.rating=rating # engine = create_engine('sqlite:///user.db', echo=True) # ('sqlite:///:memory:', echo=True) --- coloca a BD em memoria se mudar para algo tipo user.db cria 
em file na dir # Base.metadata.create_all(bind=engine) Session = sessionmaker(bind=engine) #Parte responsavel pelos commits dos objectos para a DB # para deixar o campo a nulo usar None session= Session() person = Person(0,"joao", "hotmail" ) #person.personal_tag=1 session.add(person) session.commit() #teste de funcao que adiciona uma pessoa adder() person = Person(1,"miguel", "gemail" ) person.personal_tag=1 #Muda data de validade date=datetime(datetime.today().year,datetime.today().month, datetime.today().day,datetime.today().hour,datetime.today().minute,datetime.today().second) person.created_date= date-timedelta(days=18000) session.add(person) session.commit() # session.query(Person).filter(Person.id==0).delete() # Para apagar um objecto com querie # session.commit() # Para apagar um objecto com querie restaurant = Restaurant(1,"Dinner","street" ) session.add(restaurant) session.commit() checkin = Checkin(0,1 , 0 , "blabla", 3) session.add(checkin) session.commit() #Funcao de update de um valor o synchronize_session pode ter o valor 'evaluate' # session.query(Restaurant).filter(Restaurant.adress == "street").update({Restaurant.adress: "street2"}, synchronize_session=False) # session.commit() session.close() limpa(Person) alerta_vazio() change_val(Person,146) is_private(Person) is_private(Restaurant) is_private(Checkin) #parte responsavel pelo teste de query Session = sessionmaker(bind=engine) session= Session() #teste para guardar set de privados metas = session.query(Metatable).all() for meta in metas: print ("\n\nTeste de metadados lista: %s proposito %s categoria %s owner %s origem %s validade %d \n" %(meta.l_pessoal, meta.goal, meta.categorie, meta.data_owner, meta.data_source, meta.validade)) print("\n Persons data\n") persons = session.query(Person).all() for person in persons: print ("\n\nPessoa com o nome %s id %d e email %s %s\n" %(person.name, person.id, person.email,person.created_date)) print("\n Restaurant data\n") restaurants = 
session.query(Restaurant).all() for restaurant in restaurants: print ("\n\nRestaurante com o nome %s id %d e a morada %s\n" %(restaurant.name, restaurant.id_r, restaurant.adress)) print("\nCheckin data\n") #last query erro checkins = session.query(Checkin).all() for checkin in checkins: print ("\n\ncheckin com o id %d da pessoa com id %d no restaurante de id %d Descricao %s e Qualificacao %d \n" %(checkin.id_c , checkin.id , checkin.id_r, checkin.description, checkin.rating)) session.close()
[ "joao.carlos.teixeira@ist.utl.pt" ]
joao.carlos.teixeira@ist.utl.pt
48b15be505f68c01bcbe37105ce08e8d80a90959
93b704572dd4f36ae488f931fbe8372a215b13ad
/clean_solutions/day3.py
d56d9cf4949958790cfde7211c768208ff456079
[]
no_license
Goldenlion5648/AdventOfCode2020Live
7cfdf6804402fdf42d10c70742579522c487f501
e3f5908e8747991b50bdde339ad9ecba527b1168
refs/heads/master
2023-04-04T12:48:21.318124
2021-04-08T16:42:13
2021-04-08T16:42:13
317,414,264
1
0
null
null
null
null
UTF-8
Python
false
false
736
py
'''
Right 1, down 1.
Right 3, down 1. (This is the slope you already checked.)
Right 5, down 1.
Right 7, down 1.
Right 1, down 2.
'''
from collections import *

with open("input3.txt") as f:
    # a = list(map(int,f.read().strip().split("\n")))
    board = f.read().strip().split("\n")


def slide(xChange, yChange):
    """Count trees ('#') hit sledding from the top-left at slope (xChange, yChange).

    The forest pattern repeats infinitely to the right, hence the modulo on
    the column index.
    """
    posX = 0
    posY = 0
    count = 0
    while posY < len(board):
        if board[posY][posX] == "#":
            count += 1
        # BUG FIX: the original hard-coded "right 3, down 1" here, ignoring
        # xChange/yChange entirely, so every slope in part 2 produced the
        # same count and the final product was wrong.
        posX = (posX + xChange) % len(board[0])
        posY += yChange
    return count


print("part 1", slide(3, 1))

# part 2: multiply together the tree counts for all five slopes.
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
nums = []
for x, y in slopes:
    nums.append(slide(x, y))
answer = 1
for i in nums:
    answer *= i
print("part 2", answer)
[ "coboudinot@gmail.com" ]
coboudinot@gmail.com
a8bceb43fa6ab5f7df905906b3caf20a528a4d3a
ec9f6651e72cf6af5c3b377e3cf7fe24076eba39
/detection/train.py
cdeddae6c146932d0fd8afa345f5e3b59bfcdbe3
[]
no_license
DanMartyns/MacSpoofingDetection
9bc60044f7450ade6b7b407c4b271b5c4f157bfb
21b683eec5eb091d71cf6c1bc3b463b14a63c66b
refs/heads/master
2020-05-18T02:38:25.253233
2019-07-03T00:49:59
2019-07-03T00:49:59
184,121,976
0
0
null
null
null
null
UTF-8
Python
false
false
7,663
py
import argparse import os import math import random import statistics import numpy as np import pickle import copy from sklearn import svm from sklearn.ensemble import IsolationForest from sklearn.preprocessing import StandardScaler from sklearn.metrics import confusion_matrix from sklearn.neighbors import LocalOutlierFactor from sklearn.covariance import EllipticEnvelope from sklearn.covariance import MinCovDet # read files and create matrix def readFileToMatrix(files): f = open(files[0], "r") array = np.loadtxt(f) if len(files) > 0: for f in files[1:]: f = open(f, "r") array = np.concatenate((array, np.loadtxt(f))) array = np.delete(array, [0,3,4,5,6,10,12,13,14,18,20,21,22,23], axis=1) return array def predict(files, scaler, clf): data = readFileToMatrix(files) scaled_data = scaler.transform(data) return clf.predict(scaled_data) def print_results(anomaly_pred, regular_pred): an = ((anomaly_pred[anomaly_pred == -1].size)/anomaly_pred.shape[0])*100 re = ((regular_pred[regular_pred == 1].size)/regular_pred.shape[0])*100 print("\nAverage success anomaly: ", an, "%") print("Average success regular: ", re,"%") print("Score: ", int((an+re*1.5)*100/250)) y_pred = np.concatenate((anomaly_pred, regular_pred)) y_true = np.concatenate((np.full(anomaly_pred.shape[0], -1), np.full(regular_pred.shape[0], 1))) print("Confusion matrix: \n", confusion_matrix(y_true, y_pred), "\n") return (int(an), int(re)) def calc_score(anomaly_pred, regular_pred): an = ((anomaly_pred[anomaly_pred == -1].size)/anomaly_pred.shape[0])*100 re = ((regular_pred[regular_pred == 1].size)/regular_pred.shape[0])*100 return int((an+re*1.5)*100/250) def remove_algorithms(score): remv = copy.deepcopy(score) score.sort(reverse=True) median = score[math.floor(len(score)/2)] step = math.floor((score[0] - score[len(score)-1])/len(score)) values = [] for i in range(1, len(score)): if score[i] < median and (math.floor(score[i-1] - score[i]) >= step or median - score[i] > 2*step): values.append(score[i]) 
print(len(values)) return [i for i, x in enumerate(remv) if x in values] def decide(pred, ignore=[]): pred = np.delete(pred, ignore, axis=1) l = [] for i in range(0, pred.shape[0]): col = pred[i,:] if col.tolist().count(-1) > math.ceil(pred.shape[1]*0.4): l.append(-1) else: l.append(1) return np.array(l) #main def main(): parser = argparse.ArgumentParser() # Read from files parser.add_argument("-f", "--files", nargs='+') # Read from an directory parser.add_argument("-d", "--directory", nargs='+') # Wildcard to detect if legit computer parser.add_argument("-w", "--wildcard", required=True) # Assure at least one type of this capture goes to training parser.add_argument("-a", "--assure", nargs='+') # Wants to export files parser.add_argument("-e","--export", action='store_true') # Print confusion matrix for each algorithm parser.add_argument("-v","--verbose", action='store_true') args=parser.parse_args() if not (args.files or args.directory): parser.error("No files given, add --files or --directory.") if not args.files: args.files = [] # get all filenames from directory if args.directory: for dir in args.directory: for r, d, f in os.walk(dir): for file in f: if ".dat" in file: args.files.append(os.path.join(r, file)) train_files = [] anomaly_test_files = [] regular_test_files = [] # divide filenames in true pc or other for f in args.files: if args.wildcard in f: train_files.append(f) else: anomaly_test_files.append(f) # begin process of deciding test and train files ratio = 0.3 remove_elems = math.floor(ratio*len(train_files)) assured_files = [] count_swapped = 0 # if there are mandatory files for training if args.assure: for k in args.assure: rescued = [] for f in train_files: if k in f: rescued.append(f) if len(rescued) == 1: assured_files.append(rescued[0]) elif len(rescued) > 1: random.shuffle(rescued) for i in range(0, math.ceil(len(rescued)/2)): elem = rescued.pop(0) assured_files.append(elem) for elem in rescued: train_files.remove(elem) 
regular_test_files.append(elem) count_swapped+=1 if remove_elems - count_swapped > 0: random.shuffle(train_files) while count_swapped < remove_elems: if train_files[0] not in assured_files: regular_test_files.append(train_files.pop(0)) count_swapped+=1 else: random.shuffle(train_files) # fit train_data = readFileToMatrix(train_files) scaler = StandardScaler() scaler.fit(train_data) train_data = scaler.transform(train_data) clf = [] clf.append(svm.OneClassSVM(gamma='auto', kernel='rbf')) clf.append(svm.OneClassSVM(gamma=0.0000001, kernel='rbf')) clf.append(svm.OneClassSVM(gamma=1, kernel='rbf')) clf.append(svm.OneClassSVM(kernel='linear')) clf.append(svm.OneClassSVM(gamma='auto', kernel='poly', degree=1)) clf.append(svm.OneClassSVM(gamma='auto', kernel='poly', degree=2)) clf.append(svm.OneClassSVM(gamma='auto', kernel='poly', degree=5)) clf.append(svm.OneClassSVM(gamma=1, kernel='poly', degree=1)) clf.append(svm.OneClassSVM(gamma=1, kernel='sigmoid')) clf.append(IsolationForest(behaviour='new', max_samples='auto', contamination=0.1)) clf.append(IsolationForest(behaviour='new', max_samples=int(train_data.shape[0]/2), contamination=0.2)) clf.append(LocalOutlierFactor(n_neighbors=20, novelty=True, contamination=0.1)) clf.append(LocalOutlierFactor(n_neighbors=20, novelty=True, contamination=0.2)) clf.append(EllipticEnvelope(support_fraction=0.9, contamination=0.1)) clf.append(EllipticEnvelope(support_fraction=0.9, contamination=0.2)) flag = True score = [] for c in clf: c.fit(train_data) # predict an = predict(anomaly_test_files, scaler, c).reshape(-1,1) re = predict(regular_test_files, scaler, c).reshape(-1,1) if flag: anomaly_pred = an regular_pred = re flag = False else: anomaly_pred = np.concatenate((anomaly_pred, an), axis=1) regular_pred = np.concatenate((regular_pred, re), axis=1) score.append(calc_score(an, re)) if args.verbose: print_results(predict(anomaly_test_files, scaler, c), predict(regular_test_files, scaler, c)) ignore = remove_algorithms(score) 
print_results(decide(anomaly_pred), decide(regular_pred)) fname = print_results(decide(anomaly_pred, ignore=ignore), decide(regular_pred, ignore=ignore)) print(ignore) #serialize to file if args.export: ignore.reverse() for c in ignore: del clf[c] file = open("clf_" + str(fname[0]) + "_" + str(fname[1]) + '.bin',"wb") pickle.dump(clf, file) file = open("scaler_" + str(fname[0]) + "_" + str(fname[1]) + '.bin',"wb") pickle.dump(scaler, file) if __name__ == '__main__': main()
[ "margaridaocs@ua.pt" ]
margaridaocs@ua.pt
c079c02b3ca5326563ba354698e2392c87ee0def
47693ffe6c3d14a4b8536ae89c63b7df911a4de8
/peer_lending/users/migrations/0002_auto_20210418_1217.py
82bfab26cb63ad4283ff8fc9003d1f3225ec7af6
[ "BSD-3-Clause" ]
permissive
teorich/peerlending_starter
2b22dd3d843fe0101551f1973e961cfe1a7a9699
3f0e2761a823a75d5ad6547a407ab782008b5185
refs/heads/main
2023-04-25T04:44:53.677736
2021-04-18T18:09:53
2021-04-18T18:09:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,083
py
# Generated by Django 3.1.8 on 2021-04-18 10:17 from django.db import migrations, models import uuid class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.AddField( model_name='user', name='first_name', field=models.CharField(blank=True, help_text='Legal First names of the client.', max_length=125, null=True, verbose_name='First names'), ), migrations.AddField( model_name='user', name='last_name', field=models.CharField(blank=True, help_text='Legal Last names of the client.', max_length=125, null=True, verbose_name='Last names'), ), migrations.AlterField( model_name='user', name='id', field=models.UUIDField(default=uuid.uuid4, editable=False, help_text='The unique identifier of the instance this object belongs to.\n Mandatory, unless a new instance to create is given.', primary_key=True, serialize=False), ), ]
[ "mrjamesreinhold@gmail.com" ]
mrjamesreinhold@gmail.com
eabc327817af3553828fe0ffc5f9a44d5e5d1951
52b5773617a1b972a905de4d692540d26ff74926
/.history/mushroomPicker_20200729130815.py
f6b760f3a1112376d639f98641c3cd38b7ba4176
[]
no_license
MaryanneNjeri/pythonModules
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
f4e56b1e4dda2349267af634a46f6b9df6686020
refs/heads/master
2022-12-16T02:59:19.896129
2020-09-11T12:05:22
2020-09-11T12:05:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,178
py
"""Codility "MushroomPicker" scratch work (work-in-progress snapshot).

A mushroom picker stands at spot k on a road described by array A and may make
m moves, collecting the mushrooms on every spot visited; the goal is the
maximum number collectable. This snapshot only prints the candidate right
endpoints for each choice of initial left excursion.
"""


def count_totals(p, x, y):
    # Helper intended for a prefix-sum array; currently only peeks one past y.
    return p[y + 1]


def mushroom(A, k, m):
    """Explore candidate pick intervals (debug prints only, no return value).

    A -- mushroom counts per road spot
    k -- the picker's starting index (e.g. 4)
    m -- number of moves allowed (e.g. 6)
    """
    size = len(A)
    result = 0
    # Prefix sums of A (not yet consumed by the rest of the routine).
    prefix = [0] * size
    prefix[0] = A[0]
    for idx in range(1, size):
        prefix[idx] = prefix[idx - 1] + A[idx]
    # p = number of steps taken to the left first, capped by both m and k.
    for p in range(min(m, k) + 1):
        left_pos = k - p
        # After going p left then turning around, the farthest right spot
        # reachable with the remaining moves (clamped to the road's end).
        right_pos = min(size - 1, max(k, k + m - 2 * p))
        print('right', right_pos)


mushroom([2, 3, 7, 5, 1, 3, 9], 4, 6)
[ "mary.jereh@gmail.com" ]
mary.jereh@gmail.com
26eb3da91f9d5309b6650b1f7e62de5724b4f8dd
056ef2e008849515aa81487614259280f7f179c1
/vrt/vfin/dain/MegaDepth/options/test_options.py
fa1f946af8aa57c723d5ad435cf0b10432018825
[ "MIT" ]
permissive
darktohka/OpenVideoEnhance
5361c2af9b091ba3a80e4fc3e3225acbf551e180
f31746b6b2ad510e18d6343304646bd8fb50a390
refs/heads/develop
2023-03-11T20:31:23.258900
2021-02-27T09:23:03
2021-02-27T09:23:03
344,285,821
1
0
MIT
2021-03-03T22:53:05
2021-03-03T22:53:04
null
UTF-8
Python
false
false
878
py
from .base_options import BaseOptions


class TestOptions(BaseOptions):
    """Command-line options used at test/inference time (extends BaseOptions)."""

    def initialize(self):
        # Register the shared options first, then the test-only ones.
        BaseOptions.initialize(self)
        add = self.parser.add_argument
        add('--ntest', type=int, default=float("inf"), help='# of test examples.')
        add('--results_dir', type=str, default='./results/', help='saves results here.')
        add('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        add('--phase', type=str, default='test', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        add('--how_many', type=int, default=50, help='how many test images to run')
        # Downstream code checks this flag to skip training-only setup.
        self.isTrain = False
[ "iBobbyTS@gmail.com" ]
iBobbyTS@gmail.com
87f76ef7f0a4f8ca551c0dd58adb6ceec115833c
75650efb55b569ac27a880702b9bd756793dd3cb
/build/scripts-3.7/eu_create_gcp_url_list.py
74238890381f628eeccac92d76aca11cdc4a53ce
[ "MIT" ]
permissive
nathankw/dev_encode_utils
d33a5c5097b0a7982790d8b69bfde2c2ec7bf3b8
9f4596112a9764d4c93214d9e5e3a4861dfc54a0
refs/heads/master
2018-12-27T11:26:04.375692
2018-10-24T06:08:36
2018-10-24T06:08:36
121,198,680
0
0
null
null
null
null
UTF-8
Python
false
false
2,052
py
#!/Library/Frameworks/Python.framework/Versions/3.7/bin/python3
# -*- coding: utf-8 -*-

###
# © 2018 The Board of Trustees of the Leland Stanford Junior University
# Nathaniel Watson
# nathankw@stanford.edu
# 2018-10-23
###

"""
Creates a Google Storage Transfer Service URL list file, which can be used as input into the
Google STS to transfer released ENCODE S3 files to your GCP buckets.
"""

import argparse
import datetime
import json
import os

import encode_utils.connection as euc
from encode_utils.parent_argparser import dcc_login_parser


def get_parser():
    """Build the CLI parser: file IDs come from -f/--file-ids or -i/--infile."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        parents=[dcc_login_parser],
        formatter_class=argparse.RawTextHelpFormatter)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-f", "--file-ids", nargs="+", help="""
      An alternative to --infile, one or more ENCODE file identifiers. Don't mix ENCODE files
      from across buckets.""")
    group.add_argument("-i", "--infile", help="""
      An alternative to --file-ids, the path to a file containing one or more file identifiers,
      one per line. Empty lines and lines starting with a '#' are skipped.""")
    parser.add_argument("-o", "--outfile", required=True, help="""
      The output URL list file name.""")
    return parser


def main():
    """Entry point: collect the file identifiers and write the URL list file."""
    parser = get_parser()
    args = parser.parse_args()
    outfile = args.outfile
    # Connect to the Portal
    dcc_mode = args.dcc_mode
    if dcc_mode:
        conn = euc.Connection(dcc_mode)
    else:
        # Default dcc_mode taken from environment variable DCC_MODE.
        conn = euc.Connection()
    # BUG FIX: when --infile is used, args.file_ids is None (the two options are
    # mutually exclusive), so the append below raised AttributeError. Fall back
    # to an empty list in that case.
    file_ids = args.file_ids or []
    infile = args.infile
    if infile:
        # Context manager guarantees the handle closes even on a parse error.
        with open(infile) as fh:
            for line in fh:
                line = line.strip()
                if not line or line.startswith("#"):
                    continue
                file_ids.append(line)
    conn.gcp_transfer_urllist(file_ids=file_ids, filename=outfile)


if __name__ == "__main__":
    main()
[ "nathankw@Nathans-MacBook-Air.local" ]
nathankw@Nathans-MacBook-Air.local
39c412f9b18b2196fb290af51106a9a906ececfd
1f19e504dede93559edfa4266ae90c9b698f1595
/learning_logs/migrations/0004_auto_20201019_0709.py
e7cf8bad138692fd900c1ae7ab55070b3e1b9aa9
[]
no_license
poemDead/learning_log_django
3a93e9fa4f6553c0704c7c49911aaafadb60b1e8
5149c72bf8cb522544f1b82d030fc93915c9d2f5
refs/heads/main
2023-02-07T20:17:38.741281
2023-02-01T01:51:20
2023-02-01T01:51:20
305,242,096
1
0
null
null
null
null
UTF-8
Python
false
false
374
py
# Generated by Django 2.2 on 2020-10-19 07:09 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('learning_logs', '0003_auto_20201019_0703'), ] operations = [ migrations.RenameField( model_name='topic', old_name='data_added', new_name='date_added', ), ]
[ "69440589+poemDead@users.noreply.github.com" ]
69440589+poemDead@users.noreply.github.com
b3673d87cd687e139daef7c90d95e0a9126b841d
954df5fb1ceaf64fe3004e0b072b78024065cdd0
/virtual/Lib/site-packages/future/moves/_markupbase.py
41c4f96c55552b677772b61ba497a16ba84b3df8
[]
no_license
chelseyrandolph/cs440_DatabaseUI
7dc5b4c3d0a4e72023db61f4a613fc889bc69f86
28355cdfe0f4732568f1f8e43e2ce7809b4fc260
refs/heads/master
2022-06-06T19:18:31.819483
2020-05-05T20:51:58
2020-05-05T20:51:58
259,436,551
0
0
null
null
null
null
UTF-8
Python
false
false
151
py
"""future.moves shim for the Python 2 ``markupbase`` module.

On Python 3 the standard library already provides ``_markupbase``, so nothing
needs to happen; on Python 2 this module re-exports the legacy ``markupbase``
contents under the new name.
"""
from __future__ import absolute_import
from future.utils import PY3

if not PY3:
    # Python 2 only: mark this as a future-provided module and re-export.
    __future_module__ = True
    from markupbase import *
[ "chelseyrrandolph@gmail.com" ]
chelseyrrandolph@gmail.com
ad5d5361d58d186ea6682f1b01c9158b0e151206
1255b4c76aa2def0d8ca07ff75ef264383de36e3
/main.py
8b482da2a9acf7567e56eec60c0e4c881703abac
[]
no_license
thepixelboy/flask-auth
bcbe2ce182e54743acfa70860f975b059952c65c
e49903b65c9451891b61138e1b5453ea29f733d1
refs/heads/main
2023-07-10T00:41:46.442728
2021-08-23T16:56:39
2021-08-23T16:56:39
399,182,979
0
0
null
null
null
null
UTF-8
Python
false
false
3,337
py
"""Flask authentication demo: register/login/logout with hashed passwords.

Uses Flask-SQLAlchemy for the user store and Flask-Login for session handling;
a login-protected /download route serves a static PDF.
"""
from os import name  # NOTE(review): appears unused in this module — confirm before removing.
from flask import Flask, render_template, request, url_for, redirect, flash, send_from_directory
from werkzeug.security import generate_password_hash, check_password_hash
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user

app = Flask(__name__)

# NOTE(review): secret key and DB URI are hard-coded; fine for a demo, but use
# environment variables for anything deployed.
app.config["SECRET_KEY"] = "flown-actinium-cam-algae"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///users.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)

# Wire Flask-Login into the app so @login_required and current_user work.
login_manager = LoginManager()
login_manager.init_app(app)


@login_manager.user_loader
def load_user(user_id):
    # Flask-Login stores the user id in the session as a string.
    return User.query.get(int(user_id))


##CREATE TABLE IN DB
class User(UserMixin, db.Model):
    # UserMixin supplies is_authenticated / get_id etc. for Flask-Login.
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(100), unique=True)
    password = db.Column(db.String(100))  # holds the salted pbkdf2 hash, never plaintext
    name = db.Column(db.String(1000))


# Line below only required once, when creating DB.
# db.create_all()


@app.route("/")
def home():
    # Landing page; template adapts to whether a user is logged in.
    return render_template("index.html", logged_in=current_user.is_authenticated)


@app.route("/register", methods=["GET", "POST"])
def register():
    # GET renders the form; POST creates the account and logs the user in.
    if request.method == "POST":

        if User.query.filter_by(email=request.form.get("email")).first():
            # User already exists
            flash("You've already signed up with that email, log-in instead.")
            return redirect(url_for("login"))

        # Hash with a per-user salt; the plaintext password is never stored.
        hash_and_salted_password = generate_password_hash(
            request.form.get("password"), method="pbkdf2:sha256", salt_length=8
        )
        new_user = User(
            email=request.form.get("email"), name=request.form.get("name"), password=hash_and_salted_password
        )
        db.session.add(new_user)
        db.session.commit()

        # Log-in and authenticate user after adding new user data to the database
        login_user(new_user)

        return redirect(url_for("secrets"))

    return render_template("register.html", logged_in=current_user.is_authenticated)


@app.route("/login", methods=["GET", "POST"])
def login():
    # GET renders the form; POST validates credentials and starts a session.
    if request.method == "POST":
        email = request.form.get("email")
        password = request.form.get("password")

        # Find user by email
        user = User.query.filter_by(email=email).first()

        # Email doesn't exist
        if not user:
            flash("That email does not exist, please try again.")
            return redirect(url_for("login"))
        # Password incorrect
        elif not check_password_hash(user.password, password):
            flash("Password incorrect, please try again.")
            return redirect(url_for("login"))
        # Email exists and password correct
        else:
            login_user(user)
            return redirect(url_for("secrets"))

    return render_template("login.html", logged_in=current_user.is_authenticated)


@app.route("/secrets")
@login_required
def secrets():
    # Members-only page; @login_required redirects anonymous visitors.
    return render_template("secrets.html", name=current_user.name, logged_in=True)


@app.route("/logout")
def logout():
    # Clear the session and send the visitor back to the landing page.
    logout_user()
    return redirect(url_for("home"))


@app.route("/download")
@login_required
def download():
    # Serve the protected PDF from the static folder as an attachment.
    return send_from_directory("static", path="files/cheat_sheet.pdf", as_attachment=True)


if __name__ == "__main__":
    app.run(debug=True)
[ "34570952+thepixelboy@users.noreply.github.com" ]
34570952+thepixelboy@users.noreply.github.com
4a5300837d83d06467afb85f15b78e321182b279
666d451f3e6a8053211600a5d33e6c757e176335
/bobtemplates/mbaechtold/django_project/src/website/wsgi.py
d56e6b1e044bfdcb992fae27ce2235add65b3c03
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
mbaechtold/django-project-template
4a7b5242c9149eea0821c47339d0fed0db183135
cdbac7de9c7cf9d20291dace39ff3000987d79f8
refs/heads/master
2020-05-01T12:03:18.974328
2013-09-09T10:54:00
2013-09-09T10:54:00
4,346,364
0
0
null
null
null
null
UTF-8
Python
false
false
1,067
py
""" This module contains the WSGI application used by Django's development server. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ # Please note that this is from ``django-configurations`` and not ``Django`` from configurations.wsgi import get_wsgi_application # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. application = get_wsgi_application() # Apply WSGI middleware here. # from helloworld.wsgi import HelloWorldApplication # application = HelloWorldApplication(application)
[ "github@tph.ch" ]
github@tph.ch
5317b35317ba1ab2da314d6bd8ad9be085d19480
13f4a06cd439f579e34bf38406a9d5647fe7a0f3
/nn_ns/parsing/FS/readme.py
d02accd2f405f4744acde147164ec31870528870
[]
no_license
edt-yxz-zzd/python3_src
43d6c2a8ef2a618f750b59e207a2806132076526
41f3a506feffb5f33d4559e5b69717d9bb6303c9
refs/heads/master
2023-05-12T01:46:28.198286
2023-05-01T13:46:32
2023-05-01T13:46:32
143,530,977
2
2
null
null
null
null
UTF-8
Python
false
false
2,141
py
3 froms: FSM, [Rule], regular_expression FSM: TotalState : FSM formal - FSM{initial :: TotalState, finals :: Set TotalState, error :: TotalState, transition :: Map TotalState (Map Symbol TotalState) } informal - NFSM{initials :: Set PartialState, finals :: Set PartialState, error :: Set PartialState, # empty_set transition :: Map PartialState (Map (Maybe Symbol) (Set PartialState)) } PartialState : FA # esp cleaned_dfa formal - DFA {initial :: Maybe PartialState, finals :: Set PartialState, error :: Maybe PartialState, # nothing transition :: Map PartialState (Map Symbol PartialState) } informal - NDFA {initials :: Set PartialState, finals :: Set PartialState, error :: Set PartialState, # empty_set transition :: Map PartialState (Map (Maybe Symbol) (Set PartialState)) } {initials::Set PartialState, transition::[Rule]}: # NDFA-RuleForm # a direct map into/from a NDFA FormalNDFARule :: (PartialState, Maybe (Maybe Symbol, PartialState)) (a, Nothing) -> [a in finals] (a, Just (maybe_symbol, b)) -> "a = maybe_symbol b" InformalNDFARule :: (Nonterminal, [Symbol], Maybe Nonterminal) where PartialState = (Nonterminal, Integer) (a, ls, Nothing) -> [(a, len(ls)) in finals] regular_expression: # RE-RuleForm # using star but without recur (even tail-recur) # DAG BasicRe a = ReConcat [BasicRe a] | ReUnion [BasicRe a] | ReStar (BasicRe a) | ReSymbol a ExtendedRe a = BasicRe a | ReComplement a | ReIntersect a
[ "wuming_zher@zoho.com.cn" ]
wuming_zher@zoho.com.cn
a786ff4225a5ef3d90945927f927306e9fcf005d
039d48e141a8276f8a8b869fe57ecd11335aa774
/toolchains/gcc-linaro-7.4.1-2019.02-x86_64_aarch64-linux-gnu/aarch64-linux-gnu/libc/lib/libstdc++.so.6.0.24-gdb.py
f1efd517cb064807ca14d91e7afab0879ed1f922
[]
no_license
BPI-SINOVOIP/BPI-Mainline-uboot
7e6d4e91f8340c381315d04ec9894ac26d36c8c0
7ce9aae0a4c658525a34e9404b6fef5140a6548b
refs/heads/master
2023-05-26T21:11:05.630207
2023-03-14T15:17:36
2023-03-14T15:17:36
41,422,808
14
15
null
2023-05-23T04:11:00
2015-08-26T11:51:04
null
UTF-8
Python
false
false
2,591
py
# -*- python -*- # Copyright (C) 2009-2017 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys import gdb import os import os.path pythondir = '/home/tcwg-buildslave/workspace/tcwg-make-release_1/_build/builds/destdir/x86_64-unknown-linux-gnu/share/gcc-7.4.1/python' libdir = '/home/tcwg-buildslave/workspace/tcwg-make-release_1/_build/builds/destdir/x86_64-unknown-linux-gnu/aarch64-linux-gnu/lib/../lib64' # This file might be loaded when there is no current objfile. This # can happen if the user loads it manually. In this case we don't # update sys.path; instead we just hope the user managed to do that # beforehand. if gdb.current_objfile () is not None: # Update module path. We want to find the relative path from libdir # to pythondir, and then we want to apply that relative path to the # directory holding the objfile with which this file is associated. # This preserves relocatability of the gcc tree. # Do a simple normalization that removes duplicate separators. pythondir = os.path.normpath (pythondir) libdir = os.path.normpath (libdir) prefix = os.path.commonprefix ([libdir, pythondir]) # In some bizarre configuration we might have found a match in the # middle of a directory name. if prefix[-1] != '/': prefix = os.path.dirname (prefix) + '/' # Strip off the prefix. 
pythondir = pythondir[len (prefix):] libdir = libdir[len (prefix):] # Compute the ".."s needed to get from libdir to the prefix. dotdots = ('..' + os.sep) * len (libdir.split (os.sep)) objfile = gdb.current_objfile ().filename dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir) if not dir_ in sys.path: sys.path.insert(0, dir_) # Call a function as a plain import would not execute body of the included file # on repeated reloads of this object file. from libstdcxx.v6 import register_libstdcxx_printers register_libstdcxx_printers(gdb.current_objfile())
[ "lionwang@sinovoip.com.cn" ]
lionwang@sinovoip.com.cn
75a200d94249e8c6d57dbb9015f55760805259de
780cc16d12414f209f3931a912fc46b557ff1446
/without-label-V2/.secondary/experiment3.py
95ea71abacc57887e8b0a33c534a1348841e49d8
[]
no_license
smaityumich/Transfer-learning
b991ae7f547e14ddaad908e2f38c675b41eefa18
fcf28c4c302f4c4bf30edf1182b1935e6848791f
refs/heads/master
2021-11-13T00:13:27.825203
2020-02-21T02:58:00
2020-02-21T02:58:00
232,660,760
0
0
null
2020-03-13T12:38:50
2020-01-08T21:12:07
Python
UTF-8
Python
false
false
2,234
py
import numpy as np from withoutLabelV2 import * import sys import os from fractions import Fraction ## Mkaing a hidden output directory outdir = os.getcwd() + '/.out' if not os.path.exists(outdir): os.system(f'mkdir {outdir}') ## Getting classifier for the def job(par): ## Setting seed and parameter #np.random.seed(100) m, n, n_test, d, prop, distance, index = par #m = float(m) #n, n_test = float(n), float(n_test) #d = float(d) #prop = float(prop) #distance = float(distance) #m, n, n_test, d = int(m) , int(n), int(n_test), int(d) fname = f'n-source:{m} n-target:{n} dimension:{d} prop-of-success-target:{prop} dist-between-means: {distance} index {index}' ##Generate data data_generate = DataGenerator(d = d) x_source, y_source, _ = data_generate._getData(m, 0.5, distance) x_target_train, _, _ = data_generate._getData(n, prop, distance) x_target_test, y_target_test, bayes_target_test = data_generate._getData(n_test, prop, distance) ##Buliding classifier classifier_noLabel = WithoutLabelV2(x_source = x_source, y_source = y_source, x_target = x_target_train) predicted_labels = classifier_noLabel._classify(x_target_test) error = np.mean((y_target_test-predicted_labels)**2) bayes_error = np.mean((bayes_target_test-y_target_test)**2) w = classifier_noLabel.w w_true = [(1-prop)/0.5, prop/0.5] w_true = np.array(w_true) w_error = np.sum((w - w_true)**2) bandwidth = classifier_noLabel.bandwidth prop_predicted = classifier_noLabel.prop_target prop_error = np.abs(prop_predicted-prop) with open('./out/experiment.out','a') as fh: fh.writelines(f'parameter:\n{fname}\n') fh.writelines(f'Prediction error: {error}\n') fh.writelines(f'Bayes error: {bayes_error}\n') fh.writelines(f'w: {str(w)}\nw_error: {w_error}\n') fh.writelines(f'Bandwidth chosen {bandwidth}\n') fh.writelines(f'Target proportion is estimated as {prop_predicted} with error {prop_error}\n\n\n') os.system(f'echo Excess risk: {error-bayes_error}') os.system(f'echo w_error: {w_error}') os.system(f'echo Target proportion 
estimation error {prop_error}')
[ "smaityumich" ]
smaityumich
ee947ab878cedddde467745d6f6bf32fc9b603b9
c8a3b33f0ec6b53c3280f6a65cfb18c115d0b615
/set_cases_cpeq_extra.py
1b199c7bffa3e9c91a0bc7267b945af99de858a1
[ "MIT" ]
permissive
marcelosalles/dissertacao
dd502c1ab78d12bfc4673fcec9816485992d358a
692f80a0d6b28a7e929dc86f6a684cfa84b59df1
refs/heads/master
2021-12-11T15:42:22.545985
2021-12-07T17:53:47
2021-12-07T17:53:47
190,596,533
0
0
null
null
null
null
UTF-8
Python
false
false
7,922
py
import datetime import json import os import pandas as pd import dict_update import sample_gen import idf_creator_floor as whole_gen import singlezone_diss import runep_subprocess import output_processing2 import other_crack_fac update = dict_update.update # Globals FOLDER = 'cp_eq_extra' SIZE = 200 SAMPLE_NAME = 'sample_cpeq' NUM_CLUSTERS = int(os.cpu_count()/2) NAME_STDRD = 'whole' NAME_STDRD_2 = 'single' INPUT = "seed.json" # INPUT_WHOLE # INPUT_SZ = "seed_sz.json" EXTENSION = 'epJSON' REMOVE_ALL_BUT = [EXTENSION, 'csv', 'err'] EPW_NAME = '~/dissertacao/BRA_SP_Sao.Paulo-Congonhas.AP.837800_TMYx.2003-2017.epw' MONTH_MEANS = '/media/marcelo/OS/LabEEE_1-2/idf-creator/month_means_8760.csv' OUTPUT_PROCESSED = 'means_'+FOLDER CONCRETE_EPS = True SOBOL = False PARAMETERS = { 'area':[20,100], 'ratio':[.4,2.5], 'zone_height':[2.3,3.2], 'azimuth':[0,359.9], 'floor_height':[0,50], 'absorptance':[.2,.8], 'wall_u':[.5,4.4], 'wall_ct':[.22,450], 'wwr':[.1,.6], 'glass':[.2,.87], 'shading':[0,80], 'people':[.05,.2], 'corner_window':[0,1], 'open_fac':[0.2,1], 'roof':[0,1], 'ground':[0,1], 'bldg_ratio': [.2,1], # 'n_floor':[1,9], 'v_ar':[0,1] } start_time = datetime.datetime.now() # Dependents col_names = list(PARAMETERS) samples_x_cluster = SIZE/NUM_CLUSTERS name_length = '{:0'+str(len(str(SIZE)))+'.0f}' name_length_cluster = '{:0'+str(len(str(NUM_CLUSTERS)))+'.0f}' def add_crack(file_name, crack_fac=.1): with open(file_name, 'r') as file: model = json.loads(file.read()) model["AirflowNetwork:MultiZone:Surface:Crack"] = { "door_crack": { "air_mass_flow_coefficient_at_reference_conditions": crack_fac, "air_mass_flow_exponent": 0.667, "idf_max_extensible_fields": 0, "idf_max_fields": 4 } } with open(file_name, 'w') as file: file.write(json.dumps(model)) def parameter_value(key, i): value = PARAMETERS[key][0]+(PARAMETERS[key][1]-PARAMETERS[key][0])*i return value print('\nCREATING DIRECTORIES\n') os.system('mkdir '+FOLDER) for i in range(NUM_CLUSTERS): os.system('mkdir 
'+FOLDER+'/cluster'+name_length_cluster.format(i)) # Generate sample print('\nGENERATING SAMPLE\n') # sample = sample_gen.main(SIZE, col_names, SAMPLE_NAME, sobol=False) sample = pd.read_csv(SAMPLE_NAME+'.csv') if SOBOL: sample = (sample+1)/2 # Set cases print('\nGENERATING MODELS\n') df = pd.DataFrame(columns=col_names+['folder','file']) line = 0 for i in range(len(sample)): sample_line = list(sample.iloc[i]) model_values = dict((param,parameter_value(param, sample.loc[i, param])) for param in col_names) corr_width = 2 if model_values['roof'] > .5: roof = True else: roof = False if model_values['ground'] > .5: ground = True else: ground = False if model_values['corner_window'] > .5: corner_window = True else: corner_window = False zone_feat = whole_gen.zone_list(model_values) cluster_n = int(line//samples_x_cluster) case = name_length.format(line) output = (FOLDER+'/cluster'+name_length_cluster.format(cluster_n)+'/'+NAME_STDRD+'_{}.epJSON'.format(case)) df = df.append(pd.DataFrame([sample_line+['cluster'+name_length_cluster.format(cluster_n),NAME_STDRD+'_{}.epJSON'.format(case)]],columns=col_names+['folder','file'])) # print(output) # whole_gen.main( # zone_area = model_values['area'], # zone_ratio = model_values['ratio'], # zone_height = model_values['zone_height'], # absorptance = model_values['absorptance'], # shading = model_values['shading'], # azimuth = model_values['azimuth'], # corr_width = corr_width, # wall_u = model_values['wall_u'], # wall_ct = model_values['wall_ct'], # corr_vent = 1, # stairs = 0, # zone_feat = zone_feat, # concrete_eps=CONCRETE_EPS, # zones_x_floor = 6, # n_floors = 1, # corner_window=corner_window, # ground=ground, # roof=roof, # floor_height = model_values['floor_height'], # input_file = INPUT, # output = output # ) azimuth_left = (model_values['azimuth']+270)%360 azimuth_right = (model_values['azimuth']+90)%360 for i in range(6): if i%2 == 0: azi = azimuth_left else: azi = azimuth_right output = 
(FOLDER+'/cluster'+name_length_cluster.format(cluster_n)+'/'+NAME_STDRD_2+'_noeq_01'+'_{}_'.format(case)+str(i)+'.epJSON') df = df.append(pd.DataFrame([sample_line+['cluster'+name_length_cluster.format(cluster_n),NAME_STDRD_2+'_noeq_01'+'_{}_'.format(case)+str(i)+'.epJSON'.format(case)]],columns=col_names+['folder','file'])) # singlezone_diss.main( # zone_area = model_values['area'], # zone_ratio = model_values['ratio'], # zone_height = model_values['zone_height'], # absorptance = model_values['absorptance'], # shading = model_values['shading'], # azimuth = azi, # bldg_ratio = model_values['bldg_ratio'], # wall_u = model_values['wall_u'], # wall_ct = model_values['wall_ct'], # zn=i, # floor_height = model_values['floor_height'], # corner_window = corner_window, # ground=ground, # roof=roof, # people=model_values['people'], # glass_fs=model_values['glass'], # wwr=model_values['wwr'], # door=False, # cp_eq = False, # open_fac=model_values['open_fac'], # input_file=INPUT , # output=output, # outdoors=False # ) # add_crack(output, .01) output = (FOLDER+'/cluster'+name_length_cluster.format(cluster_n)+'/'+NAME_STDRD_2+'_cpeq_01'+'_{}_'.format(case)+str(i)+'.epJSON') df = df.append(pd.DataFrame([sample_line+['cluster'+name_length_cluster.format(cluster_n),NAME_STDRD_2+'_cpeq_01'+'_{}_'.format(case)+str(i)+'.epJSON'.format(case)]],columns=col_names+['folder','file'])) # singlezone_diss.main( # zone_area = model_values['area'], # zone_ratio = model_values['ratio'], # zone_height = model_values['zone_height'], # absorptance = model_values['absorptance'], # shading = model_values['shading'], # azimuth = azi, # bldg_ratio = model_values['bldg_ratio'], # wall_u = model_values['wall_u'], # wall_ct = model_values['wall_ct'], # zn=i, # floor_height=model_values['floor_height'], # corner_window=corner_window, # ground=ground, # roof=roof, # people=model_values['people'], # glass_fs=model_values['glass'], # wwr=model_values['wwr'], # door=False, # cp_eq = True, # 
open_fac=model_values['open_fac'], # input_file=INPUT, # output=output, # outdoors=False # ) # add_crack(output, .01) line += 1 df = other_crack_fac.main(df,folder=FOLDER, pattern='_01_') os.chdir(FOLDER) # print('\nRUNNING SIMULATIONS\n') # list_epjson_names = runep_subprocess.gen_list_epjson_names(NUM_CLUSTERS, EXTENSION) # runep_subprocess.main(list_epjson_names, NUM_CLUSTERS, EXTENSION, REMOVE_ALL_BUT, epw_name=EPW_NAME) print('\nPROCESSING OUTPUT\n') output_processing2.main(df, MONTH_MEANS, OUTPUT_PROCESSED) end_time = datetime.datetime.now() total_time = (end_time - start_time) print("Total processing time: " + str(total_time))
[ "marcelosalles@github.com" ]
marcelosalles@github.com
b052d9182d7cc616f37c5f07a4d3abae03216ac3
07d5ce00ce15f0c04d175ac1c54bfa3e5ed949bf
/IntroPython/parse_file.py
5f599cff5af2835bc1396f21ce775d32ef587f46
[]
no_license
mrinal-subash/BINF6308
8beececfc7d87b2703a2cefe92df36f98f46fd3c
237afbb977d1ca65bca490e8c3837d8109492f30
refs/heads/master
2022-04-23T21:22:18.603198
2020-04-25T05:17:45
2020-04-25T05:17:45
null
0
0
null
null
null
null
UTF-8
Python
false
false
899
py
#!/usr/bin/env python # parse_file.py # To use regular expressions you need to import re import re # Set the file path to the Drosophila genome dmel_genome_path = '/scratch/Drosophila/dmel-all-chromosome-r6.17.fasta' # Initialize a line counter line_count = 0; # Initialize a sequence variable seq = '' with open(dmel_genome_path) as dmel_genome: for line in dmel_genome: # Increment the line count by one #line_count += 1 # If the line count is less than 5 #if line_count < 5: # Check to see if the line is a header line (starts with >) if re.match('^>', line): line_count += 1# Print the header print(line) if (line_count <= 50) exit() else: # This is a sequence line so append to seq seq += line # Print the seq variable print(seq)
[ "noreply@github.com" ]
noreply@github.com
3241fdacf8511474ccc549ee916f1c7d86a836cd
f3a3c0a2b161a50224e6104e35fb3dd6c5cc8ad6
/Reactor.py
738c29f9c6b2c8dd0f8034c054850449bf1b6b97
[]
no_license
reubens9/masters_code
19a3ceafc6cfe091866b2a87c91aa882b9b5957a
538161256d7d2c837f6e6b21b8e4b5b02f7fa7a3
refs/heads/master
2020-05-04T21:39:22.045216
2019-04-09T13:53:18
2019-04-09T13:53:18
179,483,977
0
0
null
null
null
null
UTF-8
Python
false
false
2,532
py
# coding: utf-8 import numpy as np def math_test(a,b,c): # a = X[0] # b = X[1] # c = X[2] return a*b*c MMx = 24.04 # g/cmol C H 1.8 O 0.5 N 0.16 Q = 0.9058 # mL/min C_N = 0.0125 # g/L r_Urea = Q / 1000 * C_N # g Urea/min Mass_fr_Urea = 14 * 2 / (12 + 4 + 28 + 16) Mass_fr_Biomass = 14 * 0.16 / (12 + 1.8 + 8 + 0.16 * 14) M_x = 1.9343 # g r_Biomass = r_Urea * Mass_fr_Urea / Mass_fr_Biomass / M_x # CmolX/CmolX/min def rates(r_CO2, r_O2, r_FA): A = 0.1 PO = 1.5 G = 1.8 B = 0.1 T = 0.1 # Q = 0.9058 # mL/min # C_N = 0.0125 # g/L # r_Urea = Q/1000*C_N # g Urea/min # Mass_fr_Urea = 14*2/(12+4+28+16) # Mass_fr_Biomass = 14*0.16/(12 + 1.8 + 8 + 0.16*14) # r_Biomass = r_Urea * Mass_fr_Urea / Mass_fr_Biomass A = np.matrix([#v0 v1 v2 v3 v4 v5 v6 v7 v8 [-1, (1+A), 1, 1, 0, 0, 0, 0, 0], #node [0, 0, 0, -1, 1, 1, 0, 0, 0], #node [0, 0, 0, 0, 0, -1, 2/3, 3/4, 0], #node [0, -G, 2/3, 0, 0, 1/3, 0, -1/4, 2*PO], #ATP [0, B, 2, 0, -1/3, 1/3, -1/2, -1/4, -2], #NADH [0, A, 1, 0, 0, 0, 1/3, -1/4, 0], #CO2 [0, 0, 0, 0, 0, 0, 0, 0, 1], #O2 [0, 0, 0, 0, 0, 0, 0, 1, 0], #Fumaric [0, 1, 0, 0, 0, 0, 0, 0, 0]]) B = np.matrix( [0, 0, 0, T, 0, r_CO2, r_O2, r_FA, r_Biomass] ) rate = np.linalg.solve(A, B) return (rate[6, 0]) #Ethanol def gas_rate_O2(Cin, Cout, Qin, m): """ Qin: mL/min P: atm """ P = 1 # atm R = 0.08205746 # L atm/mol K T = 35 + 273.15 # K M_x = m/MMx # Cmol Vrxn = 2.603 # L gas phase reactor volume Q = Qin/1000 F_total = P*Q/R*T Fin = F_total*Cin/100 Fout = F_total*Cout/100 rO2v = 1/Vrxn*(Fin - Fout) rO2x = rO2v*Vrxn/M_x return (rO2x) # mol/CmolX/min def fumaric_rate(dosing_avg, RPM, m): M_x = m/ MMx Q = 0.042*RPM/1000 * dosing_avg # L/min/Vrxn C_NaOH = 10 #mol/L F_NaOH = Q*C_NaOH # mol/min/Vrxn F_FA = F_NaOH/2 * 1.087/M_x # mol/min/CmolX return (F_FA)
[ "reubenswart@gmail.com" ]
reubenswart@gmail.com
17af078221d30f88e222eb9d6c5861dc1a20e88a
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/verbs/_disintegrating.py
532e7be5907664219ed247bb6f5173a80c0ad3de
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
282
py
from xai.brain.wordbase.verbs._disintegrate import _DISINTEGRATE #calss header class _DISINTEGRATING(_DISINTEGRATE, ): def __init__(self,): _DISINTEGRATE.__init__(self) self.name = "DISINTEGRATING" self.specie = 'verbs' self.basic = "disintegrate" self.jsondata = {}
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
51608c5d43d59f5805eb4025757cc2d391ff256c
6195c30bee626877e7c5762fb654adf1ae250bd3
/problem_32.py
c0aad91bb909b018b3d77329e2c7c49fc544a13c
[]
no_license
sergun4ik/projecteuler
8b1bac8ad7fc9485dddef6f1b89c5aa2ac00cef4
243aea89d0cd8cc9a27a72395c90861fd59fc0ed
refs/heads/master
2021-11-26T02:31:19.520044
2021-11-16T12:44:34
2021-11-16T12:44:34
188,433,458
0
0
null
null
null
null
UTF-8
Python
false
false
433
py
''' Solves problem #32 on https://projecteuler.net Sergey Lisitsin. May 2019''' pans = set() digits = '123456789' for x in range(1000): for y in range(10000): interim = str(x) + str(y) + str(x*y) interim = ''.join(sorted(interim)) print (interim) if interim == digits: pans.add(x*y) thelist = list(pans) result = 0 for x in thelist: result += x print(result)
[ "sergeyl@sicl.com" ]
sergeyl@sicl.com
27d796ebef60da62d2097d9ebfac8fc3b5f5c511
dd56626ccde1ccd5165fd251896125f0b9f0e6bb
/All-Classes_DotPY_Modified_Sublime/Class31-Accessor-MutatorMethod-Classmethod-InstanceMethod-ClassVariable-InstanceVariable.py
9d070dd01c27086fd6f03ee001b6132488bcc8b8
[]
no_license
sumanes4u/Anaconda-Projects
7df103a3af244f97f8a64cdd9a6f35ad5a7026c2
38c2cecae00dcb238808aa0a6f92cf4be36f001b
refs/heads/master
2020-04-07T04:13:56.013322
2018-11-18T03:48:42
2018-11-18T03:48:42
158,046,063
0
0
null
null
null
null
UTF-8
Python
false
false
4,817
py
# coding: utf-8 # # Class # - Constructor ( init method ) # - Namespaces # - types of methods # - Instance methods # - class methods # - static methods # - Inner Classes # - Inheritance # - Polymorphism # - Method overloading # In[3]: class Sample: # This is a constructor def __init__(self): self.x = 10 # this is an instance method def modify(self): self.x += 1 # ( x = x + 1) # In[4]: # create two instances s1 = Sample() s2 = Sample() # In[5]: print ('x in s1= ', s1.x) print ('x in s2= ', s2.x) # In[6]: # modify x in s1 s1.modify() # In[7]: print ('x in s1= ', s1.x) print ('x in s2= ', s2.x) # In[8]: # class variable vs static variables # In[17]: class Sample2: # This is a constructor def __init__(self,var1,var2,var3,var4): self.x = 10 self.var1 = var1 self.var2 = var2 self.var3 = var3 self.var4 = var4 # this is an instance method def modify(self): self.x += 1 # ( x = x + 1) def method1(self): print (self.var1) def method2(self): print (self.var2) def method3(self): print (self.var3) def method4(self): print (self.var4) # In[20]: s3 = Sample2('a','b','c','d') # instance/object creation # In[19]: s3.method4() # In[21]: # instance method or instance variable # In[22]: # class variables or static variables # In[27]: class Sample3: # this is a class var x = 10 # this is a class method @classmethod # decorator def modify(cls): cls.x += 1 # create 2 instaances s1 = Sample3() s2 = Sample3() print ('x in s1 = ', s1.x) print ('x in s2 = ', s2.x) # modify x in s1 s1.modify() print ('x in s1 = ', s1.x) print ('x in s2 = ', s2.x) # In[28]: # Namespace class Student(): n = 10 # In[29]: print (Student.n) # In[30]: Student.n += 1 # In[31]: print (Student.n) # In[32]: s1 = Student() # In[33]: s1.n # In[34]: s2 = Student() # In[35]: s2.n # In[36]: s1.n = 20 # In[37]: s2.n # In[38]: s1.n # In[42]: class Student: # This is a constructor def __init__(self,n='',m=0): self.name = n self.marks = m # instance method def display(self): print ('HI',self.name) print ('Your marks', 
self.marks) # to calculate grades def calculate(self): if (self.marks >= 600): print ('First Grade') elif (self.marks >= 500): print ('Second Grade') elif (self.marks >= 350): print ('Third Grade') else: print ('You got failed') n = int(input('How many students? ')) i = 0 while(i < n): name = input('Enter name: ') marks = int(input("Enter marks: ")) # create Student class instance and store data s = Student(name,marks) s.display() s.calculate() i += 1 print ('...................') # In[ ]: # Instance method : #Two types : 1) accessor method 2) mutator method A#ccessor method - simple access or read data of the variable. getABC() # getter methods # In[56]: # getter # Accessor method def getName(self): return self.name # In[ ]: # setter setABC() # Mutator method def setname(self,name): self.name = name # setting some value # In[60]: class Student: # mutator method def setName(self,name): self.name = name # Accessro method def getName(self): return self.name # mutator method def setMarks(self,marks): self.marks = marks # accessor method def getMarks(self): return self.marks n = int(input('How many students? ')) i = 0 while(i < n): s = Student() name = input('Enter name: ') s.setName(name) marks = int(input("Enter marks: ")) s.setMarks(marks) # retrieve the data print ('Hi ', s.getName()) print ('Your marks', s.getMarks()) i += 1 print ('...................') # In[61]: # class methods # static methods # In[64]: class Bird(): wings = 2 @classmethod def fly(cls,name): print('{} flies with {} wings'.format(name,cls.wings)) Bird.fly('Sparrow') Bird.fly('Pigeon') # In[65]: # static method # In[69]: class Myclass: n = 0 # class variable or static var def __init__(self): Myclass.n = Myclass.n+1 @staticmethod def noObject(): print ('No. of instance created: ', Myclass.n) obj1 = Myclass() obj2 = Myclass() obj3 = Myclass() obj3 = Myclass() Myclass.noObject() # In[ ]:
[ "ssamarthi@arubanetworks.com" ]
ssamarthi@arubanetworks.com
2c22ce041558ee668c89454dd1b433ef527f4d5f
c0c506702f51046c11085249fbb45dd01af6c11f
/sql-execute.py
2c4710d5996d702ccc23ef0d77ebf77f8da5aa13
[]
no_license
ashishup1999/Songs-detection-by-audio-fingerprinting
d6f7a61783003a7c908e87a27e8f5af6909bbe18
98f56d06cd9c991a1fefbfdd2293a8bee3fac4ec
refs/heads/main
2022-12-25T15:55:22.652039
2020-10-11T11:57:05
2020-10-11T11:57:05
303,103,832
1
0
null
null
null
null
UTF-8
Python
false
false
497
py
#!/usr/bin/python import argparse import sys from libs.db_sqlite import SqliteDatabase from termcolor import colored from argparse import RawTextHelpFormatter if __name__ == '__main__': parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) parser.add_argument('-q', '--query', nargs='?') args = parser.parse_args() if not args.query: parser.print_help() sys.exit(0) db = SqliteDatabase() row = db.executeOne(args.query) print row
[ "noreply@github.com" ]
noreply@github.com
51a7abc5c786abeb3e55dc95ed53aef57e85b34a
94df6050f2a262da23f62dd678ccc4366b7657fc
/temporary/bin/tqdm
9efa8c44cdb5ce4f8dc9684a0b3710267894d570
[]
no_license
EkenePhDAVHV/phd-autonomous-cars-frank
29cc2fc608db53d4d060422022dc5019cf6360f0
1daed3425bfad99dac31543fbeb7950e25aa2878
refs/heads/main
2023-04-29T06:02:59.444072
2021-05-23T11:04:07
2021-05-23T11:04:07
357,157,735
0
0
null
null
null
null
UTF-8
Python
false
false
265
#!/home/ekene/PycharmProjects/phd-autonomous-cars-frank/temporary/bin/python # -*- coding: utf-8 -*- import re import sys from tqdm.cli import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) sys.exit(main())
[ "e.f.ozioko@pgr.reading.ac.uk" ]
e.f.ozioko@pgr.reading.ac.uk
cb0ffc8b1d6f6dd64345336453f628b38d80a9f1
90596c9b5d9db8bbfdf3dd996c9f00bd3f60eec9
/ultimatemusic/music.py
b695740771e09f059637dca410997e5ed41b5841
[]
no_license
Atharvious/discord-bot
b20de471b2d6e850e8688d9f49ccfa38671c3c99
3d3060a4a7f59d76b8c22b345e6419c73cd5d6bd
refs/heads/main
2023-04-05T18:55:51.430398
2021-03-31T01:02:03
2021-03-31T01:02:03
333,718,433
0
0
null
null
null
null
UTF-8
Python
false
false
3,070
py
from dotenv import load_dotenv import os load_dotenv() API = os.getenv('YOUTUBE_API') api_name = "youtube" api_version = "v3" from googleapiclient.discovery import build youtube = build(api_name,api_version,developerKey = API) class YouTubeParser: def __init__(self,argument): self.keyword = argument self.URL = {} self.songs = [] def get_url(self, keyword = None): if keyword is None: query = self.keyword elif (keyword[0:4] == 'http') or (keyword[0:3] == "www"): if keyword[24:32] == "playlist": playlist_id = keyword[38:] URL = {'link': playlist_id, 'type': "playlist"} elif keyword[24:32] == "watch?=v": i = 32 vid_id = [] while keyword[i] != "&" and i < len(keyword): vid_id.append(keyword[i]) link = "https://www.youtube.com/watch?v="+ ''.join(vid_id) URL = {'link': link, 'type': "video"} else: query = keyword request = youtube.search().list( part = 'snippet', maxResults = 10, q = query ) responses = request.execute() top_result = responses['items'][0]['id'] if top_result['kind'] == "youtube#video": url_type = "video" url = "https://www.youtube.com/watch?v="+top_result['videoId'] if top_result['kind'] == "youtube#playlist": url_type = "playlist" url = top_result['playlistId'] if top_result['kind'] == "youtube#channel": url_type = "channel" url = top_result['channelId'] URL = {'link' : url, 'type' : url_type } self.URL = URL def get_video(self): link = self.URL['link'] self.songs.append(link) def get_playlist(self): request = youtube.playlistItems().list( part = "snippet, contentDetails", maxResults = 10, playlistID = self.URL['link'] ) response = request.execute() playlist_items = response['items'] for video in playlist_items: video_id = video['contentDetails']['videoId'] self.songs.append("https://www.youtube.com/watch?v="+video_id) def get_playlist_from_channel(self): """request = youtube.channelSections().list( part = "snippet, contentDetails", channelID = self.URL['link'] ) response = request.execute() playlistId = response['items']""" pass def route(self): if 
self.URL['type'] == "video": self.get_video() if self.URL['type'] == "playlist": self.get_playlist() if self.URL['type'] == "channel": pass def enqueue(self): self.get_url() self.route() def remove_from_queue(self, song): if song in self.songs: self.songs.remove(song) class Queue(): def __init__(self)
[ "rainmaker@pop-os.localdomain" ]
rainmaker@pop-os.localdomain
547062260665854f2daa9245458cd458043ab8b7
02984c41ad9934a41de106d8be405cb65b265a3f
/matrix/image.py
dd536cebb090c549585725f28220c5f51355ec09
[]
no_license
pcp135/C-CtM
865637a19c0ae3494cfa1f895a517cfe053981c6
3bbe833a4501ec994c48f8c9c747bf0e785a5d9e
refs/heads/master
2016-09-05T10:05:58.367061
2013-09-08T00:34:37
2013-09-08T00:34:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,870
py
""" Basic types: file - a png file on disk image - a list of list of pixels. pixels can be triples of RGB intensities, or single grayscale values. vec - a vector with domain {0..width-1}x{0..height-1} display - not a type per se, but rather causing the type to be shown on screen Functions convert between these formats, and also can write to temporary files and display them with a web browser. """ # To do: check types of arguments, check that image has no alpha channel # Note that right now, we ignore the alpha channel, but allow it. - @dbp import png import vec # Native imports import webbrowser import tempfile import os import atexit # Round color coordinate to nearest int and clamp to [0, 255] def color_int(col): return max(min(round(col), 255), 0) # utility conversions, between boxed pixel and flat pixel formats # the png library uses flat, we use boxed. def boxed2flat(row): return [color_int(x) for box in row for x in box] def flat2boxed(row): # Note we skip every 4th element, thus eliminating the alpha channel return [tuple(row[i:i+3]) for i in range(0, len(row), 4)] ## Image conversions def isgray(image): return type(image[0][0]) == int def color2gray(image): """ Converts a color image to grayscale """ # we use HDTV grayscale conversion as per https://en.wikipedia.org/wiki/Grayscale return [[int(0.2126*p[0] + 0.7152*p[1] + 0.0722*p[2]) for p in row] for row in image] def gray2color(image): """ Converts a grayscale image to color """ return [[(p,p,p) for p in row] for row in image] #extracting and combining color channels def rgbsplit(image): """ Converts an RGB image to a 3-element list of grayscale images, one for each color channel""" return [[[pixel[i] for pixel in row] for row in image] for i in (0,1,2)] def rgpsplice(R,G,B): return [[(R[row][col],G[row][col],B[row][col]) for col in range(len(R[0]))] for row in range(len(R))] ## To and from files def file2image(path): """ Reads an image into a list of lists of pixel values (tuples with three values). 
This is a color image. """ (w, h, p, m) = png.Reader(filename = path).asRGBA() # force RGB and alpha return [flat2boxed(r) for r in p] def image2file(image, path): """ Writes an image in list of lists format to a file. Will work with either color or grayscale. """ if isgray(image): img = gray2color(image) else: img = image with open(path, 'wb') as f: png.Writer(width=len(image[0]), height=len(image)).write(f, [boxed2flat(r) for r in img]) ## To and from vecs def image2vec(image): """ Converts an image in list of lists format to a vector. Will work with either color or grayscale. """ if isgray(image): D = {(x,y) for x in range(len(image[0])) for y in range(len(image))} F = {(x,y):image[y][x] for (x,y) in D} else: D = {(x,y,c) for c in ['r','g','b'] for x in range(len(image[0])) for y in range(len(image))} F = dict() for y in range(len(image)): for x in range(len(image[y])): F[(x,y,'r')] = image[y][x][0] F[(x,y,'g')] = image[y][x][1] F[(x,y,'b')] = image[y][x][2] return vec.Vec(D, F) def vec2image(vec): """ Converts a vector to an image in list of lists format """ image = [] width = max(vec.D, key=lambda p: p[0])[0] height = max(vec.D, key=lambda p: p[1])[1] # check if grayscale e = vec.D.pop() vec.D.add(e) gray = len(e) == 2 for y in range(height): row = [] for x in range(width): if gray: row += [vec[(x,y)]] else: row += [(vec[(x,y,'r')], vec[(x,y,'g')], vec[(x,y,'b')])] image += [row] return image ## Shortcuts - files to vecs and vice-versa def file2vec(path): """ Reads an image from a file and turns it into a vector """ return image2vec(file2image(path)) def vec2file(vec, path): """ Reads an image from a file and turns it into a vector """ image2file(vec2image(vec), path) ## Display functions def image2display(image, browser=None): """ Stores an image in a temporary location and displays it on screen using a web browser. 
""" path = create_temp('.png') image2file(image, path) hpath = create_temp('.html') with open(hpath, 'w') as h: h.writelines(["<html><body><img src='file://%s'/></body></html>" % path]) openinbrowser('file://%s' % hpath, browser) def vec2display(vec): """ Stores an image in vec format in a temporary location and displays it on screen using a web browser. """ image2display(vec2image(vec)) def image2animate(image_array, delay=1, browser=None): """ Takes an array of images and displays them as an animation with `delay` seconds of pause between each one """ hpath = create_temp('.html') with open(hpath, 'w') as h: h.writelines( ["<html>\n" ,"<script type='text/javascript'>\n" ,"function start() {\n" ,"var c = document.getElementById('container');\n" ,"var active = c.firstChild;\n" ,"active.style.zIndex = 1;\n" ,"function go() {\n" ," active.style.zIndex = 0;\n" ," active = active.nextSibling;\n" ," if (active != null) {\n" ," active.style.zIndex = 1;\n" ," window.setTimeout(go,%d);\n" % int(delay * 1000) ," }\n" ,"}\n" ,"window.setTimeout(go,%d);\n" % int(delay * 1000) ,"};\n" ,"</script>\n" ,"<body onload='start()'><div id='container' style='position: relative;'>"]) for im in image_array: path = create_temp('.png') image2file(im, path) h.writelines(["<img src='%s' style='z-index: 0; position: absolute;'>" % path]) h.writelines(["</div>\n"]) openinbrowser('file://%s' % hpath, browser) _browser = None def setbrowser(browser=None): """ Registers the given browser and saves it as the module default. This is used to control which browser is used to display the plot. The argument should be a value that can be passed to webbrowser.get() to obtain a browser. If no argument is given, the default is reset to the system default. webbrowser provides some predefined browser names, including: 'firefox' 'opera' If the browser string contains '%s', it is interpreted as a literal browser command line. The URL will be substituted for '%s' in the command. 
For example: 'google-chrome %s' 'cmd "start iexplore.exe %s"' See the webbrowser documentation for more detailed information. Note: Safari does not reliably work with the webbrowser module, so we recommend using a different browser. """ global _browser if browser is None: _browser = None # Use system default else: webbrowser.register(browser, None, webbrowser.get(browser)) _browser = browser def getbrowser(): """ Returns the module's default browser """ return _browser def openinbrowser(url, browser=None): if browser is None: browser = _browser webbrowser.get(browser).open(url) # Create a temporary file that will be removed at exit # Returns a path to the file def create_temp(suffix='', prefix='tmp', dir=None): _f, path = tempfile.mkstemp(suffix, prefix, dir) os.close(_f) remove_at_exit(path) return path # Register a file to be removed at exit def remove_at_exit(path): pass #atexit.register(os.remove, path)
[ "pcp135@gmail.com" ]
pcp135@gmail.com
4bf28bc7353c46ecf708c6bb3059c4f96d510296
d188f791dd2e096fb3b094e086b35f3c642cd40c
/src/data/make_user_csv.py
080ba85eea2313b0c131061054a9bd1ee9335885
[ "MIT" ]
permissive
ravitejau/User_Churning_Prediction_StackOverflow
71d58a02ff06d78f115af436ffc5ac8e65d00a40
58ec5822e3ca23e912d154004e353df1725e2bc7
refs/heads/master
2020-04-11T19:34:00.768223
2018-12-16T20:42:27
2018-12-16T20:42:27
162,038,575
0
0
null
null
null
null
UTF-8
Python
false
false
1,263
py
import xml.etree.cElementTree as et import pandas as pd def getvalueofnode(node): """ return node text or None """ return node.text if node is not None else None def users(): """ Convert PostHistory.xml to pandas dataframe """ parsed_xml = et.parse("../../data/raw/Users.xml") dfcols = ['Id','AccountId','CreationDate', 'Reputation','Views','UpVotes','DownVotes','LastAccessDate'] df_xml = pd.DataFrame(columns=dfcols) i=0 for node in parsed_xml.getroot(): if i%10000==0: print(i,df_xml.shape) i+=1 Id=node.attrib.get('Id') AccountId = node.attrib.get('AccountId') CreationDate = node.attrib.get('CreationDate') Reputation = node.attrib.get('Reputation') Views = node.attrib.get('Views') UpVotes = node.attrib.get('UpVotes') DownVotes = node.attrib.get('DownVotes') LastAccessDate = node.attrib.get('LastAccessDate') df_xml = df_xml.append( pd.Series([Id,AccountId, CreationDate, Reputation, Views,UpVotes,DownVotes,LastAccessDate], index=dfcols), ignore_index=True) return df_xml users_df_xml=users() users_df_xml.to_csv("../../data/processed/users.csv",index=False)
[ "raviteja.upmaka1994@gmail.com" ]
raviteja.upmaka1994@gmail.com
afdf99debddabc6eca316386fdd77ca357973217
8244500da96cc8bda534574a3519bebfc19f09aa
/portfolio/urls.py
1a061d243046b620688c6268271668b3ec80ec17
[]
no_license
Antrowmano/portfoilo-Django-project
0deb953308cfdc792b7d33949977e8db4caf822d
978fd023460ffee8fc17089ac61b6784d45e4612
refs/heads/master
2023-07-19T20:21:47.652146
2021-08-28T07:16:52
2021-08-28T07:16:52
null
0
0
null
null
null
null
UTF-8
Python
false
false
795
py
"""portfolio URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include urlpatterns = [ path('admin/', admin.site.urls), path('', include('home.urls')) ]
[ "antrowmano543@gmail.com" ]
antrowmano543@gmail.com
ff27d1695dcafdf6c0990e339bae4ebdc384fe83
c5a921726a3805663d26a2dbaa47e49497931d4e
/Algorithms/challenges/lc437_path_sum_3.py
3c2379a605bdceaccb345b85e6736d43f336db08
[]
no_license
snowdj/cs_course
a50d07548198b4202e8abde01ec572e2cce38ab3
fa6504cb5145d10952f4615478fa745f4b35ba13
refs/heads/master
2020-03-17T15:18:52.190747
2018-05-13T08:08:51
2018-05-13T08:08:51
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,111
py
""" Time: O(n) Space: O(n) You are given a binary tree in which each node contains an integer value. Find the number of paths that sum to a given value. The path does not need to start or end at the root or a leaf, but it must go downwards (traveling only from parent nodes to child nodes). The tree has no more than 1,000 nodes and the values are in the range -1,000,000 to 1,000,000. Example: root = [10,5,-3,3,2,null,11,3,-2,null,1], sum = 8 10 / \ 5 -3 / \ \ 3 2 11 / \ \ 3 -2 1 Return 3. The paths that sum to 8 are: 1. 5 -> 3 2. 5 -> 2 -> 1 3. -3 -> 11 """ # Definition for a binary tree node. class TreeNode: def __init__(self, x): self.val = x self.left = None self.right = None # Brute-force DFS. Pre-order traversal. # Time: O(nlg(n)), worst O(n^2) Space: O(lg(n)), worst O(n) class Solution: def pathSum(self, root, target): """ :type root: TreeNode :type sum: int :rtype: int """ res, stk = 0, [] # a stack to remember the path from root to current node def dfs(node, cumsum): nonlocal res, target if not node: return cumsum += node.val if cumsum == target: res += 1 stk.append(node.val) t = cumsum for i in range(len(stk)-1): # Not including the last one to avoid counting none-node case for target==0 t -= stk[i] if t == target: res += 1 dfs(node.left, cumsum) dfs(node.right, cumsum) stk.pop() dfs(root, 0) return res # Pre-order DFS with 2-sum hash table # Time: O(n) Space: O(n+lg(n)) from collections import defaultdict class Solution2: def pathSum(self, root, target): """ :type root: TreeNode :type sum: int :rtype: int """ res, tbl = 0, defaultdict(int) tbl[0] = 1 def dfs(node, cumsum): nonlocal res, tbl if not node: return cumsum += node.val res += tbl[cumsum - target] tbl[cumsum] += 1 # increament after updating result to avoid counting none-node case for target==0 dfs(node.left, cumsum) dfs(node.right, cumsum) tbl[cumsum] -= 1 dfs(root, 0) return res # Same as solution 1 brute-force, but using recursion instead of nodes stack. 
# Time: O(nlg(n)), worst O(n^2) Space: O(lg(n)), worst O(n) class Solution3: def pathSum(self, root, target): """ :type root: TreeNode :type sum: int :rtype: int """ if not root: return 0 return self.sumup(root, 0, target) + self.pathSum(root.left, target) + self.pathSum(root.right, target) def sumup(self, node, pre, target): if not node: return 0 cur = pre + node.val return (cur == target) + self.sumup(node.left, cur, target) + self.sumup(node.right, cur, target)
[ "jesse@liu.onl" ]
jesse@liu.onl
a3f236ba9acc0a4b6555b96f6a332662b412630d
4591b4c66f443a2a54c858a8f3b529b8f388a5e4
/workshops/migrations/0009_auto_20141201_0016.py
63d5f5e8ede381a4c432f6a8b3b7406a26f704cf
[ "MIT" ]
permissive
sburns/amy
39e11b48212304c7620e56a66c2f585d3d5951ae
7a315ba934f45e2234aaf1ea0e953b88a6239e10
refs/heads/master
2020-12-28T20:31:22.103801
2015-01-20T20:27:31
2015-01-20T20:27:31
27,539,122
0
1
null
2015-01-27T17:43:06
2014-12-04T12:18:40
Python
UTF-8
Python
false
false
451
py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('workshops', '0008_person'), ] operations = [ migrations.AlterField( model_name='person', name='email', field=models.CharField(max_length=100, unique=True, null=True), preserve_default=True, ), ]
[ "gvwilson@third-bit.com" ]
gvwilson@third-bit.com
6e5431d642069e5b1b07fb396031e63b157ca4ef
d0267cedb13177a01487e11615588ee597ee498c
/src/marks/vscode_settings.py
8524e50b1e1486100bb6fdfb374adc1ac08a56ed
[ "MIT" ]
permissive
soheeyang/cursorless-talon
aa7ffffce9d1c75f380e271943cc6ba4a6aac2af
c3438d7d6300473d95568f72c60ef6e9421bbe43
refs/heads/main
2023-08-15T22:20:43.883566
2021-10-07T13:37:58
2021-10-07T13:37:58
419,826,361
0
0
null
null
null
null
UTF-8
Python
false
false
2,076
py
import os from talon import Context, Module, actions from pathlib import Path from ..vendor.jstyleson import loads mod = Module() windows_ctx = Context() mac_ctx = Context() linux_ctx = Context() windows_ctx.matches = r""" os: windows """ mac_ctx.matches = r""" os: mac """ linux_ctx.matches = r""" os: linux """ @mod.action_class class Actions: def vscode_settings_path() -> Path: """Get path of vscode settings json file""" pass def vscode_get_setting(key: str, default_value: any = None): """Get the value of vscode setting at the given key""" path: Path = actions.user.vscode_settings_path() settings: dict = loads(path.read_text()) if default_value is not None: return settings.get(key, default_value) else: return settings[key] def pick_path(paths: list[Path]): existing_paths = [path for path in paths if path.exists()] return max(existing_paths, key=lambda path: path.stat().st_mtime) @mac_ctx.action_class("user") class MacUserActions: def vscode_settings_path() -> Path: return pick_path( [ Path( f"{os.environ['HOME']}/Library/Application Support/Code/User/settings.json" ), Path( f"{os.environ['HOME']}/Library/Application Support/VSCodium/User/settings.json" ), ] ) @linux_ctx.action_class("user") class LinuxUserActions: def vscode_settings_path() -> Path: return pick_path( [ Path(f"{os.environ['HOME']}/.config/Code/User/settings.json"), Path(f"{os.environ['HOME']}/.config/VSCodium/User/settings.json"), ] ) @windows_ctx.action_class("user") class WindowsUserActions: def vscode_settings_path() -> Path: return pick_path( [ Path(f"{os.environ['APPDATA']}/Code/User/settings.json"), Path(f"{os.environ['APPDATA']}/VSCodium/User/settings.json"), ] )
[ "noreply@github.com" ]
noreply@github.com
4a37ada10f67469365dc3441bc5ec20f0d5d7c87
cebcd4c29a2f7542f4fed1109e6bdfc4fa99beca
/formdemo/models.py
0bfd77102df4b87bb6de58e11daffb711d55b15b
[]
no_license
mkdika/django-form-demo
6a35e3514c1f79fa5fb5a4754bcaad0be80a78c2
0557956aa11a0742b06a17c6e223242c36460c48
refs/heads/master
2020-03-27T18:56:37.982967
2018-09-01T00:41:39
2018-09-01T00:41:39
146,954,956
1
0
null
null
null
null
UTF-8
Python
false
false
1,814
py
from django.db import models from ckeditor.fields import RichTextField from datetime import datetime class Author(models.Model): EDUCATION_DEGREE = ( ('PRI', 'Primary'), ('INT', 'Intermediate'), ('HIG', 'High'), ('BAC', 'Bachelor Degree'), ('MAS', 'Master Degree'), ('PHD', 'Doctor Degree'), ) MEMBER_TYPES = ( ('1','REGULAR'), ('2','GOLD'), ('3','PLATINUM') ) # name, TextField name = models.CharField(max_length=100, unique=True) # birth_date, DateField birth_date = models.DateField() # last_education, ChoiceField (ComboBox) last_education = models.CharField( max_length=3, choices=EDUCATION_DEGREE, default='HIG', null=True ) # join_date, DateTimeField join_date = models.DateTimeField(default=datetime.now) # reminder_time, TimeField reminder_time = models.TimeField() # email, TextField (Email Validation) email = models.EmailField(null=True) # website, TextField (URL validation) website = models.URLField(null=True) # address, TextArea address = models.TextField(null=True) # member_type, RadioField (Single Choices) member_type = models.CharField( max_length=20, choices=MEMBER_TYPES, default='1' ) # term_day, IntegerField term_day = models.IntegerField(default=30) # balance, DecimalField balance = models.DecimalField(default=0.00, decimal_places=2, max_digits=10) # active, BooleanField (single) active = models.BooleanField(default=True) # bio, RichTextField (WYSIWYG) bio = RichTextField(blank=True) def __str__(self): return f"{self.name} ({self.member_type})" class Meta: db_table = 'tb_author'
[ "mkdika@gmail.com" ]
mkdika@gmail.com
d31f11bddf9791dee17880f0c0425c13ad495a90
ab6c6559d9cfac36c3c4ece192fa2300767662d1
/Python Game Development for Beginners - Working Files/Chapter 5/Increasing Challenge with Levels Part I/main.py
38ecd797fc5c5f6c15d8d84cfd91391e4842c047
[]
no_license
Igor-Nosatov/PythonGameDev_Trinket
962b86572c74c64652a24768dfec2101fcae221f
e6166f69307ded6880b0aaa3299c0a151807bb9c
refs/heads/master
2020-06-24T20:22:57.187289
2016-05-03T10:33:26
2016-05-03T10:33:26
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,111
py
# import the turtle module so we can use all the neat code it contains import turtle from helpercode import BoxTurtle, printwin, checkpos, maketurtles from time import sleep from random import randint, choice # Create variables to contain our BoxTurtle objects boxturtles = maketurtles() # Create a variable `tina` that is a Turtle() object. Set shape to 'turtle' tina = turtle.Turtle() tina.shape('turtle') tina.penup() # Create a variable `screen`, a Screen() object, that will handle keyss screen = turtle.Screen() # Keyboard controls def go_left(): tina.left(11) def go_right(): tina.right(11) # Check intersections with boxes when the turtle moves def go_forward(): tina.forward(10) check_intersect() checkpos([tina]) def go_backward(): tina.backward(10) check_intersect() checkpos([tina]) # This function loops through the `boxes` list and uses each # box's `intersect()` method to check whether it intersects # with tina. def check_intersect(): for box in boxturtles: if not box.hit and box.intersect(tina): box.hit = True box.flash() # Tell the program which functions go with which keys screen.onkey(go_left, 'Left') screen.onkey(go_right, 'Right') screen.onkey(go_forward, 'Up') screen.onkey(go_backward, 'Down') # Debugging function - press 'w' to hit all but one turtle def win(): for t in boxturtles[1:]: screen.tracer(0) t.flash() t.hit = True screen.tracer(1) screen.onkey(win, 'w') # This play function will call itself every .1 seconds and return if the player loses def play(): # Tell the screen to listen for key presses screen.listen() # Check boxes' hit state hits = [] for box in boxturtles: hits.append(box.hit) # If all boxes are hit, the game is over! if False not in hits: printwin(tina) return mover = choice(boxturtles) if not mover.hit: mover.move() # Sometimes,a turtle will awaken else: if randint(0,100) < 5: mover.awaken() checkpos(boxturtles) # start the function over in 100 miliseconds (.1 seconds) screen.ontimer(play, 100) play() turtle.done()
[ "lrbeaver@gmail.com" ]
lrbeaver@gmail.com
663f3769cf5a7c92bedbcb78fa4493d70bd8b3a0
bde8ab827d5b2c2f3c974536b02b116e47ed20f3
/optimize.py
87de30db2c5b1c51923693c110bb76cd665005c6
[ "MIT" ]
permissive
benlawson/MusicLingo
6b5d83b70db65002a033e58f108a9a5a1c065ffd
edf50e4c0e399b5cea595c65ac8c3226fb04861b
refs/heads/master
2021-01-17T06:40:21.079699
2017-04-29T17:22:33
2017-04-29T17:22:33
47,513,310
0
1
null
2016-09-16T23:38:58
2015-12-06T20:51:28
Python
UTF-8
Python
false
false
1,006
py
##################################################################### # optimize.py # Node = dict Leaf = str Num = int def eliminateDeadCode(s): if type(s) == Leaf or type(s) == Num: return s if type(s) == Node: for label in s: children = s[label] if label == 'Print': [e, p] = children return {'Print':[eliminateDeadCode(e), eliminateDeadCode(p)]} elif label == 'Play': [e, p] = children return {'Play': [eliminateDeadCode(e), eliminateDeadCode(p)]} elif label == 'And': [e1, e2] = children for label in e1: if label == 'Song': for label in e2: if label == 'Song': return {'Song' : ['']} return {'And':children} return {label : [eliminateDeadCode(c) for c in children]} #eof
[ "balawson@bu.edu" ]
balawson@bu.edu
50ac6709acfc86d952d4ef089c648926671f477b
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
/ml-flask/Lib/site-packages/sacremoses/corpus.py
721a5bbd1be20eb165825ba3c6dae6936c425b69
[ "MIT" ]
permissive
YaminiHP/SimilitudeApp
8cbde52caec3c19d5fa73508fc005f38f79b8418
005c59894d8788c97be16ec420c0a43aaec99b80
refs/heads/master
2023-06-27T00:03:00.404080
2021-07-25T17:51:27
2021-07-25T17:51:27
389,390,951
0
0
null
null
null
null
UTF-8
Python
false
false
129
py
version https://git-lfs.github.com/spec/v1 oid sha256:d72f15b792d94c6d388af9f8c568d412c657f055c925abdae378464c275c54a4 size 5016
[ "yamprakash130@gmail.com" ]
yamprakash130@gmail.com
48aa40f60c02ad5c330e0865defcba13e49a1510
518260d9991ff4771ffda214ffde116ce239b369
/hw3/hw3.py
8218d0dc9857e1cf17d6ac8dff1d3a52288991a9
[]
no_license
Ivan-Hao/IntelligentVehicle
1523926c6eefa9567e7298547318aa1f157ce8c0
c83d1f4c54a62c394a408dcda57d9bb224c62ccb
refs/heads/master
2020-09-16T21:27:55.325141
2020-06-13T07:42:19
2020-06-13T07:42:19
223,892,054
0
0
null
null
null
null
UTF-8
Python
false
false
1,837
py
import sys time = 0 back_edge = [] class vertex(): def __init__(self, args): self.number = args self.edge = [] self.ischeck = 'white' self.checktime = 0 self.endtime = 0 self.predecessor = None def add_connect(self, args): self.edge.append(args) def dfs(vertex,vertex_list,predecessor=None): global time global back_edge time += 1 vertex.checktime = time vertex.ischeck = 'gray' for j in vertex.edge : if j in vertex_list.keys() and vertex_list[j].ischeck == 'white': vertex_list[j].predecessor = vertex dfs(vertex_list[j],vertex_list,vertex) elif j in vertex_list.keys() and vertex_list[j].ischeck == 'gray': back_edge.append(dict(front = vertex.number, end = vertex_list[j].number)) vertex.ischeck = 'black' time +=1 vertex.endtime =time if __name__ == '__main__': sys.setrecursionlimit(1000000) input_file = open(sys.argv[1],'r') total_amount = int(input_file.readline()) total_edge = int(input_file.readline()) vertex_list = {} for edge in input_file.readlines(): vertex_front,vertex_rear = edge.strip().split() if vertex_front not in vertex_list.keys(): temp = vertex(vertex_front) temp.add_connect(vertex_rear) vertex_list[vertex_front]=temp else: vertex_list[vertex_front].add_connect(vertex_rear) input_file.close() for i in vertex_list.keys(): if vertex_list[i].ischeck == 'white' : dfs(vertex_list[i],vertex_list) if back_edge == []: print("There is no cycle in graph!") else: print("There is a cycle in graph!") for j in back_edge: print("The removal edge",j)
[ "wangyunhao@cmlab.csie.ntu.edu.tw" ]
wangyunhao@cmlab.csie.ntu.edu.tw
83819dde3730e1a0ba29a2eae622ceb336a6cb20
d0582057d848ffd1e6481824282a94c23dc3b5b3
/setup-tty2img.py
f68c25dbcaeb5c55b6ceb5cc6caea5d1eea5b7b2
[ "MIT" ]
permissive
opcode-eu-org-libs/asciicast2movie
2f9d3be188694e6ae3fa70be96bb54620638da12
986576bd63add5bed0d5aa4a21cc5539c6bee2a7
refs/heads/master
2023-04-19T10:38:36.819255
2021-04-26T07:26:46
2021-04-26T07:26:46
308,849,580
35
7
MIT
2021-04-26T07:26:47
2020-10-31T09:44:33
Python
UTF-8
Python
false
false
897
py
import setuptools import tty2img setuptools.setup( name="tty2img", py_modules=["tty2img"], version="0.3.7", author="Robert Paciorek", author_email="robert@opcode.eu.org", description="rendering pyte terminal emulator screen as image", long_description=tty2img.__doc__, long_description_content_type="text/plain", keywords='asciicast video movie mp4 tty pyte moviepy', install_requires=[ 'pyte', 'pillow', 'fclist-cffi', 'freetype-py' ], license='MIT', url="https://bitbucket.org/OpCode-eu-org/asciicast2movie/", project_urls={ 'GitHub': 'https://github.com/opcode-eu-org-libs/asciicast2movie/' }, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], python_requires='>=3.5', )
[ "robert@opcode.eu.org" ]
robert@opcode.eu.org
24432c9fd52eb43caf72d99549f5067e361a860d
2841d8e3d48aed80fe2098739a0b8050d34f8569
/Automata/PA2_python/src/CYK.py
a44dba8c3d14c1474b2dbfadf8330d189f0ab79d
[]
no_license
kingback1/CourseraHW
8f64ba8326f7339f5d451606e82cd1be251915cc
776bb7f9376424cdc2af6eaeee3942b9e21c21fb
refs/heads/master
2021-01-21T09:47:22.463023
2014-04-22T14:32:13
2014-04-22T14:32:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,844
py
''' CYK algorithm for Context Free Language Author: Chenguang Zhu CS154, Stanford University ''' import sys,traceback import os import string maxProductionNum = 100 #max number of productions VarNum = 4 production = [[0] * 3 for i in range(maxProductionNum+1)] '''Prouductions in Chomsky Normal Form (CNF) production[i][0] is the number for the variable (0~3, 0: S 1: A, 2: B, 3: C) If this production is A->BC (two variables), then production[i][1] and production[i][2] will contain the numbers for these two variables If this production is A->a (a single terminal), then production[i][1] will contain the number for the terminal (0 or 1, 0: a, 1: b), production[i][2]=-1''' X = [[[False]*3 for i in range(10)] for j in range(10)] '''X[i][j][s]=true if and only if variable s (0~3, 0: S 1: A, 2: B, 3: C) is in X_ij defined in CYK Suppose the length of string to be processed is L, then 0<=i<=j<L ''' #check whether (a,b,c) exists in production def existProd(a, b, c): global production for i in range(len(production)): if ((production[i][0]==a) and (production[i][1]==b) and (production[i][2]==c)): return True return False '''CYK algorithm Calculate the array X w is the string to be processed''' def calcCYK(w): global X global VarNum L=len(w) X=[[[False]*VarNum for i in range(L)] for j in range(L)] #Fill in your program here for i in range(L): for s in range(VarNum): X[i][i][s] = existProd(s, w[i], -1); for i in range(1, L): for j in range(L-i): for s in range(VarNum): for k in range(j, j+i): for s1 in range(VarNum): if X[j][k][s1]: for s2 in range(VarNum): if X[k+1][j+i][s2] and existProd(s, s1, s2): X[j][j+i][s] = True; def Start( filename): global X global VarNum global production result='' #read data case line by line from file try: br=open(filename,'r') #example on Page 8 of lecture 15_CFL5 production=[[0]*3 for i in range(7)] production[0][0]=0; production[0][1]=1; production[0][2]=2 #S->AB production[1][0]=1; production[1][1]=2; production[1][2]=3 #A->BC 
production[2][0]=1; production[2][1]=0; production[2][2]=-1 #A->a production[3][0]=2; production[3][1]=1; production[3][2]=3 #B->AC production[4][0]=2; production[4][1]=1; production[4][2]=-1 #B->b production[5][0]=3; production[5][1]=0; production[5][2]=-1 #C->a production[6][0]=3; production[6][1]=1; production[6][2]=-1 #C->b result='' #Read File Line By Line for string in br: string=string.strip() print 'Processing '+string+'...' length=len(string) w=[0]*length for i in range(length): w[i]=ord(string[i])-ord('a') #convert 'a' to 0 and 'b' to 1 #Use CYK algorithm to calculate X calcCYK(w) #Get/print the full table X for step in range(length-1,-1,-1): for i in range(length-step): j=i+step for k in range(VarNum): if (X[i][j][k]): result=result+str(k) result=result+' ' result=result+'\n' #Close the input stream br.close() except: exc_type, exc_value, exc_traceback = sys.exc_info() print "*** print_exception:" traceback.print_exception(exc_type, exc_value, exc_traceback,limit=2, file=sys.stdout) result=result+'error' return result def main(filepath): return Start('testCYK.in') if __name__ == '__main__': main(sys.argv[1])
[ "1062018952@qq.com" ]
1062018952@qq.com
30b9c1c0a8e43980eaba935bac27eca234435171
d3050f495f22b40cf300a05fa8ce2961ce1fbfa3
/Day3/AUC Robotics Summer Camp/Day3/catkin_ws/build/src/ball_chaser/catkin_generated/pkg.develspace.context.pc.py
281ee97a4d4ca6c9893ab2587c05f49a798e5a13
[]
no_license
mirashanouda/ROS_course
57214a97beee1ec01d341b0fb5684bae2176f90c
d847fa430f216b370a6211be8c2004aae1915269
refs/heads/master
2022-12-11T20:07:28.392143
2020-09-07T00:50:52
2020-09-07T00:50:52
287,122,652
1
0
null
null
null
null
UTF-8
Python
false
false
616
py
# generated from catkin/cmake/template/pkg.context.pc.in CATKIN_PACKAGE_PREFIX = "" PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/mira/AUC-Robotics/ROS_course/Day3/AUC Robotics Summer Camp/Day3/catkin_ws/devel/include".split(';') if "/home/mira/AUC-Robotics/ROS_course/Day3/AUC Robotics Summer Camp/Day3/catkin_ws/devel/include" != "" else [] PROJECT_CATKIN_DEPENDS = "".replace(';', ' ') PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else [] PROJECT_NAME = "ball_chaser" PROJECT_SPACE_DIR = "/home/mira/AUC-Robotics/ROS_course/Day3/AUC Robotics Summer Camp/Day3/catkin_ws/devel" PROJECT_VERSION = "0.0.0"
[ "mira.shanouda" ]
mira.shanouda
76586ca741364a3c5216604960f1c75befc06bc1
c1253970a0b872b73d1ebb1360532b585faf3baa
/process_result.py
a88688521e4af1d6edf81902a0a199f6ff4973fb
[]
no_license
sruthi2498/TelegramCovid
3996900a6f69bcf1dba4e0385ed3f165e2a3a600
050f07a860945b715493c9418832db408da8cc60
refs/heads/master
2023-04-25T05:49:18.565250
2021-05-15T04:04:57
2021-05-15T04:04:57
366,968,767
1
0
null
null
null
null
UTF-8
Python
false
false
1,403
py
import re import csv import pandas as pd from fuzzywuzzy import fuzz filename="vaccine_slot_trends.csv" df = pd.read_csv(filename) df.drop_duplicates(subset=None, inplace=True) df = df.sort_values("pincode").reset_index() print("Existing : ",len(df)) out_file = "hospital_timeslots.txt" f=open(out_file,"w") hospitals = df.name.unique().tolist() hospitals =[ h for h in hospitals if h!=None and h!="" and type(h)==str] hospitals = sorted(hospitals) for i in range(len(hospitals)): h = hospitals[i] for j in range(i+1,len(hospitals)): if(h!=hospitals[j]): match = fuzz.token_sort_ratio(h,hospitals[j]) if(match>75): #print(h,hospitals[j],match ) df['name'] = df['name'].replace([hospitals[j]],h) hospitals[j] = h hospitals = sorted(list(set(hospitals))) print(hospitals) for hp in hospitals: if(hp!=None and hp!=""): df_hosp = df[df["name"]==hp].sort_values("slots",ascending=False).reset_index() if(len(df_hosp)>0): hour_mins=[] for i in range(len(df_hosp)): row = str(df_hosp.loc[i,"hour"])+":"+str(df_hosp.loc[i,"minute"]) if(row not in hour_mins): hour_mins.append(row) line = str(df_hosp.loc[0,"pincode"])+" "+ hp +" : "+ ",".join(hour_mins)+"\n" f.write(line) f.close()
[ "vsruthi98@gmail.com" ]
vsruthi98@gmail.com
12ef4a4ee84efe8dddbe8dc234cdb04eb07adb9c
65a7496000953f8aa56aae0da74611292e30201e
/mongoDB/__init__.py
578cd9e584b2be2dad0fcb8efcbb7b76bd3ff25a
[]
no_license
sileyouhe/Douban_movie_data_analysis
19d202c32133bc6778480232dafe8672ac2660a2
2f97609b9885dff62547ae9a1e635d5c83ec2ebb
refs/heads/master
2020-09-10T21:25:13.966807
2019-12-19T08:55:33
2019-12-19T08:55:33
221,838,137
0
2
null
null
null
null
UTF-8
Python
false
false
641
py
import pymongo #和数据库连接 client = pymongo.MongoClient(host='localhost', port=27017) #访问“test”数据库 db = client.test print(db) # 访问‘movie’ 集合 collection = db.movie # 插入一条数据 item = { 'name': 'abc', 'actor': 'zhang san', } #result = collection.insert_one(item) #返回一个独一无二的ID值 #print(result) items =[{ 'name': 'abcd', 'actor': 'li si', }, { 'name': 'abcde', 'actor': 'wang wu', }, ] # result = collection.insert_many(items) # print(result) #寻找数据 print(collection.find_one()) print(collection.find_one({'actor':'wang wu'}))
[ "sileyouhe@gmail.com" ]
sileyouhe@gmail.com
2113063d729a811629f3bc376ba4bf53a6246231
ea2f7efb514b7e33eb205519cfffc356f58a9816
/Clases en Python/__repr()__.py
6f40939cad236fb3207cb6550a444771c025da4d
[]
no_license
MGijon/Learning-Python
fa79071bf53172743e96d2c614be2963a5107a9d
728f8d7e30729a965c5a093e08005d715aa6e46b
refs/heads/master
2021-06-28T15:44:34.082472
2019-02-23T17:58:06
2019-02-23T17:58:06
105,207,614
2
0
null
null
null
null
UTF-8
Python
false
false
2,145
py
''' __REPR()__: Nos devuelve una cadena de texto con la representación única de un objeto. Es útil, por ejemplo, a la hora de depurar un error. ------------ A la representación única accedemos de dos formas: con la función repr() o con las dobles comillas hacia atrás (``). Si __repr()__ no está definido, Python en lugar de darnos un error nos generará una representación automática del objeto, indicando el nombre de su clase y su posición en la memoria. ''' class Triangulo(object): def __init__(self, base, altura): self.base = base self.altura = altura def __str__(self): clase = type(self).__name__ mensaje = '{0} con base {1} y altura {2}.'.format(clase, self.base, self.altura) return mensaje t = Triangulo(12, 124) print(t) print('en este caso no hemos definido __repr()__, Python lo generará automáticamente...') print(repr(t)) import math class Circulo(object): def __init__(self, radio): self.radio = radio @property def area(self): return 2 * math.pi * self.radio def __str__(self): clase = type(self).__name__ mensaje = '{0} de radio {1} y área {2}'.format(clase, self.radio, self.area) return mensaje def __repr__(self): clase = type(self).__name__ mensaje = '{0}({1})'.format(clase, self.radio) return mensaje c = Circulo(131) print(c) # Circulo de radio 131 y área 823.0972752405258 print(repr(c)) # Circulo(131) print(eval(repr(c))) # Circulo de radio 131 y área 823.0972752405258 ##################### MORALEJA ########################################################### # --------- # # # # __str__ : PARA USUARIOS # # __repr–– : PARA DESARROLLADORES # # # ###########################################################################################
[ "mgijon94@gmail.com" ]
mgijon94@gmail.com
1c015a0854fcde9d4cf0f6123bcf6118fef54688
1ee0cfce421668e7d698602e6aded4f53b48345c
/slover/libMF.py
4b60c7719074c05eeb64a9e21ef462f971a10bd7
[]
no_license
Jackjet/datagrand_recSys
bb3e9ff3eb93ba813373eeb7575223e66fe39f48
b59945dbb6d3fd17fac4404a3b5f232d09e7745d
refs/heads/master
2020-05-07T18:06:49.430415
2017-07-23T08:43:03
2017-07-23T08:43:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,838
py
# coding=utf-8
"""Matrix-factorization recommender built on top of the libMF solver.

Pipeline
--------
1. ``make_train_test`` turns implicit feedback (view/share/...) into a
   weighted ``user item rating`` matrix for libMF.
2. The external script ``../bins/libMF.sh`` runs libMF's SGD solver and
   writes the latent factors to ``../model/libMF_model_l1l2``.
3. ``save_user_mat`` / ``save_item_mat`` split that model file into the
   user ('p' rows) and item ('q' rows) factor matrices.
4. ``make_predict`` scores candidate items per user, keeps a top-k list,
   drops items the user has already seen, and writes the final top-5
   recommendations to ``../result/result.csv``.
"""
import csv
import gc
import os
import time

import numpy
import pandas as pd


def get_action_weight(x):
    """Map an implicit-feedback action type to a numeric rating weight.

    Unknown action types fall back to the base weight 1 (same as 'view').
    """
    return {
        'view': 1,
        'deep_view': 2,
        'share': 8,
        'comment': 6,
        'collect': 5,
    }.get(x, 1)


def make_train_test():
    """Build the weighted rating matrix that libMF trains on.

    Writes ``user_rank item_rank weight`` triples (space separated, no
    header) to ``../data/real_matrix.tr.txt``.
    """
    train = pd.read_csv('../data/train.csv')
    user = pd.read_csv('../data/candidate.txt')
    item = pd.read_csv('../data/all_news_info.csv')
    # Re-map raw ids to contiguous integers ranked by sorted id: libMF wants
    # dense indices, and make_predict() inverts the mapping by sorted rank.
    # BUG FIX: the original assigned the *unsorted* id column as the index of
    # the *sorted* frame, which scrambles the id -> rank mapping whenever the
    # input files are not already sorted by id.
    uid_sorted = user[['user_id']].sort_values(['user_id'])
    uid_uniqid = dict(zip(uid_sorted['user_id'].values, range(len(uid_sorted))))
    iid_sorted = item[['item_id']].sort_values(['item_id'])
    iid_uniqid = dict(zip(iid_sorted['item_id'].values, range(len(iid_sorted))))
    train['weight'] = train['action_type'].apply(get_action_weight)
    train = pd.merge(user, train, on='user_id')
    # Sum the weights of all actions a user performed on an item.
    rat_mat = train[['user_id', 'item_id', 'weight']].groupby(
        ['user_id', 'item_id'], as_index=False).sum()
    rat_mat['user_id'] = rat_mat['user_id'].apply(lambda x: uid_uniqid.get(x))
    rat_mat['item_id'] = rat_mat['item_id'].apply(lambda x: iid_uniqid.get(x))
    rat_mat['weight'] = rat_mat['weight'].apply(float)
    rat_mat.to_csv('../data/real_matrix.tr.txt', index=False, header=False, sep=" ")


def _dump_factor_matrix(flag, out_path, factor_num):
    """Copy the rows tagged ``flag`` ('p' = user, 'q' = item) from the libMF
    model file into a CSV with a ``uniqid, flag, factor_*`` header.
    """
    with open('../model/libMF_model_l1l2', 'r') as model_file, \
            open(out_path, 'w') as out_file:
        csv_writer = csv.writer(out_file, delimiter=',')
        csv_writer.writerow(
            ['uniqid', 'flag'] + ["factor_" + str(i) for i in range(factor_num)])
        n = 0
        for line in model_file:
            if line.startswith(flag):
                # Drop the leading p/q marker; the rest is space separated.
                csv_writer.writerow(line[1:].strip().split(" "))
                n += 1
                if n % 1000 == 0:
                    print("write lines " + str(n))
    print(' write all lines ' + str(n))


def save_user_mat(factor_num):
    """Extract the user latent-factor matrix to ``../data/user_mat.csv``.

    BUG FIX: the original loop terminated on ``line == None``, which
    ``readline`` never returns (it yields '' at EOF), so a model file without
    a trailing 'q' section would spin forever.  Iterating the file object
    handles EOF correctly.
    """
    _dump_factor_matrix('p', '../data/user_mat.csv', factor_num)


def save_item_mat(factor_num):
    """Extract the item latent-factor matrix to ``../data/item_mat.csv``."""
    _dump_factor_matrix('q', '../data/item_mat.csv', factor_num)


def help(p):
    """Drop already-viewed items from a recommendation string, keep top 5.

    ``p`` is ``"<space separated rec ids>,<space separated viewed ids>"``.
    NOTE: the name shadows the ``help`` builtin; kept for backward
    compatibility with existing callers.

    BUG FIX: the original removed elements from the list *while iterating
    it*, which skips the element after every removal, so viewed items could
    leak into the result.
    """
    rec_part, viewed_part = str(p).split(",", 1)
    viewed = set(viewed_part.split(" "))
    kept = []
    for item_id in rec_part.split(" "):
        if item_id not in viewed:
            kept.append(item_id)
            if len(kept) == 5:
                break
    return " ".join(kept)


def addAndSortTopK(e, sorted_list, k=60):
    """Insert ``e`` (``[item_id, score]``) into ``sorted_list``, keeping only
    the ``k`` highest-scoring entries sorted by descending score.

    Mutates and returns ``sorted_list``.

    BUG FIX: the original never truncated the list back to ``k`` entries (it
    only sorted), so it grew without bound, and entries appended while the
    list was still short were left unsorted.
    """
    if len(sorted_list) < k or e[1] > sorted_list[-1][1]:
        sorted_list.append(e)
        sorted_list.sort(key=lambda entry: -entry[1])
        del sorted_list[k:]
    return sorted_list


def make_predict(num_factor):
    """Score candidates, build per-user top-k lists, filter viewed items and
    write the final top-5 recommendations to ``../result/result.csv``.

    NOTE: ``.ix`` indexing was removed from pandas; replaced with ``.iloc``.
    """
    print(' 读取用户和物品矩阵 ')
    user_mat = pd.read_csv('../data/user_mat.csv')
    item_mat = pd.read_csv('../data/item_mat.csv')
    item = pd.read_csv('../data/news_info.csv')
    train = pd.read_csv('../data/train.csv')
    user = pd.read_csv('../data/candidate.txt')
    item_all = pd.read_csv('../data/all_news_info.csv')
    print(' 将uniqid重新映射成user_id,item_id ')
    # Invert the contiguous-id mapping: rank (position in sorted order) -> id.
    uniqid_uid = user[['user_id']].sort_values(['user_id'])
    uniqid_uid.index = range(len(uniqid_uid))
    uniqid_uid = uniqid_uid['user_id'].to_dict()
    uniqid_iid = item_all[['item_id']].sort_values(['item_id'])
    uniqid_iid.index = range(len(uniqid_iid))
    uniqid_iid = uniqid_iid['item_id'].to_dict()
    user_mat['user_id'] = user_mat['uniqid'].apply(lambda x: uniqid_uid[x])
    item_mat['item_id'] = item_mat['uniqid'].apply(lambda x: uniqid_iid[x])
    # 这里有些新品是没处理的,可以通过同cate的隐向量进行均值填充
    # (new items without factors could be mean-filled from same-category vectors)
    item_mat = pd.merge(item[['item_id']], item_mat, on='item_id')
    print(' 去掉空值减少计算量 ')
    item_mat = item_mat[item_mat['flag'] == 'T']
    item_mat.index = range(len(item_mat))
    print(' 待推荐item总数' + str(len(item_mat)))
    print(' 过滤掉那些阅读晚高峰也没被看过的item,大约1w多 ')
    start = time.mktime(time.strptime('2017-2-18 18:00:00', '%Y-%m-%d %H:%M:%S'))
    item_max_time = train.groupby(['item_id'], as_index=False).max()[['item_id', 'action_time']]
    item_max_time = item_max_time[item_max_time['action_time'] > start]
    item_mat = pd.merge(item_max_time[['item_id']], item_mat, on='item_id')
    print(' 测试集里top10的item_id ')
    test = pd.read_csv('../data/test.csv')
    test = test.groupby(['item_id'], as_index=False).count().sort_values(
        ['user_id'], ascending=False)[:10]
    item_mat = pd.merge(item_mat, test[['item_id']].drop_duplicates(), on='item_id')
    print(' 预测评分 ')
    rec = pd.DataFrame()
    user_list = []
    rec_items_list = []
    sorted_list = []
    n = 0
    feat = ["factor_" + str(i) for i in range(num_factor)]
    user_mat = user_mat[['user_id'] + feat]
    item_mat = item_mat[['item_id'] + feat]
    for i in range(len(user_mat)):
        recitems = []
        for j in range(len(item_mat)):
            # Predicted rating = dot product of user and item latent factors.
            predict = user_mat.iloc[i, 1:].dot(item_mat.iloc[j, 1:])
            addAndSortTopK([item_mat.iloc[j, 0], predict], sorted_list)
        for item_predict in sorted_list:
            recitems.append(int(item_predict[0]))
        sorted_list.clear()
        user_list.append(user_mat.iloc[i, 0])
        rec_items_list.append(" ".join(map(str, recitems)))
        n += 1
        if n % 2 == 0:
            print(' rec users ' + str(n))
    rec['user_id'] = user_list
    rec['item_id'] = rec_items_list
    del item_all, user, item, user_list, rec_items_list
    gc.collect()
    print('过滤掉用户已经看过的')
    user_viewed = pd.DataFrame()
    user_list = []
    viewed_item = []
    for uid, group in train[['user_id', 'item_id']].groupby(['user_id'], as_index=False):
        user_list.append(uid)
        viewed_item.append(" ".join(map(str, map(int, list(group['item_id'].unique())))))
    user_viewed['user_id'] = user_list
    user_viewed['item_id'] = viewed_item
    del user_list, viewed_item
    gc.collect()
    rec = pd.merge(rec, user_viewed, how='left', on='user_id').fillna("")
    # "<recommended>,<viewed>" per user; help() strips viewed and keeps top 5.
    rec['item_id'] = rec['item_id_x'] + "," + rec['item_id_y']
    rec['item_id'] = rec['item_id'].apply(help)
    rec = rec[['user_id', 'item_id']]
    rec.drop_duplicates('user_id').to_csv('../result/result.csv', index=None, header=None)


if __name__ == '__main__':
    # make_train_test()
    # exit_num = os.system("../bins/libMF.sh")
    # print(exit_num >> 8)
    # save_user_mat(35)
    # save_item_mat(35)
    make_predict(35)
[ "18210872892@163.com" ]
18210872892@163.com
70d28bdb9d82aa11081654760958d50a0e9b5ae3
55647a80c8b412af9df0ba3f50595cc2f29c25e6
/res/scripts/client/gui/battle_control/controllers/consumables/__init__.py
de6cdb4912e1bd8a0b0ace2de737e8453afc24ad
[]
no_license
cnsuhao/WOT-0.9.17-CT
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
d1f932d8cabaf8aa21708622e87f83c8d24d6451
refs/heads/master
2021-06-08T18:11:07.039293
2016-11-19T19:12:37
2016-11-19T19:12:37
null
0
0
null
null
null
null
WINDOWS-1250
Python
false
false
1,212
py
# 2016.11.19 19:48:19 Střední Evropa (běžný čas) # Embedded file name: scripts/client/gui/battle_control/controllers/consumables/__init__.py from gui.battle_control.controllers.consumables import ammo_ctrl from gui.battle_control.controllers.consumables import equipment_ctrl from gui.battle_control.controllers.consumables import opt_devices_ctrl def createAmmoCtrl(setup): if setup.isReplayRecording: return ammo_ctrl.AmmoReplayRecorder(setup.replayCtrl) if setup.isReplayPlaying: return ammo_ctrl.AmmoReplayPlayer(setup.replayCtrl) return ammo_ctrl.AmmoController() def createEquipmentCtrl(setup): if setup.isReplayPlaying: clazz = equipment_ctrl.EquipmentsReplayPlayer else: clazz = equipment_ctrl.EquipmentsController return clazz() def createOptDevicesCtrl(): return opt_devices_ctrl.OptionalDevicesController() __all__ = ('createAmmoCtrl', 'createEquipmentCtrl', 'createOptDevicesCtrl') # okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\battle_control\controllers\consumables\__init__.pyc # decompiled 1 files: 1 okay, 0 failed, 0 verify failed # 2016.11.19 19:48:19 Střední Evropa (běžný čas)
[ "info@webium.sk" ]
info@webium.sk
b273f9b5b2501af9b9e5f2bf763f41c49463f08e
37dc13aeae55edb3bf94769c9dec738cb20d3c21
/GUI/BrokerBenchmark_v2.py
bc7f6e983da32050320623fb8bd549014178b475
[]
no_license
Parr0t/BrokerBenching
24a964bd2169817bdcc4923856739dd24f2139a8
57c8e015fedfeda018f4ba73cacdc0afdbdc4f86
refs/heads/master
2021-01-25T01:21:00.389155
2017-06-19T06:55:49
2017-06-19T06:55:49
94,745,454
0
0
null
null
null
null
UTF-8
Python
false
false
6,334
py
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'BrokerBenchmark.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
# NOTE(review): this file is auto-generated — edit BrokerBenchmark.ui and
# re-run pyuic5 instead of changing widget code here.

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_MainWindow(object):
    # Generated UI scaffold for the broker-benchmark main window.
    def setupUi(self, MainWindow):
        # Window frame.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Run button and progress bar (bottom row).
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(690, 510, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
        self.progressBar.setGeometry(QtCore.QRect(40, 510, 541, 23))
        self.progressBar.setProperty("value", 0)
        self.progressBar.setObjectName("progressBar")
        # Storage-mode radio buttons; "ohne Datenablage" is the default.
        self.radioButton = QtWidgets.QRadioButton(self.centralwidget)
        self.radioButton.setGeometry(QtCore.QRect(390, 180, 141, 17))
        self.radioButton.setObjectName("radioButton")
        self.radioButton_2 = QtWidgets.QRadioButton(self.centralwidget)
        self.radioButton_2.setGeometry(QtCore.QRect(390, 150, 121, 17))
        self.radioButton_2.setChecked(True)
        self.radioButton_2.setObjectName("radioButton_2")
        # Database (comboBox) and broker (comboBox_2) selectors with labels.
        self.comboBox = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox.setGeometry(QtCore.QRect(560, 180, 181, 22))
        self.comboBox.setObjectName("comboBox")
        self.comboBox_2 = QtWidgets.QComboBox(self.centralwidget)
        self.comboBox_2.setGeometry(QtCore.QRect(70, 180, 221, 22))
        self.comboBox_2.setObjectName("comboBox_2")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(70, 160, 101, 16))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(560, 160, 101, 16))
        self.label_2.setObjectName("label_2")
        # Stop button and log-file checkbox.
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(600, 510, 75, 23))
        self.pushButton_2.setObjectName("pushButton_2")
        self.checkBox = QtWidgets.QCheckBox(self.centralwidget)
        self.checkBox.setGeometry(QtCore.QRect(600, 480, 131, 17))
        self.checkBox.setObjectName("checkBox")
        # Description panels for the selected broker / database.
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(70, 230, 221, 141))
        self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(560, 230, 221, 141))
        self.label_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.label_4.setObjectName("label_4")
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar: Datei (file) and Info menus with their actions.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
        self.menubar.setObjectName("menubar")
        self.menuDatei = QtWidgets.QMenu(self.menubar)
        self.menuDatei.setObjectName("menuDatei")
        self.menuInfo = QtWidgets.QMenu(self.menubar)
        self.menuInfo.setObjectName("menuInfo")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.action_ffnen = QtWidgets.QAction(MainWindow)
        self.action_ffnen.setObjectName("action_ffnen")
        self.actionBeenden = QtWidgets.QAction(MainWindow)
        self.actionBeenden.setObjectName("actionBeenden")
        self.actionSpeichern = QtWidgets.QAction(MainWindow)
        self.actionSpeichern.setObjectName("actionSpeichern")
        self.actionSpeichern_unter = QtWidgets.QAction(MainWindow)
        self.actionSpeichern_unter.setObjectName("actionSpeichern_unter")
        self.action_ber = QtWidgets.QAction(MainWindow)
        self.action_ber.setObjectName("action_ber")
        self.menuDatei.addAction(self.action_ffnen)
        self.menuDatei.addAction(self.actionSpeichern)
        self.menuDatei.addAction(self.actionSpeichern_unter)
        self.menuDatei.addSeparator()
        self.menuDatei.addAction(self.actionBeenden)
        self.menuInfo.addAction(self.action_ber)
        self.menubar.addAction(self.menuDatei.menuAction())
        self.menubar.addAction(self.menuInfo.menuAction())

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Assign all translatable (German) UI strings.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton.setText(_translate("MainWindow", "Run"))
        self.radioButton.setText(_translate("MainWindow", "Daten in Datenbank"))
        self.radioButton_2.setText(_translate("MainWindow", "ohne Datenablage"))
        self.label.setText(_translate("MainWindow", "Broker Auswahl:"))
        self.label_2.setText(_translate("MainWindow", "Datenbank Auswahl:"))
        self.pushButton_2.setText(_translate("MainWindow", "Stop"))
        self.checkBox.setText(_translate("MainWindow", "Generiere Log Datei"))
        self.label_3.setText(_translate("MainWindow", "Broker Beschreibung"))
        self.label_4.setText(_translate("MainWindow", "Datenbank Beschreibung"))
        self.menuDatei.setTitle(_translate("MainWindow", "Datei"))
        self.menuInfo.setTitle(_translate("MainWindow", "Info"))
        self.action_ffnen.setText(_translate("MainWindow", "Öffnen"))
        self.actionBeenden.setText(_translate("MainWindow", "Beenden"))
        self.actionSpeichern.setText(_translate("MainWindow", "Speichern"))
        self.actionSpeichern_unter.setText(_translate("MainWindow", "Speichern unter"))
        self.action_ber.setText(_translate("MainWindow", "Über"))


if __name__ == "__main__":
    # Standalone preview of the generated window.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
[ "henric.breuer@gmail.com" ]
henric.breuer@gmail.com
f49cc8a318dbe041128c54cd3be1978f05320e6e
12b9a593bd6a10b8aba48ed940d9fdb800ca51bf
/venv/Lib/site-packages/pytube/streams.py
59e4e96ea0b933ae766b68ac52d243c92aac52d1
[]
no_license
Saifahmad-svg/YouTube-Downloader
1210384e20627826fb1f8dded87b175f12f93f84
84921fdfe5994e55a1ea4a76c09edc260b1a2509
refs/heads/master
2023-04-12T11:24:53.467967
2021-04-17T10:28:36
2021-04-17T10:28:36
358,836,206
2
0
null
null
null
null
UTF-8
Python
false
false
13,078
py
# -*- coding: utf-8 -*-
"""
This module contains a container for stream manifest data.

A container object for the media stream (video only / audio only / video+audio
combined). This was referred to as ``Video`` in the legacy pytube version, but
has been renamed to accommodate DASH (which serves the audio and video
separately).
"""
import logging
import os
from datetime import datetime
from typing import BinaryIO, Dict, Optional, Tuple
from urllib.error import HTTPError
from urllib.parse import parse_qs

from pytube import extract, request
from pytube.helpers import safe_filename, target_directory
from pytube.itags import get_format_profile
from pytube.monostate import Monostate

logger = logging.getLogger(__name__)


class Stream:
    """Container for stream manifest data."""

    def __init__(
        self, stream: Dict, player_config_args: Dict, monostate: Monostate
    ):
        """Construct a :class:`Stream <Stream>`.

        :param dict stream:
            The unscrambled data extracted from YouTube.
        :param dict player_config_args:
            The data object containing video media data like title and
            keywords.
        :param dict monostate:
            Dictionary of data shared across all instances of
            :class:`Stream <Stream>`.
        """
        # A dictionary shared between all instances of :class:`Stream <Stream>`
        # (Borg pattern).
        self._monostate = monostate

        self.url = stream["url"]  # signed download url
        self.itag = int(
            stream["itag"]
        )  # stream format id (youtube nomenclature)

        # set type and codec info
        # 'video/webm; codecs="vp8, vorbis"' -> 'video/webm', ['vp8', 'vorbis']
        self.mime_type, self.codecs = extract.mime_type_codec(stream["type"])

        # 'video/webm' -> 'video', 'webm'
        self.type, self.subtype = self.mime_type.split("/")

        # ['vp8', 'vorbis'] -> video_codec: vp8, audio_codec: vorbis. DASH
        # streams return NoneType for audio/video depending.
        self.video_codec, self.audio_codec = self.parse_codecs()

        self.is_otf: bool = stream["is_otf"]
        self.bitrate: Optional[int] = stream["bitrate"]

        self._filesize: Optional[int] = None  # filesize in bytes

        # Additional information about the stream format, such as resolution,
        # frame rate, and whether the stream is live (HLS) or 3D.
        itag_profile = get_format_profile(self.itag)
        self.is_dash = itag_profile["is_dash"]
        self.abr = itag_profile["abr"]  # average bitrate (audio streams only)
        self.fps = stream["fps"]  # frames per second (video streams only)
        self.resolution = itag_profile[
            "resolution"
        ]  # resolution (e.g.: "480p")
        self.is_3d = itag_profile["is_3d"]
        self.is_hdr = itag_profile["is_hdr"]
        self.is_live = itag_profile["is_live"]

        # The player configuration, contains info like the video title.
        self.player_config_args = player_config_args

    @property
    def is_adaptive(self) -> bool:
        """Whether the stream is DASH.

        :rtype: bool
        """
        # if codecs has two elements (e.g.: ['vp8', 'vorbis']): 2 % 2 = 0
        # if codecs has one element (e.g.: ['vp8']) 1 % 2 = 1
        return bool(len(self.codecs) % 2)

    @property
    def is_progressive(self) -> bool:
        """Whether the stream is progressive.

        :rtype: bool
        """
        return not self.is_adaptive

    @property
    def includes_audio_track(self) -> bool:
        """Whether the stream only contains audio.

        :rtype: bool
        """
        return self.is_progressive or self.type == "audio"

    @property
    def includes_video_track(self) -> bool:
        """Whether the stream only contains video.

        :rtype: bool
        """
        return self.is_progressive or self.type == "video"

    def parse_codecs(self) -> Tuple[Optional[str], Optional[str]]:
        """Get the video/audio codecs from list of codecs.

        Parse a variable length sized list of codecs and returns a
        constant two element tuple, with the video codec as the first element
        and audio as the second. Returns None if one is not available
        (adaptive only).

        :rtype: tuple
        :returns:
            A two element tuple with audio and video codecs.

        """
        video = None
        audio = None
        if not self.is_adaptive:
            video, audio = self.codecs
        elif self.includes_video_track:
            video = self.codecs[0]
        elif self.includes_audio_track:
            audio = self.codecs[0]
        return video, audio

    @property
    def filesize(self) -> int:
        """File size of the media stream in bytes.

        :rtype: int
        :returns:
            Filesize (in bytes) of the stream.
        """
        if self._filesize is None:
            try:
                self._filesize = request.filesize(self.url)
            except HTTPError as e:
                if e.code != 404:
                    raise
                # Some adaptive streams only answer sequence requests.
                self._filesize = request.seq_filesize(self.url)
        return self._filesize

    @property
    def title(self) -> str:
        """Get title of video

        :rtype: str
        :returns:
            Youtube video title
        """
        return self._monostate.title or "Unknown YouTube Video Title"

    @property
    def filesize_approx(self) -> int:
        """Get approximate filesize of the video

        Falls back to HTTP call if there is not sufficient information to
        approximate

        :rtype: int
        :returns: size of video in bytes
        """
        if self._monostate.duration and self.bitrate:
            bits_in_byte = 8
            return int(
                (self._monostate.duration * self.bitrate) / bits_in_byte
            )

        return self.filesize

    @property
    def expiration(self) -> datetime:
        """Expiry timestamp parsed from the signed url's `expire` parameter."""
        expire = parse_qs(self.url.split("?")[1])["expire"][0]
        return datetime.utcfromtimestamp(int(expire))

    @property
    def default_filename(self) -> str:
        """Generate filename based on the video title.

        :rtype: str
        :returns:
            An os file system compatible filename.
        """
        filename = safe_filename(self.title)
        # BUG FIX: the sanitized title was computed and then discarded in
        # favor of a literal "(unknown)" placeholder; use the real title.
        return f"{filename}.{self.subtype}"

    def download(
        self,
        output_path: Optional[str] = None,
        filename: Optional[str] = None,
        filename_prefix: Optional[str] = None,
        skip_existing: bool = True,
        timeout: Optional[int] = None,
        max_retries: Optional[int] = 0
    ) -> str:
        """Write the media stream to disk.

        :param output_path:
            (optional) Output path for writing media file. If one is not
            specified, defaults to the current working directory.
        :type output_path: str or None
        :param filename:
            (optional) Output filename (stem only) for writing media file.
            If one is not specified, the default filename is used.
        :type filename: str or None
        :param filename_prefix:
            (optional) A string that will be prepended to the filename.
            For example a number in a playlist or the name of a series.
            If one is not specified, nothing will be prepended
            This is separate from filename so you can use the default
            filename but still add a prefix.
        :type filename_prefix: str or None
        :param skip_existing:
            (optional) Skip existing files, defaults to True
        :type skip_existing: bool
        :param timeout:
            (optional) Request timeout length in seconds
        :type timeout: int
        :returns:
            Path to the saved video
        :rtype: str
        """
        file_path = self.get_file_path(
            filename=filename,
            output_path=output_path,
            filename_prefix=filename_prefix,
        )

        if skip_existing and self.exists_at_path(file_path):
            logger.debug(f'file {file_path} already exists, skipping')
            self.on_complete(file_path)
            return file_path

        bytes_remaining = self.filesize
        logger.debug(f'downloading ({self.filesize} total bytes) file to {file_path}')

        with open(file_path, "wb") as fh:
            try:
                for chunk in request.stream(
                    self.url,
                    timeout=timeout,
                    max_retries=max_retries
                ):
                    # reduce the (bytes) remainder by the length of the chunk.
                    bytes_remaining -= len(chunk)
                    # send to the on_progress callback.
                    self.on_progress(chunk, fh, bytes_remaining)
            except HTTPError as e:
                if e.code != 404:
                    raise
                # Some adaptive streams need to be requested with sequence numbers
                for chunk in request.seq_stream(
                    self.url,
                    timeout=timeout,
                    max_retries=max_retries
                ):
                    # reduce the (bytes) remainder by the length of the chunk.
                    bytes_remaining -= len(chunk)
                    # send to the on_progress callback.
                    self.on_progress(chunk, fh, bytes_remaining)
        self.on_complete(file_path)
        return file_path

    def get_file_path(
        self,
        filename: Optional[str],
        output_path: Optional[str],
        filename_prefix: Optional[str] = None,
    ) -> str:
        """Resolve the absolute target path for a download."""
        if filename:
            filename = f"{safe_filename(filename)}.{self.subtype}"
        else:
            filename = self.default_filename

        if filename_prefix:
            # BUG FIX: the prefix was concatenated with a literal "(unknown)"
            # instead of the computed filename, losing the real name.
            filename = f"{safe_filename(filename_prefix)}{filename}"

        return os.path.join(target_directory(output_path), filename)

    def exists_at_path(self, file_path: str) -> bool:
        """True when the file exists on disk with the expected size."""
        return (
            os.path.isfile(file_path)
            and os.path.getsize(file_path) == self.filesize
        )

    def stream_to_buffer(self, buffer: BinaryIO) -> None:
        """Write the media stream to buffer

        :rtype: io.BytesIO buffer
        """
        bytes_remaining = self.filesize
        logger.info(
            "downloading (%s total bytes) file to buffer", self.filesize,
        )

        for chunk in request.stream(self.url):
            # reduce the (bytes) remainder by the length of the chunk.
            bytes_remaining -= len(chunk)
            # send to the on_progress callback.
            self.on_progress(chunk, buffer, bytes_remaining)
        self.on_complete(None)

    def on_progress(
        self, chunk: bytes, file_handler: BinaryIO, bytes_remaining: int
    ):
        """On progress callback function.

        This function writes the binary data to the file, then checks if an
        additional callback is defined in the monostate. This is exposed to
        allow things like displaying a progress bar.

        :param bytes chunk:
            Segment of media file binary data, not yet written to disk.
        :param file_handler:
            The file handle where the media is being written to.
        :type file_handler:
            :py:class:`io.BufferedWriter`
        :param int bytes_remaining:
            The delta between the total file size in bytes and amount
            already downloaded.

        :rtype: None
        """
        file_handler.write(chunk)
        logger.debug("download remaining: %s", bytes_remaining)
        if self._monostate.on_progress:
            self._monostate.on_progress(self, chunk, bytes_remaining)

    def on_complete(self, file_path: Optional[str]):
        """On download complete handler function.

        :param file_path:
            The file handle where the media is being written to.
        :type file_path: str

        :rtype: None
        """
        logger.debug("download finished")
        on_complete = self._monostate.on_complete
        if on_complete:
            logger.debug("calling on_complete callback %s", on_complete)
            on_complete(self, file_path)

    def __repr__(self) -> str:
        """Printable object representation.

        :rtype: str
        :returns:
            A string representation of a :class:`Stream <Stream>` object.
        """
        parts = ['itag="{s.itag}"', 'mime_type="{s.mime_type}"']
        if self.includes_video_track:
            parts.extend(['res="{s.resolution}"', 'fps="{s.fps}fps"'])
            if not self.is_adaptive:
                parts.extend(
                    ['vcodec="{s.video_codec}"', 'acodec="{s.audio_codec}"',]
                )
            else:
                parts.extend(['vcodec="{s.video_codec}"'])
        else:
            parts.extend(['abr="{s.abr}"', 'acodec="{s.audio_codec}"'])
        parts.extend(['progressive="{s.is_progressive}"', 'type="{s.type}"'])
        return f"<Stream: {' '.join(parts).format(s=self)}>"
[ "ahmadmoiz079@gmail.com" ]
ahmadmoiz079@gmail.com
2b625e5241e1fce827189ba0c63e713e459992b9
34b83bacb45ab3f4367238ee586109a797c82e23
/Proyecto/env/bin/easy_install-3.7
24ed2954c3ec02dac32a004125cd403339c55ba7
[]
no_license
adbetin/proyecto-pruebas-miso4208
66f859f68e3f5cecec79c18cdb6e1fe727b52598
f14da2e52f1059144fff0d75d5f1b74b2b364b23
refs/heads/master
2020-03-26T03:46:51.583155
2018-11-22T00:13:48
2018-11-22T00:13:48
144,470,516
0
1
null
null
null
null
UTF-8
Python
false
false
273
7
#!/Users/davidsaavedra/Documents/Proyecto/env/bin/python3 # -*- coding: utf-8 -*- import re import sys from setuptools.command.easy_install import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit(main())
[ "davidsaavedra@MacBook-Air-de-David.local" ]
davidsaavedra@MacBook-Air-de-David.local
7d72e289cd1a204ce8d9543b02b422fad79372c9
9848584d5f1858692fb4cdbe793bc91ed3be920e
/coding/00239-sliding-window-max/solution.py
aef03fa50138e58d6f572230081501d70f98fcf3
[]
no_license
misaka-10032/leetcode
1212223585cc27d3dfc6d2ca6a27770f06e427e3
20580185c6f72f3c09a725168af48893156161f5
refs/heads/master
2020-12-12T09:45:31.491801
2020-09-14T00:18:19
2020-09-14T00:18:19
50,267,669
3
1
null
null
null
null
UTF-8
Python
false
false
1,055
py
#!/usr/bin/env python3 # encoding: utf-8 import collections from typing import List class DecreasingGarbageCollectionQueue: def __init__(self, ttl: int): self._ttl = ttl self._q = collections.deque() def append(self, t: int, v: int): # First, clean up the stale elements. while self._q and self._q[0][0] + self._ttl <= t: self._q.popleft() # Second, make sure the values are decreasing. while self._q and self._q[-1][1] <= v: self._q.pop() self._q.append((t, v)) def peek(self) -> int: return self._q[0][1] class Solution: def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]: # Construct a queue that has decreasing values, and only contains the # element in a time window. q = DecreasingGarbageCollectionQueue(k) result = [] for i, v in enumerate(nums): q.append(i, v) if i < k - 1: continue result.append(q.peek()) return result
[ "longqicai@gmail.com" ]
longqicai@gmail.com
deb8bf603ffe349649ad018dc1bad52c104ecc00
a6486474f8de739c5c38115860d9806a63926f9f
/NJF/settings.py
a817d3f40d454554e85e78d9961aa8db942cff1b
[]
no_license
AdityaSreepad/NJF
151801279602075c352e4f306078b46382a74e8f
0a780e44f7e50ec1809f1e13f250af3e1aec9a0e
refs/heads/master
2022-05-24T08:13:50.656950
2019-09-07T10:10:46
2019-09-07T10:10:46
204,331,673
0
0
null
2022-04-22T22:08:33
2019-08-25T18:07:49
Python
UTF-8
Python
false
false
3,233
py
"""
Django settings for NJF project.

Generated by 'django-admin startproject' using Django 2.2.4.

For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
# BUG FIX: the file started with a stray diff hunk header
# ("@@ -1,126 +0,0 @@"), which is a Python syntax error and made the
# settings module unimportable. It has been removed.

import django_heroku
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't8jf@6lspz@bb@e9jtx_(-a3#_tui1_#4jopdfw+s7ne2*29+q'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'welcome_test',
    'rest_framework',
    'corsheaders',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'NJF.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'NJF.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'

# Activate Django Heroku (configures database, static files, etc. for Heroku).
django_heroku.settings(locals())
[ "aditya.g.sreepad@gmail.com" ]
aditya.g.sreepad@gmail.com
57eb63aef52b1d51cc147831e2de70dd7402e9ae
9e35cd8bdfba25749d4a75d8093e14c3b63aaa93
/Thursty-Backend-master/Party/models.py
693c8591a6c1c30525d871e67d87528474a5d207
[]
no_license
aaryangrg/Shortin
756db3be1a722ad8a215aeba907ee0570b8706e1
d452362f001c6d357da8ab75779b7873e27b288f
refs/heads/master
2022-12-09T11:26:16.116983
2018-05-29T16:45:00
2018-05-29T16:45:00
135,323,997
0
0
null
2022-12-08T02:08:20
2018-05-29T16:27:23
Python
UTF-8
Python
false
false
1,427
py
from django.db import models from django.contrib.postgres.fields import ArrayField # Create your models here. class Party(models.Model): #Unique Party identifier partyid = models.CharField(max_length = 100, blank = False, null = False, unique = True, primary_key = True) #Basic Details createdAt = models.DateTimeField(auto_now_add = True) eventName = models.CharField(max_length = 100, blank = False, default = "Thursty Party") hostedBy = models.CharField(max_length = 100, blank = False) hostedByNameCache = models.CharField(max_length = 100, blank = False) #Put organization name here time = models.DateTimeField(null = False) location = models.CharField(max_length = 100, blank = False, null = False) #Guest List - indices correspond across these fields to indicate one guest instance guests = ArrayField(models.CharField(blank = True, max_length = 100)) #Indicate User's unique ID here guestsNameCache = ArrayField(models.CharField(blank = True, max_length = 200)) #Indicate User's name here entryTime = ArrayField(models.DateTimeField(blank = True)) exitTime = ArrayField(models.DateTimeField(blank = True)) paymentMethod = ArrayField(models.CharField(default = "Cash", max_length = 50, blank = True)) #Status status = models.CharField(max_length = 20, blank = False, default = "Upcoming",) class Meta: ordering = ('time',)
[ "aaryan.garg1@gmail.com" ]
aaryan.garg1@gmail.com
55a11c16a314a81724eaf59517091e360790c84f
1a19ac07ef5e5803e75155c02d2a746e295a778b
/backend/App/app.py
e933eb639367f8f68b853e23e72f5527b9a7cd12
[]
no_license
Chinmay-KB/cfg2020
f1f30f3ecdb0818f6ed6a2f9b0e1ad1c818b4a47
d7f59b1ac1c74943f91a11d9f71a21be5a71bffd
refs/heads/main
2023-01-25T05:19:24.237957
2020-12-06T16:24:03
2020-12-06T16:24:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
557
py
from flask import Flask from flask import jsonify,after_this_request,make_response import json import sqlquery as query from flask_cors import CORS, cross_origin app = Flask(__name__) CORS(app) app.config['JSON_SORT_KEYS'] = False @app.route('/getRows',methods=['GET']) def getRows(): all_rows, column_names=query.getRows() res=[{column_names[i]:all_rows[i] for i in range(len(all_rows))} for row in all_rows] return jsonify(res) @app.route('/') def hello_name(): return 'Hello' if __name__ == '__main__': app.run(host='localhost', port=5000)
[ "mr.sayan.dey@gmail.com" ]
mr.sayan.dey@gmail.com
b35a9ff601e7840c26a996d0f54eca9c718d1e47
7297aa83b4d2759c07714208f59fc112249b27f3
/guest/sign/views.py
fb0f990bf90577836541d1947a03b70bce866c22
[]
no_license
yehshuwen/Django_practice
26e2e5eff9bba14c0a700db97bf0e8b6d4f70a67
9d46b9f5456e65566a60c642c9eb0ceface91300
refs/heads/master
2020-04-03T09:20:55.578892
2018-12-27T10:50:13
2018-12-27T10:50:13
155,162,247
0
0
null
null
null
null
UTF-8
Python
false
false
1,563
py
from django.shortcuts import render from django.http import HttpResponse from django.http import HttpResponseRedirect from django.contrib import auth from django.contrib.auth.decorators import login_required # Create your views here. ''' def index(request): return HttpResponse("Hello Django!") ''' def index(request): return render (request,"index.html") ''' def add(request,a,b): s = int(a)+int(b) return HttpResponse(str(s)) ''' #登錄動作 def login_action(request): if request.method =='POST': username = request.POST.get('username','') password = request.POST.get('password','') user = auth.authenticate(username = username,password = password) #if username =='admin' and password == 'admin123': if user is not None: auth.login(request,user) #登錄 #return HttpResponse('login success!') #return HttpResponseRedirect('/event_manage/') request.session['user'] = username #將session資訊紀錄到瀏覽器 response = HttpResponseRedirect('/event_manage/') #response.set_cookie('user',username,3600) #新增瀏覽器cookie return response else: return render(request,'index.html',{'error': 'username or password error!'}) #發佈會管理 @login_required def event_manage(request): #username = request.COOKIES.get('user','') #讀取瀏覽器cookie username = request.session.get('user','') #讀取瀏覽器session return render(request,"event_manage.html",{"user":username})
[ "passage543@gmail.com" ]
passage543@gmail.com
5aab7beb6743d9b8f2c9470c478f4166c9f30859
8e22003b5cb6213862d537e03ae7b4272df543b3
/daftarsiswa/models.py
a4bb4b34f0e5072447f9035cfa2bb5bccec86354
[]
no_license
dananggeek/profilsekolah
c864d57ac17e9875d1b88e85bcba06315d3ad879
8166adbe3deda218bdfce5f72888dd509a19e124
refs/heads/master
2020-03-18T13:58:42.318176
2018-05-25T07:45:35
2018-05-25T07:45:35
134,821,725
0
0
null
null
null
null
UTF-8
Python
false
false
1,114
py
from django.db import models from django.contrib.auth.models import User # Create your models here. class pendaftaran (models.Model): #user =models.ForeignKey(User, on_delete=models.CASCADE) nama_depan =models.CharField(max_length=50) nama_belakang =models.CharField(max_length=50) email =models.CharField(max_length=50) foto =models.FileField() alamat =models.CharField(max_length=100) tempat_lahir =models.CharField(max_length=50) tanggal_lahir =models.DateField() agama =models.CharField(max_length=50) nohp =models.IntegerField() nilai_un =models.FileField() nama_orangtua =models.CharField(max_length=50) alamat_orangtua =models.CharField(max_length=50) nohp_orangtua =models.IntegerField() tanggal_daftar =models.DateTimeField(auto_now=False,auto_now_add=True) #timestamp = models.DateTimeField(auto_now=False, auto_now_add=True) status_terima =models.BooleanField(default=False) def __str__(self): return self.nama_depan
[ "dananggeek@gmail.com" ]
dananggeek@gmail.com
34457b9f1292450d30115f4b973ae6c397ad444b
f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb
/weapp/tools/weather/views.py
bc7bb0580ac24124ade857e06b690326ad36e083
[]
no_license
chengdg/weizoom
97740c121724fae582b10cdbe0ce227a1f065ece
8b2f7befe92841bcc35e0e60cac5958ef3f3af54
refs/heads/master
2021-01-22T20:29:30.297059
2017-03-30T08:39:25
2017-03-30T08:39:25
85,268,003
1
3
null
null
null
null
UTF-8
Python
false
false
6,497
py
# -*- coding: utf-8 -*- __author__ = "liupeiyu" import time from datetime import timedelta, datetime, date import urllib, urllib2 import os import json from django.http import HttpResponseRedirect, HttpResponse from django.template import Context, RequestContext from django.contrib.auth.decorators import login_required, permission_required from django.conf import settings from django.shortcuts import render_to_response from django.contrib.auth.models import User, Group, Permission from django.contrib import auth from django.db.models import Q import httplib from core.jsonresponse import JsonResponse, create_response, decode_json_str from core import dateutil from core.exceptionutil import full_stack from tools.models import * from watchdog.utils import watchdog_fatal WATCHDOG_TYPE = 'WHETHER_API' ######################################################################## # get_weather_info: 获得天气信息 ######################################################################## def get_weather_info(request): weathers = Weather.objects.all() response = create_response(200) city_code = "101180801" morning_time = 6 # 早晨时间 night_time = 18 # 晚上时间 today_date = datetime.now() try: if weathers.count() == 0: weather_info, weather = __get_weather_info(city_code) else: weather = weathers[0] if __is_out_time_span(weather.update_time, weather.update_span): weather_info, weather = __get_weather_info(city_code, weather_id=weather.id) else: weather_info = json.loads(weather.info) response.data.weather_info = weather_info response.data.today_date = today_date.strftime("%Y年%m月%d日") response.data.create_time = weather.update_time.strftime("%Y年%m月%d日 %H:%M") # 计算白天还是晚上,True为白天,False为晚上 hour = int(weather.update_time.strftime("%H")) if morning_time <= hour and hour < night_time: response.data.is_daytime = True else: response.data.is_daytime = False # 当前温度 response.data.current_temp = __get_current_temp(city_code) except: response = create_response(500) response.errMsg = u'获取失败' 
response.innerErrMsg = full_stack() watchdog_fatal(u'代码错误!%s' % response.innerErrMsg, WATCHDOG_TYPE) return response.get_response() ######################################################################## # __get_weather_info: 获取近6天气信息 ######################################################################## def __get_weather_info(city_code, weather_id = 0): data_str, error_info = __get_http_response_data("m.weather.com.cn", "/data/%s.html" % city_code) weather_info = [] weather = None if data_str: info_json = decode_json_str(data_str) weather_json = info_json['weatherinfo'] # 计算周几 weeks = [u'一', u'二', u'三', u'四', u'五', u'六', u'日'] week_index = __get_week_index(weeks, weather_json['week']) # 获取今天日期 today_date = datetime.now() total_days, low_date, cur_date, high_date = dateutil.get_date_range(dateutil.get_today(), '6', 6) date_list = dateutil.get_date_range_list(datetime.date(today_date), high_date) for i in range(1,7): data = dict() data['date'] = date_list[i-1].strftime("%Y年%m月%d日") data['weather'] = weather_json['weather%d' % i] data['temp'] = weather_json['temp%d' % i] data['week'] = u'周%s' % weeks[week_index] # 给week赋值下标 week_index = week_index + 1 if week_index + 1 < len(weeks) else 0 weather_info.append(data) # 判断是否已经添加过数据,如果添加过就修改 if weather_id: weather = Weather.objects.get(id=weather_id) weather.info = json.dumps(weather_info) weather.update_time = today_date weather.save() else: weather = Weather.objects.create(info=json.dumps(weather_info), city_code = city_code) else: if weather_id: weather = Weather.objects.get(id=weather_id) weather_info = json.loads(weather.info) # print u'更新数据,天气的api不可用!' watchdog_fatal(u'更新数据,天气的api不可用!%s' % error_info, WATCHDOG_TYPE) else: # print u'首次获取数据,天气的api不可用!' 
watchdog_fatal(u'首次获取数据,天气的api不可用!%s' % error_info, WATCHDOG_TYPE) return weather_info, weather ######################################################################## # __get_current_temp: 获取当前天气温度 ######################################################################## def __get_current_temp(city_code): data_str, error_info = __get_http_response_data("www.weather.com.cn", "/data/sk/%s.html" % city_code) temp = '' if data_str: info_json = decode_json_str(data_str) # 当前温度 temp = info_json['weatherinfo']['temp'] else: # print u'获取当前天气温度,天气的api不可用!' watchdog_fatal(u'获取当前天气温度,发送请求失败!%s' % error_info, WATCHDOG_TYPE) return temp ######################################################################## # __is_out_time_span: 判断时间是否超出时间间隔 ######################################################################## def __is_out_time_span(update_time, update_span): update_span = update_span * 60 * 1000 create_time = long(time.mktime(update_time.timetuple()))*1000 now = long(time.time()) * 1000 if now-create_time > update_span: return True else: return False ######################################################################## # __get_http_response_data: 发送http请求,返回数据 ######################################################################## def __get_http_response_data(domain, url, method="GET"): error_info = None conn = httplib.HTTPConnection(domain) try: conn.request(method, url) r1 = conn.getresponse() print r1.status if r1.status is not 200: error_info = r1.read() data_str = None else: data_str = r1.read() except: data_str = None error_info = full_stack() finally: conn.close() return data_str, error_info ######################################################################## # __get_week_index: 获取周期下标 ######################################################################## def __get_week_index(weeks, string): string = string[-1:] for i in range(len(weeks)): if weeks[i] == string: return i
[ "jiangzhe@weizoom.com" ]
jiangzhe@weizoom.com
ed33f94bbd108c9000ac2d9dc0d03f9bc890dcbc
1f689e448d8b510ea6575590cb6920048b4e9aea
/leetcode/202_happy_number.py
238115bb7972505ac6b64021c56ccdb3faf05303
[]
no_license
lijenpan/python
52c6061ff90c611efd039b1858339edbefdb5ad0
7f67045a83bd2592ccc399420194094fb78404b8
refs/heads/master
2020-05-30T10:53:15.634090
2016-12-02T20:50:28
2016-12-02T20:50:28
7,646,477
1
0
null
null
null
null
UTF-8
Python
false
false
977
py
""" Write an algorithm to determine if a number is "happy". A happy number is a number defined by the following process: Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers. Example: 19 is a happy number 12 + 92 = 82 82 + 22 = 68 62 + 82 = 100 12 + 02 + 02 = 1 ============================== This question shouldn't be easy. The naive approach will get you stuck in the loop. Until you found out that (through repetitions) happy numbers contains 4, you are in for a hell of a coding session. """ def isHappy(n): """ :type n: int :rtype: bool """ temp = 0 while n != 1 and n != 4: while n: temp += (n % 10) * (n % 10) n /= 10 n = temp temp = 0 return 1 == n
[ "noreply@github.com" ]
noreply@github.com
a6cfc4148c5956d771b4b5f544581c26e4416276
cb799e1939148bf0784d8be80da909499b361184
/angr/analyses/decompiler/graph_region.py
59af543ed240015f0bb67cc21f5fa230e13e7692
[ "BSD-2-Clause" ]
permissive
wwwzbwcom/angr
474d272ad1e80b3967bec5c851bb2457cd7fc7b3
7c710f2b11f4918651bd9f087cdf883e2fa79b6c
refs/heads/master
2022-08-01T00:19:13.997613
2020-05-26T22:28:06
2020-05-26T22:28:06
259,056,991
0
0
BSD-2-Clause
2020-04-26T14:54:51
2020-04-26T14:54:50
null
UTF-8
Python
false
false
5,915
py
import logging from typing import Optional import networkx from .structurer_nodes import MultiNode l = logging.getLogger(name=__name__) class GraphRegion: """ GraphRegion represents a region of nodes. :ivar head: The head of the region. :ivar graph: The region graph. :ivar successors: A set of successors of nodes in the graph. These successors do not belong to the current region. :ivar graph_with_successors: The region graph that includes successor nodes. """ __slots__ = ('head', 'graph', 'successors', 'graph_with_successors', 'cyclic', ) def __init__(self, head, graph, successors: Optional[set], graph_with_successors, cyclic): self.head = head self.graph = graph self.successors = successors # successors inside graph_with_successors should be treated as read-only. when deep-copying GraphRegion objects, # successors inside graph_with_successors are *not* deep copied. therefore, you should never modify any # successor node in graph_with_successors. to avoid potential programming errors, just treat # graph_with_successors as read-only. 
self.graph_with_successors = graph_with_successors self.cyclic = cyclic def __repr__(self): addrs = [ ] s = "" for node in self.graph.nodes(): if hasattr(node, 'addr'): addrs.append(node.addr) if addrs: s = ": %#x-%#x" % (min(addrs), max(addrs)) if not s: s = ": %s" % self.head return "<GraphRegion of %d nodes%s>" % (self.graph.number_of_nodes(), s) def recursive_copy(self): nodes_map = { } new_graph = self._recursive_copy(self.graph, nodes_map) if self.graph_with_successors is not None: successors = set(nodes_map.get(succ, succ) for succ in self.successors) # for performance reasons, successors that are only in graph_with_successors are not recursively copied new_graph_with_successors = self._recursive_copy(self.graph_with_successors, nodes_map, ignored_nodes=successors) else: new_graph_with_successors = None successors = None return GraphRegion(nodes_map[self.head], new_graph, successors, new_graph_with_successors, self.cyclic) @staticmethod def _recursive_copy(old_graph, nodes_map, ignored_nodes=None): new_graph = networkx.DiGraph() # make copy of each node and add the mapping from old nodes to new nodes into nodes_map for node in old_graph.nodes(): if node in nodes_map: new_graph.add_node(nodes_map[node]) elif ignored_nodes is not None and node in ignored_nodes: # do not copy. 
use the reference instead new_graph.add_node(node) # drop it into the nodes_map nodes_map[node] = node else: # make recursive copies if type(node) is GraphRegion: new_node = node.recursive_copy() nodes_map[node] = new_node elif type(node) is MultiNode: new_node = node.copy() nodes_map[node] = new_node else: new_node = node nodes_map[node] = new_node new_graph.add_node(new_node) # add all edges for src, dst, edge_data in old_graph.edges(data=True): new_graph.add_edge(nodes_map[src], nodes_map[dst], **edge_data) return new_graph @property def addr(self): return self.head.addr @staticmethod def dbg_get_repr(obj, ident=0): if type(obj) is GraphRegion: s = obj.dbg_print(ident=ident) else: s = " " * ident + str(obj) return s def dbg_print(self, ident=0): s = self.dbg_get_repr(self.head, ident=ident) + "\n" successors = list(self.graph.successors(self.head)) if len(successors) == 2: left_kid, right_kid = successors s += " " * ident + "if (...) {\n" + \ self.dbg_get_repr(left_kid, ident=ident + 2) + "\n" + \ " " * ident + "}\n" + \ " " * ident + "else if (...) {\n" + \ self.dbg_get_repr(right_kid, ident=ident + 2) + "\n" + \ " " * ident + "}" # TODO: other nodes elif len(successors) == 1: s += self.dbg_get_repr(successors[0], ident=ident) return s def replace_region(self, sub_region, replace_with): if sub_region not in self.graph: l.error("The sub-region to replace must be in the current region. 
Note that this method is not recursive.") raise Exception() if sub_region is self.head: self.head = replace_with self._replace_node_in_graph(self.graph, sub_region, replace_with) if self.graph_with_successors is not None: self._replace_node_in_graph(self.graph_with_successors, sub_region, replace_with) @staticmethod def _replace_node_in_graph(graph, node, replace_with): in_edges = list(graph.in_edges(node)) out_edges = list(graph.out_edges(node)) graph.remove_node(node) graph.add_node(replace_with) for src, _ in in_edges: if src is node: graph.add_edge(replace_with, replace_with) else: graph.add_edge(src, replace_with) for _, dst in out_edges: if dst is node: graph.add_edge(replace_with, replace_with) else: graph.add_edge(replace_with, dst) assert node not in graph
[ "noreply@github.com" ]
noreply@github.com
7b6226c6e82c06fda9bbbf1708beca784473b9ae
44138967ec141d689a77dfd57b1f57f637039d86
/learn-test/fanyi.py
b14962cd5fc20b7b8e81251d781e68a67a9a30eb
[]
no_license
JaydenWade/WebCrawlers
3ce026d9e25999a3662777b85e21e787d036e99b
40c59e2bae4db6c20aebe58ddea7286798f2776e
refs/heads/master
2020-12-04T17:54:51.836760
2020-02-11T07:08:33
2020-02-11T07:08:33
231,859,082
0
0
null
null
null
null
UTF-8
Python
false
false
642
py
# -*-coding:utf-8-*- import requests headers = { 'ser-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' 'AppleWebKit/537.36 (KHTML, like Gecko) ' 'Chrome/79''.0.3945.88 Safari/537.36' } data = { "source":"auto", "target": "zh", "sourceText": "hola", "qtv":"e1886386250e8e04", "qtk": "YutIYbav37LDdbGR8jLsSBlqMkCyXRsrSRspGwOhdAw+ZS6aCMv4TI7nrnnEjNXJgFNoa8kInMDSYqILYFm23kb+SIN8NeNmOm5ifoj9UqwnMZZ0KbjzKdysGaU6BmEQo+9uqcT1AbNWQfALgvqduA==", "sessionUuid": "translate_uuid1579249802033" } post_url = "https://fanyi.qq.com/api/translate" r = requests.post(post_url, data=data, headers=headers) print(r)
[ "593849745@qq.com" ]
593849745@qq.com
ebebd592aa47837a19c5ee2b75bf60093d19f7ce
2a0f1e7d713d2c2e7ab55c3ebc023211b825e71a
/ps7b.py
9e4439efb418f08a4b6d19722887718285025007
[]
no_license
monaghrp/600ocwHW7
d7ada690fa0a39d42232fdd78e8c59e73acc3892
72ae49e117e0bc7598dd3930c49633eb8f45a6b9
refs/heads/master
2016-09-05T10:42:43.980103
2013-08-08T20:14:19
2013-08-08T20:14:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
648
py
## 1.1 1/8 ## 1.2. 1/8 ## 1.3. 3/8 ## 1.4. 1/2 ## 2. 6*(1/6)^5 import random count=[] random.seed() iterations=10000 trials=100 for i in xrange(0,trials): count.append(0) for j in xrange(0,trials): for i in xrange(0,iterations): d1=random.randint(1,6) d2=random.randint(1,6) d3=random.randint(1,6) d4=random.randint(1,6) d5=random.randint(1,6) if d1==d2==d3==d4==d5: ##print 'Yahtzee!' count[j]+=1 chances=float(sum(count)/(float(trials)*float(iterations))) print 'Chances of Yahtzee! over ' + str(trials) +' trials and ' + str(iterations) +' rolls: ' + str(chances)
[ "ryan_p_monaghan@hotmail.com" ]
ryan_p_monaghan@hotmail.com
ec032a5a668893cd894d1edf47f1f8f00cfef317
d9f868545ccfc628ac89c1d654c6baa7fd15d201
/Samples/Core/LearningWithEmbeddedPython/Data/demo_init.py
70af4a39e5c6c3997d0a8fe959af8cfd2b05d5b0
[]
no_license
bingbin83/DDGI-1
88fc0e5b8baed07ce940486b73cf4ee02610b871
cc8b6b8194ff4473cdf21f72754a103cd6f8526d
refs/heads/master
2022-04-01T18:06:45.337480
2020-01-16T16:11:16
2020-01-16T16:11:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,296
py
#*************************************************************************** # Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#************************************************************************** from tensorflow.contrib.keras.api.keras.layers import Input from tensorflow.contrib.keras.api.keras.layers import UpSampling2D, UpSampling3D, Reshape from tensorflow.contrib.keras.api.keras.models import Model, Sequential from tensorflow.contrib.keras.api.keras.layers import Dense from tensorflow.contrib.keras.api.keras.layers import Dropout from tensorflow.contrib.keras.api.keras.layers import Flatten from tensorflow.contrib.keras.api.keras.layers import Conv1D, Conv2D from tensorflow.contrib.keras.api.keras.layers import MaxPooling2D from tensorflow.contrib.keras.api.keras.optimizers import Adam from tensorflow.contrib.keras.api.keras import backend as K os.environ['TF_CPP_MIN_LOG_LEVEL']='2' K.set_image_data_format('channels_last') np.random.seed(7) def CreateSimpleImageModel_128(): dataIn = Input(shape=(3,)) layer = Dense(4 * 4, activation='tanh')(dataIn) layer = Dense(128 * 128 * 4, activation='linear')(layer) layer = Reshape((128, 128, 4))(layer) layer = UpSampling3D((4, 4, 1))(layer) layer = Reshape((1, 512, 512, 4))(layer) modelOut = layer model = Model(inputs=[dataIn], outputs=[modelOut]) adam = Adam(lr=0.005, decay=0.0001) model.compile(loss='mean_squared_error', optimizer=adam, metrics=['accuracy']) return model def CreateSimpleImageModel_256(): dataIn = Input(shape=(3,)) layer = Dense(4 * 4, activation='tanh')(dataIn) layer = Dense(256 * 256 * 4, activation='linear')(layer) layer = Reshape((256, 256, 4))(layer) layer = UpSampling3D((2, 2, 1))(layer) layer = Reshape((1, 512, 512, 4))(layer) modelOut = layer model = Model(inputs=[dataIn], outputs=[modelOut]) adam = Adam(lr=0.005, decay=0.0001) model.compile(loss='mean_squared_error', optimizer=adam, metrics=['accuracy']) return model def CreateSimpleImageModel_512(): dataIn = Input(shape=(3,)) layer = Dense(4 * 4, activation='tanh')(dataIn) layer = Dense(512 * 512 * 4, activation='linear')(layer) layer = Reshape((1, 512, 512, 
4))(layer) modelOut = layer model = Model(inputs=[dataIn], outputs=[modelOut]) adam = Adam(lr=0.005, decay=0.0001) model.compile(loss='mean_squared_error', optimizer=adam, metrics=['accuracy']) return model def ConvertDataToNumpy( trainDataIn, trainDataOut, resOutW, resOutH ): npInput = np.ones((1, trainDataIn.shape[0]), dtype='float32') npInput[0] = trainDataIn tmpData = trainDataOut.reshape(resOutW,resOutH,4) / 256.0 npOutput = np.zeros((1, 1, resOutW, resOutH,4), dtype='float32') npOutput[0][0] = np.array(tmpData, dtype='float32') return (npInput, npOutput)
[ "guoxx@me.com" ]
guoxx@me.com
8ab8b6ab34a49d1936eb7cb2cdfa1fa2034968d1
2b42b40ae2e84b438146003bf231532973f1081d
/spec/mgm4459225.3.spec
bae40e140f22b81e61c4e43270ce074687df979f
[]
no_license
MG-RAST/mtf
0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a
e2ddb3b145068f22808ef43e2bbbbaeec7abccff
refs/heads/master
2020-05-20T15:32:04.334532
2012-03-05T09:51:49
2012-03-05T09:51:49
3,625,755
0
1
null
null
null
null
UTF-8
Python
false
false
14,687
spec
{ "id": "mgm4459225.3", "metadata": { "mgm4459225.3.metadata.json": { "format": "json", "provider": "metagenomics.anl.gov" } }, "providers": { "metagenomics.anl.gov": { "files": { "100.preprocess.info": { "compression": null, "description": null, "size": 736, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.info" }, "100.preprocess.passed.fna.gz": { "compression": "gzip", "description": null, "size": 193250, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.passed.fna.gz" }, "100.preprocess.passed.fna.stats": { "compression": null, "description": null, "size": 309, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.passed.fna.stats" }, "100.preprocess.removed.fna.gz": { "compression": "gzip", "description": null, "size": 4432, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.removed.fna.gz" }, "100.preprocess.removed.fna.stats": { "compression": null, "description": null, "size": 303, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/100.preprocess.removed.fna.stats" }, "205.screen.h_sapiens_asm.info": { "compression": null, "description": null, "size": 477, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/205.screen.h_sapiens_asm.info" }, "205.screen.h_sapiens_asm.removed.fna.gz": { "compression": "gzip", "description": null, "size": 210, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/205.screen.h_sapiens_asm.removed.fna.gz" }, "299.screen.info": { "compression": null, "description": null, "size": 410, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.info" }, "299.screen.passed.fna.gcs": { "compression": null, "description": null, "size": 1733, "type": "fasta", 
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.passed.fna.gcs" }, "299.screen.passed.fna.gz": { "compression": "gzip", "description": null, "size": 128179, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.passed.fna.gz" }, "299.screen.passed.fna.lens": { "compression": null, "description": null, "size": 469, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.passed.fna.lens" }, "299.screen.passed.fna.stats": { "compression": null, "description": null, "size": 309, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/299.screen.passed.fna.stats" }, "440.cluster.rna97.fna.gz": { "compression": "gzip", "description": null, "size": 18542, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.fna.gz" }, "440.cluster.rna97.fna.stats": { "compression": null, "description": null, "size": 306, "type": "fasta", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.fna.stats" }, "440.cluster.rna97.info": { "compression": null, "description": null, "size": 947, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.info" }, "440.cluster.rna97.mapping": { "compression": null, "description": null, "size": 216473, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.mapping" }, "440.cluster.rna97.mapping.stats": { "compression": null, "description": null, "size": 48, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/440.cluster.rna97.mapping.stats" }, "450.rna.expand.lca.gz": { "compression": "gzip", "description": null, "size": 142884, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/450.rna.expand.lca.gz" }, 
"450.rna.expand.rna.gz": { "compression": "gzip", "description": null, "size": 37744, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/450.rna.expand.rna.gz" }, "450.rna.sims.filter.gz": { "compression": "gzip", "description": null, "size": 24616, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/450.rna.sims.filter.gz" }, "450.rna.sims.gz": { "compression": "gzip", "description": null, "size": 261896, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/450.rna.sims.gz" }, "900.abundance.function.gz": { "compression": "gzip", "description": null, "size": 13979, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.function.gz" }, "900.abundance.lca.gz": { "compression": "gzip", "description": null, "size": 9828, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.lca.gz" }, "900.abundance.md5.gz": { "compression": "gzip", "description": null, "size": 18821, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.md5.gz" }, "900.abundance.ontology.gz": { "compression": "gzip", "description": null, "size": 43, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.ontology.gz" }, "900.abundance.organism.gz": { "compression": "gzip", "description": null, "size": 28290, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.abundance.organism.gz" }, "900.loadDB.sims.filter.seq": { "compression": null, "description": null, "size": 1977900, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.loadDB.sims.filter.seq" }, "900.loadDB.source.stats": { "compression": null, "description": null, "size": 97, "type": "txt", "url": 
"http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/900.loadDB.source.stats" }, "999.done.COG.stats": { "compression": null, "description": null, "size": 1, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.COG.stats" }, "999.done.KO.stats": { "compression": null, "description": null, "size": 1, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.KO.stats" }, "999.done.NOG.stats": { "compression": null, "description": null, "size": 1, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.NOG.stats" }, "999.done.Subsystems.stats": { "compression": null, "description": null, "size": 1, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.Subsystems.stats" }, "999.done.class.stats": { "compression": null, "description": null, "size": 766, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.class.stats" }, "999.done.domain.stats": { "compression": null, "description": null, "size": 36, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.domain.stats" }, "999.done.family.stats": { "compression": null, "description": null, "size": 2624, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.family.stats" }, "999.done.genus.stats": { "compression": null, "description": null, "size": 3693, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.genus.stats" }, "999.done.order.stats": { "compression": null, "description": null, "size": 1353, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.order.stats" }, "999.done.phylum.stats": { "compression": null, "description": null, "size": 374, "type": "txt", "url": 
"http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.phylum.stats" }, "999.done.rarefaction.stats": { "compression": null, "description": null, "size": 22933, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.rarefaction.stats" }, "999.done.sims.stats": { "compression": null, "description": null, "size": 79, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.sims.stats" }, "999.done.species.stats": { "compression": null, "description": null, "size": 10339, "type": "txt", "url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4459225.3/file/999.done.species.stats" } }, "id": "mgm4459225.3", "provider": "metagenomics.anl.gov", "providerId": "mgm4459225.3" } }, "raw": { "mgm4459225.3.fna.gz": { "compression": "gzip", "format": "fasta", "provider": "metagenomics.anl.gov", "url": "http://api.metagenomics.anl.gov/reads/mgm4459225.3" } } }
[ "jared.wilkening@gmail.com" ]
jared.wilkening@gmail.com