blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
770c83ca8481c2ca94e0b13d777b3ae3ae117705 | 798fd982237327ba6968df52af21c7f5ba64921a | /Python/projectEuler/test.py | 95fc26fdacee1c6af46a1a57222bcec5810690ea | [] | no_license | kriegaex/projects | ab47c90a07033b8314306c0757759397b5cf603e | c9ef96bcd0909d4310321bacde7651a4eb9a6f16 | refs/heads/master | 2022-12-27T07:54:24.373210 | 2020-09-29T04:35:42 | 2020-09-29T04:35:42 | 299,513,827 | 1 | 0 | null | 2020-09-29T05:31:54 | 2020-09-29T05:31:53 | null | UTF-8 | Python | false | false | 1,151 | py | from unit_tester import test
def search_binary(xs, target):
    """Return the index of *target* in the sorted sequence *xs*, or -1.

    Classic half-interval search: the region of interest [lo, hi) is
    narrowed until it is empty or the probe hits the target.
    """
    lo, hi = 0, len(xs)
    while lo < hi:
        # Probe the middle of the current region of interest.
        mid = (lo + hi) // 2
        probe = xs[mid]
        if probe == target:
            return mid            # Found it!
        if probe < target:
            lo = mid + 1          # Target can only be in the upper half.
        else:
            hi = mid              # Target can only be in the lower half.
    return -1                     # Region of interest became empty.
# Sorted list of primes used as the search fixture.
xs = [2, 3, 5, 7, 11, 13, 17, 23, 29, 31, 37, 43, 47, 53]
# NOTE(review): 29 *is* in xs (index 8), so expecting -1 here looks wrong --
# search_binary(xs, 29) returns 8 and this case will report a failure.
# A value absent from xs (e.g. 19) was probably intended.
test(search_binary(xs, 29) == -1)
test(search_binary(xs, 99) == -1)  # larger than every element
test(search_binary(xs, 1) == -1)   # smaller than every element
# Every element should be found at its own index.
for (i, v) in enumerate(xs):
    test(search_binary(xs, v) == i)
if "chaozy" < "bob":
print("true")
else: print("false") | [
"chaozy.zhu.19@ucl.ac.uk"
] | chaozy.zhu.19@ucl.ac.uk |
3d27a5f70858a93347aafb3d4a58e352edf746b0 | c5b7c422ab73f779bec97fda8b54682815300fa3 | /aprs-master/aprs/decimaldegrees.py | de7b1de3374c952a6890ac4c95b8b153a846e0c8 | [
"Apache-2.0"
] | permissive | bbaydar/cloudhoppers | c7240566998f1024e319d94773f2617c385f4b3f | 0ec94fbea77d85227b7caff6ba494b6b13e93832 | refs/heads/master | 2021-01-20T22:44:46.052750 | 2017-08-30T01:06:24 | 2017-08-30T01:06:24 | 101,823,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,882 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
PyDecimalDegrees - geographic coordinates conversion utility.
Copyright (C) 2006-2013 by Mateusz Łoskot <mateusz@loskot.net>
Copyright (C) 2010-2013 by Evan Wheeler <ewheeler@unicef.org>
This file is part of PyDecimalDegrees module.
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from
the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
DESCRIPTION
DecimalDegrees module provides functions to convert between
degrees/minutes/seconds and decimal degrees.
Original source distribution:
http://mateusz.loskot.net/software/gis/pydecimaldegrees/
Inspired by Walter Mankowski's Geo::Coordinates::DecimalDegrees module
for Perl, originally located in CPAN Archives:
http://search.cpan.org/~waltman/Geo-Coordinates-DecimalDegrees-0.05/
doctest examples are based following coordinates:
DMS: 121 8' 6"
DM: 121 8.1'
DD: 121.135
To run doctest units just execut this module script as follows
(-v instructs Python to run script in verbose mode):
$ python decimaldegrees.py [-v]
"""
__revision__ = '$Revision: 1.1 $'
import decimal as libdecimal
from decimal import Decimal as D
def decimal2dms(decimal_degrees):
    """Split a decimal-degrees value into (degrees, minutes, seconds).

    Returns a 3-element tuple of Decimals.  For a negative input only the
    degrees component is negative; minutes and seconds are always positive.

    Example:
    >>> decimal2dms(121.135)
    (Decimal('121'), Decimal('8'), Decimal('6.000'))
    >>> decimal2dms(-121.135)
    (Decimal('-121'), Decimal('8'), Decimal('6.000'))
    """
    ctx = libdecimal.getcontext()
    whole_degrees = D(int(decimal_degrees))
    # Positive fractional part of the degrees, kept exact via str().
    fractional = (D(str(decimal_degrees)) - whole_degrees).copy_abs()
    total_minutes = ctx.multiply(fractional, D(60))
    whole_minutes = D(int(total_minutes))
    seconds = ctx.multiply(total_minutes - whole_minutes, D(60))
    return (whole_degrees, whole_minutes, seconds)
def decimal2dm(decimal_degrees):
    """Split a decimal-degrees value into (degrees, minutes).

    Returns a 2-element tuple of Decimals.  For a negative input only the
    degrees component is negative; minutes are always positive.

    Example:
    >>> decimal2dm(121.135)
    (Decimal('121'), Decimal('8.100'))
    >>> decimal2dm(-121.135)
    (Decimal('-121'), Decimal('8.100'))
    """
    whole_degrees = D(int(decimal_degrees))
    # Positive fractional part of the degrees, kept exact via str().
    fraction = (D(str(decimal_degrees)) - whole_degrees).copy_abs()
    minutes = libdecimal.getcontext().multiply(fraction, D(60))
    return (whole_degrees, minutes)
def dms2decimal(degrees, minutes, seconds):
    """Combine degrees, minutes, and seconds into decimal degrees.

    If 'degrees' is negative the returned value is negative as well.
    NOTE: this method returns a decimal.Decimal.

    Example:
    >>> dms2decimal(121, 8, 6)
    Decimal('121.135')
    >>> dms2decimal(-121, 8, 6)
    Decimal('-121.135')
    """
    ctx = libdecimal.getcontext()
    degs = D(str(degrees))
    mins = ctx.divide(D(str(minutes)), D(60))
    secs = ctx.divide(D(str(seconds)), D(3600))
    # Minutes/seconds push the magnitude away from zero in the same
    # direction as the degrees component.
    if degrees >= D(0):
        total = degs + mins + secs
    else:
        total = degs - mins - secs
    return ctx.normalize(total)
def dm2decimal(degrees, minutes):
    """ Converts degrees and minutes to the equivalent number of decimal
    degrees. If parameter 'degrees' is negative, then returned decimal-degrees
    will also be negative.

    Thin wrapper around dms2decimal with the seconds component fixed to 0.

    Example:
    >>> dm2decimal(121, 8.1)
    Decimal('121.135')
    >>> dm2decimal(-121, 8.1)
    Decimal('-121.135')
    """
    return dms2decimal(degrees, minutes, 0)
def run_doctest(): # pragma: no cover
    """Run this module's doctests and return the doctest results."""
    import doctest
    # Self-import so testmod can discover the docstrings in this module.
    import decimaldegrees # pylint: disable=W0406
    return doctest.testmod(decimaldegrees)


# Execute the doctests when this module is run as a script.
if __name__ == '__main__':
    run_doctest() # pragma: no cover
| [
"LilTigs@gmail.com"
] | LilTigs@gmail.com |
9f36779e687ed1474e2e97a9940161e6764000b2 | b0c8e0cafa4a8916faab3cce65756ae91426c43f | /study/Python/Week8/BOJ_11497_강의현.py | e96c5e3d830e8bd8766220fac444dcc77cd359af | [] | no_license | Rurril/IT-DA-3rd | b3e3ec3c2a5efbc75b76b84e9002c27a0ba4a1c4 | 9985e237cb1b90e9609656d534e0ed164723e281 | refs/heads/master | 2022-07-22T15:26:39.085369 | 2021-11-23T13:30:06 | 2021-11-23T13:30:06 | 288,980,334 | 3 | 29 | null | 2020-11-05T10:25:30 | 2020-08-20T10:49:17 | Java | UTF-8 | Python | false | false | 598 | py | # 통나무 건너뛰기 - S2
# BOJ 11497 (log crossing): minimise the largest height difference between
# neighbouring logs when the logs are arranged in a circle.
import sys
from collections import deque
T=int(sys.stdin.readline())  # number of test cases
for _ in range(T):
    new_log=deque()
    N=int(sys.stdin.readline())  # number of logs
    log=list(map(int,sys.stdin.readline().split()))
    log.sort()
    # Put the tallest log first, then alternate the remaining logs
    # left/right so heights descend towards both ends (zig-zag layout).
    new_log.append(log[-1])
    for i in range(N-2,-1,-1):
        if i%2==0:
            new_log.appendleft(log[i])
        else:
            new_log.append(log[i])
    # Difficulty = largest absolute gap between neighbours, treating the
    # row as circular (first element vs last element included).
    difficulty=list()
    difficulty.append(abs(new_log[-1]-new_log[0]))
    for i in range(1,N):
        difficulty.append(abs(new_log[i]-new_log[i-1]))
    print(max(difficulty))
"riverkeh@naver.com"
] | riverkeh@naver.com |
3b47c5da4741a0da192e27f24e3861bc0a2247cc | d9a4e69a3cb6b791fecc0508eaf33896bc5c265b | /course.py | 8e13ca4af6e400848e0aba1e59d4a3792c024cc7 | [] | no_license | Jennifer-Vo/Organize-Grouping-Algorithm | 7d117fd96f32ab106a9a928ec9ee8a2c0d0264f1 | 44bd773c42c7a769a3e6d96eb663c6ab3647450f | refs/heads/main | 2023-02-09T18:23:12.804649 | 2021-01-03T07:48:48 | 2021-01-03T07:48:48 | 326,352,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,721 | py | """CSC148 Assignment 1
=== CSC148 Winter 2020 ===
Department of Computer Science,
University of Toronto
This code is provided solely for the personal and private use of
students taking the CSC148 course at the University of Toronto.
Copying for purposes other than this use is expressly prohibited.
All forms of distribution of this code, whether as given or with
any changes, are expressly prohibited.
Authors: Misha Schwartz, Mario Badr, Christine Murad, Diane Horton,
Sophia Huynh and Jaisie Sin
All of the files in this directory and all subdirectories are:
Copyright (c) 2020 Misha Schwartz, Mario Badr, Christine Murad, Diane Horton,
Sophia Huynh and Jaisie Sin
=== Module Description ===
This file contains classes that describe a university course and the students
who are enrolled in these courses.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, List, Tuple, Optional, Dict, Union
if TYPE_CHECKING:
from survey import Answer, Survey, Question
def sort_students(lst: List[Student], attribute: str) -> List[Student]:
    """Return a shallow copy of <lst> sorted by <attribute>.

    === Precondition ===
    <attribute> is an attribute name for the Student class; every element
    of <lst> must have that attribute.
    """
    def _key(student):
        # Look the attribute up by name so any Student attribute works.
        return getattr(student, attribute)

    return sorted(lst, key=_key)
class Student:
    """A Student who can be enrolled in a university course.

    === Public Attributes ===
    id: the id of the student
    name: the name of the student

    === Private Attributes ===
    _records: one dict per recorded answer, holding the question id and
        the raw answer content
    _answers: the Answer objects, parallel to _records

    === Representation Invariants ===
    name is not the empty string
    """
    id: int
    name: str
    _records: List[Dict[str, Union[str, int, bool, List[str]]]]
    _answers: List[Answer]

    def __init__(self, id_: int, name: str) -> None:
        """Initialize a student with name <name> and id <id_>."""
        self.id = id_
        self.name = name
        self._records = []
        self._answers = []

    def __str__(self) -> str:
        """Return the name of this student."""
        return self.name

    def has_answer(self, question: Question) -> bool:
        """Return True iff this student has an answer for a question with
        the same id as <question> and that answer is valid for <question>.
        """
        for record, answer in zip(self._records, self._answers):
            if record['question_id'] == question.id and answer.is_valid(question):
                return True
        return False

    def set_answer(self, question: Question, answer: Answer) -> None:
        """Record this student's answer <answer> to the question <question>."""
        record = {'question_id': question.id, 'answer': answer.content}
        self._records.append(record)
        self._answers.append(answer)

    def get_answer(self, question: Question) -> Optional[Answer]:
        """Return this student's first valid answer to <question>, or None
        if this student has no valid answer to <question>.
        """
        for record, answer in zip(self._records, self._answers):
            if record['question_id'] == question.id and answer.is_valid(question):
                return answer
        return None
class Course:
    """A University Course.

    === Public Attributes ===
    name: the name of the course
    students: a list of students enrolled in the course

    === Representation Invariants ===
    - No two students in this course have the same id
    - name is not the empty string
    """
    name: str
    students: List[Student]

    def __init__(self, name: str) -> None:
        """Initialize a course with the name of <name> and no students."""
        self.name = name
        self.students = []

    def enroll_students(self, students: List[Student]) -> None:
        """Enroll all students in <students> in this course.

        If adding any student would violate a representation invariant
        (a duplicate id against already-enrolled students, a duplicate id
        within <students> itself, or an empty name), do not add any of the
        students in <students> to the course.

        Fix: the original checked names one at a time while adding, so a
        single empty-name student did not stop the rest of the batch, and
        duplicate ids inside <students> were never detected.  Enrollment
        is now all-or-nothing, as documented.
        """
        # Validate the entire batch before mutating self.students.
        seen_ids = {enrolled.id for enrolled in self.students}
        for student in students:
            if student.name == '' or student.id in seen_ids:
                return  # reject the whole batch
            seen_ids.add(student.id)
        self.students.extend(students)

    def all_answered(self, survey: Survey) -> bool:
        """Return True iff all the students enrolled in this course have a
        valid answer for every question in <survey>.
        """
        for question in survey.get_questions():
            for student in self.students:
                if not student.has_answer(question):
                    return False
        return True

    def get_students(self) -> Tuple[Student, ...]:
        """Return a tuple of all students enrolled in this course, ordered
        by id from lowest to highest.
        """
        return tuple(sort_students(self.students, 'id'))

    def __has_student(self, student: Student) -> bool:
        """Return True iff a student with the same id as <student> is
        already enrolled in this course.
        """
        for stu in self.students:
            if stu.id == student.id:
                return True
        return False
# Run the python_ta style/contract checker over this module when executed
# directly (a CSC148 assignment convention).
if __name__ == '__main__':
    import python_ta
    python_ta.check_all(config={'extra-imports': ['typing', 'survey']})
| [
"noreply@github.com"
] | noreply@github.com |
2ef3ab6c3fc3636d57b51cc1ce3cfb2152b737f7 | 8362040379d8c857d53abc88d80894496d512026 | /web_abang/Avangard/urls.py | 6c6073704ab29180da3bab3522a6f787994b3103 | [] | no_license | Huskiss/web_abang | 3266c0090ebc8cf59be7eb6b8b00bd059bb2a596 | 896d80c696ef7b16045a5ea4ecf2bf92ae5aeb80 | refs/heads/master | 2023-03-01T11:53:36.161516 | 2021-02-19T01:10:54 | 2021-02-19T01:10:54 | 335,554,690 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | """Avangard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path

# Root URLconf: only the Django admin site is routed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"sh0316@kakao.com"
] | sh0316@kakao.com |
2ba8d0457b0f510232d4d95043d81f82a5fa7b41 | b8fed8222b41e447cd5ce83513eb4d014c01742b | /sbm_purchase_rent/contract/po_contract.py | bc25bb0bcb7486749663e8676b19e87b615275c2 | [] | no_license | lajayuhniyarsyah/ERP-Supra | e993d8face6e022b6f863d1dff7cb51cda36be8d | 5a64dbb57ee40070354926700091fb9025c1350c | refs/heads/master | 2021-01-25T22:09:46.306990 | 2017-11-08T05:32:04 | 2017-11-08T05:32:04 | 23,605,825 | 0 | 10 | null | 2017-11-08T05:32:05 | 2014-09-03T03:58:28 | Python | UTF-8 | Python | false | false | 1,186 | py | from datetime import datetime
import netsvc
from osv import osv, fields
class POContract(osv.osv):
    """Extend purchase.order with a link to a rental/purchase contract and
    read-only fields mirrored from that contract (old OpenERP osv API)."""
    _inherit = 'purchase.order'
    _columns = {
        # Contract this PO belongs to; deleted together with the contract.
        'contract_id' : fields.many2one('purchase.order.contract.data','Contract',ondelete="Cascade"),
        # Fields mirrored from the linked contract; not stored on the PO.
        'contract_no' : fields.related('contract_id','contract_no',type="char",string="Contract No",store=False),
        'start_contract' : fields.related('contract_id','start_contract',type="date",string="Contract Start",store=False),
        'expire_contract' : fields.related('contract_id','expire_contract',type="date",string="Contract Expire",store=False),
        'notes' : fields.related('contract_id','notes',type="text",string="Notes",store=False),
    }
    # def write(self,cr,uid,ids,vals,context=None):
    #     return False
# Register the model with the OpenERP ORM (old-style API).
POContract()
class POContractData(osv.osv):
    """Contract master data referenced by purchase orders; records are
    displayed by their contract number (_rec_name)."""
    _name = 'purchase.order.contract.data'
    _rec_name = 'contract_no'
    _columns = {
        'contract_no' : fields.char('Contract No',30,required=True),
        'start_contract' : fields.date('Date Start',required=True),
        'expire_contract' : fields.date('Expire Contract',required=True),
        'notes' : fields.text('Notes'),
        # Purchase orders attached to this contract (inverse of contract_id).
        'pos' : fields.one2many('purchase.order','contract_id','POs')
    }
# Register the model with the OpenERP ORM (old-style API).
POContractData()
"lajayuhni@gmail.com"
] | lajayuhni@gmail.com |
ecd4a680ed96fd1e6823712d929270a37f0452b7 | 52665ece10eda10a3e6a345eaf181a54410ca6b4 | /api/contractt/wsgi.py | f1b4cb5e7d481079b1ff9ec0cbde5a415ec8b17c | [] | no_license | arthurzeras/contractt | 11680cf1423ba7c86080f62f1181a48a7b60c06c | 0d63e451bcdb9d9b4024da74cabe3e48eb6efcf4 | refs/heads/master | 2023-03-23T04:34:35.282238 | 2020-07-20T13:05:07 | 2020-07-20T13:05:07 | 246,305,578 | 0 | 0 | null | 2021-03-19T23:25:31 | 2020-03-10T13:16:47 | Python | UTF-8 | Python | false | false | 170 | py | import os
from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'contractt.settings')
# WSGI entry point used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"lucas.souto@instruct.com.br"
] | lucas.souto@instruct.com.br |
b840c6a48ee6c433de33cef3b63ad6544edebd28 | b183d2e19a913e1e28c958646ecb945f4d42de1b | /funnyitem/urls.py | 12042904e1714321c0e3601981a26f995e4f9af6 | [] | no_license | lydiapierce/Django-Humor-Site | 18e8609dad0edbe2b1a27365058cce17dc116073 | 7b8c8b7e21fc02b0bc079e28c1be4f48bf65fb7d | refs/heads/master | 2016-09-06T02:35:50.630857 | 2012-11-25T04:51:48 | 2012-11-25T04:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from funnyitemposts.views import *
urlpatterns = patterns('',
('^$', home), #added
(r'^funnyitempost/(?P<funnyitempost_id>\d+)/$',funnyitempost_specific), #added
# Examples:
# url(r'^$', 'funnyitem.views.home', name='home'),
# url(r'^funnyitem/', include('funnyitem.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| [
"liddy.pierce@gmail.com"
] | liddy.pierce@gmail.com |
839c9387ac151c746e63cdb75c3c0509c99be87d | f9e1d9c71d232aa0bcf03627259e6c9f88538b18 | /gs92QuerysetAggregation/gs92/asgi.py | a4d73d71bdf12411bd1bef9349c4ca3d3cea7831 | [] | no_license | nayan-gujju/Django-Practice | a7db202b6a3627a6a4e9f96953b61e43eaf68cb1 | eafa29e9321a1683867b2ea1d26ca74dfa6db12d | refs/heads/master | 2023-07-27T11:41:43.956705 | 2021-09-09T08:47:44 | 2021-09-09T08:47:44 | 403,917,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for gs92 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings before building the ASGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs92.settings')
# ASGI entry point used by async-capable servers (daphne, uvicorn, ...).
application = get_asgi_application()
| [
"nayangujarati007@gmail.com"
] | nayangujarati007@gmail.com |
7ed63c610ff7d345da84727a19573d00b2b98a52 | 4271ef14947f5d38a993fc3484c433612e57d65e | /models/token.py | 190407a821612366bc59a1c51f1ce72c9004821e | [] | no_license | CHIKITCHONG/bbs | 4365db161a7d2eee468995e009f9bc4a02825254 | d7a3007efa55af284909b618dd215e480425c432 | refs/heads/master | 2020-03-28T05:31:50.848747 | 2018-09-12T03:58:05 | 2018-09-12T03:58:05 | 147,782,141 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | from sqlalchemy import Column, Integer, String
from models.base_model import db, SQLMixin
from models.user import User
class Token(SQLMixin, db.Model):
    # 36-character token string (UUID-sized); required.
    content = Column(String(36), nullable=False)
    # Id of the user this token belongs to; may be NULL.
    user_id = Column(Integer, nullable=True)

    def user(self):
        """Return the User this token belongs to.

        NOTE(review): presumably User.one returns None when no user with
        this id exists -- confirm against the User model.
        """
        u = User.one(id=self.user_id)
        return u
| [
"765150388@qq.com"
] | 765150388@qq.com |
6fde5d9d3dbfc441667d9375073164c721be9a12 | 64b1b6de177f39e174cd4ea038f515092a34f77f | /classes/Variable.py | 5c2d17e71a75077e112a89cda5f060c5bb22668d | [
"Apache-2.0"
] | permissive | chaoli314/openbn | 4bd48df9dfa2f69d463a58bc5ebdde48a1288b58 | bcb40984d930fc4d6c966b21381a4a25c26981f2 | refs/heads/master | 2020-12-03T01:57:08.121203 | 2017-07-07T02:38:13 | 2017-07-07T02:38:13 | 95,886,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | # This Python file uses the following encoding: utf-8
""" Subject line.
Main text.
"""
from functools import total_ordering
__author__ = 'Chao Li'
@total_ordering
class Variable(object):
    """A named discrete variable.

    Variables are ordered, compared, and hashed by their index; repr()
    shows the variable's name.  total_ordering derives the remaining
    comparison operators from __eq__ and __lt__.
    """

    def __init__(self, variable_index: int, variable_name, values: list):
        self._index = variable_index
        self._name = variable_name
        self._values = values
        # Cache the cardinality so it is not recomputed on every access.
        self._card = len(values)

    @property
    def index(self):
        """Position of this variable in the model ordering."""
        return self._index

    @property
    def name(self):
        """Human-readable name of the variable."""
        return self._name

    @property
    def card(self):
        """Number of values this variable can take (cardinality)."""
        return self._card

    @property
    def values(self):
        """List of value labels for this variable."""
        return self._values

    def get_value_name(self, index):
        """Return the value label stored at position *index*."""
        return self._values[index]

    def get_value_index(self, value):
        """Return the position of *value* in the value list."""
        return self._values.index(value)

    def __hash__(self) -> int:
        return hash(self._index)

    def __eq__(self, other):
        return self._index == other.index

    def __lt__(self, other):
        return self._index < other.index

    def __repr__(self) -> str:
        return self._name
| [
"chao.li.314@gmail.com"
] | chao.li.314@gmail.com |
78eea12de223a2e58e86a79835f6face2fa47646 | e401470228d7c92690b05f7268e1a0709bc87dff | /Roleta/menus.py | 496addb896689cada632787a4cfac4febd80c8dc | [] | no_license | Djsouls/roleta | 1dfa505f5cedb3dceef7d8bb2f465bfd0cd10f82 | 384db2211d1df26b96a40e19b5cac967cab35c5f | refs/heads/master | 2020-08-08T01:47:49.876024 | 2019-10-10T14:00:57 | 2019-10-10T14:00:57 | 213,664,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | def showRouletteMenu():
print('+=================================+')
print('+ Escolha o tipo de roleta +')
print('+ 1 - Roleta Americana +')
print('+ 2 - Roleta Europeia +')
print('+ 3 - Roleta Francesa +')
print('+=================================+') | [
"djalmirinha@gmail.com"
] | djalmirinha@gmail.com |
f595c71d34cfb1606170d2728772f40f56279ff0 | 2309a185c5427f576b7f5bb927a572c778533403 | /smc_updater.py | 5f0996cfab1af45aa08a92a4ed4d4888848be66d | [] | no_license | wxlg1117/smc_updater | 6f4b8a5389dd417a3fce33270f78b036e31e1119 | aa88364a7c000c2dfed80cb70a77751ff4ae7a9f | refs/heads/master | 2020-06-20T11:11:45.107570 | 2019-05-20T21:18:46 | 2019-05-20T21:18:46 | 197,104,198 | 1 | 1 | null | 2019-07-16T02:16:17 | 2019-07-16T02:16:16 | null | UTF-8 | Python | false | false | 9,690 | py | import os
import time
import sys
import re
import json
import cli_ui
import delegator
import logging
import urllib.request
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from pyvirtualdisplay import Display
# When True, prompt the operator before flashing; set False for unattended runs.
manual_run = True
# Path to the Supermicro Update Manager (SUM) binary used for BIOS/BMC flashing.
sum_bin = "./sum_2.1.0_Linux_x86_64/sum"
# Path to the Supermicro IPMICFG binary used to query the BMC firmware version.
ipmicfg_bin = "./IPMICFG_1.29.0_build.181029/Linux/64bit/IPMICFG-Linux.x86_64"
# Rank of the alphabetic BIOS revision suffix (e.g. "3.1a" vs "3.1b"):
# later letters mean newer interim releases.
alpha_dict = {
    "a":0,
    "b":1,
    "c":2,
    "d":3,
    "e":4,
    "f":5,
    "g":6,
    "h":7,
    "i":8,
    "j":9
}
""" Check if a process ID is valid or not (signal 0 doesn't kill anything)"""
def check_pid(pid):
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
""" Test if the system is using UEFI or BIOS"""
""" Some BIOS updates are seperated by these types"""
def is_uefi_boot():
return os.path.isdir("/sys/firmware/efi")
def get_board_model():
    """Return the motherboard product name reported by dmidecode,
    whitespace stripped.  Requires sudo."""
    return delegator.chain("sudo dmidecode -t baseboard|grep 'Product Name: ' |sed 's/Product Name: //' | tr -d '[:space:]'").out


def get_current_bios():
    """Return the currently installed BIOS version reported by dmidecode,
    whitespace stripped.  Requires sudo."""
    return delegator.chain("sudo dmidecode -t bios| grep Version |sed 's/Version://' |tr -d '[:space:]'").out


def get_current_ipmi():
    """Return the current BMC/IPMI firmware version via IPMICFG.

    NOTE(review): the binary path is hard-coded here instead of reusing the
    module-level ``ipmicfg_bin`` constant -- confirm and deduplicate.
    """
    return delegator.chain("sudo ./IPMICFG_1.29.0_build.181029/Linux/64bit/IPMICFG-Linux.x86_64 -ver | awk -F ':' {'print $2'}| tr -d [:space:]").out
def update_ipmi(ipmi_bin):
    """Flash the BMC/IPMI firmware image *ipmi_bin* with SUM, polling the
    flasher process once a minute until it exits."""
    logging.info("update_ipmi() | Running IPMI Update With {0}".format(ipmi_bin))
    print("Updating with {0}".format(ipmi_bin))
    # Launch SUM non-blocking so we can poll its pid while it flashes.
    ipmi_update = delegator.run("sudo {0} -c UpdateBmc --file {1}".format(sum_bin, ipmi_bin), block=False, timeout=600)
    timer = 0
    print(ipmi_update.pid)
    while check_pid(ipmi_update.pid):
        print("Updating IPMI....This may take up to 10 minutes. [ Elapsed Time: {0}m ]".format(str(timer)))
        time.sleep(60)
        timer += 1
    print("IPMI Update Complete")
    logging.info(ipmi_update)
    logging.info("main::update_ipmi() | IPMI Update Complete {0}".format(ipmi_bin))
def update_bios(bios_bin):
    """Flash the BIOS image *bios_bin* with SUM, polling until the flasher
    exits, then report success or failure based on SUM's output."""
    print("Updating BIOS with {0}".format(bios_bin))
    logging.info("main::update_bios() | Running BIOS update with {0}".format(bios_bin))
    # Launch SUM non-blocking so we can poll its pid while it flashes.
    bios_update = delegator.run("sudo {0} -c UpdateBios --file {1}".format(sum_bin, bios_bin), block=False, timeout=600)
    timer = 0
    while check_pid(bios_update.pid):
        print(bios_update.pid)
        print("Updating BIOS....This may take up to 10 minutes. [ Elapsed Time: {0}m ]".format(str(timer)))
        time.sleep(60)
        timer += 1
    # SUM prints this phrase when it could not complete the flash on its own.
    if 'Manual steps are required' in bios_update.out:
        print("Automated BIOS Update Failed: Please Reboot System And Try Again")
        logging.error("main::update_bios() | BIOS Update Failed")
        logging.error(bios_update)
    else:
        logging.info("main::update_bios() | BIOS Update Complete {0}".format(bios_bin))
        print("BIOS Update Complete. Please reboot to use new BIOS.")
        logging.info(bios_update)
        # NOTE(review): this log line says "IPMI Update Complete" inside the
        # BIOS updater -- looks like a copy/paste slip in the message text.
        logging.info("main::update_bios() | IPMI Update Complete {0}".format(bios_bin))
def download_file(url, dl_path):
    """Fetch *url* and store the response body at *dl_path*, announcing
    the transfer on stdout first."""
    message = "Downloading {0} to {1}".format(url, dl_path)
    print(message)
    urllib.request.urlretrieve(url, dl_path)
def extract_zip(zip_file, extract_dir):
    """Unpack every member of the zip archive *zip_file* into the
    directory *extract_dir*."""
    import zipfile
    with zipfile.ZipFile(zip_file, mode="r") as archive:
        archive.extractall(path=extract_dir)
def find_bios_file(bios_bin):
    """Return the path of the extracted BIOS image named *bios_bin*.

    Prefers the UEFI/ subdirectory on UEFI-booted systems and BIOS/ on
    legacy-booted systems, falling back to the extraction root.

    Fix: the legacy branch originally tested "/tmp/bios_update/BIOS"
    (missing the per-release subdirectory) while listing
    "/tmp/bios_update/<bin>/BIOS/<bin>", so it could never match the
    directory it actually read from.  Both branches now test and list the
    same per-release path.
    """
    base = "/tmp/bios_update/{0}".format(bios_bin)
    if os.path.isdir("{0}/UEFI".format(base)) and is_uefi_boot():
        return delegator.run("ls {0}/UEFI/{1}".format(base, bios_bin)).out
    elif os.path.isdir("{0}/BIOS".format(base)) and not is_uefi_boot():
        return delegator.run("ls {0}/BIOS/{1}".format(base, bios_bin)).out
    else:
        return delegator.run("ls {0}".format(base)).out
def find_ipmi_file(ipmi_bin):
    """Return the path of the extracted IPMI image under /tmp/ipmi_update.

    NOTE(review): the *ipmi_bin* argument is ignored -- whatever *.bin the
    shell glob matches first is returned, which is only safe while exactly
    one image has been extracted.
    """
    return delegator.run("ls /tmp/ipmi_update/*.bin").out
def is_alpha_version(ver):
    """Return True when version string *ver* carries an alpha revision.

    Supermicro BIOS revisions append a letter to mark interim releases
    (e.g. "3.1a").  Non-strings and non-ASCII strings return False.

    Fix: the original returned ``ver.encode('ascii').isalpha()``, which is
    True only when the WHOLE string is alphabetic -- always False for real
    versions like "3.1a" -- so the alpha-suffix comparison in eval_version
    was dead code.  We now test the trailing character.
    """
    try:
        ver.encode('ascii')
    except (AttributeError, UnicodeEncodeError):
        return False
    return bool(ver) and ver[-1].isalpha()
def eval_version(cur_version, new_version, ipmi=False):
    """Return True when *new_version* is strictly newer than *cur_version*.

    Versions look like "3.1" or, for BIOS releases, "3.1a" where a trailing
    letter marks an interim revision ranked via ``alpha_dict``.  When
    ``ipmi`` is True only the numeric parts are compared.

    Fixes over the original implementation:
    - versions were compared as floats built from "major.minor", which
      conflated e.g. "3.10" with "3.1"; major/minor are now compared as
      integer tuples
    - the equal-version branch referenced an undefined name
      ``old_version`` (NameError); it now uses *cur_version*
    - the function implicitly returned None for equal non-alpha versions;
      it now returns False explicitly
    """
    cur_parts = re.findall(r"[^\W\d_]+|\d+", cur_version)
    new_parts = re.findall(r"[^\W\d_]+|\d+", new_version)
    cur_num = (int(cur_parts[0]), int(cur_parts[1]))
    new_num = (int(new_parts[0]), int(new_parts[1]))
    if new_num != cur_num:
        return new_num > cur_num
    if ipmi:
        # IPMI firmware carries no alpha suffix worth comparing.
        return False
    # Numeric parts are equal: fall back to the alpha revision suffix.
    new_is_alpha = is_alpha_version(new_version)
    cur_is_alpha = is_alpha_version(cur_version)
    if new_is_alpha and cur_is_alpha:
        # assumes a single-letter suffix in a..j -- see alpha_dict
        return alpha_dict[new_parts[-1]] > alpha_dict[cur_parts[-1]]
    # An alpha release is newer than the plain release (3.1a > 3.1).
    return new_is_alpha
def get_latest_bios(board_model):
    """Scrape supermicro.com for the newest BIOS of *board_model*.

    Returns [version_string, download_url] on success; prints an error and
    falls through (returning None) otherwise.  Relies on the module-level
    ``driver`` (selenium) and ``links`` (product page paths) globals set up
    in the __main__ block.
    """
    update_choice = None
    # NOTE(review): latest_bios_revision is never assigned after this, so
    # the final check always fires when no product link matched; the loop
    # assigns latest_bios_version instead, which is unbound if the page has
    # no "BIOS Revision:" line.
    latest_bios_revision = None
    for link in links:
        # Product links end in "<BOARD_MODEL>.cfm"; '+' in model names is
        # encoded as '_' on the site.
        link_board = link.split("/")[-1].split(".")[0]
        if board_model.replace("+", "_") == link_board:
            driver.get("https://www.supermicro.com{0}".format(link))
            # Submit the BIOS download form on the product page.
            driver.find_element_by_xpath('//a[@href="{0}"]'.format("javascript:document.biosForm.submit();")).click()
            raw = driver.find_element_by_class_name("yui-skin-sam").text.split("\n")
            for line in raw:
                if "BIOS Revision:" in line:
                    latest_bios_version = line.split(":")[1].replace("R", "").strip()
            # The .zip link encodes the software id used in the direct URL.
            a = driver.find_element_by_partial_link_text('.zip')
            filename = a.text
            software_id = a.get_attribute("href").split("=")[-1]
            bios_dl_link = "https://www.supermicro.com/Bios/softfiles/{0}/{1}".format(software_id, filename)
            if latest_bios_version and bios_dl_link:
                return [latest_bios_version, bios_dl_link]
            else:
                print("failed to download bios information")
    if latest_bios_revision == None:
        print("Failed to find BIOS online")
def get_latest_ipmi(board_model):
    """Scrape supermicro.com for the newest IPMI firmware of *board_model*.

    Returns [version_string, download_url] for the first matching product
    page; returns None implicitly when no link matches.  Relies on the
    module-level ``driver`` and ``links`` globals set up in __main__.
    """
    for link in links:
        link_board = link.split("/")[-1].split(".")[0]
        if board_model.replace("+", "_") == link_board:
            driver.get("https://www.supermicro.com{0}".format(link))
            # Submit the IPMI firmware download form on the product page.
            driver.find_element_by_xpath('//a[@href="{0}"]'.format("javascript:document.IPMIForm.submit();")).click()
            raw = driver.find_element_by_class_name("yui-skin-sam").text.split("\n")
            for line in raw:
                if "Firmware Revision:" in line:
                    # NOTE(review): unbound if the page lacks this line.
                    latest_ipmi_version = line.split(":")[1].strip(" R")
            # The .zip link encodes the software id used in the direct URL.
            a = driver.find_element_by_partial_link_text('.zip')
            filename = a.text
            software_id = a.get_attribute("href").split("=")[-1]
            ipmi_dl_link = "https://www.supermicro.com/Bios/softfiles/{0}/{1}".format(software_id, filename)
            return [latest_ipmi_version, ipmi_dl_link]
def main():
    """Show current vs. latest firmware for this board and, after operator
    confirmation (when manual_run is True), download and flash updates."""
    board_model = get_board_model()
    bios_version = get_current_bios()
    bios_dl = get_latest_bios(board_model)          # [version, url]
    ipmi_version = get_current_ipmi()
    ipmi_dl = get_latest_ipmi(board_model)          # [version, url]
    # Summary table: firmware type, installed version, latest online version.
    sys_headers = ['FW', 'CURRENT', 'LATEST']
    cli_ui.info_section(cli_ui.green, cli_ui.bold, "SMC UPDATER")
    board_header = ['BOARD MODEL']
    board_data = [[(cli_ui.bold, board_model)]]
    cli_ui.info_table(board_data, headers=board_header)
    print()
    sys_data = [
        [(cli_ui.bold, "BIOS"), (cli_ui.bold, bios_version), (cli_ui.bold, bios_dl[0])],
        [(cli_ui.bold, "IPMI"), (cli_ui.bold, ipmi_version), (cli_ui.bold, ipmi_dl[0])]
    ]
    cli_ui.info_table(sys_data, headers=sys_headers)
    print()
    # --- BIOS update flow ---
    if eval_version(bios_version, bios_dl[0]):
        update_choice = None
        # Loop until the operator answers something other than None/'y'.
        while update_choice == None or update_choice == 'y':
            bios_old = True
            if manual_run == True:
                update_choice = cli_ui.ask_string("BIOS is out of date. Would you like to update now? [y/n]")
                if update_choice != 'y':
                    continue
            # Derive the archive and image names from the download URL.
            bin_file = bios_dl[1].split("/")[-1]
            bin_name = bin_file.split("_")[0]
            bin_ver = bin_file.split(".")[0].split("_")[-1]
            bin_raw = bin_file.split(".")[0]
            bin_ex = "{0}.{1}".format(bin_name, bin_ver)
            download_file(bios_dl[1], '/tmp/{0}'.format(bios_dl[1].split("/")[-1]))
            extract_zip("/tmp/{0}".format(bin_file), "/tmp/bios_update/")
            bios_file_path = find_bios_file(bin_ex)
            update_bios(bios_file_path)
            break
    else:
        print("BIOS is up-to-date.")
    # logging.info("main(): Website version is newer, updating BIOS...")
    # --- IPMI update flow ---
    if eval_version(ipmi_version, ipmi_dl[0], ipmi=True):
        update_choice = None
        while update_choice == None or update_choice == 'y':
            ipmi_old = True
            if manual_run == True:
                update_choice = cli_ui.ask_string("IPMI is out of date. Would you like to update now? [y/n]")
            # NOTE(review): unlike the BIOS branch there is no
            # "if update_choice != 'y': continue" guard here, so the IPMI
            # update appears to proceed even when the operator answers 'n'
            # -- confirm whether that is intended.
            logging.info("main(): Webiste version is newer, updating IPMI...")
            bin_file = ipmi_dl[1].split("/")[-1]
            bin_name = bin_file.split(".")[0]
            bin_ex = "{0}.{1}".format(bin_name, 'bin')
            download_file(ipmi_dl[1], '/tmp/{0}'.format(ipmi_dl[1].split("/")[-1]))
            extract_zip("/tmp/{0}".format(bin_file), "/tmp/ipmi_update/")
            ipmi_file_path = "/tmp/ipmi_update/{0}.bin".format(bin_name)
            update_ipmi(ipmi_file_path)
            break
    else:
        print("IPMI is up-to-date.")
    print("\nExiting...")
if __name__ == "__main__":
    # dmidecode is required to read motherboard/BIOS information; abort early
    # if it is not installed.
    if not delegator.run("which dmidecode").out:
        print("Fatal Error: dmidecode not detected.")
        exit()
    #logging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
    #binary = FirefoxBinary('./geckodriver')
    # Headless Firefox is used to scrape the Supermicro site for firmware info.
    options = Options()
    options.headless = True
    driver = webdriver.Firefox(options=options)
    #File created with "for i in `curl -s https://www.supermicro.com/products/motherboard/| grep quaternaryNavItem|awk -F ' ' {'print $2'}| sed 's/href=\"//'|sed 's/\"//'|grep -v 'Global_SKU'`; do curl -s https://www.supermicro.com/${i} | grep prodLink| awk -F '<a href="' {'print $2'}| awk -F 'class=' {'print $1'}|sed 's/\"//'|grep -v Global_SKU >> smc_board_links.txt;done"
    # Pre-generated list of Supermicro board product links (see shell pipeline
    # in the comment above). NOTE(review): `driver` and `links` are presumably
    # consumed as module globals by main() — confirm.
    with open("smc_board_links.txt") as f:
        links = f.readlines()
    main()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
558b51d0948db20b4794d1b8d06e95d8f2c5f64d | 85100de7ca86b5d4ab68450542142b8a6f66e65e | /display_plot.py | 96dc46a0f7209e4c416d45de511c255f714c61bc | [] | no_license | PhilWicke/PotPi_Plant_Irrigation | a2146ef5258af3d1d0ad96ac39b4c28f576f7e45 | ce15ecdd08d466507f2812e10cc9592a00b4e667 | refs/heads/main | 2023-02-03T02:00:40.741178 | 2020-12-20T19:25:32 | 2020-12-20T19:25:32 | 319,044,756 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,597 | py | import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import busio, digitalio, board, time
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
import RPi.GPIO as RPIO
def interpolate(val, source_range, target_range):
    """Linearly map ``val`` from ``source_range`` onto ``target_range``.

    ``source_range`` and ``target_range`` are two-element sequences
    ``[lo, hi]``. The target range may be inverted (hi < lo), which is how
    the caller maps percentages onto downward-growing screen pixels.

    Fix: the original formula ignored ``source_range[0]`` and hardcoded the
    source minimum as 1; results are unchanged for the existing call sites
    (which use ``[1, 100]``) but other source ranges now work too.

    Returns the mapped value truncated to an int (pixel coordinate).
    """
    src_lo, src_hi = source_range
    tgt_lo, tgt_hi = target_range
    # Fraction of the way val sits through the source range.
    fraction = (val - src_lo) / (src_hi - src_lo)
    return int((1 - fraction) * (tgt_lo - tgt_hi) + tgt_hi)
# Setting up RPIO for moisture sensors
RPIO.setmode(RPIO.BCM)
# SPI bus to the MCP3008 ADC; chip-select on GPIO D5.
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
cs = digitalio.DigitalInOut(board.D5)
mcp = MCP.MCP3008(spi,cs)
# One ADC channel per soil-moisture probe (pot A and pot B).
channel1 = AnalogIn(mcp, MCP.P0)
channel2 = AnalogIn(mcp, MCP.P1)
# Setting up OLED SPI pins
RST = None
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# 128x64 display with hardware I2C:
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
# Initialize library and clear display
disp.begin()
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image and clear with black box
draw = ImageDraw.Draw(image)
draw.rectangle((0,0,width,height), outline=0, fill=0)
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
# Load default font.
font = ImageFont.load_default()
# Empirically determined wetness values
# NOTE: wetter soil reads a LOWER voltage, hence max_wet < min_wet; the
# percentage formula in the loop below relies on this inversion.
max_wet_pot1 = 1.8
min_wet_pot1 = 2.8
max_wet_pot2 = 0.7
min_wet_pot2 = 1.7
# Store data for plotting
current_sample_pot1 = []
current_sample_pot2 = []
# Starting Voltage scan
print("Start Voltage Scan")
RPIO.setup(18, RPIO.OUT)
# NOTE(review): this looks like it should be RPIO.output(18, RPIO.LOW) —
# setup() sets pin mode, output() drives the level. Confirm on hardware.
RPIO.setup(18, RPIO.LOW)
while True:
    try:
        # Draw a black filled box to clear the image.
        draw.rectangle((0,0,width,height), outline=0, fill=0)
        #print("Raw ADC Value: %.2f " % channel2.value)
        volt_01 = channel1.voltage
        volt_02 = channel2.voltage
        pot1_vol = "{0:.3f}V".format(volt_01)
        #print("Raw ADC Value: %.2f " % channel1.value)
        pot2_vol = "{0:.3f}V".format(volt_02)
        # Convert voltage to a wetness percentage; max_wet < min_wet, so a
        # lower (wetter) voltage yields a higher percentage.
        pot1_volt= (volt_01 - min_wet_pot1) / (max_wet_pot1 - min_wet_pot1)
        pot1_perc = int(pot1_volt*100)
        pot1_perc_str = "{0:d}%".format(pot1_perc)
        pot2_volt= (volt_02 - min_wet_pot2) / (max_wet_pot2 - min_wet_pot2)
        pot2_perc = int(pot2_volt*100)
        pot2_perc_str = "{0:d}%".format(pot2_perc)
        # Set limit to 100%
        # NOTE(review): the header strings above are built from the
        # pre-clamped values, so the text can still show >100% — confirm
        # whether that is intended.
        if pot1_perc>100:
            pot1_perc = 100
        if pot2_perc>100:
            pot2_perc =100
        # make it a fixed size by poping first element if too long
        current_sample_pot1.append(pot1_perc)
        current_sample_pot2.append(pot2_perc)
        if len(current_sample_pot1) > ((width/2)-2):
            current_sample_pot1.pop(0)
            current_sample_pot2.pop(0)
        # Display Header Information
        draw.text((x, top), " Wetness of Soil", font=font, fill=255)
        draw.text((x, top+8), " Pot A:" + pot1_perc_str + " Pot B:" + pot2_perc_str, font=font, fill=255)
        # Draw Coordinate System - POT 1
        draw.line((x, top+20, x, top+height), fill=255, width=2)
        draw.line((x, top+height, x+(width/2)-2, top+height), fill=255, width=2)
        # Draw Coordinate System - POT 2
        draw.line((x+(width/2)+2, top+20, x+(width/2)+2, top+height), fill=255, width=2)
        draw.line((x+(width/2)+2, top+height, x+width, top+height), fill=255, width=2)
        # left first point at (0|0)
        # draw.point((x+2, top+63), fill=250)
        # left first point at (0|100)
        # draw.point((x+2, top+20), fill=250)
        # Plot data
        # Each stored sample is one pixel column; interpolate maps a 1-100%
        # value onto the inverted y-axis (height-1 .. 20) below the header.
        for idx, (point1, point2) in enumerate(zip(current_sample_pot1, current_sample_pot2)):
            pos_pix = interpolate(point1, [1,100], [height-1,20])
            draw.point((x+2+idx, top+pos_pix), fill=250)
            pos_pix = interpolate(point2, [1,100], [height-1,20])
            draw.point((x+(width/2)+2+idx, top+pos_pix), fill=250)
        # Display image.
        disp.image(image)
        disp.display()
        time.sleep(.05)
    except KeyboardInterrupt:
        # Ctrl-C: blank the display and release the sensor power pin.
        print("Stopping Voltage Scan")
        # NOTE(review): likely intended as RPIO.output(18, RPIO.HIGH) — see
        # matching setup() call before the loop. Confirm on hardware.
        RPIO.setup(18, RPIO.HIGH)
        disp.clear()
        disp.display()
        break
| [
"philipp.wicke@ucdconnect.ie"
] | philipp.wicke@ucdconnect.ie |
dcd84d7c0b53c2f40a66486fdf6c3037d9364199 | ff5404ecdac6281b982376fcb664f28fda46ecef | /benchmarks/asv_bench/benchmarks/serialize.py | bd8cb5f26f66d48a424b9c98905b1689a0251bef | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | qinxuye/mars | 628aa106214eb85bcc84d3b5b27761d4b51c57f8 | 6ffc7b909c790c2b4094d8a80bd749a6d90d2006 | refs/heads/master | 2022-06-17T04:58:57.885557 | 2022-06-06T12:11:11 | 2022-06-06T12:11:11 | 160,643,357 | 0 | 2 | Apache-2.0 | 2019-11-28T14:44:34 | 2018-12-06T08:28:59 | Python | UTF-8 | Python | false | false | 9,418 | py | # Copyright 1999-2022 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cloudpickle
import numpy as np
import pandas as pd
from mars.core.operand import MapReduceOperand
from mars.dataframe.operands import DataFrameOperandMixin
from mars.oscar.core import ActorRef
from mars.oscar.backends.message import SendMessage, new_message_id, ActorRefMessage
from mars.serialization import serialize, deserialize
from mars.serialization.serializables import (
Serializable,
Int64Field,
Float64Field,
ListField,
DataTypeField,
SeriesField,
NDArrayField,
StringField,
FieldTypes,
BoolField,
Int32Field,
Float32Field,
SliceField,
Datetime64Field,
Timedelta64Field,
TupleField,
DictField,
)
from mars.services.subtask import Subtask, SubtaskResult, SubtaskStatus
from mars.services.task import new_task_id
from mars.utils import tokenize
# do warmup
serialize(None)
class SerializableChild(Serializable):
    """Leaf Serializable mixing scalar, numpy/pandas, and typed list fields."""
    str_field = StringField("str_field")
    int_field = Int64Field("int_field")
    float_field = Float64Field("float_field")
    dtype_field = DataTypeField("dtype_field")
    series_field = SeriesField("series_field")
    ndarray_field = NDArrayField("ndarray_field")
    int_list_field = ListField("int_list_field", field_type=FieldTypes.int64)
    float_list_field = ListField("float_list_field", field_type=FieldTypes.float64)
    str_list_field = ListField("str_list_field", field_type=FieldTypes.string)
class SerializableParent(Serializable):
    """Container Serializable holding a list of nested Serializable children."""
    children = ListField("children", field_type=FieldTypes.reference)
class MySerializable(Serializable):
    """Flat Serializable covering each primitive/simple field type once."""
    _bool_val = BoolField("f1")
    _int32_val = Int32Field("f2")
    _int64_val = Int64Field("f3")
    _float32_val = Float32Field("f4")
    _float64_val = Float64Field("f5")
    _string_val = StringField("f6")
    _datetime64_val = Datetime64Field("f7")
    _timedelta64_val = Timedelta64Field("f8")
    _datatype_val = DataTypeField("f9")
    _slice_val = SliceField("f10")
    _list_val = ListField("list_val", FieldTypes.int64)
    _tuple_val = TupleField("tuple_val", FieldTypes.string)
    _dict_val = DictField("dict_val", FieldTypes.string, FieldTypes.bytes)
class SerializeSerializableSuite:
    """Round-trip (de)serialization benchmark over nested Serializables."""

    @staticmethod
    def _make_child(idx):
        """Build one child mixing scalar, numpy, pandas, and list fields."""
        return SerializableChild(
            str_field="abcd" * 1024,
            int_field=idx,
            float_field=float(idx) * 1.42,
            dtype_field=np.dtype("<M8"),
            series_field=pd.Series([np.dtype(int)] * 1024, name="dtype"),
            ndarray_field=np.random.rand(1000),
            int_list_field=np.random.randint(0, 1000, size=(1000,)).tolist(),
            float_list_field=np.random.rand(1000).tolist(),
            str_list_field=[str(i * 2.8571) for i in range(100)],
        )

    def setup(self):
        self.test_data = SerializableParent(
            children=[self._make_child(idx) for idx in range(1000)])

    def time_serialize_deserialize(self):
        """Time a full serialize -> deserialize cycle of the parent tree."""
        deserialize(*serialize(self.test_data))
class SerializeSubtaskSuite:
    """Benchmarks a pickling round-trip of a large batch of Subtask objects."""
    def setup(self):
        self.subtasks = []
        for i in range(10000):
            subtask = Subtask(
                subtask_id=new_task_id(),
                stage_id=new_task_id(),
                logic_key=new_task_id(),
                session_id=new_task_id(),
                task_id=new_task_id(),
                chunk_graph=None,
                # A band is an (address, resource-slot) pair.
                expect_bands=[
                    ("ray://mars_cluster_1649927648/17/0", "numa-0"),
                ],
                bands_specified=False,
                virtual=False,
                priority=(1, 0),
                retryable=True,
                extra_config={},
            )
            self.subtasks.append(subtask)
    def time_pickle_serialize_deserialize_subtask(self):
        # Measures serialize -> cloudpickle dumps/loads -> deserialize end-to-end.
        deserialize(*cloudpickle.loads(cloudpickle.dumps(serialize(self.subtasks))))
class SerializePrimitivesSuite:
    """Benchmarks (de)serialization of objects built from primitive fields."""
    def setup(self):
        self.test_primitive_serializable = []
        for i in range(10000):
            # NOTE(review): _complex64_val/_complex128_val are passed below
            # but MySerializable declares no matching fields — confirm that
            # unknown kwargs are accepted (or add the complex fields).
            my_serializable = MySerializable(
                _bool_val=True,
                _int32_val=-32,
                _int64_val=-64,
                _float32_val=np.float32(2.0),
                _float64_val=2.0,
                _complex64_val=np.complex64(1 + 2j),
                _complex128_val=1 + 2j,
                _string_val="string_value",
                _datetime64_val=pd.Timestamp(123),
                _timedelta64_val=pd.Timedelta(days=1),
                _datatype_val=np.dtype(np.int32),
                _slice_val=slice(1, 10, 2),
                _list_val=[1, 2],
                _tuple_val=("a", "b"),
                _dict_val={"a": b"bytes_value"},
            )
            self.test_primitive_serializable.append(my_serializable)
    def time_serialize_deserialize_primitive(self):
        # Mars serialization only, no pickle layer.
        deserialize(*serialize(self.test_primitive_serializable))
    def time_pickle_serialize_deserialize_basic(self):
        # Mars serialization wrapped in a cloudpickle round-trip.
        deserialize(
            *cloudpickle.loads(
                cloudpickle.dumps(serialize(self.test_primitive_serializable))
            )
        )
class SerializeContainersSuite:
    """Round-trip benchmarks for large builtin containers."""

    _SIZE = 100000

    def setup(self):
        values = range(self._SIZE)
        self.test_list = list(values)
        self.test_tuple = tuple(values)
        self.test_dict = dict(zip(values, values))

    def time_pickle_serialize_deserialize_list(self):
        """Round-trip a 100k-element list."""
        deserialize(*cloudpickle.loads(cloudpickle.dumps(serialize(self.test_list))))

    def time_pickle_serialize_deserialize_tuple(self):
        """Round-trip a 100k-element tuple."""
        deserialize(*cloudpickle.loads(cloudpickle.dumps(serialize(self.test_tuple))))

    def time_pickle_serialize_deserialize_dict(self):
        """Round-trip a 100k-entry identity dict."""
        deserialize(*cloudpickle.loads(cloudpickle.dumps(serialize(self.test_dict))))
class MockDFOperand(MapReduceOperand, DataFrameOperandMixin):
    # Minimal concrete dataframe map-reduce operand used to build mock chunk
    # graphs below; 14320 is just an arbitrary, otherwise-unused type id.
    _op_type_ = 14320
class SerializeFetchShuffleSuite:
    """Benchmarks serialization of reduce chunks referencing a fetch-shuffle."""
    def setup(self):
        from mars.core import OutputType
        from mars.core.operand import OperandStage
        from mars.dataframe.operands import DataFrameShuffleProxy
        from mars.utils import build_fetch
        # Map-stage source chunks feeding the shuffle proxy.
        source_chunks = []
        for i in range(1000):
            op = MockDFOperand(
                _output_types=[OutputType.dataframe],
                _key=tokenize(i),
                stage=OperandStage.map,
            )
            source_chunks.append(op.new_chunk([], index=(i,)))
        shuffle_chunk = DataFrameShuffleProxy(
            output_types=[OutputType.dataframe]
        ).new_chunk(source_chunks)
        # Fetch chunk standing in for the shuffle output on a remote worker.
        fetch_chunk = build_fetch(shuffle_chunk)
        self.test_fetch_chunks = []
        for i in range(1000):
            reduce_op = MockDFOperand(
                _output_types=[OutputType.dataframe],
                _key=tokenize((i, 1)),
                stage=OperandStage.reduce,
            )
            self.test_fetch_chunks.append(
                reduce_op.new_chunk([fetch_chunk], index=(i,))
            )
    def time_pickle_serialize_fetch_shuffle_chunks(self):
        # Round-trip each reduce chunk individually (header + buffers).
        for fetch_chunk in self.test_fetch_chunks:
            header, buffers = serialize(fetch_chunk)
            serialized = cloudpickle.dumps((header, buffers))
            deserialize(*cloudpickle.loads(serialized))
class SerializeMessageSuite:
    """Benchmarks (de)serialization of oscar actor-pool messages."""
    def setup(self):
        self.send_messages = []
        self.actor_ref_messages = []
        for i in range(10000):
            # Reference to a (fictitious) task manager actor on a ray band.
            ref = ActorRef(
                "ray://mars_cluster_1649927648/17/0",
                b"F20Wyerq6EiqltB8jAVs7L3N_task_manager",
            )
            # A representative successful subtask result payload.
            new_result = SubtaskResult(
                subtask_id=new_task_id(),
                session_id=new_task_id(),
                task_id=new_task_id(),
                stage_id=new_task_id(),
                status=SubtaskStatus.succeeded,
                progress=1.0,
                data_size=1000000.0,
                bands=[("ray://mars_cluster_1649927648/17/0", "numa-0")],
                execution_start_time=1646125099.622051,
                execution_end_time=1646125104.448726,
            )
            send_message = SendMessage(
                new_message_id(),
                ref,
                new_result,
                protocol=0,
            )
            self.send_messages.append(send_message)
            actor_ref_message = ActorRefMessage(
                message_id=new_message_id(),
                actor_ref=ref,
                protocol=0,
            )
            self.actor_ref_messages.append(actor_ref_message)
    def time_pickle_serialize_deserialize_send_messages(self):
        deserialize(
            *cloudpickle.loads(cloudpickle.dumps(serialize(self.send_messages)))
        )
    def time_pickle_serialize_deserialize_actor_ref_messages(self):
        deserialize(
            *cloudpickle.loads(cloudpickle.dumps(serialize(self.actor_ref_messages)))
        )
| [
"noreply@github.com"
] | noreply@github.com |
89f5da991953c12bdc0fc4486078c5d931e2dcd0 | 0c1de02e93efff93b4de879dbe2ccc2a25434875 | /products/routers.py | b0c76c8d60625e278ab633852135228991bfbf31 | [] | no_license | lalkrishnakothare/TradexaTask1 | 569d6305119a881c4b02cdc7e75e04e1b10c666b | 562073eab5022e05b7053918d6a1d06bb63079e8 | refs/heads/master | 2023-06-20T05:59:50.982490 | 2021-07-17T07:13:31 | 2021-07-17T07:13:31 | 386,849,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | class ProductRouter:
def db_for_read(self, model, **hints):
if model._meta.app_label == 'products':
return 'products_db'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'products':
return 'products_db'
return None
def allow_relation(self, obj1, obj2, **hints):
if ((obj1._meta.app_label == 'products') or (obj2._meta.app_label == 'products')):
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
if app_label == 'products':
return db =='products_db'
return None | [
"lalu.baluk@gmail.com"
] | lalu.baluk@gmail.com |
3665d3a6ec48cd4a1409ee51f0ae7b370ea65355 | 66aee2edec7683d59aec36db50c1bfdc351d99a6 | /data_extarction.py | d55d0fe4c1af4fde2c7e689398e0eb7d8e9f2d08 | [] | no_license | Shafim61/Machine-Learning | 1b52be3693e0fc1d9b4acae082560442f052e380 | 3d2bd6649dfafd7b5f263adefb0a112e8c8310a9 | refs/heads/main | 2023-02-26T10:26:03.729537 | 2021-02-03T12:05:18 | 2021-02-03T12:05:18 | 335,608,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | import requests
from bs4 import BeautifulSoup
# Scrape the iPhone comparison table from Wikipedia and dump version -> price.
url = "http://en.wikipedia.org/wiki/Iphone"
response = requests.get(url).text
soup = BeautifulSoup(response, "lxml")
table = soup.find("table", class_="wikitable")
# Skip the header row; the original [1::1] slice was a no-op step of [1:].
rows = table.find_all('tr')[1:]

prices = {}
for row in rows:
    cells = row.find_all(['th', 'td'])
    try:
        # First cell links the model name, e.g. "iPhone 12 / 12 Mini".
        version_text = cells[0].a.text.split('/')[0]
        version_text = ''.join(ch for ch in version_text if ch.isdigit())
        version = int(version_text)
        if version < 3:
            continue
        # Ninth cell holds the release price; keep the last listed variant.
        price_text = cells[8]
        price = price_text.text.split('/')[-1].split('*')[-1].replace('$', '').replace('\n', '')
        prices[version] = int(price)
    except (AttributeError, IndexError, ValueError):
        # Best-effort parse: rows without a usable version/price are skipped.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        pass

# NOTE(review): mode "a" appends, so the header line is duplicated on every
# run — confirm whether mode "w" (fresh file) was intended.
with open("iphone_price.csv", "a") as out_file:
    out_file.write('version,price\n')
    for version, price in prices.items():
        out_file.write(f'{version},{price}\n')
| [
"tanvirrashik94@gmail.com"
] | tanvirrashik94@gmail.com |
d10302e6ff2ba378e2dae11f318316dd463018e5 | 5f1ddeebf606975f559bcf9ef20c2184479b07c0 | /apps/cdda/cdda.py | 4a938f3c46fed60fbb6acf1854f6a584c7cf4c77 | [
"Unlicense"
] | permissive | cassaundra/cassaundra_talon | 3fc7d3d3712b17012da9bf31525b84a568d494fe | 18d1449c0840ec0fa462e546fdb8d1fee5ad4c0c | refs/heads/master | 2023-02-02T11:26:54.739139 | 2020-12-17T20:11:05 | 2020-12-17T20:11:05 | 301,019,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from talon import Context, Module, actions, ui
# ctx = Context()
mod = Module()
mod.tag("cdda", desc="Cataclysm: Dark Days Ahead")
# ctx.matches = r"""
# app: cataclysm-tiles
# """
@mod.action_class
class Actions:
    def key_repeat(key: str, count: int):
        """Press `key` `count` times with a short pause between presses."""
        for _ in range(count):
            actions.key(key)
            actions.sleep("10ms")
| [
"cass@cassaundra.io"
] | cass@cassaundra.io |
0485c06a92bd564030cc3dff86d3ed9c9bb8fbd3 | ec931947aa3e06ce565637e7ee1cb707f56375a2 | /aoc2015/modules/grid.py | 145b44dc85960aecdd0fa419d8dbac10c91fa804 | [] | no_license | chrisglencross/advent-of-code | 5f16ed7e2265d27ce15f502ce2a1c2f11fc99fc0 | 21623d4aa01a9e20285a0233c50f8f56c4099af5 | refs/heads/master | 2023-01-24T22:01:30.829679 | 2023-01-12T23:03:03 | 2023-01-12T23:03:03 | 224,833,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,596 | py | from __future__ import annotations
from typing import Tuple, Dict, List, Optional
import networkx as nx
from aoc2019.modules import textgridprinter
from aoc2019.modules.directions import COMPASS_DIRECTIONS
Coords = Tuple[int, int]
def default_is_navigable(grid: Grid, from_coord: Coords, to_coord: Coords):
    """Default rule: a move is navigable iff both cells contain '.'."""
    return all(grid[coord] in {"."} for coord in (from_coord, to_coord))
def default_node_factory(coords):
    """Identity factory: graph nodes are the coordinate tuples themselves."""
    return coords
class Grid:
    """Sparse 2D character grid keyed by (x, y) coordinate tuples."""
    def __init__(self, grid, directions=COMPASS_DIRECTIONS.values()):
        # grid: dict of (x, y) -> single-character symbol.
        # NOTE(review): self.directions is stored but the graph-building
        # methods below use their own `directions` defaults — confirm whether
        # they were meant to default to self.directions.
        self.grid = grid
        self.directions = directions
    def items(self):
        """All (coords, symbol) pairs in the grid."""
        return self.grid.items()
    def get_bounds(self) -> Tuple[Coords, Coords]:
        """Return ((min_x, min_y), (max_x, max_y)); origin bounds if empty."""
        xs = set([c[0] for c in self.grid.keys()])
        ys = set([c[1] for c in self.grid.keys()])
        # An empty grid still yields a 1x1 bounding box at the origin.
        if not xs:
            xs = {0}
        if not ys:
            ys = {0}
        return (min(xs), min(ys)), (max(xs), max(ys))
    def find_cell(self, symbol) -> Optional[Coords]:
        """Coords of the first cell containing symbol, or None if absent."""
        for coords, cell in self.grid.items():
            if cell == symbol:
                return coords
        return None
    def find_cells(self, symbol) -> List[Coords]:
        """Coords of every cell containing symbol."""
        result = []
        for coords, cell in self.grid.items():
            if cell == symbol:
                result.append(coords)
        return result
    def index_cells(self, symbols=None, not_symbols=None) -> Dict[str, Coords]:
        """Map each matching, unique symbol to its coords.

        By default indexes everything except floor/wall/space symbols.
        Raises if a matching symbol occurs more than once in the grid.
        """
        if symbols is None and not_symbols is None:
            not_symbols = {".", "#", " "}
        result = {}
        for coords, cell in self.grid.items():
            if (symbols and cell in symbols) or (not_symbols and cell not in not_symbols):
                if result.get(cell) is not None:
                    raise Exception(f"Symbol {cell} is repeated in grid. Index it with index_repeating_cells()")
                result[cell] = coords
        return result
    def index_repeating_cells(self, symbols=None, not_symbols=None) -> Dict[str, List[Coords]]:
        """Map each matching symbol to the list of all coords where it occurs."""
        if symbols is None and not_symbols is None:
            not_symbols = {".", "#", " "}
        result = {}
        for coords, cell in self.grid.items():
            if (symbols and cell in symbols) or (not_symbols and cell not in not_symbols):
                result_list = result.get(cell)
                if result_list is None:
                    result_list = []
                    result[cell] = result_list
                result_list.append(coords)
        return result
    def keys(self):
        """All coordinates in the grid."""
        return self.grid.keys()
    def values(self):
        """All cell symbols in the grid."""
        return self.grid.values()
    def get(self, coords: Coords, default_value=None):
        """Symbol at coords, or default_value if the cell is absent."""
        return self.grid.get(coords, default_value)
    def __getitem__(self, coords: Coords):
        # Unlike dict indexing, missing coords return None rather than raise.
        return self.get(coords)
    def __setitem__(self, coords: Coords, cell: str):
        self.grid[coords] = cell
    def build_graph(self,
                    directions=COMPASS_DIRECTIONS.values(),
                    node_factory=default_node_factory,
                    is_navigable=default_is_navigable) -> nx.Graph:
        """Build an undirected graph of navigable moves between cells."""
        graph = nx.Graph()
        self.add_graph_edges(graph, directions, node_factory, is_navigable)
        return graph
    def build_digraph(self,
                      directions=COMPASS_DIRECTIONS.values(),
                      node_factory=default_node_factory,
                      is_navigable=default_is_navigable) -> nx.DiGraph:
        """Build a directed graph of navigable moves between cells."""
        graph = nx.DiGraph()
        self.add_graph_edges(graph, directions, node_factory, is_navigable)
        return graph
    def add_graph_edges(self, graph: nx.Graph,
                        directions=COMPASS_DIRECTIONS.values(),
                        node_factory=default_node_factory,
                        is_navigable=default_is_navigable):
        """Add a distance-1 edge for every navigable move to an adjacent cell."""
        for from_coords, from_symbol in self.items():
            from_node = node_factory(from_coords)
            for direction in directions:
                to_coords = direction.move(from_coords)
                to_symbol = self.get(to_coords)
                if to_symbol and is_navigable(self, from_coords, to_coords):
                    to_node = node_factory(to_coords)
                    graph.add_edge(from_node, to_node, distance=1)
    def print(self):
        """Render the grid to stdout via the text grid printer."""
        textgridprinter.TextGridPrinter().print(self)
def parse_grid(content: str) -> Grid:
    """Parse a text block into a Grid, one character per (x, y) cell."""
    cells = {
        (x, y): symbol
        for y, line in enumerate(content.split("\n"))
        for x, symbol in enumerate(line.rstrip())
    }
    return Grid(cells)
def load_grid(file: str) -> Grid:
    """Read a text file and parse its contents into a Grid."""
    with open(file) as handle:
        return parse_grid(handle.read())
| [
"chris@glencross.org"
] | chris@glencross.org |
ac89a3e772ac4651679f9bbcd3d7859f8433465b | 63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09 | /python/ray/serve/controller.py | 3176a6321bab846bb8ad713f732a0067362fc473 | [
"Apache-2.0",
"MIT"
] | permissive | ray-project/maze-raylit | 79f0a5af9fe4bdc13a2d5b3919da867ed5439aab | a03cd14a50d87d58effea1d749391af530d7609c | refs/heads/master | 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 | Apache-2.0 | 2020-12-04T22:34:15 | 2020-12-03T17:47:58 | Python | UTF-8 | Python | false | false | 41,020 | py | import asyncio
from collections import defaultdict
from itertools import chain
import os
import random
import time
from dataclasses import dataclass, field
from typing import Dict, Any, List, Optional, Tuple
from pydantic import BaseModel
import ray
import ray.cloudpickle as pickle
from ray.serve.autoscaling_policy import BasicAutoscalingPolicy
from ray.serve.backend_worker import create_backend_replica
from ray.serve.constants import ASYNC_CONCURRENCY, SERVE_PROXY_NAME
from ray.serve.http_proxy import HTTPProxyActor
from ray.serve.kv_store import RayInternalKVStore
from ray.serve.exceptions import RayServeException
from ray.serve.utils import (format_actor_name, get_random_letters, logger,
try_schedule_resources_on_nodes, get_all_node_ids)
from ray.serve.config import BackendConfig, ReplicaConfig
from ray.serve.long_poll import LongPollerHost
from ray.actor import ActorHandle
import numpy as np
# Used for testing purposes only. If this is set, the controller will crash
# after writing each checkpoint with the specified probability.
_CRASH_AFTER_CHECKPOINT_PROBABILITY = 0
# Key under which the controller checkpoint is stored in the KV store.
CHECKPOINT_KEY = "serve-controller-checkpoint"
# Feature flag for controller resource checking. If true, controller will
# error if the desired replicas exceed current resource availability.
_RESOURCE_CHECK_ENABLED = True
# How often to call the control loop on the controller.
CONTROL_LOOP_PERIOD_S = 1.0
# Warn if replicas take longer than this many seconds to become ready.
REPLICA_STARTUP_TIME_WARNING_S = 5
# TypeDefs: readability aliases used in type hints throughout this module.
BackendTag = str
EndpointTag = str
ReplicaTag = str
NodeId = str
GoalId = int
class TrafficPolicy:
    """Traffic-splitting policy for one endpoint.

    Holds the primary backend -> weight split (weights must sum to 1) and an
    optional backend -> proportion mapping for shadow (mirrored) traffic.
    """

    def __init__(self, traffic_dict: Dict[str, float]) -> None:
        self.traffic_dict: Dict[str, float] = {}
        self.shadow_dict: Dict[str, float] = {}
        self.set_traffic_dict(traffic_dict)

    def set_traffic_dict(self, traffic_dict: Dict[str, float]) -> None:
        """Validate and install a new backend -> weight mapping."""
        total_weight = 0
        for backend, weight in traffic_dict.items():
            if weight < 0:
                raise ValueError(
                    "Attempted to assign a weight of {} to backend '{}'. "
                    "Weights cannot be negative.".format(weight, backend))
            total_weight += weight
        # These weights will later be plugged into np.random.choice, which
        # uses a tolerance of 1e-8.
        if not np.isclose(total_weight, 1, atol=1e-8):
            raise ValueError("Traffic dictionary weights must sum to 1, "
                             "currently they sum to {}".format(total_weight))
        self.traffic_dict = traffic_dict

    def set_shadow(self, backend: str, proportion: float):
        """Set shadow traffic; a proportion of 0 clears a known backend."""
        clearing_existing = proportion == 0 and backend in self.shadow_dict
        if clearing_existing:
            del self.shadow_dict[backend]
        else:
            self.shadow_dict[backend] = proportion

    def __repr__(self) -> str:
        return f"<Traffic {self.traffic_dict}; Shadow {self.shadow_dict}>"
class BackendInfo(BaseModel):
    """Everything needed to (re)create the replicas of one backend."""
    # TODO(architkulkarni): Add type hint for worker_class after upgrading
    # cloudpickle and adding types to RayServeWrappedReplica
    worker_class: Any
    backend_config: BackendConfig
    replica_config: ReplicaConfig
    class Config:
        # TODO(architkulkarni): Remove once ReplicaConfig is a pydantic
        # model
        arbitrary_types_allowed = True
@dataclass
class SystemState:
    """The desired ("goal") state of the serve system.

    Tracks registered backends, traffic policies, and HTTP routes, along with
    the goal ids of the in-flight updates to each of them.
    """
    backends: Dict[BackendTag, BackendInfo] = field(default_factory=dict)
    traffic_policies: Dict[EndpointTag, TrafficPolicy] = field(
        default_factory=dict)
    routes: Dict[BackendTag, Tuple[EndpointTag, Any]] = field(
        default_factory=dict)
    backend_goal_ids: Dict[BackendTag, GoalId] = field(default_factory=dict)
    traffic_goal_ids: Dict[EndpointTag, GoalId] = field(default_factory=dict)
    route_goal_ids: Dict[BackendTag, GoalId] = field(default_factory=dict)

    def get_backend_configs(self) -> Dict[BackendTag, BackendConfig]:
        """Return backend_tag -> BackendConfig for all registered backends."""
        return {
            tag: info.backend_config
            for tag, info in self.backends.items()
        }

    def get_backend(self, backend_tag: BackendTag) -> Optional[BackendInfo]:
        """Return the BackendInfo for backend_tag, or None if not registered."""
        return self.backends.get(backend_tag)

    def add_backend(self,
                    backend_tag: BackendTag,
                    backend_info: BackendInfo,
                    goal_id: GoalId = 0) -> None:
        """Register (or replace) a backend and record its update goal id."""
        self.backends[backend_tag] = backend_info
        # Fix: this previously replaced the whole backend_goal_ids dict with
        # the bare int (`self.backend_goal_ids = goal_id`), clobbering every
        # other backend's goal id; the field is declared as a per-tag dict.
        self.backend_goal_ids[backend_tag] = goal_id

    def get_endpoints(self) -> Dict[EndpointTag, Dict[str, Any]]:
        """Return endpoint -> {route, methods, traffic, shadows} for all routes."""
        endpoints = {}
        for route, (endpoint, methods) in self.routes.items():
            if endpoint in self.traffic_policies:
                traffic_policy = self.traffic_policies[endpoint]
                traffic_dict = traffic_policy.traffic_dict
                shadow_dict = traffic_policy.shadow_dict
            else:
                traffic_dict = {}
                shadow_dict = {}

            endpoints[endpoint] = {
                # Non-HTTP ("hidden") routes do not start with "/".
                "route": route if route.startswith("/") else None,
                "methods": methods,
                "traffic": traffic_dict,
                "shadows": shadow_dict,
            }
        return endpoints
@dataclass
class ActorStateReconciler:
    """Tracks the live Ray actors (HTTP proxies and backend replicas) plus the
    pending start/stop work needed to converge them to the goal state."""
    controller_name: str = field(init=True)
    detached: bool = field(init=True)
    # node_id -> HTTP proxy actor handle (one proxy per node).
    routers_cache: Dict[NodeId, ActorHandle] = field(default_factory=dict)
    # backend_tag -> {replica_tag -> actor handle} for running replicas.
    backend_replicas: Dict[BackendTag, Dict[ReplicaTag, ActorHandle]] = field(
        default_factory=lambda: defaultdict(dict))
    # Replica tags queued to start/stop on the next reconciliation pass.
    backend_replicas_to_start: Dict[BackendTag, List[ReplicaTag]] = field(
        default_factory=lambda: defaultdict(list))
    backend_replicas_to_stop: Dict[BackendTag, List[ReplicaTag]] = field(
        default_factory=lambda: defaultdict(list))
    backends_to_remove: List[BackendTag] = field(default_factory=list)
    endpoints_to_remove: List[EndpointTag] = field(default_factory=list)
# TODO(edoakes): consider removing this and just using the names.
def router_handles(self) -> List[ActorHandle]:
return list(self.routers_cache.values())
def get_replica_handles(self) -> List[ActorHandle]:
return list(
chain.from_iterable([
replica_dict.values()
for replica_dict in self.backend_replicas.values()
]))
def get_replica_tags(self) -> List[ReplicaTag]:
return list(
chain.from_iterable([
replica_dict.keys()
for replica_dict in self.backend_replicas.values()
]))
    async def _start_pending_backend_replicas(
            self, current_state: SystemState) -> None:
        """Starts the pending backend replicas in self.backend_replicas_to_start.

        Waits for replicas to start up, then removes them from
        self.backend_replicas_to_start.
        """
        # Launch every pending replica and collect a future that resolves
        # once the replica actor reports itself ready.
        fut_to_replica_info = {}
        for backend_tag, replicas_to_create in self.backend_replicas_to_start.\
                items():
            for replica_tag in replicas_to_create:
                replica_handle = await self._start_backend_replica(
                    current_state, backend_tag, replica_tag)
                ready_future = replica_handle.ready.remote().as_future()
                fut_to_replica_info[ready_future] = (backend_tag, replica_tag,
                                                     replica_handle)
        start = time.time()
        prev_warning = start
        while fut_to_replica_info:
            # Periodically warn if replicas are slow to come up (usually a
            # sign of insufficient cluster resources).
            if time.time() - prev_warning > REPLICA_STARTUP_TIME_WARNING_S:
                prev_warning = time.time()
                logger.warning("Waited {:.2f}s for replicas to start up. Make "
                               "sure there are enough resources to create the "
                               "replicas.".format(time.time() - start))
            # Poll with a 1s timeout so the warning above gets a chance to
            # fire while replicas are still pending.
            done, pending = await asyncio.wait(
                list(fut_to_replica_info.keys()), timeout=1)
            for fut in done:
                (backend_tag, replica_tag,
                 replica_handle) = fut_to_replica_info.pop(fut)
                self.backend_replicas[backend_tag][
                    replica_tag] = replica_handle
        self.backend_replicas_to_start.clear()
    async def _start_backend_replica(self, current_state: SystemState,
                                     backend_tag: BackendTag,
                                     replica_tag: ReplicaTag) -> ActorHandle:
        """Start a replica and return its actor handle.

        Checks if the named actor already exists before starting a new one.
        Assumes that the backend configuration is already in the Goal State.
        """
        # NOTE(edoakes): the replicas may already be created if we
        # failed after creating them but before writing a
        # checkpoint.
        replica_name = format_actor_name(replica_tag, self.controller_name)
        try:
            # EAFP: reuse the actor if a previous (pre-checkpoint) attempt
            # already created it under this deterministic name.
            replica_handle = ray.get_actor(replica_name)
        except ValueError:
            logger.debug("Starting replica '{}' for backend '{}'.".format(
                replica_tag, backend_tag))
            backend_info = current_state.get_backend(backend_tag)
            replica_handle = ray.remote(backend_info.worker_class).options(
                name=replica_name,
                lifetime="detached" if self.detached else None,
                # Infinite retries so replicas survive worker/node failures.
                max_restarts=-1,
                max_task_retries=-1,
                **backend_info.replica_config.ray_actor_options).remote(
                    backend_tag, replica_tag,
                    backend_info.replica_config.actor_init_args,
                    backend_info.backend_config, self.controller_name)
        return replica_handle
def _scale_backend_replicas(self, backends: Dict[BackendTag, BackendInfo],
backend_tag: BackendTag,
num_replicas: int) -> None:
"""Scale the given backend to the number of replicas.
NOTE: this does not actually start or stop the replicas, but instead
adds the intention to start/stop them to self.backend_replicas_to_start
and self.backend_replicas_to_stop. The caller is responsible for then
first writing a checkpoint and then actually starting/stopping the
intended replicas. This avoids inconsistencies with starting/stopping a
replica and then crashing before writing a checkpoint.
"""
logger.debug("Scaling backend '{}' to {} replicas".format(
backend_tag, num_replicas))
assert (backend_tag in backends
), "Backend {} is not registered.".format(backend_tag)
assert num_replicas >= 0, ("Number of replicas must be"
" greater than or equal to 0.")
current_num_replicas = len(self.backend_replicas[backend_tag])
delta_num_replicas = num_replicas - current_num_replicas
backend_info = backends[backend_tag]
if delta_num_replicas > 0:
can_schedule = try_schedule_resources_on_nodes(requirements=[
backend_info.replica_config.resource_dict
for _ in range(delta_num_replicas)
])
if _RESOURCE_CHECK_ENABLED and not all(can_schedule):
num_possible = sum(can_schedule)
raise RayServeException(
"Cannot scale backend {} to {} replicas. Ray Serve tried "
"to add {} replicas but the resources only allows {} "
"to be added. To fix this, consider scaling to replica to "
"{} or add more resources to the cluster. You can check "
"avaiable resources with ray.nodes().".format(
backend_tag, num_replicas, delta_num_replicas,
num_possible, current_num_replicas + num_possible))
logger.debug("Adding {} replicas to backend {}".format(
delta_num_replicas, backend_tag))
for _ in range(delta_num_replicas):
replica_tag = "{}#{}".format(backend_tag, get_random_letters())
self.backend_replicas_to_start[backend_tag].append(replica_tag)
elif delta_num_replicas < 0:
logger.debug("Removing {} replicas from backend '{}'".format(
-delta_num_replicas, backend_tag))
assert len(
self.backend_replicas[backend_tag]) >= delta_num_replicas
for _ in range(-delta_num_replicas):
replica_tag, _ = self.backend_replicas[backend_tag].popitem()
if len(self.backend_replicas[backend_tag]) == 0:
del self.backend_replicas[backend_tag]
self.backend_replicas_to_stop[backend_tag].append(replica_tag)
    async def _stop_pending_backend_replicas(self) -> None:
        """Stops the pending backend replicas in self.backend_replicas_to_stop.

        Removes backend_replicas from the router, kills them, and clears
        self.backend_replicas_to_stop.
        """
        for backend_tag, replicas_list in self.backend_replicas_to_stop.items(
        ):
            for replica_tag in replicas_list:
                # NOTE(edoakes): the replicas may already be stopped if we
                # failed after stopping them but before writing a checkpoint.
                replica_name = format_actor_name(replica_tag,
                                                 self.controller_name)
                try:
                    replica = ray.get_actor(replica_name)
                except ValueError:
                    # Actor already gone; nothing left to kill.
                    continue
                # TODO(edoakes): this logic isn't ideal because there may be
                # pending tasks still executing on the replica. However, if we
                # use replica.__ray_terminate__, we may send it while the
                # replica is being restarted and there's no way to tell if it
                # successfully killed the worker or not.
                ray.kill(replica, no_restart=True)
        self.backend_replicas_to_stop.clear()
def _start_routers_if_needed(self, http_host: str, http_port: str,
http_middlewares: List[Any]) -> None:
"""Start a router on every node if it doesn't already exist."""
if http_host is None:
return
for node_id, node_resource in get_all_node_ids():
if node_id in self.routers_cache:
continue
router_name = format_actor_name(SERVE_PROXY_NAME,
self.controller_name, node_id)
try:
router = ray.get_actor(router_name)
except ValueError:
logger.info("Starting router with name '{}' on node '{}' "
"listening on '{}:{}'".format(
router_name, node_id, http_host, http_port))
router = HTTPProxyActor.options(
name=router_name,
lifetime="detached" if self.detached else None,
max_concurrency=ASYNC_CONCURRENCY,
max_restarts=-1,
max_task_retries=-1,
resources={
node_resource: 0.01
},
).remote(
http_host,
http_port,
controller_name=self.controller_name,
http_middlewares=http_middlewares)
self.routers_cache[node_id] = router
def _stop_routers_if_needed(self) -> bool:
"""Removes router actors from any nodes that no longer exist.
Returns whether or not any actors were removed (a checkpoint should
be taken).
"""
actor_stopped = False
all_node_ids = {node_id for node_id, _ in get_all_node_ids()}
to_stop = []
for node_id in self.routers_cache:
if node_id not in all_node_ids:
logger.info(
"Removing router on removed node '{}'.".format(node_id))
to_stop.append(node_id)
for node_id in to_stop:
router_handle = self.routers_cache.pop(node_id)
ray.kill(router_handle, no_restart=True)
actor_stopped = True
return actor_stopped
    def _recover_actor_handles(self) -> None:
        """Re-fetch live handles for all router and replica actors by name."""
        # Refresh the router cache with freshly fetched handles.
        for node_id in self.routers_cache.keys():
            router_name = format_actor_name(SERVE_PROXY_NAME,
                                            self.controller_name, node_id)
            self.routers_cache[node_id] = ray.get_actor(router_name)
        # Fetch actor handles for all of the backend replicas in the system.
        # All of these backend_replicas are guaranteed to already exist because
        # they would not be written to a checkpoint in self.backend_replicas
        # until they were created.
        for backend_tag, replica_dict in self.backend_replicas.items():
            for replica_tag in replica_dict.keys():
                replica_name = format_actor_name(replica_tag,
                                                 self.controller_name)
                self.backend_replicas[backend_tag][
                    replica_tag] = ray.get_actor(replica_name)

    async def _recover_from_checkpoint(
            self, current_state: SystemState, controller: "ServeController"
    ) -> Dict[BackendTag, BasicAutoscalingPolicy]:
        """Restore reconciler state after a controller restart.

        Re-fetches actor handles, rebuilds the autoscaling policies from the
        checkpointed backend configs, and starts/stops any replicas whose
        creation/deletion was in flight when the checkpoint was taken.

        Returns:
            Mapping of backend tag to its reconstructed autoscaling policy.
        """
        self._recover_actor_handles()
        autoscaling_policies = dict()
        for backend, info in current_state.backends.items():
            metadata = info.backend_config.internal_metadata
            if metadata.autoscaling_config is not None:
                autoscaling_policies[backend] = BasicAutoscalingPolicy(
                    backend, metadata.autoscaling_config)
        # Start/stop any pending backend replicas.
        await self._start_pending_backend_replicas(current_state)
        await self._stop_pending_backend_replicas()
        return autoscaling_policies
@dataclass
class Checkpoint:
    """Snapshot of all controller hard state, pickled into the KV store."""
    goal_state: SystemState  # desired state (currently unused, see __init__)
    current_state: SystemState  # state the system has actually reached
    reconciler: ActorStateReconciler  # pending actor starts/stops
    # TODO(ilr) Rename reconciler to PendingState
@ray.remote
class ServeController:
"""Responsible for managing the state of the serving system.
The controller implements fault tolerance by persisting its state in
a new checkpoint each time a state change is made. If the actor crashes,
the latest checkpoint is loaded and the state is recovered. Checkpoints
are written/read using a provided KV-store interface.
All hard state in the system is maintained by this actor and persisted via
these checkpoints. Soft state required by other components is fetched by
those actors from this actor on startup and updates are pushed out from
this actor.
All other actors started by the controller are named, detached actors
so they will not fate share with the controller if it crashes.
The following guarantees are provided for state-changing calls to the
controller:
- If the call succeeds, the change was made and will be reflected in
the system even if the controller or other actors die unexpectedly.
- If the call fails, the change may have been made but isn't guaranteed
to have been. The client should retry in this case. Note that this
requires all implementations here to be idempotent.
"""
async def __init__(self,
controller_name: str,
http_host: str,
http_port: str,
http_middlewares: List[Any],
detached: bool = False):
# Used to read/write checkpoints.
self.kv_store = RayInternalKVStore(namespace=controller_name)
# Current State
self.current_state = SystemState()
# Goal State
# TODO(ilr) This is currently *unused* until the refactor of the serve
# controller.
self.goal_state = SystemState()
# ActorStateReconciler
self.actor_reconciler = ActorStateReconciler(controller_name, detached)
# backend -> AutoscalingPolicy
self.autoscaling_policies = dict()
# Dictionary of backend_tag -> router_name -> most recent queue length.
self.backend_stats = defaultdict(lambda: defaultdict(dict))
# Used to ensure that only a single state-changing operation happens
# at any given time.
self.write_lock = asyncio.Lock()
self.http_host = http_host
self.http_port = http_port
self.http_middlewares = http_middlewares
# If starting the actor for the first time, starts up the other system
# components. If recovering, fetches their actor handles.
self.actor_reconciler._start_routers_if_needed(
self.http_host, self.http_port, self.http_middlewares)
# NOTE(edoakes): unfortunately, we can't completely recover from a
# checkpoint in the constructor because we block while waiting for
# other actors to start up, and those actors fetch soft state from
# this actor. Because no other tasks will start executing until after
# the constructor finishes, if we were to run this logic in the
# constructor it could lead to deadlock between this actor and a child.
# However we do need to guarantee that we have fully recovered from a
# checkpoint before any other state-changing calls run. We address this
# by acquiring the write_lock and then posting the task to recover from
# a checkpoint to the event loop. Other state-changing calls acquire
# this lock and will be blocked until recovering from the checkpoint
# finishes.
checkpoint = self.kv_store.get(CHECKPOINT_KEY)
if checkpoint is None:
logger.debug("No checkpoint found")
else:
await self.write_lock.acquire()
asyncio.get_event_loop().create_task(
self._recover_from_checkpoint(checkpoint))
# NOTE(simon): Currently we do all-to-all broadcast. This means
# any listeners will receive notification for all changes. This
# can be problem at scale, e.g. updating a single backend config
# will send over the entire configs. In the future, we should
# optimize the logic to support subscription by key.
self.long_poll_host = LongPollerHost()
self.notify_backend_configs_changed()
self.notify_replica_handles_changed()
self.notify_traffic_policies_changed()
asyncio.get_event_loop().create_task(self.run_control_loop())
    def notify_replica_handles_changed(self):
        """Push the backend -> replica-handle map to long-poll listeners."""
        self.long_poll_host.notify_changed(
            "worker_handles", {
                backend_tag: list(replica_dict.values())
                for backend_tag, replica_dict in
                self.actor_reconciler.backend_replicas.items()
            })

    def notify_traffic_policies_changed(self):
        """Push the current traffic policies to long-poll listeners."""
        self.long_poll_host.notify_changed("traffic_policies",
                                           self.current_state.traffic_policies)

    def notify_backend_configs_changed(self):
        """Push the current backend configs to long-poll listeners."""
        self.long_poll_host.notify_changed(
            "backend_configs", self.current_state.get_backend_configs())

    async def listen_for_change(self, keys_to_snapshot_ids: Dict[str, int]):
        """Proxy long pull client's listen request.

        Args:
            keys_to_snapshot_ids (Dict[str, int]): Snapshot IDs are used to
                determine whether or not the host should immediately return
                the data or wait for the value to be changed.
        """
        return await (
            self.long_poll_host.listen_for_change(keys_to_snapshot_ids))

    def get_routers(self) -> Dict[str, ActorHandle]:
        """Returns a dictionary of node ID to router actor handles."""
        return self.actor_reconciler.routers_cache

    def get_router_config(self) -> Dict[str, Tuple[str, List[str]]]:
        """Called by the router on startup to fetch required state."""
        return self.current_state.routes
    def _checkpoint(self) -> None:
        """Checkpoint internal state and write it to the KV store.

        Must be called while holding self.write_lock so the snapshot is
        consistent.
        """
        assert self.write_lock.locked()
        logger.debug("Writing checkpoint")
        start = time.time()
        checkpoint = pickle.dumps(
            Checkpoint(self.goal_state, self.current_state,
                       self.actor_reconciler))
        self.kv_store.put(CHECKPOINT_KEY, checkpoint)
        logger.debug("Wrote checkpoint in {:.2f}".format(time.time() - start))
        # Failure-injection hook: with small probability, kill the process
        # right after checkpointing to exercise the recovery path (only for
        # detached controllers).
        if random.random(
        ) < _CRASH_AFTER_CHECKPOINT_PROBABILITY and self.detached:
            logger.warning("Intentionally crashing after checkpoint")
            os._exit(0)
    async def _recover_from_checkpoint(self, checkpoint_bytes: bytes) -> None:
        """Recover the instance state from the provided checkpoint.

        Performs the following operations:
            1) Deserializes the internal state from the checkpoint.
            2) Pushes the latest configuration to the routers
               in case we crashed before updating them.
            3) Starts/stops any replicas that are pending creation or
               deletion.

        NOTE: this requires that self.write_lock is already acquired and will
        release it before returning.
        """
        assert self.write_lock.locked()
        start = time.time()
        logger.info("Recovering from checkpoint")
        restored_checkpoint: Checkpoint = pickle.loads(checkpoint_bytes)
        # Restore SystemState
        self.current_state = restored_checkpoint.current_state
        # Restore ActorStateReconciler
        self.actor_reconciler = restored_checkpoint.reconciler
        self.autoscaling_policies = await self.actor_reconciler.\
            _recover_from_checkpoint(self.current_state, self)
        logger.info(
            "Recovered from checkpoint in {:.3f}s".format(time.time() - start))
        self.write_lock.release()

    async def do_autoscale(self) -> None:
        """Run each backend's autoscaling policy against its latest stats."""
        for backend, info in self.current_state.backends.items():
            # Backends without an autoscaling config have no policy entry.
            if backend not in self.autoscaling_policies:
                continue
            new_num_replicas = self.autoscaling_policies[backend].scale(
                self.backend_stats[backend], info.backend_config.num_replicas)
            if new_num_replicas > 0:
                await self.update_backend_config(
                    backend, BackendConfig(num_replicas=new_num_replicas))

    async def run_control_loop(self) -> None:
        """Periodic loop: autoscale and reconcile routers with live nodes."""
        while True:
            await self.do_autoscale()
            async with self.write_lock:
                self.actor_reconciler._start_routers_if_needed(
                    self.http_host, self.http_port, self.http_middlewares)
                checkpoint_required = self.actor_reconciler.\
                    _stop_routers_if_needed()
                # Only persist a checkpoint when router membership changed.
                if checkpoint_required:
                    self._checkpoint()
            await asyncio.sleep(CONTROL_LOOP_PERIOD_S)
    def get_backend_configs(self) -> Dict[str, BackendConfig]:
        """Fetched by the router on startup."""
        return self.current_state.get_backend_configs()

    def get_traffic_policies(self) -> Dict[str, TrafficPolicy]:
        """Fetched by the router on startup."""
        return self.current_state.traffic_policies

    def _list_replicas(self, backend_tag: BackendTag) -> List[ReplicaTag]:
        """Used only for testing."""
        return list(self.actor_reconciler.backend_replicas[backend_tag].keys())

    def get_traffic_policy(self, endpoint: str) -> TrafficPolicy:
        """Fetched by serve handles."""
        return self.current_state.traffic_policies[endpoint]

    def get_all_replica_handles(self) -> Dict[str, Dict[str, ActorHandle]]:
        """Fetched by the router on startup."""
        return self.actor_reconciler.backend_replicas

    def get_all_backends(self) -> Dict[str, BackendConfig]:
        """Returns a dictionary of backend tag to backend config."""
        return self.current_state.get_backend_configs()

    def get_all_endpoints(self) -> Dict[str, Dict[str, Any]]:
        """Return the endpoint dictionary from the current system state."""
        return self.current_state.get_endpoints()
    async def _set_traffic(self, endpoint_name: str,
                           traffic_dict: Dict[str, float]) -> None:
        """Validate and store a traffic policy, checkpoint, then notify.

        Caller must hold self.write_lock.

        Raises:
            ValueError: if the endpoint or any referenced backend is not
                registered.
        """
        if endpoint_name not in self.current_state.get_endpoints():
            raise ValueError("Attempted to assign traffic for an endpoint '{}'"
                             " that is not registered.".format(endpoint_name))
        assert isinstance(traffic_dict,
                          dict), "Traffic policy must be a dictionary."
        for backend in traffic_dict:
            if self.current_state.get_backend(backend) is None:
                raise ValueError(
                    "Attempted to assign traffic to a backend '{}' that "
                    "is not registered.".format(backend))
        traffic_policy = TrafficPolicy(traffic_dict)
        self.current_state.traffic_policies[endpoint_name] = traffic_policy
        # NOTE(edoakes): we must write a checkpoint before pushing the
        # update to avoid inconsistent state if we crash after pushing the
        # update.
        self._checkpoint()
        self.notify_traffic_policies_changed()

    async def set_traffic(self, endpoint_name: str,
                          traffic_dict: Dict[str, float]) -> None:
        """Sets the traffic policy for the specified endpoint."""
        async with self.write_lock:
            await self._set_traffic(endpoint_name, traffic_dict)

    async def shadow_traffic(self, endpoint_name: str, backend_tag: BackendTag,
                             proportion: float) -> None:
        """Shadow traffic from the endpoint to the backend."""
        async with self.write_lock:
            if endpoint_name not in self.current_state.get_endpoints():
                raise ValueError("Attempted to shadow traffic from an "
                                 "endpoint '{}' that is not registered."
                                 .format(endpoint_name))
            if self.current_state.get_backend(backend_tag) is None:
                raise ValueError(
                    "Attempted to shadow traffic to a backend '{}' that "
                    "is not registered.".format(backend_tag))
            self.current_state.traffic_policies[endpoint_name].set_shadow(
                backend_tag, proportion)
            # NOTE(edoakes): we must write a checkpoint before pushing the
            # update to avoid inconsistent state if we crash after pushing the
            # update.
            self._checkpoint()
            self.notify_traffic_policies_changed()
    # TODO(architkulkarni): add Optional for route after cloudpickle upgrade
    async def create_endpoint(self, endpoint: str,
                              traffic_dict: Dict[str, float], route,
                              methods) -> None:
        """Create a new endpoint with the specified route and methods.

        If the route is None, this is a "headless" endpoint that will not
        be exposed over HTTP and can only be accessed via a handle.

        Raises:
            ValueError: if the route or endpoint is already registered with
                different settings.
        """
        async with self.write_lock:
            # If this is a headless endpoint with no route, key the endpoint
            # based on its name.
            # TODO(edoakes): we should probably just store routes and endpoints
            # separately.
            if route is None:
                route = endpoint
            # TODO(edoakes): move this to client side.
            err_prefix = "Cannot create endpoint."
            if route in self.current_state.routes:
                # Ensures this method is idempotent
                if self.current_state.routes[route] == (endpoint, methods):
                    return
                else:
                    raise ValueError(
                        "{} Route '{}' is already registered.".format(
                            err_prefix, route))
            if endpoint in self.current_state.get_endpoints():
                raise ValueError(
                    "{} Endpoint '{}' is already registered.".format(
                        err_prefix, endpoint))
            logger.info(
                "Registering route '{}' to endpoint '{}' with methods '{}'.".
                format(route, endpoint, methods))
            self.current_state.routes[route] = (endpoint, methods)
            # NOTE(edoakes): checkpoint is written in self._set_traffic.
            await self._set_traffic(endpoint, traffic_dict)
            # Push the updated route table to every router.
            await asyncio.gather(*[
                router.set_route_table.remote(self.current_state.routes)
                for router in self.actor_reconciler.router_handles()
            ])

    async def delete_endpoint(self, endpoint: str) -> None:
        """Delete the specified endpoint.

        Does not modify any corresponding backends.
        """
        logger.info("Deleting endpoint '{}'".format(endpoint))
        async with self.write_lock:
            # This method must be idempotent. We should validate that the
            # specified endpoint exists on the client.
            for route, (route_endpoint,
                        _) in self.current_state.routes.items():
                if route_endpoint == endpoint:
                    route_to_delete = route
                    break
            else:
                # No route maps to this endpoint: nothing to do (idempotent).
                logger.info("Endpoint '{}' doesn't exist".format(endpoint))
                return
            # Remove the routing entry.
            del self.current_state.routes[route_to_delete]
            # Remove the traffic policy entry if it exists.
            if endpoint in self.current_state.traffic_policies:
                del self.current_state.traffic_policies[endpoint]
            self.actor_reconciler.endpoints_to_remove.append(endpoint)
            # NOTE(edoakes): we must write a checkpoint before pushing the
            # updates to the routers to avoid inconsistent state if we crash
            # after pushing the update.
            self._checkpoint()
            await asyncio.gather(*[
                router.set_route_table.remote(self.current_state.routes)
                for router in self.actor_reconciler.router_handles()
            ])
    async def create_backend(self, backend_tag: BackendTag,
                             backend_config: BackendConfig,
                             replica_config: ReplicaConfig) -> None:
        """Register a new backend under the specified tag."""
        async with self.write_lock:
            # Ensures this method is idempotent.
            backend_info = self.current_state.get_backend(backend_tag)
            if backend_info is not None:
                if (backend_info.backend_config == backend_config
                        and backend_info.replica_config == replica_config):
                    return
            backend_replica = create_backend_replica(
                replica_config.func_or_class)
            # Save creator that starts replicas, the arguments to be passed in,
            # and the configuration for the backends.
            self.current_state.add_backend(
                backend_tag,
                BackendInfo(
                    worker_class=backend_replica,
                    backend_config=backend_config,
                    replica_config=replica_config))
            metadata = backend_config.internal_metadata
            if metadata.autoscaling_config is not None:
                self.autoscaling_policies[
                    backend_tag] = BasicAutoscalingPolicy(
                        backend_tag, metadata.autoscaling_config)
            try:
                self.actor_reconciler._scale_backend_replicas(
                    self.current_state.backends, backend_tag,
                    backend_config.num_replicas)
            except RayServeException as e:
                # Roll back the registration if scaling was impossible.
                del self.current_state.backends[backend_tag]
                raise e
            # NOTE(edoakes): we must write a checkpoint before starting new
            # or pushing the updated config to avoid inconsistent state if we
            # crash while making the change.
            self._checkpoint()
            await self.actor_reconciler._start_pending_backend_replicas(
                self.current_state)
            self.notify_replica_handles_changed()
            # Set the backend config inside the router
            # (particularly for max_concurrent_queries).
            self.notify_backend_configs_changed()

    async def delete_backend(self, backend_tag: BackendTag) -> None:
        """Delete the specified backend.

        Raises:
            ValueError: if the backend is still referenced by any endpoint's
                traffic or shadow policy.
        """
        async with self.write_lock:
            # This method must be idempotent. We should validate that the
            # specified backend exists on the client.
            if self.current_state.get_backend(backend_tag) is None:
                return
            # Check that the specified backend isn't used by any endpoints.
            for endpoint, traffic_policy in self.current_state.\
                    traffic_policies.items():
                if (backend_tag in traffic_policy.traffic_dict
                        or backend_tag in traffic_policy.shadow_dict):
                    raise ValueError("Backend '{}' is used by endpoint '{}' "
                                     "and cannot be deleted. Please remove "
                                     "the backend from all endpoints and try "
                                     "again.".format(backend_tag, endpoint))
            # Scale its replicas down to 0. This will also remove the backend
            # from self.current_state.backends and
            # self.actor_reconciler.backend_replicas.
            self.actor_reconciler._scale_backend_replicas(
                self.current_state.backends, backend_tag, 0)
            # Remove the backend's metadata.
            del self.current_state.backends[backend_tag]
            if backend_tag in self.autoscaling_policies:
                del self.autoscaling_policies[backend_tag]
            # Add the intention to remove the backend from the router.
            self.actor_reconciler.backends_to_remove.append(backend_tag)
            # NOTE(edoakes): we must write a checkpoint before removing the
            # backend from the router to avoid inconsistent state if we crash
            # after pushing the update.
            self._checkpoint()
            await self.actor_reconciler._stop_pending_backend_replicas()
            self.notify_replica_handles_changed()
    async def update_backend_config(self, backend_tag: BackendTag,
                                    config_options: BackendConfig) -> None:
        """Set the config for the specified backend."""
        async with self.write_lock:
            assert (self.current_state.get_backend(backend_tag)
                    ), "Backend {} is not registered.".format(backend_tag)
            assert isinstance(config_options, BackendConfig)
            # Merge only the explicitly-set options into the stored config.
            stored_backend_config = self.current_state.get_backend(
                backend_tag).backend_config
            backend_config = stored_backend_config.copy(
                update=config_options.dict(exclude_unset=True))
            backend_config._validate_complete()
            self.current_state.get_backend(
                backend_tag).backend_config = backend_config
            # Scale the replicas with the new configuration.
            self.actor_reconciler._scale_backend_replicas(
                self.current_state.backends, backend_tag,
                backend_config.num_replicas)
            # NOTE(edoakes): we must write a checkpoint before pushing the
            # update to avoid inconsistent state if we crash after pushing the
            # update.
            self._checkpoint()
            # Inform the router about change in configuration
            # (particularly for setting max_batch_size).
            await self.actor_reconciler._start_pending_backend_replicas(
                self.current_state)
            await self.actor_reconciler._stop_pending_backend_replicas()
            self.notify_replica_handles_changed()
            self.notify_backend_configs_changed()

    def get_backend_config(self, backend_tag: BackendTag) -> BackendConfig:
        """Get the current config for the specified backend."""
        assert (self.current_state.get_backend(backend_tag)
                ), "Backend {} is not registered.".format(backend_tag)
        return self.current_state.get_backend(backend_tag).backend_config

    async def shutdown(self) -> None:
        """Shuts down the serve instance completely."""
        async with self.write_lock:
            # Kill every router and replica, then delete the checkpoint so a
            # future controller starts from a clean slate.
            for router in self.actor_reconciler.router_handles():
                ray.kill(router, no_restart=True)
            for replica in self.actor_reconciler.get_replica_handles():
                ray.kill(replica, no_restart=True)
            self.kv_store.delete(CHECKPOINT_KEY)
| [
"noreply@github.com"
] | noreply@github.com |
6d4ccc01a0dc81b33d21a9f1ecd1714a78978b4a | 9b1446b26e81a79c303f9799fb6a91785c7adb03 | /.history/Code/markov_chain_20200121115909.py | bdd2412da83d245c8cc5b9d7f73b3d70654d3c82 | [] | no_license | SamirIngley/CS1.2-Tweet-Gen | 017ea15b1113881a156ff24682828bc654eb6c81 | bcd95fa63e05849cbf8e36230d8e31032b99daaa | refs/heads/master | 2020-12-14T20:19:57.733290 | 2020-08-04T23:19:23 | 2020-08-04T23:19:23 | 234,856,234 | 0 | 0 | null | 2020-06-05T21:13:04 | 2020-01-19T07:05:55 | Python | UTF-8 | Python | false | false | 2,340 | py | import sample
import random
from clean_text import clean
from dictogram import Dictogram
class Markov():
    """First-order Markov chain over a cleaned text corpus.

    ``self.states`` maps each word to a histogram (word -> count) of the
    words observed immediately after it in the corpus.
    """

    def __init__(self, corpus):
        # clean() tokenizes/normalizes the corpus (project helper).
        self.corpus = clean(corpus)
        self.states = {}
        self.chain()

    def chain(self):
        """Build the transition table from consecutive word pairs."""
        last_word = None
        for word in self.corpus:
            if last_word is not None:  # skip the very first word
                if last_word not in self.states:  # unseen predecessor
                    self.states[last_word] = Dictogram()
                self.states[last_word].add_count(word)
            last_word = word

    def __str__(self):
        return str(self.states)

    def random_walk(self, num_words=11):
        """Return a list of ``num_words`` words sampled from the chain.

        Fixes over the previous version:
        * ``random.randint(0, len(self.states))`` could return
          ``len(self.states)``, raising IndexError when indexing the states.
        * Each step now performs a proper frequency-weighted draw from the
          current word's histogram; the old percentage loop repeatedly added
          the same value and never advanced through the candidate words.
        * A word seen only at the very end of the corpus has no entry in
          ``self.states``; we restart uniformly instead of raising KeyError.
        * Debug prints removed.
        """
        sentence = []
        last_word = None
        for _ in range(num_words):
            histogram = self.states.get(last_word) if last_word else None
            if histogram:
                # Weighted draw: walk cumulative counts until the dart lands
                # inside a word's bucket.
                total = sum(histogram.values())
                dart = random.uniform(0, total)
                cumulative = 0
                for word, count in histogram.items():
                    cumulative += count
                    if dart <= cumulative:
                        last_word = word
                        break
            else:
                # No usable state yet (first word or dead end): restart at a
                # uniformly random known word.
                last_word = random.choice(list(self.states))
            sentence.append(last_word)
        return sentence
if __name__ == '__main__':
    # NOTE(review): `source` is unused -- the chain is actually built from
    # the file 'source.txt', not from this inline sample string.
    source = 'one fish two fish red fish blue fish'
    markov = Markov('source.txt')
    print(markov.states)
    print('')
    print(markov.random_walk())
| [
"samir.ingle7@gmail.com"
] | samir.ingle7@gmail.com |
bf3ab03aff33092943b6feb95eb8ecc781d53477 | 91d9bba0d7608818c077676c588701855dd1382c | /virtual/bin/pyrsa-encrypt | 3bf93434aec95d8f816bcb119259cc94286688e0 | [] | no_license | Antony-me/movie-library | 89e82c908288d55153aa81f2289087246e383cf4 | cbd333a79362ab1a2aa26356a6e914e9b67f5a63 | refs/heads/main | 2023-01-11T10:29:56.626291 | 2020-11-19T12:17:29 | 2020-11-19T12:17:29 | 313,741,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | #!/home/moringa/Documents/Moringa-Projects/CORE-PYTHON/Django/Netflix/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import encrypt
if __name__ == '__main__':
    # Console-script shim (presumably generated by pip/setuptools -- confirm):
    # strip the wrapper suffix from argv[0] so usage messages show the bare
    # command name, then run the rsa CLI encrypt command and exit with its
    # return code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(encrypt())
| [
"antonymunyasia993@gmail.com"
] | antonymunyasia993@gmail.com | |
5dd6bb6075e05874f3d0b9cdc6b8d511e8d16137 | 819732c3a24312626fd5e244b527f228515cda2e | /POMDemo/GenericMethods/Methods.py | 642fcbdc4a0b35038ff9c0012096f447673e1975 | [] | no_license | kumail-anis/PythonSeleniumProject | edf93de57d0a85478c644d2b2ea75794d984be58 | 001ffeb5736dbc3077c48e3bbf2d73beea885b56 | refs/heads/main | 2023-03-02T03:26:32.891853 | 2021-02-17T04:38:24 | 2021-02-17T04:38:24 | 339,612,053 | 0 | 0 | null | 2021-02-17T04:50:57 | 2021-02-17T04:36:42 | Python | UTF-8 | Python | false | false | 783 | py | class Methods():
    def __init__(self, driver):
        # driver: a Selenium-style WebDriver used by the locator helpers
        # below (only find_element_by_id/find_element_by_xpath are called).
        self.driver = driver
def clickElement(self, elementName):
if "//" not in elementName:
print("element is an ID")
self.driver.find_element_by_id(elementName).click()
else:
print("element is an xpath")
self.driver.find_element_by_xpath(elementName).click()
def sendKey(self, elementName, elementText):
if "//" not in elementName:
self.driver.find_element_by_id(elementName).clear()
self.driver.find_element_by_id(elementName).send_keys(elementText)
else:
self.driver.find_element_by_xpath(elementName).clear()
self.driver.find_element_by_xpath(elementName).send_keys(elementText)
| [
"syed.anis@Syeds-MacBook-Pro.local"
] | syed.anis@Syeds-MacBook-Pro.local |
0f7eff965aed0af96a8a0388254490d8a4d909e0 | 9acdf90f1faf6c77981b51f66b5005787328e325 | /io_file.py | a74b1ff36ab337a9cff5700cd43e59c1ae98b9d4 | [] | no_license | 0-gpa-gang/NumRoll | ea6113b5c7380ca2aac9c03c0f4855f37cf02a62 | 742e3798185a58734dd1e6bdb3d433094054d10b | refs/heads/master | 2023-01-06T23:11:53.813542 | 2020-11-08T18:45:49 | 2020-11-08T18:45:49 | 310,912,650 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | import sqlite3
import os
# import the following lines to the main py file
# conn = sqlite3.connect("image.db")
# c = conn.cursor()
def read_from_db():
    """Return the list of image paths (column 0) stored in image.db.

    The connection is opened per call and always closed before returning,
    fixing the connection leak in the previous version.
    """
    conn = sqlite3.connect("image.db")
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM image")
        return [row[0] for row in cursor.fetchall()]
    finally:
        conn.close()
def output_to_db(classify):
    """Write classifier results for each image back to image.db.

    ``classify[i]`` is the classifier value for the i-th path returned by
    :func:`read_from_db`; rows are matched on their path column.

    Fixes over the previous version: the connection is always closed, and
    the dead string-literal "commented out" debug block was removed.
    """
    conn = sqlite3.connect("image.db")
    try:
        cursor = conn.cursor()
        paths = read_from_db()
        for i, num in enumerate(classify):
            cursor.execute("UPDATE image SET classifier = (?) WHERE path = (?)",
                           (num, paths[i]))
        conn.commit()
    finally:
        conn.close()
def special_case():
    """Easter egg: if the classifier digits spell "42069", play a video.

    Concatenates the classifier column (column 1) of every row in table
    order; fixes the connection leak and the quadratic ``+=`` string build
    of the previous version.
    """
    conn = sqlite3.connect("image.db")
    try:
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM image")
        special = "".join(str(row[1]) for row in cursor.fetchall())
    finally:
        conn.close()
    if special == "42069":
        os.system("vlc RickRoll.mp4")  # change with system
| [
"zony249@gmail.com"
] | zony249@gmail.com |
24f099a2224e7baa91a9ab2ebaa2b26ed626e085 | b86a0656dfb861e0af4b784c94f52742738c29ae | /abf-repoclosure/repoclosure/renders/render_repoclosure.py | a50e71f3bd0e2a7cd0cf7642ac73fd62d6f25f28 | [] | no_license | OpenMandrivaSoftware/docker-abf | dba52547ac51fa86028a4ee56f9b165297e66fd5 | 635774f0f97e71aeaa8f9a3965bfa94c99ad1d93 | refs/heads/master | 2023-06-21T14:59:42.311892 | 2023-06-20T01:04:10 | 2023-06-20T01:04:10 | 45,573,628 | 0 | 6 | null | 2018-03-19T21:56:09 | 2015-11-04T23:12:59 | Shell | UTF-8 | Python | false | false | 560 | py | from ..templates import repoclosure
def render_repoclosure(result, title, compressed_report, path):
    """Render a repoclosure result dict into a report file at ``path``.

    result: dict from the repoclosure run; return_code == -1 means the run
        itself failed, in which case only the raw errors are rendered.
    title: title passed through to the template.
    compressed_report: NOTE(review) -- presumably a link/flag for a
        compressed copy of the report; confirm against the template.
    path: output file path.
    """
    t = repoclosure()
    if result['return_code'] == -1:
        # The repoclosure run failed entirely: render errors only.
        t.code = -1
        t.title = title
        t.errors = result['errors_raw']
    else:
        t.bad_packages = result['report']
        t.code = result['return_code']
        t.errors = result['errors_raw']
        t.count = result['count']
        t.title = title
        t.total_count = result['total_count']
        t.percent = result['percent']
    t.compressed = compressed_report
    with open(path, "w") as f:
        f.write(t.respond())
"you@example.com"
] | you@example.com |
50e74bffa5f94a4ea2c29d8da1e872c1b3c93b0a | 31bb1f9390f862f21b1d1d710d8ce493705e233d | /stepics.py | 12d03210f608ab4436b39a595236fdaee72126f6 | [] | no_license | Tookser/norbi-bot | 8eb0dd26636e8c6e65962cad36e55f53b8704592 | 0f49997817edf8b392993e0000533f3451e83046 | refs/heads/main | 2023-04-04T03:37:49.667685 | 2021-04-11T18:20:10 | 2021-04-11T18:20:10 | 356,941,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,876 | py | '''здесь будут храниться тесты (шкала Бека/Альтмана), процедуры КПТ и прочее'''
# from abc import ABC, abstractmethod
# import configparser
from collections import namedtuple
import configparser
from functools import wraps
import os
from baseconfig import TESTS_DIRNAME
from telebot import types
import userdblib
# import handlersbot as hb
import baseconfig
from wrapper_sendmessage import send_message, create_keyboard
bot = baseconfig.bot
db = userdblib.db
all_handlers = []
def stop_when_stop(func):
    '''Decorator for half-step handlers.

    Returns a wrapper that intercepts a "stop" message: if the user asked
    to stop, a confirmation is sent instead of invoking the handler;
    otherwise the wrapped handler runs normally.
    '''
    # Maybe mark DEPRECATED
    # TODO move the logic into textprocess
    stop_words = ('/stop', 'stop', 'остановись', 'прервись')

    def _wants_stop(text):
        return text.strip().lower() in stop_words

    @wraps(func)
    def wrapper(message):
        if _wants_stop(message.text):
            send_message(message.from_user.id, "Вы вернулись назад.")
        else:
            func(message)
    return wrapper
class CBTTest:
    """A multi-step questionnaire (e.g. Beck/Altman scales) run by the bot.

    Builds a chain of telebot step handlers: the first handler is bound to
    the /<keyword> command, middle handlers ask the following questions,
    and the last handler scores the collected answers.
    """
    # For keeping handlers in the global scope.
    number_of_handler = 0
    handler_prefix = 'cbtest_handler'
    def __init__(self, *, name='Test', keyword='test', steps=None,
                 process_function=None,
                 all_handlers_decorator=stop_when_stop,
                 middle_handlers_decorator=lambda x: x):
        '''Objects of this class are tests.

        name - unique name, used as the key in the user database
        keyword - command that starts the test, e.g. beck (for /beck)
        steps - the questions; objects of class Step
        process_function - maps the stored answer list (from the DB) to a
            result message
        all_handlers_decorator - decorator wrapping every handler; defaults
            to stop_when_stop, which aborts the test on a stop message
        middle_handlers_decorator - decorator wrapping only middle handlers
        '''
        assert steps is not None
        assert process_function is not None
        # self._bot = bot
        self._name = name
        self._keyword = keyword
        self._process_function = process_function
        self._all_handlers_decorator = all_handlers_decorator
        self._middle_handlers_decorator = middle_handlers_decorator
        self._steps = steps[:]
        first_step = steps[0]
        other_steps = steps[1:]
        # Handlers are built last-to-first, then the list is reversed below.
        self._handlers_reversed_list = []
        self._last_handler = self._create_last_handler()
        for step in reversed(other_steps):
            self._handlers_reversed_list.append(self._create_middle_handler(step))
        self._first_handler = self._create_first_handler(first_step, keyword)
        self._handlers_reversed_list = \
            [self._last_handler] + \
            self._handlers_reversed_list + \
            [self._first_handler]
        self._handlers_list = self._handlers_reversed_list[::-1]
    @property
    def name(self):
        return self._name
    @property
    def keyword(self):
        return self._keyword
    @property
    def steps(self):
        return self._steps
    def _next_handler(self, id):
        '''Look up in the DB which step the user is on; return its handler.'''
        return self._handlers_list[db[id]['step']]
    @property
    def _last_existing_handler(self):
        '''Returns the last already-created handler (the first in the test).'''
        # NOTE(review): everything after the raise is unreachable legacy code.
        raise NotImplementedError
        if self._handlers_reversed_list:
            print('first branch')
            print(self._handlers_reversed_list)
            return self._handlers_reversed_list[-1]
        else:
            print('second branch')
            return self._last_handler
    def _create_first_handler(self, step, keyword):
        '''Create the first handler, always active via the /<keyword> command.'''
        #TODO bring back
        @bot.message_handler(commands=[keyword])
        @self._all_handlers_decorator
        # @bot.message_handler(commands=['test'])
        def handler(message):
            '''TODO flesh out: keyboard handling etc.'''
            id = message.from_user.id
            print('keyboard:', step.keyboard)
            msg = send_message(id, step.text, keyboard=step.keyboard)
            record = db[id]
            record['step'] = 1
            db[id] = record
            bot.register_next_step_handler(msg, self._next_handler(id))
        return handler
    def _create_middle_handler(self, step):
        '''Create a handler for a middle step.'''
        @self._all_handlers_decorator
        @self._middle_handlers_decorator
        def handler(message):
            '''TODO flesh out: keyboard handling etc.'''
            id = message.from_user.id
            self._write_to_db(id, message.text)
            msg = send_message(id, step.text, keyboard=step.keyboard)
            record = db[id]
            record['step'] += 1
            db[id] = record
            bot.register_next_step_handler(msg,
                                           self._next_handler(id))
        return handler
    def _process(self, id):
        # Score the stored answers into a result message.
        return self._process_function(self._get_from_db(id))
    def _create_last_handler(self):
        '''Create the final handler, which scores everything and replies.'''
        @self._all_handlers_decorator
        def handler(message):
            id = message.from_user.id
            self._write_to_db(id, message.text)
            msg = send_message(id, self._process(id), keyboard=None)
        return handler
    def _write_to_db(self, id, info):
        '''Append one piece of information received from the user during
        the test to the key-value store. Stored as a list.'''
        record = db[id]
        if self._name not in record:
            record[self._name] = []
        record[self._name].append(info)
        db[id] = record
    def _get_from_db(self, id):
        '''Return the list of stored answer fragments for this user.'''
        return userdblib.db[id].get(self._name, [])
class Step:
'''элементарный шаг'''
def __init__(self, text, answers=None):
'''text - текст вопроса
answers - список возможных ответов, если пустой - принимается так
TODO сделать валидацию ответа и повторно спросить, если непонятно'''
# def create_keyboard(lst):
# keyboard = types.ReplyKeyboardMarkup(one_time_keyboard=True)
# for button_text in lst:
# key = types.KeyboardButton(text=button_text)
# keyboard.add(key)
# return keyboard
self._text = text
self._keyboard = create_keyboard(answers)
@property
def text(self):
return self._text
@property
def keyboard(self):
return self._keyboard
def __str__(self):
return self.text
class NumericTest(CBTTest):
'''тест с клавиатуркой и числами, типа шкалы Бека
questions - вопросы'''
def __init__(self, *, name, keyword, steps, process_function=None):
super().__init__(name=name, keyword=keyword, steps=steps,
process_function=process_function,)
@classmethod
def create_from_file(cls, file_name, *, process_function=None):
# Считывает тест из конфига
if process_function is None:
'''если обработчик результата не задан'''
def process_function(lst):
'''просто посчитать баллы'''
result = 0
for i, val in enumerate(lst):
# TODO сделать больше 9 баллов возможно
result += int(val[1])
return result
test = configparser.ConfigParser()
test.read(file_name)
general_info = test['general']
name = general_info['name']
keyword = general_info['keyword']
_number_of_questions = int(general_info['number_of_questions'])
assert _number_of_questions > 0
_number_of_answers = int(general_info['number_of_answers'])
assert _number_of_answers > 0
steps = []
for i in range(1, _number_of_questions + 1):
question = test['question' + str(i)]
text = question['text']
answers = []
for j in range(1, _number_of_answers + 1):
try:
answers.append(question['answer' + str(j)])
except IndexError:
# TODO сделать динамическое количество ответов
raise
steps.append(Step(text, answers))
return NumericTest(name=name, keyword=keyword, steps=steps,
process_function=process_function)
| [
"t2468ttt@gmail.com"
] | t2468ttt@gmail.com |
856aa99358678c45223c58b60f7449f2bf7b46dd | ae3fbbef23c5531aa64ea29da2a1529285540100 | /Develop/node_modules/fsevents/build/config.gypi | 76faa637410fa44f227638f129ed3b5b20b01fe2 | [
"MIT"
] | permissive | 0216johnsoneric/Employee-Summary-App | 751d381cd03fb48c9bcf9e53f70471ea4a43d78b | d4710777c7a25a5e43d7739200a58f2dbf6c5ee6 | refs/heads/main | 2023-01-02T09:00:14.914422 | 2020-10-21T17:21:42 | 2020-10-21T17:21:42 | 302,193,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,722 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt66l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "66",
"is_debug": 0,
"llvm_version": "0.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/eric/Library/Caches/node-gyp/14.2.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/Users/eric/.nvm/versions/node/v14.2.0/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/Users/eric/.nvm/versions/node/v14.2.0/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/eric/.npm-init.js",
"userconfig": "/Users/eric/.npmrc",
"cidr": "",
"node_version": "14.2.0",
"user": "501",
"auth_type": "legacy",
"editor": "code",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/Users/eric/.nvm/versions/node/v14.2.0/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/eric/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.4 node/v14.2.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/rt/jmf96_856ss8r_281tcxht7r0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/Users/eric/.nvm/versions/node/v14.2.0"
}
}
| [
"ericjohnson0216@icloud.com"
] | ericjohnson0216@icloud.com |
c4be40118f1a01dfe57c289b347db5318d05063a | e0cd95636c305ca14523ff1d8405cd6d3253e34b | /Programming/CS313E/Blackjack.py | d84383618c71d95ec1d06e41584987f4a48e82ea | [] | no_license | stOracle/Migrate | f9563b49b275ade35bb8055b140ab85179c94c1e | 4e5a8ecab5141ba42dece8741381912d63e635bb | refs/heads/master | 2020-03-28T17:50:28.818475 | 2018-09-14T18:48:22 | 2018-09-14T18:48:22 | 148,827,694 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,670 | py | # File: Blackjack.py
# Description: A program to simulate a game of blackjack
# Student's Name: Stephen Rauner
# Student's UT EID: STR428
# Course Name: CS 313E
# Unique Number: 50945
# Date Created: 2/15/2016
# Date Last Modified: 2/17/2016
# ///////////////////////////////////////////////////////////////
import random
class Card (object):
RANKS = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)
SUITS = ('C', 'D', 'H', 'S')
def __init__(self, rank = 12, suit = 'S'):
if (rank in Card.RANKS):
self.rank = rank
else:
self.rank = 12
if (suit in Card.SUITS):
self.suit = suit
else:
self.suit = 'S'
def __str__ (self):
if (self.rank == 1):
rank = 'A'
elif (self.rank == 13):
rank = 'K'
elif (self.rank == 12):
rank = 'Q'
elif (self.rank == 11):
rank = 'J'
else:
rank = str(self.rank)
return rank + self.suit
def __eq__ (self, other):
return (self.rank == other.rank)
def __ne__ (self, other):
return (self.rank != other.rank)
def __lt__ (self, other):
return (self.rank < other.rank)
def __le__ (self, other):
return (self.rank <= other.rank)
def __gt__ (self, other):
return (self.rank < other.rank)
def __ge__ (self, other):
return (self.rank <= other.rank)
class Deck (object):
def __init__(self):
# create the deck
self.deck = []
# fill the deck
for suit in Card.SUITS:
for rank in Card.RANKS:
# create a card
card = Card(rank,suit)
# build the deck by adding said card
self.deck.append(card)
def shuffle(self):
random.shuffle (self.deck)
def deal (self):
# if you're out of cards, you can't deal
if (len(self.deck) == 0):
return None
else:
return self.deck.pop(0)
class Player (object):
def __init__ (self, cards):
# define said player based on the cards fed
self.cards = cards
def hit (self, card):
# if hit is called, it's going to build a card and add it to the self's hand
self.cards.append(card)
def get_points (self):
count = 0
for card in self.cards:
if card.rank > 9:
count += 10
elif card.rank == 1:
count += 11
else:
count += card.rank
for card in self.cards:
if count <= 21:
break
elif card.rank == 1:
count = count - 10
return count
def __str__ (self):
hand = ""
for i in range (len(self.cards)):
hand += str(self.cards[i]) + " "
return "{}- {}".format(hand, self.get_points())
'''The outcomes of the game (blackjack, winner, tie, lose) are all attributes
of the player's hand, thus these methods are under Player, since there
isn't a class for hand.'''
def has_blackjack (self):
return (len(self.cards) == 2) and (self.get_points() == 21)
# is_winner is dependent on self's hand and how it compares to other's
def is_winner (self, other):
# if you didn't bust,
if (self.get_points() <= 21):
# but other did,
if (other.get_points() > 21):
# you won
return True
# and you have more points than other,
elif (self.get_points() > other.get_points()):
# you win
return True
# and other has more points (not exceeding 21)
else:
# you lose
return False
# if you busted
else:
# you lost
return False
def is_tie (self, other):
# the second conditional is in case they both busted at the same number
return (self.get_points() == other.get_points()) and (self.get_points() <= 21)
class Dealer (Player):
def __init__ (self, cards):
Player.__init__ (self, cards)
self.show_one_card = True
# over-ride the hit() function in the parent class
def hit (self, deck):
self.show_one_card = False
while self.get_points() < 17:
self.cards.append(deck.deal())
def __str__ (self):
# Only show the first card, ...
if self.show_one_card:
return str(self.cards[0])
# ...until the dealer starts playing.
else:
return Player.__str__ (self)
class Blackjack (object):
def __init__ (self, num_players = 1):
# make and shuffle the deck
self.deck = Deck()
self.deck.shuffle()
self.num_players = num_players
# seat_list includes the dealer, player_list does not
# seat_list is there to assist in round robin dealing
self.seat_list = []
self.player_list = []
for i in range(self.num_players + 1):
self.seat_list.append([])
# round robin style of shuffling
for i in range((self.num_players + 1) * 2):
# the if statement appends is for the dealer
if ((i == self.num_players) or (i == 2 * self.num_players + 1)):
self.seat_list[-1].append(self.deck.deal())
# the else statement is for everyone else
else:
res = i % (self.num_players + 1)
self.seat_list[res].append(self.deck.deal())
'''Now, self.seat_list is populated with the entire table's hands;
the last element in self.seatlist is the dealer.'''
# pick out the players' hands; leave the dealer's
for i in range(len(self.seat_list) - 1):
# create the players
self.player_list.append(Player(self.seat_list[i]))
# create the dealer
self.dealer = Dealer(self.seat_list[-1])
for i in range(len(self.player_list)):
print ("Player {}: {}".format(i + 1, str(self.player_list[i])))
print ("Dealer: {}".format(str(self.dealer)))
def play (self):
for i in range(self.num_players):
print()
points = 0
while True:
# if the ith player has a blackjack, they don't get a choice.
if (self.player_list[i].has_blackjack()):
print ("Player {} has a blackjack".format(str(i + 1)))
break
choice = input('Player {}, you have {} points\nDo you want to hit? [y/n]: \n'
.format(i + 1, self.player_list[i].get_points()))
if choice in ('y', 'Y', 'yes'):
(self.player_list[i]).hit(self.deck.deal())
points = (self.player_list[i]).get_points()
print ("Player {}: {}".format(str(i + 1), str(self.player_list[i])))
if (points >= 21):
break
else:
break
# once everyone has gone, the dealer plays out his hand
self.dealer.hit(self.deck)
print ("\nDealer: {}\n".format(str(self.dealer)))
for i in range(len(self.player_list)):
if self.player_list[i].is_winner(self.dealer):
print ("Player {} wins".format(i + 1))
elif self.player_list[i].is_tie(self.dealer):
# push means tie in blackjack
print ("Player {} pushes".format(i + 1))
else:
print ("Player {} loses".format(i + 1))
def main():
print()
# user-input in range [1, 6]
num_players = int(input("Enter number of players: "))
while((num_players < 1) or (num_players > 6)):
num_players = int(input("Enter number of players: "))
print()
# setup the game
game = Blackjack(num_players)
# play the game
game.play()
main()
| [
"srauner@srauner-T470.us.oracle.com"
] | srauner@srauner-T470.us.oracle.com |
6dbc58c49c56a5e6581818457229cba26f09ae68 | 9b8839d2d1170322f4a8d6fd5e38dcec45a3ed1d | /client/template.py | bc83ad7dc7f08a01a036b00cf149eee2d96b776b | [] | no_license | ke0m/distrmq | f568e6f75bc6fc4f831db80e807d81ab903846e8 | 1513444f852e9c803102ffdf51a422bd4bdef002 | refs/heads/master | 2023-03-27T12:32:59.672506 | 2021-03-15T16:12:08 | 2021-03-15T16:12:08 | 288,106,441 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | """
A template for a client (worker) that will
be launched on a cluster
@author: Joseph Jennings
@version: 2020.08.17
"""
import zmq # ZMQ sockets
from comm.sendrecv import notify_server, send_zipped_pickle, recv_zipped_pickle # Comm functions
from foo import foo # Function that will do the work
# Connect to socket
context = zmq.Context()
socket = context.socket(zmq.REQ) # this is a "request" type ZMQ socket
socket.connect("tcp://serveraddr:5555")
# Listen for work from server
while True:
# Notify we are ready
notify_server(socket)
# Get work
chunk = recv_zipped_pickle(socket)
# If chunk is empty, keep listening
if(chunk == {}):
continue
# If I received something, do some work
ochunk = {}
ochunk['result'] = foo(chunk)
# Return other parameters if desired
ochunk['other'] = chunk['other']
# Tell server this is the result
ochunk['msg'] = "result"
# Send back the result
send_zipped_pickle(socket,ochunk)
# Receive 'thank you'
socket.recv()
| [
"joseph.s.jennings@gmail.com"
] | joseph.s.jennings@gmail.com |
7ce634d95eddadf617b6a03c86073c228cd312ae | 63076e324075cdd9ec26c099ff3a7cb8d460e52e | /17_Ternary_Operator.py | 5bf58a2e201c72df18d345ea5384deb021939143 | [] | no_license | mahrrony/python_toturial_anis | 4c661e5919e41aa28465cc810d82537993c29ab4 | 4988a1677f370688dd449fdf1d0298eda836839b | refs/heads/main | 2023-07-27T05:34:46.899059 | 2021-09-09T10:02:59 | 2021-09-09T10:02:59 | 398,530,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | num1 = 20
num2 = -34
'''
if num1 >num2:
print(num1)
else:
print(num2)
'''
print(num1 if num1>num2 else num2)
max=num1 if num1>num2 else num2
min=num1 if num1<num2 else num2
#print(max)
print("Maximum =",max)
print("Minimum =",min)
| [
"mahrrony@gmail.com"
] | mahrrony@gmail.com |
590dfe20b180eb3890d52d15e988acae284a291f | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-7295.py | b08c00077bf2436014166c990deddcaa8e461e1f | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,755 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        # Delegates each element to append().
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        # Benchmark clone of append_all(); new_items2 and item2 are unused.
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        # Benchmark clone of append_all(); the numbered extras are unused.
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        # Benchmark clone of append_all(); the numbered extras are unused.
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        # Benchmark clone of append_all(); the numbered extras are unused.
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector5", idx: int) -> object:
        # Shifts everything after idx left by one, then shrinks size.
        # NOTE(review): for idx >= size the loop never runs but size is
        # still decremented -- confirm callers never pass such an index.
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        # Benchmark clone of remove_at(); idx2 is unused.
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        # Benchmark clone of remove_at(); idx2..idx3 are unused.
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        # Benchmark clone of remove_at(); idx2..idx4 are unused.
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        # Benchmark clone of remove_at(); idx2..idx5 are unused.
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        # No bounds check: relies on list indexing semantics.
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        # Benchmark clone of get(); idx2 is unused.
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        # Benchmark clone of get(); idx2..idx3 are unused.
        return self.items[idx]
    # Retrieves an item at a given index
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        # Benchmark clone of get(); idx2..idx4 are unused.
        return self.items[idx]
    # Retrieves an item at a given index
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        # Benchmark clone of get(); idx2..idx5 are unused.
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        # Logical element count, distinct from capacity().
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector5") -> int:
        # Benchmark clone of length().
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector5") -> int:
        # Benchmark clone of length().
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector5") -> int:
        # Benchmark clone of length().
        return self.size
    # Retrieves the current size of the vector
    def length5(self:"Vector5") -> int:
        # Benchmark clone of length().
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    # Capacity doubles until it reaches this threshold, then grows by one.
    doubling_limit:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            # Doubling duplicates the current contents; the copied tail is
            # just slack that later appends overwrite.
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
    # Both methods read doubling_limit; doubling_limit2 is never used.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector2") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
    # All methods read doubling_limit; the numbered extras are never used.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector3") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector3") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
    # All methods read doubling_limit; the numbered extras are never used.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector4") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            # Fixed: original read "$Var.items + [0]" -- a leftover template
            # placeholder that is a Python syntax error.
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector4") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector4") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
    # All methods read doubling_limit; the numbered extras are never used.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector5") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector5") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector5") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity5(self:"DoublingVector5") -> int:
        # Benchmark clone of increase_capacity().
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    # Fills a DoublingVector with the ints i, i+1, ..., j-1.
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    # Benchmark clone of vrange(); i2/j2 and v2 are unused.
    v:Vector = None
    v2:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    # Benchmark clone of vrange(); numbered extras are unused.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    # Benchmark clone of vrange(); numbered extras are unused.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    # Benchmark clone of vrange(); numbered extras are unused.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v5:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    # Removes every later element divisible by the current one, so for a
    # vrange(2, n) input only the primes survive, in place.
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
    # Benchmark clone of sieve(); only v is processed, the extras are unused.
    i:int = 0
    i2:int = 0
    j:int = 0
    j2:int = 0
    k:int = 0
    k2:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    # Benchmark clone of sieve(); only v is processed, the extras are unused.
    i:int = 0
    i2:int = 0
    i3:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    # Benchmark clone of sieve(); only v is processed, the extras are unused.
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    # Benchmark clone of sieve(); only v is processed, the extras are unused.
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    i5:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    j5:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    k5:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Input parameter
n:int = 50
# n2..n5, v2..v5 and i2..i5 are benchmark clones; only n, v and i are read
# by the sieve/print loop below.
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
# After sieve(v) only the primes in [2, n) remain; print one per line.
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
d2ec64ea15f59540e79ac47ff2d986162fbfc913 | bbcb35cd0e3e0724274e76dc976f00b89cda3d29 | /PySock 0.0.3/client.py | 365d8b8dbfe8d77d2ef33c2d0792a97ef91edce0 | [
"MIT"
] | permissive | ShikharY10/PySock | 3c01fc9d904e52c8de75ee9a02c6c28b70978f81 | 12423fa9d7fabee4d568587143d355a0053e177b | refs/heads/main | 2023-08-29T19:07:52.288157 | 2021-10-06T15:16:30 | 2021-10-06T15:16:30 | 391,745,937 | 1 | 2 | MIT | 2021-10-06T13:51:09 | 2021-08-01T21:47:35 | Python | UTF-8 | Python | false | false | 8,133 | py | import socket
import base64
import pickle
import threading
import multiprocessing
import hashlib
class MAIN():
    """Core PySock client.

    Connects to a server, announces the client name, frames outgoing
    messages as length-prefixed base64-encoded pickles, and dispatches
    incoming messages to user callbacks on a per-channel basis.
    """
    def __init__(self,client_name : str = None, secure = False, DSP_enable : bool = False, file : str = None, debug : bool = False, rememberServer = True, MPCL : bool = False, MTCL : bool = True):
        self.__debug = debug
        if secure:
            if not file:
                raise TypeError("__init__() missing 1 required positional argument: 'file'")
            else:
                self.__secure = secure
                self.__file_location = file
                self.__DSP_enable = DSP_enable
                self.__rememberSever = rememberServer
                # In secure mode the client identifies itself by a SHA-256
                # digest of its name instead of the plain string.
                self.__client_name = hashlib.sha256(bytes(client_name,"utf-8")).digest()
        else:
            self.__secure = secure
            self.__client_name = client_name
        # Exactly one callback-loop mode must be selected:
        # MTCL = run it in a thread, MPCL = run it in a separate process.
        # (Fixed "abd" typo in both error messages.)
        if MPCL and MTCL:
            raise ValueError("both 'MPCL' and 'MTCL' should not be set to True")
        elif not MPCL and not MTCL:
            raise ValueError("both 'MPCL' and 'MTCL' should not be set to False")
        else:
            self.__MPCL = MPCL
            self.__MTCL = MTCL
        self.__CUSTOM_CHANNEL = []   # channel names this client may use
        self.__MESSAGE_HANDLER = []  # inbox of received message dicts
        self.__CALLBACK_LOOP = []    # queued [callback, args] pairs
        self.__SENDER_QUEUE = []     # outbox drained by __sender
        self.__EX_COUNTER = {}       # reserved; never read in this class
        if rememberServer:
            pass  # placeholder: server persistence not implemented yet
        self.__CUSTOM_CHANNEL.append("DSP_MSG")
    def CLIENT(self,address : str = None, port : int = None):
        """Connect to the server, send the client name, and start the
        receiver / sender / callback worker loops."""
        if self.__debug:
            print("[Connecting To Server]")
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((address,port))
        # Announce ourselves: 32-byte '-'-padded length header, then the
        # base64-encoded pickled client name.
        ini = base64.b64encode(pickle.dumps(self.__client_name))
        self.sock.send(bytes(str(len(ini)).center(32,"-"),"utf-8"))
        self.sock.send(ini)
        if self.__debug:
            print("[Connected]")
        if self.__secure:
            # NOTE(review): get_node() is not defined in this class; it is
            # presumably provided elsewhere -- verify before using secure mode.
            self.__VARIFIED = self.get_node(
                file = self.__file_location,
                key = hashlib.sha256(bytes("__VARIFIED","utf-8")).digest()
            )
        receiver_thread = threading.Thread(
            target = self.__receiver,
            args = ()
        )
        sender_thread = threading.Thread(
            target = self.__sender,
            args = (
                self.sock,
                self.__SENDER_QUEUE
            )
        )
        if self.__MTCL:
            callback_loop_thread_process = threading.Thread(
                target = self.__callback_loop,
                args = (self.__CALLBACK_LOOP,)
            )
        else:
            # Fixed: the multiprocessing module has no Thread class; the
            # original "multiprocessing.Thread(...)" raised AttributeError.
            # NOTE(review): a child process does not share the in-memory
            # __CALLBACK_LOOP list, so MPCL mode also needs a shared queue
            # to be fully functional.
            callback_loop_thread_process = multiprocessing.Process(
                target = self.__callback_loop,
                args = (self.__CALLBACK_LOOP,)
            )
        receiver_thread.start()
        sender_thread.start()
        callback_loop_thread_process.start()
        if self.__secure:
            if not self.__VARIFIED:
                pass  # placeholder: unverified clients are not handled yet
    def __receiver(self):
        """Read length-prefixed frames from the server and append decoded
        message dicts to the inbox."""
        if self.__secure:
            # Placeholder: the secure-mode receive loop is not implemented.
            while True:
                if not self.__VARIFIED:
                    pass
                else:
                    pass
        else:
            while True:
                # NOTE(review): incoming frames use a 16-byte '|'-padded
                # header while outgoing frames use a 32-byte '-'-padded one;
                # confirm this asymmetry matches the server implementation.
                data_len = int(self.sock.recv(16).decode().strip("|"))
                if not data_len:
                    self.sock.close()
                    raise ConnectionError("[SERVER GOES DOWN - CONNECTION LOST]")
                recv_data = self.sock.recv(data_len).decode().strip("|").encode("utf-8")
                recv_data = pickle.loads(base64.b64decode(recv_data))
                if type(recv_data) is type({}):
                    # Accept direct messages and messages on known channels.
                    if recv_data["channel"] == "DSP_MSG":
                        self.__MESSAGE_HANDLER.append(recv_data)
                    elif recv_data["channel"] in self.__CUSTOM_CHANNEL:
                        self.__MESSAGE_HANDLER.append(recv_data)
    def __sender(self,sock,message_queue):
        """Busy-poll the outbox and ship each message as a length-prefixed,
        base64-encoded pickle frame."""
        while True:
            # Drain in FIFO order.  Fixed: the original popped inside an
            # enumerate() loop, which skips every other queued message.
            while message_queue:
                s = message_queue.pop(0)
                prepare_for_send = base64.b64encode(pickle.dumps(s))
                sock.send(bytes(str(len(prepare_for_send)).center(32,"-"),"utf-8"))
                sock.send(prepare_for_send)
    def __callback_loop(self,callback_lst):
        """Busy-poll queued [callback, args] pairs and invoke them."""
        while True:
            # Fixed: pop-inside-enumerate skipped every other callback.
            while callback_lst:
                func = callback_lst.pop(0)
                func[0](*func[1])
    def CREATE_CHANNEL(self,channels : str = None, multiple : bool = False):
        """Register one channel name (or, with multiple=True, a list of
        names).  Already-known channels are ignored."""
        if multiple:
            if type(channels) is type([]):
                for channel in channels:
                    if channel not in self.__CUSTOM_CHANNEL:
                        self.__CUSTOM_CHANNEL.append(channel)
        else:
            if channels not in self.__CUSTOM_CHANNEL:
                self.__CUSTOM_CHANNEL.append(channels)
    def HANDSHAKE(self):
        # Placeholder: protocol handshake not implemented yet.
        pass
    def LISTEN(self,channel : str = None, function : object = None, ex_counter = None, args = None):
        """If a message for `channel` is waiting, dequeue the oldest one and
        schedule function(message, *args) on the callback loop.

        ex_counter is accepted for API compatibility but currently unused.
        """
        if not channel:
            raise TypeError("LISTEN() missing 1 required positional argument: 'channel'")
        else:
            found = False
            index = None
            if channel in self.__CUSTOM_CHANNEL:
                for i,d in enumerate(self.__MESSAGE_HANDLER):
                    if d["channel"] == channel:
                        found = True
                        index = i
                        break
            if found:
                if not args:
                    p_data = self.__MESSAGE_HANDLER.pop(index)
                    self.__CALLBACK_LOOP.append([function,[p_data]])
                else:
                    # Prepend the message to the user-supplied args.
                    p_data = self.__MESSAGE_HANDLER.pop(index)
                    args = list(args)
                    args.insert(0,p_data)
                    self.__CALLBACK_LOOP.append([function,args])
    def SEND(self,channel : str = None, data = None):
        """Queue `data` for the server on `channel`.

        NOTE(review): `if not data` also rejects falsy payloads such as 0,
        "" and [] -- confirm whether that is intended.
        """
        if not channel:
            raise TypeError("SEND() missing 1 required positional argument: 'channel'")
        if not data:
            raise TypeError("SEND() missing 1 required positional argument: 'data'")
        # Whitelist of payload types (list, dict, tuple, set, str, int,
        # float, bytes), expressed via sample values.
        lst = [ [1,2], {"a":1}, (1,2), {1,2,}, "a", 12, 0.45, b"bytes" ]
        allowed_lst= []
        for l in lst:
            allowed_lst.append(type(l))
        if type(data) in allowed_lst:
            if channel in self.__CUSTOM_CHANNEL:
                prepare_send_data = {
                    "channel" : channel,
                    "sender_name" : self.__client_name,
                    "target_name" : "SERVER",
                    "data" : data
                }
                self.__SENDER_QUEUE.append(prepare_send_data)
    def SEND_TO_CLIENT(self,target_name : str = None, data = None):
        """Queue `data` for direct delivery to another client via the
        server's DSP_MSG channel."""
        if not target_name:
            raise TypeError("SEND() missing 1 required positional argument: 'target_name'")
        if not data:
            raise TypeError("SEND() missing 1 required positional argument: 'data'")
        # Same payload-type whitelist as SEND().
        lst = [ [1,2], {"a":1}, (1,2), {1,2,}, "a", 12, 0.45, b"bytes" ]
        allowed_lst= []
        for l in lst:
            allowed_lst.append(type(l))
        if type(data) in allowed_lst:
            prepare_send_data = {
                "channel" : "DSP_MSG",
                "sender_name" : self.__client_name,
                "target_name" : target_name,
                "data" : data
            }
            self.__SENDER_QUEUE.append(prepare_send_data)
class client():
    """Public facade over MAIN that exposes only the high-level API."""
    def __init__(self,client_name : str = None, secure = False, DSP_enable : bool = False, file : str = None, debug : bool = False, rememberServer = True, MPCL : bool = False, MTCL : bool = True):
        # Build the engine, then re-export its bound methods so callers
        # never touch MAIN's internals directly.
        core = MAIN(client_name, secure, DSP_enable, file, debug, rememberServer, MPCL, MTCL)
        self.CLIENT = core.CLIENT
        self.LISTEN = core.LISTEN
        self.CREATE_CHANNEL = core.CREATE_CHANNEL
        self.SEND = core.SEND
        self.SEND_TO_CLIENT = core.SEND_TO_CLIENT
| [
"65106263+ShikharY10@users.noreply.github.com"
] | 65106263+ShikharY10@users.noreply.github.com |
b9e81395259e21bffbb3382851a820fa419e4403 | b7e4ef885d681c823c286d48ccbfa6855a92d294 | /trimSAM.py | 355ce5422dabe2a93234734508684d3ab9adf157 | [] | no_license | nebo56/branch-point-detection-2 | a46c023c2fa4a194edb2eec156384192b5c63805 | 2a8f303e6ad00dcf605f781e1c2cc28cf920c275 | refs/heads/master | 2021-06-03T11:39:16.043602 | 2018-12-12T14:47:31 | 2018-12-12T14:47:31 | 31,903,360 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,019 | py | '''
Created on Jan 29, 2014
@author: Nejc Haberman
Script will trim reads that contains a genomic A mutation on first nucleotide
'''
import sys
# Decrease a numeric SAM tag field by one, e.g. "XM:i:1" -> "XM:i:0".
def decrease_sam_column(col):
    parts = col.rsplit(':')
    lowered = int(parts[-1]) - 1
    return "%s:%s:%d" % (parts[0], parts[1], lowered)
def trim_sam (fin_sam, fout_sam):
    # Copies a SAM file, trimming the first nucleotide of plus-strand reads
    # whose MD tag starts with a genomic "0A" mismatch (and the last
    # nucleotide of minus-strand reads ending in "T0"), adjusting POS,
    # CIGAR length, qualities and the XM/NM mismatch counters accordingly.
    # NOTE(review): reads whose FLAG is neither "0", "16" nor "4" are
    # silently dropped -- no branch below writes them out.
    finSam = open(fin_sam, "rt")
    foutSam = open(fout_sam, "w")
    line = finSam.readline()
    while line:
        if line[0] != '@':
            col = line.rstrip('\n').rsplit('\t')
            if col[1] != "4": #ignore unmapped reads
                seq = col[9]
                quality = col[10]
                missmatches = col[-2]
                tokens = missmatches.rsplit(':') #example: MD:Z:0A29
                if col[1] == "0": #same strand
                    if tokens[-1][0:2] == "0A":
                        # Drop the leading mismatched base: shift POS by +1,
                        # shorten SEQ/QUAL, and decrement XM and NM.
                        missmatches = missmatches.replace("0A","")
                        seq = seq[1:]
                        length = str(seq.__len__()) + "M"
                        quality = quality[1:]
                        XM = decrease_sam_column(col[-6])
                        NM = decrease_sam_column(col[-3])
                        position = str(int(col[3]) + 1)
                        foutSam.write(col[0] + '\t' + col[1] + '\t' + col[2] + '\t' + position + '\t' + col[4] + '\t' + length + '\t' + col[6] + '\t' + col[7] + '\t' + col[8] + '\t' + seq + '\t' + quality + '\t' + col[11] + '\t' + col[12] + '\t' + XM + '\t' + col[14] + '\t' + col[15] + '\t' + NM + '\t' + missmatches + '\t' + col[18] + '\n')
                    else:
                        foutSam.write(line)
                if col[1] == "16": #minus strand
                    if tokens[-1][-2:] == "T0":
                        # Mirror case: trim the trailing base instead.
                        missmatches = missmatches.replace("T0","")
                        seq = seq[0:-1]
                        length = str(seq.__len__()) + "M"
                        quality = quality[0:-1]
                        XM = decrease_sam_column(col[-6])
                        NM = decrease_sam_column(col[-3])
                        position = str(int(col[3])) #position on the anti strand stays the same
                        foutSam.write(col[0] + '\t' + col[1] + '\t' + col[2] + '\t' + position + '\t' + col[4] + '\t' + length + '\t' + col[6] + '\t' + col[7] + '\t' + col[8] + '\t' + seq + '\t' + quality + '\t' + col[11] + '\t' + col[12] + '\t' + XM + '\t' + col[14] + '\t' + col[15] + '\t' + NM + '\t' + missmatches + '\t' + col[18] + '\n')
                    else:
                        foutSam.write(line)
        else: #write header
            foutSam.write(line)
        line = finSam.readline()
    finSam.close()
    foutSam.close()
# Python 2 script entry: expects <input.sam> <output.sam> on the command line.
if sys.argv.__len__() == 3:
    fin_sam = sys.argv[1]
    fout_sam = sys.argv[2]
    trim_sam (fin_sam, fout_sam)
else:
    print "you need 2 arguments to run the script"
    quit()
| [
"skgthab@morecambe1.local"
] | skgthab@morecambe1.local |
69689c44f76b7900253462dda11d7c08bc9e9911 | 263ba541e1195a08042ba49edf2bf8689f1b41e3 | /hotel/models.py | 9aa1987746e86892b522983e969a2f15129b334a | [] | no_license | NitinThakur24/Resto | be7ae8aa2112065d8ee7f2dcfdf1608d0fa2344e | c8fe9b313881d3529a3cda0206b14fb080339b2a | refs/heads/master | 2022-12-12T16:21:34.709736 | 2019-04-29T03:55:56 | 2019-04-29T03:55:56 | 183,147,446 | 0 | 0 | null | 2022-06-27T16:55:09 | 2019-04-24T04:21:55 | HTML | UTF-8 | Python | false | false | 4,626 | py | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Customer(models.Model):
    """Customer profile attached to a Django auth User."""
    pending = 'Pending'
    verified = 'Verified'
    # NOTE(review): STATUS is defined but no field below uses it as choices.
    STATUS = (
        (pending,pending),
        (verified,verified),
    )
    customer = models.ForeignKey(User, on_delete=models.CASCADE)
    address = models.TextField()
    contact = models.CharField(max_length = 10)
    # Running counters updated elsewhere: lifetime order count and revenue.
    orders = models.IntegerField(default=0)
    total_sale = models.IntegerField(default=0)
    def __str__(self):
        # Display as "<first> <last>" of the linked auth user.
        return self.customer.first_name + " " + self.customer.last_name
class Staff(models.Model):
    """Restaurant staff member (admin / chef / delivery boy)."""
    admin = 'Admin'
    deliveryboy = 'Delivery Boy'
    chef = 'Chef'
    ROLES = (
        (admin,admin),
        (chef,chef),
        (deliveryboy,deliveryboy),
    )
    staff_id = models.ForeignKey(User, on_delete=models.CASCADE)
    address = models.TextField()
    contact = models.CharField(max_length = 10)
    # NOTE(review): this binds the User model's class-level attribute
    # descriptor, not a model field -- almost certainly unintended; the
    # email should come from staff_id.email at runtime.
    email = User.email
    salary = models.IntegerField()
    role = models.CharField(max_length = 30, choices = ROLES)
    def __str__(self):
        return self.staff_id.first_name
    # + " " + self.contact
class Order(models.Model):
    """A customer order with payment/delivery status tracking."""
    pending = 'Pending'
    completed = 'Completed'
    STATUS = (
        (pending,pending),
        (completed,completed),
    )
    cod = 'Cash On Delivery'
    card = 'Card Payment'
    upi = 'UPI Payment'
    PAYMENT = (
        (cod,cod),
        (card,card),
        (upi,upi),
    )
    pickup = 'PickUp'
    delivery = 'Delivery'
    # NOTE(review): TYPE is defined but no field below uses it as choices.
    TYPE = (
        (pickup, pickup),
        (delivery, delivery),
    )
    customer = models.ForeignKey(Customer,on_delete=models.CASCADE)
    # NOTE(review): timestamps are stored as "YYYY-MM-DD HH:MM:SS" strings
    # (see confirmOrder below) rather than DateTimeFields.
    order_timestamp = models.CharField(max_length=100, blank=True)
    delivery_timestamp = models.CharField(max_length=100, blank=True)
    payment_status = models.CharField(max_length = 100, choices = STATUS)
    delivery_status = models.CharField(max_length = 100, choices = STATUS)
    if_cancelled = models.BooleanField(default = False)
    total_amount = models.IntegerField()
    payment_method = models.CharField(max_length = 100, choices = PAYMENT)
    location = models.CharField(max_length=200, blank=True, null=True)
    delivery_boy = models.ForeignKey(Staff,on_delete=models.CASCADE, null=True, blank=True)
    def confirmOrder(self):
        """Stamp the order time (local time, seconds precision) and save."""
        self.order_timestamp = timezone.localtime().__str__()[:19]
        self.save()
    def confirmDelivery(self):
        """Stamp delivery time, mark payment and delivery completed, save."""
        self.delivery_timestamp = timezone.localtime().__str__()[:19]
        self.payment_status = self.completed
        self.delivery_status = self.completed
        self.save()
    def __str__(self):
        return self.customer.__str__()
class Food(models.Model):
    """A menu item with pricing, discount and popularity counter."""
    indian = 'Indian'
    south = 'South Indian'
    gujarati = 'Gujarati'
    punjabi = 'Punjabi'
    fast = 'Fast Food'
    COURSE = (
        (indian,indian),
        (south,south),
        (gujarati,gujarati),
        (punjabi,punjabi),
        (fast,fast),
    )
    disabled = 'Disabled'
    enabled = 'Enabled'
    STATUS = (
        (disabled, disabled),
        (enabled, enabled),
    )
    name = models.CharField(max_length=250)
    course = models.CharField(max_length = 50, choices = COURSE)
    status = models.CharField(max_length=50, choices=STATUS)
    content_description = models.TextField()
    base_price = models.FloatField()
    # NOTE(review): default receives the FloatField *instance* bound to
    # base_price above, not a numeric value -- Django expects a value or a
    # callable here; this is almost certainly a bug (see the commented-out
    # calculateSalePrice below for the apparent intent).
    sale_price = models.FloatField(default=base_price)
    discount = models.DecimalField(default=0, decimal_places=2, max_digits=5)
    image = models.FileField(blank=True, null =True)
    # Popularity counter (times ordered).
    num_order = models.IntegerField(default=0)
    def __str__(self):
        return self.name
    #def calculateSalePrice(self):
    #    self.sale_price = (100.0 - self.discount)/100.0 * self.base_price
class Comment(models.Model):
    """A short free-text comment left by a customer."""
    user = models.ForeignKey(Customer, on_delete=models.CASCADE)
    content = models.CharField(max_length=250)
class Data(models.Model):
    """Per-day sales/expenses figures for reporting."""
    date = models.DateField()
    sales = models.IntegerField()
    expenses = models.IntegerField()
class OrderContent(models.Model):
    """Line item: a food and quantity belonging to an order."""
    quantity = models.IntegerField(default=1)
    food = models.ForeignKey(Food, on_delete=models.CASCADE)
    order = models.ForeignKey(Order, on_delete=models.CASCADE)
class Cart(models.Model):
    """Pre-order line item: a food and quantity in a user's cart."""
    quantity = models.IntegerField(default=1)
    food = models.ForeignKey(Food, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
class DeliveryBoy(models.Model):
    """Assignment of a staff member to deliver a specific order."""
    order= models.ForeignKey(Order, on_delete=models.CASCADE)
    delivery_boy = models.ForeignKey(Staff, on_delete=models.CASCADE)
| [
"nitinthakur229@gmail.com"
] | nitinthakur229@gmail.com |
d9e8ec8dd1656e555d3ddca7a06763865eb6a8e0 | ea0c78111f5900305ac03b9c6f06749e9239728d | /finalFlickerDataset.py | 36bd5455c4b03c064acf3c3f258fa7013fbecbcc | [] | no_license | prashant15072/Recommender | 5312b64c2d2de3fbe88ad73cd82dbfb9422927b3 | 6728b1b8081da3e1ce34ff7fee8c080c1029d10f | refs/heads/master | 2020-04-09T04:00:15.699884 | 2018-12-02T03:16:47 | 2018-12-02T03:16:47 | 160,006,493 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | filo = open("test1.dat","r")
# Concatenate the test and train line lists (filo is opened on test1.dat
# just above) into finalDataset.txt.
xx = filo.readlines()
filo.close()
fill = open("train1.dat","r")
xxx = fill.readlines()
fill.close()
# Fixed: the original accumulated into a variable named `str` (shadowing
# the builtin) with quadratic += concatenation; join once instead.
# Each input line keeps its own newline, so the extra "\n" double-spaces
# the output -- behaviour preserved.
parts = []
for x in xx:
    parts.append(x + "\n")
for x in xxx:
    parts.append(x + "\n")
filllo = open("finalDataset.txt","w")
filllo.write("".join(parts))
filllo.close()
"prashant15072@iiitd.ac.in"
] | prashant15072@iiitd.ac.in |
6b93b8fdbaf53a5289edeb52321ed9813ebe08ba | 6af9dae1c12f10b336f05dadd8b1ab0639e82b06 | /meiduo_mall/apps/goods/migrations/0002_goodsvisitcount.py | beebb9d59191018ca4e0eaf4350b15828ae21337 | [] | no_license | Yaooooooooooooo/meiduo_mall | 72ae2eff3875b993ed07c45f7d29f1d83f49cefb | b8b2a0c9b01d2ba54d1993ab28815f64418dd2a6 | refs/heads/master | 2020-09-30T13:40:15.060914 | 2019-12-11T12:40:24 | 2019-12-11T12:40:24 | 227,298,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-06-28 08:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: creates the GoodsVisitCount model (per-day,
    per-category visit counters in table tb_goods_visit).  Do not edit by
    hand -- schema changes belong in a new migration."""
    dependencies = [
        ('goods', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='GoodsVisitCount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('count', models.IntegerField(default=0, verbose_name='访问次数')),
                ('date', models.DateField(auto_now_add=True, verbose_name='访问时间')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='goods.GoodsCategory', verbose_name='类别')),
            ],
            options={
                'verbose_name': '统计分类商品访问量',
                'verbose_name_plural': '统计分类商品访问量',
                'db_table': 'tb_goods_visit',
            },
        ),
    ]
| [
"1123719336@qq.com"
] | 1123719336@qq.com |
5279db03fb74f9841d47a42e21e912db658fad9f | 449a5b1361394d15d988faf0295aeaf6448a02a0 | /hermes/web/receipt.py | 22628957562668b0128a8f1c9a5268ffc2bec7af | [] | no_license | cenenh/Hermes | c6a14d91e0f6344ed68f9b8a74abbb6a9f74f55a | df28bb5cd2fb2a1e867f63a249ad407710f42d1b | refs/heads/master | 2021-01-24T17:14:31.900770 | 2016-07-15T22:38:04 | 2016-07-15T22:38:04 | 61,477,072 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | # coding=utf-8
import asyncio
import logging
import json
from hermes.mysql import MySQL
from aiohttp import web, Response, errors
from hermes.constant import HTTP_HEADERS
@asyncio.coroutine
def get_receipt_handler(request):
    """aiohttp handler: JSON pay summary for the employee card in
    request.GET['card_id'].

    SECURITY: card_id is interpolated straight into the SQL string below --
    an SQL-injection vector; the query should be parameterised.
    """
    req = request.GET
    mysql = MySQL()
    yield from mysql.connect()
    query = """SELECT card_id, employee_name, unit_price, date,
               working_time FROM employee natural join works
               where card_id = '%s'""" % req['card_id']
    rows = yield from mysql.execute_query(query)
    yield from mysql.close()
    response = {
        'pay': 0,
        'card_id': req['card_id'],
        'working_data': []
    }
    for row in rows:
        # Total pay is the sum of per-day unit prices.
        response['pay'] += row['unit_price']
        response['working_data'].append({
            'date': row['date'],
            'unit_price': row['unit_price'],
        })
    # NOTE(review): json.dumps will raise if row['date'] is a date/datetime
    # object -- confirm the MySQL layer returns strings here.
    return web.Response(headers=HTTP_HEADERS,
                        text=json.dumps(response))
| [
"gseunwo@gmail.com"
] | gseunwo@gmail.com |
4ed23654ad6fd0936ab4e0fa2f304df25d69b4bc | fdc7bb609cc1e2b39f0f0b211e849b4971a3ae01 | /products.py | 7843975fe443bdb07f075fad9417774ec5bacc83 | [] | no_license | moonlove8310/products | 3f61378738985986da128bf895519960ee26be85 | bffd5c95b566133ceba857eee204d5cfb298851b | refs/heads/master | 2020-06-07T03:05:34.724374 | 2019-06-20T12:06:26 | 2019-06-20T12:06:26 | 192,906,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | products = []
# Interactive entry loop: collect (name, price) pairs until 'q' is typed.
while True:
    name = input('please input the thing you bought: ')
    # 'q' is the sentinel that ends data entry.
    if name == 'q':
        break
    # NOTE(review): price is kept as the raw input string, not a number.
    price = input('please input the price of the thing: ')
    # i = []
    # i.append(name)
    # i.append(price)
    # i = [name, price]
    # products.append(i)
    products.append([name, price])
# print(products)
for p in products:
# print(p)
# print(p[0])
print('the price of', p[0], 'is', p[1]) | [
"moonlove8310@gmail.com"
] | moonlove8310@gmail.com |
9ea95b3cbe04b5c4c0ea9517f7307f3eab838f14 | 349dadbf45b7c12a3fe41c5e0421c0488b679919 | /transformers/src/transformers/models/pegasus/tokenization_pegasus_fast.py | 67c065e7ecf815ac334c6ebdf52ce62839538df3 | [
"BSD-3-Clause",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Apache-2.0"
] | permissive | salesforce/CodeRL | c772e408bac690527759f416ea22add4c97e5bec | 51db4ff983d5376e62b9e7eba150316a651c80d9 | refs/heads/main | 2023-08-18T18:38:02.740995 | 2022-11-18T16:14:28 | 2022-11-18T16:14:28 | 508,912,853 | 412 | 52 | BSD-3-Clause | 2023-08-31T07:51:27 | 2022-06-30T02:54:36 | Python | UTF-8 | Python | false | false | 9,820 | py | # coding=utf-8
# Copyright 2020 Google and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model PEGASUS."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # Without the optional sentencepiece dependency the slow tokenizer
    # cannot be imported; expose None so callers can feature-detect.
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
# Marker sentencepiece uses for word-initial pieces.
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
# Hub locations of the pretrained vocab/tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
# Maximum model input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library). Based on
    [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        mask_token (`str`, *optional*, defaults to `"<mask_2>"`):
            The token used for masking single token values. This is the token used when training this model with masked
            language modeling (MLM). This is the token that the PEGASUS encoder will try to predict during pretraining.
            It corresponds to *[MASK2]* in [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive
            Summarization](https://arxiv.org/pdf/1912.08777.pdf).
        mask_token_sent (`str`, *optional*, defaults to `"<mask_1>"`):
            The token used for masking whole target sentences. This is the token used when training this model with gap
            sentences generation (GSG). This is the sentence that the PEGASUS decoder will try to predict during
            pretraining. It corresponds to *[MASK1]* in [PEGASUS: Pre-training with Extracted Gap-sentences for
            Abstractive Summarization](https://arxiv.org/pdf/1912.08777.pdf).
        additional_special_tokens (`List[str]`, *optional*):
            Additional special tokens used by the tokenizer. If no additional_special_tokens are provided <mask_2> and
            <unk_2, ..., unk_102> are used as additional special tokens corresponding to the [original PEGASUS
            tokenizer](https://github.com/google-research/pegasus/blob/939830367bcf411193d2b5eca2f2f90f3f9260ca/pegasus/ops/pretrain_parsing_ops.cc#L66)
            that uses the tokens 2 - 104 only for pretraining
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,  # entries 2 - 104 are only used for pretraining
        **kwargs
    ):
        """Build the special-token list (mask_token_sent + <unk_2>..<unk_102>) and delegate to the fast base class."""
        self.offset = offset
        if additional_special_tokens is not None:
            assert isinstance(
                additional_special_tokens, list
            ), f"additional_special_tokens should be of type {type(list)}, but is {type(additional_special_tokens)}"
            # Prepend mask_token_sent unless the caller already included it.
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            # Duplicates here mean the caller supplied a mis-numbered <unk_x> list.
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    f"Please make sure that the provided additional_special_tokens do not contain an incorrectly shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            # Default: mask_token_sent followed by <unk_2> .. <unk_{offset-1}>.
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original SentencePiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        """Return a 0/1 mask over *seq* marking every special token id (excluding <unk>)."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        # Sanity check: special ids must be exactly 0 .. len(additional)+2.
        assert all_special_ids == set(
            range(len(self.additional_special_tokens) + 3)
        ), f"There should be 3 special tokens: mask_token, pad_token, and eos_token + {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            # Trailing [1] accounts for the eos token appended by build_inputs_with_special_tokens.
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """
        Build model inputs from a sequence by adding eos to the end. no bos token is added to the front.

        - single sequence: `X </s>`
        - pair of sequences: `A B </s>` (not intended use)

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model into *save_directory* and return its path as a 1-tuple."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            # NOTE(review): returns None here instead of the annotated tuple; callers must handle it.
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        # Avoid copying a file onto itself when saving back into the source directory.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| [
"hungle@salesforce.com"
] | hungle@salesforce.com |
b86737529613c43c2fac1b0c14193760fbed8cf1 | 12500bdc6c68db0f9075eba6421d41e4f1190464 | /analyse/__init__.py | 74198d7472255771967a480ff29a279aac7b8aa3 | [] | no_license | jphme/jptrade5 | 0d305f008bf3f5cd9e38f337fae794e9ce7ae546 | 39a4066e67f8af7e77924a4bce551512da063756 | refs/heads/master | 2023-07-19T20:23:34.791140 | 2014-10-24T00:41:37 | 2014-10-24T00:41:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19 | py | __author__ = 'jph'
| [
"jpdus@users.noreply.github.com"
] | jpdus@users.noreply.github.com |
3af099fce18a35cd4837291a2a99727140954c97 | 74f8d8c8030ce0c8cd3622cb99f0a668ba93a7e8 | /dialogue-engine/test/programytest/parser/template/node_tests/test_authorise.py | d421ef78f11762c08248c75b2c9b95023a672603 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Tommytosik/cotoba-agent-oss | 3124a376ac0ca1147a58405a8f269a0eb68bc014 | 78e5c2c244b92e71755783d972228904c4d0d373 | refs/heads/master | 2022-12-08T15:44:27.731731 | 2020-08-04T08:36:41 | 2020-08-04T08:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,002 | py | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.authorise import TemplateAuthoriseNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class MockTemplateAuthoriseNode(TemplateAuthoriseNode):
    """Test double for TemplateAuthoriseNode whose resolution always fails."""

    def __init__(self):
        super().__init__()

    def resolve_to_string(self, context):
        # Simulate an internal failure during template resolution.
        raise Exception("This is a failure")
class TemplateAuthoriseNodeTests(ParserTestsBaseClass):
    """Unit tests for TemplateAuthoriseNode construction, XML rendering and error handling."""

    def test_node_init(self):
        # A node with only a role renders without the optional denied_srai part.
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        node = TemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        self.assertEqual("root", node.role)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        self.assertEqual("[AUTHORISE (role=root)]", node.to_string())

    def test_node_init_optiona_srai(self):
        # Setting denied_srai extends the string representation.
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        node = TemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        self.assertEqual("root", node.role)
        node.denied_srai = "ACCESS_DENIED"
        self.assertIsNotNone(node)
        self.assertEqual("ACCESS_DENIED", node.denied_srai)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        self.assertEqual("[AUTHORISE (role=root, denied_srai=ACCESS_DENIED)]", node.to_string())

    def test_to_xml_service_no_content(self):
        # Childless node serialises as a self-closing <authorise> element.
        root = TemplateNode()
        node = TemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        xml = root.xml_tree(self._client_context)
        self.assertIsNotNone(xml)
        xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
        self.assertEqual('<template><authorise role="root" /></template>', xml_str)

    def test_to_xml_service_with_content(self):
        # Child word nodes appear as element text.
        root = TemplateNode()
        node = TemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        node.append(TemplateWordNode("Hello"))
        root.append(node)
        self.assertEqual(len(root.children), 1)
        xml = root.xml_tree(self._client_context)
        self.assertIsNotNone(xml)
        xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
        self.assertEqual('<template><authorise role="root">Hello</authorise></template>', xml_str)

    def test_to_xml_service_no_content_and_optional_srai(self):
        # denied_srai is emitted as an attribute (attributes are alphabetised by ElementTree).
        root = TemplateNode()
        node = TemplateAuthoriseNode()
        node.role = "root"
        node.denied_srai = "ACCESS_DENIED"
        self.assertIsNotNone(node)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        xml = root.xml_tree(self._client_context)
        self.assertIsNotNone(xml)
        xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
        self.assertEqual('<template><authorise denied_srai="ACCESS_DENIED" role="root" /></template>', xml_str)

    def test_node_exception_handling(self):
        # Failures inside resolve_to_string must propagate from both the node and the tree.
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        node = MockTemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        self.assertEqual("root", node.role)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        with self.assertRaises(Exception):
            node.resolve_to_string(self._client_context)
        with self.assertRaises(Exception):
            root.resolve(self._client_context)
| [
"cliff@cotobadesign.com"
] | cliff@cotobadesign.com |
3d2aaa0cb8542634ad2e34bb34b0f93a00592949 | b97fc333f16e8a73deabf542378a1ae8e3a0cd83 | /prometheus/check_prometheus_disk | 48ec8ad0877dfd5c8898c0ba34e5bf3a69551bb0 | [
"Apache-2.0"
] | permissive | wftech/icinga-check-prometheus | cf6e3e6ab4881433aee7fb1503038c01231991f0 | c37d1d1da023c03d9927cb6d96cb4b244e9386bd | refs/heads/master | 2020-03-30T16:05:31.861953 | 2019-07-11T07:39:09 | 2019-07-11T07:39:09 | 151,392,733 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,680 | #!/usr/bin/env python3
#
# This plugin is designed as a nagios compatible check plugin to use with
# Icinga 2 and others
#
import argparse
import sys
from datetime import datetime, timedelta
from urllib.parse import urljoin
import requests
# Explicitly disable any environment-configured HTTP(S) proxies so the
# check always talks to Prometheus directly.
proxies = {
    "http": None,
    "https": None,
}
def humanize_bytes(bytes, precision=1):
    """Format a byte count as a human-readable string, e.g. '1.5 kB'."""
    # Exact singular form for one byte.
    if bytes == 1:
        return '1 byte'
    # Walk the units from largest to smallest and stop at the first one
    # that fits; anything below 1 kB falls through to plain bytes.
    units = [
        (1 << 50, 'PB'),
        (1 << 40, 'TB'),
        (1 << 30, 'GB'),
        (1 << 20, 'MB'),
        (1 << 10, 'kB'),
    ]
    for factor, suffix in units:
        if bytes >= factor:
            break
    else:
        factor, suffix = 1, 'bytes'
    return '%.*f %s' % (precision, bytes / factor, suffix)
class PrometheusAPI:
    """Minimal wrapper around the Prometheus HTTP query API."""

    def __init__(self, endpoint='http://127.0.0.1:9090/'):
        """
        :param endpoint: base URL of the Prometheus server
        """
        self.endpoint = endpoint

    def _to_timestamp(self, input, base=None):
        """
        Convert *input* to a UNIX timestamp for Prometheus.

        Accepts a ``datetime``, the literal string ``'now'``, a positive
        absolute timestamp, ``0`` (meaning now), or a negative number
        interpreted as an offset in seconds relative to *base*.
        """
        if type(input) == datetime:
            return input.timestamp()
        if input == 'now':
            return datetime.utcnow().timestamp()
        if type(input) in [int, float]:
            if input > 0:
                return input
            if input == 0:  # return now
                return datetime.utcnow().timestamp()
            if input < 0:
                base = self._to_timestamp(base)
                return base + input
        # Unsupported input type: fail loudly rather than silently return None.
        assert type(input) == float

    def query(self, query='prometheus_build_info'):
        """Run an instant query and return the decoded JSON response."""
        return self._get(
            uri='/api/v1/query',
            params=dict(
                query=query,
            )
        )

    def scalar(self, query):
        """Run *query* and return the first result's value as an int."""
        data = self.query(query)
        return int(data['data']['result'][0]['value'][1])

    def _get(self, uri, params, method='GET', timeout=30):
        """Issue a GET request against the API and return the parsed JSON body.

        :param timeout: seconds before the request is aborted; a monitoring
            check must never hang indefinitely on a stuck server.
        """
        url = urljoin(self.endpoint, uri)
        assert method == 'GET'
        result = requests.get(
            url=url,
            params=params,
            proxies=proxies,
            timeout=timeout,
        )
        return result.json()
# Module-level client pointing at the default local Prometheus instance.
prom = PrometheusAPI()
def main():
    """Parse CLI arguments, query Prometheus and report disk usage.

    Exit status follows the Nagios plugin convention:
    0 = OK, 1 = WARNING, 2 = CRITICAL (raised via SystemExit).
    """
    parser = argparse.ArgumentParser('Check disk')
    parser.add_argument('--instance', required=True)
    parser.add_argument('--disk', required=True, default='/')
    parser.add_argument('--critical-percentage-free', type=int)
    parser.add_argument('--critical-bytes-free', type=int)
    parser.add_argument('--warning-percentage-free', type=int)
    parser.add_argument('--warning-bytes-free', type=int)
    args = parser.parse_args()
    # A leading '/' means a Unix mountpoint (node_exporter metrics);
    # anything else is treated as a Windows volume (wmi_exporter metrics).
    if args.disk.startswith('/'):
        query_size = 'node_filesystem_size{instance="%s",mountpoint="%s"}' % (args.instance, args.disk)
        query_free = 'node_filesystem_free{instance="%s",mountpoint="%s"}' % (args.instance, args.disk)
    else:
        query_size = 'wmi_logical_disk_size_bytes{instance="%s",volume="%s"}' % (args.instance, args.disk)
        query_free = 'wmi_logical_disk_free_bytes{instance="%s",volume="%s"}' % (args.instance, args.disk)
    disk_free = prom.scalar(query_free)
    disk_size = prom.scalar(query_size)
    percent_free = int(disk_free / disk_size * 100)
    retv, state = 0, ''
    perf_warn, perf_crit = '', ''
    # Warning thresholds are evaluated first so that a matching critical
    # threshold below overrides them.
    if args.warning_bytes_free:
        perf_warn = disk_size - args.warning_bytes_free
        if disk_free < args.warning_bytes_free:
            retv, state = 1, 'WARNING - '
    if args.warning_percentage_free:
        perf_warn = disk_size - int(disk_size * args.warning_percentage_free / 100)
        if percent_free < args.warning_percentage_free:
            retv, state = 1, 'WARNING - '
    if args.critical_bytes_free:
        perf_crit = disk_size - args.critical_bytes_free
        if disk_free < args.critical_bytes_free:
            retv, state = 2, 'CRITICAL - '
    if args.critical_percentage_free:
        perf_crit = disk_size - int(disk_size * args.critical_percentage_free / 100)
        if percent_free < args.critical_percentage_free:
            retv, state = 2, 'CRITICAL - '
    # Nagios perfdata: label=value;warn;crit;min;max (value is *used* bytes).
    perfdata = '{}={}B;{};{};0;{}'.format(args.disk, (disk_size - disk_free), perf_warn, perf_crit, disk_size)
    print('{}{} free of {} ({}%)|{}'.format(
        state, humanize_bytes(disk_free), humanize_bytes(disk_size),
        percent_free, perfdata))
    raise SystemExit(retv)
if __name__ == '__main__':
    try:
        sys.exit(main())
    except Exception as e:
        # Any unexpected error maps to Nagios exit code 3 (UNKNOWN).
        print('Error - %s' % str(e))
        sys.exit(3)
| [
"VerosK@users.noreply.github.com"
] | VerosK@users.noreply.github.com | |
3ce44a2b8916f2fc89ed11d0e3d777799793513f | a0bf83b2b8dfd34c265a1acd4bb3b62b0c61f598 | /portmone/urls.py | 96a792df340ab872d027e31fd21a897818ddafb3 | [
"MIT"
] | permissive | satels/django-portmone-ua | 2f3394576e5107b0432f0c0daf3b5c6fdfe170d9 | f2e1b170f4738f441644e420082a8615cb4e08dc | refs/heads/master | 2020-08-23T07:11:39.557487 | 2020-02-14T17:12:43 | 2020-02-14T17:12:43 | 216,567,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # -*- coding: utf-8 -*-
# `url()` was removed from django.conf.urls in newer Django releases;
# fall back to `re_path`, which is a drop-in replacement for regex routes.
try:
    from django.conf.urls import url
except ImportError:
    from django.urls import re_path as url
from portmone import views
# Payment flow endpoints: gateway callback, then success/failure pages.
urlpatterns = [
    url(r'^result/$', views.authorize_result, name='portmone-result'),
    url(r'^success/$', views.success, name='portmone-success'),
    url(r'^fail/$', views.fail, name='portmone-fail'),
]
| [
"satels@gmail.com"
] | satels@gmail.com |
62ff258681c63933e5126fa69fd2e6b64e22104d | 30432bfcba3414b1307d8a2a17a3af3e9402ccb4 | /zeronine/migrations/0011_remove_joindetail_element_code.py | 8a2397230ea470e58f08ec958c4127a79b1bc8c2 | [] | no_license | hyewonSim/zeronine-project | 001309125c85829ede989ed2c4bb99079c2ad073 | 61cd6ca8db3b81a837799b7b11090895a0783737 | refs/heads/master | 2023-07-26T18:31:17.859733 | 2021-09-05T06:46:31 | 2021-09-05T06:46:31 | 402,990,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # Generated by Django 3.1.5 on 2021-08-21 18:13
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the element_code field from JoinDetail."""

    dependencies = [
        ('zeronine', '0010_auto_20210821_1603'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='joindetail',
            name='element_code',
        ),
    ]
| [
"shim000113@naver.com"
] | shim000113@naver.com |
1d459a517c03e78b3a3af2ce91a294d65b06ab5a | 104ed042183a204346031bcd6cabbcb29fbf8268 | /tests/example.py | 754f2a41d5cf52ae4c9f62242e8ce79c6f8ef2c1 | [] | no_license | lyrl/console-logging | 4712b4b6577243c4fc63b57e0832e77bc8bbc37a | facf7eab28e0016aace89a0dec5868786286d3e9 | refs/heads/master | 2021-01-22T07:39:45.283392 | 2017-09-02T06:41:15 | 2017-09-02T06:41:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from console_logging import console
# Smoke-test the console-logging wrapper at each severity level.
console.log("This is a log.")
console.error("This is an error.")
console.info("This is some neutral info.")
console.success("This is a success message.") | [
"priansh123@me.com"
] | priansh123@me.com |
16182ef5cf21139670d8f12551976a7d883b8138 | 7fdb54681f4d83bd1135f0da20d6f858f755b971 | /active_projects/eop/chapter1/maps.py | d1ae1e214a5926fc28e1f2ffeab964b88f2682cc | [
"MIT"
] | permissive | miriamhille/manim | 385da4ef0fdf828690ae8c54c4c59f873cedaf42 | af3a8d5809b0f8ac579dbfa02b223efd16987e06 | refs/heads/master | 2020-04-30T23:56:43.328357 | 2019-03-22T14:26:33 | 2019-03-22T14:26:33 | 177,155,840 | 0 | 0 | null | 2019-03-22T14:33:25 | 2019-03-22T14:33:25 | null | UTF-8 | Python | false | false | 1,104 | py | from big_ol_pile_of_manim_imports import *
class MapsOfPossibilities(TeacherStudentsScene):
    """Teacher/students scene: the teacher says coin flips are just a metaphor."""
    CONFIG = {
        "default_pi_creature_kwargs": {
            "color": MAROON_E,
            "flip_at_start": True,
        },
    }
    def construct(self):
        self.wait(2)
        teacher_text = TextMobject("Coin flips are just a metaphor")
        self.teacher_says(teacher_text, target_mode = "happy")
        self.wait()
        # Give each student a distinct reaction to the line.
        self.play(
            self.students[0].change_mode, "thinking",
            self.students[1].change_mode, "conniving",
            self.students[2].change_mode, "tease"
        )
        self.wait(3)
        # Remove the speech bubble and have everyone exchange looks.
        self.play(
            Uncreate(self.teacher.bubble),
            Uncreate(self.teacher.bubble.content),
            self.students[0].look_at, self.teacher,
            self.students[1].look_at, self.teacher,
            self.students[2].look_at, self.teacher,
            self.teacher.look_at, self.students,
        )
        # Close on a happy note for all characters.
        self.play(
            self.change_all_student_modes, "happy",
            self.teacher.change_mode, "happy",
) | [
"ben@hambrecht.ch"
] | ben@hambrecht.ch |
0aaaa602501647f1847867e84027ca8ac9a01bce | 1333eb44a7a02b8fa8b5a2be711e4b5ca9323351 | /Accounts/migrations/0004_alter_account_id.py | 3f7da6aabbc3f563bbc92896e6b6bd2557057468 | [] | no_license | teekaygithub/TomlineBanking | e40dfa4eb3113c44b10c0e9d2837deb4781a19fa | 1af53492aa0d7df59578326316c0c60d068d9d5b | refs/heads/main | 2023-06-07T08:45:07.031702 | 2021-07-06T00:52:58 | 2021-07-06T00:52:58 | 330,070,277 | 0 | 0 | null | 2021-02-01T21:01:51 | 2021-01-16T02:49:01 | JavaScript | UTF-8 | Python | false | false | 465 | py | # Generated by Django 3.2.4 on 2021-07-05 19:20
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated migration: switch Account's primary key to a UUID field."""

    dependencies = [
        ('Accounts', '0003_rename_description_account_type'),
    ]
    operations = [
        migrations.AlterField(
            model_name='account',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
    ]
| [
"tomoyuki.kato@yahoo.com"
] | tomoyuki.kato@yahoo.com |
dd0ce0a23fa377025e938b98df37a94cbe02026a | 31bec62400386fc26142d52a1e45052c998cd085 | /examples/HF_deriv.py | 4ec4b10399870c23259da282c1182bb0afaaa8d4 | [
"MIT"
] | permissive | gharib85/wick | 6b4d80259795c64aae0166a56c229c646a1b1245 | 59f8a4b522f11875320e9e98b3af57fe7b7d8313 | refs/heads/master | 2023-09-02T08:08:09.071181 | 2021-10-16T01:13:27 | 2021-10-16T01:13:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | from wick.expression import AExpression
from wick.wick import apply_wick
from wick.convenience import one_e, two_e, braE1, ketE1, braE2, ketE2
# Build the normal-ordered one- and two-electron Hamiltonian over the
# occupied/virtual spaces, then print HF energy derivative expressions.
H1 = one_e("f", ["occ", "vir"], norder=True)
H2 = two_e("I", ["occ", "vir"], norder=True)
H = H1 + H2
# first derivative wrt X*
bra = braE1("occ", "vir")
S = bra*H
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
print("dE/dX* =")
print(final)
# first derivative wrt X
ket = ketE1("occ", "vir")
S = H*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
# Transpose so the index ordering matches the dE/dX* expression above.
final.transpose((1, 0))
print("dE/dX =")
print(final)
print("")
# second derivative wrt X*X*
bra = braE2("occ", "vir", "occ", "vir")
S = bra*H
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
print("d^2E/dX*dX* =")
print(final)
# second derivative wrt X*X
ket = ketE1("occ", "vir")
bra = braE1("occ", "vir")
S = bra*H*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final.transpose((0, 1, 3, 2))
print("d^2E/dX*dX =")
print(final)
# second derivative wrt XX
ket = ketE2("occ", "vir", "occ", "vir")
S = H*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final.transpose((2, 3, 0, 1))
print("d^2E/dXdX =")
print(final)
| [
"whiteaf862@gmail.com"
] | whiteaf862@gmail.com |
0c95ceb8dc262c26f94b8b6fa1bd7d5d209ffe19 | 7bbef167f9cc73ba9084158ec21af8f406bc0f38 | /EE_624_Assignment_2/2/ctransform.py | 8cbb1e7285ee73526b185950e07602bd9169f125 | [] | no_license | ajdroid/DIP15 | 903ec64d2dffa4b0f4f54e6f38c7e36dc2794e5b | 62c562557a708c267e2a1f9e1ca3e2d72129b867 | refs/heads/master | 2021-03-12T20:00:56.289659 | 2015-12-02T18:06:35 | 2015-12-02T18:06:35 | 41,491,785 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | from __future__ import division
from math import sqrt
import numpy as np
import cv2
from matplotlib import pyplot as plt
def topfilt (block, f=0):
	"""Compress one image block: keep only its 16 largest DCT coefficients.

	Returns the inverse DCT of the thresholded coefficients rescaled to
	uint8 pixel values. Assumes the block yields at least 16 coefficients
	(an 8x8 tile has 64). Pass f=1 for a debug dump.
	"""
	dct = cv2.dct(block)
	A = dct.flatten()
	# Threshold at the 16th-largest coefficient: zeroing everything below
	# it keeps (at least) the top 16 coefficients. np.sort(A)[-16] is the
	# same value the previous index-sort computed, without the O(n log n)
	# Python-level lambda sort.
	check = np.sort(A)[-16]
	if f:
		# Single-argument print with parentheses works in Python 2 and 3.
		print(A)
		print(check)
		print(dct)
	dct[dct<check]=0
	block = np.uint8(cv2.idct(dct)*255)
	return block
# Load the image as grayscale and normalise to [0, 1] floats for cv2.dct.
img = cv2.imread('../sunflower.jpg',0)
img_float32 = np.float32(img)/255.0
dct = cv2.dct(img_float32)
img_back = np.zeros(img.shape, dtype=float)
# Reconstruct the image tile-by-tile from the top DCT coefficients of each
# 8x8 block (assumes image dimensions are multiples of 8 -- verify).
for i in range(0,img.shape[0],8):
	for j in range(0,img.shape[1],8):
		img_back[i:i+8,j:j+8] = topfilt(img_float32[i:i+8,j:j+8])
# Debug dump of the first block's coefficients (Python 2 print statement).
print topfilt(img_float32[:8,:8],1)
# for i in range(0,img.shape[0],8):
# 	for j in range(0,img.shape[1],8):
# 		dct_comp[i:i+8,j:j+8] = topfilt(img_float32[i:i+8,j:j+8])
imgn_back = np.uint8(cv2.idct(dct)*255)
# Show input and block-wise reconstruction side by side.
plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(img_back, cmap = 'gray')
plt.title('Reconstructed from blocks'), plt.xticks([]), plt.yticks([])
plt.show() | [
"abhijatbiswas@gmail.com"
] | abhijatbiswas@gmail.com |
37ded20f05f26421a9e451e059c8f4a36ea53226 | 3ea2b962fa16acafcc946e800041ee9ab1a14afd | /example_agent.py | 6801680b423bea951a414d8dbfb70ff55a98688e | [] | no_license | kth0522/tetris-gym | ae62f03af58c5f77985209c66630c555ef9831a1 | ab90f211a93c0394b727b7e34bbf73bd1cf29d1f | refs/heads/master | 2022-10-13T13:30:22.456721 | 2020-06-11T15:13:34 | 2020-06-11T15:13:34 | 271,578,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | import gym
import gym_tetris
def random_agent(episodes=1000000):
    """Take random actions in the Tetris environment, rendering each step.

    NOTE(review): despite the parameter name, each loop iteration is a single
    environment *step*, and the loop ends at the first terminal state --
    confirm whether multi-episode behaviour was intended.
    """
    env = gym.make('Tetris-v0')
    env.reset()
    env.render()
    for e in range(episodes):
        action = env.action_space.sample()
        state, reward, done, _ = env.step(action)
        env.render()
        print(done)
        if done:
            break
# Run a single random rollout when executed as a script.
if __name__ == "__main__":
    random_agent()
| [
"rlaxogh369@gmail.com"
] | rlaxogh369@gmail.com |
d0a4182705d6ce87e37ca56ee294f560e77317be | f833259e86569a3c5caf52393023a54ea387dd6c | /Assignment2/MasterFile/2019MCS2574/vignere.py | 395072eded45b7fb681a2100fbc1917f11346125 | [] | no_license | thevivekcode/Cryptography-ComputerSecurity | b1e74113d94505b3c97d500444274dbff4af6fac | c952cff0b602d733f1b4080b439da22673c3c893 | refs/heads/master | 2022-12-05T10:42:42.345853 | 2020-08-28T21:56:52 | 2020-08-28T21:56:52 | 234,770,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py |
# coding: utf-8
# In[1]:
import numpy as np
# In[2]:
def chatToint(c):
    """Map a lowercase letter to its 0-based alphabet index ('a' -> 0)."""
    base = ord('a')
    return ord(c) - base
def intTochar(i):
    """Map a 0-based alphabet index back to its lowercase letter (0 -> 'a')."""
    return chr(ord('a') + i)
# In[3]:
def VigenereEncryption(key, plainText):
    """Encrypt *plainText* with the Vigenere cipher under *key*.

    Both the key and the text are normalised to lowercase (the original
    lowercased only the text, so an uppercase key produced garbage);
    output is lowercase. Raises ZeroDivisionError for an empty key,
    matching the original behaviour.
    """
    shifts = [ord(ch) - ord('a') for ch in key.lower()]
    letters = [ord(ch) - ord('a') for ch in plainText.lower()]
    # Repeat the key stream to exactly the length of the text.
    reps, rem = divmod(len(letters), len(shifts))
    stream = shifts * reps + shifts[:rem]
    return "".join(chr((p + k) % 26 + ord('a')) for p, k in zip(letters, stream))
def VigenereDecryption(key, cipherText):
    """Decrypt *cipherText* with the Vigenere cipher under *key*.

    Both the key and the text are normalised to lowercase (the original
    lowercased only the text, so an uppercase key produced garbage);
    output is lowercase. Raises ZeroDivisionError for an empty key,
    matching the original behaviour.
    """
    shifts = [ord(ch) - ord('a') for ch in key.lower()]
    letters = [ord(ch) - ord('a') for ch in cipherText.lower()]
    # Repeat the key stream to exactly the length of the text.
    reps, rem = divmod(len(letters), len(shifts))
    stream = shifts * reps + shifts[:rem]
    return "".join(chr((c - k) % 26 + ord('a')) for c, k in zip(letters, stream))
| [
"thevivekcode@gmail.com"
] | thevivekcode@gmail.com |
2bf2dfd436a03b708773f6fe91c76d111f9836b3 | 03ec8aaed061d736f0c6005a92d28fdc496c865f | /main.py | dfce935214912826a44dea7b21de86227d19b02d | [] | no_license | ragib70/AppBuildozerCrop | 6419f1ba11943e469f63da7ba7cb2f131715caf6 | 8e76bad8266a24217925389115d84c7c7ec77f0d | refs/heads/master | 2022-12-03T01:21:45.656238 | 2020-08-11T04:17:41 | 2020-08-11T04:17:41 | 277,562,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,709 | py | import numpy as np
import Feedforward
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
from kivy.core.window import Window
from kivymd.uix.dialog import MDDialog
from kivymd.uix.button import MDFlatButton
from kivymd.uix.list import OneLineAvatarListItem
from kivymd.uix.list import OneLineListItem, OneLineIconListItem
from kivy.properties import StringProperty
from kivymd.uix.card import MDCard
from kivy.uix.image import Image
from kivymd.uix.label import MDLabel
from kivymd.app import MDApp
# Window.size = (600,500)
KV = '''
<Item>
ImageLeftWidget:
source: root.source
<ContentNavigationDrawer>:
orientation: "vertical"
padding: "8dp"
spacing: "8dp"
AnchorLayout:
anchor_x: "left"
size_hint_y: None
height: avatar.height
Image:
id: avatar
size_hint: None, None
size: "56dp", "56dp"
source: "wheat.png"
MDLabel:
text: "Crop Predictor"
font_style: "Button"
size_hint_y: None
height: self.texture_size[1]
MDLabel:
text: "Contact Info - ragib.hussain70@gmail.com"
font_style: "Caption"
size_hint_y: None
height: self.texture_size[1]
ScrollView:
MDList:
OneLineAvatarListItem:
text: "USA Predictor"
on_press:
root.nav_drawer.set_state("close")
root.screen_manager.current = "scr 1"
ImageLeftWidget:
source: "USA_flag_icon.png"
OneLineAvatarListItem:
text: "USA Crops"
on_press:
root.nav_drawer.set_state("close")
root.screen_manager.current = "scr 2"
ImageLeftWidget:
source: "USA_flag_icon.png"
Screen:
NavigationLayout:
x: toolbar.height
ScreenManager:
id: screen_manager
Screen:
name: "scr 1"
MDToolbar:
id: toolbar
pos_hint: {"top": 1}
elevation: 10
title: "USA Crop Predictor"
left_action_items: [["menu", lambda x: nav_drawer.set_state("open")]]
MDGridLayout:
cols: 1
padding: "30dp"
spacing: "10dp"
MDBoxLayout:
MDTextField:
id: nitrogen
hint_text: "Nitrogen Content"
max_text_length: 6
icon_right: "percent"
mode: "rectangle"
#pos_hint: {'center_x' : 0.5, 'center_y': 1 }
helper_text: "Enter content in % upto 5 decimal places"
helper_text_mode: "on_focus"
halign: "center"
MDTextField:
id: phosphorus
hint_text: "Phosphorus Content"
max_text_length: 6
icon_right: "percent"
mode: "rectangle"
# pos_hint: {'center_x' : 0.5, 'center_y': 0.5 }
helper_text: "Enter content in % upto 5 decimal places"
helper_text_mode: "on_focus"
halign: "center"
MDTextField:
id: potassium
hint_text: "Potassium Content"
max_text_length: 6
icon_right: "percent"
mode: "rectangle"
# pos_hint: {'center_x' : 0.5, 'center_y': 0.5 }
helper_text: "Enter content in % upto 5 decimal places"
helper_text_mode: "on_focus"
halign: "center"
MDTextField:
id: moisture
hint_text: "Moisture Content"
max_text_length: 6
mode: "rectangle"
icon_right: "percent"
# pos_hint: {'center_x' : 0.5, 'center_y': 0.5 }
helper_text: "Enter content in % upto 5 decimal places"
helper_text_mode: "on_focus"
halign: "center"
MDGridLayout:
cols: 2
spacing: "50dp"
padding: "30dp"
MDRectangleFlatButton:
text: "Suggest"
size_hint: (0.2,None)
height: dp(50)
on_release:
app.predict()
MDRectangleFlatButton:
text: "Reset"
size_hint: (0.2,None)
height: dp(50)
on_release:
app.reset()
Screen:
name: "scr 2"
BoxLayout:
orientation: 'vertical'
MDToolbar:
id: toolbar
pos_hint: {"top": 1}
elevation: 10
title: "USA Crops"
left_action_items: [["menu", lambda x: nav_drawer.set_state("open")]]
ScrollView:
MDList:
id: md_list
#
# MDCard:
# size_hint: None, None
# size: root.width,300
# pos_hint: {"center_x": .5, "center_y": .5}
# Image:
# source:"USA_flag_icon.png"
# allow_stretch:"True"
# keep_ratio:"False"
#
# MDCard:
# size_hint: None, None
# size: self.parent.width,300
# pos_hint: {"center_x": .5, "center_y": .5}
MDNavigationDrawer:
id: nav_drawer
ContentNavigationDrawer:
screen_manager: screen_manager
nav_drawer: nav_drawer
'''
class ContentNavigationDrawer(BoxLayout):
    """Navigation drawer content; properties are bound from the matching KV rule."""
    # References injected via KV so drawer items can switch screens and close the drawer.
    screen_manager = ObjectProperty()
    nav_drawer = ObjectProperty()
class Item(OneLineAvatarListItem):
    """One row of the crop-suggestion dialog, with an image on the left."""
    divider = None
    source = StringProperty()  # image path consumed by the <Item> KV rule
class TestNavigationDrawer(MDApp):
    """Crop-predictor application: input form, prediction dialog and crop list."""

    def close_dialog(self,obj):
        # Dismiss whichever dialog is currently shown (bound to its button).
        self.dialog.dismiss()

    def reset(self):
        # Clear all four soil-parameter input fields.
        self.root.ids.moisture.text = ""
        self.root.ids.nitrogen.text = ""
        self.root.ids.potassium.text = ""
        self.root.ids.phosphorus.text = ""

    def predict(self):
        """Validate the form, run the model and show the top-5 suggestions."""
        if(self.root.ids.moisture.text=="" or self.root.ids.nitrogen.text=="" or self.root.ids.phosphorus.text=="" or self.root.ids.potassium.text==""):
            # At least one field is empty: warn and bail out.
            alert1 = "Insufficient Data"
            close_button1 = MDFlatButton(text="Dismiss", on_release= self.close_dialog)
            self.dialog = MDDialog(
                title="Warning",
                text=alert1,
                radius=[20, 7, 20, 7],
                size_hint= (0.75,0.8),
                buttons= [close_button1])
            self.dialog.open()
            return
        # NOTE(review): non-numeric input raises ValueError here -- consider validating.
        num1 = np.asfarray(self.root.ids.moisture.text, float)
        num2 = np.asfarray(self.root.ids.nitrogen.text, float)
        num3 = np.asfarray(self.root.ids.phosphorus.text, float)
        num4 = np.asfarray(self.root.ids.potassium.text, float)
        # result is indexed 0..4 below, so it is expected to hold at least 5 crop names.
        result = Feedforward.output_results(num1, num2, num3, num4)
        # Display the result
        close_button2 = MDFlatButton(text="Dismiss", on_release= self.close_dialog)
        self.dialog = MDDialog(
            title="Crop Suggestions",
            type="simple",
            items=[
                Item(text=result[0],source="wheat.png"),
                Item(text=result[1],source="wheat.png"),
                Item(text=result[2],source="wheat.png"),
                Item(text=result[3],source="wheat.png"),
                Item(text=result[4],source="wheat.png")],
            radius=[20, 7, 20, 7],
            size_hint= (0.75,0.8),
            buttons= [close_button2])
        self.dialog.open()

    def build(self):
        # Construct the widget tree from the module-level KV layout string.
        self.theme_cls.primary_palette = "Green"
        return Builder.load_string(KV)

    def on_start(self):
        # Populate the "USA Crops" screen with one image card per known crop.
        for crop in sorted(Feedforward.labels):
            card = MDCard(orientation='vertical',size_hint=(None,None), size = (self.root.width,300))
            imgr = Image(source="images/"+str(crop)+str(".jpg"), size_hint_y = 0.8, allow_stretch="True", keep_ratio="False")
            lbl = MDLabel(text=str(crop), size_hint_y = 0.2, halign = 'center', valign = 'center')
            card.add_widget(imgr)
            card.add_widget(lbl)
            self.root.ids.md_list.add_widget(card)
self.root.ids.md_list.add_widget(card)
TestNavigationDrawer().run()
| [
"noreply@github.com"
] | noreply@github.com |
d92d88a90e3d3629c5262cff0521dabd3e43ceb5 | fbe3a52d2dd02bec18f7f52b31e357aed192a308 | /misc/begin/recursion/misc_lhm.py | 261358dc125ac9c701cf6739853038d988549f0f | [] | no_license | lherrada/python | 8fc5bd5ceb6038479fa6347dd6c0bd6e17f92e98 | d8260f35ba91b89590ef8e489188fb80ca1aed4e | refs/heads/master | 2022-10-29T06:23:17.297554 | 2022-09-24T15:45:59 | 2022-09-24T15:45:59 | 23,411,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | #!/usr/bin/python
#import misc1
#print misc1.fib2(30)
import fibo
# Parenthesised single-argument print behaves identically under Python 2's
# print statement and is also valid Python 3.
print(fibo.fib2(10))
fibo.printing()
| [
"lherrada@lucid64.hsd1.ca.comcast.net"
] | lherrada@lucid64.hsd1.ca.comcast.net |
6bac024f9b0fbcbf1868095ef413091081c0377c | daf70c103d236f6b4d4efaf98a1bf4244a3ca507 | /utils/autocrafter_funcs.py | e3cebf14d791c2c6e9ab6dd63e1d5fff27bc0e06 | [] | no_license | Mydoriel/ffxiv-crafter-automation | f357afd01d4f148ccb575d2989bebbd4a600e920 | 8f13c1d215fd33f460fb88e4371f564e2971b86b | refs/heads/master | 2023-02-01T09:25:23.176781 | 2020-12-12T06:28:52 | 2020-12-12T06:28:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,702 | py | #!/usr/bin/env python3
"""TODO, create profile_list as global var
"""
import utils.helpers as h
import utils.input_handler as input_handler
import utils.settings as s
import utils.notifications as notify
from utils.process import Process
from time import sleep
import os
import sys
def list_macros(args):
    # Emit the configured macro names on stdout (no trailing newline,
    # matching the original output exactly).
    available = list(s.PROFILES)
    sys.stdout.write("Available macros: {0}".format(available))
def opt_repair(proc):
    """Drive the in-game self-repair menu (total sleep: 9.5s)."""
    select = "{VK_NUMPAD0}"
    esc = "{VK_ESCAPE}"
    left = "{LEFT}"
    right = "{RIGHT}"
    # (key, seconds to wait after the press), in menu-navigation order.
    sequence = (
        (esc, 3),
        (s.REPAIR, 0.5),
        (right, 0.5),
        (select, 0.5),
        (left, 0.5),
        (select, 0.5),
        (esc, 3),
        (s.CRAFT_ITEM, 1),
    )
    for key, delay in sequence:
        proc.press_key(key)
        sleep(delay)
def opt_food(proc):
    # TODO: food-buff rotation not implemented yet (stub kept so use_macro's
    # "-food" option parsing does not break).
    pass
def opt_potion(proc):
    # TODO: potion rotation not implemented yet (stub matching opt_food).
    pass
def use_macro(args):
    """Run the selected macro for the requested number of craft cycles.

    Args:
        args: Raw CLI arguments; parsed by input_handler.craft() into a
            dict with keys "macro", "amt" and "opt" (a list of flags such
            as "-repair").
    """
    inputs = input_handler.craft(args)  # inputs has macro amt opt
    macro = inputs["macro"]
    options = {
        "-repair": False,
        "-food": False,
        "-pot": False
    }
    for flag in inputs["opt"]:
        # Only recognised flags are toggled on; unknown flags are ignored.
        if flag in options:
            options[flag] = True
    print(f"Starting {inputs['amt']} crafts:")
    proc = Process()
    select = "{VK_NUMPAD0}"
    steps = len(macro["macro"]["keys"])
    repair_counter = 0
    # Can adjust sleeps according to lag
    sleeps = {
        "step1": 0.5,
        "step2": 1,
        "input": 2.5
    }
    est = h.get_time_estimation(macro, inputs["amt"], sleeps)
    print(f" > Time estimation: {est:.2f} minutes.")
    for i in range(inputs["amt"]):
        print(f" > Craft #{i + 1}")
        # Confirm the craft window (four presses of the select key).
        for _ in range(4):
            proc.press_key(select)
            sleep(1)
        for step in range(steps):
            wait = macro["macro"]["wait"][step]
            key = macro["macro"]["keys"][step]
            print(f" > Pressing {key}")
            sleep(sleeps["step1"])
            proc.press_key(key)
            print(f" > Waiting {wait}s")
            sleep(wait)
            sleep(sleeps["step2"])
        # Self-repair every s.REPAIR_COUNTER crafts when "-repair" was given.
        if repair_counter > s.REPAIR_COUNTER:
            if options["-repair"]:
                print("Self repairing...")
                opt_repair(proc)
                repair_counter = 0
        repair_counter += 1
        sleep(sleeps["input"])
    print("Crafts finished.")
    notify.finished()
| [
"haruspace.dev@gmail.com"
] | haruspace.dev@gmail.com |
abc30a68a630c3f77b5838a14d5ae3f723a23e80 | 199cf4d0f66f463b40a653dd02357b28b0d5616e | /map_function.py | 365a1eacb0bd045f7bba168c4d4f5561d15362cc | [] | no_license | manali1312/pythonsnippets | d5246a3f57f5436dc3fab5cdcdebff89d2a827ac | d953becfbef474b247c62402e4bdf4ae194bc519 | refs/heads/master | 2020-05-09T17:21:59.467653 | 2019-09-17T10:59:41 | 2019-09-17T10:59:41 | 181,306,508 | 0 | 0 | null | 2019-08-24T12:40:12 | 2019-04-14T12:36:09 | Python | UTF-8 | Python | false | false | 240 | py | def double(n):
return n + n
numbers = (1,2,3,4,5,6,7,8,9)
#result = map(double,numbers)
#rint(result)
doubleNums = []
for i in numbers:
#doubleNums.append(double(i))
r = double(i)
doubleNums.append(r)
print(doubleNums)
| [
"mandar.forkmedia.in"
] | mandar.forkmedia.in |
1cb73c6568858279025a470e045c2fd95de4ee58 | 1d1f173d67a04b78f732aee99ef0e2d4e8284d63 | /dev/migrate_testing_phylesystem.py | 8df26db7e044c0beb132c03618620d7e68edd506 | [
"Python-2.0",
"BSD-2-Clause"
] | permissive | rvosa/peyotl | 8767165ec85129c8f25c56a572f0bd879158aa2a | 98154af9832d18cbcb079f7e2db3b0e45893e1da | refs/heads/master | 2021-01-18T19:48:31.273061 | 2015-09-03T15:30:13 | 2015-09-03T15:30:13 | 41,867,598 | 0 | 0 | null | 2015-09-03T15:29:00 | 2015-09-03T15:29:00 | null | UTF-8 | Python | false | false | 4,445 | py | #!/usr/bin/env python
from peyotl.phylografter.nexson_workaround import workaround_phylografter_export_diffs, \
add_default_prop
from peyotl.phylesystem.git_actions import get_filepath_for_namespaced_id
from peyotl import get_logger
from subprocess import call
import codecs
import json
import sys
import os
import re
_LOG = get_logger(__name__)
def debug(m):
    # Shorthand so the script body can call debug(...) for progress messages.
    _LOG.debug(m)
# CLI layout: <old_phylesystem> <new_phylesystem> <scratch_dir> [study ids...]
old_phylesystem = sys.argv[1]
old_phylesystem_study = os.path.abspath(os.path.join(old_phylesystem, 'study'))
new_phylesystem = sys.argv[2]
new_phylesystem_study = os.path.abspath(os.path.join(new_phylesystem, 'study'))
scratch_par = sys.argv[3]
assert(os.path.isdir(old_phylesystem_study))
assert(os.path.isdir(new_phylesystem_study))
assert(os.path.isdir(scratch_par))
# Locate the conversion/validation helper scripts relative to this script's
# position inside the peyotl checkout (peyotl/dev/ -> peyotl/scripts/...).
script_name = os.path.abspath(sys.argv[0])
peyotl_dev_dir = os.path.split(script_name)[0]
peyotl_dir = os.path.split(peyotl_dev_dir)[0]
conversion_script = os.path.join(peyotl_dir, 'scripts', 'nexson', 'nexson_nexml.py')
assert(os.path.isfile(conversion_script))
validation_script = os.path.join(peyotl_dir, 'scripts', 'nexson', 'validate_ot_nexson.py')
# Bug fix: this assert previously re-checked conversion_script, so a missing
# validation script was only detected when it was invoked mid-run.
assert(os.path.isfile(validation_script))
failed = []
# Phylografter study ids are purely numeric at the start of the name.
pg_study_pat = re.compile(r'^\d+')
# Either convert the study ids given on the command line, or every study
# directory found in the old phylesystem.
if len(sys.argv) > 4:
    sl = sys.argv[4:]
else:
    sl = os.listdir(old_phylesystem_study)
for f in sl:
    if pg_study_pat.match(f):
        source_study = f
        dest_full = get_filepath_for_namespaced_id(new_phylesystem, f)
        scratch_dir = os.path.join(scratch_par, f)
        if not os.path.exists(scratch_dir):
            os.makedirs(scratch_dir)
        full_source = os.path.join(old_phylesystem_study, source_study, source_study + '.json')
        dest_dir = os.path.split(dest_full)[0]
        assert(os.path.exists(full_source))
        # Idempotent: a study already present in the new phylesystem is skipped.
        if os.path.exists(dest_full):
            debug('Skipping {} because output exists'.format(f))
            continue
        # read input and do the phylografter_workaround to valid 0.0.0 syntax
        # store in scratch.
        valid_bf = os.path.join(scratch_dir, 'v0.0.0-' + source_study + '.json')
        debug('Raw phylografter from "{}" to valid 0.0.0 NexSON at "{}" ...'.format(full_source, valid_bf))
        inp = codecs.open(full_source, mode='rU', encoding='utf-8')
        obj = json.load(inp)
        try:
            workaround_phylografter_export_diffs(obj, valid_bf)
        except:
            _LOG.exception('Exception in workaround_phylografter_export_diffs for study ' + f)
            failed.append(f)
            continue
        # Convert to 1.2.1
        unchecked_hbf = os.path.join(scratch_dir, 'v1.2.1-' + source_study + '.json')
        debug('Converting cleaned 0.0.0 NexSON from "{}" to unchecked 1.2.1 NexSON at "{}" ...'.format(valid_bf, unchecked_hbf))
        # Conversion runs as a subprocess of the same interpreter.
        invoc = [sys.executable,
                 conversion_script,
                 '-s',
                 '-e',
                 '1.2.1',
                 '-o',
                 unchecked_hbf,
                 valid_bf]
        debug('invoc: "{}"'.format('" "'.join(invoc)))
        rc = call(invoc)
        if rc != 0:
            failed.append(f)
        else:
            inp = codecs.open(unchecked_hbf, mode='rU', encoding='utf-8')
            obj = json.load(inp)
            aug_hbf = os.path.join(scratch_dir, 'augmentedv1.2.1-' + source_study + '.json')
            add_default_prop(obj, aug_hbf)
            # validate
            annotation = os.path.join(scratch_dir, 'validation.json')
            tmp = os.path.join(scratch_dir, 'final.json')
            debug('Writing annotated version of "{}" to "{}" with annotations to "{}" ...'.format(
                aug_hbf,
                tmp,
                annotation))
            invoc = [sys.executable,
                     validation_script,
                     '--embed',
                     '--agent-only',
                     '-e',
                     annotation,
                     '-o',
                     tmp,
                     aug_hbf]
            debug('invoc: "{}"'.format('" "'.join(invoc)))
            rc = call(invoc)
            if rc != 0:
                failed.append(f)
            else:
                # Success: move the validated file into its namespaced home.
                if not os.path.isdir(dest_dir):
                    os.makedirs(dest_dir)
                os.rename(tmp, dest_full)
# Exit non-zero with the list of studies that failed to convert. (A stray
# metadata fragment fused onto the final line by extraction was removed.)
if failed:
    m = '\n '.join(failed)
    sys.exit('Conversion of the following studies failed:\n {}'.format(m))
"mtholder@gmail.com"
] | mtholder@gmail.com |
2aa12c1d9a5cf420247ff47a82a12789e0cc2643 | 34e85c7be0e9e270318601d919a537b21e510a03 | /ContentRecs.py | be3c4ae40520d33c61c16bafcce9829ed5970829 | [] | no_license | ddezav/dprecommend | 5d331cfff42cef34a2e9fd03380679ab30b67688 | 4fb9bc840615b76bfc741959bb053686ff182b2c | refs/heads/main | 2023-04-26T11:35:13.190209 | 2021-05-16T01:22:15 | 2021-05-16T01:22:15 | 366,579,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 4 16:25:39 2018
@author: Frank
"""
from MovieLens import MovieLens
from ContentKNNAlgorithm import ContentKNNAlgorithm
from Evaluator import Evaluator
from surprise import NormalPredictor
import random
import numpy as np
def LoadMovieLensData():
    """Load the MovieLens ratings and popularity ranks.

    Returns a (MovieLens instance, ratings dataset, popularity ranks) tuple.
    """
    lens = MovieLens()
    print("Loading movie ratings...")
    ratings = lens.loadMovieLensLatestSmall()
    print("\nComputing movie popularity ranks so we can measure novelty later...")
    popularity = lens.getPopularityRanks()
    return (lens, ratings, popularity)
# Seed both RNGs so evaluation results are reproducible run to run.
np.random.seed(0)
random.seed(0)
# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()
# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)
contentKNN = ContentKNNAlgorithm()
evaluator.AddAlgorithm(contentKNN, "ContentKNN")
# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")
# NOTE(review): the True flag presumably enables top-N metrics — confirm
# against Evaluator.Evaluate's signature.
evaluator.Evaluate(True)
evaluator.SampleTopNRecs(ml)
| [
"noreply@github.com"
] | noreply@github.com |
1968cafb7539e6a61fb1b2c491c4ca2d09e9891d | e6dd376fdb9b511975e90c8ec73972984b4a4895 | /tests/test_numba.py | 772bb541e64c0e1871a8d23e54402a1295d5c67d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | levandoskije/chemicals | 41fd8ad2230a64797953d16937eec61d82050c49 | a5c531c1c1822a816cce9d071eb03092fb311a41 | refs/heads/master | 2023-04-09T19:52:50.317171 | 2021-04-15T02:38:32 | 2021-04-15T02:38:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,414 | py | # -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
from chemicals import *
import chemicals.vectorized
from math import *
from random import random
from fluids.constants import *
from fluids.numerics import assert_close, assert_close1d, assert_close2d
import pytest
try:
import numba
import chemicals.numba
import chemicals.numba_vectorized
except:
numba = None
import numpy as np
def swap_funcs_and_test(names, substitutions, test):
    '''Temporarily replace globals seen by *test*, run it, then restore.

    names : list[str]
        object names to switch out
    substitutions : list[obj]
        Objects to put in
    test : function
        Unit test to run in the file

    The originals are restored even if *test* raises — including
    BaseException subclasses such as KeyboardInterrupt, which the previous
    ``except Exception`` version would have let escape without restoring.
    '''
    glob = test.__globals__
    # Snapshot all originals before substituting, so a duplicated name in
    # `names` cannot clobber its own saved original.
    originals = {name: glob[name] for name in names}
    glob.update(zip(names, substitutions))
    try:
        test()
    finally:
        glob.update(originals)
def mark_as_numba(func):
    """Tag a test with the `numba` mark and skip it when numba is absent."""
    # Apply the numba mark first, then the conditional skip — the same
    # decoration order as before.
    marked = pytest.mark.numba(func)
    return pytest.mark.skipif(numba is None, reason="Numba is missing")(marked)
@mark_as_numba
def test_return_1d_array():
    """Numba variants that build a 1d array return a real ndarray whose
    values match the pure-Python implementations."""
    # Functions which initialize an array, and then need to return the correct value
    N = 30
    zs = zs_orig = normalize([random() for i in range(N)])
    MWs = [random()*200 for i in range(N)]
    zs2 = np.array(zs)
    MWs2 = np.array(MWs)
    # Took the slightest performance hit to CPython only, 186 us original, 190 us revised
    # at 1000 elements; no performance difference < 50 compounds
    ws = zs_to_ws(zs, MWs)
    ws_np = chemicals.numba.zs_to_ws(zs2, MWs2)
    assert type(ws_np) is np.ndarray
    assert_close1d(ws, ws_np)
    zs = ws_to_zs(ws, MWs)
    zs_np = chemicals.numba.ws_to_zs(ws_np, MWs2)
    assert type(zs_np) is np.ndarray
    assert_close1d(zs, zs_np)
    # Treat MWs as Vfs; doesn't matter to math
    Vfs = zs_to_Vfs(zs, MWs)
    Vfs_np = chemicals.numba.zs_to_Vfs(zs2, MWs2)
    assert type(Vfs_np) is np.ndarray
    assert_close1d(Vfs, Vfs_np)
    zs = Vfs_to_zs(Vfs, MWs)
    zs_np = chemicals.numba.Vfs_to_zs(Vfs_np, MWs2)
    assert type(Vfs_np) is np.ndarray
    assert_close1d(zs, zs_np)
    # Functions which have a return list comprehension
    vals = [-2651.3181821109024, -2085.574403592012, -2295.0860830203587]
    dxsn1 = chemicals.dxs_to_dxsn1(vals)
    dxsn1_np = chemicals.numba.dxs_to_dxsn1(np.array(vals))
    assert_close1d(dxsn1, dxsn1_np)
    assert type(dxsn1_np) is np.ndarray
    dxs, xs = [-0.0028, -0.00719, -0.00859], [0.7, 0.2, 0.1]
    dns = dxs_to_dns(dxs, xs)
    dns_np = chemicals.numba.dxs_to_dns(np.array(dxs), np.array(xs))
    assert type(dns_np) is np.ndarray
    assert_close1d(dns, dns_np)
    dns = [0.001459, -0.002939, -0.004334]
    dn_partials = dns_to_dn_partials(dns, -0.0016567)
    dn_partials_np = chemicals.numba.dns_to_dn_partials(np.array(dns), -0.0016567)
    assert type(dn_partials_np) is np.ndarray
    assert_close1d(dn_partials_np, dn_partials)
    dxs = [-0.0026404, -0.00719, -0.00859]
    xs = [0.7, 0.2, 0.1]
    F = -0.0016567
    dn_partials = dxs_to_dn_partials(dxs, xs, F)
    dn_partials_np = chemicals.numba.dxs_to_dn_partials(np.array(dxs), np.array(xs), F)
    assert_close1d(dn_partials, dn_partials_np)
    assert type(dn_partials_np) is np.ndarray
@mark_as_numba
def test_return_2d_array():
    """Numba d2xs_to_dxdn_partials returns an ndarray matching CPython."""
    hessian = [[0.152, 0.08, 0.547], [0.08, 0.674, 0.729], [0.547, 0.729, 0.131]]
    fractions = [0.7, 0.2, 0.1]
    expect = d2xs_to_dxdn_partials(hessian, fractions)
    got = chemicals.numba.d2xs_to_dxdn_partials(np.array(hessian), np.array(fractions))
    assert type(got) is np.ndarray
    assert_close1d(expect, got)
@mark_as_numba
def test_mixing_simple():
    """Spot-check numba mixing_simple and mixing_logarithmic."""
    fracs = np.array([1, 2])
    props = np.array([.1, .2])
    assert_close(chemicals.numba.mixing_simple(fracs, props), 0.5, rtol=1e-14)
    fracs = np.array([.1, .9])
    props = np.array([.01, .02])
    assert_close(chemicals.numba.mixing_logarithmic(fracs, props),
                 0.01866065983073615, rtol=1e-13)
@mark_as_numba
def test_dippr_correlations():
    """DIPPR equation forms agree between CPython and numba for all orders
    (value, first derivative, and both integral forms).

    Bug fix: these asserts previously compared ``chemicals.numba.EQxxx``
    against itself, which could never fail; they now compare the CPython
    implementation against the numba one.
    """
    orders = (0, 1, -1, -1j)
    args = (20, 33.19, 66.653, 6765.9, -123.63, 478.27)
    for i in orders:
        assert_close(chemicals.EQ114(*args, order=i), chemicals.numba.EQ114(*args, order=i), rtol=1e-13)
    args = (300, 276370., -2090.1, 8.125, -0.014116, 0.0000093701)
    for i in orders:
        assert_close(chemicals.EQ100(*args, order=i), chemicals.numba.EQ100(*args, order=i), rtol=1e-13)
    # EQ102 - numba-scipy does not support complex numbers so this does not work in numba
    args = (300., 647.096, 17.863, 58.606, -95.396, 213.89, -141.26)
    for i in orders:
        assert_close(chemicals.EQ116(*args, order=i), chemicals.numba.EQ116(*args, order=i), rtol=1e-13)
    args = (20., 3.3258E4, 3.6199E4, 1.2057E3, 1.5373E7, 3.2122E3, -1.5318E7, 3.2122E3)
    for i in orders:
        assert_close(chemicals.EQ127(*args, order=i), chemicals.numba.EQ127(*args, order=i), rtol=1e-13)
    args = (300., 33363., 26790., 2610.5, 8896., 1169)
    for i in orders:
        assert_close(chemicals.EQ107(*args, order=i), chemicals.numba.EQ107(*args, order=i), rtol=1e-13)
    args = (300.0, 0.02222, -26.38, -16750000, -3.894E19, 3.133E21)
    for i in orders:
        assert_close(chemicals.EQ104(*args, order=i), chemicals.numba.EQ104(*args, order=i), rtol=1e-13)
@mark_as_numba
def test_thermal_conductivity_misc():
    """Thermal conductivity correlations agree between CPython and numba."""
    assert_close(chemicals.numba.Bahadori_liquid(273.15, 170),
                 Bahadori_liquid(273.15, 170))
    assert_close(chemicals.numba.Missenard(304., 6330E5, 591.8, 41E5, 0.129),
                 chemicals.Missenard(304., 6330E5, 591.8, 41E5, 0.129))
    assert_close(chemicals.numba.DIPPR9H(np.array([0.258, 0.742]), np.array([0.1692, 0.1528])),
                 DIPPR9H([0.258, 0.742], [0.1692, 0.1528]))
    assert_close(chemicals.numba.Filippov(np.array([0.258, 0.742]), np.array([0.1692, 0.1528])),
                 Filippov([0.258, 0.742], [0.1692, 0.1528]))
    assert_close(chemicals.numba.DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92, chemtype='linear'),
                 chemicals.DIPPR9B(200., 28.01, 20.826, 1.277E-5, 132.92, chemtype='linear'))
    assert_close(chemicals.numba.Eli_Hanley(T=373.15, MW=72.151, Tc=460.4, Vc=3.06E-4, Zc=0.267, omega=0.227, Cvm=135.9),
                 chemicals.Eli_Hanley(T=373.15, MW=72.151, Tc=460.4, Vc=3.06E-4, Zc=0.267, omega=0.227, Cvm=135.9))
    assert_close(chemicals.numba.Eli_Hanley_dense(T=473., MW=42.081, Tc=364.9, Vc=1.81E-4, Zc=0.274, omega=0.144, Cvm=82.70, Vm=1.721E-4),
                 chemicals.Eli_Hanley_dense(T=473., MW=42.081, Tc=364.9, Vc=1.81E-4, Zc=0.274, omega=0.144, Cvm=82.70, Vm=1.721E-4))
    assert_close(chemicals.numba.Chung_dense(T=473., MW=42.081, Tc=364.9, Vc=184.6E-6, omega=0.142, Cvm=82.67, Vm=172.1E-6, mu=134E-7, dipole=0.4),
                 chemicals.Chung_dense(T=473., MW=42.081, Tc=364.9, Vc=184.6E-6, omega=0.142, Cvm=82.67, Vm=172.1E-6, mu=134E-7, dipole=0.4))
    # Does not work - atom input
    # chemicals.numba.Mersmann_Kind_thermal_conductivity_liquid(400, 170.33484, 658.0, 0.000754, {'C': 12, 'H': 26})
@mark_as_numba
def test_viscosity_misc():
    """Viscosity correlations agree between CPython and numba, including
    branches with min/if logic and dict-lookup replacements."""
    assert_close(chemicals.mu_IAPWS(T=647.35, rho=222, drho_dP=175.456980972231e-6, drho_dP_Tr=3.119177410324e-6),
                 chemicals.numba.mu_IAPWS(T=647.35, rho=222, drho_dP=175.456980972231e-6, drho_dP_Tr=3.119177410324e-6), rtol=1e-13)
    assert_close(chemicals.mu_IAPWS(T=647.35, rho=222, drho_dP=175.456980972231e-6),
                 chemicals.numba.mu_IAPWS(T=647.35, rho=222, drho_dP=175.456980972231e-6), rtol=1e-13)
    assert_close(chemicals.mu_IAPWS(T=647.35, rho=222),
                 chemicals.numba.mu_IAPWS(T=647.35, rho=222), rtol=1e-13)
    # Has a min, if statement
    args = (300., 500E5, 572.2, 34.7E5, 0.236, 0, 0.00068)
    ans = chemicals.numba.Lucas(*args)
    ans_base = chemicals.viscosity.Lucas(*args)
    assert_close(ans, ans_base, rtol=1e-14)
    # There is a dict lokup but it is not always needed
    new = Lucas_gas(T=550., Tc=512.6, Pc=80.9E5, Zc=0.224, MW=32.042, dipole=1.7)
    fast = chemicals.numba.Lucas_gas(T=550., Tc=512.6, Pc=80.9E5, Zc=0.224, MW=32.042, dipole=1.7)
    assert_close(new, fast, rtol=1e-12)
    # Test the dict lookup has been turned into a couple if statements - not suitable for large
    # tables but for three elements it is just as fast as a dict lookup
    kwargs = dict(T=6, Tc=5.1889, Pc=226968.0, Zc=0.3014, MW=4.002602, CASRN='7440-59-7')
    assert_close(chemicals.numba.Lucas_gas(**kwargs), Lucas_gas(**kwargs), rtol=1e-14)
    # A couple of points with Herning-Sipperer; works fine
    zs = np.array([0.5, 0.25, 0.25]*10)
    mus = np.array([1.78e-05, 1.12e-05, 9.35e-06]*10)
    MWs = np.array([28.0134, 16.043, 30.07]*10)
    fast = chemicals.numba.Herning_Zipperer(zs, mus, MWs)
    base = chemicals.Herning_Zipperer(zs.tolist(), mus.tolist(), MWs.tolist())
    assert_close(fast, base, rtol=1e-14)
    # Function calling other functions
    n = 1
    zs = np.array([.4, .3, .3]*n)
    MWs = np.array([16.04246, 30.06904, 44.09562]*n)
    Tcs = np.array([190.564, 305.32, 369.83]*n)
    Pcs = np.array([4599000.0, 4872000.0, 4248000.0]*n)
    Vcs = np.array([9.86e-05, 0.0001455, 0.0002]*n)
    mu = chemicals.numba.Lorentz_Bray_Clarke(T=300.0, P=1e6, Vm=0.0023025, zs=zs, MWs=MWs, Tcs=Tcs, Pcs=Pcs, Vcs=Vcs)
    assert_close(mu, 9.925488160761484e-06, rtol=1e-14)
    # Viscosity index - works beautifully
    assert_close(chemicals.numba.viscosity_index(73.3E-6, 8.86E-6, rounding=False),
                 chemicals.viscosity_index(73.3E-6, 8.86E-6, rounding=False), rtol=1e-14)
    assert_close(chemicals.numba.viscosity_index(73.3E-6, 8.86E-6, rounding=True),
                 chemicals.viscosity_index(73.3E-6, 8.86E-6, rounding=True), rtol=1e-14)
@mark_as_numba
def test_interface_misc():
    """Surface tension mixing rules agree between CPython and numba; the
    numba build also raises ValueError where the CPython version does."""
    # Tested quite a bit with numba/PyPy
    # At first numba had 3x the speed, but then I made the optimizations by hand
    # I knew were possible. Their speed is about equal after, with a slight edge up
    # by numba with large arrays
    n = 1
    xs = np.array([0.1606, 0.8394]*n)
    xs /= sum(xs)
    sigmas = np.array([0.01547, 0.02877]*n)
    rhoms = np.array([8610., 15530.]*n)
    xs2, sigmas2, rhoms2 = xs.tolist(), sigmas.tolist(), rhoms.tolist()
    assert_close(chemicals.numba.Winterfeld_Scriven_Davis(xs, sigmas, rhoms),
                 Winterfeld_Scriven_Davis(xs2, sigmas2, rhoms2))
    n = 1
    xs = np.array([0.1606, 0.8394]*n)
    sigmas_Tb = np.array([0.01424, 0.02530]*n)
    Tbs = np.array([309.21, 312.95]*n)
    Tcs = np.array([469.7, 508.0]*n)
    assert_close(chemicals.Diguilio_Teja(T=298.15, xs=xs,sigmas_Tb=sigmas_Tb, Tbs=Tbs, Tcs=Tcs),
                 chemicals.numba.Diguilio_Teja(T=298.15, xs=xs,sigmas_Tb=sigmas_Tb, Tbs=Tbs, Tcs=Tcs), rtol=1e-12)
    # Exception is correctly raised with numba
    with pytest.raises(ValueError):
        chemicals.numba.Diguilio_Teja(T=1000, xs=xs,sigmas_Tb=sigmas_Tb, Tbs=Tbs, Tcs=Tcs)
@mark_as_numba
def test_virial():
    """Z_from_virial_pressure_form works under numba."""
    Z_calc = chemicals.numba.Z_from_virial_pressure_form(
        102919.99946855308, 4.032286555169439e-09,
        1.6197059494442215e-13, 6.483855042486911e-19)
    assert_close(Z_calc, 1.00283753944, rtol=1e-13)
    # The BVirial_Tsonopoulos_extended comparison stays disabled: it takes
    # ~8 s just to compile under numba.
@mark_as_numba
def test_phase_change():
    """Numba MK agrees with the CPython implementation.

    Profiling notes: the function had duplicated powers numba optimized
    anyway; removing list-in-list constant indexing saved only ~25% under
    numba (~8% CPython, nothing on PyPy). PyPy runs this in ~19% of numba's
    time — numba has a high call overhead here.
    """
    args = (553.15, 751.35, 0.302)
    assert_close(chemicals.numba.MK(*args), chemicals.MK(*args), rtol=1e-12)
@mark_as_numba
def test_vapor_pressure():
    """Scalar dPsat_IAPWS_dT and vectorized Antoine under numba."""
    # Timings: PyPy 75 ns, CPython 2470 ns, numba 214 ns.
    assert_close(chemicals.numba.dPsat_IAPWS_dT(300.),
                 chemicals.dPsat_IAPWS_dT(300.), rtol=1e-14)
    temperatures = np.linspace(100, 1000, 5)
    expected = [34478.367349639906, 33596697.716487624, 109799836.81382856,
                179376011.49286702, 234627689.09298804]
    calculated = chemicals.numba_vectorized.Antoine(temperatures, 8.7687, 395.744, -6.469, 10)
    assert_close(calculated, expected, rtol=1e-11)
@mark_as_numba
def test_temperature():
    """ITS90_68_difference agrees between numba and CPython.

    The last four decimals differ between the implementations; timings
    were 494 us numba, 388 us PyPy, 1740 us CPython.
    """
    numba_val = chemicals.numba.ITS90_68_difference(1000.)
    plain_val = chemicals.ITS90_68_difference(1000.0)
    assert_close(numba_val, plain_val, rtol=1e-12)
    # T_converter (e.g. 500, 'ITS-68' -> 'ITS-48') is probably never going
    # to work under numba, so it is not exercised here.
@mark_as_numba
def test_critical():
    """Critical-property mixing rules agree between CPython and numba."""
    assert_close(chemicals.numba.Li(np.array([0.6449, 0.2359, 0.1192]), np.array([425.12, 469.7, 507.6]),np.array([0.000255, 0.000313, 0.000371])),
                 Li([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], [0.000255, 0.000313, 0.000371]), rtol=1e-13)
    assert_close(chemicals.numba.Chueh_Prausnitz_Tc(np.array([0.6449, 0.2359, 0.1192]), np.array([425.12, 469.7, 507.6]),
                 np.array([0.000255, 0.000313, 0.000371]), np.array([[0, 1.92681, 6.80358],
                 [1.92681, 0, 1.89312], [ 6.80358, 1.89312, 0]])),
                 Chueh_Prausnitz_Tc([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6],
                 [0.000255, 0.000313, 0.000371], [[0, 1.92681, 6.80358],
                 [1.92681, 0, 1.89312], [ 6.80358, 1.89312, 0]]), rtol=1e-13)
    zs = np.array([0.6449, 0.2359, 0.1192])
    Tcs = np.array([425.12, 469.7, 507.6])
    Aijs = np.array([[0, 1.2503, 1.516], [0.799807, 0, 1.23843], [0.659633, 0.807474, 0]])
    assert_close(chemicals.numba.Grieves_Thodos(zs, Tcs, Aijs),
                 Grieves_Thodos(zs, Tcs, Aijs), rtol=1e-12)
    Aijs = np.array([[0, 1.174450, 1.274390], [0.835914, 0, 1.21038], [0.746878, 0.80677, 0]])
    assert_close(chemicals.numba.modified_Wilson_Tc(zs, Tcs, Aijs),
                 modified_Wilson_Tc(zs, Tcs, Aijs), rtol=1e-12)
    assert_close(chemicals.numba.Chueh_Prausnitz_Vc(np.array([0.4271, 0.5729]), np.array([0.000273, 0.000256]), np.array([[0, 5.61847], [5.61847, 0]])),
                 Chueh_Prausnitz_Vc([0.4271, 0.5729], [0.000273, 0.000256], [[0, 5.61847], [5.61847, 0]]), rtol=1e-13)
    assert_close(chemicals.numba.modified_Wilson_Vc(np.array([0.4271, 0.5729]), np.array([0.000273, 0.000256]), np.array([[0, 0.6671250], [1.3939900, 0]])),
                 modified_Wilson_Vc([0.4271, 0.5729], [0.000273, 0.000256], [[0, 0.6671250], [1.3939900, 0]]), rtol=1e-13)
    # Not working yet: Ihmels, Meissner, Grigoras, critical_surface_methods
    # Maybe a future numba update will make this work.
@mark_as_numba
def test_volume():
    """Liquid-volume correlations (pure and mixture) agree between CPython
    and numba."""
    assert_close(chemicals.numba.Yen_Woods_saturation(300, 647.14, 55.45E-6, 0.245),
                 chemicals.Yen_Woods_saturation(300, 647.14, 55.45E-6, 0.245))
    assert_close(chemicals.numba.COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532),
                 chemicals.COSTALD(272.03889, 369.83333, 0.20008161E-3, 0.1532))
    assert_close(chemicals.numba.Bhirud_normal(280.0, 469.7, 33.7E5, 0.252),
                 Bhirud_normal(280.0, 469.7, 33.7E5, 0.252))
    assert_close(chemicals.numba.SNM0(121, 150.8, 7.49e-05, -0.004),
                 SNM0(121, 150.8, 7.49e-05, -0.004))
    assert_close(chemicals.numba.SNM0(121, 150.8, 7.49e-05, -0.004, -0.03259620),
                 SNM0(121, 150.8, 7.49e-05, -0.004, -0.03259620))
    kwargs = dict(T=405.45, Tb=239.82, Tc=405.45, Pc=111.7*101325, MW=17.03, dipole=None)
    assert_close(chemicals.numba.Campbell_Thodos(**kwargs),
                 Campbell_Thodos(**kwargs))
    # Test a slow one
    # 81.2 us orig, then 67.6 after optimizations in CPython
    # numba: 2.25 µs, PYPY: 1.31; numba with numpy: 4 us
    N = 100
    xs = [0.4576, 0.5424]*N
    MWs = [32.04, 18.01]*N
    Tcs = [512.58, 647.29]*N
    Pcs = [8.096E6, 2.209E7]*N
    Zrs = [0.2332, 0.2374]*N
    xs2 = np.array(xs)
    MWs2 = np.array(MWs)
    Tcs2 = np.array(Tcs)
    Pcs2 = np.array(Pcs)
    Zrs2 = np.array(Zrs)
    orig = Rackett_mixture(T=298., xs=xs, MWs=MWs, Tcs=Tcs, Pcs=Pcs, Zrs=Zrs)
    new = chemicals.numba.Rackett_mixture(T=298., xs=xs2, MWs=MWs2, Tcs=Tcs2, Pcs=Pcs2, Zrs=Zrs2)
    assert_close(orig, new)
    # Test COSTALD_mixture - even slower
    # timing after optimization at 200 elements - 1.49 m CPython, 27.1 µs numba, 63.5 µs PyPy3, 71.4 us PyPy2
    T = 300.0
    N = 15
    xs = normalize([0.4576, 0.5424]*N)
    Tcs = [512.58, 647.29]*N
    Vcs = [0.000117, 5.6e-05]*N
    omegas = [0.559,0.344]*N
    xs2 = np.array(xs)
    Tcs2 = np.array(Tcs)
    Vcs2 = np.array(Vcs)
    omegas2 = np.array(omegas)
    assert_close(COSTALD_mixture(xs, T, Tcs, Vcs, omegas),
                 chemicals.numba.COSTALD_mixture(xs2, T, Tcs2, Vcs2, omegas2))
@mark_as_numba
def test_solbility():
    """Henry_converter matches between CPython and numba."""
    kwargs = dict(old_scale='Hcp', new_scale='SI', rhom=55344.59, MW=18.01528)
    assert_close(Henry_converter(1.2e-5, **kwargs),
                 chemicals.numba.Henry_converter(1.2e-5, **kwargs))
@mark_as_numba
def test_refractivity():
    """brix_to_RI matches between CPython and numba."""
    degrees_brix = 5.8
    assert_close(brix_to_RI(degrees_brix), chemicals.numba.brix_to_RI(degrees_brix))
@mark_as_numba
def test_rachford_rice():
    """Rachford_Rice_flash_error and flash_inner_loop agree between
    CPython and numba."""
    n = 10
    zs = np.array([0.5, 0.3, 0.2]*n)
    Ks = np.array([1.685, 0.742, 0.532]*n)
    assert_close(chemicals.numba.Rachford_Rice_flash_error(0.5, zs=zs, Ks=Ks),
                 Rachford_Rice_flash_error(0.5, zs=zs, Ks=Ks))
    zs = np.array([0.5, 0.3, 0.2])
    Ks = np.array([1.685, 0.742, 0.532])
    VF_new, xs_new, ys_new = chemicals.numba.flash_inner_loop(zs=zs, Ks=Ks)
    VF, xs, ys = flash_inner_loop(zs=zs.tolist(), Ks=Ks.tolist())
    assert_close(VF, VF_new)
    assert_close1d(xs, xs_new)
    assert_close1d(ys, ys_new)
@mark_as_numba
def test_Rachford_Rice_solutionN():
    """N-phase Rachford-Rice solver agrees between CPython and numba."""
    ns = [0.204322076984, 0.070970999150, 0.267194323384, 0.296291964579, 0.067046080882, 0.062489248292, 0.031685306730]
    Ks_y = [1.23466988745, 0.89727701141, 2.29525708098, 1.58954899888, 0.23349348597, 0.02038108640, 1.40715641002]
    Ks_z = [1.52713341421, 0.02456487977, 1.46348240453, 1.16090546194, 0.24166289908, 0.14815282572, 14.3128010831]
    ns2, Ks2, betas2 = np.array(ns), np.array([Ks_y, Ks_z]), np.array([.1, .6])
    betas_new, zs_new = chemicals.numba.Rachford_Rice_solutionN(ns2, Ks2, betas2)
    betas, zs = Rachford_Rice_solutionN(ns, [Ks_y, Ks_z], [.1, .6])
    assert_close1d(betas, betas_new, rtol=1e-14)
    assert_close2d(zs, zs_new, rtol=1e-14)
@mark_as_numba
def test_Rachford_Rice_solution2():
    """Three-phase (two-beta) Rachford-Rice solver agrees between CPython
    and numba."""
    ns = [0.204322076984, 0.070970999150, 0.267194323384, 0.296291964579, 0.067046080882, 0.062489248292, 0.031685306730]
    Ks_y = [1.23466988745, 0.89727701141, 2.29525708098, 1.58954899888, 0.23349348597, 0.02038108640, 1.40715641002]
    Ks_z = [1.52713341421, 0.02456487977, 1.46348240453, 1.16090546194, 0.24166289908, 0.14815282572, 14.3128010831]
    ns2, Ksy2, Ksz2 = np.array(ns), np.array(Ks_y), np.array(Ks_z)
    beta0_new, beta1_new, z0_new, z1_new, z2_new = chemicals.numba.Rachford_Rice_solution2(ns2, Ksy2, Ksz2, beta_y=.1, beta_z=.6)
    beta0, beta1, z0, z1, z2 = Rachford_Rice_solution2(ns, Ks_y, Ks_z, beta_y=.1, beta_z=.6)
    assert_close(beta0_new, beta0)
    assert_close(beta1_new, beta1)
    assert_close1d(z0, z0_new)
    assert_close1d(z1, z1_new)
    assert_close1d(z2, z2_new)
@mark_as_numba
def test_rachford_rice_polynomial():
    """Polynomial Rachford-Rice solver agrees between CPython and numba
    for 2 through 6 components (6+ uses the generic routine)."""
    zs, Ks = [.4, .6], [2, .5]
    VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
    VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
    assert_close(VF, VF_new)
    assert_close1d(xs, xs_new)
    assert_close1d(ys, ys_new)
    zs = [0.5, 0.3, 0.2]
    Ks = [1.685, 0.742, 0.532]
    VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
    VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
    assert_close(VF, VF_new)
    assert_close1d(xs, xs_new)
    assert_close1d(ys, ys_new)
    zs = [0.2, 0.3, 0.4, 0.1]
    Ks = [2.5250, 0.7708, 1.0660, 0.2401]
    VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
    VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
    assert_close(VF, VF_new)
    assert_close1d(xs, xs_new)
    assert_close1d(ys, ys_new)
    zs = [0.2, 0.3, 0.4, 0.05, 0.05]
    Ks = [2.5250, 0.7708, 1.0660, 0.2401, 0.3140]
    VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
    VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
    assert_close(VF, VF_new)
    assert_close1d(xs, xs_new)
    assert_close1d(ys, ys_new)
    # 6 and higher use generic routine
    zs = [0.05, 0.10, 0.15, 0.30, 0.30, 0.10]
    Ks = [6.0934, 2.3714, 1.3924, 1.1418, 0.6457, 0.5563]
    VF_new, xs_new, ys_new = chemicals.numba.Rachford_Rice_solution_polynomial(np.array(zs), np.array(Ks))
    VF, xs, ys = Rachford_Rice_solution_polynomial(zs, Ks)
    assert_close(VF, VF_new)
    assert_close1d(xs, xs_new)
    assert_close1d(ys, ys_new)
@mark_as_numba
def test_lazy_loading():
    """Lazily-loaded data tables must remain reachable through the numba
    interface, and the Zabransky model classes must be numba jitclasses."""
    # Numba interferes with to_num
    # The data_reader functions are not part of the public API so are not converted
    chemicals.numba.heat_capacity.zabransky_dicts
    chemicals.numba.heat_capacity.CRC_standard_data
    # The converted classes should report themselves as jitclass types.
    assert 'jitclass' in str(chemicals.numba.heat_capacity.ZabranskySpline)
    assert 'jitclass' in str(chemicals.numba.heat_capacity.ZabranskyQuasipolynomial)
    assert 'jitclass' in str(chemicals.numba.heat_capacity.zabransky_dict_iso_s['2016-57-1'].models[0])
@mark_as_numba
def test_safety_functions():
    """Re-run the regular safety test suite against the numba build."""
    import test_safety
    # Temporarily swap the numba-accelerated implementation in for the
    # pure-Python one, then execute the existing test against it.
    swap_funcs_and_test(['NFPA_30_classification'],
                        [chemicals.numba.NFPA_30_classification],
                        test_safety.test_NFPA_30_classification)
| [
"Caleb.Andrew.Bell@gmail.com"
] | Caleb.Andrew.Bell@gmail.com |
fbaad15c61832e74295dfe1dae5088f30bff8971 | 726151e5c9386351fa79c98690894133a526115f | /adminpage/migrations/0013_auto_20200813_2142.py | d4913dbb8b714ae633030a875664da55c90ba9b3 | [] | no_license | youssriaboelseod/ERP | 867ae72f094d725891943bf0d9309efe16d4f7c6 | b56c3b4e3c2b787e472d145ddffa8d3dee3947e3 | refs/heads/main | 2023-01-22T03:39:13.312894 | 2020-12-01T12:44:32 | 2020-12-01T12:44:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # Generated by Django 3.0.7 on 2020-08-13 16:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give the employee image field a placeholder default picture."""

    dependencies = [
        ('adminpage', '0012_announcement'),
    ]
    operations = [
        migrations.AlterField(
            model_name='addemployee',
            name='image',
            # Falls back to a generic person icon when no photo is uploaded.
            field=models.ImageField(default='pics/person.png', upload_to='pics'),
        ),
    ]
| [
"pvaishnavi8701@gmail.com"
] | pvaishnavi8701@gmail.com |
d6a017668b77161fc0092d339bbc5ee76cb9b2a8 | 29ed133feb870455ca619c9fa2ce9b7eb1dcc470 | /URIs/URI1930.py | 22e1944f07f5f32fae61f640c2b1136be3f4465e | [] | no_license | jrantunes/URIs-Python-3 | c5e676686a979b6bbfd10b8e7168a6d35fb8f6a2 | 4692f3fba4a1c9a0f51322a13e9e267d8b07ea3e | refs/heads/master | 2022-04-17T10:56:52.468275 | 2020-03-28T17:07:46 | 2020-03-28T17:07:46 | 250,395,664 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | #Electrical Outlet
t1, t2, t3, t4 = input().split()
t1, t2, t3, t4 = int(t1) - 1, int(t2) - 1, int(t3) - 1, int(t4)
tomadas = [t1, t2, t3, t4]
print(sum(tomadas)) | [
"noreply@github.com"
] | noreply@github.com |
03da5c8d12b696c495dd8bcdff8ba6d4f299d446 | 5e2da1003c96ca15fa021f644bb7866b5ff4230f | /dictionary.py | 36c3d5cf78bbfd5aa6c249939efa7665d94a8001 | [] | no_license | LioHub/CW | f1f3a125d1374d4b8e676181051b59e2372e5995 | 21996218e97e204685ad1e5c7849673659e56468 | refs/heads/master | 2020-07-07T19:11:01.849338 | 2019-08-20T21:19:34 | 2019-08-20T21:19:34 | 203,450,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,934 | py | #!/usr/bin/env python
# coding=utf-8
from button import inline_main_menu
from button import inline_order_water
from button import inline_confirm_menu
from button import inline_location_menu
from button import inline_comment_menu
from button import inline_pre_order_menu
from button import inline_other_goods_menu
from button import inline_pre_payment_menu
from button import inline_order_acsess
from button import inline_sub_menu
from button import inline_cin_abone_menu
from button import inline_confirm_sub_menu
from button import inline_shop_menu
from button import inline_add_else_menu
from button import inline_instruction_menu
from button import inline_write_question_menu
from button import inline_news_menu
# Telegram payments provider token (intentionally left empty in the repo).
provider_token = '' # - origin
# Per-chat runtime state, keyed by chat id.
users = {}
prices = {0: []}
# Maps a menu index to its inline-keyboard markup and its own index.
menu = {
    1: {'button': inline_main_menu(), 'index': 1},
    2: {'button': inline_order_water('order_water'), 'index': 2},
    3: {'button': inline_order_acsess('order_pompa'), 'index': 3},
    4: {'button': inline_order_acsess('order_pompaEL'), 'index': 4},
    5: {'button': inline_order_acsess('order_culer'), 'index': 5},
    6: {'button': inline_other_goods_menu(), 'index': 6},
    7: {'button': inline_location_menu('d'), 'index': 7},
    8: {'button': inline_confirm_menu(), 'index': 8},
    9: {'button': inline_pre_payment_menu(), 'index': 9},
    10: {'button': inline_comment_menu(), 'index': 10},
    11: {'button': inline_pre_order_menu(), 'index': 11},
    12: {'button': inline_sub_menu(), 'index': 12},
    13: {'button': inline_order_water('subs'), 'index': 13},
    14: {'button': inline_cin_abone_menu('back_to_abonem'), 'index': 14},
    15: {'button': inline_confirm_sub_menu(), 'index': 15},
    16: {'button': inline_shop_menu(), 'index': 16},
    17: {'button': inline_order_acsess('shop'), 'index': 17},
    18: {'button': inline_order_acsess('shop_water'), 'index': 18},
    19: {'button': inline_order_acsess('shop_pompa'), 'index': 19},
    20: {'button': inline_order_acsess('shop_pompaEL'), 'index': 20},
    21: {'button': inline_order_acsess('shop_culer'), 'index': 21},
    22: {'button': inline_add_else_menu(), 'index': 22},
    23: {'button': inline_instruction_menu(), 'index': 23},
    24: {'button': inline_write_question_menu(), 'index': 24},
    25: {'button': inline_write_question_menu(), 'index': 25},
    26: {'button': inline_news_menu(), 'index': 26}
}
# Unit prices of the products (currency units).
product_price = {
    'water': 150,
    'pompa': 450,
    'pompaEL': 1150,
    'culer': 6990
}
# Human-readable (Russian) display names of the products.
product_name = {
    'water': 'Вода 19л',
    'pompa': 'Помпа',
    'pompaEL': 'Помпа электрическая',
    'culer': 'Кулер'
}
# Subscription plans: minimal bottle count, display term, per-bottle price.
sub_price2 = {
    '1_month': {'min_sub': 7, 'term': '1 месяц', 'price': 145},
    '3_months': {'min_sub': 21, 'term': '3 месяц', 'price': 140},
    '6_months': {'min_sub': 42, 'term': '6 месяц', 'price': 130},
    '1_year': {'min_sub': 84, 'term': '1 год', 'price': 120}
}
"lioknuckles@gmail.com"
] | lioknuckles@gmail.com |
c0a560dc7b728ab8f5e2bb57b87cb1e63a75ab05 | 30a2f77f5427a3fe89e8d7980a4b67fe7526de2c | /python/HERWIGPP_POWHEG_GluonFusion_H1200_bbbar_8TeV_cff.py | 69a444d3216d8ed1263402b24a90ec1ffe8bbca7 | [] | no_license | DryRun/QCDAnalysis | 7fb145ce05e1a7862ee2185220112a00cb8feb72 | adf97713956d7a017189901e858e5c2b4b8339b6 | refs/heads/master | 2020-04-06T04:23:44.112686 | 2018-01-08T19:47:01 | 2018-01-08T19:47:01 | 55,909,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.HerwigppDefaults_cfi import *
generator = cms.EDFilter(
"ThePEGGeneratorFilter",
herwigDefaultsBlock,
configFiles = cms.vstring(),
parameterSets = cms.vstring(
'cm8TeV',
'powhegNewDefaults',
'GluonFusionHbbParameters',
'basicSetup',
'setParticlesStableForDetector',
),
powhegNewDefaults = cms.vstring(
'# Need to use an NLO PDF',
'# and strong coupling',
'cp /Herwig/Partons/MRST-NLO /Herwig/Partons/cmsPDFSet',
'create Herwig::O2AlphaS O2AlphaS',
'set /Herwig/Generators/LHCGenerator:StandardModelParameters:QCD/RunningAlphaS O2AlphaS',
'# Setup the POWHEG shower',
'cd /Herwig/Shower',
'set Evolver:HardEmissionMode POWHEG',
'# higgs + W (N.B. if considering all W decay modes useful to set )',
'# (jet pT cut to zero so no cut on W decay products )',
'# insert SimpleQCD:MatrixElements[0] PowhegMEPP2WH',
'# set /Herwig/Cuts/JetKtCut:MinKT 0.0*GeV',
'# higgs + Z (N.B. if considering all Z decay modes useful to set )',
'# (jet pT cut to zero so no cut on Z decay products )',
'# insert SimpleQCD:MatrixElements[0] PowhegMEPP2ZH',
'# set /Herwig/Cuts/JetKtCut:MinKT 0.0*GeV',
'# gg/qqbar -> Higgs',
'# insert SimpleQCD:MatrixElements[0] PowhegMEHiggs',
'# Weak boson pair production: WW / ZZ / WZ / W+Z [WpZ] / W-Z [WmZ]',
'# insert SimpleQCD:MatrixElements[0] PowhegMEPP2VV',
'# set PowhegMEPP2VV:Process WpZ'
),
pdfCTEQ6M = cms.vstring(
'mkdir /LHAPDF',
'cd /LHAPDF',
'create ThePEG::LHAPDF CTEQ6M',
'set CTEQ6M:PDFName cteq6mE.LHgrid',
'set CTEQ6M:RemnantHandler /Herwig/Partons/HadronRemnants',
'cp CTEQ6M /cmsPDFSet',
'cd /'
),
GluonFusionHbbParameters = cms.vstring(
'cd /Herwig/MatrixElements/',
'insert SimpleQCD:MatrixElements[0] PowhegMEHiggs',
'set /Herwig/Particles/h0:NominalMass 1200.*GeV',
'set /Herwig/Particles/h0/h0->b,bbar;:OnOff On',
#'set /Herwig/Particles/h0/h0->b,bbar;:BranchingRatio 0.7195',
#'set /Herwig/Particles/h0/h0->b,bbar;:BranchingRatio 1.0000',
'set /Herwig/Particles/h0/h0->W+,W-;:OnOff Off',
'set /Herwig/Particles/h0/h0->tau-,tau+;:OnOff Off',
'set /Herwig/Particles/h0/h0->g,g;:OnOff Off',
'set /Herwig/Particles/h0/h0->c,cbar;:OnOff Off',
'set /Herwig/Particles/h0/h0->Z0,Z0;:OnOff Off',
'set /Herwig/Particles/h0/h0->gamma,gamma;:OnOff Off',
'set /Herwig/Particles/h0/h0->mu-,mu+;:OnOff Off',
'set /Herwig/Particles/h0/h0->t,tbar;:OnOff Off'
),
#crossSection = cms.untracked.double(0.1665),
#filterEfficiency = cms.untracked.double(1.0)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('\$Revision: 1.3 $'),
name = cms.untracked.string('\$Source: /local/reps/CMSSW/CMSSW/Configuration/GenProduction/python/EightTeV/HERWIGPP_POWHEG_H125_bbbar_W_lnu_8TeV_cff.py,v $'),
annotation = cms.untracked.string('HERWIGPP/POWHEG: (H->bb)(W->lnu), m(H)=125 GeV, l=e or mu or tau')
)
| [
"david.renhwa.yu@gmail.com"
] | david.renhwa.yu@gmail.com |
6139dd6afbca70b0561bf4f17a3172109b66be16 | 333976029b38cc82c2adcee98a0a8a11d37a3385 | /bin/cleanse.py | 96c478a8f4841e9e2df97f42b74e2625c318272a | [] | no_license | evanditter/Hadoop_Project_on_sales_data | 7b1f21f7f53a90930cecea39e122595fa6e17550 | 3e802354791812b9984e2b33600577a64d9efd13 | refs/heads/master | 2022-07-17T19:24:37.149264 | 2020-05-14T19:18:54 | 2020-05-14T19:18:54 | 263,999,665 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py |
import csv
import os
try:
os.mkdir("sales_update")
except:
print("directory already exists")
# row = #your data
path="salesdb/Employees2.csv"
path2="salesdb/Customers2.csv"
write="sales_update/Employees2/Employees2.csv"
write_2="sales_update/Customers2/Customers2.csv"
reader = list(csv.reader(open(path, "rU"), delimiter=','))
writer = csv.writer(open(write, 'w'), delimiter='|')
writer.writerows(row for row in reader)
with open(path2, 'rw') as csvfile:
reader_new = csv.reader(csvfile, delimiter='|')
rows = []
for row in reader_new:
for i in range(len(row)):
if row[i] == '':
row[i] = None
rows.append(row)
reader = list(csv.reader(open(path2, "rU"), delimiter='|'))
writer_new = csv.writer(open(write_2, 'w'), delimiter='|')
for row in reader:
for i in range(len(row)):
if row[i] == '' or row[i] == "":
row[i] = None
writer_new.writerow(row)
| [
"ditte049@umn.edu"
] | ditte049@umn.edu |
74436ec5031d0e2d1c5221f8e0675a8db01ac485 | 68eefd4c9da3d4dcc41416969f423af6a976c6f0 | /UMD/METK/Modules/Macros/MedicalExplorationToolkit/METKCollections/METKCollections.py | 4c1ba7834b449a92319e0d75552a7f091c9e2be0 | [] | no_license | hjkuijf/communitymodules | ce7bf85bf5ea9c5fc669c5384f77c8c1305b5668 | 1d49ef927100479f393944d457ceb214b7e2ef71 | refs/heads/master | 2021-01-21T09:52:40.439787 | 2015-11-09T16:25:55 | 2015-11-09T16:25:55 | 43,959,271 | 0 | 0 | null | 2015-10-09T14:06:02 | 2015-10-09T14:06:02 | null | ISO-8859-1 | Python | false | false | 22,171 | py | # Python module import
from VisDefinitions import *
# Result codes returned by the modal collection-details dialogs.
LOAD = 1
SAVE = 2
REMOVE = 3
CANCEL = 0
# Frequently used controls
_objectInclude = ctx.field("objectTypeInclude")
_layerExclude = ctx.field("layerExclude")
_infoExclude = ctx.field("infoExclude")
# Global class instances
_cls_info = None       # METKObjInfo wrapper around the "ObjInfo" module
_cls_iter = None       # ObjIterator used to scan the object database
_cls_exchange = None   # ObjInfo wrapper used as staging area for collections
_listView = None       # GUI list-view control; set while the panel is open
# Global variables
_countViewer = 0                  # viewers still pending a refresh acknowledgement
_waitForExchange = False          # flush exchange events once the script is ready
_waitForScriptBuilding = False    # an animation script is currently being built
_renderVideo = False              # batch-building a script over all collections
_preConfiguredCollection = False  # details supplied programmatically, skip dialog
_saveCollection = False           # a save operation is in progress
_activeCollection = ""            # layer id of the currently selected collection
_caseDir = ""                     # directory of the loaded case
_unsavedCollections = []          # files created since the last SAVE event
def init():
    """Create the ObjMgr wrapper instances and register for events.

    Called once when the module network starts up.
    """
    global _cls_info
    global _cls_iter
    global _cls_exchange
    # NOTE(review): _countColl is never assigned or read elsewhere in this
    # file -- possibly a stale leftover.
    global _countColl
    global _waitForExchange
    # Create an ObjInfo class instance to modify the database and to handle database events
    _cls_info = METKObjInfo(ctx.module("ObjInfo"))
    _cls_exchange = ObjInfo(ctx.module("exchange"))
    # Only receive events which occur in Object OBJ_COLLECTIONS
    # Additionally get events from OBJ_COMMUNICATION to be up to date on load or cleanup
    _cls_info.setEventReceiveMode(ERM_SELECTED, OBJ_COLLECTIONS + "," + OBJ_COMMUNICATION)
    _cls_exchange.setEventReceiveMode(ERM_ALL)
    _cls_iter = ObjIterator(ctx.module("iterator"))
    _waitForExchange = False
    return
###################################################################################
#
# ObjMgr event handling
#
###################################################################################
def handleObjectCreatedEvent():
    """No-op: object creation in the database needs no reaction here."""
def handleObjectRemovedEvent():
    """No-op: object removal from the database needs no reaction here."""
def handleAttributeCreatedEvent():
    """Treat a newly created attribute exactly like a modified one."""
    handleAttributeModifiedEvent()
def handleAttributeRemovedEvent():
    """No-op: attribute removal needs no reaction here."""
def handleAttributeModifiedEvent():
    """React to database attribute changes.

    Drives the multi-stage save state machine (see makeCollection) and, when
    an animation script finishes, flushes the staged collection data from the
    exchange container into the main database.
    """
    global _countViewer, _waitForExchange, _saveCollection
    object = _cls_info.activeObject()
    layer = _cls_info.activeLayer()
    info = _cls_info.activeInfo()
    value = _cls_info.get()
    # Stage 0 -> 1: a viewer acknowledged its refresh; once all of them did,
    # narrow the event subscription and trigger the screenshot stage.
    if info == INF_GLOBAL_REFRESH and (_cls_info.getBool() == False or _cls_info.get()=="0" ) and _saveCollection:
        _countViewer = _countViewer - 1
        if _countViewer == 0:
            updateViewerList()
            _cls_info.setEventReceiveMode(ERM_SELECTED, OBJ_COLLECTIONS + "," + OBJ_COMMUNICATION + "," + OBJ_ANIMATION + "," + ctx.field("capturedViewer").value)
            makeCollection(1)
    # Stage 1 -> 2: the captured viewer finished writing the screenshot.
    elif object == ctx.field("capturedViewer").value and info == INF_SCREENSHOTFLAG and _cls_info.getBool() == False and _saveCollection:
        makeCollection(2)
    # Animation script became ready: copy all staged exchange events into
    # the main database and notify listeners.
    elif object == OBJ_ANIMATION and layer == LAY_ANIMATION_SCRIPT and info == INF_SCRIPT_STATUS:
        if (value == "ready" and _waitForExchange == True):
            _waitForExchange = False
            ctx.field("exchange.firstEvent").touch()
            while ctx.field("exchange.operationSuccessful").value:
                _cls_info.typedSet(_cls_exchange.activeObject(), _cls_exchange.activeLayer(), _cls_exchange.activeInfo(), _cls_exchange.get(), _cls_exchange.getType())
                ctx.field("exchange.nextEvent").touch()
            _cls_info.notify()
    pass
def handleLoadedEvent():
    """A new case was loaded: reset state and refresh the collection list."""
    global _caseDir
    global _activeCollection
    global _unsavedCollections
    _caseDir = _cls_info.get(OBJ_CASE, LAY_CASE, INF_CASEDIR)
    _activeCollection = ""
    _unsavedCollections = []
    # Remove temporary screenshots left over from a previous session.
    MLABFileManager.remove (_caseDir + "collections/temp.tif")
    MLABFileManager.remove (_caseDir + "collections/temp.thumb.tif")
    updateListView()
def handleSaveEvent():
    """The case was saved: all collection files are now permanent."""
    global _unsavedCollections
    _unsavedCollections = []
def handleCleanupEvent():
    """The case is being closed: delete temporary and never-saved files."""
    global _activeCollection
    global _unsavedCollections
    global _caseDir
    _activeCollection = ""
    MLABFileManager.remove (_caseDir + "collections/temp.tif")
    MLABFileManager.remove (_caseDir + "collections/temp.thumb.tif")
    # clear all collection files, which are established after the last SAVE event
    for file in _unsavedCollections:
        MLABFileManager.remove(_caseDir + "collections/" + file)
    _unsavedCollections = []
    _caseDir = ""
    updateListView()
###################################################################################
#
# ListView handling
#
###################################################################################
def initListView():
    """Remember the list-view control of the opened panel and populate it."""
    global _listView
    _listView = ctx.control("collectionList")
    updateListView()
def closeListView():
    """Drop the cached list-view control when the panel is closed."""
    global _listView
    _listView = None
def updateListView():
    """Repopulate the GUI list: one row (thumbnail, title) per collection."""
    if _listView:
        _listView.clearItems()
        # Nothing to show while the case is being cleaned up.
        if _cls_info.get(OBJ_COMMUNICATION, LAY_GLOBALEVENTS, INF_CASELOADED) != MSG_CLEANUP:
            _cls_info.activateObject(OBJ_COLLECTIONS)
            _cls_info.firstLayer()
            while _cls_info.success():
                if _cls_info.activeLayer() != LAY_GLOBAL:
                    layerID = _cls_info.activeLayer()
                    item = _listView.insertItem()
                    # Prefer the thumbnail; fall back to the full screenshot.
                    if _cls_info.existInfo(OBJ_COLLECTIONS, layerID, INF_THUMBNAME):
                        item.setPixmapFile(0, _caseDir + _cls_info.get(OBJ_COLLECTIONS, layerID, INF_THUMBNAME))
                    else:
                        item.setPixmapFile(0, _caseDir + _cls_info.get(OBJ_COLLECTIONS, layerID, INF_SCREENSHOT))
                    item.setText(1, _cls_info.get(OBJ_COLLECTIONS, layerID, INF_TITLE))
                _cls_info.nextLayer()
    return
def clicked(item, column):
    """Single-click handler: load or show details, per the 'singleClick' option."""
    target = getActiveCollection(item)
    if ctx.field("singleClick").value == "Load":
        loadCollection(target)
    else:
        showDetails(target)
def doubleClicked(item):
    """Double-click handler: load or show details, per the 'doubleClick' option."""
    target = getActiveCollection(item)
    if ctx.field("doubleClick").value == "Load":
        loadCollection(target)
    else:
        showDetails(target)
def showDetails(collection):
    """Open the modal details dialog for *collection* and act on its result."""
    ctx.field("title").value = _cls_info.get(OBJ_COLLECTIONS, collection, INF_TITLE)
    ctx.field("comment").value = _cls_info.get(OBJ_COLLECTIONS, collection, INF_COMMENT)
    # Show the details dialog on top.
    result = ctx.showModalDialog("METKCollectionsDetailsExist", "Collection Details")
    if result == LOAD:
        loadCollection(_activeCollection)
    elif result == REMOVE:
        removeCollection(_activeCollection)
    pass
def getActiveCollection(item):
    """Map a list-view *item* back to its collection layer id.

    The match is done by title; the result (or "" when no collection has
    that title) is cached in the module-global _activeCollection.
    """
    global _activeCollection
    title = item.text(1)
    collection = ""
    _cls_info.activateObject(OBJ_COLLECTIONS)
    _cls_info.firstLayer()
    while _cls_info.success():
        if _cls_info.activeLayer() != LAY_GLOBAL:
            layerID = _cls_info.activeLayer()
            if _cls_info.get(OBJ_COLLECTIONS, layerID, INF_TITLE) == title:
                collection = layerID
        _cls_info.nextLayer()
    _activeCollection = collection
    return collection
def setViewer(args=0):
    """Hand the currently captured viewer's name over to the script builder."""
    ctx.field("METKScriptBuilder.viewerName").value = ctx.field("capturedViewer").value
def wakeup(args=0):
    """Module wake-up hook: simply re-publish the viewer name."""
    setViewer()
###################################################################################
#
# Collection management
#
###################################################################################
def saveCollection(field = 0):
    """Generates a temporary collection from the current ObjMgr state. Collection is not saved permanently as long as the user didn't confirm it.
    """
    # if there is a case directory, there is also a loaded and valid case
    if _caseDir != "":
        # flag is necessary to prevent this module from reacting on external stimuli
        global _saveCollection
        _saveCollection = True
        # Kick off stage 0 of the asynchronous save state machine; the
        # remaining stages are driven by handleAttributeModifiedEvent.
        makeCollection(0)
    return None
def savePreConfiguredCollection(title, comment):
    """Generates a temporary collection from the current ObjMgr state. Collection is saved permanently.
    """
    global _preConfiguredCollection
    # Skip the interactive details dialog; use the given title/comment instead.
    _preConfiguredCollection = True
    ctx.field("title").value = title
    ctx.field("comment").value = comment
    makeCollection(0)
    return None
def makeCollection(state):
    """Advance the asynchronous collection-saving state machine.

    state 0: ask all viewers to publish their parameters (continues in
             handleAttributeModifiedEvent once every viewer acknowledged).
    state 1: request a screenshot + thumbnail from the captured viewer.
    state 2: ask the user for title/comment (unless pre-configured), then
             persist the screenshot files, register the collection in
             OBJ_COLLECTIONS and dump the relevant database content to disk.
    """
    global _activeCollection
    global _preConfiguredCollection
    global _unsavedCollections
    _activeCollection = "temp"
    # update all viewer parameters
    if state == 0:
        #print "makeCollection 0"
        updateViewers()
    # make a screenshot
    elif state == 1:
        #print "makeCollection 1"
        MLABFileManager.mkdir(_caseDir + "collections/")
        MLABFileManager.remove (_caseDir + "collections/temp.tif")
        MLABFileManager.remove (_caseDir + "collections/temp.thumb.tif")
        filename = _caseDir + "collections/temp"
        _cls_info.typedSet(ctx.field("capturedViewer").value, LAY_VIEWER_SCREENSHOT, INF_SCREENSHOTFILE, filename + ".tif", INFOTYPE_STRING)
        _cls_info.typedSet(ctx.field("capturedViewer").value, LAY_VIEWER_SCREENSHOT, INF_SCREENSHOTTHUMB, True, INFOTYPE_BOOL)
        _cls_info.typedSet(ctx.field("capturedViewer").value, LAY_VIEWER_SCREENSHOT, INF_SCREENSHOTFLAG, True, INFOTYPE_BOOL)
        ctx.callLater(0, "_cls_info.notify")
    # screenshot is made, lets ask for details
    elif state == 2:
        if _preConfiguredCollection == False:
            ctx.field("title").value = ""
            ctx.field("comment").value = ""
            result = ctx.showModalDialog("METKCollectionsDetailsNew", "Collection Details")
        else:
            result = SAVE
            _preConfiguredCollection = False
        if result == SAVE:
            # create new (unexistent) collection entry
            if not _cls_info.existObject(OBJ_COLLECTIONS):
                _cls_info.createObject(OBJ_COLLECTIONS)
            _cls_info.activateObject(OBJ_COLLECTIONS)
            _cls_info.firstLayer()
            _cls_info.fld_genLayerUIDPrefix.value = "Collection_"
            _cls_info.fld_genLayerUID.touch()
            _activeCollection = _cls_info.activeLayer()
            # rename temporary files
            standardFileName = MLABFileManager.normalizePath(_cls_info.get(OBJ_CASE, LAY_CASE, INF_XMLFILE))
            pathComponents = MLABFileManager.splitPath(standardFileName)
            baseName = pathComponents["base"]
            shotName = MLABFileManager.splitPath(MLABFileManager.getUniqueFilename(_caseDir + "collections/", baseName + _activeCollection, ".tif"))["name"]
            thumbName = MLABFileManager.splitPath(MLABFileManager.getUniqueFilename(_caseDir + "collections/", baseName + _activeCollection, ".thumb.tif"))["name"]
            collName = MLABFileManager.splitPath(MLABFileManager.getUniqueFilename(_caseDir + "collections/", baseName + _activeCollection, ".col"))["name"]
            # Remember the files so a case cleanup without a save removes them.
            _unsavedCollections.append(shotName)
            _unsavedCollections.append(thumbName)
            _unsavedCollections.append(collName)
            MLABFileManager.rename (_caseDir + "collections/temp.tif", _caseDir + "collections/" + shotName)
            MLABFileManager.rename (_caseDir + "collections/temp.thumb.tif", _caseDir + "collections/" + thumbName)
            # Write the collection's metadata into OBJ_COLLECTIONS.
            _cls_info.set(OBJ_COLLECTIONS, _activeCollection, INF_TITLE, ctx.field("title").value)
            _cls_info.set(OBJ_COLLECTIONS, _activeCollection, INF_COMMENT, ctx.field("comment").value)
            _cls_info.set(OBJ_COLLECTIONS, _activeCollection, INF_SCREENSHOT, "collections/" + shotName)
            _cls_info.set(OBJ_COLLECTIONS, _activeCollection, INF_FILENAME, "collections/" + collName)
            _cls_info.set(OBJ_COLLECTIONS, _activeCollection, INF_THUMBNAME, "collections/" + thumbName)
            ctx.field("ObjDump.fileName").value = _caseDir + "collections/" + collName
            ctx.field("ObjMgr.clearObjectContainer").touch()
            # Scan the database for entries that belong into the collection.
            _cls_iter.setSearchCriteria(LAY_GLOBAL, INF_OBJTYPE)
            _cls_iter.searchFirst()
            while _cls_iter.success():
                if _objectInclude.value.count(_cls_info.get(_cls_iter.object(), LAY_GLOBAL, INF_OBJTYPE)):
                    # Recursively copy all entries of this object into exchange.
                    _cls_info.activateObject(_cls_iter.object())
                    _cls_info.firstLayer()
                    while _cls_info.success():
                        layerID = _cls_info.activeLayer()
                        if not _layerExclude.value.count(layerID):
                            _cls_info.firstInfo()
                            while _cls_info.success():
                                infoID = _cls_info.activeInfo()
                                if not _infoExclude.value.count(infoID):
                                    _cls_exchange.typedSet(_cls_iter.object(), layerID, infoID, _cls_info.get(), _cls_info.getType())
                                _cls_info.nextInfo()
                        _cls_info.nextLayer()
                _cls_iter.searchNext()
            _cls_exchange.notify()
            ctx.field("ObjDump.save").touch()
            _cls_info.notify()
            updateListView()
        else:
            # User cancelled: discard the temporary screenshot files.
            _activeCollection = ""
            MLABFileManager.remove (_caseDir + "collections/temp.tif")
            MLABFileManager.remove (_caseDir + "collections/temp.thumb.tif")
        global _saveCollection
        _saveCollection = False
    return None
def loadCollection(collection):
    """Load a saved collection and apply it to the current scene.

    With 'animated' enabled the transition is played as a two-second camera
    script; the staged values are applied once the script reports "ready"
    (see handleAttributeModifiedEvent). Otherwise they are applied at once.
    """
    global _countViewer
    # causes the viewers to change their values in the ObjMgr. This is necessary to enforce the adoption of the camera parameters on loading
    updateViewers()
    _countViewer = 0
    # loads the Collection actually stored in field collection
    # Clean everything up first.
    ctx.field("ObjMgr.clearObjectContainer").touch()
    ctx.field("exchange.clearEventQueue").touch()
    # Load the collection file.
    ctx.field("ObjLoader.fileName").value = _caseDir + _cls_info.get(OBJ_COLLECTIONS, collection, INF_FILENAME)
    ctx.field("ObjLoader.load").touch()
    # remove non-existent viewers from the loaded collection
    iter = ObjIterator(ctx.module("viewerRemoving"))
    iter.setSearchCriteria(LAY_GLOBAL, INF_OBJTYPE, "Viewer3D")
    iter.searchFirst()
    while iter.success():
        if not _cls_info.existObject(iter.object()):
            _cls_exchange.removeObject(iter.object())
        iter.searchNext()
    iter.setSearchCriteria(LAY_GLOBAL, INF_OBJTYPE, "Viewer2D")
    iter.searchFirst()
    while iter.success():
        if not _cls_info.existObject(iter.object()):
            _cls_exchange.removeObject(iter.object())
        iter.searchNext()
    _cls_exchange.notify()
    if ctx.field("animated").value:
        #run script
        #print "buildNewScript"
        ctx.field("METKScriptBuilder.useColor").value = False
        ctx.field("METKScriptBuilder.useTransparency").value = False
        ctx.field("METKScriptBuilder.useVisibility").value = False
        ctx.field("METKScriptBuilder.realtime").value = True
        ctx.field("METKScriptBuilder.length").value = 2
        ctx.field("METKScriptBuilder.buildNewScript").touch()
        global _waitForExchange, _waitForScriptBuilding
        _waitForExchange = True
        _waitForScriptBuilding = True
    else:
        # Apply the staged values immediately.
        ctx.field("exchange.firstEvent").touch()
        while ctx.field("exchange.operationSuccessful").value and _cls_exchange.existObject(_cls_exchange.activeObject()):
            _cls_info.set(_cls_exchange.activeObject(), _cls_exchange.activeLayer(), _cls_exchange.activeInfo(), _cls_exchange.get())
            ctx.field("exchange.nextEvent").touch()
        _cls_info.notify()
    pass
def removeCollection(collectionName):
    """Delete a collection: its files on disk and its database entry."""
    global _activeCollection
    _activeCollection = ""
    # Delete the collection's files.
    MLABFileManager.remove(_caseDir + _cls_info.get(OBJ_COLLECTIONS, collectionName, INF_FILENAME))
    MLABFileManager.remove(_caseDir + _cls_info.get(OBJ_COLLECTIONS, collectionName, INF_SCREENSHOT))
    # remove entry
    _cls_info.removeLayer(OBJ_COLLECTIONS, collectionName)
    _cls_info.notify()
    updateListView()
    return
###################################################################################
#
# Detail window handling
#
###################################################################################
def initDetailView():
    """Show the screenshot of the active collection in the details dialog."""
    if _activeCollection == "temp":
        pixmap = _caseDir + "collections/temp.tif"
    else:
        pixmap = _caseDir + _cls_info.get(OBJ_COLLECTIONS, _activeCollection, INF_SCREENSHOT)
    ctx.control("imageView").setPixmapFile(pixmap)
def modalDialogLoad():
    """Close the details dialog, answering LOAD."""
    ctx.window().done(LOAD)
def modalDialogSave():
    """Close the details dialog, answering SAVE."""
    ctx.window().done(SAVE)
def modalDialogRemove():
    """Close the details dialog, answering REMOVE."""
    ctx.window().done(REMOVE)
def modalDialogCancel():
    """Close the details dialog, answering CANCEL."""
    ctx.window().done(CANCEL)
###################################################################################
#
# video rendering / animations
#
###################################################################################
def initRenderVideo(field = 0):
    """Start building one long animation script that visits every collection.

    Configures the script builder for full (color/transparency/visibility)
    transitions, then walks the collection layers via appendScript().
    """
    global _renderVideo
    ctx.field("METKScriptBuilder.useColor").value = True
    ctx.field("METKScriptBuilder.useTransparency").value = True
    ctx.field("METKScriptBuilder.useVisibility").value = True
    ctx.field("METKScriptBuilder.realtime").value = False
    # The very first segment has length 0; later ones use videoStepLength.
    ctx.field("METKScriptBuilder.length").value = 0
    ctx.field("METKScriptBuilder.clear").touch()
    _renderVideo = True
    # get first collection
    _cls_info.activateObject(OBJ_COLLECTIONS)
    _cls_info.firstLayer()
    appendScript()
    pass
def appendScript():
    """Append the currently active collection layer to the video script.

    Loads the collection into the exchange container and asks the script
    builder to append a segment for it; skips the LAY_GLOBAL layer by
    recursing to the next one, and finalizes via scriptAppended() when no
    layers remain.
    """
    print "APPEND SCRIPT......................................"
    collection = ""
    collection = _cls_info.activeLayer()
    #print "appendScript for " + collection
    if collection != LAY_GLOBAL:
        #print ". load collection"
        # Clean everything up first.
        ctx.field("ObjMgr.clearObjectContainer").touch()
        ctx.field("exchange.clearEventQueue").touch()
        # Load the collection file.
        ctx.field("ObjLoader.fileName").value = _caseDir + _cls_info.get(OBJ_COLLECTIONS, collection, INF_FILENAME)
        ctx.field("ObjLoader.load").touch()
        #run script
        #print ". appendToScript"
        ctx.field("METKScriptBuilder.appendToScript").touch()
    else:
        _cls_info.nextLayer()
        if _cls_info.success():
            appendScript()
        else:
            scriptAppended()
    pass
def scriptAppended(field = 0):
    """Callback fired when the script builder finished a segment.

    Either publishes a single finished collection-transition script, or --
    during video rendering -- advances to the next collection layer and,
    once all are appended, publishes the complete script to OBJ_ANIMATION.
    """
    global _renderVideo, _waitForScriptBuilding
    if _waitForScriptBuilding:
        # Single animated collection load: hand the script to the animation.
        _waitForScriptBuilding = False
        #print "set to OBJMGR:"+ctx.field("METKScriptBuilder.script").value
        _cls_info.typedSet( OBJ_ANIMATION, LAY_ANIMATION_SCRIPT, INF_SCRIPT_FULLSCRIPT, ctx.field("METKScriptBuilder.script").value,INFOTYPE_MESSAGE)
        _cls_info.notify()
    elif _renderVideo and ctx.field("METKScriptBuilder.script").value != "":
        _cls_info.nextLayer()
        if _cls_info.success():
            # More collections to visit: append the next segment.
            ctx.field("METKScriptBuilder.length").value = ctx.field("videoStepLength").value
            appendScript()
        else:
            # All collections appended: publish the full video script.
            _renderVideo = False
            #print "set SCRIPT 2 in OBJMGR:" + ctx.field("METKScriptBuilder.script").value
            _cls_info.typedSet( OBJ_ANIMATION, LAY_ANIMATION_SCRIPT, INF_SCRIPT_FULLSCRIPT, ctx.field("METKScriptBuilder.script").value, INFOTYPE_MESSAGE)
            _cls_info.notify()
    pass
###################################################################################
#
# updating
#
###################################################################################
def updateViewers():
    """Ask every viewer to publish its current parameters to the ObjMgr.

    Sets _countViewer to the number of viewers asked; the acknowledgements
    are counted down again in handleAttributeModifiedEvent.
    """
    global _countViewer
    # Make sure the processing cannot overtake itself.
    _countViewer = 0
    # Switch to ERM_ALL so handleAttributeModifiedEvent receives the
    # messages from all viewers.
    _cls_info.setEventReceiveMode(ERM_ALL)
    # Let every viewer write its current parameters into the ObjMgr.
    _cls_iter.setSearchCriteria(LAY_GLOBAL, INF_OBJTYPE, "Viewer3D")
    _cls_iter.searchFirst()
    while _cls_iter.success():
        _countViewer = _countViewer + 1
        _cls_info.typedSet(_cls_iter.object(), LAY_GLOBAL, INF_GLOBAL_REFRESH, True, INFOTYPE_BOOL)
        _cls_iter.searchNext()
    _cls_iter.setSearchCriteria(LAY_GLOBAL, INF_OBJTYPE, "Viewer2D")
    _cls_iter.searchFirst()
    while _cls_iter.success():
        _countViewer = _countViewer + 1
        _cls_info.typedSet(_cls_iter.object(), LAY_GLOBAL, INF_GLOBAL_REFRESH, True, INFOTYPE_BOOL)
        _cls_iter.searchNext()
    _cls_info.notify()
    pass
def updateViewerList(field=0):
    """Rebuild the enumeration of available viewers.

    Scans the object database for Viewer3D and Viewer2D objects, writes the
    comma-separated list into the 'capturedViewerF' field and makes sure
    'capturedViewer' holds a sensible selection.
    """
    found = []
    for viewer_type in ("Viewer3D", "Viewer2D"):
        _cls_iter.setSearchCriteria(LAY_GLOBAL, INF_OBJTYPE, viewer_type)
        _cls_iter.searchFirst()
        # Adopt the first viewer found while no concrete viewer is selected.
        if _cls_iter.success() and ctx.field("capturedViewer").value == "[any viewer]":
            ctx.field("capturedViewer").value = _cls_iter.object()
        while _cls_iter.success():
            ctx.log("Found viewer: " + _cls_iter.object())
            found.append(_cls_iter.object())
            _cls_iter.searchNext()
    ctx.field("capturedViewerF").value = ",".join(found) if found else "[any viewer]"
    if ctx.field("capturedViewer").value == "":
        ctx.field("capturedViewer").value = "[any viewer]"
def notify():
    """Flush pending database modifications to all listeners."""
    _cls_info.notify()
"konrad79@users.sourceforge.net"
] | konrad79@users.sourceforge.net |
e698efba50be676a6cdb2c59212eac32d7a1c160 | eedfbc0db2843d3daa7b3112ce5059dc9bd13b04 | /app/models.py | 85c894f2f88a0bfeaf54a38396759b9d7fef9c56 | [] | no_license | TheMshary/ReactFormsTechTalkBackend | 6c10bc41e900cbd4e9a5558bc1ec3360e0f6c43c | bd38803be27504d6ad9f039b66770a733c83bf6d | refs/heads/master | 2020-03-30T01:58:34.430001 | 2018-09-27T15:07:58 | 2018-09-27T15:07:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from django.db import models
from django.core.validators import RegexValidator
# Create your models here.
class ExampleModel(models.Model):
    """Example record backing the demo form endpoints."""
    # Short handle for the record.
    alias = models.CharField(max_length=255)
    # Optional free-form text.
    description = models.TextField(blank=True, null=True)
    first_name = models.CharField(max_length=255, null=True)
    last_name = models.CharField(max_length=255, null=True)
    # Validated as an e-mail address by Django's EmailField.
    email = models.EmailField(null=True)
"m.alsharekh94@gmail.com"
] | m.alsharekh94@gmail.com |
106e3133bcbfec03e51e9bf4bf8dc1e0a224be1c | d2e6164de1e0ffe2e38cfcfcd9fc2840b9907bbe | /boardDes.py | 640cc743a96d6b9333a179a5157c604efdcf2921 | [
"MIT"
] | permissive | Kayal314/SnakesAndLadders | bbccb33d32fb4cfb6290309ecfbf3ef0bd8be298 | 811a0322404477badc85f861c3b027fc4fc0f120 | refs/heads/master | 2023-03-25T23:29:53.255586 | 2021-03-22T04:47:15 | 2021-03-22T04:47:15 | 350,209,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | """
Let S be the set of points (as numbers on the board) {(a,b): the mouth of the snake lies at a and the tail lies at b}
S = {(98,79), (95,75), (93,73), (86,24) ,(64,60),
(62,19), (54,34) ,(17,7)}
Let L be the set of points (as numbers on the board) {(a,b): the bottom of the ladder lies at a and the top lies at b}
L = {(80,100), (71,91), (28,84), (21, 42), (51,67),
(1,38), (4,14), (9,31)}
"""
class Board:
    """Static description of the snakes-and-ladders board.

    SNAKES maps a snake's mouth square to its tail square; LADDERS maps a
    ladder's bottom square to its top. Both dicts are populated when the
    first Board instance is created, mirroring the original behaviour.
    """

    SNAKES = {}
    LADDERS = {}

    def __init__(self):
        Board.SNAKES = {98: 79, 95: 75, 93: 73, 86: 24, 64: 60,
                        62: 19, 54: 34, 17: 7}
        Board.LADDERS = {80: 100, 71: 91, 28: 84, 21: 42, 51: 67,
                         1: 38, 4: 14, 9: 31}

    @staticmethod
    def get_coordinates(board_pos):
        """Translate a board square (1..100) into pixel coordinates.

        The board is laid out boustrophedon: rows counted from the bottom
        alternate direction, odd rows running right-to-left.
        """
        square = board_pos - 1
        row, col = divmod(square, 10)
        if row % 2 == 1:
            col = 9 - col
        return 120 + 60 * col, 560 - 60 * row
| [
"pritamkayal314@gmail.com"
] | pritamkayal314@gmail.com |
b6fc79b993cd002a05a15ed4d474c68787c15613 | 1b9075ffea7d4b846d42981b41be44238c371202 | /2009/devel/programming/library/caps/actions.py | 518c5e326c2b2857a117363e060e5b3fc85eebcc | [] | no_license | pars-linux/contrib | bf630d4be77f4e484b8c6c8b0698a5b34b3371f4 | 908210110796ef9461a1f9b080b6171fa022e56a | refs/heads/master | 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import pisitools
WorkDir = "ipfdevlib_linux"
def install():
    """PiSi build action: install the CAPS image library into the package root."""
    # Example binary.
    pisitools.dobin("examples/i686/ipfinfo")
    # Public header.
    pisitools.insinto("/usr/include/caps", "include/caps/capsimage.h")
    # Versioned shared library plus the conventional soname/dev symlinks.
    pisitools.dolib_so("lib/i686/libcapsimage.so.2.0")
    pisitools.dosym("/usr/lib/libcapsimage.so.2.0", "/usr/lib/libcapsimage.so.2")
    pisitools.dosym("/usr/lib/libcapsimage.so.2.0", "/usr/lib/libcapsimage.so")
    # Package documentation files.
    pisitools.dodoc("HISTORY", "LICENSE", "README")
"zaburt@users.noreply.github.com"
] | zaburt@users.noreply.github.com |
4060545b0289b7a52d7a0213f0e0987210831174 | e8bf00dba3e81081adb37f53a0192bb0ea2ca309 | /domains/rescue/problems/auto/problem21_SR.py | a47c6b172a4ffd1a79db00f9d7e9968525fa4476 | [
"BSD-3-Clause"
] | permissive | patras91/rae_release | 1e6585ee34fe7dbb117b084df982ca8a8aed6795 | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | refs/heads/master | 2023-07-13T20:09:41.762982 | 2021-08-11T17:02:58 | 2021-08-11T17:02:58 | 394,797,515 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | __author__ = 'patras'
from domain_searchAndRescue import *
from timer import DURATION
from state import state
def GetCostOfMove(r, l1, l2, dist):
    """Movement cost for robot *r* going from *l1* to *l2*.

    The robot identity and the endpoints are intentionally ignored: the
    planner charges exactly the supplied distance.
    """
    return dist
# Per-action durations (seconds) or cost functions used by the planner.
# The original file assigned this identical mapping twice back-to-back;
# a single assignment is sufficient and behaviorally identical.
DURATION.COUNTER = {
    'giveSupportToPerson': 15,
    'clearLocation': 5,
    'inspectPerson': 20,
    'moveEuclidean': GetCostOfMove,
    'moveCurved': GetCostOfMove,
    'moveManhattan': GetCostOfMove,
    'fly': 15,
    'inspectLocation': 5,
    'transfer': 2,
    'replenishSupplies': 4,
    'captureImage': 2,
    'changeAltitude': 3,
    'deadEnd': 1,
}
rv.WHEELEDROBOTS = ['w1', 'w2']
rv.DRONES = ['a1']
rv.OBSTACLES = { (100, 100)}
def ResetState():
    """Reset the shared planner state to this problem's initial configuration."""
    state.loc = {'w1': (7,24), 'w2': (24,11), 'p1': (22,30), 'a1': (23,15)}
    state.hasMedicine = {'a1': 0, 'w1': 5, 'w2': 0}
    state.robotType = {'w1': 'wheeled', 'a1': 'uav', 'w2': 'wheeled'}
    # UNK is a sentinel imported from the domain module (star import above).
    state.status = {'w1': 'free', 'w2': 'free', 'a1': UNK, 'p1': UNK, (22,30): UNK}
    state.altitude = {'a1': 'high'}
    state.currentImage = {'a1': None}
    # NOTE(review): 'a1' uses the OK constant while the other entries use the
    # string 'OK' -- presumably equivalent, but confirm against the domain module.
    state.realStatus = {'w1': 'OK', 'p1': 'OK', 'w2': 'OK', 'a1': OK, (22, 30): 'hasDebri'}
    state.realPerson = {(22,30): 'p1'}
    state.newRobot = {1: None}
    state.weather = {(22,30): "clear"}
tasks = {
1: [['survey', 'a1', (22,30)]]
}
eventsEnv = {
} | [
""
] | |
97309f9b1f5a82dac343f54538754ef39bca2df7 | 38b82147a796fb7a237c8e7774eb043cfdb89b46 | /advanced_fields/computer_vision/image_processing/image_creation.py | 8db8925600f15b0966065ffcc09b6736789b9798 | [
"MIT"
] | permissive | EliorBenYosef/data-science | 55eb7fd22bba8a4308490ab58d4113cc851f4898 | 117e5254f63e482c02aff394780bbdc205d492a3 | refs/heads/main | 2023-09-03T18:06:04.381279 | 2021-10-31T03:09:45 | 2021-10-31T03:09:45 | 397,682,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,750 | py | # mode='L' forces the image to be parsed in the grayscale.
import numpy as np
import cv2
from PIL import Image
# Build a 500x500 solid-color image: start from all-ones and scale every
# channel by a random 0-255 value (the (3,) vector broadcasts over all pixels).
h = 500
w = 500
img = np.ones([h, w, 3], dtype=np.uint8)
img = img * np.random.randint(0, 256, (3,), dtype=np.uint8)
# Equivalent per-channel form kept for reference:
# img[:, :, 0] = img[:, :, 0] * np.random.randint(0, 256)
# img[:, :, 1] = img[:, :, 1] * np.random.randint(0, 256)
# img[:, :, 2] = img[:, :, 2] * np.random.randint(0, 256)
cv2.imwrite('output_img/generated_image.jpg', img)
cv2.imshow('my image', img)
cv2.waitKey()
########################################
# Draw a 500x500 chessboard of 100-pixel squares in two random colors.
image_pixel_size = 500
color_cube_size = 100
color_1 = np.random.randint(0, 256, (3,))
color_2 = np.random.randint(0, 256, (3,))
img = np.zeros([image_pixel_size, image_pixel_size, 3], dtype=np.uint8)
for row in range(image_pixel_size):
    for column in range(image_pixel_size):
        # Squares whose row/column block indices share parity get color_1.
        if (row // color_cube_size) % 2 == (column // color_cube_size) % 2:
            red, green, blue = color_1
        else:
            red, green, blue = color_2
        # OpenCV stores channels in BGR order.
        img[row, column, 0] = blue
        img[row, column, 1] = green
        img[row, column, 2] = red
cv2.imwrite('output_img/generated_chess_board.jpg', img)
cv2.imshow("my chess board", img)
cv2.waitKey()
########################################
# create an image: black background, white filled rectangle, additive noise std=3
img = np.zeros([500, 500, 3], dtype=np.uint8)
bottom_left_corner = (200, 300)
top_right_corner = (400, 200)
color = (255, 255, 255)
img_with_rect = cv2.rectangle(img, bottom_left_corner, top_right_corner, color, cv2.FILLED)
mean = 0.0  # noise mean
std = 3.0  # noise standard deviation
# Adding Gaussian noise promotes the array to float64; it is written unclipped.
noisy_img = img_with_rect + np.random.normal(mean, std, img_with_rect.shape)
# noisy_img_clipped = np.clip(noisy_img, 0, 255) # we might get out of bounds due to noise
cv2.imwrite('../output_img/Q20_img.jpg', noisy_img)
cv2.imshow("my image", img)
cv2.waitKey()
########################################
def show_white_image_with_vertical_lines(w, h):
    """Display a white w-by-h grayscale image with black vertical lines every 10 px."""
    img_pil = Image.new('L', (w, h), 'white')
    pixel_matrix = img_pil.load()
    # Paint full-height black columns at x = 0, 10, 20, ...
    for x in range(0, w, 10):
        for y in range(h):
            pixel_matrix[x, y] = 0
    img_pil.show()
def show_white_image_with_diagonal_line(n):
    """Display a white n-by-n grayscale image with a black main diagonal."""
    img_pil = Image.new('L', (n, n), 'white')
    pixel_matrix = img_pil.load()
    # Only the n diagonal pixels need touching -- no full n*n scan required.
    # (A commented-out quadratic version was removed as dead code.)
    for x in range(n):
        pixel_matrix[x, x] = 0
    img_pil.show()
def show_white_image_with_circles(n):
    """Display an n-by-n image of concentric rings around the center.

    Each pixel's gray level is its squared distance from the center taken
    modulo n//2, producing repeating circular bands.  Despite the name, no
    white background is requested -- every pixel gets overwritten anyway.
    """
    img_pil = Image.new(mode='L', size=(n, n))
    pixel_matrix = img_pil.load()
    for x in range(n):
        for y in range(n):
            pixel_matrix[x, y] = round((x - (n // 2)) ** 2 + (y - (n // 2)) ** 2) % (n // 2)
    img_pil.show()
def show_psychedelic_image(n):
    """Display an n-by-n pattern: gray level = product of centered coords mod n//2."""
    img_pil = Image.new('L', (n, n), 'white')
    pixel_matrix = img_pil.load()
    for x in range(n):
        for y in range(n):
            pixel_matrix[x, y] = ((x - (n // 2)) * (y - (n // 2))) % (n // 2)
    img_pil.show()
def show_psychedelic_image_mine(n):
    """Display an n-by-n pattern mixing four formulas by pixel-coordinate parity.

    The (x odd/even, y odd/even) combination selects which arithmetic pattern
    supplies the gray level; all values are folded into 0..n//2-1 via modulo.
    """
    img_pil = Image.new('L', (n, n), 'white')
    pixel_matrix = img_pil.load()
    for x in range(n):
        for y in range(n):
            if y % 2 == 0:
                if x % 2 == 0:
                    pixel_matrix[x, y] = ((x + 100) // 2 + (y - 100) ** 2) % (n // 2)
                else:
                    pixel_matrix[x, y] = ((x + 100) ** 2 + (y + 100) // 2) % (n // 2)
            else:
                if x % 2 == 0:
                    pixel_matrix[x, y] = round((x - (n // 2)) ** 2 + (y - (n // 2)) ** 2) % (n // 2)
                else:
                    pixel_matrix[x, y] = round((x - (n // 2)) // 2 + (y - (n // 2)) // 2) % (n // 2)
    img_pil.show()
def show_image_01(n):
    """Display an n-by-n single-pixel checkerboard: black wherever i + j is even."""
    img_pil = Image.new('L', (n, n), 255)
    pixel_matrix = img_pil.load()
    for i in range(n):
        # (i + j) is even exactly when j has the same parity as i.
        for j in range(i % 2, n, 2):
            pixel_matrix[i, j] = 0
    img_pil.show()
def show_image_02(n):
    """Display an n-by-n image whose triangle with i <= j is black, rest white."""
    img_pil = Image.new('L', (n, n), 255)
    pixel_matrix = img_pil.load()
    for i in range(n):
        for j in range(n):
            if i <= j:
                pixel_matrix[i, j] = 0
    img_pil.show()
def show_image_03(n):
    """Display an n-by-n gradient whose brightness grows with the x coordinate."""
    img_pil = Image.new('L', (n, n), 255)
    pixel_matrix = img_pil.load()
    for i in range(n):
        for j in range(n):
            # NOTE(review): 20 * i exceeds 255 once i >= 13; how PIL treats
            # out-of-range 'L' values here should be confirmed if it matters.
            pixel_matrix[i, j] = 20 * i
    img_pil.show()
# show_white_image_with_vertical_lines(50, 100)
# show_white_image_with_diagonal_line(500)
# show_white_image_with_circles(512)
# show_psychedelic_image(512)
# show_psychedelic_image_mine(512)
# show_image_01(500)
# show_image_02(500)
# show_image_03(500)
| [
"eliorby@gmail.com"
] | eliorby@gmail.com |
24c9821ee09e36a22850395bcbc3a104f1b923c9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/321/101584/submittedfiles/jogoDaVelha.py | 6a1f739064ae86cf4550efd44217ef6939a0aeb4 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -*-
from jogoDaVelha_BIB import *
# COLOQUE SEU PROGRAMA A PARTIR DAQUI
print('Bem vindo ao JogoDaVelha do grupo 8 [Iara, Ingrid, Luiz Otávio, Tatiane]')
nome = str(input('Qual seu nome? '))
s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) '))
# Keep prompting until a valid symbol is given.  The original compared against
# the undefined names X and O, and the "s != X or s != O" condition is always
# true; the intended test is membership in the two valid symbols.
while s not in ('X', 'O'):
    print('Isira um símbolo válido')
    s = str(input('Qual símbolo você deseja utilizar no jogo? '))
# The original "if s = X" was a syntax error; presumably the player who picked
# 'X' triggers the draw for who starts (sorteio/inicio come from the BIB module).
if s == 'X':
    print(sorteio(inicio))
print(tabuleiro)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b614ca7ed169de8fd6bc9ceab1f35f66a6ecbd4e | 3bb70650b4b83e4653dcc18c8233c106c7a5611a | /receipt/models.py | a0249ce6f92b8a79eb8446196a76c3d5c9ab0a32 | [] | no_license | khanhlu2013/pos_connect_code | 48e736a6b1c5ca6a5c4ff39d842d8a93f66e67ef | fdf70de858c10b175832af31ecc0cf770d028396 | refs/heads/master | 2023-04-08T02:35:46.181265 | 2016-10-18T21:12:51 | 2016-10-18T21:12:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,959 | py | from django.db import models
from store.models import Store
from store_product.models import Store_product
import decimal
from payment_type.models import Payment_type
class Receipt(models.Model):
    """A completed sale: header row owning tender lines and receipt lines."""
    date = models.DateTimeField()
    tax_rate = models.DecimalField(max_digits=6, decimal_places=4)
    store = models.ForeignKey(Store)
    # Couch document id of this receipt.  Used when bulk-syncing sales to the
    # master DB: models.Receipt rows are bulk-created first, and this field
    # links each row back to its document so Receipt_ln rows can be bulk-inserted.
    _receipt_doc_id = models.CharField(max_length=40,unique=True)
    def __unicode__(self):
        return str(self.id)
class Tender_ln(models.Model):
    """One payment applied to a receipt (a sale may be split across tenders)."""
    receipt = models.ForeignKey(Receipt,related_name="tender_ln_lst")
    # Nullable: ad-hoc tenders may be identified only by *name* below.
    payment_type = models.ForeignKey(Payment_type,blank=True,null=True)
    amount = models.DecimalField(max_digits=6, decimal_places=2)
    name = models.CharField(blank=True,null=True,max_length=100)
class Receipt_ln(models.Model):
    """One line item on a receipt.

    A line appears to represent one of: a stocked product (store_product plus
    the sp_stamp_* snapshot), a mix-match deal adjustment (mm_deal_*), or an
    ad-hoc non-inventory item (non_inventory_*) -- hence most columns are
    nullable.
    """
    receipt = models.ForeignKey(Receipt,related_name="receipt_ln_lst")
    qty = models.IntegerField()
    discount = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
    override_price = models.DecimalField(max_digits=6, decimal_places=3,blank=True,null=True)
    date = models.DateTimeField()
    # Snapshot ("stamp") of the product's attributes at sale time, so later
    # catalog edits do not rewrite sales history.
    store_product = models.ForeignKey(Store_product,blank=True,null=True)
    sp_stamp_name = models.CharField(max_length=100,blank=True,null=True)
    sp_stamp_price = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
    sp_stamp_value_customer_price = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
    sp_stamp_crv = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
    sp_stamp_is_taxable = models.NullBooleanField(blank=True,null=True)
    sp_stamp_is_sale_report = models.NullBooleanField(blank=True,null=True)
    sp_stamp_p_type = models.CharField(max_length=100,blank=True,null=True)
    sp_stamp_p_tag = models.CharField(max_length=100,blank=True,null=True)
    sp_stamp_cost = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
    sp_stamp_vendor = models.CharField(max_length=100,blank=True,null=True)
    sp_stamp_buydown = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
    # Mix-and-match deal adjustment applied to this line, if any.
    mm_deal_discount = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
    mm_deal_name = models.CharField(max_length=100,blank=True,null=True)
    # Ad-hoc items sold without an inventory record.
    non_inventory_name = models.CharField(max_length=100,blank=True,null=True)
    non_inventory_price = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
    non_inventory_crv = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
    non_inventory_is_taxable = models.NullBooleanField(blank=True,null=True)
non_inventory_cost = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True) | [
"khanhlu2013@gmail.com"
] | khanhlu2013@gmail.com |
ea4d23a16f7e800ad1751c8ac5219ea8572661af | e3f62fe7d6127b5763628f223c5a1f59be114dfb | /program29.py | caee97878ff418ba1eaf482780f305ce8fbd239a | [] | no_license | balasaranyav/python_programs | 41a38d9ef25e2dcc052c2ce33ed079c455a10433 | 95b1ca197700ffa773d2b96b09b0e1d848a67886 | refs/heads/master | 2022-04-04T19:18:48.352217 | 2020-02-10T09:39:37 | 2020-02-10T09:39:37 | 238,921,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Write a program to check whether a number is Armstrong number or not
def armstrong(n):
    """Print whether *n* is an Armstrong (narcissistic) number.

    A number is an Armstrong number when the sum of its digits, each raised
    to the power of the digit count, equals the number itself.  The original
    hard-coded the exponent 3, which is only correct for 3-digit input;
    using the digit count generalises the check (e.g. 9474) while keeping
    the classic 3-digit results (153, 370, 371, 407) unchanged.
    """
    digits = str(abs(n))
    power = len(digits)
    total = sum(int(d) ** power for d in digits)
    if n == total:
        print(n, "is an Armstrong number")
    else:
        print(n, "is not an Armstrong number")
n = int(input("Enter a number: "))
armstrong(n) | [
"noreply@github.com"
] | noreply@github.com |
92760d5b73982d6e05f835c58c5c7238bf3fe56b | 7fb6800427ab46619e848637a131060b19ccf045 | /src/test.py | 5406835ce1b32f78b0edf7b8b42dc10632750688 | [] | no_license | vchennapalli/genomics-assignment | 7b94c4924203d280bc0f21dc474726c936c69d8c | a66bc1b08dc785379f8f60f1163c1703247263d6 | refs/heads/master | 2020-03-08T23:14:57.805476 | 2018-04-11T04:36:29 | 2018-04-11T04:36:29 | 128,457,069 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,598 | py | #Author: Vineeth Chennapalli
#Big Data Genomics Assignment
import pandas as pd
import numpy as np
from keras.layers import Dense, Dropout, LSTM
from keras.models import Sequential
from keras.optimizers import Adadelta
from keras.models import model_from_json
DATA_FOLDER = '../data'
TEST_PATH = DATA_FOLDER + '/test.csv'
MODEL_FOLDER = '../model'
SAVED_MODEL_PATH = MODEL_FOLDER + '/trained_model.json'
SAVED_WEIGHTS_PATH = MODEL_FOLDER + '/model_weights.h5'
RESULTS = '../results/predictions.csv'
def get_test_data(filepath):
    """
    input: filepath - string path to a CSV of TFBS sequences (first row is a header)
    output: list of sequences, each encoded as a list of ints (A=1, C=2, G=3, T=4)
    """
    # Base -> integer code used by the downstream model.  Defined locally so
    # the function is self-contained.
    mapping = {'A': 1, 'C': 2, 'G': 3, 'T': 4}
    colnames = ['sequence']
    data = pd.read_csv(filepath, names = colnames)
    sequences = data.sequence.tolist()
    # Row 0 is the header line ("sequence"); encode every data row.  The
    # original hard-coded range(1, 401), which breaks for any other row count.
    for i in range(1, len(sequences)):
        sequences[i] = [mapping[base] for base in sequences[i]]
    return sequences[1:]
mapping = {'A': 1, 'C': 2, 'G': 3, 'T': 4}
test_tfbs = get_test_data(TEST_PATH)
tfbs_test = np.array(test_tfbs)
with open(SAVED_MODEL_PATH, 'r') as jf:
json = jf.read()
model = model_from_json(json)
model.load_weights(SAVED_WEIGHTS_PATH)
print("Loaded the saved model")
predictions = model.predict(tfbs_test)
#print(predictions)
results = []
for p in predictions:
if p[0] > 0.5:
results.append([1])
else:
results.append([0])
#print(results)
df = pd.DataFrame(data = results, columns = {"prediction"})
df.to_csv(path_or_buf = RESULTS, columns={"prediction"}, header=True, index=True, index_label="id")
| [
"chennapalli@gmail.com"
] | chennapalli@gmail.com |
cd8aecca91fd152be1487734efe54d582598aa3d | ab47546a5fbf086193130b66a0ac8d849aa75d23 | /venv/bin/pip3.7 | aa3156ab60ab683ac043913e5b5ec19d31981c22 | [] | no_license | icerahi/ecommerce-webapp | 1f7b7a29d78d73ab03baa6aeeb69a4c95e042090 | eb44e9c1f5b8444a72b3aaf5fcd96f30aa4757c9 | refs/heads/master | 2020-05-01T06:30:05.477773 | 2019-03-24T08:11:38 | 2019-03-24T08:11:38 | 177,331,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | 7 | #!/home/rahi/PycharmProjects/E-commerce/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
# Auto-generated setuptools console-script shim: normalises argv[0] (stripping
# the Windows -script.py/.exe suffix) and dispatches to pip's registered
# console_scripts entry point.
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
| [
"zanjarwhite@gmail.com"
] | zanjarwhite@gmail.com |
caa1324452f60f4345f56ee55fdce418254174e2 | 32606415426b55b12f2c991a56a238a08a12baca | /ostrich/steps.py | 33875d59810c56f5b30a64136695a291a67a061e | [
"Apache-2.0"
] | permissive | shakenfist/ostrich | 773c6bc15b1a0b571fb80906d4e865238f349cf9 | f1a557c2dc19adf70d32152fbd07bc34676ee345 | refs/heads/master | 2021-07-21T23:26:28.448778 | 2017-07-18T01:24:36 | 2017-07-18T01:24:36 | 83,365,646 | 0 | 0 | Apache-2.0 | 2021-08-03T04:55:48 | 2017-02-27T22:57:05 | Python | UTF-8 | Python | false | false | 14,298 | py | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import fcntl
import json
import os
import psutil
import re
import select
import shutil
import subprocess
import sys
import time
import yaml
import emitters
import utils
def _handle_path_in_cwd(path, cwd):
if not cwd:
return path
if path.startswith('/'):
return path
return os.path.join(cwd, path)
class Step(object):
    """Base class for ostrich build steps.

    Subclasses implement _run(emit, screen).  run() adds retry bookkeeping:
    every re-invocation after the first sleeps failing_step_delay seconds,
    and once max_attempts is exceeded the whole program exits.
    """
    def __init__(self, name, **kwargs):
        self.name = name
        self.kwargs = kwargs
        self.depends = kwargs.get('depends', None)
        self.attempts = 0
        self.max_attempts = kwargs.get('max_attempts', 5)
        self.failing_step_delay = kwargs.get('failing_step_delay', 30)
        self.on_failure = kwargs.get('on_failure')

    def __str__(self):
        return 'step %s, depends on %s' % (self.name, self.depends)

    def run(self, emit, screen):
        # Back off before retrying a step that has already failed at least once.
        if self.attempts > 0:
            emit.emit('... not our first attempt, sleeping for %s seconds'
                      % self.failing_step_delay)
            time.sleep(self.failing_step_delay)

        self.attempts += 1
        if self.attempts > self.max_attempts:
            # Gives up hard: terminates the entire program, not just this step.
            emit.emit('... repeatedly failed step, giving up')
            sys.exit(1)

        emit.emit('Running %s' % self)
        emit.emit(' with kwargs: %s' % self.kwargs)
        emit.emit('\n')

        return self._run(emit, screen)
class KwargsStep(Step):
    """Step that recursively merges kwarg_updates into runner *r*'s kwargs.

    Always succeeds.  Note it overrides run() directly, so Step.run's retry
    and logging machinery is bypassed.
    """
    def __init__(self, name, r, kwarg_updates, **kwargs):
        super(KwargsStep, self).__init__(name, **kwargs)
        self.r = r
        self.kwarg_updates = kwarg_updates

    def run(self, emit, screen):
        utils.recursive_dictionary_update(self.r.kwargs, self.kwarg_updates)
        emit.emit(json.dumps(self.r.kwargs, indent=4, sort_keys=True))
        return True
class SimpleCommandStep(Step):
    """Step that runs a shell command, streaming its output to the emitter.

    Child processes are tracked via psutil so their starts/ends can be
    traced; success is judged by membership in acceptable_exit_codes.
    """
    def __init__(self, name, command, **kwargs):
        super(SimpleCommandStep, self).__init__(name, **kwargs)
        self.command = command
        self.cwd = kwargs.get('cwd')
        self.trace_processes = kwargs.get('trace_processes', False)

        # NOTE(review): this binds (not copies) os.environ, so the update
        # below mutates the real process environment; it also raises if no
        # 'env' kwarg is supplied -- presumably callers always pass one.
        self.env = os.environ
        self.env.update(kwargs.get('env'))

        self.acceptable_exit_codes = kwargs.get(
            'acceptable_exit_codes', [0])

    def _output_analysis(self, d):
        # Hook for subclasses to inspect raw output chunks (see the ansible
        # timing subclass).
        pass

    def _run(self, emit, screen):
        emit.emit('# %s\n' % self.command)
        obj = subprocess.Popen(self.command,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True,
                               cwd=self.cwd,
                               env=self.env)
        proc = psutil.Process(obj.pid)
        procs = {}

        # Make both pipes non-blocking so the select loop below can drain
        # whatever is available without stalling.
        flags = fcntl.fcntl(obj.stdout, fcntl.F_GETFL)
        fcntl.fcntl(obj.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        flags = fcntl.fcntl(obj.stderr, fcntl.F_GETFL)
        fcntl.fcntl(obj.stderr, fcntl.F_SETFL, flags | os.O_NONBLOCK)

        obj.stdin.close()
        while obj.poll() is None:
            readable, _, _ = select.select([obj.stderr, obj.stdout], [], [], 1)
            for f in readable:
                d = os.read(f.fileno(), 10000)
                self._output_analysis(d)
                emit.emit(d)

            # Diff the currently-live children against the last snapshot to
            # report process starts and ends.
            seen = []
            for child in proc.children(recursive=True):
                try:
                    seen.append(child.pid)
                    if child.pid not in procs:
                        procs[child.pid] = ' '.join(child.cmdline())
                        if self.trace_processes:
                            emit.emit('*** process started *** %d -> %s'
                                      % (child.pid, procs[child.pid]))
                except psutil.NoSuchProcess:
                    pass

            ended = []
            for pid in procs:
                if pid not in seen:
                    if self.trace_processes:
                        # NOTE(review): looks up procs.get(child.pid, ...)
                        # rather than pid -- probably a bug worth confirming.
                        emit.emit('*** process ended *** %d -> %s'
                                  % (pid, procs.get(child.pid, '???')))
                    ended.append(pid)

            for pid in ended:
                del procs[pid]

        emit.emit('... process complete')
        returncode = obj.returncode
        emit.emit('... exit code %d' % returncode)
        return returncode in self.acceptable_exit_codes
EXECUTION_RE = re.compile('^\[Executing "(.*)" playbook\]$')
RUN_TIME_RE = re.compile('^Run Time = ([0-9]+) seconds$')
class AnsibleTimingSimpleCommandStep(SimpleCommandStep):
    """SimpleCommandStep that scrapes per-playbook run times from ansible output.

    Timings accumulate across runs in a JSON file at timings_path.
    """
    def __init__(self, name, command, timings_path, **kwargs):
        super(AnsibleTimingSimpleCommandStep, self).__init__(
            name, command, **kwargs)
        self.playbook = None
        self.timings = []
        self.timings_path = timings_path

        # Load previously recorded timings so this run appends to them.
        if os.path.exists(self.timings_path):
            with open(self.timings_path, 'r') as f:
                self.timings = json.loads(f.read())

    def _output_analysis(self, d):
        # Remember the playbook named in an "[Executing ...]" banner, then
        # pair it with the next "Run Time = N seconds" line.
        for line in d.split('\n'):
            m = EXECUTION_RE.match(line)
            if m:
                self.playbook = m.group(1)

            m = RUN_TIME_RE.match(line)
            if m and self.playbook:
                self.timings.append((self.playbook, m.group(1)))

    def _run(self, emit, screen):
        res = super(AnsibleTimingSimpleCommandStep, self)._run(emit, screen)
        with open(self.timings_path, 'w') as f:
            f.write(json.dumps(self.timings, indent=4))
        return res
class PatchStep(SimpleCommandStep):
    """Apply patches/<name> to the filesystem, archiving touched files.

    The files named on '--- ' lines of the patch are copied into ~/.ostrich
    before and after the run so changes can be audited.  Exit code 1 is
    accepted as well as 0 -- presumably to tolerate patch fuzz or
    already-applied hunks; confirm against how failures should surface.
    """
    def __init__(self, name, **kwargs):
        self.local_kwargs = copy.copy(kwargs)
        # The patches/ directory lives next to this module's package root.
        self.local_kwargs['cwd'] = __file__.replace('/ostrich/steps.py', '')
        self.local_kwargs['acceptable_exit_codes'] = [0, 1]
        self.archive_path = os.path.expanduser('~/.ostrich')

        # Collect the target paths from the patch's '--- ' headers.
        self.files = []
        with open(os.path.join(self.local_kwargs['cwd'],
                               'patches/%s' % name)) as f:
            for line in f.readlines():
                if line.startswith('--- '):
                    self.files.append(line.split()[1])

        super(PatchStep, self).__init__(
            name,
            'patch -d / -p 1 --verbose < patches/%s' % name,
            **self.local_kwargs)

    def _archive_files(self, stage):
        # Snapshot each touched file once per stage ('before'/'after');
        # existing archives are never overwritten.
        for f in self.files:
            arc_path = os.path.join(self.archive_path,
                                    '%s-%s-%s'
                                    % (self.name, f.replace('/', '_'), stage))
            if not os.path.exists(arc_path):
                shutil.copyfile(f, arc_path)

    def _run(self, emit, screen):
        self._archive_files('before')
        res = super(PatchStep, self)._run(emit, screen)
        self._archive_files('after')
        return res
class QuestionStep(Step):
    """Interactive step: show a titled help text and read one line of input."""

    def __init__(self, name, title, helpful, prompt, **kwargs):
        super(QuestionStep, self).__init__(name, **kwargs)
        self.title = title
        self.help = helpful
        self.prompt = prompt

    def _run(self, emit, screen):
        underline = '=' * len(self.title)
        emit.emit('%s' % self.title)
        emit.emit('%s\n' % underline)
        emit.emit('%s\n' % self.help)
        # The user's answer becomes the step's result.
        return emit.getstr('>> ')
class RegexpEditorStep(Step):
    """Apply a regex search/replace to every line of one file, emitting a diff.

    Returns a 'Changed N lines' summary string.  Note each line is rstrip()ed
    before substitution (trailing whitespace is lost) and the file is
    rewritten via '\n'.join, so no trailing newline is written.
    """
    def __init__(self, name, path, search, replace, **kwargs):
        super(RegexpEditorStep, self).__init__(name, **kwargs)
        self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
        self.search = search
        self.replace = replace

    def _run(self, emit, screen):
        output = []
        changes = 0

        emit.emit('--- %s' % self.path)
        emit.emit('+++ %s' % self.path)

        with open(self.path, 'r') as f:
            for line in f.readlines():
                line = line.rstrip()
                newline = re.sub(self.search, self.replace, line)
                output.append(newline)

                # Emit unified-diff-style +/- markers for changed lines.
                if newline != line:
                    emit.emit('- %s' % line)
                    emit.emit('+ %s' % newline)
                    changes += 1
                else:
                    emit.emit(' %s' % line)

        with open(self.path, 'w') as f:
            f.write('\n'.join(output))

        return 'Changed %d lines' % changes
class BulkRegexpEditorStep(Step):
    """Walk *path* and apply a list of (search, replace) pairs to matching files.

    Files are selected by matching their basename against file_filter.
    Returns the number of (file, replacement) applications that changed
    at least one line.
    """
    def __init__(self, name, path, file_filter, replacements, **kwargs):
        super(BulkRegexpEditorStep, self).__init__(name, **kwargs)
        self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
        self.file_filter = re.compile(file_filter)
        self.replacements = replacements

    def _run(self, emit, screen):
        # The per-file edits are run with a no-op emitter; only the summary
        # line per file is emitted here.
        silent_emitter = emitters.NoopEmitter('noop', None)

        changes = 0

        for root, _, files in os.walk(self.path):
            for filename in files:
                m = self.file_filter.match(filename)
                if not m:
                    continue

                path = os.path.join(root, filename)
                for (search, replace) in self.replacements:
                    s = RegexpEditorStep('bulk-edit', path, search, replace)
                    result = s.run(silent_emitter, None)
                    emit.emit('%s -> %s' % (path, result))
                    # NOTE(review): detects change by comparing against the
                    # literal summary string 'Changed 0 lines' -- fragile if
                    # RegexpEditorStep's return format ever changes.
                    if result != 'Changed 0 lines':
                        changes += 1

        return changes
class FileAppendStep(Step):
    """Append literal text to an existing file; fail if the file is missing."""

    def __init__(self, name, path, text, **kwargs):
        super(FileAppendStep, self).__init__(name, **kwargs)
        self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
        self.text = text

    def _run(self, emit, screen):
        if os.path.exists(self.path):
            with open(self.path, 'a+') as handle:
                handle.write(self.text)
            return True

        emit.emit('%s does not exist' % self.path)
        return False
class FileCreateStep(Step):
    """Create a new file containing *text*; fail if the path already exists."""

    def __init__(self, name, path, text, **kwargs):
        super(FileCreateStep, self).__init__(name, **kwargs)
        self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
        self.text = text

    def _run(self, emit, screen):
        if not os.path.exists(self.path):
            with open(self.path, 'w') as handle:
                handle.write(self.text)
            return True

        emit.emit('%s exists' % self.path)
        return False
class CopyFileStep(Step):
    """Copy one file to another path (both resolved against cwd); always succeeds."""
    def __init__(self, name, from_path, to_path, **kwargs):
        super(CopyFileStep, self).__init__(name, **kwargs)
        self.from_path = _handle_path_in_cwd(from_path, kwargs.get('cwd'))
        self.to_path = _handle_path_in_cwd(to_path, kwargs.get('cwd'))

    def _run(self, emit, screen):
        shutil.copyfile(self.from_path, self.to_path)
        return True
class YamlAddElementStep(Step):
    """Append *data* to a list nested inside a YAML document and rewrite the file.

    target_element_path is the sequence of keys/indices leading to the list.
    """
    def __init__(self, name, path, target_element_path, data, **kwargs):
        super(YamlAddElementStep, self).__init__(name, **kwargs)
        self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
        self.target_element_path = target_element_path
        self.data = data

    def _run(self, emit, screen):
        with open(self.path) as f:
            y = yaml.load(f.read())

        # Walk down to the list identified by target_element_path.  (The
        # original had a stray Python-2 "print key" debug statement here,
        # which is a syntax error on Python 3 and noise on stdout.)
        sub = y
        for key in self.target_element_path:
            sub = sub[key]

        sub.append(self.data)

        emit.emit('YAML after changes:')
        emit.emit(yaml.dump(y))

        with open(self.path, 'w') as f:
            f.write(yaml.dump(y, default_flow_style=False))

        return True
class YamlUpdateElementStep(Step):
    """Set one key inside a nested YAML mapping to *data* and rewrite the file.

    target_element_path leads to the container; target_key is the entry to set.
    """
    def __init__(self, name, path, target_element_path, target_key, data,
                 **kwargs):
        super(YamlUpdateElementStep, self).__init__(name, **kwargs)
        self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
        self.target_element_path = target_element_path
        self.target_key = target_key
        self.data = data

    def _run(self, emit, screen):
        with open(self.path) as f:
            y = yaml.load(f.read())

        # Walk down to the container identified by target_element_path.
        sub = y
        for key in self.target_element_path:
            sub = sub[key]

        sub[self.target_key] = self.data

        emit.emit('YAML after changes:')
        emit.emit(yaml.dump(y))

        with open(self.path, 'w') as f:
            f.write(yaml.dump(y, default_flow_style=False))

        return True
class YamlDeleteElementStep(Step):
    """Delete entry *index* from a container nested in a YAML document.

    target_element_path leads to the container; *index* is the list index
    (or mapping key) to delete before rewriting the file.
    """
    def __init__(self, name, path, target_element_path, index, **kwargs):
        super(YamlDeleteElementStep, self).__init__(name, **kwargs)
        self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
        self.target_element_path = target_element_path
        self.index = index

    def _run(self, emit, screen):
        with open(self.path) as f:
            y = yaml.load(f.read())

        # Walk down to the container identified by target_element_path.
        sub = y
        for key in self.target_element_path:
            sub = sub[key]

        del sub[self.index]

        emit.emit('YAML after changes:')
        emit.emit(yaml.dump(y))

        with open(self.path, 'w') as f:
            f.write(yaml.dump(y, default_flow_style=False))

        return True
class YamlUpdateDictionaryStep(Step):
    """Merge dict *data* into a mapping nested in a YAML document.

    target_element_path leads to the mapping, which is updated in place
    (dict.update semantics) before the file is rewritten.
    """
    def __init__(self, name, path, target_element_path, data, **kwargs):
        super(YamlUpdateDictionaryStep, self).__init__(name, **kwargs)
        self.path = _handle_path_in_cwd(path, kwargs.get('cwd'))
        self.target_element_path = target_element_path
        self.data = data

    def _run(self, emit, screen):
        with open(self.path) as f:
            y = yaml.load(f.read())

        # Walk down to the mapping identified by target_element_path.
        sub = y
        for key in self.target_element_path:
            sub = sub[key]

        sub.update(self.data)

        emit.emit('YAML after changes:')
        emit.emit(yaml.dump(y))

        with open(self.path, 'w') as f:
            f.write(yaml.dump(y, default_flow_style=False))

        return True
| [
"mikal@stillhq.com"
] | mikal@stillhq.com |
a4a1a15fcd715bdc69965843f94c3b2f571c20b3 | 30227ff573bcec32644fca1cca42ef4cdd612c3e | /leetcode/linkedList/singly_linked_list/remove_node_a1.py | 8ee8af6cdfb55f3965cc1e1c627c57e7e5e85560 | [] | no_license | saurabh-pandey/AlgoAndDS | bc55864422c93e6c93b8432e483394f286ce8ef2 | dad11dedea9ceb4904d6c2dea801ce0172abfc81 | refs/heads/master | 2023-07-01T09:12:57.951949 | 2023-06-15T12:16:36 | 2023-06-15T12:16:36 | 88,239,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | #URL: https://leetcode.com/explore/learn/card/linked-list/219/classic-problems/1207/
# Description
"""
Given the head of a linked list and an integer val, remove all the nodes of the linked list that
has Node.val == val, and return the new head.
Example 1:
Input: head = [1,2,6,3,4,5,6], val = 6
Output: [1,2,3,4,5]
Example 2:
Input: head = [], val = 1
Output: []
Example 3:
Input: head = [7,7,7,7], val = 7
Output: []
Constraints:
The number of nodes in the list is in the range [0, 104].
1 <= Node.val <= 50
0 <= k <= 50
"""
def removeElements(head, val):
    """Remove every node whose _val equals *val*; return the (possibly new) head."""
    # First strip matching nodes off the front so the head is settled.
    while head is not None and head._val == val:
        head = head._next

    # Then walk the remainder, unlinking any matching successor.  The cursor
    # only advances past a node once its successor is known to be kept, which
    # handles runs of consecutive matches.
    node = head
    while node is not None and node._next is not None:
        if node._next._val == val:
            node._next = node._next._next
        else:
            node = node._next

    return head
"saurabhpandey85@gmail.com"
] | saurabhpandey85@gmail.com |
0ce8d4ae15eba8e000cbe459928f96dd85b9f175 | 9e5eca27222871dd04e42c9106bb2fba07e598ff | /src/osxification/foundation/ns_string.py | 4305958fd9999299dcc1df4b9c9d9d5641838191 | [] | no_license | jepebe/osxification | b2a68dec07cd0be3b7ebd519bd99d0bbd51e61c7 | c9a539f4dbeda9200e32a2eea2c955dd94e6f45e | refs/heads/master | 2016-09-03T06:35:41.659315 | 2015-05-19T18:00:23 | 2015-05-19T18:00:23 | 35,567,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,614 | py | from osxification.foundation import NSStringEncoding, NSObject
class NSString(NSObject):
    """Python wrapper around Objective-C NSString, bridged via NSObject bindings."""
    def __init__(self, content, encoding=None):
        # Only plain str content is supported; it is marshalled as UTF-8.
        # (The unicode branch is a leftover from Python 2.)
        if isinstance(content, str):
            encoding = NSStringEncoding.NSUTF8StringEncoding
        # elif isinstance(content, unicode):
        #     encoding = NSStringEncoding.NSUnicodeStringEncoding
        else:
            raise UserWarning("[%s] Error: 'content' should be a string, received: %s" % (self.__class__.__name__, type(content)))

        identifier = NSString._init(self.alloc(), content, encoding)
        super(NSString, self).__init__(identifier)

    def __str__(self):
        return self._asCString(NSStringEncoding.NSUTF8StringEncoding)

    # def __unicode__(self):
    #     return self._asCString(NSStringEncoding.NSUnicodeStringEncoding)

    def __int__(self):
        return self._intValue()

    def __float__(self):
        return self._floatValue()

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 -- confirm NSString is never used as a
        # dict key or set member.
        return self._isEqualToString(other)

    @classmethod
    def from_param(cls, instance):
        # Allow plain Python strings wherever an NSString argument is expected.
        if isinstance(instance, str):
            instance = NSString(instance)
        return NSObject.from_param(instance)

# Bind the underlying Objective-C selectors as callable helpers on the class.
NSString._init = NSString.bindMethodToClass("initWithCString:encoding:")
NSString._asCString = NSString.bindMethodToClass("cStringUsingEncoding:", returns=str)
NSString._intValue = NSString.bindMethodToClass("integerValue", returns=int)
NSString._floatValue = NSString.bindMethodToClass("doubleValue", returns=float)
NSString._isEqualToString = NSString.bindMethodToClass("isEqualToString:", parameters=[NSString], returns=bool)
"jepebe@users.noreply.github.com"
] | jepebe@users.noreply.github.com |
bb4e08299b87e0d44389027cb157b9ba193b8b62 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/250/32382/submittedfiles/swamee.py | 65a4d92fd968b2221e8050cbd8814d6ae8e3c0f0 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # -*- coding: utf-8 -*-
import math
# COMECE SEU CÓDIGO AQUI
f = float(input('digite f:'))
l = float(input('digite l:'))
q = float(input('digite q:'))
deltah = float(input('digite o delta:'))
v = float(input('digite v:'))
g = 9.81
e = 0.000002
# Pipe diameter from the Swamee formula.  The original wrote "**1/5", which
# Python parses as (x ** 1) / 5 rather than the intended fifth root.
D = ((8 * f * l * q * q) / ((math.pi ** 2) * g * deltah)) ** (1 / 5)
print('o valor de D é:%.4f' % D)
# Reynolds number.
rey = (4 * q) / (math.pi * D * v)
print('o valor de rey é:%.4f' % rey)
# Swamee-Jain friction factor: 0.25 / [log10(e/(3.7 D) + 5.74/Re^0.9)]^2.
# The original omitted the square on the logarithm term.
k = 0.25 / (math.log10((e / (3.7 * D)) + (5.74 / (rey ** 0.9)))) ** 2
print('o valor de k é:%.4f' % k)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
ddf2a1f0cc0195a7ea1195d4200196e3b871e4be | 7a20dac7b15879b9453150b1a1026e8760bcd817 | /Curso/ModuloTkinter/Aula001HelloWorld.py | 32a092bff66a7ed68b5bc07212de60aeb6f607e8 | [
"MIT"
] | permissive | DavidBitner/Aprendizado-Python | 7afbe94c48c210ddf1ab6ae21109a8475e11bdbc | e1dcf18f9473c697fc2302f34a2d3e025ca6c969 | refs/heads/master | 2023-01-02T13:24:38.987257 | 2020-10-26T19:31:22 | 2020-10-26T19:31:22 | 283,448,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | from tkinter import *
# Every Tk program starts by creating the top-level application window.
window = Tk()
# First widget: a Label attached to the window showing a static text.
greeting = Label(window, text="Hello World!")
# Lay the label out with the pack geometry manager.
greeting.pack()
# Enter the Tk event loop.  Execution blocks here until the window is
# closed; any statements after mainloop() run only after the GUI exits.
window.mainloop()
| [
"david-bitner@hotmail.com"
] | david-bitner@hotmail.com |
def get_longest_word(s: str) -> str:
    """Return the first longest whitespace-separated word in *s*.

    Ties are broken in favour of the earliest word, matching the original
    first-match scan.  Raises ValueError if *s* contains no words.
    """
    # max() returns the first item with the maximal key, so this preserves
    # the original tie-breaking while replacing the manual len-scan.
    return max(s.split(), key=len)
if __name__ == "__main__":
    # Ad-hoc smoke test: print the longest token of a sample string.
    print(get_longest_word("dsadas asd as dasd asdasasdasdsadd\n\t asdasdadjkbfakf"))
"mbs131119999@gmail.com"
] | mbs131119999@gmail.com |
305c882feba36d2ce768d97a4bcd908a88cab3bb | 52896d206ece58c7b999153830a3eadce10df189 | /sql.py | b4ae947ea3b9b8b4a546d58760b0835af6df2a92 | [] | no_license | vmelonari/my-flask-blog | 63531623055ca2b44c697a395d70cdd8e09182d0 | 9accc23c039440a71adaaf0c728594b736efecd6 | refs/heads/master | 2020-04-16T11:36:29.182292 | 2019-01-14T07:37:07 | 2019-01-14T07:37:07 | 165,543,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | import sqlite3
# with sqlite3.connect("database.db") as connection:
# c = connection.cursor()
# #c.execute("""DROP TABLE users""")
# c.execute("""CREATE TABLE users (
# id integer(11) primary key,
# name VARCHAR(100),
# email VARCHAR(100),
# username VARCHAR(30),
# password VARCHAR(100),
# register_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP
# )""")
# c.execute('INSERT INTO users VALUES(1, "Vlad", "vlad@vlad.de", "vladdy", "xxxx", "10.03.2018")')
# c.execute('INSERT INTO users VALUES(2, "Miha", "mihai@asdadsde", "micky", "xxxx", "10.03.2018")')
#ONLY TO CREATE A NEW DATABASE. JUST RUN THE SCRIPT
#crate a new table
# One-off schema setup: create the `articles` table.
# The sqlite3 connection context manager commits the transaction on
# successful exit (it does NOT close the connection).
with sqlite3.connect("database.db") as connection:
    c = connection.cursor()
    # NOTE(review): "VARCHAR(225)" looks like a typo for 255 -- harmless in
    # SQLite (declared lengths are not enforced) but worth confirming.
    c.execute("""CREATE TABLE articles(
        id integer(11) PRIMARY KEY,
        title VARCHAR(225),
        author VARCHAR(100),
        body TEXT,
        create_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        )""")
| [
"vmelonari@users.noreply.github.com"
] | vmelonari@users.noreply.github.com |
264e283c18aef5029aa4e725ce60a412f69f5623 | 7f06384851b0f0d642a1baec976cda84b5c5180a | /abctf-2016/crypto/encryption-service-140/UTkSDn4H.py | b8c5ad99a8d026ad22d4dabf0cefb9565510a098 | [] | no_license | gynvael/write-ups-2016 | 29f47dd79516a419d3233f0427af425ac23d7869 | f130ca6ccbf1afdc28082b399ce3adf2ac670c52 | refs/heads/master | 2021-01-12T12:12:01.599300 | 2016-10-29T18:28:42 | 2016-10-29T18:28:42 | 72,353,051 | 2 | 3 | null | 2016-10-30T14:05:05 | 2016-10-30T14:05:04 | null | UTF-8 | Python | false | false | 1,128 | py | #/usr/bin/env python
from Crypto.Cipher.AES import AESCipher
import SocketServer,threading,os,time
import signal
from secret2 import FLAG, KEY
PORT = 7765
def pad(s):
    """Pad *s* to a multiple of 16 bytes, PKCS#7-style.

    A full block of padding (16 bytes of chr(16)) is appended when the
    input length is already a multiple of 16.
    """
    pad_len = 16 - (len(s) % 16)
    return s + chr(pad_len) * pad_len
def encrypt(s):
    # `s` is the client's hex string; the plaintext encrypted is
    # 'ENCRYPT:' + decoded_client_bytes + FLAG, padded to the AES block
    # size.  NOTE(review): AESCipher is constructed with only a key --
    # presumably the library's default mode (no IV), which is what makes
    # the challenge solvable; confirm against the Crypto library used.
    return AESCipher(KEY).encrypt(pad('ENCRYPT:' + s.decode('hex') + FLAG))
class incoming(SocketServer.BaseRequestHandler):
    """Per-connection handler: read one line of hex, reply with ciphertext."""
    def handle(self):
        # NOTE(review): atfork() is not defined or imported in this file --
        # confirm it exists elsewhere, otherwise this raises NameError.
        atfork()
        req = self.request
        def recvline():
            # Read byte-by-byte until a newline (simple, not efficient).
            buf = ""
            while not buf.endswith("\n"):
                buf += req.recv(1)
            return buf
        # Hard 5-second deadline for the whole exchange (process is killed
        # by SIGALRM if the client stalls).
        signal.alarm(5)
        req.sendall("Send me some hex-encoded data to encrypt:\n")
        data = recvline()
        req.sendall("Here you go:")
        req.sendall(encrypt(data).encode('hex') + '\n')
        req.close()
class ReusableTCPServer(SocketServer.ForkingMixIn, SocketServer.TCPServer):
    # Fork one child process per connection; address reuse is enabled on
    # the base class below so the port can be rebound after a restart.
    pass
SocketServer.TCPServer.allow_reuse_address = True
server = ReusableTCPServer(("0.0.0.0", PORT), incoming)
print "Server listening on port %d" % PORT
server.serve_forever() | [
"mo@mightym0.de"
] | mo@mightym0.de |
2bdedab6d444a242204a52dcf13078f832e48466 | 691d0ecea7cd80586273f454761ad1ced65e3aa8 | /system/SurveillanceSystem.py | 57292e1c5718926d6c9aff8202466b5948fd0dfc | [] | no_license | martinjohnlopez8/home-final | f4033d1b7ff591b7350e73faea50ba77488e7d30 | 2a2227b3ef5567662d1067efe4cc8cf3c57f46f6 | refs/heads/master | 2021-01-12T12:46:32.296181 | 2016-09-26T21:46:44 | 2016-09-26T21:46:44 | 69,295,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,895 | py |
# Surveillance System Controller.
# Brandon Joffe
# 2016
# Copyright 2016, Brandon Joffe, All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code used in this project included opensource software (openface)
# developed by Brandon Amos
# Copyright 2015-2016 Carnegie Mellon University
import time
import argparse
import cv2
import os
import pickle
from operator import itemgetter
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.mixture import GMM
import dlib
import atexit
from subprocess import Popen, PIPE
import os.path
import sys
import logging
import threading
import time
from datetime import datetime, timedelta
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email import encoders
import requests
import json
import Camera
import openface
import aligndlib
import ImageProcessor
# import pdb
# pdb.set_trace()
from flask import Flask, render_template, Response, redirect, url_for, request
import Camera
from flask.ext.socketio import SocketIO,send, emit #Socketio depends on gevent
from instapush import Instapush, App #Used for push notifications
#Get paths for models
#//////////////////////////////////////////////////////////////////////////////////////////////
start = time.time()
np.set_printoptions(precision=2)
#///////////////////////////////////////////////////////////////////////////////////////////////
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
class Surveillance_System(object):
    def __init__(self):
        """Set up system state, parse CLI args, load the face-recognition
        models, register cameras, and start one processing thread per
        camera.  Commented-out lines are alternative camera sources and
        disabled subsystems kept for debugging."""
        self.training = True
        self.trainingEvent = threading.Event()  # cleared while the classifier is retraining
        self.trainingEvent.set()
        self.alarmState = 'Disarmed' #disarmed, armed, triggered
        self.alarmTriggerd = False
        self.alerts = []
        self.cameras = []
        self.peopleDB = []
        self.camera_threads = []
        self.camera_facedetection_threads = []
        self.people_processing_threads = []
        self.svm = None
        self.video_frame1 = None
        self.video_frame2 = None
        self.video_frame3 = None
        # Model locations are resolved relative to this source file.
        self.fileDir = os.path.dirname(os.path.realpath(__file__))
        self.luaDir = os.path.join(self.fileDir, '..', 'batch-represent')
        self.modelDir = os.path.join(self.fileDir, '..', 'models')
        self.dlibModelDir = os.path.join(self.modelDir, 'dlib')
        self.openfaceModelDir = os.path.join(self.modelDir, 'openface')
        parser = argparse.ArgumentParser()
        parser.add_argument('--dlibFacePredictor', type=str, help="Path to dlib's face predictor.", default=os.path.join(self.dlibModelDir, "shape_predictor_68_face_landmarks.dat"))
        parser.add_argument('--networkModel', type=str, help="Path to Torch network model.", default=os.path.join(self.openfaceModelDir, 'nn4.small2.v1.t7'))
        parser.add_argument('--imgDim', type=int, help="Default image dimension.", default=96)
        parser.add_argument('--cuda', action='store_true')
        parser.add_argument('--unknown', type=bool, default=False, help='Try to predict unknown people')
        self.args = parser.parse_args()
        self.align = openface.AlignDlib(self.args.dlibFacePredictor)
        self.net = openface.TorchNeuralNet(self.args.networkModel, imgDim=self.args.imgDim, cuda=self.args.cuda)
        #////////////////////////////////////////////////////Initialization////////////////////////////////////////////////////
        #self.change_alarmState()
        #self.trigger_alarm()
        #self.trainClassifier() # add faces to DB and train classifier
        #default IP cam
        #self.cameras.append(Camera.VideoCamera("rtsp://admin:12345@192.168.1.64/Streaming/Channels/2"))
        #self.cameras.append(Camera.VideoCamera("rtsp://admin:12345@192.168.1.64/Streaming/Channels/2"))
        #self.cameras.append(Camera.VideoCamera("rtsp://admin:12345@192.168.1.64/Streaming/Channels/2"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.48/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.48/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.48/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.37/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.1.37/video.mjpg"))
        #self.cameras.append(Camera.VideoCamera("debugging/iphone_distance1080pHD.m4v"))
        # Active camera source: a local test clip.
        self.cameras.append(Camera.VideoCamera("debugging/TwoPersonTest.mp4"))
        #self.cameras.append(Camera.VideoCamera("debugging/Martin1.mp4"))
        #self.cameras.append(Camera.VideoCamera("debugging/Martin2.mp4"))
        #self.cameras.append(Camera.VideoCamera("debugging/Martin3.mp4"))
        #self.cameras.append(Camera.VideoCamera("debugging/rotationD.m4v"))
        #self.cameras.append(Camera.VideoCamera("debugging/singleTest.m4v"))
        # self.cameras.append(Camera.VideoCamera("debugging/singleTest.m4v"))
        #self.cameras.append(Camera.VideoCamera("http://192.168.8.103:7001/?action=stream"))
        # self.cameras.append(Camera.VideoCamera("debugging/pasado.mp4"))
        #self.change_alarmState()
        #self.trigger_alarm()
        self.getFaceDatabaseNames()
        #self.trainClassifier() # add faces to DB and train classifier
        #processing frame threads- for detecting motion and face detection
        # One worker thread per camera, running process_frame() forever.
        for i, cam in enumerate(self.cameras):
            self.proccesing_lock = threading.Lock()
            thread = threading.Thread(name='frame_process_thread_' + str(i),target=self.process_frame,args=(cam,))
            thread.daemon = False
            self.camera_threads.append(thread)
            thread.start()
        #Threads for alignment and recognition
        # for i, cam in enumerate(self.cameras):
        #     #self.proccesing_lock = threading.Lock()
        #     thread = threading.Thread(name='face_process_thread_' + str(i),target=self.people_processing,args=(cam,))
        #     thread.daemon = False
        #     self.people_processing_threads.append(thread)
        #     thread.start()
        #Thread for alert processing
        # self.alerts_lock = threading.Lock()
        # thread = threading.Thread(name='alerts_process_thread_',target=self.alert_engine,args=())
        # thread.daemon = False
        # thread.start()
    def process_frame(self,camera):
        """Per-camera worker loop: a two-state machine.

        State 1 -- motion detection only; switches to state 2 on motion.
        State 2 -- face detection + recognition; falls back to state 1
        after 20 seconds without any detected face.  Recognised people are
        recorded in camera.people (keyed by predicted name).
        """
        logging.debug('Processing Frames')
        state = 1
        frame_count = 0;
        start = time.time()
        while True:
            frame = camera.read_frame()
            frame = ImageProcessor.resize(frame)
            height, width, channels = frame.shape
            if state == 1: # if no faces have been found or there has been no movement
                camera.motion, frame = ImageProcessor.motion_detector(camera,frame) #camera.motion,
                #camera.processing_frame = frame
                if camera.motion == True:
                    logging.debug('\n\n////////////////////// Motion Detected - Looking for faces in Face Detection Mode\n\n')
                    state = 2
                    camera.processing_frame = frame
                continue
            elif state == 2: # if motion has been detected
                # Remember when face-detection mode started (for the
                # 20-second fallback timer below).
                if frame_count == 0:
                    start = time.time()
                    frame_count += 1
                camera.faceBoxes = ImageProcessor.detectopencv_face(frame)
                #camera.faceBoxes = ImageProcessor.detect_cascade(frame, ImageProcessor.uppercascade)
                #camera.faceBoxes = ImageProcessor.detectdlib_face(frame,height,width)
                if len(camera.faceBoxes) == 0:
                    if (time.time() - start) > 20.0:
                        logging.debug('\n\n////////////////////// No faces found for ' + str(time.time() - start) + ' seconds - Going back to Motion Detection Mode\n\n')
                        state = 1
                        frame_count = 0;
                        #camera.processing_frame = frame
                else:
                    # Block here while the classifier is being retrained.
                    training_blocker = self.trainingEvent.wait()
                    logging.debug('\n\n////////////////////// '+ str(len(camera.faceBoxes)) +' FACES DETECTED //////////////////////\n\n')
                    camera.rgbFrame = ImageProcessor.convertImageToNumpyArray(frame,height,width) # conversion required by dlib methods
                    for x, y, w, h in camera.faceBoxes:
                    #for bb in camera.faceBoxes:
                        bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
                        faceimg = ImageProcessor.crop(frame, bb, dlibRect = True)
                        height, width, channels = faceimg.shape
                        # Second-pass dlib check to discard OpenCV false
                        # positives before the (expensive) recognition step.
                        if len(ImageProcessor.detectdlib_face(faceimg,height,width)) == 0 :
                            continue
                        alignedFace = ImageProcessor.align_face(camera.rgbFrame,bb)
                        predictions = ImageProcessor.face_recognition(alignedFace)
                        with camera.people_dict_lock:
                            # Keep only the highest-confidence sighting of
                            # each identity.
                            if camera.people.has_key(predictions['name']):
                                if camera.people[predictions['name']].confidence < predictions['confidence']:
                                    camera.people[predictions['name']].confidence = predictions['confidence']
                                    camera.people[predictions['name']].set_thumbnail(alignedFace)
                                    camera.people[predictions['name']].set_time()
                            else:
                                camera.people[predictions['name']] = Person(predictions['confidence'], alignedFace)
                    start = time.time() #used to go back to motion detection state of 20s of not finding a face
                    camera.processing_frame = frame
                    #camera.processing_frame = ImageProcessor.draw_rects_dlib(frame, camera.faceBoxes)
# def people_processing(self,camera):
# logging.debug('Ready to process faces')
# detectedFaces = 0
# faceBoxes = None
# while True:
# training_blocker = self.trainingEvent.wait()
# with camera.frame_lock: #aquire lock
# faceBoxes = camera.faceBoxes
# if faceBoxes is not None:
# detectedFaces = len(faceBoxes)
# #for bb in faceBoxes:
# for x, y, w, h in faceBoxes:
# bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
# alignedFace = ImageProcessor.align_face(camera.rgbFrame,bb)
# camera.unknownPeople.append(alignedFace) # add to array so that rgbFrame can be released earlier rather than waiting for recognition
# #cv2.imwrite("face.jpg", alignedFace)
# for face in camera.unknownPeople:
# predictions = ImageProcessor.face_recognition(camera,face)
# with camera.people_dict_lock:
# if camera.people.has_key(predictions['name']):
# if camera.people[predictions['name']].confidence < predictions['confidence']:
# camera.people[predictions['name']].confidence = predictions['confidence']
# camera.people[predictions['name']].set_thumbnail(face)
# else:
# camera.people[predictions['name']] = Person(predictions['confidence'], face)
# camera.unknownPeople = []
#
# def alert_engine(self): #check alarm state -> check camera -> check event -> either look for motion or look for detected faces -> take action
# # logging.debug('Alert engine starting')
# # # while True:
# # #
# # with self.alerts_lock:
# # for alert in self.alerts:
# # logging.debug('\nchecking alert\n')
# # if alert.action_taken == False: #if action hasn't been taken for event
# # if alert.alarmState != 'All': #check states
# # if alert.alarmState == self.alarmState:
# # logging.debug('checking alarm state')
# # alert.event_occurred = self.check_camera_events(alert)
# # else:
# # continue # alarm not in correct state check next alert
# # else:
# # alert.event_occurred = self.check_camera_events(alert)
# # else:
# # if (time.time() - alert.eventTime) > 3600: # reinitialize event 1 hour after event accured
# # print "reinitiallising alert: " + alert.id
# # alert.reinitialise()
# # continue
#
# time.sleep(2) #put this thread to sleep - let websocket update alerts if need be (delete or add)
# def check_camera_events(self,alert):
#
# if alert.camera != 'All': #check cameras
# if alert.event == 'Recognition': #Check events
#
# if (alert.person in self.cameras[int(alert.camera)].people.keys()): # has person been detected
# cv2.imwrite("notification/image.png", self.cameras[int(alert.camera)].processing_frame)#
# self.take_action(alert)
# # return True
#
# else:
# return False # person has not been detected check next alert
#
# else:
#
# if self.cameras[int(alert.camera)].motion == True: # has motion been detected
# cv2.imwrite("notification/image.png", self.cameras[int(alert.camera)].processing_frame)#
# self.take_action(alert)
# return True
#
# else:
# return False # motion was not detected check next alert
# else:
# if alert.event == 'Recognition': #Check events
#
# for camera in self.cameras: # look through all cameras
#
# if alert.person in camera.people.keys(): # has person been detected
# cv2.imwrite("notification/image.png", camera.processing_frame)#
# self.take_action(alert)
# return True
#
# else:
# return False # person has not been detected check next camera
#
# else:
# for camera in self.cameras: # look through all cameras
# if camera.motion == True: # has motion been detected
# cv2.imwrite("notification/image.png", camera.processing_frame)#
# self.take_action(alert)
# return True
#
# # else:
# # return False # motion was not detected check next camera
    def take_action(self,alert):
        """Execute the notification actions configured on *alert*, at most
        once per alert (guarded by alert.action_taken).

        NOTE(review): alert.actions values are compared against the string
        'true' -- presumably they come from a web form as strings; confirm.
        """
        print "Taking action: ======================================================="
        print alert.actions
        print "======================================================================"
        if alert.action_taken == False: #only take action if alert hasn't accured
            alert.eventTime = time.time()
            if alert.actions['push_alert'] == 'true':
                print "\npush notification being sent\n"
                self.send_push_notification(alert.alertString)
            if alert.actions['email_alert'] == 'true':
                print "\nemail notification being sent\n"
                self.send_email_notification_alert(alert.alertString)
            if alert.actions['trigger_alarm'] == 'true':
                print "\ntriggering alarm\n"
                self.trigger_alarm()
            if alert.actions['notify_police'] == 'true':
                print "\nnotifying police\n"
                #notify police
            # Mark handled so the alert engine does not fire it again.
            alert.action_taken = True
    def trainClassifier(self):
        """Full retraining pipeline: align training images, regenerate the
        embedding representation, then fit and persist the classifier.

        Face-processing threads are paused via trainingEvent while the
        .pkl classifier file is being rewritten.
        """
        self.trainingEvent.clear() #event used to hault face_processing threads to ensure no threads try access .pkl file while it is being updated
        path = self.fileDir + "/aligned-images/cache.t7"
        try:
            os.remove(path) # remove cache from aligned images folder
        except:
            # NOTE(review): bare except silently covers more than a missing
            # file; except OSError would be the targeted form.
            print "Tried to remove cache.t7"
            pass
        start = time.time()
        aligndlib.alignMain("training-images/","aligned-images/","outerEyesAndNose",self.args.dlibFacePredictor,self.args.imgDim)
        print("\nAligning images took {} seconds.".format(time.time() - start))
        done = False
        start = time.time()
        done = self.generate_representation()
        if done is True:
            print("Representation Generation (Classification Model) took {} seconds.".format(time.time() - start))
            start = time.time()
            #Train Model
            self.train("generated-embeddings/","LinearSvm",-1)
            print("Training took {} seconds.".format(time.time() - start))
        else:
            print("Generate representation did not return True")
        self.trainingEvent.set() #threads can continue processing
        return True
    def generate_representation(self):
        """Run the Torch batch-represent script to produce labels.csv and
        reps.csv from the aligned images.  Blocks until the subprocess
        exits; always returns True."""
        #2 Generate Representation
        print "\n" + self.luaDir + "\n"
        self.cmd = ['/usr/bin/env', 'th', os.path.join(self.luaDir, 'main.lua'),'-outDir', "generated-embeddings/" , '-data', "aligned-images/"]
        if self.args.cuda:
            self.cmd.append('-cuda')
        self.p = Popen(self.cmd, stdin=PIPE, stdout=PIPE, bufsize=0)
        result = self.p.wait() # wait for subprocess to finish writing to files - labels.csv reps.csv
        def exitHandler():
            # Safety net at interpreter exit: kill the torch process if it
            # is somehow still running.  NOTE(review): the return value of
            # an atexit handler is ignored, so `return False` has no effect.
            if self.p.poll() is None:
                print "======================Something went Wrong============================"
                self.p.kill()
                return False
        atexit.register(exitHandler)
        return True
    def train(self,workDir,classifier,ldaDim):
        """Fit a classifier on the embeddings in *workDir* and pickle it.

        classifier -- 'LinearSvm' or 'GMM'.  NOTE(review): any other value
        leaves `clf` unbound and raises NameError at clf.fit().
        ldaDim     -- if > 0, wrap the classifier in an LDA reduction step.
        """
        print("Loading embeddings.")
        fname = "{}/labels.csv".format(workDir)
        labels = pd.read_csv(fname, header=None).as_matrix()[:, 1]
        labels = map(itemgetter(1),
                     map(os.path.split,
                         map(os.path.dirname, labels)))  # Get the directory.
        fname = "{}/reps.csv".format(workDir)
        embeddings = pd.read_csv(fname, header=None).as_matrix()
        # Encode string identities as integer class labels.
        le = LabelEncoder().fit(labels)
        labelsNum = le.transform(labels)
        nClasses = len(le.classes_)
        print("Training for {} classes.".format(nClasses))
        if classifier == 'LinearSvm':
            clf = SVC(C=1, kernel='linear', probability=True)
        elif classifier == 'GMM':
            clf = GMM(n_components=nClasses)
        if ldaDim > 0:
            clf_final = clf
            clf = Pipeline([('lda', LDA(n_components=ldaDim)),
                            ('clf', clf_final)])
        clf.fit(embeddings, labelsNum)
        # Persist (label-encoder, classifier) together for later decoding.
        fName = "{}/classifier.pkl".format(workDir)
        print("Saving classifier to '{}'".format(fName))
        with open(fName, 'w') as f:
            pickle.dump((le, clf), f)
    def send_email_notification_alert(self,alertMessage):
        """Email *alertMessage* with notification/image.png attached via
        Gmail SMTP.

        SECURITY(review): account credentials are hard-coded below -- they
        should be moved to configuration/environment variables.
        """
        # code produced in this tutorial - http://naelshiab.com/tutorial-send-email-python/
        fromaddr = "home.face.surveillance@gmail.com"
        toaddr = "bjjoffe@gmail.com"
        msg = MIMEMultipart()
        msg['From'] = fromaddr
        msg['To'] = toaddr
        msg['Subject'] = "HOME SURVEILLANCE NOTIFICATION"
        body = "NOTIFICATION ALERT\n_______________________\n" + alertMessage + "\n"
        msg.attach(MIMEText(body, 'plain'))
        # Attach the snapshot written by the alert engine.
        filename = "image.png"
        attachment = open("notification/image.png", "rb")
        part = MIMEBase('application', 'octet-stream')
        part.set_payload((attachment).read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
        msg.attach(part)
        server = smtplib.SMTP('smtp.gmail.com', 587)
        server.starttls()
        server.login(fromaddr, "facialrecognition")
        text = msg.as_string()
        server.sendmail(fromaddr, toaddr, text)
        server.quit()
    def send_push_notification (self,alarmMesssage): # pip install instapush
        """Send *alarmMesssage* as an Instapush push notification.

        SECURITY(review): the Instapush app id/secret are hard-coded.
        """
        #insta = Instapush(user_token='57c5f710a4c48a6d45ee19ce')
        #insta.list_app() #List all apps
        #insta.add_app(title='Home Surveillance') #Create a app named title
        app = App(appid='57c5f92aa4c48adc4dee19ce', secret='2ed5c7b8941214510a94cfe4789ddb9f')
        #app.list_event() #List all event
        #app.add_event(event_name='FaceDetected', trackers=['message'],
        #              message='{message} face detected.')
        app.notify(event_name='FaceDetected', trackers={'message': "NOTIFICATION ALERT\n_______________________\n" + alarmMesssage})
    def add_face(self,name,image):
        """Save *image* into aligned-images/<name>/ (creating the directory
        if needed) and refresh the in-memory people database.

        Returns True on success, False if the directory cannot be created.
        """
        path = self.fileDir + "/aligned-images/"
        num = 0
        if not os.path.exists(path + name):
            try:
                print "Creating New Face Dircectory: " + name
                os.makedirs(path+name)
            except OSError:
                # NOTE(review): this prints the OSError *class*, not the
                # caught instance, and the `pass` after return is dead code.
                print OSError
                return False
                pass
        else:
            # Directory exists: number the new image after the existing ones.
            num = len([nam for nam in os.listdir(path +name) if os.path.isfile(os.path.join(path+name, nam))])
        print "Writing Image To Directory: " + name
        cv2.imwrite(path+name+"/"+ name + "-"+str(num) + ".png", image)
        self.getFaceDatabaseNames()
        return True
    def getFaceDatabaseNames(self):
        """Rebuild self.peopleDB from the aligned-images/ directory names,
        skipping cache files and unknown_* folders, and always appending a
        literal 'unknown' entry at the end."""
        path = self.fileDir + "/aligned-images/"
        self.peopleDB = []
        for name in os.listdir(path):
            if (name == 'cache.t7' or name == '.DS_Store' or name[0:7] == 'unknown'):
                continue
            self.peopleDB.append(name)
            print name
        self.peopleDB.append('unknown')
# def change_alarmState(self):
# r = requests.post('http://192.168.1.35:5000/change_state', data={"password": "admin"})
# alarm_states = json.loads(r.text)
#
# print alarm_states
#
# if alarm_states['state'] == 1:
# self.alarmState = 'Armed'
# else:
# self.alarmState = 'Disarmed'
#
# self.alarmTriggerd = alarm_states['triggered']
# def trigger_alarm(self):
#
# r = requests.post('http://192.168.1.35:5000/trigger', data={"password": "admin"})
# alarm_states = json.loads(r.text)
#
# print alarm_states
#
# if alarm_states['state'] == 1:
# self.alarmState = 'Armed'
# else:
# self.alarmState = 'Disarmed'
#
# self.alarmTriggerd = alarm_states['triggered']
# print self.alarmTriggerd
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
class Person(object):
    """A person sighted by a camera: identity, recognition confidence,
    timestamp of the last sighting and a JPEG thumbnail of the face."""
    # Class-wide counter used to mint unknown_<n> identities.
    person_count = 0
    def __init__(self,confidence, face):
        #self.personCoord = personCoord
        #oldtimetuple = time.localtime(EpochSeconds)
        # oldtimetuple contains (year, month, day, hour, minute, second, weekday, yearday, daylightSavingAdjustment)
        self.identity = "unknown_" + str(Person.person_count)
        self.confidence = confidence
        # NOTE(review): +2 hours is a hard-coded local-time offset -- a
        # proper timezone handling would be preferable; confirm intent.
        now = datetime.now() + timedelta(hours=2)
        self.time = now.strftime("%A %d %B %Y %I:%M:%S%p")
        self.face = face
        ret, jpeg = cv2.imencode('.jpg', face) #convert to jpg to be viewed by client
        self.thumbnail = jpeg.tostring()
        Person.person_count += 1
        #self.tracker = dlib.correlation_tracker()
        print self.time
    def get_identity(self):
        return self.identity
    def set_time(self):
        # Refresh the last-seen timestamp (same +2h offset as __init__).
        now = datetime.now() + timedelta(hours=2)
        self.time = now.strftime("%A %d %B %Y %I:%M:%S%p")
    def set_thumbnail(self, face):
        # Replace the stored face and its JPEG-encoded thumbnail.
        self.face = face
        ret, jpeg = cv2.imencode('.jpg', face) #convert to jpg to be viewed by client
        self.thumbnail = jpeg.tostring()
# def recognize_face(self):
# return
# def update_position(self, newCoord):
# self.personCoord = newCoord
# def get_current_position(self):
# return self.personCoord
# def start_tracking(self,img):
# self.tracker.start_track(img, dlib.rectangle(self.personCoord))
# def update_tracker(self,img):
# self.tracker.update(img)
# def get_position(self):
# return self.tracker.get_position()
# def find_face(self):
# return
# tracking = FaceTracking(detect_min_size=detect_min_size,
# detect_every=detect_every,
# track_min_overlap_ratio=track_min_overlap_ratio,
# track_min_confidence=track_min_confidence,
# track_max_gap=track_max_gap)
# class Alert(object): #array of alerts alert(camera,alarmstate(use switch statements), event(motion recognition),)
#
# alert_count = 0
#
# def __init__(self,alarmState,camera, event, person, actions):
# print "\n\nalert_"+str(Alert.alert_count)+ " created\n\n"
#
#
# if event == 'Motion':
# self.alertString = "Motion detected in camera " + camera
# else:
# self.alertString = person + " was recognised in camera " + camera
#
# self.id = "alert_"+str(Alert.alert_count)
# self.event_occurred = False
# self.action_taken = False
# self.camera = camera
# self.alarmState = alarmState
# self.event = event
# self.person = person
# self.actions = actions
#
# self.eventTime = 0
#
# Alert.alert_count += 1
#
# def reinitialise(self):
# self.event_occurred = False
# self.action_taken = False
#
# def set_custom_alertmessage(self,message):
# self.alertString = message
| [
"Tinjo@Martins-MacBook-Pro.local"
] | Tinjo@Martins-MacBook-Pro.local |
0a466ab4c471f29f88812abd62fdb5154d70f76f | 8602ae218fb482d0149946cd38b1bd592b838892 | /setup.py | 72dec14cf6606bee332684a3bcd962d997304116 | [] | no_license | tfyr/telegram | 60904a12c59fb7bd1b24e5b5561d1018469be1a1 | 1f8adccf850f4ad8f0b354a7b314a7dd2ebc811a | refs/heads/master | 2021-08-11T18:31:55.106083 | 2021-07-31T19:46:43 | 2021-07-31T19:46:43 | 218,128,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | import os
from setuptools import setup
# Long description is read from the README next to this file.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Package metadata; several fields (license, description, url) are still
# placeholders.
setup(
    name='telegram',
    version='0.1.6',
    packages=['telegram'],
    include_package_data=True,
    license='', # example license
    description='',
    long_description=README,
    url='',
    author='Nail Sharipov',
    author_email='nash34@gmail.com',
    classifiers=[
    ],
)
| [
"nash34@gmail.com"
] | nash34@gmail.com |
ba2a9ab49b7a95378a4cba212f75be9cc1440738 | 85f33a2ba7da34e9b083d755b8eb613a0f3fbac9 | /deploy/fabfile.py | 6c0612a17c2c2c6ab94dc9261c68ee74e696e60d | [] | no_license | AR-S/Nodes | 566bf2ff6bb028c9f7a5dbd0d7b1ccb873f3a401 | 93d046bc949406e5a3854bba0754f532fe6eaae0 | refs/heads/master | 2021-01-18T05:43:14.561742 | 2016-09-14T02:39:05 | 2016-09-14T02:39:05 | 47,211,701 | 0 | 1 | null | 2016-02-09T20:51:58 | 2015-12-01T19:18:53 | Python | UTF-8 | Python | true | false | 728 | py | #!/bin/env python
# To run: fab deploy -I
# then enter password manually
import os
from fabric.api import cd, env, run, put, get
from fabric.contrib.files import exists
from fabric.operations import sudo
from fabric.colors import *
# @note: rest of settings in your ~/.fabricrc file
env.hosts = ['pi@ars-nodes.duckdns.org:2222']
env.app_name = 'massage'
def host_type():
    # Print the remote kernel name -- doubles as a connectivity check.
    run('uname -s')
def upload():
    """Push website.zip to the host, unpack it and install it under
    /var/www with www-data ownership."""
    upload = put("website.zip", "website.zip")  # NOTE(review): local name shadows this function
    if not upload.succeeded:
        print(red("something went wrong with the upload"))
    else:
        run('unzip -o website.zip')
        sudo('cp -R website/ /var/www')
        sudo("chown -R www-data:www-data /var/www" )
def deploy():
    # Full deployment: verify the host is reachable, then install the site.
    host_type()
    upload()
| [
"dropmeaword@gmail.com"
] | dropmeaword@gmail.com |
6165913d67d223cb252eea0612eb7a4db7b7fc4e | 56a795bd6ade3acc226f7b8852ed85d57d0a5a61 | /ipc/Poet.py | 237294301e6980c310cd2642eff801599a3acb56 | [
"MIT"
class Poet:
    """Registry of classical poets."""

    # Number of poets returned by the most recent get_poets() call.
    poets_counts = 0

    @staticmethod
    def get_poets():
        """Return the poet names and record how many there are."""
        poem_counts = {"prvinaEtsami": 199, "nzami": 313, "mlkalshEraibhar": 856, "hafz": 518, "sEdi": 1393, "frdvsi": 522,
                       "snaii": 1534, "rvdki": 452, "khiam": 272, "jami": 843, "shhriar": 136, "mvlvi": 5265}
        names = list(poem_counts)
        Poet.poets_counts = len(names)
        return names
| [
"mehdi.sh4rifi96@gmail.com"
] | mehdi.sh4rifi96@gmail.com |
b97790f9903ea63895ce60c71be0ba8f92085f87 | 15043be5ea5765e449df85d4cefba8b10727f892 | /Other/cutsignalimg.py | 0664084dce431494523500b93c8bfc615f09f760 | [] | no_license | HyperSignal/HyperSignal | f4eee84f155537f8d1d63c693fa01e442b572ba4 | 3f7de0a77d5c23a4fb3a63bba118f55a83d769e4 | refs/heads/master | 2016-09-05T19:53:48.513363 | 2014-11-09T02:10:07 | 2014-11-09T02:10:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | #!/usr/bin/env python
#-*- coding: UTF-8 -*-
import Image
# Slice levels.png (an 8x4 grid of 200x200 tiles) into 32 individual
# signal-level images named signal_0.png .. signal_31.png.
im = Image.open("levels.png")
for x in range(8):
    for y in range(4):
        # Tile index: column-major numbering (4 tiles per column).
        siglvl = x*4 + y
        print "Saving image signal_%d.png" % siglvl
        im.crop((x*200,y*200,200+x*200,200+y*200)).save("signal_%d.png" %siglvl);
| [
"lucas@teske.net.br"
] | lucas@teske.net.br |
6503fb48d2a170342d8b2a5ee3a05ef55d8e13a0 | 20aeb3d27359e88463c82a2f7eedb0db3face4f3 | /ecommapp/migrations/0006_auto_20200921_0225.py | a0efc82e18e27a93dd0caff24e1892cb5bab3499 | [] | no_license | rpinedaec/pacha4Grupo4 | 2d3adb158836b97147708880ea996572646e3cde | b05a30eeb93789c4925df05ad7bd9e8a30acb45b | refs/heads/master | 2022-12-25T11:08:21.451571 | 2020-09-26T19:32:43 | 2020-09-26T19:32:43 | 294,994,684 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py | # Generated by Django 3.1.1 on 2020-09-21 07:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reshape the custom ``cliente`` model.

    Renames ``name`` to ``nombre``, strips the fields typical of Django's
    AbstractUser layout (permissions, staff/superuser flags, timestamps,
    name parts), and settles on a minimal username/email/password schema.
    Auto-generated by ``makemigrations``; operation bodies left untouched.
    """

    dependencies = [
        ('ecommapp', '0005_auto_20200920_2318'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='cliente',
            options={},
        ),
        migrations.RenameField(
            model_name='cliente',
            old_name='name',
            new_name='nombre',
        ),
        # Drop the AbstractUser-style fields no longer used by 'cliente'.
        migrations.RemoveField(
            model_name='cliente',
            name='date_joined',
        ),
        migrations.RemoveField(
            model_name='cliente',
            name='first_name',
        ),
        migrations.RemoveField(
            model_name='cliente',
            name='groups',
        ),
        migrations.RemoveField(
            model_name='cliente',
            name='is_active',
        ),
        migrations.RemoveField(
            model_name='cliente',
            name='is_staff',
        ),
        migrations.RemoveField(
            model_name='cliente',
            name='is_superuser',
        ),
        migrations.RemoveField(
            model_name='cliente',
            name='last_login',
        ),
        migrations.RemoveField(
            model_name='cliente',
            name='last_name',
        ),
        migrations.RemoveField(
            model_name='cliente',
            name='user_permissions',
        ),
        # default=1 backfills existing rows once; not kept afterwards.
        migrations.AddField(
            model_name='cliente',
            name='username',
            field=models.CharField(default=1, max_length=200),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='cliente',
            name='email',
            field=models.EmailField(max_length=254),
        ),
        migrations.AlterField(
            model_name='cliente',
            name='password',
            field=models.CharField(max_length=200),
        ),
    ]
| [
"rpineda@zegelipae.edu.pe"
] | rpineda@zegelipae.edu.pe |
a90cd4d5bf5d588410d769c97cfa33f4a39619c4 | d0eb9e95c796042067aceaf0fc3d43f56d4eb87b | /Tests/PootyTests.py | a75560eb9533c2d64a49521fcaed266ae119f381 | [] | no_license | finneyjm/RynLib | ea0fd0f8ccd21fdac4663d5fb2b6836efce49a10 | 42e7d07ff879f72ae163f682cb07ba7489ce0a06 | refs/heads/master | 2021-02-16T15:30:02.181769 | 2020-03-05T19:20:22 | 2020-03-05T19:20:22 | 245,020,012 | 0 | 0 | null | 2020-03-04T22:47:09 | 2020-03-04T22:47:08 | null | UTF-8 | Python | false | false | 1,094 | py | from Peeves.TestUtils import *
from unittest import TestCase
from PootyAndTheBlowfish.Templator import *
from PootyAndTheBlowfish.PotentialTemplator import PotentialTemplate
import sys
class PootyTests(TestCase):
    """Exercises the Pooty template writers; both cases are currently disabled."""

    @inactiveTest
    def test_ApplyBaseTemplate(self):
        """Instantiate the base potential template and check one emitted file."""
        import os
        working_dir = os.getcwd()
        template_dir = os.path.join(working_dir, "RynLib", "PootyAndTheBlowfish", "Templates", "PotentialTemplate")
        template_writer = TemplateWriter(template_dir, LibName="ploot")
        target_dir = os.path.expanduser("~/Desktop")
        template_writer.iterate_write(target_dir)
        expected_file = os.path.join(target_dir, "plootPot", "src", "CMakeLists.txt")
        self.assertTrue(os.path.exists(expected_file))

    @inactiveTest
    def test_SimplePotential(self):
        """Generate a trivial potential package from the DumbPot test data."""
        import os
        potential_writer = PotentialTemplate(
            lib_name="DumbPot",
            function_name="DumbPot",
            linked_libs=["DumbPot"],
            potential_source=TestManager.test_data("DumbPot"),
            requires_make=True,
        )
        target_dir = os.path.expanduser("~/Desktop")
        potential_writer.apply(target_dir)
| [
"b3m2a1@gmail.com"
] | b3m2a1@gmail.com |
de52e61d7c3d25d3d0779e71c8924797473d23f1 | 1421fbb6b2154276f1c8f3375c60bbd5aacf5be1 | /maddpg/maddpg/trainer/maddpg.py | bfd2596e02dd9365b772e4f27386e5b7cb4d8849 | [
"MIT"
] | permissive | Renzhenxuexidemaimai/maddpg_3v3 | 9970d07ac97afa69e849cb7e3493f2c7a536f835 | e074dfffb0eb94dc39df6a9ff6f1b450223fed24 | refs/heads/master | 2022-04-19T09:27:55.735480 | 2020-04-16T06:33:34 | 2020-04-16T06:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,329 | py | import numpy as np
import random
import tensorflow as tf
import maddpg.common.tf_util as U
from maddpg.common.distributions import make_pdtype
from maddpg import AgentTrainer
from maddpg.trainer.replay_buffer import ReplayBuffer
def discount_with_dones(rewards, dones, gamma):
    """Compute discounted returns over a trajectory, resetting at episode ends.

    Walks the rewards backwards, accumulating ``r_t = reward_t + gamma * r_{t+1}``
    and zeroing the *bootstrapped* part whenever ``dones[t]`` marks an episode
    boundary.

    Args:
        rewards: per-step rewards.
        dones: 0/1 (or bool) episode-termination flags, same length as rewards.
        gamma: discount factor.

    Returns:
        List of discounted returns aligned with ``rewards``.
    """
    discounted = []
    r = 0
    for reward, done in zip(rewards[::-1], dones[::-1]):
        # Bug fix: the original computed r = (reward + gamma * r) * (1 - done),
        # which also discarded the terminal step's own reward. Only the return
        # propagated from the future should be cut at an episode boundary
        # (cf. OpenAI baselines a2c utils.discount_with_dones).
        r = reward + gamma * r * (1. - done)
        discounted.append(r)
    return discounted[::-1]
def make_update_exp(vals, target_vals):
    """Build a callable that Polyak-averages `vals` into `target_vals`.

    Variables are paired by sorted name; each target is moved a small step
    (1% per call) toward its source variable.
    """
    polyak = 1.0 - 1e-2
    sources = sorted(vals, key=lambda v: v.name)
    targets = sorted(target_vals, key=lambda v: v.name)
    assigns = [
        tgt.assign(polyak * tgt + (1.0 - polyak) * src)
        for src, tgt in zip(sources, targets)
    ]
    expression = tf.group(*assigns)
    return U.function([], [], updates=[expression])
def p_train(make_obs_ph_n, act_space_n, p_index, p_func, q_func, optimizer, grad_norm_clipping=None,
            local_q_func=False, num_units=64, scope="trainer", reuse=tf.AUTO_REUSE):
    """Build the policy (actor) training graph for agent `p_index`.

    Returns (act_test, act, train, update_target_p, debug_dict):
      act_test        -- feed one obs batch, get raw policy network output
      act             -- feed one obs batch, get sampled actions
      train           -- feed all obs + actions, run one optimizer step, return loss
      update_target_p -- Polyak-average policy vars into the target policy net
      debug_dict      -- 'p_values'/'target_act' callables plus variable lists

    When `local_q_func` is True the critic sees only this agent's obs/action
    (DDPG); otherwise it sees the joint obs/action of all agents (MADDPG).
    """
    with tf.variable_scope(scope, reuse=reuse):
        # create action distributions, one per agent
        act_pdtype_n = [make_pdtype(act_space) for act_space in act_space_n]

        # set up placeholders
        obs_ph_n = make_obs_ph_n
        act_ph_n = [act_pdtype_n[i].sample_placeholder([None], name="action" + str(i)) for i in range(len(act_space_n))]

        p_input = obs_ph_n[p_index]

        # policy network: maps this agent's obs to distribution parameters
        p = p_func(p_input, int(act_pdtype_n[p_index].param_shape()[0]), scope="p_func", num_units=num_units)
        p_func_vars = U.scope_vars(U.absolute_scope_name("p_func"))

        # wrap parameters in distribution
        act_pd = act_pdtype_n[p_index].pdfromflat(p)

        act_sample = act_pd.sample()
        p_reg = tf.reduce_mean(tf.square(act_pd.flatparam()))  # L2 penalty on raw policy outputs

        # substitute a fresh policy sample for this agent's action placeholder
        act_input_n = act_ph_n + []
        act_input_n[p_index] = act_pd.sample()
        q_input = tf.concat(obs_ph_n + act_input_n, 1)
        if local_q_func:
            q_input = tf.concat([obs_ph_n[p_index], act_input_n[p_index]], 1)
        # reuse=True: the critic graph was already created under "q_func" by q_train
        q = q_func(q_input, 1, scope="q_func", reuse=True, num_units=num_units)[:, 0]
        pg_loss = -tf.reduce_mean(q)  # ascend the critic's value of the sampled action

        loss = pg_loss + p_reg * 1e-3

        optimize_expr = U.minimize_and_clip(optimizer, loss, p_func_vars, grad_norm_clipping)

        # Create callable functions
        train = U.function(inputs=obs_ph_n + act_ph_n, outputs=loss, updates=[optimize_expr])
        act = U.function(inputs=[obs_ph_n[p_index]], outputs=act_sample)
        act_test = U.function(inputs=[obs_ph_n[p_index]], outputs=p)
        p_values = U.function([obs_ph_n[p_index]], p)

        # target network (slow-moving copy used when forming TD targets)
        target_p = p_func(p_input, int(act_pdtype_n[p_index].param_shape()[0]), scope="target_p_func",
                          num_units=num_units)
        target_p_func_vars = U.scope_vars(U.absolute_scope_name("target_p_func"))
        update_target_p = make_update_exp(p_func_vars, target_p_func_vars)

        target_act_sample = act_pdtype_n[p_index].pdfromflat(target_p).sample()
        target_act = U.function(inputs=[obs_ph_n[p_index]], outputs=target_act_sample)

        return act_test, act, train, update_target_p, {'p_values': p_values, 'target_act': target_act,
                                                       'p_vars': p_func_vars, 'target_p_vars': target_p_func_vars}
def q_train(make_obs_ph_n, act_space_n, q_index, q_func, optimizer, grad_norm_clipping=None,
            local_q_func=False, scope="trainer", reuse=tf.AUTO_REUSE, num_units=64):
    """Build the critic training graph for agent `q_index`.

    Returns (train, update_target_q, debug_dict):
      train           -- feed all obs + actions + TD targets, one optimizer step, return loss
      update_target_q -- Polyak-average critic vars into the target critic
      debug_dict      -- 'q_values'/'target_q_values' callables plus variable lists

    When `local_q_func` is True the critic conditions only on this agent's
    obs/action (DDPG); otherwise on the joint obs/action (MADDPG).
    """
    with tf.variable_scope(scope, reuse=reuse):
        # create action distributions, one per agent
        act_pdtype_n = [make_pdtype(act_space) for act_space in act_space_n]

        # set up placeholders
        obs_ph_n = make_obs_ph_n
        act_ph_n = [act_pdtype_n[i].sample_placeholder([None], name="action" + str(i)) for i in range(len(act_space_n))]
        target_ph = tf.placeholder(tf.float32, [None], name="target")  # TD target y

        q_input = tf.concat(obs_ph_n + act_ph_n, 1)
        if local_q_func:
            q_input = tf.concat([obs_ph_n[q_index], act_ph_n[q_index]], 1)
        q = q_func(q_input, 1, scope="q_func", num_units=num_units)[:, 0]
        q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))

        q_loss = tf.reduce_mean(tf.square(q - target_ph))  # mean squared TD error

        # viscosity solution to Bellman differential equation in place of an initial condition
        q_reg = tf.reduce_mean(tf.square(q))
        loss = q_loss  # + 1e-3 * q_reg

        optimize_expr = U.minimize_and_clip(optimizer, loss, q_func_vars, grad_norm_clipping)

        # Create callable functions
        train = U.function(inputs=obs_ph_n + act_ph_n + [target_ph], outputs=loss, updates=[optimize_expr])
        q_values = U.function(obs_ph_n + act_ph_n, q)

        # target network (slow-moving copy used to form stable TD targets)
        target_q = q_func(q_input, 1, scope="target_q_func", num_units=num_units)[:, 0]
        target_q_func_vars = U.scope_vars(U.absolute_scope_name("target_q_func"))
        update_target_q = make_update_exp(q_func_vars, target_q_func_vars)

        target_q_values = U.function(obs_ph_n + act_ph_n, target_q)

        return train, update_target_q, {'q_values': q_values, 'target_q_values': target_q_values, 'q_vars': q_func_vars,
                                        'target_q_vars': target_q_func_vars}
class Actor(AgentTrainer):
    """Policy ("actor") wrapper around the p_train graph for one agent.

    Exposes the callables built by p_train:
      act       -- sampled (exploration) action for an obs batch
      act_test  -- raw policy output, used for deterministic evaluation
      p_train   -- one SGD step on the policy loss
      p_update  -- Polyak update of the target policy network
      p_debug   -- dict with 'p_values', 'target_act' and variable lists
    """

    def __init__(self, name, model, obs_ph_n, act_space_n, agent_index, args, local_q_func=False):
        self.name = name
        self.args = args
        # get tensorflow callables from p_train
        self.act_test, self.act, self.p_train, self.p_update, self.p_debug = p_train(
            scope=self.name,
            make_obs_ph_n=obs_ph_n,
            act_space_n=act_space_n,
            p_index=agent_index,
            p_func=model,
            q_func=model,
            optimizer=tf.train.AdamOptimizer(learning_rate=args.lr),
            grad_norm_clipping=0.5,
            local_q_func=local_q_func,
            num_units=args.num_units
        )

    def action(self, obs):
        """Sample an action for a single observation (adds/strips the batch dim)."""
        return self.act(obs[None])[0]

    def action_test(self, obs):
        """Raw (deterministic) policy output for a single observation."""
        return self.act_test(obs[None])[0]

    def preupdate(self):
        return

    def update(self, obs_n, act_n):
        """Run one policy-gradient step and move the target policy network.

        Bug fix: the original wrote ``self.p_update`` -- a bare attribute
        reference with no call -- so the target policy was never updated
        through this path (contrast MADDPGAgentTrainer.update, which calls
        ``self.actor.p_update()``).
        """
        p_loss = self.p_train(*(obs_n + act_n))
        self.p_update()
        return p_loss
class Critic(AgentTrainer):
    """Centralized critic wrapper around the q_train graph for one agent.

    Exposes:
      q_train  -- one TD step given joint obs, actions and targets
      q_update -- Polyak update of the target critic network
      q_debug  -- dict with 'q_values'/'target_q_values' callables
    """

    def __init__(self, name, model, obs_ph_n, act_space_n, agent_index, args, local_q_func=False):
        self.name = name
        self.args = args
        # get tensorflow callables from q_train
        self.q_train, self.q_update, self.q_debug = q_train(
            scope=self.name,
            make_obs_ph_n=obs_ph_n,
            act_space_n=act_space_n,
            q_index=agent_index,
            q_func=model,
            optimizer=tf.train.AdamOptimizer(learning_rate=args.lr),
            grad_norm_clipping=0.5,
            local_q_func=local_q_func,
            num_units=args.num_units
        )

    def update(self, obs_n, act_n, target_q):  # target_q must not be a list type
        # One mean-squared TD step toward target_q, then move the target critic.
        q_loss = self.q_train(*(obs_n + act_n + [target_q]))
        self.q_update()
        return q_loss
class MADDPGAgentTrainer(AgentTrainer):
    """One MADDPG agent: an Actor, a centralized Critic and a replay buffer.

    The replay buffer is shared/injected; a sampled batch carries the joint
    observations/actions of all `n` agents (axis 1 indexes the agent), while
    rewards/dones are taken only for `agent_index`.
    """

    def __init__(self, name, model, obs_shape_n, act_space_n, agent_index, args, replay_buffer, local_q_func=False):
        self.name = name
        self.n = len(obs_shape_n)  # total number of agents in the game
        self.agent_index = agent_index  # which agent this trainer controls
        self.args = args
        obs_ph_n = []
        for i in range(self.n):
            obs_ph_n.append(U.BatchInput(obs_shape_n[i], name="observation" + str(i)).get())

        # Create all the functions necessary to train the model
        self.critic = Critic(name, model, obs_ph_n, act_space_n, agent_index, args, local_q_func)
        self.actor = Actor(name, model, obs_ph_n, act_space_n, agent_index, args, local_q_func)
        # According to the tensorflow scope p_train and q_train, q_train must be in the front of p_train
        # Create experience buffer
        self.replay_buffer = replay_buffer  # ReplayBuffer(1e6)
        self.max_replay_buffer_len = args.batch_size * args.max_episode_len
        self.replay_sample_index = None

    def action(self, obs):
        # sampled (exploration) action for a single observation
        return self.actor.act(obs[None])[0]

    def action_test(self, obs):
        # deterministic policy output used for evaluation
        return self.actor.act_test(obs[None])[0]

    def experience(self, obs, act, rew, new_obs, done, terminal):
        # Store transition in the replay buffer.
        self.replay_buffer.add(obs, act, rew, new_obs, float(done))

    def preupdate(self):
        self.replay_sample_index = None

    def update(self, agents, t):
        """Sample a joint batch and update critic then actor.

        Returns [q_loss, p_loss, mean(target_q), mean(rew),
        mean(target_q_next), std(target_q)], or None when skipped (buffer
        too small, or t not a multiple of 100).
        """
        if len(self.replay_buffer) < self.max_replay_buffer_len:  # replay buffer is not large enough
            return
        if not t % 100 == 0:  # only update every 100 steps
            return

        self.replay_sample_index = self.replay_buffer.make_index(self.args.batch_size)
        # collect replay sample from all agents
        obs_n = []
        obs_next_n = []
        act_n = []
        index = self.replay_sample_index
        # for i in range(self.n):
        #     obs, act, rew, obs_next, done = agents[i].replay_buffer.sample_index(index)
        #     obs_n.append(obs)
        #     obs_next_n.append(obs_next)
        #     act_n.append(act)
        # obs, act, rew, obs_next, done = self.replay_buffer.sample_index(index)
        # batch arrays are indexed [batch, agent, ...]; split per agent below
        obs_n_a, act_n_a, rew_n_a, obs_next_n_a, done_n_a = self.replay_buffer.sample_index(index)
        # obs_n_a = np.reshape(obs_n_a, [self.n, self.args.batch_size, -1])
        # act_n_a = np.reshape(act_n_a, [self.n, self.args.batch_size, -1])
        # rew_n_a = np.reshape(rew_n_a, [self.n, self.args.batch_size])
        # obs_next_n_a = np.reshape(obs_next_n_a, [self.n, self.args.batch_size, -1])
        # done_n_a = np.reshape(done_n_a, [self.n, self.args.batch_size])
        for i in range(self.n):
            obs_n.append(obs_n_a[:, i, :])
            obs_next_n.append(obs_next_n_a[:, i, :])
            act_n.append(act_n_a[:, i, :])
        rew, done = rew_n_a[:, self.agent_index], done_n_a[:, self.agent_index]
        # print(rew.shape, done.shape)
        # print(rew)
        # print(done)
        # train q network: 1-step TD target using target actor + target critic
        num_sample = 1
        target_q = 0.0
        for j in range(num_sample):
            #####green nodes take ddpg
            # target_act_next_n = [agents[i].p_debug['target_act'](obs_next_n[i]) for i in range(self.n)]
            target_act_next_n = [agents[i].actor.p_debug['target_act'](obs_next_n[i]) for i in range(self.n)]
            # print("target_act_next_n", target_act_next_n[0].shape)
            target_q_next = self.critic.q_debug['target_q_values'](*(obs_next_n + target_act_next_n))
            # print("target_q_next", target_q_next.shape)
            target_q += rew + self.args.gamma * (1.0 - done) * target_q_next
        target_q /= num_sample
        # target_q = np.mean(target_q, axis=1)
        # print(target_q.shape)
        q_loss = self.critic.q_train(*(obs_n + act_n + [target_q]))

        # train p network
        p_loss = self.actor.p_train(*(obs_n + act_n))

        # move both target networks (Polyak averaging)
        self.actor.p_update()
        self.critic.q_update()

        return [q_loss, p_loss, np.mean(target_q), np.mean(rew), np.mean(target_q_next), np.std(target_q)]
| [
"linyangfei0934@aliyun.com"
] | linyangfei0934@aliyun.com |
499897b60d5f3f3d297081f748b267c4041d710e | d1e9e53385b6ef90e074367d022e5875814c0170 | /basketapp/urls.py | eb742431acd24073a1b265b5e484a0b68243f800 | [] | no_license | semenchuke/virtualshop | 21188a19bfe243e359bc2202f5421acb8b1a9ef0 | ad3e9e28f6a7f75acdbcdbb2023eb149fb115992 | refs/heads/master | 2020-03-22T15:52:51.690607 | 2018-07-09T13:22:16 | 2018-07-09T13:22:16 | 140,286,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from django.urls import re_path
import basketapp.views as basketapp
app_name = 'basketapp'
urlpatterns = [
re_path('^$', basketapp.basket, name = 'watch_in'),
re_path('^add/(?P<pk>\d+)/$', basketapp.add, name = 'add'),
re_path('^remove/(?P<pk>\d+)/$', basketapp.remove, name = 'remove'),
re_path('^edit/(?P<pk>\d+)/(?P<value>\d+)/$', basketapp.edit),
] | [
"semenchuk-e@inbox.ru"
] | semenchuk-e@inbox.ru |
bf4e62fb9514b84a0e8edafa16b0a15ec49f0045 | 63f86fc8537c8effadaf4969664187ba272bae8e | /game_agent.py | 01d6fe0b181272de9b673b487e4f2707ad3ececf | [] | no_license | winstonj/aind_project_2 | c4902acca12cc482ffe1771a5a15a88215196ede | e893dd48e868b0989a3ca420411fa395cbc08c12 | refs/heads/master | 2021-09-06T02:01:54.508107 | 2018-02-01T13:39:51 | 2018-02-01T13:39:51 | 119,839,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,775 | py | """Finish all TODO items in this file to complete the isolation project, then
test your agent's strength against a set of known agents using tournament.py
and include the results in your report.
"""
import random
class SearchTimeout(Exception):
    """Raised when search time is nearly exhausted; caught inside get_move()."""
    pass
def custom_score(game, player):
    """Calculate the heuristic value of a game state from the point of view
    of the given player.

    Heuristic: weighted mobility difference. The opponent's remaining moves
    are penalized twice as heavily as the player's own are rewarded, so the
    agent plays aggressively toward restricting the opponent. States where
    the player has no moves score lowest naturally; win/loss detection is
    left to the search's terminal test.

    Note: this function should be called from within a Player instance as
    `self.score()` -- you should not need to call this function directly.

    Parameters
    ----------
    game : `isolation.Board`
        Current game state (player locations and blocked cells).

    player : object
        One of `game.__player_1__` / `game.__player_2__`.

    Returns
    -------
    float
        The heuristic value of the current game state to the specified player.
    """
    opponent = game.__player_2__ if player is game.__player_1__ else game.__player_1__
    own_moves = len(game.get_legal_moves(player))
    opp_moves = len(game.get_legal_moves(opponent))
    return float(own_moves - 2 * opp_moves)
def custom_score_2(game, player):
    """Calculate the heuristic value of a game state from the point of view
    of the given player.

    Heuristic: plain mobility difference -- the player's legal move count
    minus the opponent's. Win/loss detection is left to the search's
    terminal test.

    Note: this function should be called from within a Player instance as
    `self.score()` -- you should not need to call this function directly.

    Parameters
    ----------
    game : `isolation.Board`
        Current game state (player locations and blocked cells).

    player : object
        One of `game.__player_1__` / `game.__player_2__`.

    Returns
    -------
    float
        The heuristic value of the current game state to the specified player.
    """
    opponent = game.__player_2__ if player is game.__player_1__ else game.__player_1__
    own_moves = len(game.get_legal_moves(player))
    opp_moves = len(game.get_legal_moves(opponent))
    return float(own_moves - opp_moves)
def custom_score_3(game, player):
    """Calculate the heuristic value of a game state from the point of view
    of the given player.

    Heuristic: own mobility only -- the number of legal moves available to
    the player, ignoring the opponent entirely. A cheap baseline heuristic.

    Note: this function should be called from within a Player instance as
    `self.score()` -- you should not need to call this function directly.

    Parameters
    ----------
    game : `isolation.Board`
        Current game state (player locations and blocked cells).

    player : object
        One of `game.__player_1__` / `game.__player_2__`.

    Returns
    -------
    float
        The heuristic value of the current game state to the specified player.
    """
    return float(len(game.get_legal_moves(player)))
class IsolationPlayer:
    """Base class for minimax and alphabeta agents -- this class is never
    constructed or tested directly.

    ******************** DO NOT MODIFY THIS CLASS ********************

    Parameters
    ----------
    search_depth : int (optional)
        A strictly positive integer (i.e., 1, 2, 3,...) for the number of
        layers in the game tree to explore for fixed-depth search. (i.e., a
        depth of one (1) would only explore the immediate sucessors of the
        current state.)

    score_fn : callable (optional)
        A function to use for heuristic evaluation of game states.

    timeout : float (optional)
        Time remaining (in milliseconds) when search is aborted. Should be a
        positive value large enough to allow the function to return before the
        timer expires.
    """
    def __init__(self, search_depth=3, score_fn=custom_score, timeout=10.):
        self.search_depth = search_depth  # plies to explore for fixed-depth search
        self.score = score_fn  # heuristic applied at leaf/terminal states
        self.time_left = None  # callable installed by get_move() each turn
        self.TIMER_THRESHOLD = timeout  # ms of slack kept before the timer expires
class MinimaxPlayer(IsolationPlayer):
    """Game-playing agent that chooses a move using depth-limited minimax
    search.
    """

    def get_move(self, game, time_left):
        """Search for the best move from the available legal moves and return a
        result before the time limit expires.

        Parameters
        ----------
        game : `isolation.Board`
            An instance of `isolation.Board` encoding the current state of the
            game (e.g., player locations and blocked cells).

        time_left : callable
            A function that returns the number of milliseconds left in the
            current turn. Returning with any less than 0 ms remaining forfeits
            the game.

        Returns
        -------
        (int, int)
            Board coordinates corresponding to a legal move; may return
            (-1, -1) if there are no available legal moves.
        """
        self.time_left = time_left

        # Initialize the best move so that this function returns something
        # in case the search fails due to timeout
        best_move = (-1, -1)

        try:
            # The try/except block will automatically catch the exception
            # raised when the timer is about to expire.
            return self.minimax(game, self.search_depth)

        except SearchTimeout:
            pass  # Handle any actions required after timeout as needed

        # Return the best move from the last completed search iteration
        return best_move

    def terminal_test(self, game, depth):
        """Return True when this node should not be expanded further.

        Terminal when the depth budget is exhausted or the player to move
        has no legal moves. Raises SearchTimeout when the turn timer is
        nearly expired. (Depth convention normalized to "terminal at 0";
        the original stopped at depth == 1 without decrementing at the
        root, which searched the same number of plies.)
        """
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()
        return depth <= 0 or not game.get_legal_moves()

    def min_value(self, game, depth):
        """Minimax value of a node where the opponent is to move."""
        if self.terminal_test(game, depth):
            # Bug fix: evaluate from the root player's (self) perspective.
            # The original scored for game.active_player, which alternates
            # each ply and flipped the evaluation sign on minimizing levels.
            return self.score(game, self)
        value = float("inf")
        for move in game.get_legal_moves():
            value = min(value, self.max_value(game.forecast_move(move), depth - 1))
        return value

    def max_value(self, game, depth):
        """Minimax value of a node where this agent is to move."""
        if self.terminal_test(game, depth):
            return self.score(game, self)  # see perspective note in min_value
        value = float("-inf")
        for move in game.get_legal_moves():
            value = max(value, self.min_value(game.forecast_move(move), depth - 1))
        return value

    def minimax(self, game, depth):
        """Depth-limited minimax search (MINIMAX-DECISION, AIMA).

        Parameters
        ----------
        game : isolation.Board
            Current game state.

        depth : int
            Maximum number of plies to search before evaluating.

        Returns
        -------
        (int, int)
            The board coordinates of the best move found in the current search;
            (-1, -1) if there are no legal moves.
        """
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()

        best_move = (-1, -1)
        best_score = float("-inf")
        for move in game.get_legal_moves():
            value = self.min_value(game.forecast_move(move), depth - 1)
            if value > best_score:
                best_score = value
                best_move = move
        return best_move
class AlphaBetaPlayer(IsolationPlayer):
    """Game-playing agent that chooses a move using iterative deepening minimax
    search with alpha-beta pruning.
    """

    def get_move(self, game, time_left):
        """Iterative-deepening driver: repeatedly deepen the alpha-beta search
        until the turn timer is nearly exhausted, keeping the best move from
        the deepest fully completed iteration.

        Parameters
        ----------
        game : `isolation.Board`
            Current game state.

        time_left : callable
            Milliseconds remaining in the current turn.

        Returns
        -------
        (int, int)
            Board coordinates corresponding to a legal move; may return
            (-1, -1) if there are no available legal moves.
        """
        self.time_left = time_left

        best_move = (-1, -1)
        legal_moves = game.get_legal_moves()
        if legal_moves:
            # Guarantee *some* legal move even if the depth-1 pass times out.
            best_move = legal_moves[0]

        try:
            depth = 1
            while True:
                best_move = self.alphabeta(game, depth)
                depth += 1
        except SearchTimeout:
            pass  # out of time: keep the result of the last finished depth

        return best_move

    def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf")):
        """Depth-limited minimax with alpha-beta pruning (ALPHA-BETA-SEARCH,
        AIMA).

        alpha is the best score the maximizer can already guarantee, beta the
        best the minimizer can; branches that fall outside (alpha, beta) are
        pruned.

        Returns
        -------
        (int, int)
            The board coordinates of the best move found in the current search;
            (-1, -1) if there are no legal moves.
        """
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()

        best_move = (-1, -1)
        best_score = float("-inf")
        for move in game.get_legal_moves():
            value = self._min_value(game.forecast_move(move), depth - 1, alpha, beta)
            if value > best_score:
                best_score = value
                best_move = move
            alpha = max(alpha, best_score)  # no beta cutoff possible at the root
        return best_move

    def _min_value(self, game, depth, alpha, beta):
        """Value of a minimizing node within the (alpha, beta) window."""
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()
        legal_moves = game.get_legal_moves()
        if depth <= 0 or not legal_moves:
            return self.score(game, self)
        value = float("inf")
        for move in legal_moves:
            value = min(value, self._max_value(game.forecast_move(move), depth - 1, alpha, beta))
            if value <= alpha:
                return value  # maximizer already has a better alternative
            beta = min(beta, value)
        return value

    def _max_value(self, game, depth, alpha, beta):
        """Value of a maximizing node within the (alpha, beta) window."""
        if self.time_left() < self.TIMER_THRESHOLD:
            raise SearchTimeout()
        legal_moves = game.get_legal_moves()
        if depth <= 0 or not legal_moves:
            return self.score(game, self)
        value = float("-inf")
        for move in legal_moves:
            value = max(value, self._min_value(game.forecast_move(move), depth - 1, alpha, beta))
            if value >= beta:
                return value  # minimizer will never allow this branch
            alpha = max(alpha, value)
        return value
| [
"winstonjimu@gmail.com"
] | winstonjimu@gmail.com |
c3407190cec85d75c3cb6b43f1f8a485df4882fb | 1d830979cc0e2d9b02788085e6540b5532cc4b18 | /utils.py | a5c49bca599637593a4404b5e76b2d25f7c809ff | [] | no_license | Hagesjo/snakebot-client-python | 3a0f540ee740c05a69e162d2b3e9e1dc830f7b9a | fbbade4fe5d184e65188232c40969f0c0024110c | refs/heads/master | 2020-07-05T17:12:22.124226 | 2016-11-19T16:14:22 | 2016-11-19T16:14:22 | 73,989,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | def logging_format(input_dict, depth=1, outp=""):
for key, value in input_dict.items():
if type(value) == dict:
outp += logging_format(value, depth + 1)
else:
outp += "{}{} = {}\n".format("\t" * depth, key, value)
return outp
def direction_as_movement_delta(direction):
    """Map a direction name to its (dx, dy) grid delta.

    y grows downward (DOWN is +1). Raises KeyError for unknown names.
    """
    deltas = dict(
        UP=(0, -1),
        DOWN=(0, 1),
        LEFT=(-1, 0),
        RIGHT=(1, 0),
    )
    return deltas[direction]
def inside_map(game_map, coordinate):
    """Return True if `coordinate` is a valid cell of the map grid.

    Valid x is 0..width-1 and valid y is 0..height-1: flat positions are
    encoded as x + y * width (see translate_coordinate/translate_position),
    so the original's inclusive upper bounds (`<= width`, `<= height`) were
    off by one.
    """
    x, y = coordinate
    return 0 <= x < game_map.width and 0 <= y < game_map.height
def get_snake_by_id(game_map, snake_id):
    """Return the *name* of the snake whose 'id' matches, or False if absent."""
    return next(
        (info['name'] for info in game_map.snakeInfos if info['id'] == snake_id),
        False,
    )
def get_tile_at(game_map, coordinate):
    """Classify the tile at `coordinate`.

    Returns one of: "obstacle", "food", "snakehead", "snakebody",
    "snaketail", "empty", "wall".
    """
    position = translate_coordinate(game_map, coordinate)
    if position in game_map.obstaclePositions:
        return "obstacle"
    elif position in game_map.foodPositions:
        return "food"

    for snake in game_map.snakeInfos:
        if position in snake['positions']:
            # Bug fix: the original called positions.index() without an
            # argument (TypeError) and then read an undefined name `index`
            # (NameError), and its segment labels were scrambled.
            # Convention assumed here: positions[0] is the head and the last
            # element the tail (consistent with is_tile_available_for_movement
            # treating "snaketail" as walkable) -- TODO confirm against the
            # server protocol.
            index = snake['positions'].index(position)
            if index == 0:
                return "snakehead"
            elif index == len(snake['positions']) - 1:
                return "snaketail"
            else:
                return "snakebody"

    if inside_map(game_map, coordinate):
        return "empty"
    else:
        return "wall"
def is_tile_available_for_movement(game_map, coordinate):
    """True when a snake may move onto the tile (empty, food, or a tail)."""
    return get_tile_at(game_map, coordinate) in ("empty", "food", "snaketail")
def translate_position(game_map, position):
    """Convert a flat board position (x + y * width) into an (x, y) pair.

    Bug fix: uses floor division; the original's true division (`/`)
    produced float coordinates under Python 3.
    """
    y = position // game_map.width
    x = abs(position - y * game_map.width)
    return x, y
def translate_positions(game_map, positions):
    """Convert a sequence of flat board positions into (x, y) pairs."""
    return [translate_position(game_map, flat_pos) for flat_pos in positions]
def translate_coordinate(game_map, coordinate):
    """Flatten an (x, y) coordinate into a board position index (x + y * width)."""
    x_pos, y_pos = coordinate
    return y_pos * game_map.width + x_pos
def get_manhattan_distance(start, goal):
    """L1 (taxicab) distance between two (x, y) points."""
    start_x, start_y = start
    goal_x, goal_y = goal
    return abs(start_x - goal_x) + abs(start_y - goal_y)
def get_euclidian_distance(start, goal):
    """L2 (straight-line) distance between two (x, y) points.

    Bug fix: the original returned ``(x, y) ** 0.5`` -- exponentiating a
    tuple, which raises TypeError. The squared components must be summed
    before taking the square root.
    """
    xs, ys = start
    xg, yg = goal
    x = (xs - xg) ** 2
    y = (ys - yg) ** 2
    return (x + y) ** 0.5
def is_within_square(coord, ne_coord, sw_coord):
    """Return True if `coord` lies inside the axis-aligned square spanned by
    the north-east and south-west corners (boundary inclusive).

    Bug fix: the original unpacked the corners into `x_ne`/`y_ne`/... but
    then read undefined names (`ne_x`, `sw_x`, ...), raising NameError, and
    its comparison logic did not describe a containment test.

    Assumes y grows downward (see direction_as_movement_delta), so the
    north-east corner carries the larger x and the smaller y -- TODO confirm
    corner semantics against the callers.
    """
    x, y = coord
    x_ne, y_ne = ne_coord
    x_sw, y_sw = sw_coord
    return x_sw <= x <= x_ne and y_ne <= y <= y_sw
| [
"andreas.hagesjo@gmail.com"
] | andreas.hagesjo@gmail.com |
20dafa7d284b657578456c3c93e1fdde8a345ed6 | 5817da2441abfe5974cd31f323caaa00958e26f5 | /dekanat/migrations/0002_news_newsimage.py | aa0501d98902d0f66c90062049b5e6076b935ab5 | [] | no_license | katalyzator/Dekanat | 3923f05eefb120fb366db2658e418ea9d87272f3 | af32167bc78ca6ed52ad5910f6bfc8dfa3f91469 | refs/heads/master | 2021-01-12T15:05:09.618166 | 2016-10-23T08:51:11 | 2016-10-23T08:51:11 | 71,691,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,473 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-18 02:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dekanat', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='\u0417\u0430\u0433\u043e\u043b\u043e\u0432\u043e\u043a \u043f\u043e\u0441\u0442\u0430')),
('description', models.CharField(max_length=1000, verbose_name='\u041e\u043f\u0438\u0441\u0430\u043d\u0438\u0435 \u043f\u043e\u0441\u0442\u0430')),
('text', models.TextField(verbose_name='\u0422\u0435\u043a\u0441\u0442 \u043f\u043e\u0441\u0442\u0430')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '\u0414\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
'verbose_name_plural': '\u0414\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
},
),
migrations.CreateModel(
name='NewsImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='', verbose_name='\u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0430')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('news', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dekanat.News', verbose_name='\u0432\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u044c')),
],
options={
'verbose_name': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0438 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
'verbose_name_plural': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0438 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
},
),
]
| [
"web.coder96@gmail.com"
] | web.coder96@gmail.com |
3a2d7a2c88faf6b299c891d01dc3265f8d7ffb2f | 8a09ae71df8bd9dd583a8f0be62b7b65dba284b5 | /www/config_override.py | 643c41d370f8c79b3ac765b18faacf4ee4476129 | [] | no_license | xiaolong2009/Bolg | d1660fb3174b2d99822b0407438c6cd09bc487e2 | 66379f79b309a13bf11cd0ba5cc4f4ffb1b4786b | refs/heads/master | 2021-05-01T11:52:31.452260 | 2015-03-11T10:15:30 | 2015-03-11T10:15:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Override configurations.
'''
# Deployment-local override values; the 'db' block points the database
# host at localhost (see module docstring: "Override configurations").
configs = {
    'db': {
        'host': '127.0.0.1'
    }
} | [
"962484758@qq.com"
] | 962484758@qq.com |
850956482f3bc444086b0488fa4981089c831f18 | 921156b7ffdb84807842582b9ba2613b38ae0f50 | /usersDetail/urls.py | fd6731994d0d4a8a50f68f7cc1a9b4589e3adf6e | [] | no_license | Leslie-Fang/DjangoTemplate | 529d1ca23a7bda1a5a003f0bbac5c4da3f141e09 | 42cbf68a6211f343b33ea555b6f67b69fe40e495 | refs/heads/master | 2021-01-20T06:08:24.856299 | 2017-09-09T10:44:41 | 2017-09-09T10:44:41 | 100,962,414 | 0 | 0 | null | 2017-08-26T13:14:28 | 2017-08-21T14:49:14 | Python | UTF-8 | Python | false | false | 540 | py | from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from usersDetail import views
# URL routing table for the usersDetail app: maps request paths under the
# app's mount point to view callables defined in usersDetail.views.
urlpatterns = [
    url(r'^$', views.getUsers, name='getUsers'),
    url(r'^test$', views.testUsers, name='testUsers'),
    url(r'^userhabbit$', views.showUserHabbit, name='showUserHabbit'),
    url(r'^createFakeData$', views.createFakeHistoryTradeData, name='createFakeData'),
    url(r'^getdata$', views.getData, name='getdata'),
    url(r'^showfigure2$', views.showfigure2, name='showfigure2'),
]
"fangjiahao@sjtu.edu.cn"
] | fangjiahao@sjtu.edu.cn |
d7eaf97561d92b7aa8a03bf4c71024b1d5404d8f | 7e979e024bcbf8cb763919ceaa7466c25d7a0691 | /33.py | 30afb8b6d4c57a1b1c8c4c1c3b8aff365ff79197 | [] | no_license | win32del/RTR105 | 1de49edfe853924bdeb367fb50bbb65eded5d4c9 | a09ed6b23384d9caadb93c1150c04e473906e785 | refs/heads/master | 2021-08-04T07:32:02.779383 | 2019-01-02T22:03:56 | 2019-01-02T22:03:56 | 148,168,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | f = open("zedd.txt")
# Read the whole file opened above, then release the handle: the original
# never closed 'f', leaking the file descriptor for the process lifetime.
inp = f.read()
f.close()
print(len(inp))      # total character count
print(inp[:20])      # preview: first 20 characters
| [
"robertsandersons269@gmail.com"
] | robertsandersons269@gmail.com |
bcc294be4b84f27280cee804ff5a5c66c142b08a | 47338ca2f0ff2fd19e65beab384bdd1558df8ebd | /pdfviewer/esam1.py | d66e08fe2b073f998a346e79c088de118bbf150a | [] | no_license | assomy/thesis | 80a0502727ab0831d970fd9cf93cf6ae95eab231 | 11138065489b0aa5cdec9795545ba65e6059b851 | refs/heads/master | 2021-01-22T20:08:43.311898 | 2014-05-23T17:59:19 | 2014-05-23T17:59:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | import os
print "esam"
x=[]
x.append("esam")
x.append("ali")
print x
| [
"assomy@gmail.com"
] | assomy@gmail.com |
7bc09e89695184c589a6db756b746e3e9450f047 | ab8ea44704ea1a444e4f68ee740b584288d3e558 | /tests/test_execer.py | f0b0661a8b670a8b6d6093ea78392b65de20eb17 | [
"BSD-2-Clause"
] | permissive | jamiees2/xonsh | 96514c3381ac2fcca872e473ea9d414d74c2fdc9 | f7b5985a88b32fafdaf162012c1ebbd19e48e6b9 | refs/heads/master | 2021-01-18T07:49:58.323584 | 2015-03-11T01:56:42 | 2015-03-11T01:56:42 | 31,949,439 | 0 | 0 | null | 2015-03-10T09:42:21 | 2015-03-10T09:42:21 | null | UTF-8 | Python | false | false | 869 | py | """Tests the xonsh lexer."""
from __future__ import unicode_literals, print_function
import os
import sys
import ast
from xonsh.execer import Execer
from tools import mock_xonsh_env
# Debug verbosity passed to the Execer; 0 keeps parser output quiet.
DEBUG_LEVEL = 0
# Single module-wide Execer instance; populated once by setup().
EXECER = None
#
# Helpers
#
def setup():
    """Create the single module-wide Execer shared by every check."""
    # only setup one parser
    global EXECER
    EXECER = Execer(debug_level=DEBUG_LEVEL)
def check_exec(input):
    """Exec *input* through the shared Execer inside a mocked xonsh env.

    A trailing newline is appended when missing, since the execer expects
    newline-terminated source.
    """
    source = input if input.endswith('\n') else input + '\n'
    with mock_xonsh_env(None):
        EXECER.debug_level = DEBUG_LEVEL
        EXECER.exec(source)
def check_eval(input):
    """Eval *input* through the shared Execer inside a mocked xonsh env."""
    with mock_xonsh_env(None):
        EXECER.debug_level = DEBUG_LEVEL
        EXECER.eval(input)
#
# Tests
#
def test_bin_ls():
    # Generator-style test: yields a (check, argument) pair for the runner.
    yield check_eval, '/bin/ls -l'
def test_ls_dashl():
    # Generator-style test: resolve 'ls' via PATH rather than an absolute path.
    yield check_eval, 'ls -l'
def test_which_ls():
    # Generator-style test: exercise a 'which' lookup through the execer.
    yield check_eval, 'which ls'
if __name__ == '__main__':
    # Bug fix: 'nose' was referenced without ever being imported anywhere in
    # this file, so running it as a script raised NameError. Import lazily
    # here so the module itself never needs nose installed.
    import nose
    nose.runmodule()
| [
"scopatz@gmail.com"
] | scopatz@gmail.com |
749d6bf1fe7baa9e3d49dfd43e59d33a244065a0 | 4a3d2dd36f9350d0ef4c839c3972e2a3fab5989b | /supportVector/src/svm_smo.py | d44296f46690bf1472019a78f022cd309fbe3244 | [] | no_license | zhangting9756/machineLearning | 59944e49cb202b3b80466984aab8beeb78a3b4cb | 7bec906a2f132048d90294da5c717090c75187cb | refs/heads/master | 2020-04-12T08:34:15.924743 | 2019-01-09T14:47:10 | 2019-01-09T14:47:10 | 162,389,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,505 | py | # -*- coding:UTF-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import random
"""
Author:
Jack Cui
Blog:
http://blog.csdn.net/c406495762
Zhihu:
https://www.zhihu.com/people/Jack--Cui/
Modify:
2017-10-03
"""
class optStruct:
    """State container shared by all SMO optimization routines.

    Parameters:
        dataMatIn - data matrix (m samples x n features)
        classLabels - column vector of class labels
        C - slack (soft-margin) constant
        toler - tolerance used in the KKT checks
    """
    def __init__(self, dataMatIn, classLabels, C, toler):
        self.X = dataMatIn                           # data matrix
        self.labelMat = classLabels                  # label column vector
        self.C = C                                   # slack constant
        self.tol = toler                             # KKT tolerance
        self.m = np.shape(dataMatIn)[0]              # number of samples
        # One Lagrange multiplier per sample, all starting at zero.
        self.alphas = np.mat(np.zeros((self.m, 1)))
        self.b = 0                                   # intercept term
        # Error cache: column 0 is a validity flag, column 1 the cached E value.
        self.eCache = np.mat(np.zeros((self.m, 2)))
def loadDataSet(fileName):
    """Load a tab-separated data file.

    Each line must hold two feature values and a label, separated by tabs.

    Parameters:
        fileName - path of the file to read
    Returns:
        dataMat - list of [x1, x2] float feature pairs
        labelMat - list of float labels
    """
    dataMat = []
    labelMat = []
    # Bug fix: the original opened the file and never closed it; 'with'
    # guarantees the handle is released even if a line fails to parse.
    with open(fileName) as fr:
        for line in fr:                                     # read line by line
            lineArr = line.strip().split('\t')
            dataMat.append([float(lineArr[0]), float(lineArr[1])])  # features
            labelMat.append(float(lineArr[2]))                      # label
    return dataMat, labelMat
def calcEk(oS, k):
    """Return the prediction error E_k = f(x_k) - y_k for sample *k*.

    Parameters:
        oS - optimization state (optStruct)
        k - index of the sample
    Returns:
        Ek - prediction error for sample k
    """
    # f(x_k) = sum_i alpha_i * y_i * <x_i, x_k> + b  (linear kernel)
    prediction = float(
        np.multiply(oS.alphas, oS.labelMat).T * (oS.X * oS.X[k, :].T) + oS.b)
    return prediction - float(oS.labelMat[k])
def selectJrand(i, m):
    """Pick a random alpha index j in [0, m) with j != i.

    Parameters:
        i - index of alpha_i (excluded from the draw)
        m - total number of alphas
    Returns:
        j - a random index different from i
    """
    while True:
        j = int(random.uniform(0, m))
        if j != i:
            return j
def selectJ(i, oS, Ei):
    """Heuristically pick the second alpha index for the inner loop.

    Prefers the cached index whose error maximises |Ei - Ek| (the largest
    step size); falls back to a random index while the error cache is
    still empty.

    Parameters:
        i - index of the first alpha
        oS - optimization state
        Ei - error of sample i
    Returns:
        (j, Ej) - chosen index and its error
    """
    # Record Ei as a valid cache entry before scanning the cache.
    oS.eCache[i] = [1, Ei]
    valid = np.nonzero(oS.eCache[:, 0].A)[0]
    if len(valid) <= 1:
        # No other valid cached errors yet: choose j at random.
        j = selectJrand(i, oS.m)
        return j, calcEk(oS, j)
    bestK, bestDelta, bestEj = -1, 0, 0
    for k in valid:
        if k == i:
            continue  # skip i itself; comparing against it wastes time
        Ek = calcEk(oS, k)
        deltaE = abs(Ei - Ek)
        if deltaE > bestDelta:
            bestK, bestDelta, bestEj = k, deltaE, Ek
    return bestK, bestEj
def updateEk(oS, k):
    """Recompute E_k and store it in the error cache as a valid entry.

    Parameters:
        oS - optimization state
        k - index of the sample whose cached error is refreshed
    """
    oS.eCache[k] = [1, calcEk(oS, k)]
def clipAlpha(aj, H, L):
    """Clip alpha_j so it stays within its box constraint.

    Clamps to the upper bound first, then the lower bound (same order as
    the classic SMO pseudo-code).

    Parameters:
        aj - candidate value of alpha_j
        H - upper bound
        L - lower bound
    Returns:
        the clipped value
    """
    value = H if aj > H else aj
    return L if L > value else value
def innerL(i, oS):
    """
    Optimized SMO inner loop: try to improve the pair (alpha_i, alpha_j).
    Parameters:
        i - index of the first alpha
        oS - optimization state
    Returns:
        1 - an alpha pair was changed
        0 - no pair changed, or the change was too small
    """
    # Step 1: compute the error Ei
    Ei = calcEk(oS, i)
    # Only optimize alphas that violate the KKT conditions beyond the
    # tolerance: large positive/negative margin error while the alpha is
    # still free to move within (0, C).
    if ((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        # Pick alpha_j with the second-choice heuristic and compute Ej.
        j,Ej = selectJ(i, oS, Ei)
        # Save the pre-update alpha values (deep copies).
        alphaIold = oS.alphas[i].copy(); alphaJold = oS.alphas[j].copy();
        # Step 2: compute the box bounds L and H for alpha_j.
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H:
            print("L==H")
            return 0
        # Step 3: compute eta (negative of the second derivative along the
        # constraint line); a non-negative eta means no usable optimum.
        eta = 2.0 * oS.X[i,:] * oS.X[j,:].T - oS.X[i,:] * oS.X[i,:].T - oS.X[j,:] * oS.X[j,:].T
        if eta >= 0:
            print("eta>=0")
            return 0
        # Step 4: update alpha_j along the unconstrained optimum.
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej)/eta
        # Step 5: clip alpha_j back into [L, H].
        oS.alphas[j] = clipAlpha(oS.alphas[j],H,L)
        # Refresh Ej in the error cache.
        updateEk(oS, j)
        if (abs(oS.alphas[j] - alphaJold) < 0.00001):
            print("alpha_j变化太小")
            return 0
        # Step 6: update alpha_i by the opposite amount (keeps the
        # sum constraint alpha_i*y_i + alpha_j*y_j constant).
        oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold - oS.alphas[j])
        # Refresh Ei in the error cache.
        updateEk(oS, i)
        # Step 7: compute the two candidate intercepts b1 and b2.
        b1 = oS.b - Ei- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[i,:].T - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[i,:]*oS.X[j,:].T
        b2 = oS.b - Ej- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[j,:].T - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[j,:]*oS.X[j,:].T
        # Step 8: pick b from b1/b2 (or their mean) depending on which
        # alpha ended up strictly inside (0, C).
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
        else: oS.b = (b1 + b2)/2.0
        return 1
    else:
        return 0
def smoP(dataMatIn, classLabels, C, toler, maxIter):
    """
    Full (Platt-style) linear SMO driver.
    Parameters:
        dataMatIn - data matrix
        classLabels - class labels
        C - slack (soft-margin) constant
        toler - KKT tolerance
        maxIter - maximum number of outer iterations
    Returns:
        oS.b - intercept found by SMO
        oS.alphas - Lagrange multipliers found by SMO
    """
    oS = optStruct(np.mat(dataMatIn), np.mat(classLabels).transpose(), C, toler)    # build shared state
    iter = 0                                                                        # outer-iteration counter
    entireSet = True; alphaPairsChanged = 0
    # Stop when max iterations reached, or when a full pass over the whole
    # set changed nothing (entireSet False and no pair changed).
    while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet:                                                               # pass over every sample
            for i in range(oS.m):
                alphaPairsChanged += innerL(i,oS)                                   # optimized SMO inner loop
                print("全样本遍历:第%d次迭代 样本:%d, alpha优化次数:%d" % (iter,i,alphaPairsChanged))
            iter += 1
        else:                                                                       # pass over non-bound alphas only
            # Indices whose alpha lies strictly inside (0, C), i.e. off the box bounds.
            nonBoundIs = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i,oS)
                print("非边界遍历:第%d次迭代 样本:%d, alpha优化次数:%d" % (iter,i,alphaPairsChanged))
            iter += 1
        if entireSet:                                                               # after a full pass, switch to non-bound passes
            entireSet = False
        elif (alphaPairsChanged == 0):                                              # no change on non-bound pass: re-scan everything
            entireSet = True
        print("迭代次数: %d" % iter)
    return oS.b,oS.alphas                                                           # intercept and multipliers
def showClassifer(dataMat, classLabels, w, b):
    """
    Visualize the classification result with matplotlib.
    Parameters:
        dataMat - data matrix (list of [x1, x2] pairs)
        classLabels - class labels (positive vs non-positive)
        w - normal vector of the separating line
        b - intercept of the separating line
    Returns:
        None (opens a matplotlib window)
    """
    # Split samples by label for separate scatter plots.
    data_plus = []                                  # positive samples
    data_minus = []                                 # negative samples
    for i in range(len(dataMat)):
        if classLabels[i] > 0:
            data_plus.append(dataMat[i])
        else:
            data_minus.append(dataMat[i])
    data_plus_np = np.array(data_plus)              # to numpy array
    data_minus_np = np.array(data_minus)            # to numpy array
    plt.scatter(np.transpose(data_plus_np)[0], np.transpose(data_plus_np)[1], s=30, alpha=0.7)   # positive scatter
    plt.scatter(np.transpose(data_minus_np)[0], np.transpose(data_minus_np)[1], s=30, alpha=0.7) # negative scatter
    # Draw the separating line between the extreme x values:
    # solve w1*x + w2*y + b = 0 for y at the two endpoints.
    x1 = max(dataMat)[0]
    x2 = min(dataMat)[0]
    a1, a2 = w
    b = float(b)
    a1 = float(a1[0])
    a2 = float(a2[0])
    y1, y2 = (-b- a1*x1)/a2, (-b - a1*x2)/a2
    plt.plot([x1, x2], [y1, y2])
    # Highlight support vectors (samples with alpha > 0).
    # NOTE(review): 'alphas' is the module-level global assigned in the
    # __main__ block, not a parameter -- calling this function before smoP
    # has run (or from another module) raises NameError. Confirm intent.
    for i, alpha in enumerate(alphas):
        if alpha > 0:
            x, y = dataMat[i]
            plt.scatter([x], [y], s=150, c='none', alpha=0.7, linewidth=1.5, edgecolor='red')
    plt.show()
def calcWs(alphas,dataArr,classLabels):
    """Recover the weight vector w = sum_i alpha_i * y_i * x_i.

    Parameters:
        alphas - column vector of Lagrange multipliers
        dataArr - data matrix (list of feature rows)
        classLabels - class labels
    Returns:
        w - (n, 1) weight vector (numpy array)
    """
    X = np.mat(dataArr)
    labelMat = np.mat(classLabels).transpose()
    m, n = np.shape(X)
    w = np.zeros((n, 1))
    # Accumulate each sample's contribution alpha_i * y_i * x_i^T.
    for idx in range(m):
        w += np.multiply(alphas[idx] * labelMat[idx], X[idx, :].T)
    return w
if __name__ == '__main__':
    # NOTE(review): hard-coded absolute Windows path -- adjust before running
    # on another machine.
    dataArr, classLabels = loadDataSet(r'F:\Machine-Learning-master\Machine-Learning-master\SVM\testSet.txt')
    # Run SMO with C=0.6, tolerance 0.001, at most 40 outer iterations.
    b, alphas = smoP(dataArr, classLabels, 0.6, 0.001, 40)
    # Derive the explicit weight vector and plot the decision boundary.
    w = calcWs(alphas,dataArr, classLabels)
    showClassifer(dataArr, classLabels, w, b)
| [
"tingzhangguo@163.com"
] | tingzhangguo@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.