blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3748a9e8a475776c784dacc5951e89171f92e72b | cb2a4180ffc0df4296737134230397069de8da21 | /accounts/signals.py | d6f2b8bc1882f084fe375b450158fdf3249fc531 | [
"MIT"
] | permissive | fagrimacs/fagrimacs_production | 8a9cef4e1d73360301fd66f4f0b70ea4868ef610 | ea1a8f92c41c416309cc1fdd8deb02f41a9c95a0 | refs/heads/master | 2022-12-23T22:08:27.768479 | 2020-09-24T10:10:35 | 2020-09-24T10:10:35 | 295,315,768 | 0 | 0 | MIT | 2020-09-24T10:10:36 | 2020-09-14T05:44:21 | JavaScript | UTF-8 | Python | false | false | 646 | py | from django.db.models.signals import pre_save
from django.dispatch import receiver
from .models import UserProfile
@receiver(pre_save, sender=UserProfile)
def delete_prev_profile_pic(sender, instance, **kwargs):
    """Remove the previous profile picture file from storage just before
    a UserProfile is saved with a different one.

    Connected to UserProfile's ``pre_save`` signal.  Brand-new instances
    (no primary key yet) are ignored, as is the shared default image
    'profile_pics/user.png', which must never be deleted.
    """
    if not instance.pk:
        # Creation, not update: there is no previous picture to clean up.
        return
    try:
        prev_profile = UserProfile.objects.get(
            pk=instance.pk).profile_pic
    except UserProfile.DoesNotExist:
        # The row disappeared between signal dispatch and lookup.
        return
    new_profile = instance.profile_pic
    # Compare file *names* rather than .url: accessing .url on an empty
    # FieldFile raises ValueError (e.g. when the picture is being
    # cleared), which previously made the save itself fail.
    new_name = new_profile.name if new_profile else None
    if prev_profile and prev_profile.name != new_name:
        if prev_profile.name != 'profile_pics/user.png':
            # save=False: the model instance is about to be saved anyway.
            prev_profile.delete(save=False)
| [
"zendainnocent@gmail.com"
] | zendainnocent@gmail.com |
216d0e5c1001e89b218aef24c8cabfa7ee8027a8 | 5a310398592ddb75d27dc67c9b45198e31cb0d55 | /rfid-v1.py | d941851aa03ed4b0f8dbaef378689460a5bf2f2a | [] | no_license | ch-tseng/rfidDoor2 | 97871be9d431515425180b8e0893400a9b147831 | ed04b794d6c70dc223bb2f75e5d7367bea8353b4 | refs/heads/master | 2021-01-20T04:21:23.102422 | 2017-05-04T05:03:02 | 2017-05-04T05:03:02 | 89,674,676 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import urllib.request
import logging
import json
#import base64
import binascii
import sys
import time
# A UDP server
# Set up a UDP server
UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
# Listen on port 8080
# (to all IP addresses on this system)
listen_addr = ("",8080)
UDPSock.bind(listen_addr)

# When True, echo every tag read and every web reply to stdout as well.
debugPrint = False
# Endpoint that maps an RFID tag (hex string) to door/person info.
# NOTE(review): an access code is embedded in this plain-HTTP URL;
# consider moving it to configuration and using HTTPS.
urlHeadString = "http://data.sunplusit.com/Api/DoorRFIDInfo?code=83E4621643F7B2E148257244000655E3&rfid="

#-----------------------------------------
# logging setup: append INFO-and-above records to a fixed log file.
logger = logging.getLogger('msg')
hdlr = logging.FileHandler('/home/chtseng/rfidDoor/msg.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
def is_json(myjson):
    """Return True if *myjson* parses as JSON, False otherwise.

    Accepts the raw web-reply body.  Non-string input such as ``None``
    is treated as "not JSON" instead of letting ``json.loads`` raise
    TypeError (the original only caught ValueError).
    """
    try:
        json.loads(myjson)
    except (ValueError, TypeError):
        return False
    return True
# Main receive loop: block for a UDP datagram from the RFID reader,
# log the tag, forward it to the web API, and parse the JSON reply.
while True:
    data,addr = UDPSock.recvfrom(1024)
    #tmpTAGS, tmpTIMES = scanTAGS(binascii.b2a_hex(data).decode('ascii'))
    # The reader sends raw bytes; keep them as a lowercase hex string.
    readHEX = binascii.b2a_hex(data).decode('ascii')
    logger.info('Received rfid:' + readHEX)
    if(debugPrint==True):
        print (readHEX)
    try:
        # Ask the backend which person/door this tag maps to.
        webReply = urllib.request.urlopen(urlHeadString + readHEX).read()
        webReply = webReply.decode('utf-8').rstrip()
        logger.info('webReply: {}'.format(webReply))
        if(debugPrint==True):
            print('webReply: {}'.format(webReply))
            print(urlHeadString + readHEX)
            print("webReply:" + webReply)
#        listTAGs = webReply.split("")
    except Exception:
        # Network/HTTP failure: log it and fall back to an empty JSON
        # list so the loop keeps serving the next tag.
        print("Unexpected error:", sys.exc_info()[0])
        logger.info('Unexpected error:' + str(sys.exc_info()[0]))
        webReply = "[]"
        pass
    # Only parse replies that are valid JSON; jsonReply is currently
    # only used for debug printing.
    if(is_json(webReply)==True):
        jsonReply = json.loads(webReply)
        if(debugPrint==True):
            print (jsonReply)
    #time.sleep(1)
| [
"ch.tseng@sunplusit.com"
] | ch.tseng@sunplusit.com |
2265604085f0b363acfc4bbfcfd9c1294885eb23 | 626b14ce13986b6d5e03143e151004247659625a | /Day01-15/code/Day07/dict2.py | 1f89c849d510ca7c3702747ec28763684b8c1a4f | [] | no_license | Focavn/Python-100-Days | c7586ecf7ae3f1fd42f024558bb998be23ee9df8 | d8de6307aeff9fe31fd752bd7725b9cc3fbc084b | refs/heads/master | 2021-08-08T17:57:02.025178 | 2020-09-17T11:58:04 | 2020-09-17T11:58:04 | 220,427,144 | 0 | 0 | null | 2019-11-08T08:59:43 | 2019-11-08T08:59:41 | null | UTF-8 | Python | false | false | 575 | py | """
字典的常用操作
Version: 0.1
Author: 骆昊
Date: 2018-03-06
"""
def main():
    """Demonstrate common dict operations: view objects, iteration over
    items, membership tests, setdefault and item assignment."""
    stu = {'name': '骆昊', 'age': 38, 'gender': True}
    print(stu)
    # The three view objects over the mapping, printed in turn.
    for view in (stu.keys(), stu.values(), stu.items()):
        print(view)
    # Each pair is a (key, value) tuple; show it whole and unpacked.
    for pair in stu.items():
        print(pair)
        print(pair[0], pair[1])
    # Update a value only when the key is actually present.
    if 'age' in stu:
        stu['age'] = 20
    print(stu)
    # setdefault inserts only when the key is absent ...
    stu.setdefault('score', 60)
    print(stu)
    # ... so this second call leaves 'score' at 60.
    stu.setdefault('score', 100)
    print(stu)
    # Plain assignment always overwrites.
    stu['score'] = 100
    print(stu)
# Run the demo only when executed directly, not when imported.
if __name__ == '__main__':
    main()
| [
"Focavn@users.github.com"
] | Focavn@users.github.com |
439e7791a1e34f6ecad6c4253cc225a1a92b54ce | 4e0543a2231ed041d3837e104005ff5e131d6714 | /imagepro/settings.py | 2907c4b3a3ee5ab3ca6855462383126012214667 | [] | no_license | brendanmanning/Image-Processing-Project | 7606adb87865525915f0f5b9a85b0bd33cf442a1 | 61e46f81c111b90fb2b2ae958b8154f77007b9f8 | refs/heads/main | 2023-03-23T22:06:43.630696 | 2021-03-23T01:38:02 | 2021-03-23T01:38:02 | 348,441,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,068 | py | '''
MIT License
Copyright (c) 2019 Arshdeep Bahga and Vijay Madisetti
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Django settings for imagepro project.
import os.path
SRC_DIR = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
)
print("src_dir: " + SRC_DIR)
print("=> " + os.path.join(SRC_DIR, "myapp/templates"))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': '',
'NAME': ''
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [ '34.235.134.15' ]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = SRC_DIR + '/media/'
print("media root: " + MEDIA_ROOT)
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SRC_DIR, "/myapp/templates/static"),
'/home/ubuntu/imagepro/myapp/templates/static',
'/Users/brendanmanning/OneDrive\ -\ Temple\ University/School/CIS\ 5517/Project\ 1/imagepro/myapp/templates/static'
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '92z2pjik%_@b)pg@e^k+7qrzm#!9*fa^nzxirk)h^8kn$gb(5k'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'imagepro.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'imagepro.wsgi.application'
'''
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#os.path.join(os.path.realpath(__file__), '../templates'),
os.path.join(SRC_DIR, "myapp/templates"),
'/home/ubuntu/imagepro/myapp/templates',
'/Users/brendanmanning/Downloads/imagepro/myapp/templates'
)
'''
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'/home/ubuntu/imagepro/myapp/templates',
'/Users/brendanmanning/OneDrive\ -\ Temple\ University/School/CIS\ 5517/Project\ 1/imagepro/myapp/templates'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp'
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
""
] | |
96e6b180de682b27e384d4ac457f410705460971 | d0b2bfd3c3c5999667576141c5b7c28af837fc3d | /cyg_plan_pagos/reportes/__init__.py | 70cd67fd5387e8a09b0bf8273a6f7ede4a48d678 | [] | no_license | edisonguachamin/cyg | c34a55875e8be5824e9fa5e0e58f1216144154a3 | 2a400b6c3d33602684d9478f999253e696c8534b | refs/heads/master | 2021-01-02T09:15:55.522581 | 2015-02-26T00:02:03 | 2015-02-26T00:02:03 | 31,244,324 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-Today
# CyG xxxx
# @autor: Jonathan Finlay <jfinlay@riseup.net>
#
##############################################################################
import report_payment
import report_statement | [
"edison.guachamin@gmail.com"
] | edison.guachamin@gmail.com |
7411f27ad02c4b93fccce394b876e457e646fe41 | 4b7fda167f5faf26ad3ba4aa8fe093759a2701b4 | /setup.py | 4ca7644cf989594af6bdc39f97f141bef92b4cf5 | [] | no_license | Mark42/veracross_api | 6aa4f19a50ab56555a179ea811dfc4b9b8be6e76 | f0662fb9b1959d6fda75af05d5f8b639a9c0a465 | refs/heads/master | 2023-04-25T23:07:15.080092 | 2019-06-24T22:14:35 | 2019-06-24T22:14:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | from distutils.core import setup
import setuptools
from os import path
this_directory = path.abspath(path.dirname(__file__))
# Read the README so PyPI can show the full project description.
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# NOTE(review): version is '0.5' but download_url points at the v.02
# tag — these should be kept in sync on release.
setup(
    name='veracross_api',
    packages=['veracross_api'],
    version='0.5',
    description='Simple library for interacting with the Veracross API',
    long_description=long_description,
    long_description_content_type='text/markdown',
    license='MIT License',
    author='Forrest Beck',
    author_email='forrest.beck@da.org',
    url='https://github.com/beckf/veracross_api',
    download_url='https://github.com/beckf/veracross_api/archive/v.02.tar.gz',
    keywords=['Veracross', 'API'],
    install_requires=['requests'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ],
)
"forrest.beck@da.org"
] | forrest.beck@da.org |
c663457a1e3ed995d156a9683853ccf9c25bd48e | 950c66cccb6dc9df1752788defa18b6ca9deb330 | /app/main/errors.py | c06b7a941e5e95c3506dfca9e0d8c7df54c27d7a | [
"MIT"
] | permissive | margretmwangi/NewsHighlight | 7ca16e7305461a0694cd3ffd98d3b6bdcb14db1b | 091c64b24acb5ee7650238e5cc0687bcea8cd178 | refs/heads/master | 2022-04-25T09:51:41.784350 | 2020-04-27T18:41:02 | 2020-04-27T18:41:02 | 258,537,771 | 0 | 0 | MIT | 2020-04-27T18:15:58 | 2020-04-24T14:38:20 | HTML | UTF-8 | Python | false | false | 196 | py | from flask import render_template
from . import main
@main.errorhandler(404)
def not_found(error):
    """Render the custom 404 page for any unmatched route.

    :param error: the exception Flask raises for the missing route.
    :return: tuple of (rendered '404.html' template, HTTP status 404).
    """
    return render_template('404.html'), 404
| [
"margaret13mwangi@gmail.com"
] | margaret13mwangi@gmail.com |
fa9fccdba11e3a5161caca78f093918d44ec1789 | 207ab89146f30c6573bf3506510152c8c924c2bd | /test/integration/test_output_transform.py | 3889fcdb0c7a34b1da40f1d56c9688beea20d16d | [
"Apache-2.0"
] | permissive | theraw/nighthawk | 7fe39ee7f23c84d525d35eea06e168ea4dba991c | 317dba600a5e7ae3095383a1161f784f578f7426 | refs/heads/master | 2022-11-05T06:11:31.059304 | 2020-06-19T19:02:02 | 2020-06-19T19:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py | #!/usr/bin/env python3
import pytest
from test.integration import utility
import os
import subprocess
def _run_output_transform_with_args(args):
    """Run the nighthawk_output_transform binary with *args*, returning
    its (exit_code, output) tuple."""
    binary_name = "nighthawk_output_transform"
    return utility.run_binary_with_args(binary_name, args)
def test_output_transform_help():
    """--help exits with status 0 and prints the usage banner."""
    exit_code, text = _run_output_transform_with_args("--help")
    utility.assertEqual(exit_code, 0)
    utility.assertIn("USAGE", text)
def test_output_transform_bad_arguments():
    """An unknown flag exits with status 1 and reports a parse error."""
    exit_code, text = _run_output_transform_with_args("--foo")
    utility.assertEqual(exit_code, 1)
    utility.assertIn("PARSE ERROR: Argument: --foo", text)
def test_output_transform_101():
    """
    Runs an arbitrary load test, which outputs to json.
    This json output is then transformed to human readable output.
    """
    # Bazel-style runfiles root where the test binaries live.
    test_rundir = os.path.join(os.environ["TEST_SRCDIR"], os.environ["TEST_WORKSPACE"])
    # Run a minimal 1-second, 1-rps load test and capture its JSON output.
    process = subprocess.run([
        os.path.join(test_rundir, "nighthawk_client"), "--duration", "1", "--rps", "1", "127.0.0.1",
        "--output-format", "json"
    ],
                             stdout=subprocess.PIPE)
    output = process.stdout
    # Pipe the client's JSON into the transformer, asking for human output.
    process = subprocess.run(
        [os.path.join(test_rundir, "nighthawk_output_transform"), "--output-format", "human"],
        stdout=subprocess.PIPE,
        input=output)
    utility.assertEqual(process.returncode, 0)
    utility.assertIn("Nighthawk - A layer 7 protocol benchmarking tool",
                     process.stdout.decode("utf-8"))
| [
"noreply@github.com"
] | noreply@github.com |
fe8763de336ee65092b7aaec84eea8912eb81c8c | df75b4d24416bb764db61931457f367872d8a66c | /django_states/main/migrations/0006_auto__add_field_statecapital_state__chg_field_statecapital_latitude__c.py | fa52b64db591b04f547b608edbe24fd3731be7db | [] | no_license | Bofahda/states | bb1f7caf8409e363ba2cb67974464854f14570d8 | 11016ac07040177e81e53b1ea88739b4de0ea936 | refs/heads/master | 2020-12-24T16:58:56.789855 | 2015-08-12T09:20:53 | 2015-08-12T09:20:53 | 40,591,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,729 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add StateCapital.state (a OneToOne link
    to State) and relax latitude/longitude/population to allow NULL."""

    def forwards(self, orm):
        # Adding field 'StateCapital.state'
        db.add_column(u'main_statecapital', 'state',
                      self.gf('django.db.models.fields.related.OneToOneField')(to=orm['main.State'], unique=True, null=True),
                      keep_default=False)

        # Changing field 'StateCapital.latitude'
        db.alter_column(u'main_statecapital', 'latitude', self.gf('django.db.models.fields.FloatField')(null=True))

        # Changing field 'StateCapital.longitude'
        db.alter_column(u'main_statecapital', 'longitude', self.gf('django.db.models.fields.FloatField')(null=True))

        # Changing field 'StateCapital.population'
        db.alter_column(u'main_statecapital', 'population', self.gf('django.db.models.fields.IntegerField')(null=True))

    def backwards(self, orm):
        # Deleting field 'StateCapital.state'
        db.delete_column(u'main_statecapital', 'state_id')

        # Changing field 'StateCapital.latitude'
        db.alter_column(u'main_statecapital', 'latitude', self.gf('django.db.models.fields.FloatField')(default=1))

        # Changing field 'StateCapital.longitude'
        db.alter_column(u'main_statecapital', 'longitude', self.gf('django.db.models.fields.FloatField')(default=1))

        # Changing field 'StateCapital.population'
        db.alter_column(u'main_statecapital', 'population', self.gf('django.db.models.fields.IntegerField')(default=1))

    # Frozen ORM snapshot used by South to build `orm` for this migration.
    models = {
        u'main.state': {
            'Meta': {'object_name': 'State'},
            'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
        },
        u'main.statecapital': {
            'Meta': {'object_name': 'StateCapital'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'population': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'state': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['main.State']", 'unique': 'True', 'null': 'True'})
        }
    }

    complete_apps = ['main']
"user@Users-MacBook-Air.local"
] | user@Users-MacBook-Air.local |
fe6aa27d544a7bc06532e7cb5bfad0801c9b1eba | 8174d11add088a2413d5a7fdf8233059c3876f52 | /docs/examples/pool.py | 9265af58ef12bca9806eb7e1896aa4e7797bc85c | [
"MIT"
] | permissive | AraHaan/aioredis | 903eaaefb243c8bc8d70c9178baf721446c9cc7f | 19be499015a8cf32580e937cbfd711fd48489eca | refs/heads/master | 2023-03-17T03:16:46.281210 | 2022-02-22T14:33:33 | 2022-02-22T14:33:33 | 82,424,636 | 1 | 0 | MIT | 2023-03-07T15:57:29 | 2017-02-19T00:20:56 | Python | UTF-8 | Python | false | false | 648 | py | import asyncio
import aioredis
async def main():
    """Round-trip a key through Redis using a client whose *implicit*
    connection pool is capped at 10 connections."""
    redis = aioredis.from_url("redis://localhost", max_connections=10)
    await redis.execute_command("set", "my-key", "value")
    val = await redis.execute_command("get", "my-key")
    print("raw value:", val)
async def main_pool():
    """Same round-trip as main(), but with an *explicitly* constructed
    ConnectionPool handed to the Redis client."""
    pool = aioredis.ConnectionPool.from_url("redis://localhost", max_connections=10)
    redis = aioredis.Redis(connection_pool=pool)
    await redis.execute_command("set", "my-key", "value")
    val = await redis.execute_command("get", "my-key")
    print("raw value:", val)
# Demonstrate both pooling styles back to back when run as a script.
if __name__ == "__main__":
    asyncio.run(main())
    asyncio.run(main_pool())
| [
"sean_stewart@me.com"
] | sean_stewart@me.com |
381b951e04e4543292f60b018cbdeaaf140dbd15 | ee49fbe3c0ce61379fe2a884342e5e6470c918e5 | /tools/texture.py | a67819835973afe44c10acf2af0b3b789c39d04f | [] | no_license | Prof222/camera-obscura | 290c4819269986c2093f442f9296fe0177a07ed5 | 20fbb0d798ba12bbfdc8036e9707954bf76d69d9 | refs/heads/master | 2021-01-21T03:24:50.093630 | 2017-09-10T19:05:47 | 2017-09-10T19:05:47 | 101,895,400 | 0 | 0 | null | 2017-09-10T19:03:16 | 2017-08-30T15:01:48 | Python | UTF-8 | Python | false | false | 15,221 | py |
from __future__ import with_statement
from gletools.gl import *
from .util import Context, DependencyException, quad, ExtensionMissing
from ctypes import string_at, sizeof, byref, c_char_p, cast, c_void_p, POINTER, memmove, c_ubyte, string_at
try:
import Image
has_pil = True
except:
has_pil = False
__all__ = ['Texture']
class Object(object):
    """A minimal attribute bag: every keyword argument passed to the
    constructor becomes an instance attribute."""

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
def gen_texture():
    """Allocate one OpenGL texture name and return it as a GLuint."""
    handle = GLuint()
    glGenTextures(1, byref(handle))
    return handle
class Texture(Context):
    """A 2D OpenGL texture with an optional CPU-side ctypes pixel buffer.

    The class attributes below describe the supported internal formats
    (``specs``) in terms of ctypes element type and channel layout.
    NOTE(review): this module uses Python 2 syntax (tuple parameters in
    __getitem__/__setitem__, py2 ``map``/``ord`` string handling).
    """
    # (ctypes element type, GL enum) pairs for each component type.
    gl_byte = Object(
        obj = GLubyte,
        enum = GL_UNSIGNED_BYTE
    )
    gl_short = Object(
        obj = GLushort,
        enum = GL_UNSIGNED_SHORT
    )
    gl_float = Object(
        obj = GLfloat,
        enum = GL_FLOAT,
    )
    # Half floats are still staged through 32-bit floats on the CPU side.
    gl_half_float = Object(
        obj = GLfloat,
        enum = GL_FLOAT,
    )
    # Channel layouts: GL enum plus number of components per pixel.
    rgb = Object(
        enum = GL_RGB,
        count = 3,
    )
    rgba = Object(
        enum = GL_RGBA,
        count = 4,
    )
    luminance = Object(
        enum = GL_LUMINANCE,
        count = 1,
    )
    alpha = Object(
        enum = GL_ALPHA,
        count = 1,
    )
    depth = Object(
        enum = GL_DEPTH_COMPONENT,
        count = 1,
    )
    # Internal format -> (optional PIL mode, component type, channels).
    specs = {
        GL_RGB:Object(
            pil = 'RGB',
            type = gl_byte,
            channels = rgb,
        ),
        GL_RGBA:Object(
            pil = 'RGBA',
            type = gl_byte,
            channels = rgba,
        ),
        GL_RGB16:Object(
            type = gl_short,
            channels = rgb,
        ),
        GL_RGBA32F:Object(
            pil = 'RGBA',
            type = gl_float,
            channels = rgba,
        ),
        GL_RGB16F:Object(
            type = gl_half_float,
            channels = rgb,
        ),
#        GL_RGB32F:Object(
#            pil = 'RGB',
#            type = gl_float,
#            channels = rgb,
#        ),
        GL_LUMINANCE32F:Object(
            pil = 'L',
            type = gl_float,
            channels = luminance,
        ),
        GL_LUMINANCE:Object(
            type = gl_byte,
            channels = luminance,
        ),
        GL_ALPHA:Object(
            type = gl_byte,
            channels = alpha,
        ),
        GL_DEPTH_COMPONENT:Object(
            type = gl_float,
            channels = depth,
        ),
    }
    target = GL_TEXTURE_2D
    _get = GL_TEXTURE_BINDING_2D
    # Formats that require the ARB_texture_float extension.
    float_targets = [
        GL_RGBA32F,
        GL_RGB32F,
        GL_LUMINANCE32F,
    ]

    def bind(self, id):
        glBindTexture(self.target, id)

    def _enter(self):
        # Save enable/texture state so __exit__ can restore it.
        glPushAttrib(GL_ENABLE_BIT | GL_TEXTURE_BIT)
        glActiveTexture(self.unit)
        glEnable(self.target)

    def _exit(self):
        glPopAttrib()

    def __init__(self, width, height, format=GL_RGBA, filter=GL_LINEAR, unit=GL_TEXTURE0, data=None, mipmap=0, clamp=False):
        # `data` may be a raw byte string or any sequence of components.
        if format in self.float_targets:
            if not gl_info.have_extension('GL_ARB_texture_float'):
                raise ExtensionMissing('no floating point texture support (GL_ARB_texture_float)')
        Context.__init__(self)
        self.clamp = clamp
        self.mipmap = mipmap
        self.width = width
        self.height = height
        self.format = format
        self.filter = filter
        self.unit = unit
        spec = self.spec = self.specs[format]
        # ctypes array type sized for the whole image.
        self.buffer_type = (spec.type.obj * (width * height * spec.channels.count))
        id = self.id = gen_texture()
        if data:
            if isinstance(data, str):
                # Copy the raw byte string into a ctypes buffer we own.
                pointer = cast(c_char_p(data), c_void_p)
                source = self.buffer_type.from_address(pointer.value)
                target = self.buffer_type()
                memmove(target, source, sizeof(source))
                self.buffer = target
            else:
                self.buffer = self.buffer_type(*data)
        else:
            # Lazily allocated by the `buffer` property on first access.
            self._buffer = None
        self.update()
        self.display = self.make_display()

    def get_buffer(self):
        # Allocate the CPU-side buffer on demand.
        if not self._buffer:
            self._buffer = self.buffer_type()
        return self._buffer

    def set_buffer(self, data):
        self._buffer = data

    buffer = property(get_buffer, set_buffer)

    def delete(self):
        # Release the GL texture object.
        glDeleteTextures(1, byref(self.id))

    @classmethod
    def open(cls, filename, format=GL_RGBA, filter=GL_LINEAR, unit=GL_TEXTURE0, mipmap=0):
        """Load an image file via PIL into a new Texture."""
        if not has_pil:
            raise DependencyException('PIL is requried to open image files')
        spec = cls.specs[format]
        pil_format = getattr(spec, 'pil', None)
        if not pil_format:
            raise Exception('cannot load')
        image = Image.open(filename)
        image = image.convert(pil_format)
        width, height = image.size
        data = image.tostring()
        # Float formats expect normalized 0..1 components.
        if spec.type == cls.gl_float:
            data = map(lambda x: ord(x)/255.0, data)
        else:
            data = map(ord, data)
        return cls(width, height, format=format, filter=filter, unit=unit, data=data, mipmap=mipmap)

    @classmethod
    def raw_open(cls, filename, width, height, format=GL_RGBA, filter=GL_LINEAR, unit=GL_TEXTURE0, mipmap=0, clamp=False):
        # NOTE(review): `image` and `point` are undefined names here, so
        # this method raises NameError when called — likely a stale
        # copy/paste of another constructor's keyword arguments.
        data = open(filename, 'rb').read()
        self = cls(width, height, data=data, format=format, filter=filter, unit=unit, mipmap=mipmap, clamp=clamp, image=image, point=point)
        return self

    def save(self, filename):
        """Read the texture back from GL and write it via PIL (byte
        formats only)."""
        self.retrieve()
        if self.spec.type == self.gl_byte:
            source = string_at(self.buffer, sizeof(self.buffer))
            image = Image.fromstring(self.spec.pil, (self.width, self.height), source)
            image.save(filename)
        else:
            raise Exception('cannot save non byte images')

    def make_display(self):
        """Build a pyglet vertex list of one textured quad covering the
        texture's pixel dimensions."""
        import pyglet
        uvs = 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0
        x1 = 0.0
        y1 = 0.0
        z = 0.0
        x2 = self.width
        y2 = self.height
        verts = (
            x1, y1, z,
            x2, y1, z,
            x2, y2, z,
            x1, y2, z,
        )
        return pyglet.graphics.vertex_list(4,
            ('v3f', verts),
            ('t2f', uvs),
        )

    def draw(self, x=0, y=0, scale=1.0):
        # Draw the texture as a screen-space quad at (x, y).
        with self:
            quad(
                left=x, top=self.height+y, right=self.width+x, bottom=y, scale=scale
            )

    def set_data(self, data=None, clamp=False, level=0):
        """Upload `data` (or allocate empty storage) for mip `level`,
        applying filter and clamp parameters first."""
        with self:
            # `filter` may be a (min, mag) pair or a single min filter.
            if isinstance(self.filter, tuple):
                min_filter, mag_filter = self.filter
            else:
                min_filter = self.filter
                mag_filter = GL_LINEAR
            glTexParameteri(self.target, GL_TEXTURE_MIN_FILTER, min_filter)
            glTexParameteri(self.target, GL_TEXTURE_MAG_FILTER, mag_filter)
            # `clamp` is a string containing any of 's'/'t'.
            if clamp:
                if 's' in clamp:
                    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
                if 't' in clamp:
                    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
            if data:
                if self.mipmap:
                    gluBuild2DMipmaps(
                        self.target, self.mipmap,
                        self.width, self.height,
                        self.spec.channels.enum,
                        self.spec.type.enum,
                        data,
                    )
                else:
                    glTexImage2D(
                        self.target, level, self.format,
                        self.width, self.height,
                        0,
                        self.spec.channels.enum, self.spec.type.enum,
                        data,
                    )
            else:
                # No data: allocate storage only, sized for this level.
                glTexImage2D(
                    self.target, level, self.format,
                    self.width/2**level, self.height/2**level,
                    #self.width, self.height,
                    #width, height,
                    0,
                    self.spec.channels.enum, self.spec.type.enum,
                    0,
                )
            glFlush()

    def get_data(self, buffer):
        # Read the full base level back into the given ctypes buffer.
        with self:
            glPushClientAttrib(GL_CLIENT_PIXEL_STORE_BIT)
            glGetTexImage(
                self.target, 0, self.spec.channels.enum, self.spec.type.enum,
                buffer,
            )
            glPopClientAttrib()
            glFinish()

    def update(self):
        """Push the CPU-side buffer (if any) to the GPU."""
        if self._buffer:
            self.set_data(self.buffer, self.clamp)
        else:
            self.set_data(clamp=self.clamp)

    def retrieve(self):
        """Pull the GPU contents into the CPU-side buffer."""
        self.get_data(self.buffer)
        glFinish()

    def __getitem__(self, (x, y)):
        # Coordinates wrap, so negative/overflowing indices tile.
        x, y = x%self.width, y%self.height
        channels = self.spec.channels.count
        pos = (x + y * self.width) * channels
        if channels == 1:
            return self.buffer[pos]
        else:
            end = pos + channels
            return self.buffer[pos:end]

    def __setitem__(self, (x, y), value):
        x, y = x%self.width, y%self.height
        channels = self.spec.channels.count
        pos = (x + y * self.width) * channels
        if channels == 1:
            self.buffer[pos] = value
        else:
            end = pos + channels
            self.buffer[pos:end] = value

    def __iter__(self):
        # Yield scalars for 1-channel textures, per-pixel slices otherwise.
        channels = self.spec.channels.count
        if channels == 1:
            for value in self.buffer:
                yield value
        else:
            for i in range(0, len(self.buffer), channels):
                yield self.buffer[i:i+channels]
class CubeMap(Context):
    '''
    Assumes a texturelayout of all 6 faces as a single row of:
    right = +x, back = -z, left = -x, front = +z, bottom = -y, top = +y
    '''
    _get = GL_TEXTURE_BINDING_CUBE_MAP
    target = GL_TEXTURE_CUBE_MAP
    unit = GL_TEXTURE0

    def __init__(self, width, height, data):
        # `data` is the raw RGBA byte string of the 6-faces-in-a-row image;
        # each face is height x height pixels.
        Context.__init__(self)
        id = self.id = gen_texture()
        self.width = width
        self.height = height
        with self:
            glTexParameteri(self.target, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glTexParameteri(self.target, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            right = GL_TEXTURE_CUBE_MAP_POSITIVE_X
            left = GL_TEXTURE_CUBE_MAP_NEGATIVE_X
            top = GL_TEXTURE_CUBE_MAP_POSITIVE_Y
            bottom = GL_TEXTURE_CUBE_MAP_NEGATIVE_Y
            front = GL_TEXTURE_CUBE_MAP_POSITIVE_Z
            back = GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
            # NOTE(review): `face_size` is computed but never used.
            face_size = (self.height**2) * 4
            # Upload the faces in their left-to-right order in the image.
            self.set_data(right, data, 0)
            self.set_data(back, data, 1)
            self.set_data(left, data, 2)
            self.set_data(front, data, 3)
            self.set_data(bottom, data, 4)
            self.set_data(top, data, 5)

    def set_data(self, target, data, index):
        # Upload face `index` (column in the source row) to GL `target`.
        glTexImage2D(
            target, 0, GL_RGBA,
            self.height, self.height,
            0,
            GL_RGBA, GL_UNSIGNED_BYTE,
            self.face_data(data, index),
        )

    def face_data(self, data, index):
        """Extract face `index` from the single-row layout, scanline by
        scanline (4 bytes per pixel)."""
        result = ''
        face_pitch = self.height*4
        full_pitch = self.width*4
        for y in range(self.height):
            offset = y*full_pitch
            start = offset + index*face_pitch
            end = start + face_pitch
            result += data[start:end]
        return result

    def bind(self, id):
        glBindTexture(self.target, id)

    def _enter(self):
        # Save enable/texture state so __exit__ can restore it.
        glPushAttrib(GL_ENABLE_BIT | GL_TEXTURE_BIT)
        glActiveTexture(self.unit)
        glEnable(self.target)

    def _exit(self):
        glPopAttrib()

    @classmethod
    def open(cls, filename):
        """Load the 6-faces-in-a-row image via PIL into a new CubeMap."""
        if not has_pil:
            raise DependencyException('PIL is requried to open image files')
        image = Image.open(filename)
        image = image.convert('RGBA')
        width, height = image.size
        data = image.tostring()
        return cls(width, height, data)
class Texture1D(Context):
    """A 1D OpenGL texture built from an in-memory sequence (commonly
    used as a lookup table in shaders)."""
    target = GL_TEXTURE_1D
    _get = GL_TEXTURE_BINDING_1D

    def __init__(self, data, unit=GL_TEXTURE0, ctype=c_ubyte, format=GL_LUMINANCE, type=GL_UNSIGNED_BYTE, internal_format=GL_LUMINANCE):
        Context.__init__(self)
        # Pack the Python sequence into a ctypes array of `ctype`.
        data = (ctype*len(data))(*data)
        self.unit = unit
        self.id = gen_texture()
        self.bind(self.id)
        # Nearest filtering: lookup tables must not be interpolated.
        glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexImage1D(
            GL_TEXTURE_1D, 0, internal_format,
            len(data), 0,
            format, type,
            data,
        )
        self.bind(0)

    def _enter(self):
        # Save enable/texture state so __exit__ can restore it.
        glPushAttrib(GL_ENABLE_BIT | GL_TEXTURE_BIT)
        glActiveTexture(self.unit)
        glEnable(self.target)

    def bind(self, id):
        glBindTexture(GL_TEXTURE_1D, id)

    def _exit(self):
        glPopAttrib()
class ArrayTexture(Context):
    """A GL_TEXTURE_2D_ARRAY: a stack of equally sized 2D slices uploaded
    in one glTexImage3D call, with optional auto-generated mipmaps."""
    target = GL_TEXTURE_2D_ARRAY
    _get = GL_TEXTURE_BINDING_2D_ARRAY

    def __init__(self, data, width, height, slice_count, format=GL_RGBA, type=GL_UNSIGNED_BYTE, internal_format=GL_RGBA, unit=GL_TEXTURE0, mipmaps=4):
        # `data` is the raw byte string of all slices concatenated.
        Context.__init__(self)
        self.unit = unit
        self.id = gen_texture()
        self.bind(self.id)
        if mipmaps > 0:
            glTexParameteri(self.target, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
            glTexParameteri(self.target, GL_TEXTURE_BASE_LEVEL, 0)
            glTexParameteri(self.target, GL_TEXTURE_MAX_LEVEL, mipmaps)
        else:
            glTexParameteri(self.target, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexParameteri(self.target, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        pointer = cast(c_char_p(data), c_void_p)
        glTexImage3D(
            self.target, 0, internal_format,
            width, height, slice_count, 0,
            format, type, pointer,
        )
        if mipmaps > 0:
            glGenerateMipmap(self.target)
        self.bind(0)

    @classmethod
    def raw_open(cls, names, width, height, format=GL_RGBA, type=GL_UNSIGNED_BYTE, internal_format=GL_RGBA, unit=GL_TEXTURE0, ctype=c_ubyte, channels=4, mipmaps=4):
        """Build an array texture from one raw file per slice.
        NOTE(review): `ctype` and `channels` are accepted but unused."""
        slices = [open(name, 'rb').read() for name in names]
        slice_count = len(slices)
        base_level = ''.join(slices)
        return cls(base_level, width, height, slice_count, format, type, internal_format, unit, mipmaps)

    def _enter(self):
        # Save texture state so __exit__ can restore it.
        glPushAttrib(GL_TEXTURE_BIT)
        glActiveTexture(self.unit)

    def _exit(self):
        glPopAttrib()

    def bind(self, id):
        glBindTexture(self.target, id)
| [
"noreply@github.com"
] | noreply@github.com |
734c51a8c062eb6de62195d7e60d9329e31e8df6 | 9b0e60703ecefc1a4fe2fe54d838dc41c331d0ce | /src/signups/admin.py | 3e0316e6b334506f971d0d2ab6fcabaf17a8c1d8 | [] | no_license | vinothini22/mvp | f4f0260fed7a8ed422457689e866b4dcab87cfae | d18e0c8454b01402837ee94172de43763546905a | refs/heads/master | 2021-01-01T15:50:24.555156 | 2014-09-10T03:57:57 | 2014-09-10T03:57:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from django.contrib import admin
# Register your models here.
from signups.models import Signup
# NOTE(review): dead code preserved as a bare string literal — a ModelAdmin
# customization that was "commented out" by wrapping it in triple quotes.
"""class SignupAdmin(admin.ModelAdmin):
    class Meta:
        model = Signup"""
# Expose the Signup model in the Django admin with default ModelAdmin options.
admin.site.register(Signup)
"vino.rv2@gmail.com"
] | vino.rv2@gmail.com |
8989f3601bdf239ab9afb02f1537dc5d01693da4 | 2b69c06306872ba60f5e2624263c9171e498c710 | /PANPytorch/trainer/trainer.py | 885ed68823f09f45feab6663b5cf91a7b15b03cd | [
"Apache-2.0"
] | permissive | bilal-rachik/text-localization | a72948fde68a3ab508ea780cfe4c90b792707e46 | 9162ab1436256b0a053f5d228c0b7c2011e7d82e | refs/heads/master | 2022-12-08T18:41:39.054281 | 2020-09-09T08:22:12 | 2020-09-09T08:22:12 | 293,534,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,204 | py | # -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:58
# @Author : zhoujun
import os
import cv2
import shutil
import numpy as np
import time
from tqdm import tqdm
import torch
import torchvision.utils as vutils
from torchvision import transforms
from PANPytorch.post_processing import decode
from PANPytorch.utils import PolynomialLR, runningScore, cal_text_score, cal_kernel_score, cal_recall_precison_f1
from PANPytorch.base import BaseTrainer
class Trainer(BaseTrainer):
    """Training driver for the PAN text-detection model.

    Runs the per-epoch training loop, logs losses/metrics (optionally to
    TensorBoard), evaluates recall/precision/F1 on the validation set and
    checkpoints the model, tracking the best one by the configured metric.
    """
    def __init__(self, config, model, criterion, train_loader, weights_init=None):
        super(Trainer, self).__init__(config, model, criterion, weights_init)
        self.show_images_interval = self.config['trainer']['show_images_interval']
        self.test_path = self.config['data_loader']['args']['dataset']['val_data_path']
        self.train_loader = train_loader
        self.train_loader_len = len(train_loader)
        # PolynomialLR is stepped per batch, so its horizon is epochs * batches.
        if self.config['lr_scheduler']['type'] == 'PolynomialLR':
            self.scheduler = PolynomialLR(self.optimizer, self.epochs * self.train_loader_len)
        self.logger.info('train dataset has {} samples,{} in dataloader'.format(self.train_loader.dataset_len,
                                                                                self.train_loader_len))
    def _train_epoch(self, epoch):
        """Train for one epoch and return a dict with loss, lr, time and epoch."""
        self.model.train()
        epoch_start = time.time()
        batch_start = time.time()
        train_loss = 0.
        running_metric_text = runningScore(2)
        running_metric_kernel = runningScore(2)
        lr = self.optimizer.param_groups[0]['lr']
        for i, (images, labels, training_masks) in enumerate(self.train_loader):
            if i >= self.train_loader_len:
                break
            self.global_step += 1
            lr = self.optimizer.param_groups[0]['lr']
            # Move the batch tensors to the training device (GPU/CPU)
            cur_batch_size = images.size()[0]
            images, labels, training_masks = images.to(self.device), labels.to(self.device), training_masks.to(
                self.device)
            preds = self.model(images)
            loss_all, loss_tex, loss_ker, loss_agg, loss_dis = self.criterion(preds, labels, training_masks)
            # backward
            self.optimizer.zero_grad()
            loss_all.backward()
            self.optimizer.step()
            if self.config['lr_scheduler']['type'] == 'PolynomialLR':
                self.scheduler.step()
            # acc iou: channel 0 is the text map, channel 1 the kernel map
            score_text = cal_text_score(preds[:, 0, :, :], labels[:, 0, :, :], training_masks, running_metric_text)
            score_kernel = cal_kernel_score(preds[:, 1, :, :], labels[:, 1, :, :], labels[:, 0, :, :], training_masks,
                                            running_metric_kernel)
            # Record the losses and accuracy for logging
            loss_all = loss_all.item()
            loss_tex = loss_tex.item()
            loss_ker = loss_ker.item()
            loss_agg = loss_agg.item()
            loss_dis = loss_dis.item()
            train_loss += loss_all
            acc = score_text['Mean Acc']
            iou_text = score_text['Mean IoU']
            iou_kernel = score_kernel['Mean IoU']
            if (i + 1) % self.display_interval == 0:
                batch_time = time.time() - batch_start
                self.logger.info(
                    '[{}/{}], [{}/{}], global_step: {}, Speed: {:.1f} samples/sec, acc: {:.4f}, iou_text: {:.4f}, iou_kernel: {:.4f}, loss_all: {:.4f}, loss_tex: {:.4f}, loss_ker: {:.4f}, loss_agg: {:.4f}, loss_dis: {:.4f}, lr:{:.6}, time:{:.2f}'.format(
                        epoch, self.epochs, i + 1, self.train_loader_len, self.global_step,
                        self.display_interval * cur_batch_size / batch_time, acc, iou_text,
                        iou_kernel, loss_all, loss_tex, loss_ker, loss_agg, loss_dis, lr, batch_time))
                batch_start = time.time()
            if self.tensorboard_enable:
                # write tensorboard
                self.writer.add_scalar('TRAIN/LOSS/loss_all', loss_all, self.global_step)
                self.writer.add_scalar('TRAIN/LOSS/loss_tex', loss_tex, self.global_step)
                self.writer.add_scalar('TRAIN/LOSS/loss_ker', loss_ker, self.global_step)
                self.writer.add_scalar('TRAIN/LOSS/loss_agg', loss_agg, self.global_step)
                self.writer.add_scalar('TRAIN/LOSS/loss_dis', loss_dis, self.global_step)
                self.writer.add_scalar('TRAIN/ACC_IOU/acc', acc, self.global_step)
                self.writer.add_scalar('TRAIN/ACC_IOU/iou_text', iou_text, self.global_step)
                self.writer.add_scalar('TRAIN/ACC_IOU/iou_kernel', iou_kernel, self.global_step)
                self.writer.add_scalar('TRAIN/lr', lr, self.global_step)
                if i % self.show_images_interval == 0:
                    # show images on tensorboard
                    self.writer.add_images('TRAIN/imgs', images, self.global_step)
                    # text kernel and training_masks: binarize for display
                    gt_texts, gt_kernels = labels[:, 0, :, :], labels[:, 1, :, :]
                    gt_texts[gt_texts <= 0.5] = 0
                    gt_texts[gt_texts > 0.5] = 1
                    gt_kernels[gt_kernels <= 0.5] = 0
                    gt_kernels[gt_kernels > 0.5] = 1
                    show_label = torch.cat([gt_texts, gt_kernels, training_masks.float()])
                    show_label = vutils.make_grid(show_label.unsqueeze(1), nrow=cur_batch_size, normalize=False,
                                                  padding=20,
                                                  pad_value=1)
                    self.writer.add_image('TRAIN/gt', show_label, self.global_step)
                    # model output: sigmoid the text/kernel channels for display
                    preds[:, :2, :, :] = torch.sigmoid(preds[:, :2, :, :])
                    show_pred = torch.cat([preds[:, 0, :, :], preds[:, 1, :, :]])
                    show_pred = vutils.make_grid(show_pred.unsqueeze(1), nrow=cur_batch_size, normalize=False,
                                                 padding=20,
                                                 pad_value=1)
                    self.writer.add_image('TRAIN/preds', show_pred, self.global_step)
        return {'train_loss': train_loss / self.train_loader_len, 'lr': lr, 'time': time.time() - epoch_start,
                'epoch': epoch}
    def _eval(self):
        """Run detection on every validation image and return (recall, precision, hmean)."""
        self.model.eval()
        # torch.cuda.empty_cache() # speed up evaluating after training finished
        img_path = os.path.join(self.test_path, 'img')
        gt_path = os.path.join(self.test_path, 'gt')
        result_save_path = os.path.join(self.save_dir, 'result')
        # Start from an empty result directory every evaluation.
        if os.path.exists(result_save_path):
            shutil.rmtree(result_save_path, ignore_errors=True)
        if not os.path.exists(result_save_path):
            os.makedirs(result_save_path)
        short_size = 736
        # Run prediction on every test image
        img_paths = [os.path.join(img_path, x) for x in os.listdir(img_path)]
        for img_path in tqdm(img_paths, desc='test models'):
            img_name = os.path.basename(img_path).split('.')[0]
            save_name = os.path.join(result_save_path, 'res_' + img_name + '.txt')
            assert os.path.exists(img_path), 'file is not exists'
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            h, w = img.shape[:2]
            # Scale so the shorter side equals short_size.
            scale = short_size / min(h, w)
            img = cv2.resize(img, None, fx=scale, fy=scale)
            # Convert the image from (w, h) to a (1, channels, h, w) tensor
            tensor = transforms.ToTensor()(img)
            tensor = tensor.unsqueeze_(0)
            tensor = tensor.to(self.device)
            with torch.no_grad():
                torch.cuda.synchronize(self.device)
                preds = self.model(tensor)[0]
                torch.cuda.synchronize(self.device)
                preds, boxes_list = decode(preds)
                # Map detected boxes back to original-image coordinates.
                scale = (preds.shape[1] / w, preds.shape[0] / h)
                if len(boxes_list):
                    boxes_list = boxes_list / scale
            np.savetxt(save_name, boxes_list.reshape(-1, 8), delimiter=',', fmt='%d')
        # Compute recall / precision / F1 against the ground truth
        result_dict = cal_recall_precison_f1(gt_path=gt_path, result_path=result_save_path)
        return result_dict['recall'], result_dict['precision'], result_dict['hmean']
    def _on_epoch_finish(self):
        """Log epoch stats, optionally evaluate, and save latest/best checkpoints."""
        self.logger.info('[{}/{}], train_loss: {:.4f}, time: {:.4f}, lr: {}'.format(
            self.epoch_result['epoch'], self.epochs, self.epoch_result['train_loss'], self.epoch_result['time'],
            self.epoch_result['lr']))
        net_save_path = '{}/PANNet_latest.pth'.format(self.checkpoint_dir)
        save_best = False
        if self.config['trainer']['metrics'] == 'hmean':  # use F1 (hmean) as the best-model metric
            recall, precision, hmean = self._eval()
            if self.tensorboard_enable:
                self.writer.add_scalar('EVAL/recall', recall, self.global_step)
                self.writer.add_scalar('EVAL/precision', precision, self.global_step)
                self.writer.add_scalar('EVAL/hmean', hmean, self.global_step)
            self.logger.info('test: recall: {:.6f}, precision: {:.6f}, f1: {:.6f}'.format(recall, precision, hmean))
            if hmean > self.metrics['hmean']:
                save_best = True
                self.metrics['train_loss'] = self.epoch_result['train_loss']
                self.metrics['hmean'] = hmean
                self.metrics['precision'] = precision
                self.metrics['recall'] = recall
                self.metrics['best_model'] = net_save_path
        else:
            # Otherwise, track the lowest training loss.
            if self.epoch_result['train_loss'] < self.metrics['train_loss']:
                save_best = True
                self.metrics['train_loss'] = self.epoch_result['train_loss']
                self.metrics['best_model'] = net_save_path
        self._save_checkpoint(self.epoch_result['epoch'], net_save_path, save_best)
    def _on_train_finish(self):
        """Log the final metric values once training is done."""
        for k, v in self.metrics.items():
            self.logger.info('{}:{}'.format(k, v))
        self.logger.info('finish train')
| [
"brachik@bigapps.fr"
] | brachik@bigapps.fr |
6134850fd7818a9dcf64d35c57c579fb11960213 | 1a0902bf1403510e6038d2542e959a68000e028a | /helloworld.py | 2114f0793048ea0afb0241ba36b5ae5adbcfe688 | [] | no_license | henzfalz/HelloWorld | 5e9e705fb43ec6cca2a981fb8672bb3fed0c3eee | 181d7c77cbda98a4c7e883ed83ab77fe7e22d715 | refs/heads/master | 2020-03-13T09:07:50.250506 | 2018-05-04T19:29:19 | 2018-05-04T19:29:19 | 131,057,916 | 0 | 1 | null | 2018-04-25T20:44:48 | 2018-04-25T20:08:04 | Python | UTF-8 | Python | false | false | 101,591 | py | # -*- coding: utf-8 -*-
from LineAPI.linepy import *
from LineAPI.akad.ttypes import Message
from LineAPI.akad.ttypes import ContentType as Type
from gtts import gTTS
from time import sleep
from datetime import datetime, timedelta
from bs4 import BeautifulSoup
from googletrans import Translator
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, six, ast, pytz, urllib, urllib3, urllib.parse, traceback, atexit
# Log in to LINE (interactive/QR login when no auth token is supplied).
client = LINE()
#client = LINE("u94065e4f90aebd04ea0f8fded453072d")
# Cache the logged-in account's identity and settings.
clientMid = client.profile.mid
clientProfile = client.getProfile()
clientSettings = client.getSettings()
# Long-polling helper that feeds operations to clientBot().
clientPoll = OEPoll(client)
# Start time used by the "runtime" command.
botStart = time.time()
# In-memory chat log, keyed by message id; persisted to Log_data.json.
msg_dict = {}
# Runtime feature toggles and state for the selfbot; mutated by the command
# handlers in clientBot().
settings = {
    "autoAdd": False,          # auto-accept new friend adds
    "autoJoin": False,         # auto-accept group invitations
    "autoLeave": False,        # auto-leave rooms when invited
    "autoRead": False,         # auto-send read receipts
    "autoRespon": False,
    "autoJoinTicket": False,   # auto-join groups via ticket/QR link
    "checkContact": False,
    "checkPost": False,
    "checkSticker": False,
    "changePictureProfile": False,
    "changeGroupPicture": [],
    "keyCommand": "",          # command prefix when setKey is enabled
    "myProfile": {             # snapshot of own profile, used for restore
        "displayName": "",
        "coverId": "",
        "pictureStatus": "",
        "statusMessage": ""
    },
    "mimic": {                 # message-mimic feature state
        "copy": False,
        "status": False,
        "target": {}
    },
    "setKey": False,           # whether commands require the keyCommand prefix
    "unsendMessage": False
}
# Read-receipt ("lurk") tracking state, keyed per chat.
read = {
    "ROM": {},
    "readPoint": {},
    "readMember": {},
    "readTime": {}
}
list_language = {
"list_textToSpeech": {
"id": "Indonesia",
"af" : "Afrikaans",
"sq" : "Albanian",
"ar" : "Arabic",
"hy" : "Armenian",
"bn" : "Bengali",
"ca" : "Catalan",
"zh" : "Chinese",
"zh-cn" : "Chinese (Mandarin/China)",
"zh-tw" : "Chinese (Mandarin/Taiwan)",
"zh-yue" : "Chinese (Cantonese)",
"hr" : "Croatian",
"cs" : "Czech",
"da" : "Danish",
"nl" : "Dutch",
"en" : "English",
"en-au" : "English (Australia)",
"en-uk" : "English (United Kingdom)",
"en-us" : "English (United States)",
"eo" : "Esperanto",
"fi" : "Finnish",
"fr" : "French",
"de" : "German",
"el" : "Greek",
"hi" : "Hindi",
"hu" : "Hungarian",
"is" : "Icelandic",
"id" : "Indonesian",
"it" : "Italian",
"ja" : "Japanese",
"km" : "Khmer (Cambodian)",
"ko" : "Korean",
"la" : "Latin",
"lv" : "Latvian",
"mk" : "Macedonian",
"no" : "Norwegian",
"pl" : "Polish",
"pt" : "Portuguese",
"ro" : "Romanian",
"ru" : "Russian",
"sr" : "Serbian",
"si" : "Sinhala",
"sk" : "Slovak",
"es" : "Spanish",
"es-es" : "Spanish (Spain)",
"es-us" : "Spanish (United States)",
"sw" : "Swahili",
"sv" : "Swedish",
"ta" : "Tamil",
"th" : "Thai",
"tr" : "Turkish",
"uk" : "Ukrainian",
"vi" : "Vietnamese",
"cy" : "Welsh"
},
"list_translate": {
"af": "afrikaans",
"sq": "albanian",
"am": "amharic",
"ar": "arabic",
"hy": "armenian",
"az": "azerbaijani",
"eu": "basque",
"be": "belarusian",
"bn": "bengali",
"bs": "bosnian",
"bg": "bulgarian",
"ca": "catalan",
"ceb": "cebuano",
"ny": "chichewa",
"zh-cn": "chinese (simplified)",
"zh-tw": "chinese (traditional)",
"co": "corsican",
"hr": "croatian",
"cs": "czech",
"da": "danish",
"nl": "dutch",
"en": "english",
"eo": "esperanto",
"et": "estonian",
"tl": "filipino",
"fi": "finnish",
"fr": "french",
"fy": "frisian",
"gl": "galician",
"ka": "georgian",
"de": "german",
"el": "greek",
"gu": "gujarati",
"ht": "haitian creole",
"ha": "hausa",
"haw": "hawaiian",
"iw": "hebrew",
"hi": "hindi",
"hmn": "hmong",
"hu": "hungarian",
"is": "icelandic",
"ig": "igbo",
"id": "indonesian",
"ga": "irish",
"it": "italian",
"ja": "japanese",
"jw": "javanese",
"kn": "kannada",
"kk": "kazakh",
"km": "khmer",
"ko": "korean",
"ku": "kurdish (kurmanji)",
"ky": "kyrgyz",
"lo": "lao",
"la": "latin",
"lv": "latvian",
"lt": "lithuanian",
"lb": "luxembourgish",
"mk": "macedonian",
"mg": "malagasy",
"ms": "malay",
"ml": "malayalam",
"mt": "maltese",
"mi": "maori",
"mr": "marathi",
"mn": "mongolian",
"my": "myanmar (burmese)",
"ne": "nepali",
"no": "norwegian",
"ps": "pashto",
"fa": "persian",
"pl": "polish",
"pt": "portuguese",
"pa": "punjabi",
"ro": "romanian",
"ru": "russian",
"sm": "samoan",
"gd": "scots gaelic",
"sr": "serbian",
"st": "sesotho",
"sn": "shona",
"sd": "sindhi",
"si": "sinhala",
"sk": "slovak",
"sl": "slovenian",
"so": "somali",
"es": "spanish",
"su": "sundanese",
"sw": "swahili",
"sv": "swedish",
"tg": "tajik",
"ta": "tamil",
"te": "telugu",
"th": "thai",
"tr": "turkish",
"uk": "ukrainian",
"ur": "urdu",
"uz": "uzbek",
"vi": "vietnamese",
"cy": "welsh",
"xh": "xhosa",
"yi": "yiddish",
"yo": "yoruba",
"zu": "zulu",
"fil": "Filipino",
"he": "Hebrew"
}
}
# Restore the persisted chat log; keep running with the empty default on a
# missing/unreadable file or malformed JSON (best-effort, as before).
try:
    with open("Log_data.json","r",encoding="utf_8_sig") as f:
        msg_dict = json.loads(f.read())
except (OSError, ValueError):
    # OSError: file missing/unreadable; ValueError: bad JSON
    # (json.JSONDecodeError subclasses ValueError). The original bare
    # except: also swallowed KeyboardInterrupt/SystemExit.
    print("Couldn't read Log data")
# Snapshot the current profile so commands like "restore" can revert any
# later changes (name/bio/picture/cover).
settings["myProfile"]["displayName"] = clientProfile.displayName
settings["myProfile"]["statusMessage"] = clientProfile.statusMessage
settings["myProfile"]["pictureStatus"] = clientProfile.pictureStatus
coverId = client.getProfileDetail()["result"]["objectId"]
settings["myProfile"]["coverId"] = coverId
def restartBot():
    """Replace the current process with a fresh interpreter running this script."""
    print ("[ INFO ] BOT RESTART")
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def logError(text):
    """Log *text* through the LINE client and append it to logError.txt with a
    localized (Indonesian) timestamp, e.g. "Senin, 01 - Januari - 2018 | 12:00:00".
    """
    client.log("[ ERROR ] {}".format(str(text)))
    tz = pytz.timezone("Asia/Jakarta")
    now = datetime.now(tz=tz)
    day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
    hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
    bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
    # Translate the English weekday name to Indonesian.
    hasil = hari[day.index(now.strftime('%A'))]
    # strftime('%m') is zero-padded ("01".."12"); the original compared it to
    # str(k) ("0".."11"), which never matched, so the month name was never
    # substituted. Index the month list directly instead.
    namaBulan = bulan[int(now.strftime('%m')) - 1]
    # Renamed from the original local `time`, which shadowed the time module.
    timestamp = "{}, {} - {} - {} | {}".format(str(hasil), str(now.strftime('%d')), str(namaBulan), str(now.strftime('%Y')), str(now.strftime('%H:%M:%S')))
    with open("logError.txt","a") as error:
        error.write("\n[ {} ] {}".format(str(timestamp), text))
def cTime_to_datetime(unixtime):
    """Convert a LINE timestamp in *milliseconds* (int or str) to a local datetime.

    Integer division replaces the original string slicing that chopped off
    the last three characters to drop the millisecond part.
    """
    return datetime.fromtimestamp(int(unixtime) // 1000)
def dt_to_str(dt):
    """Format a datetime as a HH:MM:SS clock string."""
    return format(dt, '%H:%M:%S')
def delete_log():
    """Purge chat-log entries older than one day from msg_dict, deleting any
    downloaded file attached to an entry first.
    """
    # Iterate over a snapshot of the keys: the original iterated msg_dict
    # directly while deleting from it, which raises RuntimeError
    # ("dictionary changed size during iteration"). The unused `ndt`
    # local was also dropped.
    for data in list(msg_dict):
        if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])) > timedelta(1):
            if "path" in msg_dict[data]:
                client.deleteFile(msg_dict[data]["path"])
            del msg_dict[data]
def sendMention(to, text="", mids=[]):
    """Send a LINE message to *to* that @-mentions each mid in *mids*.

    Each "@!" placeholder in *text* is replaced by a mention; the number of
    placeholders must equal len(mids). With no placeholder, a single mention
    is prepended to *text*. Raises Exception("Invalid mids") otherwise.

    NOTE(review): mids=[] is a mutable default argument (harmless here since
    it is never mutated, but fragile). The S/E offsets below are hard-coded
    around the 10-char placeholder "@henzfalz " — presumably tuned for the
    client's mention-range format; confirm before changing the placeholder.
    """
    arrData = ""
    arr = []
    # Placeholder text inserted where each mention goes; the MENTION metadata
    # tells the client which character range to render as a mention.
    mention = "@henzfalz "
    if mids == []:
        raise Exception("Invalid mids")
    if "@!" in text:
        if text.count("@!") != len(mids):
            raise Exception("Invalid mids")
        texts = text.split("@!")
        textx = ""
        for mid in mids:
            # Append the text segment before this mention, then record the
            # start/end character range the mention will occupy.
            textx += str(texts[mids.index(mid)])
            slen = len(textx)
            elen = len(textx) + 15
            arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mid}
            arr.append(arrData)
            textx += mention
        # Trailing text after the last placeholder.
        textx += str(texts[len(mids)])
    else:
        # No placeholder: mention the first mid, then append the text.
        textx = ""
        slen = len(textx)
        elen = len(textx) + 15
        arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
        arr.append(arrData)
        textx += mention + str(text)
    # contentMetadata carries the mention ranges as a JSON string.
    client.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
def command(text):
    """Normalize an incoming chat message to a command string.

    When the key prefix is enabled, messages must start with it; the prefix
    is stripped (everywhere it occurs, matching the original .replace) and
    non-matching messages become "Undefined command". Otherwise the
    lower-cased text is returned unchanged.
    """
    lowered = text.lower()
    if not settings["setKey"]:
        return lowered
    prefix = settings["keyCommand"]
    if lowered.startswith(prefix):
        return lowered.replace(prefix, "")
    return "Undefined command"
def helpmessage():
if settings['setKey'] == True:
key = settings['keyCommand']
else:
key = ''
helpMessage = "╔══[ KEYWORD ]" + "\n" + \
"╠ " + key + "key" + "\n" + \
"╠ " + key + "Translate" + "\n" + \
"╠ " + key + "TTS" + "\n" + \
"╠══[ Status BoT ]" + "\n" + \
"╠ " + key + "Restart" + "\n" + \
"╠ " + key + "Runtime" + "\n" + \
"╠ " + key + "Sp" + "\n" + \
"╠ " + key + "Status" + "\n" + \
"╠ MyKey" + "\n" + \
"╠ SetKey「On/Off」" + "\n" + \
"╠══[ Settings ]" + "\n" + \
"╠ " + key + "Add「On/Off」" + "\n" + \
"╠ " + key + "Join「On/Off」" + "\n" + \
"╠ " + key + "JoinQR「On/Off」" + "\n" + \
"╠ " + key + "Leave「On/Off」" + "\n" + \
"╠ " + key + "AutoRead「On/Off」" + "\n" + \
"╠ " + key + "Respon「On/Off」" + "\n" + \
"╠ " + key + "Contact「On/Off」" + "\n" + \
"╠ " + key + "Post「On/Off」" + "\n" + \
"╠ " + key + "Sticker「On/Off」" + "\n" + \
"╠ " + key + "UnsendChat「On/Off」" + "\n" + \
"╠══[ Self Key ]" + "\n" + \
"╠ " + key + "ChangeName:「Query」" + "\n" + \
"╠ " + key + "ChangeBio:「Query」" + "\n" + \
"╠ " + key + "Fal" + "\n" + \
"╠ " + key + "MyMid" + "\n" + \
"╠ " + key + "MyName" + "\n" + \
"╠ " + key + "MyBio" + "\n" + \
"╠ " + key + "MyPict" + "\n" + \
"╠ " + key + "MyVideoProfile" + "\n" + \
"╠ " + key + "MyCover" + "\n" + \
"╠ " + key + "StealContact「Mention」" + "\n" + \
"╠ " + key + "StealMid「Mention」" + "\n" + \
"╠ " + key + "StealName「Mention」" + "\n" + \
"╠ " + key + "StealBio「Mention」" + "\n" + \
"╠ " + key + "StealPicture「Mention」" + "\n" + \
"╠ " + key + "StealVideoProfile「Mention」" + "\n" + \
"╠ " + key + "StealCover「Mention」" + "\n" + \
"╠ " + key + "Copy「Mention」" + "\n" + \
"╠ " + key + "Restore" + "\n" + \
"╠ " + key + "BackupMe" + "\n" + \
"╠ " + key + "CPP" + "\n" + \
"╠══[ Group Key ]" + "\n" + \
"╠ " + key + "GCreator" + "\n" + \
"╠ " + key + "GId" + "\n" + \
"╠ " + key + "GName" + "\n" + \
"╠ " + key + "GPicture" + "\n" + \
"╠ " + key + "GTicket" + "\n" + \
"╠ " + key + "GTicket「On/Off」" + "\n" + \
"╠ " + key + "GList" + "\n" + \
"╠ " + key + "GMList" + "\n" + \
"╠ " + key + "GInfo" + "\n" + \
"╠ " + key + "GroupCP" + "\n" + \
"╠══[ Special Command ]" + "\n" + \
"╠ " + key + "Mimic「On/Off」" + "\n" + \
"╠ " + key + "MimicList" + "\n" + \
"╠ " + key + "MimicAdd「Mention」" + "\n" + \
"╠ " + key + "MimicDel「Mention」" + "\n" + \
"╠ " + key + "Crot" + "\n" + \
"╠ " + key + "Lurk「On/Off/Reset」" + "\n" + \
"╠ " + key + "Lurks" + "\n" + \
"╠══[ Media Key ]" + "\n" + \
"╠ " + key + "CheckDate「Date」" + "\n" + \
"╠ " + key + "CheckWebsite「url」" + "\n" + \
"╠ " + key + "Sholat「Location」" + "\n" + \
"╠ " + key + "Cuaca「Location」" + "\n" + \
"╠ " + key + "Location「Location」" + "\n" + \
"╠ " + key + "InstaInfo 「UserName」" + "\n" + \
"╠ " + key + "InstaPost 「UserName」|「Number」" + "\n" + \
"╠ " + key + "InstaStory 「UserName」|「Number」" + "\n" + \
"╠ " + key + "Youtube「Search」" + "\n" + \
"╠ " + key + "Music 「Search」" + "\n" + \
"╠ " + key + "Lyric 「Search」" + "\n" + \
"╠ " + key + "Image 「Search」" + "\n" + \
"╚══[ Creator : FaLLaH ]"
return helpMessage
def helptexttospeech():
if settings['setKey'] == True:
key = settings['keyCommand']
else:
key = ''
helpTextToSpeech = "╔══[ Key TTS ]" + "\n" + \
"╠ " + key + "af : Afrikaans" + "\n" + \
"╠ " + key + "sq : Albanian" + "\n" + \
"╠ " + key + "ar : Arabic" + "\n" + \
"╠ " + key + "hy : Armenian" + "\n" + \
"╠ " + key + "bn : Bengali" + "\n" + \
"╠ " + key + "ca : Catalan" + "\n" + \
"╠ " + key + "zh : Chinese" + "\n" + \
"╠ " + key + "zhcn : Chinese (Mandarin/China)" + "\n" + \
"╠ " + key + "zhtw : Chinese (Mandarin/Taiwan)" + "\n" + \
"╠ " + key + "zhyue : Chinese (Cantonese)" + "\n" + \
"╠ " + key + "hr : Croatian" + "\n" + \
"╠ " + key + "cs : Czech" + "\n" + \
"╠ " + key + "da : Danish" + "\n" + \
"╠ " + key + "nl : Dutch" + "\n" + \
"╠ " + key + "en : English" + "\n" + \
"╠ " + key + "enau : English (Australia)" + "\n" + \
"╠ " + key + "enuk : English (United Kingdom)" + "\n" + \
"╠ " + key + "enus : English (United States)" + "\n" + \
"╠ " + key + "eo : Esperanto" + "\n" + \
"╠ " + key + "fi : Finnish" + "\n" + \
"╠ " + key + "fr : French" + "\n" + \
"╠ " + key + "de : German" + "\n" + \
"╠ " + key + "el : Greek" + "\n" + \
"╠ " + key + "hi : Hindi" + "\n" + \
"╠ " + key + "hu : Hungarian" + "\n" + \
"╠ " + key + "is : Icelandic" + "\n" + \
"╠ " + key + "id : Indonesian" + "\n" + \
"╠ " + key + "it : Italian" + "\n" + \
"╠ " + key + "ja : Japanese" + "\n" + \
"╠ " + key + "km : Khmer (Cambodian)" + "\n" + \
"╠ " + key + "ko : Korean" + "\n" + \
"╠ " + key + "la : Latin" + "\n" + \
"╠ " + key + "lv : Latvian" + "\n" + \
"╠ " + key + "mk : Macedonian" + "\n" + \
"╠ " + key + "no : Norwegian" + "\n" + \
"╠ " + key + "pl : Polish" + "\n" + \
"╠ " + key + "pt : Portuguese" + "\n" + \
"╠ " + key + "ro : Romanian" + "\n" + \
"╠ " + key + "ru : Russian" + "\n" + \
"╠ " + key + "sr : Serbian" + "\n" + \
"╠ " + key + "si : Sinhala" + "\n" + \
"╠ " + key + "sk : Slovak" + "\n" + \
"╠ " + key + "es : Spanish" + "\n" + \
"╠ " + key + "eses : Spanish (Spain)" + "\n" + \
"╠ " + key + "esus : Spanish (United States)" + "\n" + \
"╠ " + key + "sw : Swahili" + "\n" + \
"╠ " + key + "sv : Swedish" + "\n" + \
"╠ " + key + "ta : Tamil" + "\n" + \
"╠ " + key + "th : Thai" + "\n" + \
"╠ " + key + "tr : Turkish" + "\n" + \
"╠ " + key + "uk : Ukrainian" + "\n" + \
"╠ " + key + "vi : Vietnamese" + "\n" + \
"╠ " + key + "cy : Welsh" + "\n" + \
"╚══[ Creator : Fall ]" + "\n" + "\n\n" + \
"Contoh : " + key + "say-id Fall"
return helpTextToSpeech
def helptranslate():
if settings['setKey'] == True:
key = settings['keyCommand']
else:
key = ''
helpTranslate = "╔══[ Key Translate ]" + "\n" + \
"╠ " + key + "af : afrikaans" + "\n" + \
"╠ " + key + "sq : albanian" + "\n" + \
"╠ " + key + "am : amharic" + "\n" + \
"╠ " + key + "ar : arabic" + "\n" + \
"╠ " + key + "hy : armenian" + "\n" + \
"╠ " + key + "az : azerbaijani" + "\n" + \
"╠ " + key + "eu : basque" + "\n" + \
"╠ " + key + "be : belarusian" + "\n" + \
"╠ " + key + "bn : bengali" + "\n" + \
"╠ " + key + "bs : bosnian" + "\n" + \
"╠ " + key + "bg : bulgarian" + "\n" + \
"╠ " + key + "ca : catalan" + "\n" + \
"╠ " + key + "ceb : cebuano" + "\n" + \
"╠ " + key + "ny : chichewa" + "\n" + \
"╠ " + key + "zhcn : chinese (simplified)" + "\n" + \
"╠ " + key + "zhtw : chinese (traditional)" + "\n" + \
"╠ " + key + "co : corsican" + "\n" + \
"╠ " + key + "hr : croatian" + "\n" + \
"╠ " + key + "cs : czech" + "\n" + \
"╠ " + key + "da : danish" + "\n" + \
"╠ " + key + "nl : dutch" + "\n" + \
"╠ " + key + "en : english" + "\n" + \
"╠ " + key + "eo : esperanto" + "\n" + \
"╠ " + key + "et : estonian" + "\n" + \
"╠ " + key + "tl : filipino" + "\n" + \
"╠ " + key + "fi : finnish" + "\n" + \
"╠ " + key + "fr : french" + "\n" + \
"╠ " + key + "fy : frisian" + "\n" + \
"╠ " + key + "gl : galician" + "\n" + \
"╠ " + key + "ka : georgian" + "\n" + \
"╠ " + key + "de : german" + "\n" + \
"╠ " + key + "el : greek" + "\n" + \
"╠ " + key + "gu : gujarati" + "\n" + \
"╠ " + key + "ht : haitian creole" + "\n" + \
"╠ " + key + "ha : hausa" + "\n" + \
"╠ " + key + "haw : hawaiian" + "\n" + \
"╠ " + key + "iw : hebrew" + "\n" + \
"╠ " + key + "hi : hindi" + "\n" + \
"╠ " + key + "hmn : hmong" + "\n" + \
"╠ " + key + "hu : hungarian" + "\n" + \
"╠ " + key + "is : icelandic" + "\n" + \
"╠ " + key + "ig : igbo" + "\n" + \
"╠ " + key + "id : indonesian" + "\n" + \
"╠ " + key + "ga : irish" + "\n" + \
"╠ " + key + "it : italian" + "\n" + \
"╠ " + key + "ja : japanese" + "\n" + \
"╠ " + key + "jw : javanese" + "\n" + \
"╠ " + key + "kn : kannada" + "\n" + \
"╠ " + key + "kk : kazakh" + "\n" + \
"╠ " + key + "km : khmer" + "\n" + \
"╠ " + key + "ko : korean" + "\n" + \
"╠ " + key + "ku : kurdish (kurmanji)" + "\n" + \
"╠ " + key + "ky : kyrgyz" + "\n" + \
"╠ " + key + "lo : lao" + "\n" + \
"╠ " + key + "la : latin" + "\n" + \
"╠ " + key + "lv : latvian" + "\n" + \
"╠ " + key + "lt : lithuanian" + "\n" + \
"╠ " + key + "lb : luxembourgish" + "\n" + \
"╠ " + key + "mk : macedonian" + "\n" + \
"╠ " + key + "mg : malagasy" + "\n" + \
"╠ " + key + "ms : malay" + "\n" + \
"╠ " + key + "ml : malayalam" + "\n" + \
"╠ " + key + "mt : maltese" + "\n" + \
"╠ " + key + "mi : maori" + "\n" + \
"╠ " + key + "mr : marathi" + "\n" + \
"╠ " + key + "mn : mongolian" + "\n" + \
"╠ " + key + "my : myanmar (burmese)" + "\n" + \
"╠ " + key + "ne : nepali" + "\n" + \
"╠ " + key + "no : norwegian" + "\n" + \
"╠ " + key + "ps : pashto" + "\n" + \
"╠ " + key + "fa : persian" + "\n" + \
"╠ " + key + "pl : polish" + "\n" + \
"╠ " + key + "pt : portuguese" + "\n" + \
"╠ " + key + "pa : punjabi" + "\n" + \
"╠ " + key + "ro : romanian" + "\n" + \
"╠ " + key + "ru : russian" + "\n" + \
"╠ " + key + "sm : samoan" + "\n" + \
"╠ " + key + "gd : scots gaelic" + "\n" + \
"╠ " + key + "sr : serbian" + "\n" + \
"╠ " + key + "st : sesotho" + "\n" + \
"╠ " + key + "sn : shona" + "\n" + \
"╠ " + key + "sd : sindhi" + "\n" + \
"╠ " + key + "si : sinhala" + "\n" + \
"╠ " + key + "sk : slovak" + "\n" + \
"╠ " + key + "sl : slovenian" + "\n" + \
"╠ " + key + "so : somali" + "\n" + \
"╠ " + key + "es : spanish" + "\n" + \
"╠ " + key + "su : sundanese" + "\n" + \
"╠ " + key + "sw : swahili" + "\n" + \
"╠ " + key + "sv : swedish" + "\n" + \
"╠ " + key + "tg : tajik" + "\n" + \
"╠ " + key + "ta : tamil" + "\n" + \
"╠ " + key + "te : telugu" + "\n" + \
"╠ " + key + "th : thai" + "\n" + \
"╠ " + key + "tr : turkish" + "\n" + \
"╠ " + key + "uk : ukrainian" + "\n" + \
"╠ " + key + "ur : urdu" + "\n" + \
"╠ " + key + "uz : uzbek" + "\n" + \
"╠ " + key + "vi : vietnamese" + "\n" + \
"╠ " + key + "cy : welsh" + "\n" + \
"╠ " + key + "xh : xhosa" + "\n" + \
"╠ " + key + "yi : yiddish" + "\n" + \
"╠ " + key + "yo : yoruba" + "\n" + \
"╠ " + key + "zu : zulu" + "\n" + \
"╠ " + key + "fil : Filipino" + "\n" + \
"╠ " + key + "he : Hebrew" + "\n" + \
"╚══[ Creator : Fall ]" + "\n" + "\n\n" + \
"Contoh : " + key + "tr-id Fall"
return helpTranslate
def clientBot(op):
try:
if op.type == 0:
print ("[ 0 ] END OF OPERATION")
return
if op.type == 5:
print ("[ 5 ] NOTIFIED ADD CONTACT")
if settings["autoAdd"] == True:
client.findAndAddContactsByMid(op.param1)
sendMention(op.param1, "Halo @!,cieee gw di add :3")
if op.type == 13:
print ("[ 13 ] NOTIFIED INVITE INTO GROUP")
if clientMid in op.param3:
if settings["autoJoin"] == True:
client.acceptGroupInvitation(op.param1)
sendMention(op.param1, "Halo @!, Fall is here,thanks for invite me :3")
if op.type in [22, 24]:
print ("[ 22 And 24 ] NOTIFIED INVITE INTO ROOM & NOTIFIED LEAVE ROOM")
if settings["autoLeave"] == True:
sendMention(op.param1, "Oi asw @!,group apa lg sih ini haddeeh")
client.leaveRoom(op.param1)
if op.type == 25:
try:
print ("[ 25 ] SEND MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
setKey = settings["keyCommand"].title()
if settings["setKey"] == False:
setKey = ''
if msg.toType == 0 or msg.toType == 1 or msg.toType == 2:
if msg.toType == 0:
if sender != client.profile.mid:
to = sender
else:
to = receiver
elif msg.toType == 1:
to = receiver
elif msg.toType == 2:
to = receiver
if msg.contentType == 0:
if text is None:
return
else:
cmd = command(text)
if cmd == "key":
helpMessage = helpmessage()
client.sendMessage(to, str(helpMessage))
elif cmd == "tts":
helpTextToSpeech = helptexttospeech()
client.sendMessage(to, str(helpTextToSpeech))
elif cmd == "translate":
helpTranslate = helptranslate()
client.sendMessage(to, str(helpTranslate))
elif cmd.startswith("changekey:"):
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
if " " in key:
client.sendMessage(to, "Key tidak bisa menggunakan spasi")
else:
settings["keyCommand"] = str(key).lower()
client.sendMessage(to, "Berhasil mengubah key command menjadi [ {} ]".format(str(key).lower()))
elif cmd == "sp":
start = time.time()
client.sendMessage(to, "wait...")
elapsed_time = time.time() - start
client.sendMessage(to, "{} detik".format(str(elapsed_time)))
elif cmd == "runtime":
timeNow = time.time()
runtime = timeNow - botStart
runtime = format_timespan(runtime)
client.sendMessage(to, "Bot sudah berjalan selama {}".format(str(runtime)))
elif cmd == "restart":
client.sendMessage(to, "Berhasil merestart Bot")
restartBot()
# Pembatas Script #
elif cmd == "add on":
settings["autoAdd"] = True
client.sendMessage(to, "Berhasil mengaktifkan auto add")
elif cmd == "add off":
settings["autoAdd"] = False
client.sendMessage(to, "Berhasil menonaktifkan auto add")
elif cmd == "join on":
settings["autoJoin"] = True
client.sendMessage(to, "Berhasil mengaktifkan auto join")
elif cmd == "join off":
settings["autoJoin"] = False
client.sendMessage(to, "Berhasil menonaktifkan auto join")
elif cmd == "leave on":
settings["autoLeave"] = True
client.sendMessage(to, "Berhasil mengaktifkan auto leave")
elif cmd == "leave off":
settings["autoLeave"] = False
client.sendMessage(to, "Berhasil menonaktifkan auto leave")
elif cmd == "respon on":
settings["autoRespon"] = True
client.sendMessage(to, "Berhasil mengaktifkan auto respon")
elif cmd == "respon off":
settings["autoRespon"] = False
client.sendMessage(to, "Berhasil menonaktifkan auto respon")
elif cmd == "read on":
settings["autoRead"] = True
client.sendMessage(to, "Berhasil mengaktifkan auto read")
elif cmd == "read off":
settings["autoRead"] = False
client.sendMessage(to, "Berhasil menonaktifkan auto read")
elif cmd == "joinqr on":
settings["autoJoinTicket"] = True
client.sendMessage(to, "Berhasil mengaktifkan auto join by ticket")
elif cmd == "Joinqr off":
settings["autoJoin"] = False
client.sendMessage(to, "Berhasil menonaktifkan auto join by ticket")
elif cmd == "contact on":
settings["checkContact"] = True
client.sendMessage(to, "Berhasil mengaktifkan check details contact")
elif cmd == "contact off":
settings["checkContact"] = False
client.sendMessage(to, "Berhasil menonaktifkan check details contact")
elif cmd == "post on":
settings["checkPost"] = True
client.sendMessage(to, "Berhasil mengaktifkan check details post")
elif cmd == "post off":
settings["checkPost"] = False
client.sendMessage(to, "Berhasil menonaktifkan check details post")
elif cmd == "sticker on":
settings["checkSticker"] = True
client.sendMessage(to, "Berhasil mengaktifkan check details sticker")
elif cmd == "sticker off":
settings["checkSticker"] = False
client.sendMessage(to, "Berhasil menonaktifkan check details sticker")
elif cmd == "unsendchat on":
settings["unsendMessage"] = True
client.sendMessage(to, "Berhasil mengaktifkan unsend message")
elif cmd == "unsendchat off":
settings["unsendMessage"] = False
client.sendMessage(to, "Berhasil menonaktifkan unsend message")
elif cmd == "status":
try:
ret_ = "╔══[ Status ]"
if settings["autoAdd"] == True: ret_ += "\n╠══[ ON ] Auto Add"
else: ret_ += "\n╠══[ OFF ] Auto Add"
if settings["autoJoin"] == True: ret_ += "\n╠══[ ON ] Auto Join"
else: ret_ += "\n╠══[ OFF ] Auto Join"
if settings["autoLeave"] == True: ret_ += "\n╠══[ ON ] Auto Leave Room"
else: ret_ += "\n╠══[ OFF ] Auto Leave Room"
if settings["autoJoinTicket"] == True: ret_ += "\n╠══[ ON ] Auto Join Ticket"
else: ret_ += "\n╠══[ OFF ] Auto Join Ticket"
if settings["autoRead"] == True: ret_ += "\n╠══[ ON ] Auto Read"
else: ret_ += "\n╠══[ OFF ] Auto Read"
if settings["autoRespon"] == True: ret_ += "\n╠══[ ON ] Detect Mention"
else: ret_ += "\n╠══[ OFF ] Detect Mention"
if settings["checkContact"] == True: ret_ += "\n╠══[ ON ] Check Contact"
else: ret_ += "\n╠══[ OFF ] Check Contact"
if settings["checkPost"] == True: ret_ += "\n╠══[ ON ] Check Post"
else: ret_ += "\n╠══[ OFF ] Check Post"
if settings["checkSticker"] == True: ret_ += "\n╠══[ ON ] Check Sticker"
else: ret_ += "\n╠══[ OFF ] Check Sticker"
if settings["setKey"] == True: ret_ += "\n╠══[ ON ] Set Key"
else: ret_ += "\n╠══[ OFF ] Set Key"
if settings["unsendMessage"] == True: ret_ += "\n╠══[ ON ] Unsend Message"
else: ret_ += "\n╠══[ OFF ] Unsend Message"
ret_ += "\n╚══[ Status ]"
client.sendMessage(to, str(ret_))
except Exception as e:
client.sendMessage(msg.to, str(e))
# Pembatas Script #
elif cmd == "virus":
client.sendContact(to, "u1f41296217e740650e0448b96851a3e2',")
elif cmd.startswith("changename:"):
sep = text.split(" ")
string = text.replace(sep[0] + " ","")
if len(string) <= 20:
profile = client.getProfile()
profile.displayName = string
client.updateProfile(profile)
client.sendMessage(to,"Berhasil mengganti display name menjadi{}".format(str(string)))
elif cmd.startswith("changebio:"):
sep = text.split(" ")
string = text.replace(sep[0] + " ","")
if len(string) <= 500:
profile = client.getProfile()
profile.statusMessage = string
client.updateProfile(profile)
client.sendMessage(to,"Berhasil mengganti status message menjadi{}".format(str(string)))
elif cmd == "fal":
client.sendContact(to, sender)
elif cmd == "mymid":
client.sendMessage(to, "[ MID ]\n{}".format(sender))
elif cmd == "myname":
contact = client.getContact(sender)
client.sendMessage(to, "[ Display Name ]\n{}".format(contact.displayName))
elif cmd == "mybio":
contact = client.getContact(sender)
client.sendMessage(to, "[ Status Message ]\n{}".format(contact.statusMessage))
elif cmd == "mypict":
contact = client.getContact(sender)
client.sendImageWithURL(to,"http://dl.profile.line-cdn.net/{}".format(contact.pictureStatus))
elif cmd == "myvideoprofile":
contact = client.getContact(sender)
client.sendVideoWithURL(to,"http://dl.profile.line-cdn.net/{}/vp".format(contact.pictureStatus))
elif cmd == "mycover":
channel = client.getProfileCoverURL(sender)
path = str(channel)
client.sendImageWithURL(to, path)
elif cmd.startswith("copy"):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = client.getContact(ls)
client.cloneContactProfile(ls)
client.sendMessage(to, "Berhasil mengclone profile {}".format(contact.displayName))
elif cmd == "restore":
try:
clientProfile = client.getProfile()
clientProfile.displayName = str(settings["myProfile"]["displayName"])
clientProfile.statusMessage = str(settings["myProfile"]["statusMessage"])
clientProfile.pictureStatus = str(settings["myProfile"]["pictureStatus"])
client.updateProfileAttribute(8, clientProfile.pictureStatus)
client.updateProfile(clientProfile)
coverId = str(settings["myProfile"]["coverId"])
client.updateProfileCoverById(coverId)
client.sendMessage(to, "Berhasil restore profile tunggu beberapa saat sampai profile berubah")
except Exception as e:
client.sendMessage(to, "Gagal restore profile")
logError(error)
elif cmd == "backupme":
try:
profile = client.getProfile()
settings["myProfile"]["displayName"] = str(profile.displayName)
settings["myProfile"]["statusMessage"] = str(profile.statusMessage)
settings["myProfile"]["pictureStatus"] = str(profile.pictureStatus)
coverId = client.getProfileDetail()["result"]["objectId"]
settings["myProfile"]["coverId"] = str(coverId)
client.sendMessage(to, "Berhasil backup profile")
except Exception as e:
client.sendMessage(to, "Gagal backup profile")
logError(error)
elif cmd.startswith("stealmid "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
ret_ = "[ Mid User ]"
for ls in lists:
ret_ += "\n{}".format(str(ls))
client.sendMessage(to, str(ret_))
elif cmd.startswith("stealname "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = client.getContact(ls)
client.sendMessage(to, "[ Display Name ]\n{}".format(str(contact.displayName)))
elif cmd.startswith("stealbio "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = client.getContact(ls)
client.sendMessage(to, "[ Status Message ]\n{}".format(str(contact.statusMessage)))
elif cmd.startswith("stealpicture"):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = client.getContact(ls)
path = "http://dl.profile.line.naver.jp/{}".format(contact.pictureStatus)
client.sendImageWithURL(to, str(path))
elif cmd.startswith("stealvideoprofile "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = client.getContact(ls)
path = "http://dl.profile.line.naver.jp/{}/vp".format(contact.pictureStatus)
client.sendVideoWithURL(to, str(path))
elif cmd.startswith("stealcover "):
if client != None:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
channel = client.getProfileCoverURL(ls)
path = str(channel)
client.sendImageWithURL(to, str(path))
# Pembatas Script #
elif cmd == 'gcreator':
group = client.getGroup(to)
GS = group.creator.mid
client.sendContact(to, GS)
elif cmd == 'gid':
gid = client.getGroup(to)
client.sendMessage(to, "[ID Group : ]\n" + gid.id)
elif cmd == 'gpict':
group = client.getGroup(to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
client.sendImageWithURL(to, path)
elif cmd == 'gname':
gid = client.getGroup(to)
client.sendMessage(to, "[Nama Group : ]\n" + gid.name)
elif cmd == 'groupticket':
if msg.toType == 2:
group = client.getGroup(to)
if group.preventedJoinByTicket == False:
ticket = client.reissueGroupTicket(to)
client.sendMessage(to, "[ Group Ticket ]\nhttps://line.me/R/ti/g/{}".format(str(ticket)))
else:
client.sendMessage(to, "Grup qr tidak terbuka silahkan buka terlebih dahulu dengan perintah {}openqr".format(str(settings["keyCommand"])))
elif cmd == 'gticket on':
if msg.toType == 2:
group = client.getGroup(to)
if group.preventedJoinByTicket == False:
client.sendMessage(to, "Grup qr sudah terbuka")
else:
group.preventedJoinByTicket = False
client.updateGroup(group)
client.sendMessage(to, "Berhasil membuka grup qr")
elif cmd == 'gticket off':
if msg.toType == 2:
group = client.getGroup(to)
if group.preventedJoinByTicket == True:
client.sendMessage(to, "Grup qr sudah tertutup")
else:
group.preventedJoinByTicket = True
client.updateGroup(group)
client.sendMessage(to, "Berhasil menutup grup qr")
elif cmd == 'ginfo':
group = client.getGroup(to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Tidak ditemukan"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(client.reissueGroupTicket(group.id)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = "╔══[ Group Info ]"
ret_ += "\n╠ Nama Group : {}".format(str(group.name))
ret_ += "\n╠ ID Group : {}".format(group.id)
ret_ += "\n╠ Pembuat : {}".format(str(gCreator))
ret_ += "\n╠ Jumlah Member : {}".format(str(len(group.members)))
ret_ += "\n╠ Jumlah Pending : {}".format(gPending)
ret_ += "\n╠ Group Qr : {}".format(gQr)
ret_ += "\n╠ Group Ticket : {}".format(gTicket)
ret_ += "\n╚══[ Finish ]"
client.sendMessage(to, str(ret_))
client.sendImageWithURL(to, path)
elif cmd == 'gmlist':
if msg.toType == 2:
group = client.getGroup(to)
ret_ = "╔══[ Member List ]"
no = 0 + 1
for mem in group.members:
ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n╚══[ Total {} ]".format(str(len(group.members)))
client.sendMessage(to, str(ret_))
elif cmd == 'glist':
groups = client.groups
ret_ = "╔══[ Group List ]"
no = 0 + 1
for gid in groups:
group = client.getGroup(gid)
ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups)))
client.sendMessage(to, str(ret_))
# Pembatas Script #
elif cmd == "cpp":
settings["changePictureProfile"] = True
client.sendMessage(to, "Silahkan kirim gambarnya")
elif cmd == "groupcp":
if msg.toType == 2:
if to not in settings["changeGroupPicture"]:
settings["changeGroupPicture"].append(to)
client.sendMessage(to, "Silahkan kirim gambarnya")
elif cmd == 'crot':
group = client.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//100
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*100 : (a+1)*300]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Fall \n'
client.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
client.sendMessage(to, "Total {} Mention".format(str(len(nama))))
elif cmd == "lurk on":
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if receiver in read['readPoint']:
try:
del read['readPoint'][receiver]
del read['readMember'][receiver]
del read['readTime'][receiver]
except:
pass
read['readPoint'][receiver] = msg_id
read['readMember'][receiver] = ""
read['readTime'][receiver] = readTime
read['ROM'][receiver] = {}
client.sendMessage(receiver,"Lurking telah diaktifkan")
else:
try:
del read['readPoint'][receiver]
del read['readMember'][receiver]
del read['readTime'][receiver]
except:
pass
read['readPoint'][receiver] = msg_id
read['readMember'][receiver] = ""
read['readTime'][receiver] = readTime
read['ROM'][receiver] = {}
client.sendMessage(receiver,"Set reading point : \n" + readTime)
elif cmd == "lurk off":
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if receiver not in read['readPoint']:
client.sendMessage(receiver,"Lurking telah dinonaktifkan")
else:
try:
del read['readPoint'][receiver]
del read['readMember'][receiver]
del read['readTime'][receiver]
except:
pass
client.sendMessage(receiver,"Delete reading point : \n" + readTime)
elif cmd == "lurk reset":
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read["readPoint"]:
try:
del read["readPoint"][msg.to]
del read["readMember"][msg.to]
del read["readTime"][msg.to]
del read["ROM"][msg.to]
except:
pass
read['readPoint'][receiver] = msg_id
read['readMember'][receiver] = ""
read['readTime'][receiver] = readTime
read['ROM'][receiver] = {}
client.sendMessage(msg.to, "Reset reading point : \n" + readTime)
else:
client.sendMessage(msg.to, "Lurking belum diaktifkan ngapain di reset?")
elif cmd == "lurks":
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if receiver in read['readPoint']:
if read["ROM"][receiver].items() == []:
client.sendMessage(receiver,"Tidak Ada Sider")
else:
chiya = []
for rom in read["ROM"][receiver].items():
chiya.append(rom[1])
cmem = client.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = '[R E A D E R ]\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@c\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
text = xpesan+ zxc + "\n" + readTime
try:
client.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as error:
print (error)
pass
else:
client.sendMessage(receiver,"Lurking belum diaktifkan")
elif cmd.startswith("mimicadd"):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["mimic"]["target"][target] = True
client.sendMessage(msg.to,"Target ditambahkan!")
break
except:
client.sendMessage(msg.to,"Gagal menambahkan target")
break
elif cmd.startswith("mimicdel"):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["mimic"]["target"][target]
client.sendMessage(msg.to,"Target dihapuskan!")
break
except:
client.sendMessage(msg.to,"Gagal menghapus target")
break
elif cmd == "mimiclist":
if settings["mimic"]["target"] == {}:
client.sendMessage(msg.to,"Tidak Ada Target")
else:
mc = "╔══[ Mimic List ]"
for mi_d in settings["mimic"]["target"]:
mc += "\n╠ "+client.getContact(mi_d).displayName
mc += "\n╚══[ Finish ]"
client.sendMessage(msg.to,mc)
elif cmd.startswith("mimic"):
sep = text.split(" ")
mic = text.replace(sep[0] + " ","")
if mic == "on":
if settings["mimic"]["status"] == False:
settings["mimic"]["status"] = True
client.sendMessage(msg.to,"Reply Message on")
elif mic == "off":
if settings["mimic"]["status"] == True:
settings["mimic"]["status"] = False
client.sendMessage(msg.to,"Reply Message off")
# Pembatas Script #
elif cmd.startswith("checkwebsite"):
try:
sep = text.split(" ")
query = text.replace(sep[0] + " ","")
r = requests.get("http://rahandiapi.herokuapp.com/sswebAPI?key=betakey&link={}".format(urllib.parse.quote(query)))
data = r.text
data = json.loads(data)
client.sendImageWithURL(to, data["result"])
except Exception as error:
logError(error)
elif cmd.startswith("checkdate"):
try:
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r = requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
ret_ = "[ D A T E ]"
ret_ += "\nDate Of Birth : {}".format(str(data["data"]["lahir"]))
ret_ += "\nAge : {}".format(str(data["data"]["usia"]))
ret_ += "\nBirthday : {}".format(str(data["data"]["ultah"]))
ret_ += "\nZodiak : {}".format(str(data["data"]["zodiak"]))
client.sendMessage(to, str(ret_))
except Exception as error:
logError(error)
elif cmd.startswith("sholat "):
separate = msg.text.split(" ")
location = msg.text.replace(separate[0] + " ","")
r = requests.get("http://api.corrykalam.net/apisholat.php?lokasi={}".format(location))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
if data[1] != "Subuh : " and data[2] != "Dzuhur : " and data[3] != "Ashar : " and data[4] != "Maghrib : " and data[5] != "Isya : ":
ret_ = "╔══[ Jadwal Sholat Sekitar " + data[0] + " ]"
ret_ += "\n╠ Tanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
ret_ += "\n╠ Jam : " + datetime.strftime(timeNow,'%H:%M:%S')
ret_ += "\n╠ " + data[1]
ret_ += "\n╠ " + data[2]
ret_ += "\n╠ " + data[3]
ret_ += "\n╠ " + data[4]
ret_ += "\n╠ " + data[5]
ret_ += "\n╚══[ Success ]"
client.sendMessage(msg.to, str(ret_))
elif cmd.startswith("cuaca "):
try:
sep = text.split(" ")
location = text.replace(sep[0] + " ","")
r = requests.get("http://api.corrykalam.net/apicuaca.php?kota={}".format(location))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
if "result" not in data:
ret_ = "╔══[ Weather Status ]"
ret_ += "\n╠ Location : " + data[0].replace("Temperatur di kota ","")
ret_ += "\n╠ Suhu : " + data[1].replace("Suhu : ","") + "°C"
ret_ += "\n╠ Kelembaban : " + data[2].replace("Kelembaban : ","") + "%"
ret_ += "\n╠ Tekanan udara : " + data[3].replace("Tekanan udara : ","") + "HPa"
ret_ += "\n╠ Kecepatan angin : " + data[4].replace("Kecepatan angin : ","") + "m/s"
ret_ += "\n╠══[ Time Status ]"
ret_ += "\n╠ Tanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
ret_ += "\n╠ Jam : " + datetime.strftime(timeNow,'%H:%M:%S') + " WIB"
ret_ += "\n╚══[ Success ]"
client.sendMessage(to, str(ret_))
except Exception as error:
logError(error)
elif cmd.startswith("location "):
try:
sep = text.split(" ")
location = text.replace(sep[0] + " ","")
r = requests.get("http://api.corrykalam.net/apiloc.php?lokasi={}".format(location))
data = r.text
data = json.loads(data)
if data[0] != "" and data[1] != "" and data[2] != "":
link = "https://www.google.co.id/maps/@{},{},15z".format(str(data[1]), str(data[2]))
ret_ = "╔══[ Location Status ]"
ret_ += "\n╠ Location : " + data[0]
ret_ += "\n╠ Google Maps : " + link
ret_ += "\n╚══[ Success ]"
client.sendMessage(to, str(ret_))
except Exception as error:
logError(error)
elif cmd.startswith("instainfo"):
try:
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
r = requests.get("https://www.instagram.com/{}/?__a=1".format(search))
data = r.text
data = json.loads(data)
if data != []:
ret_ = "╔══[ Profile Instagram ]"
ret_ += "\n╠ Nama : {}".format(str(data["graphql"]["user"]["full_name"]))
ret_ += "\n╠ Username : {}".format(str(data["graphql"]["user"]["username"]))
ret_ += "\n╠ Bio : {}".format(str(data["graphql"]["user"]["biography"]))
ret_ += "\n╠ Pengikut : {}".format(str(data["graphql"]["user"]["edge_followed_by"]["count"]))
ret_ += "\n╠ Diikuti : {}".format(str(data["graphql"]["user"]["edge_follow"]["count"]))
if data["graphql"]["user"]["is_verified"] == True:
ret_ += "\n╠ Verifikasi : Sudah"
else:
ret_ += "\n╠ Verifikasi : Belum"
if data["graphql"]["user"]["is_private"] == True:
ret_ += "\n╠ Akun Pribadi : Iya"
else:
ret_ += "\n╠ Akun Pribadi : Tidak"
ret_ += "\n╠ Total Post : {}".format(str(data["graphql"]["user"]["edge_owner_to_timeline_media"]["count"]))
ret_ += "\n╚══[ https://www.instagram.com/{} ]".format(search)
path = data["graphql"]["user"]["profile_pic_url_hd"]
client.sendImageWithURL(to, str(path))
client.sendMessage(to, str(ret_))
except Exception as error:
logError(error)
elif cmd.startswith("instapost"):
try:
sep = text.split(" ")
text = text.replace(sep[0] + " ","")
cond = text.split("|")
username = cond[0]
no = cond[1]
r = requests.get("http://rahandiapi.herokuapp.com/instapost/{}/{}?key=betakey".format(str(username), str(no)))
data = r.text
data = json.loads(data)
if data["find"] == True:
if data["media"]["mediatype"] == 1:
client.sendImageWithURL(msg.to, str(data["media"]["url"]))
if data["media"]["mediatype"] == 2:
client.sendVideoWithURL(msg.to, str(data["media"]["url"]))
ret_ = "╔══[ Info Post ]"
ret_ += "\n╠ Jumlah Like : {}".format(str(data["media"]["like_count"]))
ret_ += "\n╠ Jumlah Comment : {}".format(str(data["media"]["comment_count"]))
ret_ += "\n╚══[ Caption ]\n{}".format(str(data["media"]["caption"]))
client.sendMessage(to, str(ret_))
except Exception as error:
logError(error)
elif cmd.startswith("instastory"):
try:
sep = text.split(" ")
text = text.replace(sep[0] + " ","")
cond = text.split("|")
search = str(cond[0])
if len(cond) == 2:
r = requests.get("http://rahandiapi.herokuapp.com/instastory/{}?key=betakey".format(search))
data = r.text
data = json.loads(data)
if data["url"] != []:
num = int(cond[1])
if num <= len(data["url"]):
search = data["url"][num - 1]
if search["tipe"] == 1:
client.sendImageWithURL(to, str(search["link"]))
if search["tipe"] == 2:
client.sendVideoWithURL(to, str(search["link"]))
except Exception as error:
logError(error)
elif cmd.startswith("say-"):
sep = text.split("-")
sep = sep[1].split(" ")
lang = sep[0]
say = text.replace("say-" + lang + " ","")
if lang not in list_language["list_textToSpeech"]:
return client.sendMessage(to, "Language not found")
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
client.sendAudio(to,"hasil.mp3")
elif cmd.startswith("image"):
try:
separate = msg.text.split(" ")
search = msg.text.replace(separate[0] + " ","")
r = requests.get("http://rahandiapi.herokuapp.com/imageapi?key=betakey&q={}".format(search))
data = r.text
data = json.loads(data)
if data["result"] != []:
items = data["result"]
path = random.choice(items)
a = items.index(path)
b = len(items)
client.sendImageWithURL(to, str(path))
except Exception as error:
logError(error)
elif cmd.startswith("music "):
sep = msg.text.split(" ")
query = msg.text.replace(sep[0] + " ","")
cond = query.split("|")
search = str(cond[0])
result = requests.get("http://api.ntcorp.us/joox/search?q={}".format(str(search)))
data = result.text
data = json.loads(data)
if len(cond) == 1:
num = 0
ret_ = "╔══[ Result Music ]"
for music in data["result"]:
num += 1
ret_ += "\n╠ {}. {}".format(str(num), str(music["single"]))
ret_ += "\n╚══[ Total {} Music ]".format(str(len(data["result"])))
ret_ += "\n\nUntuk Melihat Details Music, silahkan gunakan command {}SearchMusic {}|「number」".format(str(setKey), str(search))
client.sendMessage(to, str(ret_))
elif len(cond) == 2:
num = int(cond[1])
if num <= len(data["result"]):
music = data["result"][num - 1]
result = requests.get("http://api.ntcorp.us/joox/song_info?sid={}".format(str(music["sid"])))
data = result.text
data = json.loads(data)
if data["result"] != []:
ret_ = "╔══[ Music ]"
ret_ += "\n╠ Title : {}".format(str(data["result"]["song"]))
ret_ += "\n╠ Album : {}".format(str(data["result"]["album"]))
ret_ += "\n╠ Size : {}".format(str(data["result"]["size"]))
ret_ += "\n╠ Link : {}".format(str(data["result"]["mp3"][0]))
ret_ += "\n╚══[ Finish ]"
client.sendImageWithURL(to, str(data["result"]["img"]))
client.sendMessage(to, str(ret_))
client.sendAudioWithURL(to, str(data["result"]["mp3"][0]))
elif cmd.startswith("lyric"):
sep = msg.text.split(" ")
query = msg.text.replace(sep[0] + " ","")
cond = query.split("|")
search = cond[0]
api = requests.get("http://api.secold.com/joox/cari/{}".format(str(search)))
data = api.text
data = json.loads(data)
if len(cond) == 1:
num = 0
ret_ = "╔══[ Result Lyric ]"
for lyric in data["results"]:
num += 1
ret_ += "\n╠ {}. {}".format(str(num), str(lyric["single"]))
ret_ += "\n╚══[ Total {} Music ]".format(str(len(data["results"])))
ret_ += "\n\nUntuk Melihat Details Lyric, silahkan gunakan command {}SearchLyric {}|「number」".format(str(setKey), str(search))
client.sendMessage(to, str(ret_))
elif len(cond) == 2:
num = int(cond[1])
if num <= len(data["results"]):
lyric = data["results"][num - 1]
api = requests.get("http://api.secold.com/joox/sid/{}".format(str(lyric["songid"])))
data = api.text
data = json.loads(data)
lyrics = data["results"]["lyric"]
lyric = lyrics.replace('ti:','Title - ')
lyric = lyric.replace('ar:','Artist - ')
lyric = lyric.replace('al:','Album - ')
removeString = "[1234567890.:]"
for char in removeString:
lyric = lyric.replace(char,'')
client.sendMessage(msg.to, str(lyric))
elif cmd.startswith("youtube"):
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
params = {"search_query": search}
r = requests.get("https://www.youtube.com/results", params = params)
soup = BeautifulSoup(r.content, "html5lib")
ret_ = "╔══[ Youtube Result ]"
datas = []
for data in soup.select(".yt-lockup-title > a[title]"):
if "&lists" not in data["href"]:
datas.append(data)
for data in datas:
ret_ += "\n╠══[ {} ]".format(str(data["title"]))
ret_ += "\n╠ https://www.youtube.com{}".format(str(data["href"]))
ret_ += "\n╚══[ Total {} ]".format(len(datas))
client.sendMessage(to, str(ret_))
elif cmd.startswith("tr-"):
sep = text.split("-")
sep = sep[1].split(" ")
lang = sep[0]
say = text.replace("tr-" + lang + " ","")
if lang not in list_language["list_translate"]:
return client.sendMessage(to, "Language not found")
translator = Translator()
hasil = translator.translate(say, dest=lang)
A = hasil.text
client.sendMessage(to, str(A))
# Pembatas Script #
# Pembatas Script #
if text.lower() == "mykey":
client.sendMessage(to, "KeyCommand Saat ini adalah [ {} ]".format(str(settings["keyCommand"])))
elif text.lower() == "setkey on":
settings["setKey"] = True
client.sendMessage(to, "Berhasil mengaktifkan setkey")
elif text.lower() == "setkey off":
settings["setKey"] = False
client.sendMessage(to, "Berhasil menonaktifkan setkey")
# Pembatas Script #
elif msg.contentType == 1:
if settings["changePicturePicture"] == True:
path = client.downloadObjectMsg(msg_id)
settings["changePictureProfile"] = False
client.updateProfilePicture(path)
client.sendMessage(to, "Berhasil mengubah foto profile")
if msg.toType == 2:
if to in settings["changeGroupPicture"]:
path = client.downloadObjectMsg(msg_id)
settings["changeGroupPicture"].remove(to)
client.updateGroupPicture(to, path)
client.sendMessage(to, "Berhasil mengubah foto group")
elif msg.contentType == 7:
if settings["checkSticker"] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
ret_ = "╔══[ Sticker Info ]"
ret_ += "\n╠ STICKER ID : {}".format(stk_id)
ret_ += "\n╠ STICKER PACKAGES ID : {}".format(pkg_id)
ret_ += "\n╠ STICKER VERSION : {}".format(stk_ver)
ret_ += "\n╠ STICKER URL : line://shop/detail/{}".format(pkg_id)
ret_ += "\n╚══[ Finish ]"
client.sendMessage(to, str(ret_))
elif msg.contentType == 13:
if settings["checkContact"] == True:
try:
contact = client.getContact(msg.contentMetadata["mid"])
if client != None:
cover = client.getProfileCoverURL(msg.contentMetadata["mid"])
else:
cover = "Tidak dapat masuk di line channel"
path = "http://dl.profile.line-cdn.net/{}".format(str(contact.pictureStatus))
try:
client.sendImageWithURL(to, str(path))
except:
pass
ret_ = "╔══[ Details Contact ]"
ret_ += "\n╠ Nama : {}".format(str(contact.displayName))
ret_ += "\n╠ MID : {}".format(str(msg.contentMetadata["mid"]))
ret_ += "\n╠ Bio : {}".format(str(contact.statusMessage))
ret_ += "\n╠ Gambar Profile : http://dl.profile.line-cdn.net/{}".format(str(contact.pictureStatus))
ret_ += "\n╠ Gambar Cover : {}".format(str(cover))
ret_ += "\n╚══[ Finish ]"
client.sendMessage(to, str(ret_))
except:
client.sendMessage(to, "Kontak tidak valid")
elif msg.contentType == 16:
if settings["checkPost"] == True:
try:
ret_ = "╔══[ Details Post ]"
if msg.contentMetadata["serviceType"] == "GB":
contact = client.getContact(sender)
auth = "\n╠ Penulis : {}".format(str(contact.displayName))
else:
auth = "\n╠ Penulis : {}".format(str(msg.contentMetadata["serviceName"]))
purl = "\n╠ URL : {}".format(str(msg.contentMetadata["postEndUrl"]).replace("line://","https://line.me/R/"))
ret_ += auth
ret_ += purl
if "mediaOid" in msg.contentMetadata:
object_ = msg.contentMetadata["mediaOid"].replace("svc=myhome|sid=h|","")
if msg.contentMetadata["mediaType"] == "V":
if msg.contentMetadata["serviceType"] == "GB":
ourl = "\n╠ Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(msg.contentMetadata["mediaOid"]))
murl = "\n╠ Media URL : https://obs-us.line-apps.com/myhome/h/download.nhn?{}".format(str(msg.contentMetadata["mediaOid"]))
else:
ourl = "\n╠ Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(object_))
murl = "\n╠ Media URL : https://obs-us.line-apps.com/myhome/h/download.nhn?{}".format(str(object_))
ret_ += murl
else:
if msg.contentMetadata["serviceType"] == "GB":
ourl = "\n╠ Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(msg.contentMetadata["mediaOid"]))
else:
ourl = "\n╠ Objek URL : https://obs-us.line-apps.com/myhome/h/download.nhn?tid=612w&{}".format(str(object_))
ret_ += ourl
if "stickerId" in msg.contentMetadata:
stck = "\n╠ Stiker : https://line.me/R/shop/detail/{}".format(str(msg.contentMetadata["packageId"]))
ret_ += stck
if "text" in msg.contentMetadata:
text = "\n╠ Tulisan : {}".format(str(msg.contentMetadata["text"]))
ret_ += text
ret_ += "\n╚══[ Finish ]"
client.sendMessage(to, str(ret_))
except:
client.sendMessage(to, "Post tidak valid")
except Exception as error:
logError(error)
traceback.print_tb(error.__traceback__)
if op.type == 26:
try:
print ("[ 26 ] RECIEVE MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 1 or msg.toType == 2:
if msg.toType == 0:
if sender != client.profile.mid:
to = sender
else:
to = receiver
elif msg.toType == 1:
to = receiver
elif msg.toType == 2:
to = receiver
if settings["autoRead"] == True:
client.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
client.sendMessage(msg.to,text)
if settings["unsendMessage"] == True:
try:
msg = op.message
if msg.toType == 0:
client.log("[{} : {}]".format(str(msg._from), str(msg.text)))
else:
client.log("[{} : {}]".format(str(msg.to), str(msg.text)))
msg_dict[msg.id] = {"text": msg.text, "from": msg._from, "createdTime": msg.createdTime, "contentType": msg.contentType, "contentMetadata": msg.contentMetadata}
except Exception as error:
logError(error)
if msg.contentType == 0:
if text is None:
return
if "/ti/g/" in msg.text.lower():
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = client.findGroupByTicket(ticket_id)
client.acceptGroupInvitationByTicket(group.id,ticket_id)
client.sendMessage(to, "Berhasil masuk ke group %s" % str(group.name))
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if clientMid in mention["M"]:
if settings["autoRespon"] == True:
sendMention(sender, "Oi Asw @!,jangan main tag tag", [sender])
break
except Exception as error:
logError(error)
traceback.print_tb(error.__traceback__)
if op.type == 65:
print ("[ 65 ] NOTIFIED DESTROY MESSAGE")
if settings["unsendMessage"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict:
if msg_dict[msg_id]["from"]:
contact = client.getContact(msg_dict[msg_id]["from"])
if contact.displayNameOverridden != None:
name_ = contact.displayNameOverridden
else:
name_ = contact.displayName
ret_ = "Send Message cancelled."
ret_ += "\nSender : @!"
ret_ += "\nSend At : {}".format(str(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"]))))
ret_ += "\nType : {}".format(str(Type._VALUES_TO_NAMES[msg_dict[msg_id]["contentType"]]))
ret_ += "\nText : {}".format(str(msg_dict[msg_id]["text"]))
sendMention(at, str(ret_), [contact.mid])
del msg_dict[msg_id]
else:
client.sendMessage(at,"SentMessage cancelled,But I didn't have log data.\nSorry > <")
except Exception as error:
logError(error)
traceback.print_tb(error.__traceback__)
if op.type == 55:
print ("[ 55 ] NOTIFIED READ MESSAGE")
try:
if op.param1 in read['readPoint']:
if op.param2 in read['readMember'][op.param1]:
pass
else:
read['readMember'][op.param1] += op.param2
read['ROM'][op.param1][op.param2] = op.param2
else:
pass
except Exception as error:
logError(error)
traceback.print_tb(error.__traceback__)
except Exception as error:
logError(error)
traceback.print_tb(error.__traceback__)
while True:
    try:
        delete_log()
        # Pull up to 50 pending operations from the LINE long-poll endpoint
        # and dispatch each one through the bot's operation handler.
        ops = clientPoll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                clientBot(op)
                # Advance the poll cursor so this operation is not redelivered.
                clientPoll.setRevision(op.revision)
    except Exception as error:
        # Keep the poll loop alive on any failure; just record it.
        logError(error)
def atend():
    """Persist the in-memory message log (module-level ``msg_dict``) to
    Log_data.json; registered via atexit to run on interpreter shutdown."""
    print("Saving")
    with open("Log_data.json", "w", encoding='utf8') as log_file:
        json.dump(msg_dict, log_file,
                  ensure_ascii=False, indent=4, separators=(',', ': '))
    print("BYE")
atexit.register(atend)
| [
"noreply@github.com"
] | noreply@github.com |
07ee852c8d0b57c49ce3b4bf6f1a092363300b09 | 68466546bdf505ac9668bc5508370b350190f56b | /vae/envs/reacher.py | e71f85464e7c257cc6df89428674011bde30d987 | [] | no_license | MishaLaskin/vae | a05e01929eebf5c81ac995e78b2833d4562e955c | 4ec2c18e0c0c5a531a9b36d948e4ea115d992727 | refs/heads/master | 2020-07-20T18:50:26.939543 | 2019-09-06T02:13:44 | 2019-09-06T02:13:44 | 206,694,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,233 | py | import numpy as np
import pickle
import torch
from dm_control import suite
from gym.spaces import Box, Dict
from vae.models import VAE
class GoalImageEnv:
    """Goal-conditioned, image-based wrapper around a dm_control suite task.

    Observations, achieved goals and desired goals are 128-d latent vectors
    produced by encoding rendered camera frames with a VAE (``self.model``).
    Rewards compare the achieved latent to a desired latent sampled at
    ``reset`` time.
    """

    def __init__(self,
                 env_name=None,
                 mode=None,
                 act_dim=None,
                 reward_type='pixeldiff',
                 img_dim=32,
                 camera_id=0,
                 path_length=200,
                 threshold=0.8,
                 gpu_id=0,
                 **kwargs):
        # Underlying dm_control environment / task / physics handles.
        self.dm_env = suite.load(env_name, mode)
        self.task = self.dm_env._task
        self.camera_id = camera_id
        self.physics = self.dm_env.physics
        self.max_steps = path_length          # episode step limit
        self.threshold = threshold            # cutoff for 'threshold' reward
        self.reward_type = reward_type        # 'pixeldiff' or 'threshold'
        self.device = torch.device(
            "cuda:" + str(gpu_id) if torch.cuda.is_available() else "cpu")
        # actions always between -1.0 and 1.0
        self.action_space = Box(
            high=1.0, low=-1.0,
            shape=self.dm_env.action_spec().shape if act_dim is None else (act_dim,)
        )
        # observtions are, in principle, unbounded
        state_space = Box(
            high=float("inf"), low=-float("inf"),
            shape=(128,))
        goal_space = Box(
            high=float("inf"), low=-float("inf"),
            shape=(128,))
        self.observation_space = Dict([
            ('observation', state_space),
            ('desired_goal', goal_space),
            ('achieved_goal', goal_space),
            #('state_observation', state_space),
            #('state_desired_goal', goal_space),
            #('state_achieved_goal', goal_space),
        ])
        self.render_kwargs = dict(
            width=img_dim, height=img_dim, camera_id=self.camera_id)
        # VAE encoder mapping rendered (3, img_dim, img_dim) frames to
        # 128-d latent codes.
        self.model = VAE(
            img_dim=img_dim, image_channels=3, z_dim=128, device=self.device)
        self.model = self.model.to(self.device)

    def render(self):
        """Render the current physics state as a float32 RGB frame."""
        # renders image
        # example: self.render_kwargs={width=32,height=32,camera_id=0}
        return self.physics.render(**self.render_kwargs).astype(np.float32)

    def normalized_render(self):
        """Render a frame and return its 128-d VAE latent as a numpy vector."""
        img = self.render()
        img = normalize_image(img)
        img = np.transpose(img, (2, 0, 1))   # HWC -> CHW for the conv encoder
        img = np.expand_dims(img, 0)         # add a batch dimension
        img = torch.tensor(img).float().to(self.device)
        _, z, _, _ = self.model(img)
        return z.squeeze(0).detach().cpu().numpy()

    def reset(self):
        """Reset twice: once to sample a goal latent, once to start the episode."""
        self.steps = 0
        self.dm_env.reset()
        self.desired_goal = self.normalized_render()
        self.desired_goal  # NOTE(review): no-op expression — looks like a leftover; confirm
        self.dm_env.reset()
        obs = self.normalized_render()
        achieved_goal = obs.copy()
        obs_dict = dict(observation=obs,
                        achieved_goal=achieved_goal,
                        desired_goal=self.desired_goal)
        return obs_dict

    def step(self, a):
        """Advance one timestep; reward/done are computed from latent distance."""
        self.dm_env.step(a)
        obs = self.normalized_render()
        achieved_goal = obs.copy()
        obs_dict = dict(observation=obs,
                        achieved_goal=achieved_goal,
                        desired_goal=self.desired_goal)
        reward = self.compute_reward(a, obs_dict)
        done, is_success = self.is_done(reward)
        info = {
            'is_success': is_success
        }
        # self.update_internal_state()
        self.steps += 1
        return obs_dict, reward, done, info

    def compute_reward(self, action, obs, *args, **kwargs):
        """Sparse reward for one observation dict: 0.0 on success, else -1.0."""
        # abstract method only cares about obs and threshold
        distance = np.linalg.norm(obs['achieved_goal'] - obs['desired_goal'])
        if self.reward_type == 'threshold':
            r = -1.0 if distance > self.threshold else 0.0
        elif self.reward_type == 'pixeldiff':
            r = 0.0 if np.allclose(
                obs['achieved_goal'], obs['desired_goal']) else -1.0
        return r

    def compute_rewards(self, actions, obs):
        """Vectorized variant of compute_reward over batched goal arrays."""
        # abstract method only cares about obs and threshold
        achieved_goals = obs['achieved_goal']
        desired_goals = obs['desired_goal']
        if self.reward_type == 'threshold':
            distances = np.linalg.norm(achieved_goals - desired_goals, axis=1)
            r = -(distances > self.threshold).astype(float)
        elif self.reward_type == 'pixeldiff':
            proximities = np.array([0.0 if np.allclose(x, y, atol=1e-3) else -1.0
                                    for x, y in zip(achieved_goals, desired_goals)])
            r = proximities
        return r

    def is_done(self, r):
        """Return ``(done, is_success)``; episodes end on success or step limit."""
        # check if max step limit is reached
        if self.steps >= self.max_steps:
            done = True
            is_success = False
            return done, is_success
        # check if episode was successful
        is_success = r == 0
        done = is_success
        return done, is_success
def normalize_image(img):
    """Rescale an image with values in [0, 255] to the [0, 1] interval.

    Arguments:
        img {np.array or torch.tensor} -- image with values 0-255 (any dtype)

    Returns:
        [np.array or torch.tensor] -- new float image with values in [0, 1]

    Fixes over the original: the in-place ``img /= 255.0`` mutated the
    caller's array and raised TypeError for integer arrays (which the old
    docstring explicitly promised to accept); the old docstring also claimed
    a [-1, 1] range while the centering steps were commented out — actual
    behavior is, and remains, [0, 1].
    """
    return img / 255.0
class Reacher(GoalImageEnv):
    """Image-goal reacher task backed by dm_control's ``reacher`` domain.

    Loads pretrained VAE encoder weights into ``self.model`` after the base
    class constructs the environment.
    """

    # Kept as the default for backward compatibility with the original
    # hard-coded, machine-specific location.
    DEFAULT_MODEL_PATH = '/home/misha/research/baselines/vae/saved_models/reacher_vae.pth'

    def __init__(self,
                 path_length=None,
                 reward_type='threshold',
                 threshold=0.8,
                 img_dim=32,
                 camera_id=0,
                 model_path=None):
        super().__init__(env_name='reacher',
                         mode='no_target',
                         act_dim=None,
                         reward_type=reward_type,
                         img_dim=img_dim,
                         camera_id=camera_id,
                         path_length=path_length)
        # Generalized: the checkpoint path used to be hard-coded; callers can
        # now point at their own weights while old callers are unaffected.
        path = model_path if model_path is not None else self.DEFAULT_MODEL_PATH
        self.model.load_state_dict(torch.load(path))
        self.model = self.model.to(self.device)
| [
"laskin.misha@gmail.com"
] | laskin.misha@gmail.com |
2e83cecc70ee4497d5e6350b4cd1f5292a03febd | e7e34a88c150009a0653d6fee8f4cf40312908ad | /bruteforce/bruteForceList.py | 2fef73b798ca50452c11480e7834e4edaf480e0a | [] | no_license | SharpShooter17/Python | a75e282b2c5098db0ef01cdcf357493849dbfa77 | f7b4593a08e3580b4d553c3689b50ce19f47b631 | refs/heads/master | 2021-05-07T00:29:44.017535 | 2018-01-28T09:40:09 | 2018-01-28T09:40:09 | 110,150,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | your_list = 'abcdefghijklmnopqrstuvwxyz'
# Enumerate every string of length 1, 2 and 3 over the alphabet in
# `your_list`, preserving the original (last-character-major) ordering.
complete_list = []
for extra_rounds in range(3):
    candidates = [ch for ch in your_list]
    for _ in range(extra_rounds):
        candidates = [prefix + ch for ch in your_list for prefix in candidates]
    complete_list = complete_list + candidates
| [
"b.ujazdowski@gmail.com"
] | b.ujazdowski@gmail.com |
72e93af8ec74d62438d70db9a90b5582bfc4aa9d | 124451bae7909f1285cc1baace7e066f3c069457 | /day9/example_data.py | 444fde0b2b99f48e7f41be474350e821126f970e | [] | no_license | matthiashamacher/Advent-of-Code-2020 | 4d125a42b4f527018146e4084040dfc9f5dffcc9 | 4b24b7a6976847a802bcd45ae6f4379cf74722a0 | refs/heads/main | 2023-01-29T21:08:01.814280 | 2020-12-13T12:25:28 | 2020-12-13T12:25:28 | 318,295,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | data = """
35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576
"""
result_part1 = 127
result_part2 = 62
| [
"m.hamacher@mhamacher.de"
] | m.hamacher@mhamacher.de |
714364cbe17eb0462f7f7729f896a429d40eb05e | da3e0a5adb0502dfe3438931c4682981dd75d836 | /shared/utils/templatetags/text_tags.py | 11c5a4c9cb944d2ab68793d123a22360856572e7 | [
"MIT"
] | permissive | sha-red/django-shared-utils | def4bc83d702cb0c62db1035863500bdb242c42b | 88059b53a10fdce960442fcfd7470fded4cabb19 | refs/heads/master | 2021-01-12T12:19:51.400720 | 2020-12-10T06:43:44 | 2020-12-10T06:43:44 | 72,434,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,454 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import string
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.encoding import force_text
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from .. import text as text_utils
register = template.Library()
@register.filter()
def conditional_punctuation(value, punctuation=",", space=" "):
    """
    Appends punctuation if the (stripped) value is not empty
    and the value does not already end in a punctuation mark (.,:;!?),
    then re-appends the trailing space that stripping removed.
    Empty/None values come back as "".
    """
    value = force_text(value or "").strip()
    if value:
        if value[-1] not in ".,:;!?":
            value += conditional_escape(punctuation)
        value += conditional_escape(space)  # Append previously stripped space
    return value
conditional_punctuation.is_safe = True
# Matches any run of whitespace characters.
WHITESPACE = re.compile('\s+')


@register.filter(needs_autoescape=True)
@stringfilter
def nbsp(text, autoescape=True):
    """
    Strips *text* and collapses each whitespace run to a single space
    character, escaping first when autoescape is on; result is marked safe.
    NOTE(review): given the filter's name, the replacement below may
    originally have been a non-breaking space or "&nbsp;" entity that got
    mangled — confirm against version history.
    """
    if autoescape:
        esc = conditional_escape
    else:
        esc = lambda x: x  # identity: caller vouches for the markup
    return mark_safe(WHITESPACE.sub(' ', esc(text.strip())))
@register.filter(needs_autoescape=False)
@stringfilter
def html_entities_to_unicode(text):
    """Delegate to ``text_utils.html_entities_to_unicode`` and mark the result safe."""
    return mark_safe(text_utils.html_entities_to_unicode(text))
@register.filter(needs_autoescape=False)
def slimdown(text):
    """Delegate to ``text_utils.slimdown`` and mark the result safe."""
    return mark_safe(text_utils.slimdown(text))
@register.filter(needs_autoescape=False)
def strip_links(text):
    """Delegate to ``text_utils.strip_links`` and mark the result safe."""
    return mark_safe(text_utils.strip_links(text))
@register.filter(is_safe=True)
@stringfilter
def html_lines_to_list(value):
    """
    Joins the <br>-separated lines of *value* into a single line, inserting
    ", " between lines that do not already end in punctuation and a plain
    space between lines that do.

    Fixes over the original: an unreachable second ``return`` statement was
    removed, and empty segments (e.g. "a<br><br>b") no longer raise
    IndexError from indexing ``line[-1]``.
    """
    rv = []
    lines = value.split("<br>")
    for i, line in enumerate(lines):
        line = line.strip()
        rv.append(line)
        if i < len(lines) - 1:
            # Empty segments have no trailing character to inspect; treat
            # them like lines without terminal punctuation.
            if not line or line[-1] not in ";:,.-–—":
                rv.append(", ")
            else:
                rv.append(" ")
    return "".join(rv)
def remove_punctuation(s):
    """Return *s* with every ASCII punctuation character removed."""
    # Single C-level pass via a translation table that deletes each
    # character in string.punctuation.
    return s.translate(str.maketrans('', '', string.punctuation))
def splitn(s, n):
    """Split string *s* into chunks no more than *n* characters long.

    Fix: the original removed the empty separator strings produced by
    ``re.split`` with ``map(parts.remove, ...)``; on Python 3 ``map`` is
    lazy, so the removal never ran and empty strings leaked into the result.
    """
    parts = re.split("(.{%d,%d})" % (n, n), s)
    return [p for p in parts if p != ""]
def clean_value(value):
    """
    Collapse every whitespace run in *value* — including non-breaking
    spaces — to a single regular space; falsy values (None, "", 0) are
    returned unchanged.
    """
    if value:
        return re.sub(u"([\s\u00A0]+)", u" ", force_text(value.strip()))
    else:
        return value
@register.filter()
@stringfilter
def append(value, text):
    """
    Returns value with *text* appended when the stripped value is
    non-empty; otherwise returns "".
    """
    if value is None:
        return ""
    stripped = str(value).strip()
    return "{}{}".format(stripped, text) if stripped else ""
@register.filter()
@stringfilter
def prepend(value, text):
    """
    Returns value with *text* prepended when the stripped value is
    non-empty; otherwise returns "".
    """
    if value is None:
        return ""
    stripped = str(value).strip()
    return "{}{}".format(text, stripped) if stripped else ""
@register.filter
@stringfilter
def first_line(s):
    """
    Strips whitespace, then returns the first line of text.

    Fix: empty or whitespace-only input used to raise IndexError
    (``splitlines()`` yields an empty list); it now returns "".
    """
    lines = s.strip().splitlines(False)
    return lines[0] if lines else ""
| [
"erik@classlibrary.net"
] | erik@classlibrary.net |
f3b1622bcb7fbe9c7b42b568817a065ba1646bb2 | fa8c5ce1fc8182fefeaa07394ad8acb4c38bd223 | /super_global_variable_and_super_keyword.py | 55b6cc7ec0b92804e3855fec468b0e265de68e69 | [] | no_license | manisha-jaiswal/variable-and-super-variable | 7392cd3f41a0a2f8164a26553da2c8cbf0cd7c9f | 6014db262279c6877581e81cf047ba5cd49bbf9a | refs/heads/master | 2022-06-22T00:22:26.875677 | 2020-05-09T11:15:24 | 2020-05-09T11:15:24 | 262,553,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,144 | py | #global and local variable and global keyword
#--------------------------------------------------------------------------------------------
l=10 #global variable used in everywhere , global scope
def function1(n):
    """Tutorial: rebinding a module-level name requires the `global` keyword."""
    #l=5  # a plain assignment here would create a LOCAL l instead
    m=8  # local variable, not visible outside the function
    #l=l+45  # without `global`, this line raises UnboundLocalError
    # Reading a global works implicitly, but assigning to it inside a
    # function needs an explicit `global` declaration first:
    global l
    l=l+45
    print(l,m)  # l resolves to the (now updated) module-level variable
    print(m,"i have printed")
function1("this is me ,")
print(l)
#--------------------------------------------------------------------------------
# using nestef function
#--------------------------------------------------------------------------------
def Manu():
    """Tutorial: `global` in a nested function targets MODULE scope,
    not the enclosing function's locals (that would need `nonlocal`)."""
    x=20
    def Sanu():
        global x  # binds/creates the module-level x, NOT Manu's local x
        x= 88
    print("before calling sanu()",x)
    Sanu()
    # Manu's local x is untouched by Sanu; only the module-level x is 88 now.
    print("after calling sanu()",x)
Manu()
print(x)
| [
"noreply@github.com"
] | noreply@github.com |
d0907a7ddb855d8b347fbf390726c78785b65a8d | e572ec1ce50d7701b1609c4ef2140abac6c459bb | /lab3/2nd assignment/depp.py | 88da682127cfde43db7cd6af3fc015e3ddcd5de3 | [] | no_license | platonv/bigdata_course | 10d7076e1d82fc43520828cadfc71aa50b7aa524 | 751ba7e53389d9171297be507ac31a3412fbfb06 | refs/heads/master | 2021-04-06T13:13:34.871316 | 2018-04-06T08:54:24 | 2018-04-06T08:54:24 | 125,376,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from pyspark import SparkContext, SparkConf
if __name__=='__main__':
    # Spark job (Python 2 syntax): count the records in the has_played_in
    # dataset that mention the actor "Johnny,Depp".
    conf = SparkConf().setAppName("lab3ex1").setMaster("local")
    sc = SparkContext(conf=conf)
    lines = sc.textFile("/lab3_2/has_played_in/part-m-00000")
    # find() returns -1 when the substring is absent.
    filtered=lines.filter(lambda x: x.find("Johnny,Depp")!=-1);
    print '\n\n'
    print filtered.count()
    print '\n\n'
| [
"andreimesesan@yahoo.com"
] | andreimesesan@yahoo.com |
14b6382005902b1f1ba788899a187b51d5038695 | dbb793f03e944cbe4e36af4cc0a926ab7b684e8e | /studentregister/migrations/0001_initial.py | 06f08cd5dfc03c4fbd6c689119cb150b9bffe9e7 | [] | no_license | UlftKcy/django-register-app | cefcf371870419357b7b3247d01bd1fd9525a379 | 96bed1cfa3d76cc1fe28723c1686e1b0d1380d76 | refs/heads/master | 2023-08-16T03:22:25.090124 | 2021-10-20T15:55:51 | 2021-10-20T15:55:51 | 419,387,182 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | # Generated by Django 3.2.8 on 2021-10-20 08:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Student_Register table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Student_Register',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=100)),
                ('mobile', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=154)),
                ('gender', models.CharField(choices=[('1', 'Female'), ('2', 'Male'), ('3', 'Other'), ('4', 'Prefer Not Say')], max_length=50)),
                ('student_number', models.CharField(max_length=50)),
                ('path', models.CharField(choices=[('FSD', 'Full Stack Developer'), ('AD', 'AWS/Devops'), ('DS', 'Data Science'), ('CS', 'Cyber Security')], max_length=50)),
            ],
        ),
    ]
| [
"ukacay87@gmail.com"
] | ukacay87@gmail.com |
fe735dbdb5a92306e652dc99f15ad3b0aa0599c2 | 323e6dd01cc5059623b9506aea63f9d1d6250d2c | /N_Mens_Morris/Morris.py | 567d0339859c6a7f30c67840c290c53903ab88e3 | [] | no_license | mf15726/Tp2 | 5cb29a31a518fe5710597b4e18b442affa346add | e73856045a4f7f47988cd7f946793311f571dd59 | refs/heads/master | 2020-04-22T22:15:13.954540 | 2019-02-14T14:42:29 | 2019-02-14T14:42:29 | 170,701,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,914 | py | #Packages
import numpy as np
import pandas as pd
import random
from copy import deepcopy
import csv
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
from math import log
import networkx as nx
#Classes
from learned_player import Learned_Player
from random_player import Random_Player
from human_player import Human_Player
#from multi_task_player import Multi_Task_Player
#game_type = 3, 6, 9, 12 (Men's Morris)
mill_dict_3 = [[[1, 2], [3, 6], [4, 8]],
[[0, 2], [4, 7]],
[[5, 8], [4, 6], [0,1]],
[[0, 6], [4, 5]],
[[0, 8], [1, 7], [2, 6], [3, 5]],
[[2, 8], [4, 3]],
[[7, 8], [2, 4], [0, 3]],
[[6, 8], [1, 4]],
[[7, 6], [2, 5], [0, 4]]]
mill_dict_6 = [[[1, 2], [13, 6]],
[[0, 2]],
[[0, 1], [9, 15]],
[[10, 7], [4, 5]],
[[3, 5]],
[[12, 8], [4, 3]],
[[0, 13]],
[[10, 3]],
[[5, 12]],
[[2, 15]],
[[3, 7], [11, 12]],
[[10, 12]],
[[10, 11], [5, 8]],
[[0, 6], [14, 15]],
[[13, 15]],
[[2, 9], [13, 14]]]
mill_dict_9 = [[[1, 2], [9, 21]],[[0, 2], [7, 4]],[[0, 1], [14, 23]],[[18, 10], [4, 5]],[[3, 5], [1, 7]],[[13, 20], [4, 3]],
[[11, 15], [7, 8]],[[6, 8], [1, 4]],[[12, 17], [6, 7]],[[10, 11], [0, 21]],[[9, 11], [3, 18]],[[6, 15], [9, 11]],
[[8, 17], [13, 14]],[[12, 14], [5, 20]],[[2, 23], [12, 13]],[[6, 11], [16, 17]],[[15, 17], [19, 22]],
[[12, 8], [15, 16]],[[10, 3], [19, 20]],[[18, 20], [10, 3]],[[19, 18], [5, 13]],[[9, 0], [22, 23]],[[21, 23], [16, 19]],
[[21, 22], [2, 14]]]
mill_dict_12 = [[[1, 2], [9, 21], [3, 6]],[[0, 2], [7, 4]],[[0, 1], [14, 23], [5, 8]],[[18, 10], [4, 5], [0, 6]],[[3, 5], [1, 7]],
[[13, 20], [4, 3], [2, 8]],[[11, 15], [7, 8], 9],[[6, 8], [1, 4]],[[12, 17], [6, 7], 20],[[10, 11], [0, 21]],
[[9, 11], [3, 18]],[[6, 15], [9, 11]],[[8, 17], [13, 14]],[[12, 14], [5, 20]],[[2, 23], [12, 13]],
[[6, 11], [16, 17], [18, 21]],[[15, 17], [19, 22]],[[12, 8], [15, 16], [20, 23]],[[10, 3], [19, 20], [15, 21]],
[[18, 20], [10, 3]],[[19, 18], [5, 13], [17, 23]], [[9, 0], [22, 23],[15, 18]],[[21, 23], [16, 19]],
[[21, 22],[2, 14], [17, 20]]]
def printboard(game_type,state):
    """Print an ASCII rendering of the board for 3/6/9/12 Men's Morris.

    `state` is the flat list of positions (0 empty, 1/2 player pieces).
    """
    # gameboard = nx.graph()
    if game_type == 3:
        print(str(state[0])+'-'+str(state[1])+'-'+str(state[2]))
        print('|\|/|')
        print(str(state[3])+'-'+str(state[4])+'-'+str(state[5]))
        print('|/|\|')
        print(str(state[6])+'-'+str(state[7])+'-'+str(state[8]))
    if game_type == 6:
        print(str(state[0])+'---'+str(state[1])+'---'+str(state[2]))
        print('| | |')
        print('| '+str(state[3])+'-'+str(state[4])+'-'+str(state[5])+' |')
        print('| | | |')
        print(str(state[6])+'-'+str(state[7])+' '+str(state[8])+'-'+str(state[9]))
        print('| | | |')
        print('| '+str(state[10])+'-'+str(state[11])+'-'+str(state[12])+' |')
        print('| | |')
        print(str(state[13])+'---'+str(state[14])+'---'+str(state[15]))
    if game_type == 9:
        print(str(state[0])+'-----'+str(state[1])+'-----'+str(state[2]))
        print('| | |')
        print('| '+str(state[3])+'---'+str(state[4])+'---'+str(state[5])+' |')
        print('| | | | |')
        print('| | '+str(state[6])+'-'+str(state[7])+'-'+str(state[8])+' | |')
        print('| | | | | |')
        print(str(state[9])+'-'+str(state[10])+'-'+str(state[11])+' '+str(state[12])+'-'+str(state[13])+'-'+str(state[14]))
        print('| | | | | |')
        print('| | '+str(state[15])+'-'+str(state[16])+'-'+str(state[17])+' | |')
        print('| | | | |')
        print('| '+str(state[18])+'---'+str(state[19])+'---'+str(state[20])+' |')
        print('| | |')
        print(str(state[21])+'-----'+str(state[22])+'-----'+str(state[23]))
    if game_type == 12:
        # Same layout as 9 men's but with the diagonal connectors drawn.
        print(str(state[0])+'-----'+str(state[1])+'-----'+str(state[2]))
        print('|\ | /|')
        print('| '+str(state[3])+'---'+str(state[4])+'---'+str(state[5])+' |')
        print('| |\ | /| |')
        print('| | '+str(state[6])+'-'+str(state[7])+'-'+str(state[8])+' | |')
        print('| | | | | |')
        print(str(state[9])+'-'+str(state[10])+'-'+str(state[11])+' '+str(state[12])+'-'+str(state[13])+'-'+str(state[14]))
        print('| | | | | |')
        print('| | '+str(state[15])+'-'+str(state[16])+'-'+str(state[17])+' | |')
        print('| |/ | \| |')
        print('| '+str(state[18])+'---'+str(state[19])+'---'+str(state[20])+' |')
        print('|/ | \|')
        print(str(state[21])+'-----'+str(state[22])+'-----'+str(state[23]))
def end_game(state):
    """Return the winner once a side is down to two pieces or fewer.

    2 means player one has lost, 1 means player two has lost, 0 means
    the game continues. Player one is checked first, as before.
    """
    remaining_p1 = state.count(1)
    remaining_p2 = state.count(2)
    if remaining_p1 <= 2:
        return 2
    if remaining_p2 <= 2:
        return 1
    return 0
def det_mill(state, move, game_type):
    """Return True if the piece at position `move` completes a mill.

    For each candidate mill through `move`, the table entry lists the two
    partner positions; a mill exists when all three squares hold the same
    player's piece.

    Fixes over the original: the stray debug ``print(move)`` in the 6-men
    branch is removed; all four branches are unified through a lookup table;
    every candidate mill is now checked before returning False (the original
    ``else`` could bail out after the first non-matching pair); and the
    malformed bare-int entries present in mill_dict_12 (e.g. a trailing
    ``9`` instead of a ``[a, b]`` pair) are skipped instead of crashing on
    ``state[item[0]]``.
    """
    tables = {3: mill_dict_3, 6: mill_dict_6, 9: mill_dict_9, 12: mill_dict_12}
    mills = tables.get(game_type)
    if mills is None:
        # Unknown game type: original fell through returning None (falsy).
        return False
    for partners in mills[move]:
        if not isinstance(partners, (list, tuple)) or len(partners) != 2:
            continue  # NOTE(review): malformed table entry — fix mill_dict_12
        if state[move] == state[partners[0]] == state[partners[1]]:
            return True
    return False
def free_space_finder(state):
    """Return the indices of every empty (0) position on the board."""
    return [position for position, occupant in enumerate(state) if occupant == 0]
#def free_space_finder(state):
# free_space = [0] * 9
# for i in range(len(state)):
def flying_check(state, player):
    """Return True when `player` is down to exactly three pieces and may "fly".

    Three Men's Morris never allows flying.
    NOTE(review): this reads the module-level ``game_type``, not a parameter,
    unlike every other helper in this file — confirm that is intentional.
    """
    if game_type == 3:
        return False
    return state.count(player) == 3
def game_play(player1,player2,game_type,print_board,flying,limit):
    """Play one full game of N Men's Morris between two player objects.

    player1/player2 must expose place(), move() and remove_piece().
    game_type is 3, 6, 9 or 12; print_board toggles ASCII rendering;
    flying enables the flying rule; limit caps total moves.
    Returns 1 or 2 for the winner, or 0 for a draw on the move limit.
    """
    winner = 0
    move_no = 0
    # Board position of each player's pieces (None = not yet placed).
    player1_piece_list = [None] * game_type
    player2_piece_list = [None] * game_type
    p1_pieces_removed = 0
    p2_pieces_removed = 0
    # Board state: 0 empty, 1 player one, 2 player two.
    if game_type == 3:
        state = [0,0,0,0,0,0,0,0,0]
        # free_space = [1,1,1,1,1,1,1,1,1]
    elif game_type == 6:
        state = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
        # free_space = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
    else:
        state = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
        # free_space = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
    if print_board:
        printboard(game_type,state)
    free_space = free_space_finder(state)
    while winner == 0:
        # Player 1 on even move numbers, player 2 on odd.
        player = (move_no % 2) + 1
        if move_no < game_type * 2:
            # --- Placement phase: each side drops game_type pieces. ---
            if player == 1:
                move = player1.place(state,free_space,game_type,player,move_no)
                player1_piece_list[int(move_no/2)] = move
            else:
                move = player2.place(state,free_space,game_type,player,move_no)
                # NOTE(review): stray debug print left in.
                print(int((move_no - 1)/2))
                player2_piece_list[int((move_no - 1)/2)] = move
            state[move] = player
            _ = free_space.index(move)
            # print('Placed by Player ' + str(player) + ' ' + str(move))
            # print('Free Space = ' +str(free_space))
            free_space.remove(move)
            if print_board:
                printboard(game_type,state)
            if det_mill(state, move, game_type):
                # In Three Men's Morris the first mill wins outright.
                if game_type == 3:
                    return player
                # print('Mill Created')
                # Mill formed: the mover removes one opposing piece.
                if player == 1:
                    removed_piece = player1.remove_piece(state,player2_piece_list,game_type,player,p2_pieces_removed)
                    # print('P2 Plist = ' + str(player2_piece_list))
                    # print('Removed piece = ' + str(removed_piece))
                    state[removed_piece] = 0
                    p2_pieces_removed += 1
                    player2_piece_list.remove(removed_piece)
                else:
                    removed_piece = player2.remove_piece(state,player1_piece_list,game_type,player,p1_pieces_removed)
                    # print('P1 Plist = ' + str(player1_piece_list))
                    # print('Removed piece = ' + str(removed_piece))
                    state[removed_piece] = 0
                    p1_pieces_removed += 1
                    player1_piece_list.remove(removed_piece)
                free_space.append(removed_piece)
                if print_board:
                    printboard(game_type,state)
        else:
            # --- Movement phase: slide (or fly) existing pieces. ---
            if move_no == game_type * 2:
                # First movement turn: a side may already be below 3 pieces.
                winner = end_game(state)
                if winner != 0:
                    return winner
            # NOTE(review): p1_fly/p2_fly are only assigned when `flying` is
            # True but are passed to move() below regardless — with
            # flying=False this raises NameError. Confirm intended usage.
            if flying:
                p1_fly = flying_check(state,1)
                p2_fly = flying_check(state,2)
            if player == 1:
                prev_pos, move = player1.move(state,game_type,free_space,player1_piece_list,player,p1_fly,move_no)
                # A returned move of 25 signals "no legal move": forfeit.
                if move == 25:
                    return 2
                player1_piece_list.append(move)
                player1_piece_list.remove(prev_pos)
                # print('Player1 moves' + str(move))
                # print('From ' + str(prev_pos))
            else:
                prev_pos, move = player2.move(state,game_type,free_space,player2_piece_list,player,p2_fly,move_no)
                if move == 25:
                    return 1
                player2_piece_list.append(move)
                player2_piece_list.remove(prev_pos)
                # print('Player2 moves' + str(move))
                # print('From ' + str(prev_pos))
            state[move] = player
            state[prev_pos] = 0
            free_space.remove(move)
            free_space.append(prev_pos)
            if print_board:
                printboard(game_type,state)
            if det_mill(state, move, game_type):
                # print('Mill Created')
                if player == 1:
                    removed_piece = player1.remove_piece(state,player2_piece_list,game_type,player,p2_pieces_removed)
                    # print('P2 Plist = ' + str(player2_piece_list))
                    # print('Removed piece = ' + str(removed_piece))
                    state[removed_piece] = 0
                    p2_pieces_removed += 1
                    player2_piece_list.remove(removed_piece)
                    if flying:
                        p1_fly = flying_check(state,1)
                else:
                    removed_piece = player2.remove_piece(state,player1_piece_list,game_type,player,p1_pieces_removed)
                    # print('P1 Plist = ' + str(player1_piece_list))
                    # print('Removed piece = ' + str(removed_piece))
                    state[removed_piece] = 0
                    p1_pieces_removed += 1
                    player1_piece_list.remove(removed_piece)
                    if flying:
                        p2_fly = flying_check(state,2)
                free_space.append(removed_piece)
                if print_board:
                    printboard(game_type,state)
            winner = end_game(state)
        move_no += 1
        if move_no == limit:
            return 0  # draw: move limit reached
    return winner
# --- Script entry point: play 100 random-vs-random games of 6 Men's Morris
# with flying enabled, printing the board and tallying wins per player. ---
winner_list = []
enable_flying = True
game_type = 6
see_board = True
total_move_no = 1000
human_player = Human_Player()
random_player = Random_Player()
learned_player = Learned_Player(epsilon=0.01, alpha=0.3, gamma=0.9, limit=total_move_no)
learned_player.sess.run(tf.global_variables_initializer())
#gameboard = define_board(6)
#nx.draw(gameboard)
#plt.show()
for i in range(100):
    # Announce every other game number (games are 1-indexed in the output).
    if i%2 == 0:
        print('Game Number = ' +str(i+1))
    winner = game_play(random_player,random_player, game_type, see_board, enable_flying, total_move_no)
    # winner = game_play(random_player,random_player, 12, False)
    print('Winner of game ' + str(i+1) + ' is Player ' + str(winner))
    winner_list.append(winner)
    # learned_player.learn(game_type, winner)
print('P1 wins = ' + str(winner_list.count(1)))
print('P2 wins = ' + str(winner_list.count(2)))
| [
"noreply@github.com"
] | noreply@github.com |
b9c79efd417aa62939f894e941ef2cddc2629860 | 70632f8a94ffa37e64b4cae5aea66bbd979603ca | /RRE Calculator.py | c89aec91469a65663a6d171522ad71848b9a3322 | [] | no_license | nwilson14/Reduced-Row-Echelon-Form-Calculator | edc714cf5e7dd575911a98ded1642746ea0b1c75 | 2013c51937015abac94afdb0f523a49b90612429 | refs/heads/master | 2022-10-02T12:11:08.184268 | 2020-06-06T16:25:54 | 2020-06-06T16:25:54 | 270,030,259 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,263 | py | # Multiple Matrices are created to test the program
# Comment out the Matrices you do not want to use.
MATRIX = [[2.0, 4.0, 6.0],
[1.0, 3.0, 9.0],
[1.0, 6.0, 8.0]]
MATRIX = [[2.0, 4.0, 8.0, 2.0],
[1.0, 2.0, 0.0, 4.0],
[1.0, 5.0, 3.0, 0.0],
[2.0, 7.0, 2.0, 4.0]]
MATRIX = [[2.0, 4.0, 8.0, 2.0],
[1.0, 2.0, 0.0, 4.0],
[1.0, 5.0, 3.0, 0.0],
[0.0, 0.0, 0.0, 0.0]]
MATRIX = [[4.0, 8.0, 2.0],
[2.0, 0.0, 4.0],
[5.0, 3.0, 0.0],
[7.0, 2.0, 4.0]]
MATRIX = [[4.0, 2.0, 5.0, 7.0],
[8.0, 0.0, 3.0, 2.0],
[2.0, 4.0, 0.0, 4.0]]
MATRIX = [[2.0, 0.0, 8.0, 2.0],
[1.0, 0.0, 2.0, 4.0],
[1.0, 0.0, 3.0, 9.0],
[2.0, 0.0, 2.0, 6.0]]
COLUMNS = len(MATRIX[0])
ROWS = len(MATRIX)
def print_matrix():
for i in range(ROWS):
print(MATRIX[i])
print()
def print_final():
for i in range(ROWS):
for j in range(COLUMNS):
convert = MATRIX[i][j].as_integer_ratio()
if convert[1] == 1:
MATRIX[i][j] = str(convert[0])
else:
MATRIX[i][j] = str(convert[0]) + "/" + str(convert[1])
print(MATRIX[i])
print()
def swap(pos):
# Given the row, move it down the Matrix until all the rows that do not
# have a leading 0 in position pos, are above it
start = pos
check = -1
for i in range(start, ROWS):
if MATRIX[i][pos] != 0:
check = 0
print("Swapping row " + str(pos+1) + " and row " + str(i+1))
temp = MATRIX[pos]
MATRIX[pos] = MATRIX[i]
MATRIX[i] = temp
pos = i
print_matrix()
if check == -1:
return False
else:
return True
def rre_calculator(confirm):
r = 0
c = 0
check = True
# Continue for all rows in the matrix
while r < ROWS and c < COLUMNS:
print("1")
# If there is a leading 0 in a row, swap it with another row
if MATRIX[r][c] == 0:
check = swap(r)
if check:
# If there is NOT a leading one, make it a leading one
# Whatever you do to make it a leading one, must also be done
# to every element in the entire row
if MATRIX[r][c] != 1 and MATRIX[r][c] != 0:
keep = 1 / MATRIX[r][c]
for i in range(COLUMNS):
MATRIX[r][i] = keep * MATRIX[r][i]
print("Done creating pivot 1")
print_matrix()
# Eliminate everything below the leading 1
for j in range(r + 1, ROWS):
keep = -MATRIX[j][r]
for k in range(r, COLUMNS):
MATRIX[j][k] = keep * MATRIX[r][k] + MATRIX[j][k]
print("Everything below pivot is 0")
print_matrix()
r = r + 1
c = c + 1
else:
# There is a column of 0's, so move one
print("Column of zeros")
c = c + 1
# If the user only requested Row Echelon Form, return
if confirm == 0:
print("Matrix in Row Echelon Form")
return
# Otherwise, continue with Reduced Row Echelon Form
print("Now finding Reduced Row Echelon")
# Find the position of every 1, and eliminate everything above
for q in range(ROWS-1, -1, -1):
r = COLUMNS-1
while MATRIX[q][r] != 1 and r > -1:
r = r - 1
# Matrix[q][r] displays the leading 1 found when reverse traversing the list
print("Found 1, now make everything above that 1 a 0")
print_matrix()
for n in range(q-1, -1, -1):
keep = -MATRIX[n][r]
for h in range(0, COLUMNS):
MATRIX[n][h] = keep*MATRIX[q][h] + MATRIX[n][h]
print("Changed")
print_matrix()
print("Matrix in Reduced Row Echelon Form")
# # # ----- Main ----- # # #
print("What form do you want the Matrix in?")
form = input("0: Row Echelon Form\n1: Reduced Row Echelon Form\n")
rre_calculator(int(form))
print_final()
| [
"noreply@github.com"
] | noreply@github.com |
2df1008afb7fa774acce316de5e1d9dd58f5af9c | 8981127a683d105c266a179fd7bd967558b7cd12 | /exp/__init__.py | d4bf7a2298b4699420e0dffac37f63d65f8384e0 | [] | no_license | haossr/Human-friendly-preference | 173da5511ab05de4eae206827722e69dbbf0f774 | 4d83aa16c1a1f2fbbcc77840c3f6cdd430b7945a | refs/heads/master | 2020-12-27T22:50:00.195635 | 2020-03-05T06:13:03 | 2020-03-05T06:13:03 | 238,093,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | from .exp import Experiment
| [
"haosheng@stanford.edu"
] | haosheng@stanford.edu |
a14ab6df974107ea56998dc17be7330c4bf7fa0f | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/pywrap_tensorflow_internal.py | 57359e7e1d2bc639df434b43db03044bd4c69c0c | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/pywrap_tensorflow_internal.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
187c3fc4c1f71964da91cace8450315224f8d3ad | c440f528c00965679b6d046a42bc9ea1049257b4 | /tensor_flow_test.py | 621cc631f0d99b18c3d5ec34f35aeaac3c06fa9d | [] | no_license | WooRaZil-Boy/DLND | 9c68ec1795978a5e34634abbdf8bf36b9fd31e38 | d92069cbdc93557dd187a66c1f16f2530cfb6df7 | refs/heads/master | 2021-06-20T05:36:07.936885 | 2017-07-20T01:23:30 | 2017-07-20T01:23:30 | 86,291,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,033 | py | import tensorflow as tf
hello_constant = tf.constant("Hello world")
#텐서 플로우는 기본 변수 저장을 tensor 단위로 한다.
#tf.constant ()에 의해 리턴 된 텐서는 텐서의 값이 변하지 않기 때문에 상수 텐서라 부른다.
# A is a 0-dimensional int32 tensor
A = tf.constant(1234)
# B is a 1-dimensional int32 tensor
B = tf.constant([123,456,789])
# C is a 2-dimensional int32 tensor
C = tf.constant([ [123,456,789], [222,333,444] ])
with tf.Session() as sess: #세션 메서드로 세션 인스턴스 sess를 만든다.
#세션은 그래프를 실행하기 위한 환경이다.
#세션은 원격 컴퓨터를 포함하여 GPU 및 CPU에 작업을 할당한다.
#with as 구문은 어떤 블럭에 진입하고 나올 때 지정된 객체(context manager)로 하여금 그 시작과 끝에서 어떤 처리를 하도록 할 때 사용한다.
#파일이나 DB 혹은 네트워크 연결을 열어서 작업하던 중에 예외가 발생하였을 때에 안전하게 리소스 처리를 할 수 있는 로직을 깔끔하게 처리할 수 있다.
#파일 입출력 시에 try catch finally 등 대신 쓸 수 있다.
output = sess.run(hello_constant) #세션 메서드 run으로 텐서를 평가하고 결과값을 반환한다.
print(output)
################################ input ############################################
x = tf.placeholder(tf.string)
#모델 입력 시 변경되지 않을 데이처를 입력하고자 할 때는 placeholder를 사용하면 된다.
#상수(constant)처럼 바로 값을 입력할 수 없다.
#tf.placeholder()는 tf.session.run() 함수에 전달 된 데이터에서 값을 가져 오는 텐서를 반환하므로 세션이 실행되기 전에 입력을 바로 설정할 수 있다.
#자료형 뒤에 shape를 지정해 줄 수 도 있다. https://www.tensorflow.org/api_docs/python/tf/placeholder
with tf.Session() as sess:
output = sess.run(x, feed_dict={x: 'Hello World'})
#run()에서 feed_dict 매개 변수를 사용해서 텐서를 설정해 준다.
#(변수 텐서(placeholder), feed_dict = 매개변수 설정(딕셔너리))
#여기서는 텐서 x가 문자열 "Hello, world"로 설정된다.
print(output)
x = tf.placeholder(tf.string)
y = tf.placeholder(tf.int32)
z = tf.placeholder(tf.float32)
with tf.Session() as sess:
output = sess.run(x, feed_dict={x: 'Test String', y: 123, z: 45.67})
#feed_dict를 사용하여 하나 이상의 텐서를 설정할 수도 있다.
print(output)
################################ TensorFlow Math ################################
x = tf.add(5, 2) # 7
x = tf.subtract(10, 4) # 6
y = tf.multiply(2, 5) # 10
#tf.subtract(tf.constant(2.0),tf.constant(1))
#Fails with ValueError: Tensor conversion requested dtype float32 for Tensor with dtype int32:
#변수의 단위를 맞춰줘야 한다.
tf.subtract(tf.cast(tf.constant(2.0), tf.int32), tf.constant(1)) # 1
x = tf.constant(10)
y = tf.constant(2)
z = tf.subtract(tf.divide(x, y), tf.cast(tf.constant(1), tf.float64)) #x/y - 1
################################ Weights and Bias in TensorFlow ################################
#tf.placeholder()는 입력 데이터를 만들 때 주로 사용한다. (실제 훈련 예제를 제공하는 변수) - 초기값을 지정할 필요 없다. (모델 입력시 변경되지 않을 데이터)
#tf.Variable()은 데이터의 상태를 저장할 때 주로 사용한다. (가중치나 편향 등의 학습 가능한 변수) - 초기값을 지정해야 한다. (학습 되는 데이터)
#http://stackoverflow.com/questions/36693740/whats-the-difference-between-tf-placeholder-and-tf-variable
x = tf.Variable(5) #Variable()은 수정할 수 있는 텐서를 생성한다.
init = tf.global_variables_initializer() #global_variables_initializer() 모든 가변 텐서의 상태 초기화하는 메서드
with tf.Session() as sess:
sess.run(init)
#tf.global_variables_initializer()는 모든 TensorFlow 변수를 그래프에서 초기화하는 연산을 반환한다.
#세션에서 작업을 호출 해 모든 변수를 초기화한다.
#tf.Variable 클래스를 사용하면 가중치와 편향의 초기 값을 입력해야 한다.
n_features = 120
n_labels = 5
weights = tf.Variable(tf.truncated_normal((n_features, n_labels))) #tf.truncated_normal() 정규 분포 내에서 임의의 값을 생성한다.
#tf.truncated_normal()는 평균값으로부터 2 표준 편차를 넘지 않는 정규 분포에서 무작위 값을 갖는 텐서를 반환한다.
#정규 분포에서 무작위 수로 가중치를 초기화하는 것이 좋다.
bias = tf.Variable(tf.zeros(n_labels))
#가중치를 설정하면 학습 모델이 정체되는 것을 막을 수 있으므로 편향까지 무작위로 추출 할 필요는 없다. 따라서 편향을 0으로 설정하기도 한다.
################################ One Hot Encoding ################################
import numpy as np
from sklearn import preprocessing
# Example labels
labels = np.array([1,5,3,2,1,4,2,1,3])
# Create the encoder
lb = preprocessing.LabelBinarizer() #sklearn의 preprocessing을 통해 쉽게 one-hot encoding을 구현할 수 있다.
# Here the encoder finds the classes and assigns one-hot vectors
lb.fit(labels)
# And finally, transform the labels into one-hot encoded vectors
lb.transform(labels)
# array([[1, 0, 0, 0, 0],
# [0, 0, 0, 0, 1],
# [0, 0, 1, 0, 0],
# [0, 1, 0, 0, 0],
# [1, 0, 0, 0, 0],
# [0, 0, 0, 1, 0],
# [0, 1, 0, 0, 0],
# [1, 0, 0, 0, 0],
# [0, 0, 1, 0, 0]])
################################ Cross Entropy ################################
x = tf.reduce_sum([1, 2, 3, 4, 5]) # 15
#reduce_sum은 배열의 수를 더해서 반환
# 'x' is [[1, 1, 1]
# [1, 1, 1]]
x = [[1, 1, 1], [1, 1, 1]]
tf.reduce_sum(x) # 6 #축을 설정하지 않으면 모든 원소를 더한다.
tf.reduce_sum(x, 0) # [2, 2, 2] #축 방향만 더해서 반환
tf.reduce_sum(x, 1) # [3, 3] #축 방향만 더해서 반환
tf.reduce_sum(x, 1, keep_dims=True) # [[3], [3]]
tf.reduce_sum(x, [0, 1]) # 6
# x = tf.log(100) # 4.60517
#log는 자연로그를 취한다.
softmax_data = [0.7, 0.2, 0.1]
one_hot_data = [1.0, 0.0, 0.0]
softmax = tf.placeholder(tf.float32)
one_hot = tf.placeholder(tf.float32)
cross_entropy = -tf.reduce_sum(tf.multiply(one_hot, tf.log(softmax)))
#***** 크로스 엔트로피 오차는 원 핫 인코딩에서 해당 값만을 -ln(x) 한 것. *****
#tf.multiply(one_hot, tf.log(softmax))를 하면 정답 레이블만 값을 가지게 된다.
#cross_entropy = -tf.log(tf.reduce_sum(tf.multiply(one_hot, softmax)))도 같다.
with tf.Session() as sess:
print(sess.run(cross_entropy, feed_dict={softmax: softmax_data, one_hot: one_hot_data}))
################################ Mini-batch ################################
import math
from pprint import pprint
def batches(batch_size, features, labels):
"""
Create batches of features and labels
:param batch_size: The batch size
:param features: List of features
:param labels: List of labels
:return: Batches of (Features, Labels)
"""
assert len(features) == len(labels)
output_batches = []
sample_size = len(features)
for start_i in range(0, sample_size, batch_size):
end_i = start_i + batch_size
batch = [features[start_i:end_i], labels[start_i:end_i]] #end_i가 범위를 넘어가면 마지막까지만 출력
output_batches.append(batch)
return output_batches
# 4 Samples of features
example_features = [
['F11','F12','F13','F14'],
['F21','F22','F23','F24'],
['F31','F32','F33','F34'],
['F41','F42','F43','F44']]
# 4 Samples of labels
example_labels = [
['L11','L12'],
['L21','L22'],
['L31','L32'],
['L41','L42']]
# PPrint prints data structures like 2d arrays, so they are easier to read
pprint(batches(3, example_features, example_labels))
| [
"byuncorry@hanmail.net"
] | byuncorry@hanmail.net |
06d1a676a79e4717ef3a8f9091ba8612972c4f88 | af829a7bb04f515b01dc78aaeb318991ead50d24 | /cart/forms.py | 05c7b1e1972cd2dd46c070cb532696742bea4a12 | [] | no_license | marcinpelszyk/Djnago-ecom | 75ffffb1d6fcd5457d9db8bf166610b15994203f | 8ae049087c952b52f287dd58f6a91a2e83113921 | refs/heads/main | 2023-06-08T20:14:22.007418 | 2021-06-14T20:56:26 | 2021-06-14T20:56:26 | 376,601,973 | 0 | 0 | null | 2021-06-14T20:56:27 | 2021-06-13T17:26:48 | HTML | UTF-8 | Python | false | false | 192 | py | from django import forms
from django import forms
from .models import OrderItem
class AddCartForm(forms.ModelForm):
class Meta:
model = OrderItem
fields = ['quantity']
| [
"marcin.pelszyk90@gmail.com"
] | marcin.pelszyk90@gmail.com |
64541b443d026560b213cf649fddf14d9174859e | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/894c37be40744bf289920a1bd2eb6ba4.py | 8a84f39bc980357d36a643c97a4bffbd58c75679 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 233 | py | def hey(phrase):
phrase = phrase.strip()
if len(phrase) == 0:
return 'Fine. Be that way!'
elif phrase.isupper():
return 'Whoa, chill out!'
elif phrase.endswith('?') :
return 'Sure.'
else:
return 'Whatever.'
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
419569c9669a5484b018ffc38b0dceaf219047f3 | 6248927177c35b35ff8e1c4b5219f7684143e180 | /quickstart.py | fd6ba10df187e86cd717bc12595bd7c81d7d7916 | [] | no_license | lauradarcy/graphtoolpractice | 0e4c110ac84daf47e8195111fb6354f92115509e | 9e0b4f735c81317559adee176c23e7622991dd1a | refs/heads/master | 2020-04-06T14:07:35.749501 | 2018-11-16T15:29:46 | 2018-11-16T15:29:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | import graph_tool.all as gt
g = gt.Graph()
v0_g = g.add_vertex()
v1_g = g.add_vertex()
e0_g = g.add_edge(v0_g,v1_g)
g1 = gt.Graph(g) #this is a deep copy, so any further changes to g will not affect g1
v2_g = g.add_vertex()
e2_g = g.add_edge(v0_g,v2_g)
gt.graph_draw(g, vertex_text=g.vertex_index, vertex_font_size=18, output_size=(200,200), output="two-nodes.png")
gt.graph_draw(g1, vertex_text=g1.vertex_index, vertex_font_size=18, output_size=(200,200), output="two-nodesg2.png")
for vertex in g.vertices():
print(vertex, vertex.out_degree(), vertex.in_degree()) #this is slow as loops are performed in pure python, fastest method is below
print(g.get_vertices())
print(g.get_out_degrees(g.get_vertices()))
print(g.get_in_degrees(g.get_vertices()))
#edges = g.get_edges()
#print((edges[:,0] * edges[:,1]).sum())
| [
"luludarcy@gmail.com"
] | luludarcy@gmail.com |
c076365e81a1ffceb0cf95f537046d11f72da5f9 | 29b7eea3810b0dab40985a6f2533ff807ea4755a | /firstsite/sites/college/migrations/0005_delete_marks.py | 5429f9402d702d36ab5247179154d728010c643e | [] | no_license | hayderAlmosawi/MyWork | 674698495f2deceb2855b3d4c44fd53d4235aac7 | 26a1228e794fba786a63544d9c4777110efc681b | refs/heads/master | 2021-01-10T11:36:34.153643 | 2016-04-09T16:17:38 | 2016-04-09T16:17:38 | 53,725,952 | 0 | 0 | null | 2016-04-09T16:06:15 | 2016-03-12T10:30:14 | JavaScript | UTF-8 | Python | false | false | 345 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-05 14:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('college', '0004_marks'),
]
operations = [
migrations.DeleteModel(
name='Marks',
),
]
| [
"hayder719@gmail.com"
] | hayder719@gmail.com |
3d952f006a6a746f5d7106ff9ca29ac82643fddd | 2f242a319d45a8dcff46bfa2307fe03742edffc5 | /src/module/scripts/generate_clusters.py | 304633d89e1952caee7948c03dd36e215ca8a60f | [] | no_license | bengitles/final_project | 6faeb599da723b6d762df125f94c163bd954e2a9 | 2527a3e722b27c48bf773d1d4c28ee57c92552f9 | refs/heads/master | 2021-01-01T08:21:09.957106 | 2014-12-16T17:09:25 | 2014-12-16T17:09:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,207 | py | import csv
import matplotlib.pyplot as plt
import numpy as np
import sys
from pylab import savefig
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import pairwise_distances_argmin_min
def import_answers(filename):
feature_questions = ['Answer.E1a', 'Answer.E1b', 'Answer.J1', 'Answer.N1a', 'Answer.N1b',
'Answer.N2a', 'Answer.N2b', 'Answer.N3a', 'Answer.N3b', 'Answer.N4a',
'Answer.N4b', 'Answer.N4f', 'Answer.N4f1', 'Answer.N4g', 'Answer.P4a',
'Answer.P4b', 'Answer.P6', 'Answer.P6a1', 'Answer.X13', 'Answer.X13a']
control_questions = {'Answer.Z1control1' : 'No' , 'Answer.Z1control2' : 'Bark',
'Answer.Z1control3' : 'Agree strongly', 'Answer.Z1control4' : 'Meow',
'Answer.Z1control5' : 'Cut out entirely', 'Answer.Z1control7' : '33',
'Answer.Z1control8' : '222'}
dict_reader = csv.DictReader(open(filename, 'rb'), delimiter=',')
#dict_reader.next()
X = []
for row in dict_reader:
# golden set check
golden_answers = [row[a] for a in control_questions.keys()]
if set(golden_answers) != set(control_questions.itervalues()):
continue
# all fields are populated
somethingEmpty = False
for key in feature_questions:
if row[key] == '':
somethingEmpty = True
break
if somethingEmpty:
continue
result = []
# E1a (political ideology)
answer_e1a = row['Answer.E1a']
possible_e1a_answers = ['Extremely liberal', 'Liberal', 'Slightly liberal', 'Moderate; middle of the road',
'Slightly conservative', 'Conservative', 'Extremely conservative']
result.append(possible_e1a_answers.index(answer_e1a) - 3)
# E1b
if row['Answer.E1b'] == 'Liberal':
result.append(0)
else:
result.append(1)
# J1 (political party)
party_affil = row['Answer.J1']
if party_affil == 'Democrat':
result.append(-1)
elif party_affil == 'Republican':
result.append(1)
else:
result.append(0)
# N2a (defense spending), N3a (healthcare spending), N4a (gov't and jobs)
N_ans = ['Answer.N2a', 'Answer.N3a', 'Answer.N4a']
for a in N_ans:
if row[a].startswith('1'):
result.append(1)
elif row[a].startswith('7'):
result.append(7)
else:
result.append(int(row[a]))
#result.append(importance_list.index(row[b]))
# P6a (gun control)
gun_control = row['Answer.P6']
if gun_control.startswith('More'):
result.append(-1)
elif gun_control.startswith('Make'):
result.append(1)
else:
result.append(0)
# # N4f and N4f1 and N4g (Illegal Immigration)
# n4f_ans = row['Answer.N4f']
# stance = 0
# feeling = 0
# if n4f_ans == 'Favor':
# stance = 1
# elif n4f_ans == 'Oppose':
# stance = 2
# else:
# stance = 3
# n4f1_ans = row['Answer.N4f1']
# if n4f1_ans == 'A great deal':
# feeling = 1
# elif n4f1_ans == 'Moderately':
# feeling = 2
# else:
# feeling = 3
# if stance == 1 and feeling == 1:
# result.append(1) # strongly support immigration
# if stance == 1 and feeling == 2:
# result.append(2)
# if stance == 1 and feeling == 3:
# result.append(3)
# if stance == 3:
# result.append(4)
# if stance == 2 and feeling == 3:
# result.append(5)
# if stance == 2 and feeling == 2:
# result.append(6)
# if stance == 2 and feeling == 1:
# result.append(7) # strongly oppose immigration
# modified_importance_list = ['Not important at all', 'Slightly important', 'Moderately important', 'Very important', 'Extremely important']
# result.append(modified_importance_list.index(row['Answer.N4g']))
# # P4a and b (environment)
# a = 'Answer.P4a'
# if row[a].startswith('1'):
# result.append(1)
# elif row[a].startswith('7'):
# result.append(7)
# elif row[a] == '':
# result.append(0)
# else:
# result.append(int(row[a]))
# result.append(importance_list.index(row['Answer.P4b']))
# # P6 (gun control)
# p6_ans = row['Answer.P6']
# if p6_ans == 'More difficult': # negative bc democratic stance
# result.append(-1)
# elif p6_ans == 'Make it easier':
# result.append(1)
# else:
# result.append(0)
# result.append(importance_list.index(row['Answer.P6a1']))
# # X13 (Homesexuality)
# x13_ans = row['Answer.X13']
# if x13_ans == 'Favor':
# result.append(-1)
# else:
# result.append(1)
# x131_ans = row['Answer.X13a']
# if x131_ans == 'Strongly':
# result.append(1)
# else:
# result.append(0)
X.append(result)
return X
def kmeans(X):
num_clusters = int(sys.argv[2])
kmeans_model = KMeans(n_clusters = num_clusters)
kmeans_model.fit(X)
if sys.argv[3] == 'c':
print kmeans_model.cluster_centers_
else:
closest, _ = pairwise_distances_argmin_min(kmeans_model.cluster_centers_, X)
for point in closest:
print X[point]
def PCA_plot_clusters(data):
reduced_data = PCA(n_components=2).fit_transform(data)
num_clusters = int(sys.argv[2])
kmeans = KMeans(init='k-means++', n_clusters=num_clusters, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1
y_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the candidate dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
#savefig('PCA_clusters_951674.png')
#savefig('PCA_clusters_951779.png')
if __name__ == '__main__':
X = import_answers(sys.argv[1])
X = np.array(X)
# print type(X)
print " ['ideology', 'Liberal or not', 'Party affiliation', 'defense spending position', 'healthcare position', 'Gov't and jobs position', 'Gun control position'] "
kmeans(X)
try:
if sys.argv[4] == 'graph':
PCA_plot_clusters(X)
except IndexError:
pass
#k_nearest_neighbors(X) | [
"abhishek.gadiraju@gmail.com"
] | abhishek.gadiraju@gmail.com |
c7129370443f104ccf475efa0da13bda8d448769 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /third_party/catapult/third_party/gsutil/gslib/hashing_helper.py | c26831fe3af861fcb437fa8a1e588da4bb246152 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | Python | false | false | 16,892 | py | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for hashing functionality."""
import base64
import binascii
from hashlib import md5
import os
from boto import config
import crcmod
from gslib.exception import CommandException
from gslib.util import DEFAULT_FILE_BUFFER_SIZE
from gslib.util import MIN_SIZE_COMPUTE_LOGGING
from gslib.util import TRANSFER_BUFFER_SIZE
from gslib.util import UsingCrcmodExtension
# User-facing warning emitted when checksumming is requested but the crcmod
# C extension is unavailable (pure-Python crcmod is orders of magnitude
# slower).
SLOW_CRCMOD_WARNING = """
WARNING: You have requested checksumming but your crcmod installation isn't
using the module's C extension, so checksumming will run very slowly. For help
installing the extension, please see:
$ gsutil help crcmod
"""


# Warning for downloads of composite objects (which require CRC32c
# validation) when only slow, pure-Python crcmod is available.
_SLOW_CRCMOD_DOWNLOAD_WARNING = """
WARNING: Downloading this composite object requires integrity checking with
CRC32c, but your crcmod installation isn't using the module's C extension,
so the hash computation will likely throttle download performance. For help
installing the extension, please see:
$ gsutil help crcmod
To disable slow integrity checking, see the "check_hashes" option in your
boto config file.
"""

# Exception text raised when check_hashes is configured to fail rather than
# fall back to slow CRC computation.
_SLOW_CRC_EXCEPTION_TEXT = """
Downloading this composite object requires integrity checking with CRC32c,
but your crcmod installation isn't using the module's C extension, so the
hash computation will likely throttle download performance. For help
installing the extension, please see:
$ gsutil help crcmod
To download regardless of crcmod performance or to skip slow integrity
checks, see the "check_hashes" option in your boto config file.
NOTE: It is strongly recommended that you not disable integrity checks. Doing so
could allow data corruption to go undetected during uploading/downloading."""


# Warning emitted when integrity checking is skipped entirely because fast
# CRC computation is unavailable and check_hashes allows skipping.
_NO_HASH_CHECK_WARNING = """
WARNING: This download will not be validated since your crcmod installation
doesn't use the module's C extension, so the hash computation would likely
throttle download performance. For help in installing the extension, please
see:
$ gsutil help crcmod
To force integrity checking, see the "check_hashes" option in your boto config
file.
"""


# Configuration values for hashing.
# These correspond to the valid values of the boto "check_hashes" option.
CHECK_HASH_IF_FAST_ELSE_FAIL = 'if_fast_else_fail'
CHECK_HASH_IF_FAST_ELSE_SKIP = 'if_fast_else_skip'
CHECK_HASH_ALWAYS = 'always'
CHECK_HASH_NEVER = 'never'


# Table storing polynomial values of x^(2^k) mod CASTAGNOLI_POLY for all k < 31,
# where x^(2^k) and CASTAGNOLI_POLY are both considered polynomials. This is
# sufficient since x^(2^31) mod CASTAGNOLI_POLY = x.
# Used by _ExtendByZeros to extend a CRC by a number of zero bits in
# O(log num_bits) polynomial multiplications.
X_POW_2K_TABLE = [2, 4, 16, 256, 65536, 517762881, 984302966,
408362264, 1503875210, 2862076957, 3884826397, 1324787473,
621200174, 1758783527, 1416537776, 1180494764, 648569364,
2521473789, 994858823, 1728245375, 3498467999, 4059169852,
3345064394, 2828422810, 2429203150, 3336788029, 860151998,
2102628683, 1033187991, 4243778976, 1123580069]
# Castagnoli polynomial and its degree.
# This is the generator polynomial of CRC32C (as used by GCS), expressed as
# an integer bit pattern.
CASTAGNOLI_POLY = 4812730177
DEGREE = 32
def ConcatCrc32c(crc_a, crc_b, num_bytes_in_b):
  """Computes CRC32C for concat(A, B) given crc(A), crc(B) and len(B).

  An explanation of the algorithm can be found at
  crcutil.googlecode.com/files/crc-doc.1.0.pdf.

  Args:
    crc_a: A 32-bit integer representing crc(A) with least-significant
        coefficient first.
    crc_b: Same as crc_a.
    num_bytes_in_b: Length of B in bytes.

  Returns:
    CRC32C for concat(A, B)
  """
  if num_bytes_in_b:
    # Shift crc(A) past B's bits (a polynomial multiplication by
    # x^(8 * len(B))), then fold in crc(B).
    return _ExtendByZeros(crc_a, 8 * num_bytes_in_b) ^ crc_b
  # Concatenating an empty B leaves the checksum unchanged.
  return crc_a
def _CrcMultiply(p, q):
  """Multiplies two polynomials together modulo CASTAGNOLI_POLY.

  Args:
    p: The first polynomial.
    q: The second polynomial.

  Returns:
    Result of the multiplication.
  """
  # Carry-less (GF(2)) shift-and-add multiply: for each set bit of p, XOR the
  # current q into the product; q is doubled each step and reduced modulo the
  # Castagnoli polynomial whenever it overflows DEGREE bits.
  product = 0
  overflow_bit = 1 << DEGREE
  bits_remaining = DEGREE
  while bits_remaining:
    if p & 1:
      product ^= q
    q <<= 1
    if q & overflow_bit:
      q ^= CASTAGNOLI_POLY
    p >>= 1
    bits_remaining -= 1
  return product
def _ExtendByZeros(crc, num_bits):
  """Given crc representing polynomial P(x), compute P(x)*x^num_bits.

  Args:
    crc: crc respresenting polynomial P(x).
    num_bits: number of bits in crc.

  Returns:
    P(x)*x^num_bits
  """
  def _ReverseBits32(value):
    # Mirror the 32-bit pattern so the polynomial's least-significant
    # coefficient becomes the most-significant bit.
    return int(format(value, '032b')[::-1], 2)

  extended = _ReverseBits32(crc)
  power_index = 0
  remaining = num_bits
  # Square-and-multiply over the precomputed x^(2^k) table: one polynomial
  # multiplication per set bit of num_bits.
  while remaining:
    if remaining & 1:
      extended = _CrcMultiply(
          extended, X_POW_2K_TABLE[power_index % len(X_POW_2K_TABLE)])
    power_index += 1
    remaining >>= 1
  return _ReverseBits32(extended)
def _CalculateHashFromContents(fp, hash_alg):
  """Computes the hex digest of a seekable stream's entire contents.

  The stream position is reset to 0 before and after the computation.

  Args:
    fp: An already-open file object.
    hash_alg: Instance of hashing class initialized to start state.

  Returns:
    Hash of the stream in hex string format.
  """
  fp.seek(0)
  # CalculateHashesFromContents expects a dict of digesters; wrap the single
  # digester under a throwaway key.
  CalculateHashesFromContents(fp, {'placeholder': hash_alg})
  fp.seek(0)
  return hash_alg.hexdigest()
def CalculateHashesFromContents(fp, hash_dict, callback_processor=None):
  """Feeds a stream's contents through every digester in hash_dict.

  Args:
    fp: An already-open file object (stream will be consumed).
    hash_dict: Dict of (string alg_name: initialized hashing class)
        Hashing class will be populated with digests upon return.
    callback_processor: Optional callback processing class that implements
        Progress(integer amount of bytes processed).
  """
  # Hoist the digester collection out of the read loop; .values() works under
  # both Python 2 and Python 3.
  digesters = list(hash_dict.values())
  while True:
    chunk = fp.read(DEFAULT_FILE_BUFFER_SIZE)
    if not chunk:
      return
    for digester in digesters:
      digester.update(chunk)
    if callback_processor:
      callback_processor.Progress(len(chunk))
def CalculateB64EncodedCrc32cFromContents(fp):
  """Computes a base64 CRC32c checksum of a seekable stream's contents.

  The stream position is reset to 0 before and after the computation.

  Args:
    fp: An already-open file object.

  Returns:
    CRC32c checksum of the file in base64 format.
  """
  crc_digester = crcmod.predefined.Crc('crc-32c')
  return _CalculateB64EncodedHashFromContents(fp, crc_digester)
def CalculateB64EncodedMd5FromContents(fp):
  """Computes a base64 MD5 digest of a seekable stream's contents.

  The stream position is reset to 0 before and after the computation.

  Args:
    fp: An already-open file object.

  Returns:
    MD5 digest of the file in base64 format.
  """
  md5_digester = md5()
  return _CalculateB64EncodedHashFromContents(fp, md5_digester)
def CalculateMd5FromContents(fp):
  """Computes a hex-format MD5 digest of a seekable stream's contents.

  The stream position is reset to 0 before and after the computation.

  Args:
    fp: An already-open file object.

  Returns:
    MD5 digest of the file in hex format.
  """
  md5_digester = md5()
  return _CalculateHashFromContents(fp, md5_digester)
def Base64EncodeHash(digest_value):
  """Returns the base64-encoded version of the input hex digest value.

  Args:
    digest_value: Hex digest string (e.g. the output of hexdigest()).

  Returns:
    Base64-encoded digest with no trailing newline.
  """
  # base64.b64encode replaces the deprecated base64.encodestring (removed in
  # Python 3.9). Unlike encodestring, b64encode never inserts line breaks, so
  # no rstrip is needed and digests longer than 57 raw bytes (e.g. SHA-512)
  # no longer pick up embedded newlines.
  return base64.b64encode(binascii.unhexlify(digest_value))
def Base64ToHexHash(base64_hash):
  """Returns the hex digest value of the input base64-encoded hash.

  Args:
    base64_hash: Base64-encoded hash, which may contain newlines and single or
        double quotes.

  Returns:
    Hex digest of the input argument.
  """
  # base64.b64decode replaces the deprecated base64.decodestring (removed in
  # Python 3.9); like decodestring, it discards characters outside the base64
  # alphabet before decoding.
  return binascii.hexlify(base64.b64decode(base64_hash.strip('\n"\'')))
def _CalculateB64EncodedHashFromContents(fp, hash_alg):
  """Computes a base64 digest of a seekable stream's contents.

  The stream position is reset to 0 before and after the computation.

  Args:
    fp: An already-open file object.
    hash_alg: Instance of hashing class initialized to start state.

  Returns:
    Hash of the stream in base64 format.
  """
  hex_digest = _CalculateHashFromContents(fp, hash_alg)
  return Base64EncodeHash(hex_digest)
def GetUploadHashAlgs():
  """Returns a dict of hash algorithms for validating an uploaded object.

  This is for use only with single object uploads, not compose operations
  such as those used by parallel composite uploads (though it can be used to
  validate the individual components).

  Returns:
    dict of (algorithm_name: hash_algorithm)
  """
  check_hashes_config = config.get(
      'GSUtil', 'check_hashes', CHECK_HASH_IF_FAST_ELSE_FAIL)
  # Compare against the shared CHECK_HASH_NEVER constant (== 'never') for
  # consistency with GetDownloadHashAlgs, instead of a bare string literal.
  if check_hashes_config == CHECK_HASH_NEVER:
    return {}
  # MD5 is always fast to compute, so uploads have no "slow hashing" case.
  return {'md5': md5}
def GetDownloadHashAlgs(logger, consider_md5=False, consider_crc32c=False):
    """Returns a dict of hash algorithms for validating an object.

    MD5 is preferred when offered; otherwise crc32c is used, subject to the
    boto 'check_hashes' configuration and crcmod availability.

    Args:
      logger: logging.Logger for outputting log messages.
      consider_md5: If True, consider using a md5 hash.
      consider_crc32c: If True, consider using a crc32c hash.

    Returns:
      Dict of (string, hash algorithm).

    Raises:
      CommandException if hash algorithms satisfying the boto config file
      cannot be returned.
    """
    check_hashes_config = config.get(
        'GSUtil', 'check_hashes', CHECK_HASH_IF_FAST_ELSE_FAIL)
    if check_hashes_config == CHECK_HASH_NEVER:
        return {}
    hash_algs = {}
    if consider_md5:
        hash_algs['md5'] = md5
    elif consider_crc32c:
        # If the cloud provider supplies a CRC, we'll compute a checksum to
        # validate if we're using a native crcmod installation and MD5 isn't
        # offered as an alternative.
        if UsingCrcmodExtension(crcmod):
            hash_algs['crc32c'] = lambda: crcmod.predefined.Crc('crc-32c')
        elif not hash_algs:
            if check_hashes_config == CHECK_HASH_IF_FAST_ELSE_FAIL:
                raise CommandException(_SLOW_CRC_EXCEPTION_TEXT)
            elif check_hashes_config == CHECK_HASH_IF_FAST_ELSE_SKIP:
                # Logger.warn() is a deprecated alias; warning() is canonical.
                logger.warning(_NO_HASH_CHECK_WARNING)
            elif check_hashes_config == CHECK_HASH_ALWAYS:
                logger.warning(_SLOW_CRCMOD_DOWNLOAD_WARNING)
                hash_algs['crc32c'] = lambda: crcmod.predefined.Crc('crc-32c')
            else:
                raise CommandException(
                    'Your boto config \'check_hashes\' option is misconfigured.')
    return hash_algs
class HashingFileUploadWrapper(object):
    """Wraps an input stream in a hash digester and exposes a stream interface.

    This class provides integrity checking during file uploads via the
    following properties:

    Calls to read will appropriately update digesters with all bytes read.
    Calls to seek (assuming it is supported by the wrapped stream) using
    os.SEEK_SET will catch up / reset the digesters to the specified
    position. If seek is called with a different os.SEEK mode, the caller
    must return to the original position using os.SEEK_SET before further
    reads.
    Calls to seek are fast if the desired position is equal to the position at
    the beginning of the last read call (we only need to re-hash bytes
    from that point on).
    """

    def __init__(self, stream, digesters, hash_algs, src_url, logger):
        """Initializes the wrapper.

        Args:
          stream: Input stream.
          digesters: dict of {string: hash digester} containing digesters, where
              string is the name of the hash algorithm.
          hash_algs: dict of {string: hash algorithm} for resetting and
              recalculating digesters. String is the name of the hash algorithm.
          src_url: Source FileUrl that is being copied.
          logger: For outputting log messages.
        """
        if not digesters:
            raise CommandException('HashingFileUploadWrapper used with no digesters.')
        elif not hash_algs:
            raise CommandException('HashingFileUploadWrapper used with no hash_algs.')
        self._orig_fp = stream
        self._digesters = digesters
        self._src_url = src_url
        self._logger = logger
        # Non-None only while the stream sits at a position reached via a
        # non-SEEK_SET seek; read() refuses to run until an absolute seek
        # restores a known position.
        self._seek_away = None
        # Snapshot of each digester as of the start of the last read, so a
        # backwards seek to that mark can restore state cheaply.
        self._digesters_previous = {}
        for alg in self._digesters:
            self._digesters_previous[alg] = self._digesters[alg].copy()
        self._digesters_previous_mark = 0
        self._digesters_current_mark = 0
        self._hash_algs = hash_algs

    def read(self, size=-1):  # pylint: disable=invalid-name
        """Reads from the wrapped file pointer and calculates hash digests.

        Args:
          size: The amount of bytes to read. If omitted or negative, the entire
              contents of the file will be read, hashed, and returned.

        Returns:
          Bytes from the wrapped stream.

        Raises:
          CommandException if the position of the wrapped stream is unknown.
        """
        if self._seek_away is not None:
            raise CommandException('Read called on hashing file pointer in an '
                                   'unknown position; cannot correctly compute '
                                   'digest.')
        data = self._orig_fp.read(size)
        # Save the pre-read digester state so a seek back to this mark is cheap.
        self._digesters_previous_mark = self._digesters_current_mark
        for alg in self._digesters:
            self._digesters_previous[alg] = self._digesters[alg].copy()
            self._digesters[alg].update(data)
        self._digesters_current_mark += len(data)
        return data

    def tell(self):  # pylint: disable=invalid-name
        """Returns the current stream position."""
        return self._orig_fp.tell()

    def seekable(self):  # pylint: disable=invalid-name
        """Returns true if the stream is seekable."""
        return self._orig_fp.seekable()

    def seek(self, offset, whence=os.SEEK_SET):  # pylint: disable=invalid-name
        """Seeks in the wrapped file pointer and catches up hash digests.

        Args:
          offset: The offset to seek to.
          whence: os.SEEK_CUR, or SEEK_END, SEEK_SET.

        Returns:
          Return value from the wrapped stream's seek call.
        """
        if whence != os.SEEK_SET:
            # We do not catch up hashes for non-absolute seeks, and rely on the
            # caller to seek to an absolute position before reading.
            self._seek_away = self._orig_fp.tell()
        else:
            # Hashes will be correct and it's safe to call read().
            self._seek_away = None
            if offset < self._digesters_previous_mark:
                # This is earlier than our earliest saved digest, so we need to
                # reset the digesters and scan from the beginning.
                for alg in self._digesters:
                    self._digesters[alg] = self._hash_algs[alg]()
                self._digesters_current_mark = 0
                self._orig_fp.seek(0)
                self._CatchUp(offset)
            elif offset == self._digesters_previous_mark:
                # Just load the saved digests.
                self._digesters_current_mark = self._digesters_previous_mark
                for alg in self._digesters:
                    self._digesters[alg] = self._digesters_previous[alg]
            elif offset < self._digesters_current_mark:
                # Reset the position to our previous digest and scan forward.
                self._digesters_current_mark = self._digesters_previous_mark
                for alg in self._digesters:
                    self._digesters[alg] = self._digesters_previous[alg]
                self._orig_fp.seek(self._digesters_previous_mark)
                self._CatchUp(offset - self._digesters_previous_mark)
            else:
                # Scan forward from our current digest and position.
                self._orig_fp.seek(self._digesters_current_mark)
                self._CatchUp(offset - self._digesters_current_mark)
        return self._orig_fp.seek(offset, whence)

    def _CatchUp(self, bytes_to_read):
        """Catches up hashes, but does not return data and uses little memory.

        Before calling this function, digesters_current_mark should be updated
        to the current location of the original stream and the self._digesters
        should be current to that point (but no further).

        Args:
          bytes_to_read: Number of bytes to catch up from the original stream.
        """
        if self._orig_fp.tell() != self._digesters_current_mark:
            raise CommandException(
                'Invalid mark when catching up hashes. Stream position %s, hash '
                'position %s' % (self._orig_fp.tell(), self._digesters_current_mark))
        for alg in self._digesters:
            if bytes_to_read >= MIN_SIZE_COMPUTE_LOGGING:
                self._logger.info('Catching up %s for %s...', alg,
                                  self._src_url.url_string)
            self._digesters_previous[alg] = self._digesters[alg].copy()
        self._digesters_previous_mark = self._digesters_current_mark
        # Hash forward in TRANSFER_BUFFER_SIZE chunks to keep memory bounded.
        bytes_remaining = bytes_to_read
        bytes_this_round = min(bytes_remaining, TRANSFER_BUFFER_SIZE)
        while bytes_this_round:
            data = self._orig_fp.read(bytes_this_round)
            bytes_remaining -= bytes_this_round
            for alg in self._digesters:
                self._digesters[alg].update(data)
            bytes_this_round = min(bytes_remaining, TRANSFER_BUFFER_SIZE)
        self._digesters_current_mark += bytes_to_read
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
7b89d6fc1d32a653acadbcb42e0291d6bcbf2e75 | 5a3316076850c40155fd4458e32af11377e888ef | /tests/tests_for_history_service.py | dd356ee4511d476b98d3fd26e51f8d5b5d07a2e0 | [] | no_license | Lv-447-Python/history-service-repository | bd71f7f2a03ca82ad10a1620989109de9a2236fe | 7cac4ea4fec588518180f6158c2fd4c628ac47c3 | refs/heads/develop | 2022-12-11T01:21:03.854519 | 2019-12-26T22:49:11 | 2019-12-26T22:49:11 | 218,752,067 | 0 | 0 | null | 2022-12-08T06:58:55 | 2019-10-31T11:37:37 | Python | UTF-8 | Python | false | false | 875 | py | """Module for history service testing."""
import unittest
from history_service import APP, DB
# Connection parameters for the test database; interpolated into the
# SQLAlchemy URI in HistoryServiceTestCase.setUp().
POSTGRES = {
    'user': 'postgres',
    'pw': 'postgres',
    'db': 'HistoryTestDB',
    'host': '127.0.0.1',
    'port': '5432',
}
class HistoryServiceTestCase(unittest.TestCase):
    """Base test case wiring the history service app to a test database."""
    def setUp(self):
        """
        Configure the application for testing and create a fresh schema.

        Returns:
            None.
        """
        db_uri = 'postgres+psycopg2://%(user)s:%(pw)s@%(host)s:%(port)s/%(db)s' % POSTGRES
        APP.config['TESTING'] = True
        APP.config['SQLALCHEMY_DATABASE_URI'] = db_uri
        self.APP = APP.test_client()
        DB.create_all()
    def tearDown(self):
        """
        Drop every table and detach the database session.

        Returns:
            None.
        """
        DB.session.remove()
        DB.drop_all()
| [
"valentyn17@gmail.com"
] | valentyn17@gmail.com |
a35e5dfaf0b8117e9aa0577ead52f0f6ccfc948b | df8fc5431ed40e355a12b22787aa63f2baca9456 | /Task0.py | 38a690d836e4992bd392dca9460140a9d2e929ed | [] | no_license | gaikwadapurva/Unscramble-Computer-Science-Problems | d931d11b3d6f0b3022328e96f0cbbfe182d02f6a | a5298082cd273d21c56b8a09f924f0fa3d04df01 | refs/heads/master | 2022-09-09T05:13:23.476095 | 2020-05-25T13:20:29 | 2020-05-25T13:20:29 | 266,773,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | """
Read file into texts and calls.
It's ok if you don't understand how to read files.
"""
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
"""
TASK 0:
What is the first record of texts and what is the last record of calls?
Print messages:
"First record of texts, <incoming number> texts <answering number> at time <time>"
"Last record of calls, <incoming number> calls <answering number> at time <time>, lasting <during> seconds"
"""
print("First record of texts, {} texts {} at time {}"
.format(texts[0][0], texts[0][1], texts[0][2]))
print("Last record of calls, {} calls {} at time {}, lasting {} seconds"
.format(calls[-1][0], calls[-1][1], calls[-1][2], calls[-1][3])) | [
"gaikwadapurva65@gmail.com"
] | gaikwadapurva65@gmail.com |
138883483c657f0d94e071ae9a5d193d18f81d3f | eca097296c914c29f8701acfd3efcf7f795a9119 | /modules/apitest/views.py | 19fdecad343f63902edca4f6333b950a404f545f | [] | no_license | xudachengzi/tasks | a4ce50b1f90e345bb627927600fd2d7868cdb497 | 8e2e8f54add02fc2bb70e82b6356a513d243d810 | refs/heads/master | 2020-04-16T12:30:43.904569 | 2019-01-14T03:00:57 | 2019-01-14T03:00:57 | 165,582,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib import auth
from django.contrib.auth import authenticate, login
def test(request):
    """Smoke-test view: responds with a fixed plain-text greeting."""
    return HttpResponse('Hello Python')
def login(request):
    """Render the login form on GET; authenticate and redirect on POST."""
    if request.method != "POST":
        return render(request, 'login.html')
    username = request.POST.get('username')
    password = request.POST.get('password')
    user = authenticate(username=username, password=password)
    if not user:
        # Authentication failed: redisplay the form with an error message.
        return render(request, 'login.html',
                      {'error': '用户名或密码错误'})
    auth.login(request, user)
    request.session['user'] = username
    return HttpResponseRedirect('/home/')
def home(request):
    """Render the home page (target of the post-login redirect to /home/)."""
    return render(request, "home.html")
| [
"xu649047654@163.com"
] | xu649047654@163.com |
8507c003779b6772a64260fe1ed4ba57f738c277 | b2b499ef98711005a02c3529cbbde88d249510a7 | /process.py | 6c0f9f5ce578e07f3048e9b7679d98c61f1c4f1f | [] | no_license | emilynielson/drone_challenge | b42a9deff1d43e89e5b433e19a0719177762b7b4 | de9abac00dae938bf6f713acbcf075bf6b1f332a | refs/heads/master | 2020-03-31T10:56:31.385175 | 2018-10-10T00:25:57 | 2018-10-10T00:25:57 | 152,156,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,230 | py | from drone import Drone
from order import Order
import sys
import os
from datetime import datetime
import ipdb
# Read the input file and return its lines as a list
def importFile(filePath):
    """Return all lines of the file at filePath (newlines preserved).

    The redundant f.close() inside the with-block was removed; the context
    manager already closes the file on exit.
    """
    with open(filePath) as f:
        return f.readlines()
# Write order number with departure time and NPS score to output textfile
def exportFile(lines, nps):
    """Write each line plus a trailing 'NPS <score>' line to ./output.txt.

    Fixes: the caller's `lines` list is no longer mutated, and the stray
    f.close() after the with-block (the file was already closed) was removed.

    Args:
        lines: list of order/departure strings to write.
        nps: NPS score appended as the final line.
    """
    with open('output.txt', 'w') as f:
        for line in lines:
            f.write("%s\n" % line)
        f.write("%s\n" % ('NPS ' + str(nps)))
    # Report the absolute location of the file that was written.
    print(os.path.dirname(os.path.abspath(f.name)) + '/' + f.name)
# Parse raw input lines into Order objects
def createOrders(content):
    """Build one Order per input line shaped '<number> <location> <time>'."""
    orders = []
    for raw_line in content:
        fields = raw_line.strip('\n').split(' ')
        orders.append(Order(fields[0], fields[1], fields[2]))
    return orders
# Pair every order with a newly created Drone
def createDrones(orders):
    """Return a list containing one Drone per order."""
    return [Drone(order) for order in orders]
#Calculate Drone departure times, nps and return times and return list of lines for the output file
def runDeliveries(drones, droneStartTime):
output = []
startTime = 0
for drone in drones:
if drone == drones[0]:
droneDeparture = droneStartTime
else:
droneDeparture = startTime
drone.deliveryTime = drone.calculateDeliveryTime(droneDeparture)
drone.nps = drone.calculateNPS()
drone.returnTime = drone.calculateReturnTime()
startTime = drone.returnTime
formatDepartureTime = droneDeparture.strftime('%H:%M:%S')
line = drone.orderNumber +' '+formatDepartureTime
output.append(line)
return output
# Calculate NPS score from saved Drone nps attributes
def getNPS(drones):
    """Compute the Net Promoter Score from each drone's `nps` attribute.

    An nps of 1 counts as a promoter, -1 as a detractor; anything else is
    passive. Returns 0 for an empty list (the original raised
    ZeroDivisionError).

    Returns:
        int: ((promoters - detractors) / total) * 100, truncated toward zero.
    """
    totalOrders = len(drones)
    if totalOrders == 0:
        return 0
    npsScores = [drone.nps for drone in drones]
    promotors = npsScores.count(1)
    detractors = npsScores.count(-1)
    return int(((promotors / totalOrders) - (detractors / totalOrders)) * 100)
| [
"ecw1014@gmail.com"
] | ecw1014@gmail.com |
f5a259477272d7b2fd22bf1a1bbcea312cbd1921 | 799c6dfa8693b277ca92f12a2093f52cca97210c | /Twitter/Twitter/apps/users/migrations/0001_initial.py | bd8b1b51df2e92d8d650d0d37b331a07050fa302 | [] | no_license | KolesovaOlesya/task | 46a033207b1a8689e28a0fd4b2ce885e1d8c26d2 | fa72c2444283604e6d08fab78e006ab09754f763 | refs/heads/master | 2020-04-11T03:23:46.271606 | 2018-12-12T11:14:34 | 2018-12-12T11:14:34 | 161,471,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | # Generated by Django 2.1.3 on 2018-11-25 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (see the "Generated by Django" header):
    # creates the 'User' model for the users app.
    # NOTE(review): password/last_login mirror Django's AbstractBaseUser
    # columns — confirm against the users app's models.py.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('username', models.CharField(max_length=50)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"kolesova.olesya.93@mail.ru"
] | kolesova.olesya.93@mail.ru |
9400a770c4ec51d7b2b9fc887620c1727b823f17 | 2ee873ee61f2dfd422dc273ffdf8800dafd457c3 | /src/wurf/url_download.py | 801bf5a752c905d118357da4dc0a193bb55b4fd3 | [
"BSD-3-Clause"
] | permissive | GameMonkey/waf | ef75f87fc93092be6be473913d768e9c74d4ab22 | f246a8f3899a6456feda0d06f5f21aedcc7d8fc1 | refs/heads/master | 2020-12-02T21:20:39.697151 | 2017-07-06T11:21:41 | 2017-07-06T11:21:41 | 96,296,712 | 0 | 0 | null | 2017-07-06T11:22:56 | 2017-07-05T08:35:28 | Python | UTF-8 | Python | false | false | 2,295 | py | #! /usr/bin/env python
# encoding: utf-8
import cgi
import os
from .compat import IS_PY2
if IS_PY2:
# Python 2
from urllib2 import urlopen
from urlparse import urlparse
else:
# Python 3
from urllib.request import urlopen
from urllib.parse import urlparse
class UrlDownload(object):
    """Downloads a file over HTTP(S), deriving the local file name from the
    URL path or, failing that, from the Content-Disposition header."""
    def _url_filename(self, url):
        """ Based on the url return the filename it contains or None if no
        filename is specified.
        URL with a filename:
            http://example.com/file.txt
        URL without a filename:
            http://example.com
        :param url: The URL as a string
        :return: The filename or None if no filename is in the URL.
        """
        parsed = urlparse(url)
        if not parsed.path:
            return None
        filename = os.path.basename(parsed.path)
        _, extension = os.path.splitext(filename)
        # A trailing path segment without an extension (e.g. '/download')
        # is not treated as a usable filename.
        if not extension:
            return None
        else:
            return filename
    def _response_filename(self, response):
        """ Returns the filename contained in the HTTP Content-Disposition
        header, or None when the header is absent or names no file.
        """
        # Try to get the file name from the headers
        header = response.info().get('Content-Disposition', '')
        if not header:
            return None
        # NOTE(review): the cgi module is deprecated (removed in Python
        # 3.13); a future change should parse this header via email.message.
        _, params = cgi.parse_header(header)
        return params.get('filename', None)
    def download(self, cwd, source, filename=None):
        """ Download the file specified by the source.
        :param cwd: The directory where to download the file.
        :param source: The URL of the file to download.
        :param filename: The filename to store the file under.
        :return: The full path of the downloaded file.
        """
        response = urlopen(url=source)
        # Filename resolution order: explicit argument, URL path, headers.
        if not filename:
            filename = self._url_filename(source)
        if not filename:
            filename = self._response_filename(response)
        # NOTE(review): asserts are stripped under `python -O`; these are
        # sanity checks, not input validation.
        assert filename
        assert os.path.isdir(cwd)
        filepath = os.path.join(cwd, filename)
        # From http://stackoverflow.com/a/1517728
        CHUNK = 16 * 1024
        with open(filepath, 'wb') as f:
            while True:
                chunk = response.read(CHUNK)
                if not chunk:
                    break
                f.write(chunk)
        return filepath
| [
"morten@mortenvp.com"
] | morten@mortenvp.com |
8829070ac250ac2824845aa0c2f13fbe2be8478b | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_19002.py | 3593dffbce1abc3e5313e425c261f44a9b5adabd | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | # No datetime module in python how to install via easy_install?
easy_install DateTime
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
6c7b90e6c72ee8e5fd6006ff8c23dbf6a106dc50 | 8941fa2cb407d7e451b30cd7e330d8053b03c367 | /src/command_modules/azure-cli-extension/azure/cli/command_modules/extension/commands.py | 36f94cdb152af9e53172e113661dd1ce12e29de4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | omerlh/azure-cli | 0dfc6c4e996070891be17c5492c2f6e5b4ad326f | 518f08f0c51382675ac10c0f3681d3e21dea88f1 | refs/heads/dev | 2021-08-20T06:14:42.571888 | 2017-11-28T02:03:35 | 2017-11-28T02:03:35 | 112,201,618 | 1 | 0 | null | 2017-11-27T13:51:02 | 2017-11-27T13:47:12 | Python | UTF-8 | Python | false | false | 1,506 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import OrderedDict
from azure.cli.core.prompting import prompt_y_n
from azure.cli.core.commands import cli_command
def transform_extension_list_available(results):
    """Shape extension names into single-column ('Name') table rows."""
    rows = []
    for extension_name in results:
        rows.append(OrderedDict([('Name', extension_name)]))
    return rows
def ext_add_has_confirmed(command_args):
    """Return True without prompting when no --source was given; otherwise
    ask the user to confirm installing from the supplied source."""
    if not command_args.get('source'):
        return True
    return bool(prompt_y_n('Are you sure you want to install this extension?'))
# Register the 'az extension' command group; each call binds a command name
# to its implementation path in extension.custom.
cli_command(__name__, 'extension add', 'azure.cli.command_modules.extension.custom#add_extension',
            confirmation=ext_add_has_confirmed)
cli_command(__name__, 'extension remove', 'azure.cli.command_modules.extension.custom#remove_extension')
cli_command(__name__, 'extension list', 'azure.cli.command_modules.extension.custom#list_extensions')
cli_command(__name__, 'extension show', 'azure.cli.command_modules.extension.custom#show_extension')
# 'list-available' renders its table output through the transformer above.
cli_command(__name__, 'extension list-available',
            'azure.cli.command_modules.extension.custom#list_available_extensions',
            table_transformer=transform_extension_list_available)
cli_command(__name__, 'extension update', 'azure.cli.command_modules.extension.custom#update_extension')
| [
"noreply@github.com"
] | noreply@github.com |
fbab0237ee264128ef38a29d1bf77e02444765be | de9716ac7ad944c9bf9139e73e85d026ede605c8 | /site/blog/migrations/0001_initial.py | 25719c107e73e5dde808c5349b43e45719e2c71f | [] | no_license | Igor-Yudin/cooking-blog | d2feb2ff71adca96243f587daad4e09b1d947231 | 67b888bc75c2d662c00826fc9e438bb5e6c4aba9 | refs/heads/master | 2021-01-11T16:36:46.849542 | 2017-03-30T18:24:14 | 2017-03-30T18:24:14 | 80,123,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 19:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the blog app: Image, Paragraph and
    # Post models; Image and Paragraph are linked to Post via the AddField
    # foreign keys appended below (added separately because Post is created
    # after the models that reference it).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='images')),
                ('order', models.IntegerField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Paragraph',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('paragraph', models.TextField()),
                ('order', models.IntegerField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('date', models.DateField(auto_now_add=True)),
                ('slug', models.SlugField(blank=True, max_length=255)),
            ],
        ),
        migrations.AddField(
            model_name='paragraph',
            name='post',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='paragraphs', to='blog.Post'),
        ),
        migrations.AddField(
            model_name='image',
            name='post',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='blog.Post'),
        ),
    ]
| [
"Igor_Yudin@inbox.ru"
] | Igor_Yudin@inbox.ru |
7c08fd723a38602b8e19a3aa2adc67f12d44e429 | 85286d46f675bbe9c1f8fcc7534ab9d3849640e6 | /toolBox/_classes.py | 5c4e11ce2eada4a85fb57c35f1f7423c46b337ec | [] | no_license | gamug/pdfMiner_dev | e6e2642b27f80280aae6323b32616553b10caede | ef0f4492df4aa6d6504e97e5ea6ea673584db788 | refs/heads/master | 2020-07-20T07:42:35.330697 | 2019-09-10T19:41:55 | 2019-09-10T19:41:55 | 206,600,815 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,257 | py | # Class defined to couple words and search criterion
import re, spacy
import numpy as np
class resultSearched():
    """Couples a list of search words with a symbol and a match function."""
    def __init__(self, words, symbol, function='default'):
        """
        words: list of words/patterns to look for.
        symbol: symbol associated with this search criterion.
        function: custom match callable, or the string 'default' to use the
            built-in regex search below.
        """
        self.words = words
        self.symbol = symbol
        # isinstance() instead of type(...) == str: also matches str subclasses.
        if isinstance(function, str):
            self.function = self.default
        else:
            self.function = function
    def default(self, array):
        """
        Default function to search word coincidences.
        input:
            array: list with the paragraph words
        output:
            boolean: True if any searched word is in the paragraph,
                     False otherwise
        """
        # NOTE(review): words are interpolated into the regex unescaped, so
        # regex metacharacters in a word act as a pattern — confirm intended.
        paragraph = ' '.join(array)
        for word in self.words:
            if re.findall(word + r'.*', paragraph, re.IGNORECASE):
                return True
        return False
# This is a class that let us extract engineering and petroleum companies from spacy entities
class entGetter():
    """Extracts known company (ORG) and place (GPE) entities from text."""
    def __init__(self, petroleum, engineer, nlp):
        """
        Instantiate the extractor.
        input:
            petroleum: petroleum companies dictionary, {'companies': [...], ...}
            engineer: engineering companies dictionary, {'companies': [...], ...}
            nlp: NLP pipeline (callable returning a document with .ents)
        """
        self.nlp = nlp
        # Concatenate every word of every known company into one big string;
        # getEntities() later searches candidate names inside it.
        self.companies = self.normalizeFilters(petroleum['companies']) \
                         + self.normalizeFilters(engineer['companies'])
    def normalizeFilters(self, data):
        """
        Collapse a list of company names into one string with all spaces
        removed.
        input:
            data (list): list of company-name strings
        output:
            compCollections (string): concatenation of all words, no spaces
        """
        compCollections = []
        for company in data:
            for word in company.split(' '):
                compCollections.append(word)
        return ''.join(compCollections)
    def getEntities(self, text):
        """
        Iterate over the entities found in text.
        input:
            text: sentence to analyze
        output:
            textCompanies: array of unique company names found
            gpe: array of unique place names found
            numgpe: raw (pre-deduplication) count of places found
        """
        self.doc = self.nlp(text)
        self.textCompanies = []
        self.gpe = []
        for ent in self.doc.ents:
            if ent.label_ == 'ORG':
                # The entity text is used as a regex pattern; a malformed
                # pattern (e.g. unbalanced parens) is skipped explicitly
                # instead of being swallowed by a bare except.
                try:
                    if len(re.findall(ent.text.replace(' ', ''), self.companies)) > 0:
                        self.textCompanies.append(ent.text)
                except re.error:
                    pass
            if ent.label_ == 'GPE':
                self.gpe.append(ent.text)
        self.textCompanies = np.unique(self.textCompanies)
        numgpe = len(self.gpe)
        self.gpe = np.unique(self.gpe)
        return self.textCompanies, self.gpe, numgpe
"noreply@github.com"
] | noreply@github.com |
62c90294b18a2c3fd268af603a53b8a22b86605c | 30754a148b79903d6e49399f1f270c79934ce389 | /fuzzinator/ui/tui/tui_listener.py | 9427825e28a0e23a7c0a9b1feb0cd1a50d817e82 | [
"BSD-3-Clause"
] | permissive | syedkhalid/fuzzinator | 720ffc552c595b50de46e4e4e51f3a01cdc9aa77 | f90b58605de563e77b85ed0d54d2beb29efc7d14 | refs/heads/master | 2021-04-09T17:31:06.625840 | 2018-03-12T14:37:18 | 2018-03-12T15:21:27 | 125,814,277 | 1 | 0 | BSD-3-Clause | 2018-03-19T06:53:29 | 2018-03-19T06:53:29 | null | UTF-8 | Python | false | false | 1,054 | py | # Copyright (c) 2016-2017 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import inspect
import os
from fuzzinator.listener import EventListener
class TuiListener(EventListener):
    # Forwards every EventListener callback through a queue + wakeup pipe so
    # the TUI side can consume events instead of handling them in-process.
    def __init__(self, pipe, events, lock):
        # Replace each EventListener method on this instance with a
        # Trampoline that serializes the call (name + kwargs).
        for fn, _ in inspect.getmembers(EventListener, predicate=inspect.isfunction):
            setattr(self, fn, self.Trampoline(name=fn, pipe=pipe, events=events, lock=lock))
    class Trampoline(object):
        # Callable stand-in for a single listener method.
        def __init__(self, name, pipe, events, lock):
            self.name = name
            self.pipe = pipe
            self.events = events
            self.lock = lock
        def __call__(self, **kwargs):
            with self.lock:
                try:
                    self.events.put_nowait({'fn': self.name, 'kwargs': kwargs})
                    # Wake the pipe's reader with a 1-byte token.
                    os.write(self.pipe, b'x')
                except:
                    # NOTE(review): deliberate best-effort — a full queue or
                    # closed pipe must not crash the caller; the bare except
                    # also swallows KeyboardInterrupt, confirm that is intended.
                    pass
| [
"reni@inf.u-szeged.hu"
] | reni@inf.u-szeged.hu |
834d806a1de11b9ede080c5f7971ceaf79826ab9 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/d_7.py | ab8cf5b7f1a98268b126e4f73b7ff347efe89e40 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens enclosed between a leading and trailing '"' token.

    Prints an empty line when only the two quote tokens are present, and
    prints nothing when the tokens are not quote-delimited.

    The Python-2-only print statements were replaced with forms that behave
    identically on both Python 2 and Python 3.
    """
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print
            print(' '.join(lineRemaining[1:-1]))
        else:
            print('')
def main(fileName):
    """Execute each 'd_7 ...' command line read from fileName.

    Stops and prints ERROR at the first line whose command is not 'd_7'.
    The Python-2-only print statement was replaced with a call form that
    behaves identically on both Python 2 and Python 3.
    """
    with open(fileName) as f:
        for line in f:
            # NOTE(review): a blank line makes data[0] raise IndexError —
            # confirm whether input files may contain blank lines.
            data = line.split()
            if data[0] == 'd_7':
                printFunction(data[1:])
            else:
                print('ERROR')
                return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
d264e28ed2341e4155dedc2bdc2156861c78747e | e7ce273f404f82fd8672c97e50b386509c8f9870 | /Advanced/File_Handling/Directory_Traversal.py | 5aa0fbcbb285a6c22e60ca7405b8dd188b7a9b8b | [] | no_license | rzlatkov/Softuni | 3edca300f8ecdcfd86e332557712e17552bc91c3 | a494e35bff965b2b9dccc90e1381d5a1a23737a1 | refs/heads/main | 2023-07-02T12:49:59.737043 | 2021-08-13T20:47:07 | 2021-08-13T20:47:07 | 319,088,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | # Directory Traversal
import os
USER = os.getlogin()  # NOTE(review): os.getlogin() can raise OSError without a controlling terminal (cron/CI) — confirm acceptable
USED_PATH = '/home/rzlatkov/Softuni/Advanced/File_Handling/' # first level traverse only
ROOT_PATH = './' # traverse everything
REPORT_PATH_WINDOWS = f'C:\\Users\\{USER}\\Desktop\\report.txt' # for Win users
REPORT_PATH_LINUX = f'/home/{USER}/Desktop/report.txt' # I am coding on a Linux (Manjaro)
def traverse(path):
    """Walk `path` recursively and group file names by extension.

    The extension key starts at the FIRST dot (so 'a.tar.gz' maps to
    '.tar.gz'), matching the original behavior. Files with no dot in the
    name are skipped instead of raising ValueError as the original did.

    Returns:
        dict mapping extension -> list of file names.
    """
    dictionary_of_files = {}
    for _, _, files in os.walk(path):
        for f in files:
            if '.' not in f:
                continue  # extensionless file: nothing to group by
            extension = f[f.index('.'):]
            dictionary_of_files.setdefault(extension, []).append(f)
    return dictionary_of_files
def sort_extensions(dictionary_of_files):
    """Return a new dict with the extension keys in ascending order."""
    ordered_items = sorted(dictionary_of_files.items())
    return dict(ordered_items)
def sort_filenames(dictionary_list_values):
    """Return the file names sorted ascending.

    The redundant identity key (``key=lambda x: x``) was removed; plain
    sorted() compares the elements directly with the same result.
    """
    return sorted(dictionary_list_values)
def write_to_report(result, report_path):
    """Write each extension followed by its sorted file names to report_path."""
    with open(report_path, 'w') as writer:
        for extension, filenames in result.items():
            writer.write(extension + '\n')
            for filename in sort_filenames(filenames):
                writer.write(f"- - - {filename}\n")
# Script body: collect files under USED_PATH, sort the grouping by
# extension, and write the report to the current user's Linux desktop.
files = traverse(USED_PATH)
sorted_ext = sort_extensions(files)
write_to_report(sorted_ext, REPORT_PATH_LINUX)
| [
"nozzller@gmail.com"
] | nozzller@gmail.com |
b498d649116dedfc3a2280063468c8542d154604 | 543e7e45ae80c9fdc7c3a036fecccc675583bb3d | /accounts/tests/test_view_password_reset.py | 054e2f17ddc4613c02377d37e7efdf1567d144a6 | [] | no_license | davidswei/django-boards | b247d413af61138a2032c420055af21efc157f2d | fd1d70827b5f79cfca246760559dfffb7e4b8a41 | refs/heads/master | 2020-04-24T02:06:13.656778 | 2019-02-21T13:24:35 | 2019-02-21T13:24:35 | 171,624,559 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,106 | py | from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth.models import User
from django.core import mail
from django.core.urlresolvers import reverse
from django.urls import resolve
from django.test import TestCase
class PasswordResetTests(TestCase):
    """GET /reset/ should render the password-reset request form."""
    def setUp(self):
        url = reverse('password_reset')
        self.response = self.client.get(url)
    def test_status_code(self):
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(self.response.status_code, 200)
    def test_view_function(self):
        view = resolve('/reset/')
        self.assertEqual(view.func.view_class, auth_views.PasswordResetView)
    def test_csrf(self):
        self.assertContains(self.response, 'csrfmiddlewaretoken')
    def test_contains_form(self):
        form = self.response.context.get('form')
        self.assertIsInstance(form, PasswordResetForm)
    def test_form_inputs(self):
        # Exactly two inputs: the csrf token and the email field.
        self.assertContains(self.response, '<input', 2)
        self.assertContains(self.response, 'type="email"', 1)
class SuccessfulPasswordResetTests(TestCase):
    """Posting a known email should redirect and send one reset email."""
    def setUp(self):
        known_email = 'john@doe.com'
        User.objects.create_user(
            username='john', email=known_email, password='123abcdef')
        self.response = self.client.post(reverse('password_reset'),
                                         {'email': known_email})
    def test_redirection(self):
        self.assertRedirects(self.response, reverse('password_reset_done'))
    def test_send_password_reset_email(self):
        self.assertEqual(1, len(mail.outbox))
class InvalidPasswordResetTests(TestCase):
    """Posting an unknown email still redirects but sends no email."""
    def setUp(self):
        self.response = self.client.post(reverse('password_reset'),
                                         {'email': 'donotexist@email.com'})
    def test_redirection(self):
        self.assertRedirects(self.response, reverse('password_reset_done'))
    def test_no_reset_email_sent(self):
        self.assertEqual(0, len(mail.outbox))
class PasswordResetDoneTests(TestCase):
    """The 'reset email sent' page should be publicly reachable."""
    def setUp(self):
        url = reverse('password_reset_done')
        self.response = self.client.get(url)
    def test_status_code(self):
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(self.response.status_code, 200)
    def test_view_function(self):
        view = resolve('/reset/done/')
        self.assertEqual(
            view.func.view_class, auth_views.PasswordResetDoneView)
class PasswordResetConfirmTests(TestCase):
    """A valid uid/token pair should render the new-password form."""
    def setUp(self):
        user = User.objects.create_user(
            username='john', email='john@doe.com', password='123abcdef')
        # NOTE(review): urlsafe_base64_encode returns str on Django >= 2.2,
        # where this .decode() would fail — written for older Django/bytes.
        self.uid = urlsafe_base64_encode(force_bytes(user.pk)).decode()
        self.token = default_token_generator.make_token(user)
        url = reverse('password_reset_confirm', kwargs={
            'uidb64': self.uid, 'token': self.token})
        self.response = self.client.get(url, follow=True)
    def test_status_code(self):
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(self.response.status_code, 200)
    def test_view_function(self):
        view = resolve('/reset/{uidb64}/{token}/'.format(
            uidb64=self.uid, token=self.token))
        self.assertEqual(
            view.func.view_class, auth_views.PasswordResetConfirmView)
    def test_csrf(self):
        self.assertContains(self.response, 'csrfmiddlewaretoken')
    def test_contains_form(self):
        form = self.response.context.get('form')
        self.assertIsInstance(form, SetPasswordForm)
    def test_form_inputs(self):
        # The csrf token plus the two password fields.
        self.assertContains(self.response, '<input', 3)
        self.assertContains(self.response, 'type="password"', 2)
class InvalidPasswordResetConfirmTests(TestCase):
    """A token invalidated by a later password change shows the error page."""

    def setUp(self):
        user = User.objects.create_user(
            username='john', email='john@doe.com', password='123abcdef')
        uid = urlsafe_base64_encode(force_bytes(user.pk)).decode()
        token = default_token_generator.make_token(user)
        # Changing the password invalidates the token generated above.
        user.set_password('abcdef123')
        user.save()
        url = reverse('password_reset_confirm', kwargs={
            'uidb64': uid, 'token': token})
        self.response = self.client.get(url)

    def test_status_code(self):
        # FIX: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(self.response.status_code, 200)

    def test_html(self):
        password_reset_url = reverse('password_reset')
        self.assertContains(self.response, 'invalid password reset link')
        self.assertContains(
            self.response, 'href="{0}"'.format(password_reset_url))
class PasswordResetCompleteTests(TestCase):
    """The final 'password changed' page renders and resolves correctly."""

    def setUp(self):
        url = reverse('password_reset_complete')
        self.response = self.client.get(url)

    def test_status_code(self):
        # FIX: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(self.response.status_code, 200)

    def test_view_function(self):
        view = resolve('/reset/complete/')
        self.assertEqual(
            view.func.view_class, auth_views.PasswordResetCompleteView)
| [
"sw@cust.edu.cn"
] | sw@cust.edu.cn |
6fb808f6f31d31f7fc676d40edbf1d63c7dd7845 | 5c6d73f39886e7d295d903530b2a2c533fec3090 | /06 - ShellSort/shellsort.py | 32fd9484a593180669dae991b4fd7402b275e45e | [] | no_license | CrisCoutinho/PO-2019.2 | 9e9fd3a9ea851356f7575d5fab3e22154172eba3 | 0bbd5444f0543419c4d6efd00b88e9f383ccb4d8 | refs/heads/master | 2020-06-27T09:43:40.850712 | 2019-10-03T02:27:08 | 2019-10-03T02:27:08 | 199,916,962 | 0 | 0 | null | 2019-08-24T01:52:29 | 2019-07-31T19:28:49 | Python | UTF-8 | Python | false | false | 3,592 | py | # -*- coding: utf-8 -*-
"""shellSort.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/11bTldwNT-UZwRVJllL5xAmMshbCqmP9r
"""
import numpy as np
from random import shuffle, randint
import timeit
import matplotlib as mpl
import matplotlib.pyplot as plt
def shellSort(elementList):
    """Sort `elementList` in place, ascending, using Shell's original gap
    sequence (n/2, n/4, ..., 1)."""
    gap = len(elementList) // 2
    while gap > 0:
        # Gapped insertion sort: every gap-th slice ends up ordered.
        for idx in range(gap, len(elementList)):
            current = elementList[idx]
            pos = idx
            while pos >= gap and elementList[pos - gap] > current:
                elementList[pos] = elementList[pos - gap]
                pos -= gap
            elementList[pos] = current
        gap //= 2
# Build the list [tamanho, tamanho-1, ..., 1] (worst-case input for sorting).
# NOTE(review): this definition is shadowed by an identical `listInv` defined
# further down in this file; only the later one is effective at runtime.
def listInv(tamanho):
    lista =[]
    for i in range(tamanho, 0, -1):
        lista.append(tamanho)
        tamanho = tamanho - 1
    return lista
def drawGraph(x,y,yInv,XAxis = "Lista de Numeros", YAxis = "Tempo de ordenação"):
    """Plot the random-case (blue) and inverted-case (red) timing curves and
    save the figure to a file named after the YAxis label."""
    figure = plt.figure(figsize=(10, 8))
    axes = figure.add_subplot(111)
    axes.plot(x, y, color='blue', label="Aleatorio")
    axes.plot(x, yInv, color='red', label="Invertida")
    axes.legend(bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
    plt.ylabel(YAxis)
    plt.xlabel(XAxis)
    figure.savefig(YAxis)
    plt.show()
def listInv(tamanho):
    """Return [tamanho, tamanho-1, ..., 1] — a descending (worst-case) run."""
    return list(range(tamanho, 0, -1))
def generateList(tam):
    """Return the integers 1..tam in random order."""
    values = list(range(1, tam + 1))
    shuffle(values)
    return values
#sys.setrecursionlimit(10**6)
# X-axis tick labels: element counts expressed in thousands (100k..2M below).
listValueGraph = [100 ,200 , 400, 500, 1000, 2000]
#~~~~~~~~~~ Tests ~~~~~~~~~~~#
# Input sizes under test.
value100k = 100000
value200k = 200000
value400k = 400000
value500k = 500000
value1M = 1000000
value2M = 2000000
#~~~~~~~~~~ List Inverted Case ~~~~~~~~~~~#
# Worst-case inputs: strictly descending lists.
listInvertedCase100k = listInv(value100k)
listInvertedCase200k = listInv(value200k)
listInvertedCase400k = listInv(value400k)
listInvertedCase500k = listInv(value500k)
listInvertedCase1M = listInv(value1M)
listInvertedCase2M = listInv(value2M)
listQuestionInvertedCase = [listInvertedCase100k,listInvertedCase200k,
                listInvertedCase400k,listInvertedCase500k,
                listInvertedCase1M, listInvertedCase2M]
timeSortInvertedCase = []
#~~~~~~~~~~ Random List Case ~~~~~~~~~~~#
# Average-case inputs: shuffled permutations of 1..n.
listRandomCase100k = generateList(value100k)
listRandomCase200k = generateList(value200k)
listRandomCase400k = generateList(value400k)
listRandomCase500k = generateList(value500k)
listRandomCase1M = generateList(value1M)
listRandomCase2M = generateList(value2M)
listQuestionRandomCase = [listRandomCase100k,listRandomCase200k,
                          listRandomCase400k, listRandomCase500k,
                          listRandomCase1M, listRandomCase2M]
timeSortRandomCase = []
# Time one shellSort run per size; the lists are interpolated into the timeit
# statement source, so each statement literal is very large (slow to build).
for i in range(6):
    timeSortInvertedCase.append(timeit.
    timeit("shellSort({})".format(listQuestionInvertedCase[i]),
    setup="from __main__ import shellSort, listInv, generateList",number = 1))
    timeSortRandomCase.append(timeit.
    timeit("shellSort({})".format(listQuestionRandomCase[i]),
    setup="from __main__ import shellSort, listInv, generateList",number = 1))
    print(i)
# Plot inverted (red) vs random (blue) timings and save the figure.
drawGraph(listValueGraph,
          timeSortInvertedCase,
          timeSortRandomCase,
          XAxis="Número de elementos em milhares",
          YAxis="Tempo de ordenação em Seg")
"criscoutinho44@gmail.com"
] | criscoutinho44@gmail.com |
0fbf2c922fd8bafbe3bd8cd540490a9f01f2f600 | d627a394ab3599d680eb9ee52c675653cfb62dc6 | /nexabots-master/src/algos/PG/supervised_model_learning.py | f628c6ae44920532fe3432e2523c049ba45d7bb3 | [] | no_license | i7CDrAlfAc/grewRL | e98b0b1cd189f04faf648688532448a19e5236fd | 387d8f205380b94a88c076f61aaae60c413b926d | refs/heads/master | 2023-01-02T05:31:33.672202 | 2020-10-26T09:46:46 | 2020-10-26T09:46:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,687 | py | import os
import sys
import numpy as np
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import time
import src.my_utils as my_utils
import src.policies as policies
import random
import string
import socket
import pybullet as p
def train(env, model, params):
    """Fit `model` as a one-step dynamics model: (state, action) -> next state.

    Rollouts are collected with random Gaussian actions; after
    params["batchsize"] episodes the accumulated transitions are regressed
    with an MSE loss. NOTE(review): assumes `env` follows the gym API
    (reset/step returning (obs, reward, done, info)) — confirm.
    """
    model_optim = T.optim.Adam(model.parameters(), lr=params["model_lr"], weight_decay=params["weight_decay"])
    mse = T.nn.MSELoss()

    batch_states = []
    batch_actions = []
    batch_new_states = []
    batch_ctr = 0

    for i in range(params["iters"]):
        s_0 = env.reset()
        done = False
        step_ctr = 0
        latent_var = env.get_latent_label()

        while not done:
            # Sample a random exploration action.
            action = np.random.randn(env.act_dim)

            # FIX: `action` is already a numpy array; the original called
            # action.squeeze(0).numpy(), but ndarray has no .numpy() method.
            s_1, _, done, _ = env.step(action)
            step_ctr += 1

            if params["animate"]:
                p.removeAllUserDebugItems()
                # FIX: the original interpolated an undefined `prediction`.
                p.addUserDebugText("sphere mass: {0:.3f}".format(env.mass), [0, 0, 2])
                env.render()

            # Record transition. FIX: actions are converted to tensors here;
            # the original appended raw numpy arrays, which T.cat() rejects.
            batch_states.append(my_utils.to_tensor(s_0, True))
            batch_actions.append(my_utils.to_tensor(action, True))
            batch_new_states.append(my_utils.to_tensor(s_1, True))

            s_0 = s_1

        # Just completed an episode
        batch_ctr += 1

        # If enough data gathered, then perform update
        if batch_ctr == params["batchsize"]:
            batch_states = T.cat(batch_states)
            batch_actions = T.cat(batch_actions)
            batch_new_states = T.cat(batch_new_states)

            next_states_pred = model(batch_states, batch_actions)
            prediction_loss = mse(next_states_pred, batch_new_states)

            model_optim.zero_grad()
            prediction_loss.backward()
            model_optim.step()

            print("Episode {}/{}, prediction_loss: {}".
                  format(i, params["iters"], prediction_loss / params["batchsize"], ))

            # Finally reset all batch lists
            batch_ctr = 0
            batch_states = []
            batch_actions = []
            batch_new_states = []

        if i % 300 == 0 and i > 0:
            # FIX: the original referenced an undefined `policy` here; this
            # function only has `model`, so that is what gets checkpointed.
            sdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                "agents/{}_{}_{}_pg.p".format(env.__class__.__name__, model.__class__.__name__, params["ID"]))
            T.save(model, sdir)
            print("Saved checkpoint at {} with params {}".format(sdir, params))
if __name__=="__main__":
T.set_num_threads(1)
ID = ''.join(random.choices(string.ascii_uppercase + string.digits, k=3))
params = {"iters": 500000, "batchsize": 20, "gamma": 0.99, "model_lr": 0.0007, "weight_decay" : 0.0001,
"animate": True, "train" : False,
"note" : "Supervised model learning", "ID" : ID}
if socket.gethostname() == "goedel":
params["animate"] = False
params["train"] = True
from src.envs.cartpole_pbt.hangpole import HangPoleBulletEnv
env = HangPoleBulletEnv(animate=params["animate"], latent_input=True, action_input=False)
# Test
if params["train"]:
print("Training")
model = policies.NN_PG(env, 16, obs_dim=env.obs_dim, act_dim=env.obs_dim)
print(params, env.obs_dim, env.act_dim, env.__class__.__name__, model.__class__.__name__)
train(env, model, params)
else:
print("Testing")
policy_path = 'agents/HangPoleBulletEnv_NN_PG_ETX_pg.p'
policy = T.load(policy_path)
env.test(policy, slow=params["animate"], seed=1338)
print(policy_path) | [
"69282697+junpilan@users.noreply.github.com"
] | 69282697+junpilan@users.noreply.github.com |
704361a75a15b4ff7147ee4334cde26f9da2f4dd | 180beda50f31031bd3ba1668067bdb73fc1a7686 | /website/members/management/commands/generate_member_invoices.py | e73095aaebe1d829fb36e519f1585796e73b12cd | [
"MIT"
] | permissive | eamanu/asoc_members | 053a05563a20ff4cafd09020367f3d60f149392e | bf2e99e9c63c60a59bdfd10ca1812d78851cbde6 | refs/heads/master | 2020-11-26T14:20:46.857545 | 2020-01-06T12:57:30 | 2020-01-06T12:57:30 | 229,101,268 | 0 | 0 | MIT | 2019-12-19T17:01:15 | 2019-12-19T17:01:14 | null | UTF-8 | Python | false | false | 7,788 | py | import datetime
import os
from django.conf import settings
from django.core.mail import EmailMessage
from django.core.management.base import BaseCommand
from django.db.models import Max
from members import logic
from members.models import Quota, Person, Payment, Member, PaymentStrategy
from . import _afip, _gdrive
# Only payments made on/after this timestamp are auto-invoiced.
INVOICES_FROM = '2018-08-01 00:00+03'
# Argentina local time (UTC-3), used to render payment timestamps.
GMTminus3 = datetime.timezone(datetime.timedelta(hours=-3))
# mail stuff
MAIL_SUBJECT = "Factura por pago de cuota(s) a la Asociación Civil Python Argentina"
# Body template (Spanish); {payment_date} is filled in by _send_mail.
MAIL_TEXT = """\
Hola!
Adjunta va la factura por el pago hecho en fecha {payment_date:%Y-%m-%d}.
¡Gracias! Saludos,
--
. Lalita
.
Asociación Civil Python Argentina
http://ac.python.org.ar/
(claro, este mail es automático, soy une bot, pero contestá el mail sin problemas que
le va a llegar al humane correspondiente)
"""
# MIME type for the attached invoice file.
PDF_MIMETYPE = 'application/pdf'
def _send_mail(payment_date, recipient, attach_path):
    """Email the invoice PDF at `attach_path` to `recipient`."""
    body = MAIL_TEXT.format(payment_date=payment_date)
    message = EmailMessage(MAIL_SUBJECT, body, settings.EMAIL_FROM, [recipient])
    with open(attach_path, "rb") as fh:
        message.attach(os.path.basename(attach_path), fh.read(), PDF_MIMETYPE)
    message.send()
class Command(BaseCommand):
    """Management command: emit AFIP invoices for pending member payments.

    For each not-yet-invoiced payment it assigns an invoice number, asks AFIP
    to authorize the invoice, uploads the resulting PDF to Google Drive and
    emails it to the member.
    """
    help = "Generate the missing invoices"
    def add_arguments(self, parser):
        # --limit: max payments to process; --invoice-date forces limit=1.
        parser.add_argument('--limit', type=int, nargs='?', default=1)
        parser.add_argument(
            '--invoice-date', type=str, nargs='?', help="Invoice date (%Y-%m-%d), forces limit=1")
    def handle(self, *args, **options):
        """Collect billing records, generate invoices via AFIP, then upload
        and mail each authorized invoice."""
        limit = options['limit']
        invoice_date = options['invoice_date']
        if invoice_date is None:
            invoice_date = datetime.date.today()
        else:
            invoice_date = datetime.datetime.strptime(invoice_date, "%Y-%m-%d").date()
            limit = 1
            print("Forcing invoice date to {} (also limit=1)".format(invoice_date))
        records = []
        # check AFIP
        _afip.verify_service()
        # get the greatest invoice number used (once, will keep updated later)
        _max_invoice_number_query = Payment.objects.aggregate(Max('invoice_number'))
        max_invoice_number = _max_invoice_number_query['invoice_number__max']
        print("Found max invoice number {}".format(max_invoice_number))
        # get payments after we started automatically that still have no invoice generated
        payments_per_invoice = {}
        persons_per_invoice = {}
        payments = (
            Payment.objects.filter(timestamp__gte=INVOICES_FROM, invoice_ok=False)
            .exclude(strategy__platform=PaymentStrategy.CREDIT)
            .order_by('timestamp', 'pk').all()
        )
        print("Found {} payments to process".format(len(payments)))
        if len(payments) > limit:
            payments = payments[:limit]
            print(" truncating to {}".format(limit))
        for payment in payments:
            print("Generating invoice for payment", payment)
            record = {
                'invoice_date': invoice_date,
            }
            records.append(record)
            # get the related member (if None, or multiple, still not supported!)
            _members = Member.objects.filter(patron=payment.strategy.patron).all()
            assert len(_members) == 1, "multiple or no members for the patron is not supported"
            member = _members[0]
            # only process payments for normal members (benefactor members get invoices done
            # by hand)
            person = member.entity
            if isinstance(person, Person):
                print(" person found", person)
            else:
                print(" IGNORING payment, member {} is not a person: {}".format(member, person))
                continue
            # if payment still doesn't have a number, add one to latest and save;
            # in any case, use it
            if not payment.invoice_number:
                max_invoice_number += 1
                payment.invoice_number = max_invoice_number
                payment.invoice_spoint = settings.AFIP['selling_point']
                payment.save()
                print(" using new invoice number", payment.invoice_number)
            else:
                print(" using already stored invoice number", payment.invoice_number)
                assert payment.invoice_spoint == settings.AFIP['selling_point']
            payments_per_invoice[payment.invoice_number] = payment
            record['invoice'] = payment.invoice_number
            # we bill one item, for the whole amount: "3 quotas for $300", instead of billing
            # 3 x "1 quota for $100", which would be problematic if the paid amount is
            # not exactly 300
            record['amount'] = payment.amount
            record['quantity'] = 1
            # get all billing data from the person
            persons_per_invoice[payment.invoice_number] = person
            record['dni'] = person.document_number
            record['fullname'] = person.full_name
            record['address'] = person.street_address
            record['city'] = person.city
            record['zip_code'] = person.zip_code
            record['province'] = person.province
            tstamp_argentina = payment.timestamp.astimezone(GMTminus3)
            record['payment_comment'] = "Pago via {} ({:%Y-%m-%d %H:%M})".format(
                payment.strategy.platform_name, tstamp_argentina)
            # get quotas for the payment; we don't show the period in the description
            # as there's a specific field for that
            quotas = list(Quota.objects.filter(payment=payment).order_by('year', 'month').all())
            assert quotas
            if len(quotas) == 1:
                description = "1 cuota social"
            else:
                description = "{} cuotas sociales".format(len(quotas))
            record['description'] = description
            # service period: first day of the first quota's month through the
            # last day of the last quota's month
            from_quota = quotas[0]
            from_day = datetime.date(from_quota.year, from_quota.month, 1)
            to_quota = quotas[-1]
            ny, nm = logic.increment_year_month(to_quota.year, to_quota.month)
            to_day = datetime.date(ny, nm, 1) - datetime.timedelta(days=1)
            record['service_date_from'] = from_day.strftime("%Y%m%d")
            record['service_date_to'] = to_day.strftime("%Y%m%d")
            print(" found {} quota(s) ({} - {})".format(
                len(quotas), record['service_date_from'], record['service_date_to']))
        try:
            results = _afip.generate_invoices(records)
        except Exception:
            print("PROBLEMS generating invoices with records", records)
            raise
        # save the results for the generated ok invoices and send the proper mails
        for invoice_number, result in sorted(results.items()):
            print("Post-processing invoice {} at {}".format(
                invoice_number, result.get('pdf_path')))
            if not result['invoice_ok']:
                print(" WARNING: invoice NOT authorized ok")
                continue
            payment = payments_per_invoice[invoice_number]
            payment.invoice_ok = True
            payment.save()
            # upload the invoice to google drive
            _gdrive.upload_invoice(result['pdf_path'], invoice_date)
            print(" uploaded to gdrive OK")
            # send the invoice by mail
            person = persons_per_invoice[invoice_number]
            _send_mail(payment.timestamp, person.email, result['pdf_path'])
            print(" sent by mail OK")
            # invoice uploaded to gdrive and sent ok, don't need it here anymore
            os.remove(result['pdf_path'])
| [
"facundo@taniquetil.com.ar"
] | facundo@taniquetil.com.ar |
0497c5f2e6e0a19f3b1ccc1263cd6a238f005f14 | e7c8edb68c515260a0c45038eed0e2d0aaa7a2d6 | /models/resnet.py | f1cd020486690115c66617621de0bb5bef4bf993 | [] | no_license | yangdonghun3/OOD_detecion_based_on_DML | b7a04b0964a7d1126e050b68601249bebe3c4fde | 9be8bd66e1dfdf8a6072d50700c8b607dba6f3b6 | refs/heads/master | 2022-12-01T22:58:09.414447 | 2020-08-22T05:36:03 | 2020-08-22T05:36:03 | 289,215,814 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,936 | py | import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution, padding 1, no bias (the following BatchNorm has one)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Two 3x3 convs with an identity/projection shortcut (post-activation)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut only when spatial size or channel count changes.
        if stride == 1 and in_planes == self.expansion * planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes)
            )

    def forward(self, x):
        y = self.bn2(self.conv2(F.relu(self.bn1(self.conv1(x)))))
        return F.relu(y + self.shortcut(x))
class PreActBlock(nn.Module):
    '''Pre-activation version of the BasicBlock.'''
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        # Projection shortcut only when the shape changes; otherwise the
        # attribute is simply absent (checked via hasattr in forward()).
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            )
    def forward(self, x):
        # BN+ReLU come *before* the convs (pre-activation ordering).
        out = F.relu(self.bn1(x))
        # The projection (if any) taps the pre-activated tensor.
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        out += shortcut
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck (post-activation, ResNet v1)."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        width_out = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, width_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width_out)
        # Projection shortcut only when spatial size or channel count changes.
        if stride == 1 and in_planes == width_out:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, width_out, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(width_out)
            )

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + self.shortcut(x)
        return F.relu(y)
class PreActBottleneck(nn.Module):
    '''Pre-activation version of the original Bottleneck module.'''
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        # Projection shortcut only when the shape changes; otherwise the
        # attribute is simply absent (checked via hasattr in forward()).
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            )
    def forward(self, x):
        # BN+ReLU precede each conv (pre-activation ordering).
        out = F.relu(self.bn1(x))
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.conv3(F.relu(self.bn3(out)))
        out += shortcut
        return out
class ResNet(nn.Module):
    """Configurable ResNet backbone with a CIFAR-style 3x3 stem.

    When ``model_flag`` is True, forward() returns the *flattened stage-4
    feature map* (average pooling and the linear classifier are skipped);
    otherwise it returns class logits.
    """
    def __init__(self, block, num_blocks, num_classes=10, input_channels=3, model_flag=True):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = conv3x3(input_channels, 64)
        self.bn1 = nn.BatchNorm2d(64)
        # Four stages; channels double and spatial size halves from stage 2 on.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)
        self.model_flag = model_flag
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        if self.model_flag == True:
            # Feature-extractor mode: return flattened conv features,
            # skipping pooling and the classifier entirely.
            return out.view(out.size(0), -1)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        y = self.linear(out)
        return y
    # function to extact the multiple features
    def feature_list(self, x):
        # Returns (logits, [activations after the stem and after each stage]).
        out_list = []
        out = F.relu(self.bn1(self.conv1(x)))
        out_list.append(out)
        out = self.layer1(out)
        out_list.append(out)
        out = self.layer2(out)
        out_list.append(out)
        out = self.layer3(out)
        out_list.append(out)
        out = self.layer4(out)
        out_list.append(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        y = self.linear(out)
        return y, out_list
    # function to extact a specific feature
    def intermediate_forward(self, x, layer_index):
        # layer_index selects how many stages to run (1..4); any other value
        # returns the stem activation only.
        out = F.relu(self.bn1(self.conv1(x)))
        if layer_index == 1:
            out = self.layer1(out)
        elif layer_index == 2:
            out = self.layer1(out)
            out = self.layer2(out)
        elif layer_index == 3:
            out = self.layer1(out)
            out = self.layer2(out)
            out = self.layer3(out)
        elif layer_index == 4:
            out = self.layer1(out)
            out = self.layer2(out)
            out = self.layer3(out)
            out = self.layer4(out)
        return out
    # function to extact the penultimate features
    #def penultimate_forward(self, x):
    #    out = F.relu(self.bn1(self.conv1(x)))
    #    out = self.layer1(out)
    #    out = self.layer2(out)
    #    out = self.layer3(out)
    #    penultimate = self.layer4(out)
    #    out = F.avg_pool2d(penultimate, 4)
    #    out = out.view(out.size(0), -1)
    #    y = self.linear(out)
    #    return y, penultimate
def ResNet18(num_c=10):
    """ResNet-18 (pre-activation blocks). FIX: num_c now defaults to 10, like
    the other factories; the old required positional still works."""
    return ResNet(PreActBlock, [2, 2, 2, 2], num_classes=num_c)
def ResNet34(input_channels=3, num_c=10, model_flag=True):
    """ResNet-34. FIX: model_flag was accepted but silently dropped; it is
    now forwarded to ResNet (default True matches the old behavior)."""
    return ResNet(BasicBlock, [3, 4, 6, 3], input_channels=input_channels,
                  num_classes=num_c, model_flag=model_flag)
def ResNet50(num_c=10):
    """ResNet-50. num_c added for consistency with ResNet18/34; the default
    (10) matches ResNet's own num_classes default, so callers are unaffected."""
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_c)
def ResNet101(num_c=10):
    """ResNet-101. num_c added for consistency with ResNet18/34; the default
    (10) matches ResNet's own num_classes default, so callers are unaffected."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_c)
def ResNet152(num_c=10):
    """ResNet-152. num_c added for consistency with ResNet18/34; the default
    (10) matches ResNet's own num_classes default, so callers are unaffected."""
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_c)
def test():
    """Smoke test: forward one random CIFAR-sized image through ResNet-18.

    With model_flag=True (the default) this prints the flattened feature size,
    not a logits size.
    """
    net = ResNet18(10)  # FIX: ResNet18 requires num_c; the bare call raised TypeError
    y = net(Variable(torch.randn(1, 3, 32, 32)))
    print(y.size())
"yangdonghun3@gmail.com"
] | yangdonghun3@gmail.com |
cc327c5519fca2b6e3bb2c6477b0ea09ac2b8c41 | 065841aa0cf291d55d12bc06fd26dfe39b3f955a | /Chapter3/Code/figures.py | edee216176583b0769078ea75e48a7905a209038 | [
"CC-BY-4.0",
"MIT",
"BSD-2-Clause"
] | permissive | nabobalis/PhDThesis | d40131ae7a294aa8adaf6ecb2cb84d8110d922ef | 6824ba374f1e65d5a21566d0b84c6a3102273b67 | refs/heads/master | 2020-04-14T20:49:02.077363 | 2018-02-19T16:32:18 | 2018-02-19T16:32:18 | 30,153,618 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 22:35:57 2015
@author: nabobalis
"""
import numpy as np
import matplotlib.pyplot as plt
import astropy.io as io
file_1 = '/data/Data/1999_sunspot.fits' #SVST
file_2 = '/data/Data/aligned_2005.fits' #DOT 1
file_3 = '/data/Data/aligned_2008.fits' # DOT 2
data_1 = 1
data_2 = 1
data_3 = 1 | [
"nabil.freij@gmail.com"
] | nabil.freij@gmail.com |
0bb0bc6f1d8c8029f5c932d085a24c1217164ab6 | 36c41c0e7e844cb668ba578e70e0e70270825e84 | /Calculator.py | 22078ba171e3c910124ce55a0546b54fc7e8a278 | [
"MIT"
] | permissive | amirulhakiim/ASBF-Calculator | aafe35158f3f0b8af475c29dd9b352d78c7bcff7 | bcf29d59944429035cb1837ae28544b1189c9bc1 | refs/heads/master | 2020-12-06T04:48:38.665623 | 2020-01-15T16:34:54 | 2020-01-15T16:34:54 | 232,348,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from Compound import compound
from Amortization_Schedule import amortization
def calculate(principal,asb_return,loan_interest,loan_tenure,year_terminate):
compounded_interest = compound(principal,year_terminate,asb_return)
balance,payment,ppmt,ipmt = amortization(principal,loan_interest,loan_tenure,year_terminate)
maturity = compounded_interest - balance[int(year_terminate*12) - 1]
return maturity,payment,ppmt,ipmt
| [
"amirulhakimazmi@gmail.com"
] | amirulhakimazmi@gmail.com |
cfc446794e99fe545d4c7bdfa4d0c4baea0bbaae | c10bec10526866fd5c92f3591373d4079342f83d | /Demo/Demo.py | ebb61e7042a3a018465997b40e7d5bab67268c58 | [] | no_license | PacYang/flask_demo | b943788e00e01cd944ca938d9b6af15dbb1f5d27 | d794c78ee628f15dd7ea4650615f3550818a36ce | refs/heads/master | 2021-04-07T22:18:41.395264 | 2020-03-20T09:13:44 | 2020-03-20T09:13:44 | 248,712,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from flask import Flask,url_for
app = Flask(__name__)
@app.route('/<a>')
def hello_world(a):
return a
if __name__ == '__main__':
app.run()
with app.test_request_context():
print(url_for('hello_world', _external=True, a=123))
| [
"larry.yang@yingzt.com"
] | larry.yang@yingzt.com |
534e978dd076d4b6e56f9243326f617ea023c224 | 6e39ce7b16cbbde696dcb76a4f4640cc5ee63b58 | /text_extraction_wiki.py | f81752b90fac8ad47275062b38fa95e2412e55b8 | [] | no_license | kcMaroju/Political-Sentiment-Analysis | 814642cc504f53ce7652295f4bda32b21cc18571 | 9d14c538511fedc69b97404d56dda53ea2e80471 | refs/heads/master | 2022-10-01T06:26:46.014153 | 2020-06-05T14:41:38 | 2020-06-05T14:41:38 | 206,795,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 14 11:32:20 2019
@author: Mouni
"""
import wikipedia
def get_wiki_text(text):
    """Return a 9-sentence English Wikipedia summary for `text`."""
    wikipedia.set_lang("en")
    summary = wikipedia.summary(text, sentences=9)
    return summary
| [
"noreply@github.com"
] | noreply@github.com |
1c8a899e74c97c05daf1bb397f564d13820e33e7 | 83e384c20365dafcc24e169d6983969ddbd36927 | /python/baekjoon/Simulation/BOJ1062가르침.py | fd7b9562909aa65bf00ae2d9fd5f96311f3f0a32 | [] | no_license | daehyun1023/Algorithm | e6997f0f36db973ecd5bc54ea4450a08a0f3406c | a50caa463ad394f32113fc19f2120caaa701212f | refs/heads/master | 2023-07-04T03:35:14.996565 | 2021-08-09T13:47:49 | 2021-08-09T13:47:49 | 332,384,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | N, K = map(int,input().split())
result = 0
chars = ['a', 'n', 't', 'i', 'c']
plus_chars = []
input_strs = []
temp = chars.copy()
for i in range(N):
input_str = input()
input_strs.append(input_str)
for input_char in input_str:
if input_char not in chars and input_char not in plus_chars:
plus_chars.append(input_char)
size = len(plus_chars)
check = [False] * size
K = K - 5
def getCnt(cnt, temp):
for input_str in input_strs:
for input_char in input_str:
if input_char not in temp:
cnt -= 1
break
return cnt
def comb(depth, x):
global result
if depth == K or (K > len(plus_chars) and depth == len(plus_chars)):
temp = chars.copy()
for i in range(size):
if check[i] == True:
temp.append(plus_chars[i])
result = max(result, getCnt(N, temp))
if depth == K:
return
for i in range(x, size):
if check[i] == False:
check[i] = True
comb(depth+1, i)
check[i] = False
if K < 0 :
result = 0
else:
comb(0,0)
print(result)
| [
"daehyun1023@naver.com"
] | daehyun1023@naver.com |
3cf43fa8092e181dca265178db23f042cb43c200 | 8e304f1291480db18b9727efa61647b369531172 | /csvkit/convert/js.py | 8757c65a2bfbc280bab8fc78a153f0f7dcf57e4c | [
"MIT"
] | permissive | zviri/csvkit | 4439fff0e376d089f89420fabca245c25eb12dc5 | 39f5d3b6c7d6eaaf145e7e01fa247292763da16d | refs/heads/master | 2021-01-12T20:54:42.673449 | 2014-09-04T14:11:06 | 2014-09-04T14:11:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | #!/usr/bin/env python
import json
import six
from csvkit import CSVKitWriter
def parse_object(obj, path=''):
    """
    Recursively flatten a JSON structure into a dict mapping slash-separated
    paths to leaf values.

    Inspired by JSONPipe (https://github.com/dvxhouse/jsonpipe).
    """
    if isinstance(obj, dict):
        # FIX: dict.iteritems() does not exist on Python 3; use six's helper,
        # consistent with the file's existing six usage.
        iterator = six.iteritems(obj)
    elif isinstance(obj, (list, tuple)):
        iterator = enumerate(obj)
    else:
        # Leaf value: the accumulated path (sans trailing slash) keys it.
        return { path.strip('/'): obj }

    d = {}

    for key, value in iterator:
        key = six.text_type(key)
        d.update(parse_object(value, path + key + '/'))

    return d
def json2csv(f, key=None, **kwargs):
    """
    Convert a JSON document into CSV format.
    The top-level element of the input must be a list or a dictionary. If it is a dictionary, a key must be provided which is an item of the dictionary which contains a list.
    """
    document = f.read()
    js = json.loads(document)
    if isinstance(js, dict):
        if not key:
            raise TypeError('When converting a JSON document with a top-level dictionary element, a key must be specified.')
        # NOTE(review): raises KeyError (not TypeError) if `key` is absent.
        js = js[key]
    if not isinstance(js, list):
        raise TypeError('Only JSON documents with a top-level list element are able to be converted (or a top-level dictionary if specifying a key).')
    field_set = set()
    flat = []
    # Flatten each object into path->value pairs, collecting all column names.
    for obj in js:
        flat.append(parse_object(obj))
    for obj in flat:
        field_set.update(obj.keys())
    # Columns are emitted in sorted order for a deterministic header.
    fields = sorted(list(field_set))
    o = six.StringIO()
    writer = CSVKitWriter(o)
    writer.writerow(fields)
    # Missing fields are written as None (empty cells).
    for i in flat:
        row = []
        for field in fields:
            if field in i:
                row.append(i[field])
            else:
                row.append(None)
        writer.writerow(row)
    output = o.getvalue()
    o.close()
    return output
| [
"staringmonkey@gmail.com"
] | staringmonkey@gmail.com |
0d3feee1544aa647f2fbf68304d59c190370ecc8 | 7b0548e9425b777baa383e89c8a2e97be466d4c2 | /sort/quicksort.py | 4344d013facb73294d6f43e30df3eb434b4f4ab1 | [] | no_license | FlorianLehmann/programmation_fine | 8634046fa631e72f9c22355852ee4304e6c8c9d2 | ccfa7c05707852083de50acc2855c371b3608e13 | refs/heads/master | 2020-03-29T21:44:53.376065 | 2018-10-10T07:45:08 | 2018-10-10T07:45:08 | 150,385,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,536 | py | import random
# Pivot strategy: always use the first index of the partition range.
def always_low_value(lst, low, high):
    return low
# Pivot strategy: always use the middle index of the partition range.
def always_mid_element(lst, low, high):
    return (high + low) // 2
# Pivot strategy: always use the last index of the partition range.
def always_high_value(lst, low, high):
    return high
# Pivot strategy: pick a uniformly random index in [low, high].
def random_pivot(lst, low, high):
    return random.randrange(low, high + 1)
def find_pivot_by_median_of_3_values(lst, low, high):
    """Return whichever of the indices low, mid, high holds the median of
    the three sampled values (median-of-three pivot selection)."""
    mid = low + (high - low) // 2
    a, b, c = lst[low], lst[mid], lst[high]
    if b < a:
        if a < c:
            return low
        return mid if b > c else high
    if a > c:
        return low
    return high if b > c else mid
def find_pivot_by_median_of_5_values(lst, low, high):
q1, mid, q3 = low + (high - low) // 4, low + (high - low) // 2, low + (high - low) * 3 // 4
if lst[low] < lst[q1]:
if lst[q1] < lst[mid]:
if lst[q3] < lst[high]:
if lst[high] < lst[mid]:
if lst[high] > lst[q1]:
if lst[q3] > lst[q1]:
return q3
return q1
if lst[high] > lst[low]:
return high
return low
if lst[mid] < lst[q3]:
return mid
if lst[q3] > lst[q1]:
return q3
return q1
if lst[q3] < lst[mid]:
if lst[q3] > lst[q1]:
if lst[high] > lst[q1]:
return high
return q1
if lst[q3] > lst[low]:
return q3
return low
if lst[mid] < lst[high]:
return mid
if lst[high] > lst[q1]:
return high
return q1
if lst[mid] > lst[low]:
if lst[q3] < lst[high]:
if lst[high] < lst[q1]:
if lst[high] > lst[mid]:
if lst[q3] > lst[mid]:
return q3
return mid
if lst[high] > lst[low]:
return high
return low
if lst[q1] < lst[q3]:
return q1
if lst[q3] > lst[mid]:
return q3
return mid
if lst[q3] < lst[q1]:
if lst[q3] > lst[mid]:
if lst[high] > lst[mid]:
return high
return mid
if lst[q3] > lst[low]:
return q3
return low
if lst[q1] < lst[high]:
return q1
if lst[high] > lst[mid]:
return high
return mid
if lst[q3] < lst[high]:
if lst[high] < lst[q1]:
if lst[high] > lst[low]:
if lst[q3] > lst[low]:
return q3
return low
if lst[high] > lst[mid]:
return high
return mid
if lst[q1] < lst[q3]:
return q1
if lst[q3] > lst[low]:
return q3
return low
if lst[q3] < lst[q1]:
if lst[q3] > lst[low]:
if lst[high] > lst[low]:
return high
return low
if lst[q3] > lst[mid]:
return q3
return mid
if lst[q1] < lst[high]:
return q1
if lst[high] > lst[low]:
return high
return low
if lst[low] < lst[mid]:
if lst[q3] < lst[high]:
if lst[high] < lst[mid]:
if lst[high] > lst[low]:
if lst[q3] > lst[low]:
return q3
return low
if lst[high] > lst[q1]:
return high
return q1
if lst[mid] < lst[q3]:
return mid
if lst[q3] > lst[low]:
return q3
return low
if lst[q3] < lst[mid]:
if lst[q3] > lst[low]:
if lst[high] > lst[low]:
return high
return low
if lst[q3] > lst[q1]:
return q3
return q1
if lst[mid] < lst[high]:
return mid
if lst[high] > lst[low]:
return high
return low
if lst[mid] > lst[q1]:
if lst[q3] < lst[high]:
if lst[high] < lst[low]:
if lst[high] > lst[mid]:
if lst[q3] > lst[mid]:
return q3
return mid
if lst[high] > lst[q1]:
return high
return q1
if lst[low] < lst[q3]:
return low
if lst[q3] > lst[mid]:
return q3
return mid
if lst[q3] < lst[low]:
if lst[q3] > lst[mid]:
if lst[high] > lst[mid]:
return high
return mid
if lst[q3] > lst[q1]:
return q3
return q1
if lst[low] < lst[high]:
return low
if lst[high] > lst[mid]:
return high
return mid
if lst[q3] < lst[high]:
if lst[high] < lst[low]:
if lst[high] > lst[q1]:
if lst[q3] > lst[q1]:
return q3
return q1
if lst[high] > lst[mid]:
return high
return mid
if lst[low] < lst[q3]:
return low
if lst[q3] > lst[q1]:
return q3
return q1
if lst[q3] < lst[low]:
if lst[q3] > lst[q1]:
if lst[high] > lst[q1]:
return high
return q1
if lst[q3] > lst[mid]:
return q3
return mid
if lst[low] < lst[high]:
return low
if lst[high] > lst[q1]:
return high
return q1
pivot_functions = [
always_low_value,
always_mid_element,
always_high_value,
random_pivot,
find_pivot_by_median_of_3_values,
find_pivot_by_median_of_5_values
]
def partition(lst, low, high, pivot):
lst[high], lst[pivot] = lst[pivot], lst[high]
pivot_value = lst[high]
pointer = low
for i in range(low, high):
if (i <= pivot and lst[i] <= pivot_value) or (i > pivot and lst[i] < pivot_value):
if i != pointer:
lst[i], lst[pointer] = lst[pointer], lst[i]
pointer += 1
lst[pointer], lst[high] = lst[high], lst[pointer]
return pointer
def quick_sort(lst, low=0, high=None, min_size=10, pivot_function=find_pivot_by_median_of_3_values):
if high is None:
high = len(lst) - 1
if (high - low) < min_size:
return insertion(lst, low, high)
if low < high:
pivot = pivot_function(lst, low, high)
pivot = partition(lst, low, high, pivot)
quick_sort(lst, low, pivot - 1, min_size, pivot_function)
quick_sort(lst, pivot + 1, high, min_size, pivot_function)
return lst
def insertion(numbers, start, end):
for i in range(start + 1, end + 1):
tmp = i
for j in reversed(range(start, i)):
if numbers[tmp] >= numbers[j]:
break
else:
numbers[tmp], numbers[j] = numbers[j], numbers[tmp]
tmp = tmp - 1
return numbers
| [
"alexandre.clement@etu.unice.fr"
] | alexandre.clement@etu.unice.fr |
10c0d2e568c19c1dc9f529a51c7b0580c65e028c | 582dea457827d7dd9a53ebbdc4b580a73224dd2e | /Problems/Animals/task.py | c4ac68bdef65b10d684325c998f19196af9348ac | [] | no_license | codewithgsp/hyperskill-rockpaperscissors | 9dda27eec17ec42fbe17f7d4af918b456d761e6e | 39310d99a6a73742c753728298a639c44ab6d2e1 | refs/heads/master | 2022-11-12T17:49:34.014770 | 2020-07-07T22:08:59 | 2020-07-07T22:08:59 | 277,372,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # read animals.txt
# and write animals_new.txt
file_read = open('animals.txt', 'r')
animals = [line.strip('\n') + ' ' for line in file_read]
file_read.close()
file_write = open('animals_new.txt', 'w')
file_write.writelines(animals)
file_write.close()
| [
"codewithgsp@gmail.com"
] | codewithgsp@gmail.com |
fe06e20d06a873edf3cee0576f8fde279fba7650 | bc76a0be48eb10df351c5d45a4eef2b4fcf47ea6 | /datas/categorize_bayes.py | a6a84a41b405f5ff0e331102f8e4e3d2dff4f6d4 | [] | no_license | BoomsJune/permission | 6ae829cc45b563af44fa4a62096eaad376b23339 | e4b8104fc003410b32a58f24e0998dc3018ef42a | refs/heads/master | 2022-12-12T22:48:28.268612 | 2019-05-05T01:41:09 | 2019-05-05T01:41:09 | 152,403,702 | 1 | 0 | null | 2022-06-21T21:29:21 | 2018-10-10T10:11:19 | Python | UTF-8 | Python | false | false | 4,779 | py | # -*- coding: utf-8 -*-
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.pipeline import Pipeline
from sklearn.externals import joblib
from sklearn.utils import shuffle
from sklearn import metrics
import collections
import datetime
from databases import get_data_from_sort
def get_best_param(pipeline):
"""
找到分类器最佳参数,2000数据大约需要2min
:param pipeline:
:return:
"""
parameters = {
'vect__max_df': (0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
'vect__max_features': (None, 50, 100, 500, 1000, 2000, 5000, 10000),
# 'tfidf__use_idf': (True, False),
'clf__alpha': (1, 0.1, 0.01, 0.001, 0.0001),
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
from time import time
t0 = time()
grid_search.fit(X_train, y_train)
print "done in %0.3fs" % (time() - t0)
print "Best score: %0.3f" % grid_search.best_score_
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print "\t%s: %r" % (param_name, best_parameters[param_name])
if __name__ == '__main__':
"""
starttime = datetime.datetime.now()
datas = get_data_from_sort()
print "sum datas count:", len(datas)
X = [] # 特征集
y = [] # 类别
# 统计词频
labels = [0, 1, 2, 3, 4] # 总类别
features_dict = {label: [] for label in labels}
for data in datas:
features_dict[data[1]] += data[0]
# 剪掉一些不要的词
for label, feature in features_dict.items():
counts = collections.Counter(feature).most_common()
for word, count in counts:
if count <= 1:
feature.remove(word)
# 组装
for data in datas:
words = features_dict[data[1]]
new_words = []
for w in data[0]:
if w in words:
new_words.append(w)
X.append(" ".join(new_words))
y.append(data[1])
X_train, y_train = shuffle(X, y) # 打乱数据顺序
# for index, x in enumerate(X_train):
# print y_train[index]
# print x
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.1, random_state=0)
pipeline = Pipeline([
('vect', TfidfVectorizer()),
# ('tfidf', TfidfTransformer()),
('clf', MultinomialNB()),
])
# get_best_param(pipeline)
#
# pipeline.set_params(clf__alpha=0.01, tfidf__use_idf=True, vect__max_df=0.2, vect__max_features=None)
pipeline.set_params(clf__alpha=0.0001, vect__max_df=0.4, vect__max_features=None)
pipeline.fit(X_train, y_train)
joblib.dump(pipeline, 'categorize_model.joblib') # 保存模型
y_pred = pipeline.predict(X_train)
print metrics.accuracy_score(y_train, y_pred)
print metrics.confusion_matrix(y_train, y_pred)
print "spend ",(datetime.datetime.now()-starttime)
y_pred = pipeline.predict(X_test)
print metrics.accuracy_score(y_test, y_pred)
print metrics.confusion_matrix(y_test, y_pred)
"""
pipeline = joblib.load("categorize_model.joblib")
#
X_test = [
"联合 到底 是 有 什么 问题",
"更新 后 开始 逃生 时 蓝屏 怎么办",
"非常 好玩",
"为什么 进不去",
"我 不知道 怎么 同居 谁 能 告诉 我",
"氪金",
"bug 投诉 十六 遍 网易 做 什么 四次 更新 bug 还 不修 动荡 之城 匹配 机制 什么 高战 营地 匹配 中 低 等级 营地 休闲 营地 匹配 高战 营地 什么 意思 娱乐 玩家 战争 玩家 划等号 战争 营地 匹配 休闲 营地 动荡 什么 意思 纯粹 吊打 搞 私服 吗 天天 开新服 生怕 别人 不 知道 私服 吗 人 去 新服 再 氪金 停服 民心 不 朋友 都 退游 退游 评论 1 谢谢 退游 人数 7",
"一个字 肝",
"还行",
"菜 玩 几个 小时",
"还 可以",
"优化 比较 差",
"还 可以 就是 服务器 真的 垃圾",
"问 一个 问题 什么 时候 联动",
"肝 快乐",
"问 一下 怎么 加 好友",
"希望 室友 可以 交易 啊",
"还 可以 氪金 才能 变强",
"很 棒棒",
]
y_category = pipeline.predict(X_test)
# # print metrics.accuracy_score(y_test, y_pred)
# # print metrics.confusion_matrix(y_test, y_pred)
y_proba = pipeline.predict_proba(X_test)
for doc, category, proba in zip(X_test, y_category, y_proba):
print doc, ":", category, ":", proba | [
"wb.zhujiaying2018@mesg.corp.netease.com"
] | wb.zhujiaying2018@mesg.corp.netease.com |
2547df717dfe3addc94186c2ac91b1a1ae7485f3 | 958011e29af1e4311f02419af10e75d6f65c31df | /python/python系统性学习/2020_04_18/水仙花数.py | de6759cf1f18324f4b03cdc708ecd182a1fc89a9 | [] | no_license | cceniam/BaiduNetdiskDownload | 028e9dad1cb684f828fea5b998055a30a5e4ca6b | 29d15e26037cd7bea714da8a920e40c32aea01fa | refs/heads/master | 2022-12-02T13:04:48.781165 | 2020-08-16T15:01:43 | 2020-08-16T15:01:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | """
寻找水仙花数
水仙花数也被称为超完全数字不变数、自恋数、自幂数、阿姆斯特朗数,
它是一个3位数,该数字每个位上数字的立方之和正好等于它本身
"""
for num in range(100, 1001):
if (num % 10) ** 3 + (num // 10 % 10) ** 3 + (num // 100) ** 3 == num:
print(num)
| [
"1287666839@qq.com"
] | 1287666839@qq.com |
358ac8e540becb6c1f897dee898dd415d72de6c5 | 81fc75db14579a2f1d576b00716bc84ff65384c4 | /光城/kuohao/kh.py | 9eacf2d627aa4189d08da57bae0adf6d1e806e72 | [] | no_license | guangcity/learning-algorithm | 4de7bdd4fc72a8e32d5154f9c295a7eecf7de6b0 | bfb7d5f353910b1a73a4f4eaab3cdb1450253878 | refs/heads/master | 2020-04-09T17:11:00.161275 | 2018-12-24T11:43:04 | 2018-12-24T11:43:04 | 160,473,047 | 7 | 26 | null | 2019-01-10T13:43:22 | 2018-12-05T06:42:28 | Python | UTF-8 | Python | false | false | 719 | py | class Solution:
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
stack_match = {'{':'}','[':']','(':')'}
stack=[]
for str in s:
if str == '{' or str == '[' or str== '(':
stack.append(str)
else:
# 无左括号,只有右括号,直接返回False
if len(stack)==0:
return False
# 栈顶元素对应的字符串是否匹配当前字符
if stack_match[stack.pop()]!=str:
return False
flag = True if len(stack)==0 else False
return flag
s = Solution()
t = "{[]}"
res = s.isValid(t)
print(res)
| [
"455954986@qq.com"
] | 455954986@qq.com |
dafaae9d98fc5c486dd6b6f6187f1c06453472a7 | 4a2d0c916ec14ce38a94defd4df8d6c309248045 | /Challenge-Zheren Dong.py | 5d78f7bedcf5e2a94fa4da8b6d2ec2ba5904c191 | [] | no_license | Edwardong/Challenge-turtle | 4342d7b0a94257174d5a1946957e941b5244e204 | 2979f2e5e4a5f83de5dcf5945782e9539b781204 | refs/heads/master | 2021-01-18T15:38:39.102396 | 2017-10-18T20:34:59 | 2017-10-18T20:34:59 | 67,078,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,691 | py | import turtle
import math
#draw four obliqueSquares firstly
turtle.setup(800,600)
turtle.bgcolor('#1E0028')
dis=60
def obliqueSquare(t,x,y):
t.width(0.5)
t.up()
t.goto(x,y)
t.down()
t.left(45)
t.forward(dis)
t.right(90)
t.forward(dis)
t.right(90)
t.forward(dis)
t.right(90)
t.forward(dis)
t.right(135)
Edward=turtle.Turtle()
Edward.color('purple')
Edward.hideturtle()
Edward.speed(0)
turtle.tracer(0,0)
obliqueSquare(Edward,-3*dis/math.sqrt(2),0)
obliqueSquare(Edward,-dis/math.sqrt(2),2*dis/math.sqrt(2))
obliqueSquare(Edward,dis/math.sqrt(2),0)
obliqueSquare(Edward,-dis/math.sqrt(2),-2*dis/math.sqrt(2))
turtle.update()
##############################################
#define four points, draw a line through these points with no delay
#a,b,c,d are the names of the points
steps=dis*2
stepsize=1/2
def fourPointGooto(t,a,b,c,d):
t.shape("circle")
t.width(0.3)
t.shapesize(0.2, 0.2)
t.up()
t.goto(a)
t.down()
t.stamp()
t.goto(b)
t.stamp()
t.goto(c)
t.stamp()
t.goto(d)
t.stamp()
t.goto(a)
def drawSquare1(t):
t.hideturtle()
#change1 is used to change the coordinate of 16 points of 4 squares in every frame
change1=(stepsize/math.sqrt(2),stepsize/math.sqrt(2))
change1=list(change1)
a0=(-3*dis/math.sqrt(2),0)
b0=(0,3*dis/math.sqrt(2))
c0=(3*dis/math.sqrt(2),0)
d0=(0,-3*dis/math.sqrt(2))
a0=list(a0)
b0=list(b0)
c0=list(c0)
d0=list(d0)
a1=(-2*dis/math.sqrt(2),dis/math.sqrt(2))
b1=(dis/math.sqrt(2),2*dis/math.sqrt(2))
c1=(2*dis/math.sqrt(2),-dis/math.sqrt(2))
d1=(-dis/math.sqrt(2),-2*dis/math.sqrt(2))
a1=list(a1)
b1=list(b1)
c1=list(c1)
d1=list(d1)
a2=(-dis/math.sqrt(2),0)
b2=(0,dis/math.sqrt(2))
c2=(dis/math.sqrt(2),0)
d2=(0,-dis/math.sqrt(2))
a2=list(a2)
b2=list(b2)
c2=list(c2)
d2=list(d2)
a3=(-2*dis/math.sqrt(2),-dis/math.sqrt(2))
b3=(-dis/math.sqrt(2),2*dis/math.sqrt(2))
c3=(2*dis/math.sqrt(2),dis/math.sqrt(2))
d3=(dis/math.sqrt(2),-2*dis/math.sqrt(2))
a3=list(a3)
b3=list(b3)
c3=list(c3)
d3=list(d3)
while 1:
for i in range(steps):
t.clear()
turtle.tracer(0,0)
fourPointGooto(t,a0,b0,c0,d0)
a0=[a0[0]+change1[0],a0[1]+change1[1]]
b0=[b0[0]+change1[0],b0[1]-change1[1]]
c0=[c0[0]-change1[0],c0[1]-change1[1]]
d0=[d0[0]-change1[0],d0[1]+change1[1]]
fourPointGooto(t,a1,b1,c1,d1)
a1=[a1[0]+change1[0],a1[1]-change1[1]]
b1=[b1[0]-change1[0],b1[1]-change1[1]]
c1=[c1[0]-change1[0],c1[1]+change1[1]]
d1=[d1[0]+change1[0],d1[1]+change1[1]]
fourPointGooto(t,a2,b2,c2,d2)
a2=[a2[0]-change1[0],a2[1]-change1[1]]
b2=[b2[0]-change1[0],b2[1]+change1[1]]
c2=[c2[0]+change1[0],c2[1]+change1[1]]
d2=[d2[0]+change1[0],d2[1]-change1[1]]
fourPointGooto(t,a3,b3,c3,d3)
a3=[a3[0]-change1[0],a3[1]+change1[1]]
b3=[b3[0]+change1[0],b3[1]+change1[1]]
c3=[c3[0]+change1[0],c3[1]-change1[1]]
d3=[d3[0]-change1[0],d3[1]-change1[1]]
turtle.update()
t.clearstamps()
for j in range(steps):
t.clear()
turtle.tracer(0,0)
fourPointGooto(t,a0,b0,c0,d0)
a0=[a0[0]+change1[0],a0[1]-change1[1]]
b0=[b0[0]-change1[0],b0[1]-change1[1]]
c0=[c0[0]-change1[0],c0[1]+change1[1]]
d0=[d0[0]+change1[0],d0[1]+change1[1]]
fourPointGooto(t,a1,b1,c1,d1)
a1=[a1[0]-change1[0],a1[1]-change1[1]]
b1=[b1[0]-change1[0],b1[1]+change1[1]]
c1=[c1[0]+change1[0],c1[1]+change1[1]]
d1=[d1[0]+change1[0],d1[1]-change1[1]]
fourPointGooto(t,a2,b2,c2,d2)
a2=[a2[0]-change1[0],a2[1]+change1[1]]
b2=[b2[0]+change1[0],b2[1]+change1[1]]
c2=[c2[0]+change1[0],c2[1]-change1[1]]
d2=[d2[0]-change1[0],d2[1]-change1[1]]
fourPointGooto(t,a3,b3,c3,d3)
a3=[a3[0]+change1[0],a3[1]+change1[1]]
b3=[b3[0]+change1[0],b3[1]-change1[1]]
c3=[c3[0]-change1[0],c3[1]-change1[1]]
d3=[d3[0]-change1[0],d3[1]+change1[1]]
turtle.update()
t.clearstamps()
for k in range(steps):
t.clear()
turtle.tracer(0,0)
fourPointGooto(t,a0,b0,c0,d0)
a0=[a0[0]-change1[0],a0[1]-change1[1]]
b0=[b0[0]-change1[0],b0[1]+change1[1]]
c0=[c0[0]+change1[0],c0[1]+change1[1]]
d0=[d0[0]+change1[0],d0[1]-change1[1]]
fourPointGooto(t,a1,b1,c1,d1)
a1=[a1[0]-change1[0],a1[1]+change1[1]]
b1=[b1[0]+change1[0],b1[1]+change1[1]]
c1=[c1[0]+change1[0],c1[1]-change1[1]]
d1=[d1[0]-change1[0],d1[1]-change1[1]]
fourPointGooto(t,a2,b2,c2,d2)
a2=[a2[0]+change1[0],a2[1]+change1[1]]
b2=[b2[0]+change1[0],b2[1]-change1[1]]
c2=[c2[0]-change1[0],c2[1]-change1[1]]
d2=[d2[0]-change1[0],d2[1]+change1[1]]
fourPointGooto(t,a3,b3,c3,d3)
a3=[a3[0]+change1[0],a3[1]-change1[1]]
b3=[b3[0]-change1[0],b3[1]-change1[1]]
c3=[c3[0]-change1[0],c3[1]+change1[1]]
d3=[d3[0]+change1[0],d3[1]+change1[1]]
turtle.update()
t.clearstamps()
for l in range(steps):
t.clear()
turtle.tracer(0,0)
fourPointGooto(t,a0,b0,c0,d0)
a0=[a0[0]-change1[0],a0[1]+change1[1]]
b0=[b0[0]+change1[0],b0[1]+change1[1]]
c0=[c0[0]+change1[0],c0[1]-change1[1]]
d0=[d0[0]-change1[0],d0[1]-change1[1]]
fourPointGooto(t,a1,b1,c1,d1)
a1=[a1[0]+change1[0],a1[1]+change1[1]]
b1=[b1[0]+change1[0],b1[1]-change1[1]]
c1=[c1[0]-change1[0],c1[1]-change1[1]]
d1=[d1[0]-change1[0],d1[1]+change1[1]]
fourPointGooto(t,a2,b2,c2,d2)
a2=[a2[0]+change1[0],a2[1]-change1[1]]
b2=[b2[0]-change1[0],b2[1]-change1[1]]
c2=[c2[0]-change1[0],c2[1]+change1[1]]
d2=[d2[0]+change1[0],d2[1]+change1[1]]
fourPointGooto(t,a3,b3,c3,d3)
a3=[a3[0]-change1[0],a3[1]-change1[1]]
b3=[b3[0]-change1[0],b3[1]+change1[1]]
c3=[c3[0]+change1[0],c3[1]+change1[1]]
d3=[d3[0]+change1[0],d3[1]-change1[1]]
turtle.update()
t.clearstamps()
Amy=turtle.Turtle()
Amy.color('white')
drawSquare1(Amy)
| [
"dongzheren@gmail.com"
] | dongzheren@gmail.com |
ea6d70b093648632d6d1ed12b6149d57e462d18b | e07a1e7238dcbc3740632384a392f18945eef2c0 | /Algorithmic ToolBox/change.py | c6ebacde04dc122f479ce65dc832d9b0d71ae25a | [] | no_license | rishittripathi-therocking/Data-Structures-And-Algorithm | 9692d6fe478ba73602bbbf64e08490f7f35416ca | 36648ee65d240526e2d72a6c7ebb9d2db84425a7 | refs/heads/master | 2023-01-28T14:40:34.671429 | 2020-12-03T08:35:04 | 2020-12-03T08:35:04 | 293,687,842 | 2 | 1 | null | 2020-10-01T03:46:14 | 2020-09-08T03:09:58 | Python | UTF-8 | Python | false | false | 402 | py | # Uses python3
import sys
def get_change(m):
mod10=m%10
denomination10=(m-mod10)//10
if mod10<5:
return denomination10+mod10
else:
if mod10==5:
return denomination10+1
else:
denomiantion5=1
mod10-=5
return denomination10+denomiantion5+mod10
#write your code here
m=int(input())
print(get_change(m))
| [
"rishittripathi13@gmail.com"
] | rishittripathi13@gmail.com |
def8a888c3cd5ae4a708fece0485e1c9598f0e31 | 3abe16fc2d01d951d8a54ffe64348d67c0009380 | /NL Query System in PythonNLTK/statements.py | 5b4e6012ed1e6a89e167053c307434732a400121 | [] | no_license | jiatuo/NLP-projects | ffa5c390035e6330e55566a77f6e5823b8c67613 | 9b71aa92e3a62eb21af175cc42a922561f79b653 | refs/heads/master | 2020-07-31T22:29:06.684101 | 2019-09-25T06:42:08 | 2019-09-25T06:42:08 | 210,773,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,690 | py | # File: statements.py
# Template file for Informatics 2A Assignment 2:
# 'A Natural Language Query System in Python/NLTK'
# John Longley, November 2012
# Revised November 2013 and November 2014 with help from Nikolay Bogoychev
# Revised November 2015 by Toms Bergmanis and Shay Cohen
# PART A: Processing statements
from nltk.corpus import brown
br = set(brown.tagged_words())
def add(lst,item):
if (item not in lst):
lst.insert(len(lst),item)
class Lexicon:
"""stores known word stems of various part-of-speech categories"""
def __init__(self):
self.data = {'P':[], 'N':[],'A':[],'I':[],'T':[]}
def add(self,stem,cat):
if not cat in self.data.keys():
return "Invalid tag!"
else:
self.data[cat].append(stem)
def getAll(self,cat):
a = set(self.data[cat])
result = list(a)
return result;
class FactBase:
def __init__(self):
self.un = {}
self.bin = {}
def addUnary(self,pred,e1):
if not pred in self.un.keys():
self.un[pred] = [];
self.un[pred].append(e1)
def queryUnary(self,pred,e1):
if (pred in self.un.keys()) and (e1 in self.un[pred]):
return True
else:
return False
def addBinary(self,pred,e1,e2):
if not pred in self.bin.keys():
self.bin[pred] = [];
self.bin[pred].append((e1,e2))
def queryBinary(self,pred,e1,e2):
if (pred in self.bin.keys()) and ((e1,e2) in self.bin[pred]):
return True
else:
return False
import re
def verb_stem(s):
if (next((word for word in br if word == (s, 'VB') or word == (s, 'VBZ')),1) == 1):
return ""
else:
if s == "has":
s = "have"
if s == "does":
s = "do"
if re.match('(unt|.)ies', s):
s = s[:-1]
elif re.match('.*[^aeiou]ies',s):
s = s[:-3]
s = s + 'y'
elif re.match('.*(o|x|ch|sh|ss|zz)es',s):
s = s[:-2]
elif re.match('.*(z|s)es',s):
s = s[:-1]
elif re.match('.*[^i]es',s):
s = s[:-1]
elif re.match('.*[aeiou]ys', s):
s = s[:-1]
elif re.match('.*([^aeiousxyz(ch)(sh)])s',s):
s = s[:-1]
return s
def add_proper_name (w,lx):
"""adds a name to a lexicon, checking if first letter is uppercase"""
if ('A' <= w[0] and w[0] <= 'Z'):
lx.add(w,'P')
return ''
else:
return (w + " isn't a proper name")
def process_statement (lx,wlist,fb):
"""analyses a statement and updates lexicon and fact base accordingly;
returns '' if successful, or error message if not."""
# Grammar for the statement language is:
# S -> P is AR Ns | P is A | P Is | P Ts P
# AR -> a | an
# We parse this in an ad hoc way.
msg = add_proper_name (wlist[0],lx)
if (msg == ''):
if (wlist[1] == 'is'):
if (wlist[2] in ['a','an']):
lx.add (wlist[3],'N')
fb.addUnary ('N_'+wlist[3],wlist[0])
else:
lx.add (wlist[2],'A')
fb.addUnary ('A_'+wlist[2],wlist[0])
else:
stem = verb_stem(wlist[1])
if (len(wlist) == 2):
lx.add (stem,'I')
fb.addUnary ('I_'+stem,wlist[0])
else:
msg = add_proper_name (wlist[2],lx)
if (msg == ''):
lx.add (stem,'T')
fb.addBinary ('T_'+stem,wlist[0],wlist[2])
return msg
# End of PART A.
| [
"changyijia@Hotmail.com"
] | changyijia@Hotmail.com |
3d0be7a72b4ae36b3d79b94160a1bd62abf25d72 | 9db6dffea7934c37e9c66796b7e1bc258d2ceca4 | /plugin.video.daffyslist/resources/lib/modules/sources.py | 60e8629903c90b4ede1506a088bd69d3cb309b6f | [] | no_license | KodiUKTV/kodiuktv | 4025380d70ad6caba5efc5eb59dd65f372b5e0ea | 68a41077cdd86838bb4207b6b0d7fe8070b00fe5 | refs/heads/master | 2021-01-18T22:09:44.434458 | 2017-07-27T13:35:22 | 2017-07-27T13:35:22 | 47,322,825 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 19,494 | py | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys,re,json,urllib,urlparse,random,datetime,time
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import workers
try: from sqlite3 import dbapi2 as database
except: from pysqlite2 import dbapi2 as database
try: import urlresolver
except: pass
try: import xbmc
except: pass
class sources:
    def __init__(self):
        """Initialise the scraper orchestrator.

        ``getConstants()`` (defined elsewhere in this class) is expected to
        populate the provider/host lookup attributes used by the methods
        below (``self.sourceDict``, ``self.hostDict``, ``self.hostprDict``,
        ``self.hostcapDict``, ``self.hostblockDict``, ``self.debridDict``)
        -- TODO confirm against its definition.
        ``self.sources`` accumulates the scraped link dicts produced by the
        worker threads.
        """
        self.getConstants()
        self.sources = []
    def getSources(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, quality='HD', timeout=20):
        """Scrape every enabled provider for a movie/episode and auto-resolve.

        Spawns one worker thread per provider, waits (with a progress
        dialog) until they finish or ``timeout`` elapses, filters/sorts the
        collected links, then resolves them in order and returns the first
        resolvable URL (or ``None`` if nothing resolved).

        ``content`` is 'movie' when ``tvshowtitle`` is None, else 'episode';
        ``quality='SD'`` restricts the run to priority-0 providers.
        """
        u = None
        self.prepareSources()
        sourceDict = self.sourceDict
        content = 'movie' if tvshowtitle == None else 'episode'
        # Keep only providers that implement the entry point for this
        # content type (movie() vs tvshow()).
        if content == 'movie':
            sourceDict = [(i[0], i[1], getattr(i[1], 'movie', None)) for i in sourceDict]
        else:
            sourceDict = [(i[0], i[1], getattr(i[1], 'tvshow', None)) for i in sourceDict]
        sourceDict = [(i[0], i[1]) for i in sourceDict if not i[2] == None]
        # Third element is the provider's priority; 0 appears to mean a
        # "main" (preferred) source -- see the mainsourceDict wait logic.
        sourceDict = [(i[0], i[1], i[1].priority) for i in sourceDict]
        if quality == 'SD':
            sourceDict = [i for i in sourceDict if i[2] == 0]
        threads = []
        if content == 'movie':
            title = cleantitle.normalize(title)
            for i in sourceDict: threads.append(workers.Thread(self.getMovieSource, title, year, imdb, i[0], i[1]))
        else:
            tvshowtitle = cleantitle.normalize(tvshowtitle)
            for i in sourceDict: threads.append(workers.Thread(self.getEpisodeSource, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, i[0], i[1]))
        # Pair each provider tuple with its thread so we can match thread
        # names back to priorities; mainsourceDict holds the thread names
        # of priority-0 providers, which are allowed to run past `timeout`.
        s = [i[0] + (i[1],) for i in zip(sourceDict, threads)]
        s = [(i[3].getName(), i[0], i[2]) for i in s]
        mainsourceDict = [i[0] for i in s if i[2] == 0]
        [i.start() for i in threads]
        progressDialog = control.progressDialog
        progressDialog.create(control.addonInfo('name'), control.lang(30726).encode('utf-8'))
        progressDialog.update(0)
        progressDialog.update(0, control.lang(30726).encode('utf-8'), control.lang(30731).encode('utf-8'))
        # Poll every 0.5s; the iteration count allows timeout plus a 30s
        # grace period for the priority-0 ("main") providers.
        for i in range(0, (timeout * 2) + 60):
            try:
                # NOTE(review): sys.exit() raises SystemExit, which the
                # bare `except` below swallows -- the abort likely does not
                # actually stop this loop; confirm before relying on it.
                if xbmc.abortRequested == True: return sys.exit()
                # Seconds elapsed so far (i iterations of 0.5s).
                timerange = int(i * 0.5)
                try:
                    if progressDialog.iscanceled(): break
                except:
                    pass
                is_alive = [x.is_alive() for x in threads]
                if all(x == False for x in is_alive): break
                if timerange >= timeout:
                    # Past the soft timeout: keep waiting only while a
                    # priority-0 provider thread is still running.
                    is_alive = [x for x in threads if x.is_alive() == True and x.getName() in mainsourceDict]
                    if not is_alive: break
                time.sleep(0.5)
            except:
                pass
        progressDialog.update(50, control.lang(30726).encode('utf-8'), control.lang(30731).encode('utf-8'))
        items = self.sourcesFilter()
        # Drop capped/blocked hosts unless a debrid service covers them.
        filter = [i for i in items if i['source'].lower() in self.hostcapDict and i['debrid'] == '']
        items = [i for i in items if not i in filter]
        filter = [i for i in items if i['source'].lower() in self.hostblockDict and i['debrid'] == '']
        items = [i for i in items if not i in filter]
        # Keep only links flagged autoplay-safe (or with no flag at all).
        items = [i for i in items if ('autoplay' in i and i['autoplay'] == True) or not 'autoplay' in i]
        # Resolve candidates in order; first successful resolve wins.
        for i in range(len(items)):
            try:
                if progressDialog.iscanceled(): break
                if xbmc.abortRequested == True: return sys.exit()
                url = self.sourcesResolve(items[i])
                if u == None: u = url
                if not url == None: break
            except:
                pass
        try: progressDialog.close()
        except: pass
        return u
    def getURISource(self, url):
        """Scrape and resolve a single pasted/bookmarked provider URL.

        Matches the URL's domain against the registered providers'
        ``domains`` lists; returns ``False`` if no provider claims the
        domain, otherwise scrapes that one provider synchronously, filters
        the results and returns the first resolvable URL (or ``None``).
        """
        u = None
        sourceDict = self.sourceDict
        # Strip leading www./www2. etc. from the hostname before matching.
        domain = re.sub('^www\.|^www\d+\.', '', urlparse.urlparse(url.strip().lower()).netloc)
        domains = [(i[0], i[1].domains) for i in sourceDict]
        domains = [i[0] for i in domains if any(x in domain for x in i[1])]
        if not domains: return False
        # Use the first provider that claimed the domain.
        sourceDict = [i for i in sourceDict if i[0] == domains[0]][0]
        source = sourceDict[0] ; call = sourceDict[1]
        progressDialog = control.progressDialog
        progressDialog.create(control.addonInfo('name'), control.lang(30726).encode('utf-8'))
        progressDialog.update(0)
        progressDialog.update(0, control.lang(30726).encode('utf-8'), control.lang(30731).encode('utf-8'))
        self.sources = call.sources(url, self.hostDict, self.hostprDict)
        # Tag every scraped link with its provider name (sourcesFilter and
        # the UI read this key).
        for i in self.sources: i.update({'provider': source})
        progressDialog.update(50, control.lang(30726).encode('utf-8'), control.lang(30731).encode('utf-8'))
        items = self.sourcesFilter()
        # Drop capped/blocked hosts unless a debrid service covers them.
        filter = [i for i in items if i['source'].lower() in self.hostcapDict and i['debrid'] == '']
        items = [i for i in items if not i in filter]
        filter = [i for i in items if i['source'].lower() in self.hostblockDict and i['debrid'] == '']
        items = [i for i in items if not i in filter]
        items = [i for i in items if ('autoplay' in i and i['autoplay'] == True) or not 'autoplay' in i]
        # Resolve candidates in order; first successful resolve wins.
        for i in range(len(items)):
            try:
                if progressDialog.iscanceled(): break
                # NOTE(review): sys.exit() raises SystemExit, which the
                # bare `except` below swallows; the abort may not take
                # effect -- confirm.
                if xbmc.abortRequested == True: return sys.exit()
                url = self.sourcesResolve(items[i])
                if u == None: u = url
                if not url == None: break
            except:
                pass
        try: progressDialog.close()
        except: pass
        return u
def prepareSources(self):
try:
control.makeFile(control.dataPath)
self.sourceFile = control.providercacheFile
dbcon = database.connect(self.sourceFile)
dbcur = dbcon.cursor()
dbcur.execute("CREATE TABLE IF NOT EXISTS rel_url (""source TEXT, ""imdb_id TEXT, ""season TEXT, ""episode TEXT, ""rel_url TEXT, ""UNIQUE(source, imdb_id, season, episode)"");")
dbcur.execute("CREATE TABLE IF NOT EXISTS rel_src (""source TEXT, ""imdb_id TEXT, ""season TEXT, ""episode TEXT, ""hosts TEXT, ""added TEXT, ""UNIQUE(source, imdb_id, season, episode)"");")
except:
pass
    def getMovieSource(self, title, year, imdb, source, call):
        """Worker: scrape one provider for a movie, using the sqlite cache.

        Stages (each stage swallows its own errors and falls through to
        the next, so the exact ordering and the bare excepts are load-
        bearing):
          1. return cached sources from ``rel_src`` if fresh enough;
          2. look up / resolve and cache the provider's movie-page URL in
             ``rel_url``;
          3. scrape the page, append the links to ``self.sources`` and
             cache them in ``rel_src``.
        """
        try:
            dbcon = database.connect(self.sourceFile)
            dbcur = dbcon.cursor()
        except:
            pass
        try:
            sources = []
            # NOTE(review): SQL built via string interpolation -- values
            # come from addon metadata, but parameterized queries would be
            # safer.
            dbcur.execute("SELECT * FROM rel_src WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
            match = dbcur.fetchone()
            # Compare timestamps as YYYYMMDDHHMM integers; the numeric
            # diff approximates a ~60-minute TTL but is inexact across
            # hour/day rollovers.
            t1 = int(re.sub('[^0-9]', '', str(match[5])))
            t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
            update = abs(t2 - t1) > 60
            if update == False:
                # Cache hit: reuse the stored JSON source list and stop.
                sources = json.loads(match[4])
                return self.sources.extend(sources)
        except:
            pass
        try:
            # Stage 2a: try the cached provider movie-page URL first.
            url = None
            dbcur.execute("SELECT * FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
            url = dbcur.fetchone()
            url = url[4]
        except:
            pass
        try:
            # Stage 2b: cache miss -- ask the provider to resolve the URL
            # and store it for next time.
            if url == None: url = call.movie(imdb, title, year)
            if url == None: raise Exception()
            dbcur.execute("DELETE FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
            dbcur.execute("INSERT INTO rel_url Values (?, ?, ?, ?, ?)", (source, imdb, '', '', url))
            dbcon.commit()
        except:
            pass
        try:
            # Stage 3: scrape, tag with the provider name, publish to the
            # shared self.sources list and refresh the rel_src cache row.
            sources = []
            sources = call.sources(url, self.hostDict, self.hostprDict)
            if sources == None or sources == []: raise Exception()
            for i in sources: i.update({'provider': source})
            self.sources.extend(sources)
            dbcur.execute("DELETE FROM rel_src WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
            dbcur.execute("INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?)", (source, imdb, '', '', json.dumps(sources), datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
            dbcon.commit()
        except:
            pass
    def getEpisodeSource(self, title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, source, call):
        """Worker: scrape one provider for a TV episode, using the cache.

        Same staged fall-through structure as getMovieSource (each stage
        swallows its own errors), with an extra step because episode pages
        hang off a show page:
          1. return cached sources from ``rel_src`` if fresh enough;
          2. look up / resolve the show-level URL (cached with empty
             season/episode keys in ``rel_url``);
          3. look up / resolve the episode URL from the show URL (cached
             with the real season/episode keys);
          4. scrape, append to ``self.sources`` and cache in ``rel_src``.
        """
        try:
            dbcon = database.connect(self.sourceFile)
            dbcur = dbcon.cursor()
        except:
            pass
        try:
            sources = []
            # NOTE(review): SQL built via string interpolation -- values
            # come from addon metadata, but parameterized queries would be
            # safer.
            dbcur.execute("SELECT * FROM rel_src WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, season, episode))
            match = dbcur.fetchone()
            # Compare timestamps as YYYYMMDDHHMM integers; the numeric
            # diff approximates a ~60-minute TTL but is inexact across
            # hour/day rollovers.
            t1 = int(re.sub('[^0-9]', '', str(match[5])))
            t2 = int(datetime.datetime.now().strftime("%Y%m%d%H%M"))
            update = abs(t2 - t1) > 60
            if update == False:
                # Cache hit: reuse the stored JSON source list and stop.
                sources = json.loads(match[4])
                return self.sources.extend(sources)
        except:
            pass
        try:
            # Stage 2a: cached show-level URL (season/episode keys empty).
            url = None
            dbcur.execute("SELECT * FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
            url = dbcur.fetchone()
            url = url[4]
        except:
            pass
        try:
            # Stage 2b: resolve and cache the show URL via the provider.
            if url == None: url = call.tvshow(imdb, tvdb, tvshowtitle, year)
            if url == None: raise Exception()
            dbcur.execute("DELETE FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, '', ''))
            dbcur.execute("INSERT INTO rel_url Values (?, ?, ?, ?, ?)", (source, imdb, '', '', url))
            dbcon.commit()
        except:
            pass
        try:
            # Stage 3a: cached episode-level URL (real season/episode keys).
            ep_url = None
            dbcur.execute("SELECT * FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, season, episode))
            ep_url = dbcur.fetchone()
            ep_url = ep_url[4]
        except:
            pass
        try:
            # Stage 3b: resolve the episode URL from the show URL and
            # cache it; bail out if we never got a show URL.
            if url == None: raise Exception()
            if ep_url == None: ep_url = call.episode(url, imdb, tvdb, title, premiered, season, episode)
            if ep_url == None: raise Exception()
            dbcur.execute("DELETE FROM rel_url WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, season, episode))
            dbcur.execute("INSERT INTO rel_url Values (?, ?, ?, ?, ?)", (source, imdb, season, episode, ep_url))
            dbcon.commit()
        except:
            pass
        try:
            # Stage 4: scrape, tag with the provider name, publish to the
            # shared self.sources list and refresh the rel_src cache row.
            sources = []
            sources = call.sources(ep_url, self.hostDict, self.hostprDict)
            if sources == None or sources == []: raise Exception()
            for i in sources: i.update({'provider': source})
            self.sources.extend(sources)
            dbcur.execute("DELETE FROM rel_src WHERE source = '%s' AND imdb_id = '%s' AND season = '%s' AND episode = '%s'" % (source, imdb, season, episode))
            dbcur.execute("INSERT INTO rel_src Values (?, ?, ?, ?, ?, ?)", (source, imdb, season, episode, json.dumps(sources), datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
            dbcon.commit()
        except:
            pass
    def sourcesFilter(self):
        """Order, de-duplicate, filter and label self.sources for display.

        NOTE(review): this is Python 2 code (`dict(i.items() + [...])` below,
        `urllib.quote_plus` elsewhere in this class) - do not run under
        Python 3 without porting.
        """
        provider = control.setting('hosts.sort.provider')
        quality = control.setting('hosts.quality')
        if quality == '': quality = '0'
        captcha = control.setting('hosts.captcha')
        language = self.getLanguage()
        # Shuffle first so ties between otherwise-equivalent sources vary.
        random.shuffle(self.sources)
        if provider == 'true':
            self.sources = sorted(self.sources, key=lambda k: k['provider'])
        # Pull out locally-cached sources; they are re-added at the front below.
        local = [i for i in self.sources if 'local' in i and i['local'] == True]
        self.sources = [i for i in self.sources if not i in local]
        # Direct links first, then links that still need resolving.
        filter = []
        filter += [i for i in self.sources if i['direct'] == True]
        filter += [i for i in self.sources if i['direct'] == False]
        self.sources = filter
        # Duplicate entries once per debrid service that covers the host;
        # keep non-premium hosts only when they are not debrid-only.
        filter = []
        for d in self.debridDict: filter += [dict(i.items() + [('debrid', d)]) for i in self.sources if i['source'].lower() in self.debridDict[d]]
        filter += [i for i in self.sources if not i['source'].lower() in self.hostprDict and i['debridonly'] == False]
        self.sources = filter
        # Quality buckets, best first; SCR/CAM only pad out short lists.
        filter = []
        filter += local
        if quality == '0': filter += [i for i in self.sources if i['quality'] == '1080p' and 'debrid' in i]
        if quality == '0' or quality == '1': filter += [i for i in self.sources if i['quality'] == 'HD' and 'debrid' in i]
        if quality == '0': filter += [i for i in self.sources if i['quality'] == '1080p' and not 'debrid' in i and 'memberonly' in i]
        if quality == '0' or quality == '1': filter += [i for i in self.sources if i['quality'] == 'HD' and not 'debrid' in i and 'memberonly' in i]
        if quality == '0': filter += [i for i in self.sources if i['quality'] == '1080p' and not 'debrid' in i and not 'memberonly' in i]
        if quality == '0' or quality == '1': filter += [i for i in self.sources if i['quality'] == 'HD' and not 'debrid' in i and not 'memberonly' in i]
        filter += [i for i in self.sources if i['quality'] == 'SD']
        if len(filter) < 10: filter += [i for i in self.sources if i['quality'] == 'SCR']
        if len(filter) < 10: filter += [i for i in self.sources if i['quality'] == 'CAM']
        self.sources = filter
        # Optionally drop captcha-protected hosts (debrid entries bypass this).
        if not captcha == 'true':
            filter = [i for i in self.sources if i['source'].lower() in self.hostcapDict and not 'debrid' in i]
            self.sources = [i for i in self.sources if not i in filter]
        # Always drop blocked hosts (debrid entries bypass this too).
        filter = [i for i in self.sources if i['source'].lower() in self.hostblockDict and not 'debrid' in i]
        self.sources = [i for i in self.sources if not i in filter]
        # Prefer the configured language; otherwise push 'en' entries last.
        if not language == '':
            self.sources = [i for i in self.sources if i['language'] == language] + [i for i in self.sources if not i['language'] == language]
        else:
            self.sources = [i for i in self.sources if not i['language'] == 'en'] + [i for i in self.sources if i['language'] == 'en']
        # Hard cap to keep the selection dialog responsive.
        self.sources = self.sources[:2000]
        # Build the human-readable label shown for each source.
        for i in range(len(self.sources)):
            u = self.sources[i]['url']
            s = self.sources[i]['source'].lower()
            p = self.sources[i]['provider']
            p = re.sub('v\d*$', '', p)
            q = self.sources[i]['quality']
            try: f = (' | '.join(['[I]%s [/I]' % info.strip() for info in self.sources[i]['info'].split('|')]))
            except: f = ''
            try: d = self.sources[i]['debrid']
            except: d = self.sources[i]['debrid'] = ''
            if not d == '': label = '%02d | [B]%s[/B] | ' % (int(i+1), d)
            #if not d == '': label = '%02d | [B]%s[/B] | [B]%s[/B] | ' % (int(i+1), p, d)
            else: label = '%02d | [B]%s[/B] | ' % (int(i+1), p)
            if q in ['1080p', 'HD']: label += '%s | %s | [B][I]%s [/I][/B]' % (s.rsplit('.', 1)[0], f, q)
            elif q == 'SD': label += '%s | %s' % (s.rsplit('.', 1)[0], f)
            else: label += '%s | %s | [I]%s [/I]' % (s.rsplit('.', 1)[0], f, q)
            # Tidy up empty fields left behind by the formatting above.
            label = label.replace('| 0 |', '|').replace(' | [I]0 [/I]', '')
            label = label.replace('[I]HEVC [/I]', 'HEVC')
            label = re.sub('\[I\]\s+\[/I\]', ' ', label)
            label = re.sub('\|\s+\|', '|', label)
            label = re.sub('\|(?:\s+|)$', '', label)
            self.sources[i]['label'] = label.upper()
        return self.sources
    def sourcesResolve(self, item, info=False):
        """Resolve a selected source item into a final playable URL.

        Returns the URL (also stored on self.url) or None on any failure.
        When `info` is True, the error hook is invoked on failure.
        """
        try:
            self.url = None
            u = url = item['url']
            d = item['debrid'] ; direct = item['direct']
            provider = item['provider']
            # Look up the provider module that produced this item.
            call = [i[1] for i in self.sourceDict if i[0] == provider][0]
            u = url = call.resolve(url)
            if url == None or not '://' in str(url): raise Exception()
            # Debrid services take priority; otherwise fall back to
            # urlresolver for indirect (hoster) links.
            if not d == '':
                url = debrid.resolver(url, d)
            elif not direct == True:
                hmf = urlresolver.HostedMediaFile(url=u, include_disabled=True, include_universal=False)
                if hmf.valid_url() == True: url = hmf.resolve()
            if url == False or url == None: raise Exception()
            # Reject rar archives - they are not streamable.
            ext = url.split('?')[0].split('&')[0].split('|')[0].rsplit('.')[-1].replace('/', '').lower()
            if ext == 'rar': raise Exception()
            # A trailing '|...' segment carries HTTP headers for the request.
            try: headers = url.rsplit('|', 1)[1]
            except: headers = ''
            headers = urllib.quote_plus(headers).replace('%3D', '=') if ' ' in headers else headers
            headers = dict(urlparse.parse_qsl(headers))
            # Probe the URL so dead links fail here rather than in the player.
            if url.startswith('http') and '.m3u8' in url:
                result = client.request(url.split('|')[0], headers=headers, output='geturl', timeout='20')
                if result == None: raise Exception()
            elif url.startswith('http'):
                result = client.request(url.split('|')[0], headers=headers, output='chunk', timeout='20')
                if result == None: raise Exception()
            self.url = url
            return url
        except:
            if info == True: self.errorForSources()
            return
def errorForSources(self):
return
def getLanguage(self):
return 'en'
    def getConstants(self):
        """Initialise the host/provider lookup tables used while scraping.

        Populates:
            sourceDict     - provider modules returned by resources.lib.sources
            hostDict       - domains urlresolver can handle (flattened, deduped)
            hostprDict     - hosts kept only when a debrid service covers them
                             (see how sourcesFilter uses it)
            hostcapDict    - captcha-protected hosts
            hostblockDict  - hosts to always drop
            debridDict     - configured debrid services
        """
        from resources.lib.sources import sources as sources
        self.sourceDict = sources()
        try:
            # Flatten urlresolver's per-resolver domain lists, lowercase them,
            # and de-duplicate while preserving order.
            # NOTE(review): relies on the Python 2 builtin `reduce`.
            self.hostDict = urlresolver.relevant_resolvers(order_matters=True)
            self.hostDict = [i.domains for i in self.hostDict if not '*' in i.domains]
            self.hostDict = [i.lower() for i in reduce(lambda x, y: x+y, self.hostDict)]
            self.hostDict = [x for y,x in enumerate(self.hostDict) if x not in self.hostDict[:y]]
        except:
            self.hostDict = []
        self.hostprDict = ['1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to', 'uploaded.net', 'uploaded.to', 'ul.to', 'filefactory.com', 'nitroflare.com', 'turbobit.net', 'uploadrocket.net']
        self.hostcapDict = ['hugefiles.net', 'kingfiles.net', 'openload.io', 'openload.co', 'thevideo.me', 'torba.se']
        self.hostblockDict = []
        self.debridDict = debrid.debridDict()
| [
"info@kodiuk.tv"
] | info@kodiuk.tv |
4d1509e08207429c062809870e4b2d8eaeb83857 | 386c146a5092b344b753925da7350790bee97e64 | /tests/index_cal_test_case.py | 14cbcc224f6ec94de95d65983a53a999cc28531d | [] | no_license | ritik977/code-20210228-ritikAgrawal | c47a1e9a93d1dc78f89634e16f790d79b2baa397 | 80cfb13e42754f33a02d6fff11e2e2be0ba3852e | refs/heads/main | 2023-03-11T14:21:46.863914 | 2021-02-28T10:33:13 | 2021-02-28T10:33:13 | 343,075,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import unittest
import sys
sys.path.append('../')
from services import index_cal
class TestIndexCalc(unittest.TestCase):
    """Unit tests for services.index_cal.bmi_cal."""

    def test_bmi_cal(self):
        # 96 kg at 171 cm -> BMI 32.83 plus the service's category labels.
        expected = {"BMI": 32.83, "BMI_Category": "Normal_weight", "Health_risk": "low_risk"}
        actual = index_cal.bmi_cal(96, 171)
        self.assertEqual(actual, expected)
"ritik977@gmail.com"
] | ritik977@gmail.com |
08167712f2bc0b002374f6928882267caeea1344 | 4af907f7c7b5c45800029cfa37a2151a4bdec784 | /species.py | 7d01bd49c6f3af414a8190bc6a0f8d288adeaa8d | [] | no_license | matt-py/NEAT-Bot | bfbf66fc6f3af34b3ae49f277aaa77e2a84b7c8e | 7f42ec581e3341fdcc42cc9bff7ce07d3faf8647 | refs/heads/master | 2020-04-27T23:08:00.263358 | 2019-03-18T17:04:09 | 2019-03-18T17:04:09 | 174,763,404 | 1 | 0 | null | 2019-03-17T23:48:28 | 2019-03-10T01:06:22 | Python | UTF-8 | Python | false | false | 3,956 | py | import random
class Species:
    """A NEAT species: players grouped by topological similarity of their genomes."""

    def __init__(self, p=None):
        """Create an empty species, optionally seeded with a first member `p`."""
        self.players = []
        self.best_fitness = 0
        self.champ = None
        self.average_fitness = 0
        self.staleness = 0
        self.rep = None                      # representative genome for compatibility tests
        self.excess_coeff = 1                # weight of excess/disjoint genes in compatibility
        self.weight_diff_coeff = 0.5         # weight of average weight difference
        self.compatibility_threshold = 3
        if p is not None:
            self.players.append(p)
            self.best_fitness = p.fitness
            self.rep = p.brain.clone()
            self.champ = p.clone()

    def same_species(self, g):
        """Return True when genome `g` is compatible with this species' representative."""
        excess_disjoint = self.get_excess_disjoint(g, self.rep)
        weight_diff = self.average_weight_diff(g, self.rep)
        # Normalise by genome size, but only once genomes get large.
        normaliser = max(len(g.genes) - 20, 1)
        compatibility = (self.excess_coeff * excess_disjoint / normaliser
                         + self.weight_diff_coeff * weight_diff)
        return compatibility < self.compatibility_threshold

    def add_to_species(self, p):
        """Add player `p` to this species."""
        self.players.append(p)

    def get_excess_disjoint(self, brain1, brain2):
        """Count the genes that appear in only one of the two genomes."""
        innovations2 = {gene.innovation_num for gene in brain2.genes}
        shared = sum(1 for gene in brain1.genes if gene.innovation_num in innovations2)
        return len(brain1.genes) + len(brain2.genes) - 2 * shared

    def average_weight_diff(self, brain1, brain2):
        """Average absolute weight difference over genes shared by both genomes.

        Returns 0 when either genome is empty and 100 when nothing matches.
        """
        if not brain1.genes or not brain2.genes:
            return 0
        # Map each innovation number in brain2 to the weight of its FIRST gene,
        # mirroring the first-match semantics of a linear scan.
        first_weight = {}
        for gene in brain2.genes:
            first_weight.setdefault(gene.innovation_num, gene.weight)
        shared = 0
        total_diff = 0
        for gene in brain1.genes:
            if gene.innovation_num in first_weight:
                shared += 1
                total_diff += abs(gene.weight - first_weight[gene.innovation_num])
        if shared == 0:
            return 100
        return total_diff / shared

    def sort_species(self):
        """Sort members by fitness (best first) and update champion/staleness."""
        self.players.sort(key=lambda player: player.fitness, reverse=True)
        if not self.players:
            self.staleness = 200
            return
        leader = self.players[0]
        if leader.fitness > self.best_fitness:
            self.staleness = 0
            self.best_fitness = leader.fitness
            self.rep = leader.brain.clone()
            self.champ = leader.clone()
        else:
            self.staleness += 1

    def set_average(self):
        """Recompute the mean fitness of all members."""
        self.average_fitness = sum(player.fitness for player in self.players) / len(self.players)

    def give_me_baby(self, innovation_history):
        """Produce one offspring: 25% clone of a parent, otherwise a crossover."""
        if random.random() < 0.25:
            baby = self.select_player().clone()
        else:
            parent_a = self.select_player()
            parent_b = self.select_player()
            # The fitter parent leads the crossover.
            if parent_a.fitness < parent_b.fitness:
                baby = parent_b.crossover(parent_a)
            else:
                baby = parent_a.crossover(parent_b)
        baby.brain.mutate(innovation_history)
        return baby

    def select_player(self):
        """Fitness-proportionate (roulette-wheel) selection of one member."""
        total_fitness = sum(player.fitness for player in self.players)
        threshold = random.uniform(0, total_fitness)
        running = 0
        for player in self.players:
            running += player.fitness
            if running > threshold:
                return player
        return self.players[0]

    def cull(self):
        """Drop the worse half of the species (assumes players are sorted)."""
        if len(self.players) > 2:
            keep = round(len(self.players) / 2)
            del self.players[keep:]

    def fitness_sharing(self):
        """Divide each member's fitness by the species size (explicit sharing)."""
        size = len(self.players)
        for player in self.players:
            player.fitness /= size
"48388592+matt-py@users.noreply.github.com"
] | 48388592+matt-py@users.noreply.github.com |
54f40089f47304b3d0702b64a9c922376adbdbf5 | c66fb28627777e633bd306395738a6f36300c41a | /2020/day_5/day5.py | 92e52721976229f892340d243c58e73ae930039a | [] | no_license | giach/advent-of-code | 098dcbb3d764e086973058991954cddbc7ce5cd3 | a1ae64d16804596d502d41650ae12c5bc15b4792 | refs/heads/master | 2021-06-22T21:11:40.481390 | 2021-01-03T20:13:51 | 2021-01-03T20:13:51 | 157,754,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py |
def get_steps():
    """Read the puzzle input and return it as a list of lines.

    Returns:
        list[str]: one boarding-pass string per line of ``day5_input``.
    """
    # Context manager closes the handle even on error (the original leaked it).
    with open("day5_input", "r") as file:
        return file.read().splitlines()
def part1():
lines = get_steps()
seats = []
maxid = -999999
rowsum = 0
seats = []
exitingseats = 0
for line in lines:
row = 0
column = 0
pline = list(line)
for i in pline:
if i == 'F':
row = row << 1
if i == 'B':
row = (row << 1) + 1
if i == 'L':
column = column << 1
if i == 'R':
column = (column << 1) + 1
seat = row * 8 + column
exitingseats += seat
if (seat) > maxid:
maxid = seat
seats.append(seat)
sorted_seats = sorted(seats)
# these are the seats which do not exist from the
# very front and back of the plane
lowseats = sum(list(range(sorted_seats[0])))
highseats = sum(list(range(sorted_seats[-1] + 1, 1024)))
# all possible exiting seats
allseats = sum(list(range(1024)))
# remove the non existing seats and the seats from
# my list to find out what is my place
# Part2 - my seat ID
print("Part2 - my seat ID: ", allseats - lowseats - highseats - exitingseats)
# Part1 - max seat ID
print("Part1 - max seat ID: ", maxid)
# Run the solver on import; prints the answers for both part 1 and part 2.
part1()
| [
"georgiana.chelu93@gmail.com"
] | georgiana.chelu93@gmail.com |
47f75f65742930c228888748f310f241e00b97a2 | 42b64bc6a1215063538f83a0709b5def218c443b | /exp_qd.py | a1289b367f6d08fd4ef646dd51d479731b4be537 | [
"MIT"
] | permissive | tarik/pi-snm-qde | ef60060c088f1db5598635a2ca3716356c7a2c80 | 61b0d60b5e7a56454d313693c2d1a9694879f1dc | refs/heads/master | 2022-12-11T02:42:03.608191 | 2020-08-31T10:56:15 | 2020-08-31T10:56:15 | 285,258,717 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 20,704 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Experiments with the original quality-driven ensembles (SEM-QD) with 1 hidden layer NNs.
"""
from neural_pi.estimator.base import Randomness
from neural_pi.experiment import Experiment
from neural_pi.pipeline import DefaultPipeline
from neural_pi.data import FileDataset, ShuffledDataset
from neural_pi.estimator import PiEnsemble, Adam, ExponentialDecay, \
qd_code_loss, qd_paper_loss, sem_aggreg, std_aggreg
# Experiment handle (sacred-like API); every @ex.*config below attaches to it.
EXPERIMENT_ID = 'exp_qd'

ex = Experiment(name=EXPERIMENT_ID,
                runs_dir='runs/%s' % EXPERIMENT_ID,
                temp_dir='temp',  # Where artifacts are temporarily stored.
                template='templates/template.html')
@ex.config
def default_config():
    # Defaults shared by every run: the pipeline executor and a fresh RNG seed.
    # NOTE(review): in sacred-style configs the local variable names become
    # config keys (cf. execute(executor, config, seed) below) - do not rename.
    executor = DefaultPipeline(ex.artifacts_dir)
    seed = Randomness().random_seed()
# ------------------------------------------------------------------------------
@ex.named_config
def boston_dev():
    """Boston Housing, tiny run (5 epochs, 5 runs, fixed seed) for development."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/boston_housing_data.csv',
            shuffle_path='data/boston_housing_data.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=5,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            epochs=5,
            batch_size=100,
            optimizer=Adam,
            learning_rate=0.02,
            scheduler=ExponentialDecay,
            decay_rate=0.9,
            decay_steps=50.,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
    seed = 1  # Fixed seed so development runs are reproducible.
# ------------------------------------------------------------------------------
# BASED ON THE IMPLEMENTATION: LOSS AND AGGREGATION FUNCTION
# ------------------------------------------------------------------------------
@ex.named_config
def boston_code():
    """Boston Housing; loss/aggregation as in the reference implementation."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/boston_housing_data.csv',
            shuffle_path='data/boston_housing_data.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            epochs=300,
            batch_size=100,
            optimizer=Adam,
            learning_rate=0.02,
            scheduler=ExponentialDecay,
            decay_rate=0.9,
            decay_steps=50.,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def concrete_code():
    """Concrete Strength; loss/aggregation as in the reference implementation."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/concrete_data.csv',
            shuffle_path='data/concrete_data.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.03,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.98,
            epochs=800,
            batch_size=100,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def energy_code():
    """Energy Efficiency; loss/aggregation as in the reference implementation."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/energy.csv',
            shuffle_path='data/energy.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.02,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.96,
            epochs=1000,
            batch_size=100,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def kin8_code():
    """Kin8nm; loss/aggregation as in the reference implementation."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/kin8nm.csv',
            shuffle_path='data/kin8nm.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.02,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.99,
            epochs=500,
            batch_size=100,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def naval_code():
    """Naval Propulsion (compressor decay); reference-implementation loss."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/naval_compressor_decay.csv',
            shuffle_path='data/naval_compressor_decay.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.006,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.998,
            epochs=1000,
            batch_size=100,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=4.,
            print_frequency=10
        )
    )
@ex.named_config
def power_code():
    """Power Plant; loss/aggregation as in the reference implementation."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/power.csv',
            shuffle_path='data/power.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.01,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.99,
            epochs=300,
            batch_size=100,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def protein_code():
    """Protein Structure (large); reference-implementation loss, 5 runs only."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/protein.csv',
            shuffle_path='data/protein.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=5,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[100],
            optimizer=Adam,
            learning_rate=0.002,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.999,
            epochs=600,
            batch_size=100,
            loss_func=qd_code_loss,
            retry_on_crossing=False,
            alpha=0.05,
            soften=160.,
            lambda_=40.,
            print_frequency=10
        )
    )
@ex.named_config
def wine_code():
    """
    Wine Quality; loss/aggregation as in the reference implementation.

    We could not reproduce the results of Pearce et al. (2018), so these HPs
    were found by random search, optimizing for the aggregated ensemble as
    Pearce et al. (2018) did (confirmed in our email communication).
    """
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/wine.csv',
            shuffle_path='data/wine.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.009,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=1.,
            epochs=700,
            batch_size=100,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=26.,
            print_frequency=10
        )
    )
@ex.named_config
def yacht_code():
    """
    Yacht Hydrodynamics; loss/aggregation as in the reference implementation.

    The originally published HPs used `alpha=0.01`, so these HPs were found
    by random search, optimizing for the aggregated ensemble as Pearce et
    al. (2018) did (confirmed in our email communication).
    """
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/yacht.csv',
            shuffle_path='data/yacht.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.009,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.98,
            epochs=2500,
            batch_size=100,
            loss_func=qd_code_loss,
            alpha=0.05,
            soften=160.,
            lambda_=16.,
            print_frequency=10
        )
    )
@ex.named_config
def year_code():
    """YearPredictionMSD (fixed split, single run); reference-implementation loss."""
    config = dict(
        dataset=FileDataset(
            file_path='data/yearmsd.csv',
            standardize=True,
            shuffle=False
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=1,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[100],
            optimizer=Adam,
            learning_rate=0.005,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.999,
            epochs=100,
            batch_size=1000,
            loss_func=qd_code_loss,
            retry_on_crossing=False,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
# ------------------------------------------------------------------------------
# BASED ON THE PAPER: LOSS FUNCTION AND AGGREGATION FUNCTION
# ------------------------------------------------------------------------------
@ex.named_config
def boston_paper():
    """Boston Housing; loss/aggregation as described in the paper."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/boston_housing_data.csv',
            shuffle_path='data/boston_housing_data.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            epochs=300,
            batch_size=100,
            optimizer=Adam,
            learning_rate=0.02,
            scheduler=ExponentialDecay,
            decay_rate=0.9,
            decay_steps=50.,
            loss_func=qd_paper_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def concrete_paper():
    """Concrete Strength; loss/aggregation as described in the paper."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/concrete_data.csv',
            shuffle_path='data/concrete_data.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.03,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.98,
            epochs=800,
            batch_size=100,
            loss_func=qd_paper_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def energy_paper():
    """Energy Efficiency; loss/aggregation as described in the paper."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/energy.csv',
            shuffle_path='data/energy.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.02,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.96,
            epochs=1000,
            batch_size=100,
            loss_func=qd_paper_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def kin8_paper():
    """Kin8nm; loss/aggregation as described in the paper."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/kin8nm.csv',
            shuffle_path='data/kin8nm.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.02,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.99,
            epochs=500,
            batch_size=100,
            loss_func=qd_paper_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def naval_paper():
    """Naval Propulsion (compressor decay); paper loss/aggregation."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/naval_compressor_decay.csv',
            shuffle_path='data/naval_compressor_decay.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.006,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.998,
            epochs=1000,
            batch_size=100,
            loss_func=qd_paper_loss,
            alpha=0.05,
            soften=160.,
            lambda_=4.,
            print_frequency=10
        )
    )
@ex.named_config
def power_paper():
    """Power Plant; loss/aggregation as described in the paper."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/power.csv',
            shuffle_path='data/power.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.01,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.99,
            epochs=300,
            batch_size=100,
            loss_func=qd_paper_loss,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
@ex.named_config
def protein_paper():
    """Protein Structure (large); paper loss/aggregation, 5 runs only."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/protein.csv',
            shuffle_path='data/protein.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=5,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[100],
            optimizer=Adam,
            learning_rate=0.002,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.999,
            epochs=600,
            batch_size=100,
            loss_func=qd_paper_loss,
            retry_on_crossing=False,
            alpha=0.05,
            soften=160.,
            lambda_=40.,
            print_frequency=10
        )
    )
@ex.named_config
def wine_paper():
    """Wine Quality; loss/aggregation as described in the paper."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/wine.csv',
            shuffle_path='data/wine.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.009,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=1.,
            epochs=700,
            batch_size=100,
            loss_func=qd_paper_loss,
            alpha=0.05,
            soften=160.,
            lambda_=26.,
            print_frequency=10
        )
    )
@ex.named_config
def yacht_paper():
    """Yacht Hydrodynamics; loss/aggregation as described in the paper."""
    config = dict(
        dataset=ShuffledDataset(
            data_path='data/yacht.csv',
            shuffle_path='data/yacht.npy',
            standardize=True,
            shuffle=True
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=20,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[50],
            optimizer=Adam,
            learning_rate=0.009,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.98,
            epochs=2500,
            batch_size=100,
            loss_func=qd_paper_loss,
            alpha=0.05,
            soften=160.,
            lambda_=16.,
            print_frequency=10
        )
    )
@ex.named_config
def year_paper():
    """YearPredictionMSD (fixed split, single run); paper loss/aggregation."""
    config = dict(
        dataset=FileDataset(
            file_path='data/yearmsd.csv',
            standardize=True,
            shuffle=False
        ),
        split=dict(
            train_size=0.9,
            test_size=0.1
        ),
        num_runs=1,
        method=PiEnsemble,
        hyper_params=dict(
            ensemble_size=5,
            aggreg_func=[sem_aggreg, std_aggreg],
            hidden_size=[100],
            optimizer=Adam,
            learning_rate=0.005,
            scheduler=ExponentialDecay,
            decay_steps=50.,
            decay_rate=0.999,
            epochs=100,
            batch_size=1000,
            loss_func=qd_paper_loss,
            retry_on_crossing=False,
            alpha=0.05,
            soften=160.,
            lambda_=15.,
            print_frequency=10
        )
    )
# ------------------------------------------------------------------------------
@ex.capture
def execute(executor, config, seed):
    # Arguments are injected from the active configuration by @ex.capture.
    return executor.run(**config, seed=seed)
@ex.automain
def main():
    """Entry point: run the pipeline with the currently selected named config."""
    return execute()
| [
"tarik@salem.cz"
] | tarik@salem.cz |
6389cd069b984d4e989a8c114236bd598cef97a2 | a89dfda3732eb73863b3e2fb1ebb46f1cb40973a | /txweb/tests/test_util_basic_sanitize_render_output.py | 3ea1634b5077da8b2f699e5319c384c2d49cc0f1 | [
"MIT"
] | permissive | devdave/txWeb | 543ccb7be0671a5e83959bb7cfc8e7804f04a74a | e447fbefd16134cb2f83323c04c20c41638d7da3 | refs/heads/master | 2022-12-15T18:11:50.880675 | 2021-03-24T18:48:16 | 2021-03-24T18:48:16 | 2,116,693 | 1 | 0 | MIT | 2022-12-08T04:28:41 | 2011-07-28T03:55:43 | Python | UTF-8 | Python | false | false | 568 | py |
import pytest
from txweb.util.basic import sanitize_render_output
from twisted.web.server import NOT_DONE_YET
from twisted.internet.defer import Deferred
def test_full_suite_coverage():
    """sanitize_render_output must normalise every supported return type."""
    # Plain and byte strings come back as bytes.
    assert sanitize_render_output("Foo") == b"Foo"
    assert sanitize_render_output(b"Foo") == b"Foo"
    # Tuples are rejected outright.
    with pytest.raises(RuntimeError):
        sanitize_render_output(("Foo",))
    # NOT_DONE_YET and Deferreds both signal asynchronous completion.
    assert sanitize_render_output(NOT_DONE_YET) == NOT_DONE_YET
    assert sanitize_render_output(Deferred()) == NOT_DONE_YET
    # Other objects are stringified and encoded.
    assert sanitize_render_output(123) == b"123"
"devdave@ominian.net"
] | devdave@ominian.net |
516d3f814d2b9f1b24489e79ddf13170813e41b0 | 9f4223e760430841df47cf9ce26eae95dc5f9055 | /hw2/cs285/agents/pg_agent.py | a7549309f5be677197e50e3b157460a4eecae97b | [] | no_license | 1528226147/CS285 | d6ebcb4545cb25422eecc4b0a9da63575cbd8752 | a0de2d158e61aeaada7cd15b3af1a58723cc86e8 | refs/heads/master | 2022-12-07T12:36:35.941061 | 2019-10-01T01:25:20 | 2019-10-01T01:25:20 | 211,963,114 | 0 | 1 | null | 2022-11-21T21:24:53 | 2019-09-30T21:44:49 | Python | UTF-8 | Python | false | false | 9,154 | py | import numpy as np
from .base_agent import BaseAgent
from cs285.policies.MLP_policy import MLPPolicyPG
from cs285.infrastructure.replay_buffer import ReplayBuffer
from cs285.infrastructure.utils import *
class PGAgent(BaseAgent):
    def __init__(self, sess, env, agent_params):
        """Policy-gradient agent: an MLP actor/policy plus a replay buffer.

        Args:
            sess: session handle passed through to the actor
                  (presumably a TensorFlow session - confirm with MLPPolicyPG).
            env: the environment instance (stored for later use).
            agent_params: dict of hyper-parameters; must contain 'gamma',
                'standardize_advantages', 'nn_baseline', 'reward_to_go',
                'ac_dim', 'ob_dim', 'n_layers', 'size', 'discrete',
                'learning_rate'.
        """
        super(PGAgent, self).__init__()

        # init vars
        self.env = env
        self.sess = sess
        self.agent_params = agent_params
        self.gamma = self.agent_params['gamma']  # discount factor
        self.standardize_advantages = self.agent_params['standardize_advantages']
        self.nn_baseline = self.agent_params['nn_baseline']  # learn a value baseline?
        self.reward_to_go = self.agent_params['reward_to_go']

        # actor/policy
        # NOTICE that we are using MLPPolicyPG (hw2), instead of MLPPolicySL (hw1)
        # which indicates similar network structure (layout/inputs/outputs),
        # but differences in training procedure
        # between supervised learning and policy gradients
        self.actor = MLPPolicyPG(sess,
                                 self.agent_params['ac_dim'],
                                 self.agent_params['ob_dim'],
                                 self.agent_params['n_layers'],
                                 self.agent_params['size'],
                                 discrete=self.agent_params['discrete'],
                                 learning_rate=self.agent_params['learning_rate'],
                                 nn_baseline=self.agent_params['nn_baseline']
                                 )

        # replay buffer
        self.replay_buffer = ReplayBuffer(1000000)
    def train(self, obs, acs, rews_list, next_obs, terminals):
        """
        Training a PG agent refers to updating its actor using the given observations/actions
        and the calculated qvals/advantages that come from the seen rewards.

        ----------------------------------------------------------------------------------

        Recall that the expression for the policy gradient PG is

            PG = E_{tau} [sum_{t=0}^{T-1} grad log pi(a_t|s_t) * (Q_t - b_t )]

            where
            tau=(s_0, a_0, s_1, a_1, s_2, a_2, ...) is a trajectory,
            Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
            b_t is a baseline which may depend on s_t,
            and (Q_t - b_t ) is the advantage.

        Thus, the PG update performed by the actor needs (s_t, a_t, q_t, adv_t),
        and that is exactly what this function provides.

        ----------------------------------------------------------------------------------

        Returns the value the actor's update returns (its training loss).
        NOTE(review): next_obs and terminals are accepted but unused here.
        """

        # step 1: calculate q values of each (s_t, a_t) point,
        # using rewards from that full rollout of length T: (r_0, ..., r_t, ..., r_{T-1})
        q_values = self.calculate_q_vals(rews_list)

        # step 2: calculate advantages that correspond to each (s_t, a_t) point
        advantage_values = self.estimate_advantage(obs, q_values)

        # step 3: perform the actual PG update step on the actor/policy
        loss = self.actor.update(obs, acs, qvals=q_values, adv_n=advantage_values)

        return loss
    def calculate_q_vals(self, rews_list):
        """
        Monte Carlo estimation of the Q function.

        arguments:
            rews_list: length: number of sampled rollouts
                Each element corresponds to a particular rollout,
                and contains an array of the rewards for every step of that particular rollout

        returns:
            q_values: shape: (sum/total number of steps across the rollouts)
                Each entry corresponds to the estimated q(s_t,a_t) value
                of the corresponding obs/ac point at time t.
        """

        # Case 1: trajectory-based PG - every step of a rollout shares the same
        # full-trajectory return: q(s_t, a_t) = sum_{t'=0}^{T-1} gamma^t' r_{t'}
        if not self.reward_to_go:
            q_values = np.concatenate([self._discounted_return(r) for r in rews_list])

        # Case 2: reward-to-go PG - only rewards from t onwards count:
        # q(s_t, a_t) = sum_{t'=t}^{T-1} gamma^(t'-t) * r_{t'}
        else:
            q_values = np.concatenate([self._discounted_cumsum(r) for r in rews_list])

        return q_values
def estimate_advantage(self, obs, q_values):
    """Compute advantages, optionally subtracting a learned baseline
    from the estimated Q values and optionally standardizing the result.
    """
    if not self.nn_baseline:
        # No baseline: the advantage is just (a copy of) Q.
        advantages = q_values.copy()
    else:
        # The baseline network predicts normalized values, so rescale its
        # output to the mean/std of the current Q estimates before
        # subtracting it.
        raw_baseline = self.actor.run_baseline_prediction(obs)
        baseline = raw_baseline * np.std(q_values) + np.mean(q_values)
        advantages = q_values - baseline
    if self.standardize_advantages:
        # Shift/scale to zero mean, unit variance (epsilon avoids /0).
        adv_mean = np.mean(advantages)
        adv_std = np.std(advantages)
        advantages = (advantages - adv_mean) / (adv_std + 1e-8)
    return advantages
#####################################################
#####################################################
def add_to_replay_buffer(self, paths):
    """Append freshly collected rollouts (`paths`) to the replay buffer."""
    self.replay_buffer.add_rollouts(paths)
def sample(self, batch_size):
    """Return the `batch_size` most recent transitions.

    Rewards are kept grouped per rollout (concat_rew=False) so whole
    trajectories are available for the Monte Carlo return computation.
    """
    return self.replay_buffer.sample_recent_data(batch_size, concat_rew=False)
#####################################################
################## HELPER FUNCTIONS #################
#####################################################
# TODO: implement this function
def _discounted_return(self, rewards):
"""
Helper function
Input: a list of rewards {r_0, r_1, ..., r_t', ... r_{T-1}} from a single rollout of length T
Output: list where each index t contains sum_{t'=0}^{T-1} gamma^t' r_{t'}
note that all entries of this output are equivalent
because each index t is a sum from 0 to T-1 (and doesnt involve t)
"""
# 1) create a list of indices (t'): from 0 to T-1
indices = list(range(len(rewards)))
# 2) create a list where the entry at each index (t') is gamma^(t')
discounts = np.power(self.gamma, indices)
# 3) create a list where the entry at each index (t') is gamma^(t') * r_{t'}
discounted_rewards = np.multiply(discounts, rewards)
# 4) calculate a scalar: sum_{t'=0}^{T-1} gamma^(t') * r_{t'}
sum_of_discounted_rewards = np.sum(discounted_rewards)
# 5) create a list of length T-1, where each entry t contains that scalar
list_of_discounted_returns = [sum_of_discounted_rewards] * len(rewards)
return list_of_discounted_returns
def _discounted_cumsum(self, rewards):
"""
Input:
a list of length T
a list of rewards {r_0, r_1, ..., r_t', ... r_{T-1}} from a single rollout of length T
Output:
a list of length T
a list where the entry in each index t is sum_{t'=t}^{T-1} gamma^(t'-t) * r_{t'}
"""
all_discounted_cumsums = []
# for loop over steps (t) of the given rollout
for start_time_index in range(len(rewards)):
# 1) create a list of indices (t'): goes from t to T-1
indices = list(range(start_time_index, len(rewards)))
# 2) create a list where the entry at each index (t') is gamma^(t'-t)
discounts = np.power(self.gamma, np.subtract(indices, start_time_index))
# 3) create a list where the entry at each index (t') is gamma^(t'-t) * r_{t'}
# Hint: remember that t' goes from t to T-1, so you should use the rewards from those indices as well
discounted_rtg = np.multiply(discounts, rewards[start_time_index : ])
# 4) calculate a scalar: sum_{t'=t}^{T-1} gamma^(t'-t) * r_{t'}
sum_discounted_rtg = np.sum(discounted_rtg)
# appending each of these calculated sums into the list to return
all_discounted_cumsums.append(sum_discounted_rtg)
list_of_discounted_cumsums = np.array(all_discounted_cumsums)
return list_of_discounted_cumsums | [
"yutong_lu@outlook.com"
] | yutong_lu@outlook.com |
2ada1e18ee1ecf3884d4c48bfedd0b0bdcec20f8 | e7c8debef86f0da831e9841ef39155569cbd049c | /shop/migrations/0005_auto_20190403_0859.py | 5d47d615ab46cd65a47bd93f14e760b9dcfff0f2 | [] | no_license | Izigraim/ISP-4sem-2019 | 3a2875719a3d32a5d3479d1dc54b69c6cdd4e590 | b9abb593b00940dc17b7dc6bcc475734f81bf44c | refs/heads/master | 2020-08-20T07:14:08.094188 | 2019-10-18T10:15:25 | 2019-10-18T10:15:25 | 215,995,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | # Generated by Django 2.1.7 on 2019-04-03 05:59
from django.db import migrations, models
import shop.models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the shop app (Django 2.1.7).

    Adds a stock counter to Product and relaxes the slug/image fields.
    """

    dependencies = [
        ('shop', '0004_auto_20190401_2308'),
    ]

    operations = [
        # New integer-valued stock counter for products, defaulting to 0.
        migrations.AddField(
            model_name='product',
            name='count',
            field=models.DecimalField(decimal_places=0, default=0, max_digits=9),
        ),
        # Category slugs may now be left blank — presumably auto-filled on
        # save elsewhere; TODO confirm against shop.models.
        migrations.AlterField(
            model_name='category',
            name='slug',
            field=models.SlugField(blank=True),
        ),
        # Product images are uploaded via the shop.models.image_folder callable.
        migrations.AlterField(
            model_name='product',
            name='image',
            field=models.ImageField(upload_to=shop.models.image_folder),
        ),
        # Product slugs may also be blank now.
        migrations.AlterField(
            model_name='product',
            name='slug',
            field=models.SlugField(blank=True),
        ),
    ]
| [
"z7635332@gmail.com"
] | z7635332@gmail.com |
b2d29bf47293851f2c11249cc1c9538b905cf9e4 | 8fbf7054bc8676eb6754e80ead566ac10277af76 | /aula/aula010.py | 32227395bc3c8906833d3b8fa685e2bf71a61fd2 | [
"MIT"
] | permissive | henriquekirchheck/Curso-em-Video-Python | 5eb4c97ed6320fcd100030bda718de732430244a | 1a29f68515313af85c8683f626ba35f8fcdd10e7 | refs/heads/main | 2023-06-06T16:25:48.018420 | 2021-07-04T17:46:28 | 2021-07-04T17:46:28 | 379,697,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | from time import sleep
# Aula N°10 (Lesson 10): practicing if/elif/else conditionals.
print("Aula N°10 \n")
sleep(0.2)  # short pause so the header is visible before the prompts
# Disabled example 1 (kept as a block string): classify a car as new/old
# by its age in years.
""" car = int(input('Anos do carro: '))
if(car <= 3):
    print('Carro Novo')
elif(car > 3):
    print('Carro Velho')
print('Carro Novo' if car <= 3 else 'Carro Velho') """
# Disabled example 2: greet the user depending on the typed name.
""" nome = str(input('Qual o seu nome?: '))
if nome == 'Gustavo':
    print('Que nome lindo vc tem')
elif nome == 'sus' or 'SUS' or 'Sus':
    print('AMOGUS')
else:
    print('Seu nome é tão normal')
print('Bom dia {}!' .format(nome)) """
# Active exercise: read two grades, print the average (2 decimals) and a
# pass/fail message; 6.0 is the passing threshold.
n1 = float(input('Digite a primeira nota: '))
n2 = float(input('Digite a segunda nota: '))
m = (n1 + n2) / 2
print('A sua media foi {:.2f}' .format(m))
if m >= 6.0:
    print('Sua media foi boa!')
else:
    print('Sua media foi ruim!')
| [
"86362827+henriquekirchheck@users.noreply.github.com"
] | 86362827+henriquekirchheck@users.noreply.github.com |
3f912421cf12848e776f7f30387961e82a570848 | ef32b87973a8dc08ba46bf03c5601548675de649 | /pytglib/api/functions/get_web_app_url.py | fd431d0848611e166908d7d79bd1b425fdebbbee | [
"MIT"
] | permissive | iTeam-co/pytglib | 1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721 | d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5 | refs/heads/master | 2022-07-26T09:17:08.622398 | 2022-07-14T11:24:22 | 2022-07-14T11:24:22 | 178,060,880 | 10 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py |
from ..utils import Object
class GetWebAppUrl(Object):
    """
    Returns an HTTPS URL of a Web App to open after a
    keyboardButtonTypeWebApp button is pressed.

    Attributes:
        ID (:obj:`str`): ``GetWebAppUrl``

    Args:
        bot_user_id (:obj:`int`):
            Identifier of the target bot
        url (:obj:`str`):
            The URL from the keyboardButtonTypeWebApp button
        theme (:class:`telegram.api.types.themeParameters`):
            Preferred Web App theme; pass null to use the default theme

    Returns:
        HttpUrl

    Raises:
        :class:`telegram.Error`
    """

    ID = "getWebAppUrl"

    def __init__(self, bot_user_id, url, theme, extra=None, **kwargs):
        self.extra = extra
        self.bot_user_id = bot_user_id  # int
        self.url = url  # str
        self.theme = theme  # ThemeParameters

    @staticmethod
    def read(q: dict, *args) -> "GetWebAppUrl":
        # Deserialize the nested theme object, then build the request.
        return GetWebAppUrl(
            q.get('bot_user_id'),
            q.get('url'),
            Object.read(q.get('theme')),
        )
| [
"arshshia@gmail.com"
] | arshshia@gmail.com |
f6cf5068a24ed049337e31291ee753906a44bb71 | 0fad8a8dfcbdb2ee69b2d41c321f23f95f68fda7 | /website/db/web_scraper.6fb6fffb06cd.py | 5650c647dc9d35258a8b9b70ac4ca31cfb24a561 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | Madsomma/movie-recommender-form | 7a717c3923e2647dbbeef74f0a9687e696c5a1eb | d2ace8eb840794ac59f89ca24b6a53cf9e9b7069 | refs/heads/main | 2023-02-03T16:05:36.369512 | 2020-12-27T22:06:15 | 2020-12-27T22:06:15 | 324,837,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | import pandas as pd
from lxml import html
import requests
import json
df = pd.read_csv("limers_explanations-fm_regressor-k_50-movie_genres.csv", sep="\t")
links_df = pd.read_csv("links.csv", dtype={'imdbId': object})
movies_info = pd.read_csv("parsed_movie_dataset.csv")
new_df = df.merge(links_df, left_on='item_id', right_on='movieId')
new_df = new_df.merge(movies_info, left_on="item_id", right_on="Movie Id")
# test
test = new_df.head(1)
# test = new_df.head(10)
test = test.drop(["local_prediction", "tmdbId", "movieId", "Genre"], axis=1)
movie_ids = df["item_id"]
urls_poster = []
new_exp = []
for index, row in test.iterrows():
if index % 30 == 0:
print("Processing line {} out of {}.".format(index, new_df.shape[0]))
imdb_id = row["imdbId"]
url = 'https://www.imdb.com/title/tt' + str(imdb_id)
page = requests.get(url)
tree = html.fromstring(page.content)
poster = tree.xpath('//div[@class="poster"]/a/img/@src')
urls_poster.append(poster[0])
raw_exp = row["explanations"]
print(repr(raw_exp))
print(type(raw_exp))
dic = json.loads(raw_exp)
print(repr(dic))
keys_sorted = sorted(dic, key=dic.get, reverse=True)[:3]
new_dict = {}
for key in keys_sorted:
new_dict[key] = dic[key]
new_exp.append(new_dict)
test = test.drop(["explanations"], axis=1)
test["urls"] = urls_poster
test["explanations"] = new_exp
print(test)
# test.to_csv("recommendations_db_1.csv", sep="\t", index=None)
| [
"48261091+Madsomma@users.noreply.github.com"
] | 48261091+Madsomma@users.noreply.github.com |
c7a84219541a207b77a6abe222131259e8320dcf | 18a79067223932c2f7aa6ff6b81d0b3f36169db2 | /atcoder/abc178/A.py | fbe81ed981719c0a616b1b4817d553d4699e8bb1 | [] | no_license | aadiupadhyay/CodeForces | 894b0e5faef73bfd55a28c2058fb0ca6f43c69f9 | 76dac4aa29a2ea50a89b3492387febf6515cf43e | refs/heads/master | 2023-04-12T17:58:52.733861 | 2021-05-07T20:08:00 | 2021-05-11T20:07:11 | 330,149,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | from sys import stdin,stdout
# Competitive-programming I/O shorthands (read from stdin):
st=lambda:list(stdin.readline().strip())          # line as list of chars
li=lambda:list(map(int,stdin.readline().split())) # line as list of ints
mp=lambda:map(int,stdin.readline().split())       # line as int iterator
inp=lambda:int(stdin.readline())                  # line as single int
pr=lambda n: stdout.write(str(n)+"\n")            # print with newline
mod=1000000007  # common contest modulus (unused here)
def solve():
    # AtCoder ABC178 A: output 1 if n is even... i.e. (n+1) % 2 flips 0/1.
    n=inp()
    print((n+1)%2)
for _ in range(1):
solve()
| [
"upadhyay.aaditya2001@gmail.com"
] | upadhyay.aaditya2001@gmail.com |
b9d4ada59c0445c25063ad9a12b391523bb48422 | 879e930f576576448bfcea39e55fb46bd48ce270 | /raw_feature/onset_strength_v2.py | 3237c41d8a48b23f4fda6c89cde418cb0b921265 | [] | no_license | leixiaolin/smartMusic_v2 | e1387b5b0b2b4d1f5a29cb350b6f52e4b96ff182 | 463b908f2f1a761628be99eae6e80a0c5221a65b | refs/heads/master | 2021-10-07T17:35:52.353380 | 2021-09-29T02:46:52 | 2021-09-29T02:46:52 | 170,714,545 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,741 | py | # First, load some audio and plot the spectrogram
import librosa
import matplotlib.pyplot as plt
import librosa.display
import numpy as np
from create_base import *
#filename = 'F:/项目/花城音乐项目/样式数据/ALL/旋律/2.2MP3/旋律一(8)(90).wav'
#filename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/节奏/节奏2林(20).wav'
#filename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/节奏/节奏4卢(65).wav'
#filename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/节奏/节奏1_40227(100).wav'
#filename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/节奏/节奏4_40323(90).wav'
#filename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/节奏/节奏3-04(95).wav'
# Path of the audio clip under analysis (Windows-local rhythm sample).
filename = 'F:/项目/花城音乐项目/样式数据/2.27MP3/节奏/节奏2.4(90).wav'
#filename = 'F:/项目/花城音乐项目/参考代码/tensorflow_models_nets-master/raw_data/onsets/test/A\节奏1-02(90).wav'
def load_and_trim(path):
    """Load an audio file and trim low-energy leading/trailing frames.

    Keeps the span between the first and last frame whose RMS energy is
    at least 1/5 of the peak; returns an empty signal if no frame
    qualifies.

    NOTE(review): `librosa.feature.rmse` was renamed `rms` in newer
    librosa releases — this code pins an older librosa; confirm before
    upgrading.
    """
    audio, sr = librosa.load(path)
    energy = librosa.feature.rmse(audio)
    # Frames whose energy reaches 20% of the maximum.
    frames = np.nonzero(energy >= np.max(energy) / 5)
    indices = librosa.core.frames_to_samples(frames)[1]
    # Slice between first/last energetic sample; empty slice when none.
    audio = audio[indices[0]:indices[-1]] if indices.size else audio[0:0]
    return audio, sr
#y, sr = librosa.load(filename)
y,sr = load_and_trim(filename)
# Magnitude spectrogram and the matching frame times for the x-axis.
D = np.abs(librosa.stft(y))
times = librosa.frames_to_time(np.arange(D.shape[1]))
plt.figure()
ax1 = plt.subplot(3, 1, 1)
librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
                         y_axis='log', x_axis='time')
plt.title('Power spectrogram')
ax1 = plt.subplot(3, 1, 2)
# Construct a standard onset function
librosa.display.waveplot(y, sr=sr)
onset_env = librosa.onset.onset_strength(y=y, sr=sr)
plt.subplot(3, 1, 3, sharex=ax1)
# Three onset-strength variants stacked with +2/+1/+0 offsets, each
# normalized by its own maximum.
plt.plot(times, 2 + onset_env / onset_env.max(), alpha=0.8,label='Mean (mel)')
#plt.plot(times, 2 + onset_env, alpha=0.8,label='Mean (mel)')
# Median aggregation, and custom mel options
# NOTE(review): the label says 'Median' but aggregate=np.mean is passed —
# probably meant np.median; confirm.
onset_env = librosa.onset.onset_strength(y=y, sr=sr,
                                         aggregate=np.mean,
                                         fmax=8000, n_mels=256)
plt.plot(times, 1 + onset_env / onset_env.max(), alpha=0.8,label='Median (custom mel)')
#plt.plot(times, 1 + onset_env, alpha=0.8,label='Median (custom mel)')
# Constant-Q spectrogram instead of Mel
onset_env = librosa.onset.onset_strength(y=y, sr=sr,
                                         feature=librosa.cqt)
plt.plot(times, onset_env / onset_env.max(), alpha=0.8,label='Mean (CQT)')
#plt.plot(times, onset_env, alpha=0.8,label='Mean (CQT)')
plt.legend(frameon=True, framealpha=0.75)
plt.ylabel('Normalized strength')
plt.yticks([])
plt.axis('tight')
plt.tight_layout()
plt.show() | [
"498900619@qq.com"
] | 498900619@qq.com |
ec8e7dafe20595ebc94fed5089fa5fc70c148552 | 6147d3d059a048be57aaabe3519551ed4bc305ec | /config/management/commands/fs2import.py | ed4cb19b50f25028e6c1954d36d1fe16bcc534f1 | [
"MIT"
] | permissive | a-mere-peasant/MangAdventure | a8f7fdfddf5ae65e645b0e0e0d197f2b0033bc8d | afbcdb5ab68bfc801550c8383568f7265e70b5ab | refs/heads/master | 2020-08-06T13:38:59.062119 | 2019-10-05T12:22:53 | 2019-10-05T12:22:53 | 212,808,131 | 0 | 0 | MIT | 2019-10-04T13:07:47 | 2019-10-04T12:18:27 | null | UTF-8 | Python | false | false | 4,613 | py | from os.path import abspath, join
from xml.etree import cElementTree as et
from django.core.files import File
from django.core.management import BaseCommand
from groups.models import Group
from reader.models import Chapter, Page, Series
def _get_element(tables, name):
return list(filter(
lambda t: t.attrib['name'].endswith(name), tables
))
def _get_column(table, name):
text = table.find('column[@name="%s"]' % name).text
return text if text is not None else ''
def _sort_children(tables, name):
    """Sort elements by the (string) value of the given column."""
    return sorted(tables, key=lambda p: _get_column(p, name))
class Command(BaseCommand):
    """Management command that imports a FoolSlide2 installation.

    Reads the FS2 XML dump, then bulk-creates Groups, Series, Chapters
    (with their group links) and Pages, copying cover/page images from
    the FS2 content tree on disk.
    """

    help = 'Imports data from FoolSlide2.'

    def add_arguments(self, parser):
        """Register the two required positional arguments."""
        parser.add_argument(
            'root', type=str,
            help='The path to the root directory of the FS2 installation.'
        )
        parser.add_argument(
            'data', type=str,
            help="The path to FS2's exported data (in XML format)."
        )

    def handle(self, *args, **options):
        """Run the import: groups -> series -> chapters -> pages."""
        root = abspath(options['root'])
        data = abspath(options['data'])
        # All <table> rows of the XML dump; filtered by suffix below.
        tables = et.parse(data).findall('database/table')
        # FS2 stores images under <root>/content/comics/<series>/<chapter>.
        content = join(root, 'content', 'comics')
        # (id, directory) pairs remembered so chapters/pages can locate
        # their parent's folder on disk.
        directories = {'series': [], 'chapters': []}
        elements = {
            'series': _get_element(tables, 'comics'),
            'chapters': _get_element(tables, 'chapters'),
            'pages': _get_element(tables, 'pages'),
            'groups': _get_element(tables, 'teams')
        }
        # --- Groups (FS2 "teams") ---
        all_groups = []
        for g in elements['groups']:
            group = Group(
                id=_get_column(g, 'id'),
                name=_get_column(g, 'name'),
                website=_get_column(g, 'url'),
                twitter=_get_column(g, 'twitter'),
                irc=_get_column(g, 'irc')
            )
            all_groups.append(group)
        Group.objects.bulk_create(all_groups)
        # --- Series (FS2 "comics"), incl. cover thumbnails ---
        all_series = []
        for s in elements['series']:
            slug = _get_column(s, 'stub')
            series = Series(
                id=_get_column(s, 'id'), slug=slug,
                title=_get_column(s, 'name'),
                description=_get_column(s, 'description'),
            )
            thumb = _get_column(s, 'thumbnail')
            # FS2 folder naming convention: <stub>_<uniqid>.
            series_dir = join(content, '%s_%s' % (
                slug, _get_column(s, 'uniqid')
            ))
            cover = join(series_dir, 'thumb_%s' % thumb)
            with open(cover, 'rb') as f:
                series.cover.save(thumb, File(f), save=False)
            all_series.append(series)
            directories['series'].append(
                (_get_column(s, 'id'), series_dir)
            )
        Series.objects.bulk_create(all_series)
        # --- Chapters, plus chapter<->group M2M rows ---
        all_chapters = []
        chapter_groups = []
        groups_through = Chapter.groups.through
        for c in elements['chapters']:
            cid = _get_column(c, 'id')
            sid = _get_column(c, 'comic_id')
            # FS2 splits the number into chapter/subchapter; join them as
            # "<chapter>.<subchapter>" (missing parts default to 0).
            number = float('%s.%s' % (
                _get_column(c, 'chapter') or '0',
                _get_column(c, 'subchapter') or '0'
            ))
            volume = int(_get_column(c, 'volume') or '0')
            chapter = Chapter(
                id=cid, series_id=sid,
                title=_get_column(c, 'name'),
                volume=volume, number=number
            )
            gid = _get_column(c, 'team_id')
            if gid:
                chapter_groups.append(
                    groups_through(chapter_id=cid, group_id=gid)
                )
            # Locate the parent series' directory recorded above.
            _dir = next(d[1] for d in directories['series'] if d[0] == sid)
            directories['chapters'].append((
                cid, join(_dir, '%s_%s' % (
                    _get_column(c, 'stub'), _get_column(c, 'uniqid')
                ))
            ))
            all_chapters.append(chapter)
        Chapter.objects.bulk_create(all_chapters)
        groups_through.objects.bulk_create(chapter_groups)
        # --- Pages, numbered 1..N per chapter in filename order ---
        all_pages = []
        page_numbers = {}
        for p in _sort_children(elements['pages'], 'filename'):
            pid = _get_column(p, 'id')
            cid = _get_column(p, 'chapter_id')
            page_numbers[cid] = page_numbers.get(cid, 0) + 1
            page = Page(id=pid, chapter_id=cid, number=page_numbers[cid])
            _dir = next(d[1] for d in directories['chapters'] if d[0] == cid)
            fname = _get_column(p, 'filename')
            with open(join(_dir, fname), 'rb') as f:
                page.image.save(fname, File(f), save=False)
            all_pages.append(page)
        Page.objects.bulk_create(all_pages)
| [
"chronobserver@disroot.org"
] | chronobserver@disroot.org |
075689efd3502a5351c7f5e3c9accfde981372c2 | 4491f991a6c116683aa481b647440a2165654621 | /src/run_actions.py | d648027f32c7d07f96608df1c595e1a2fe20d28f | [
"Apache-2.0"
] | permissive | zkcpku/stackoverflow-encourages-cheating | 68de783c01ca405a7d0a181fda72b1ddc53a4983 | 425fa92e7defc783d34f4bd3366cd96990d3c037 | refs/heads/main | 2023-07-08T08:16:50.773288 | 2021-08-10T21:23:34 | 2021-08-10T21:23:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,750 | py | import json
from pathlib import Path
import logging
from typing import Dict, List
import pandas as pd
import numpy as np
from collections import defaultdict
import re
from .evaluation.seq_to_seq import CodeGenerationEvaluator
from transformers import AutoTokenizer
import torch
import ast
import shutil
import sys
import textwrap
from tqdm import tqdm
# Matches any "<token> " marker (e.g. "<code> ", "<console_in> ") that the
# preprocessed SO bodies embed; group 1 captures the tag itself.
markers = re.compile(r'(<\w+>) ')
# Matches the "<code_block>" marker specifically (no trailing space).
code_block_marker = re.compile(r'(<code_block>)')
# Public API of this module.
__all__ = [
    'getExperimentTestResults',
    'getGenerated',
    'getPredsStats',
    'cleanDataset'
]
def combineLogHistory(log_history):
    """Merge a trainer `log_history` list into one dict per epoch.

    Entries are keyed by ``int(entry['epoch'])`` so float epoch values
    (1.0, or fractional mid-epoch logs such as 0.5) fold into the same
    integer bucket that callers index with ``logs[len(logs)]``.

    Bug fix: the old code created the bucket under the raw (possibly
    float) epoch key but updated ``out[int(e['epoch'])]``, which raised
    KeyError for fractional epochs like 0.5.

    :param log_history: list of dicts, each containing an 'epoch' key.
    :returns: {int epoch -> merged log entries for that epoch}
    """
    out = {}
    for entry in log_history:
        epoch = int(entry['epoch'])
        out.setdefault(epoch, {}).update(entry)
    return out
def getSingleTestResults(test_dict: Dict, shorthand: str, issue_logger) -> Dict:
    """Extract the final-epoch runtime and the mean of every test metric
    from a single experiment-result dictionary.

    Logs through `issue_logger` and returns {} when the expected keys
    ('test', 'stats', per-metric 'mean') are missing.
    """
    if 'test' not in test_dict:
        issue_logger.critical(f"Could not find stats for 'test' in {shorthand}")
        return {}
    experiment_stats = {}
    # Older result files use 'train_state'; newer ones use 'train'.
    if 'train_state' in test_dict:
        train_state = test_dict['train_state']
    else:
        train_state = test_dict['train']
    epoch_logs = combineLogHistory(train_state['log_history'])
    final_log = epoch_logs[len(epoch_logs)]
    experiment_stats['Runtime'] = final_log['train_runtime']
    stats_for_test = test_dict['test']
    if 'stats' not in stats_for_test:
        issue_logger.critical(f"Missing stats in {shorthand}['test']")
        return {}
    for raw_name, stat_values in stats_for_test['stats'].items():
        # Strip the leading "<split>_" prefix from metric names like
        # "test_bleu" -> "bleu"; single-word names pass through.
        name_parts = raw_name.split('_')
        metric = '_'.join(name_parts[1:]) if len(name_parts) > 1 else raw_name
        if 'mean' not in stat_values:
            issue_logger.critical(f"Missing mean for {metric} in {shorthand}")
            continue
        experiment_stats[metric] = stat_values['mean']
    return experiment_stats
def getExperimentTestResults(
        data_dir: Path,
        logger: logging.Logger,
        issue_logger: logging.Logger,
        file_name_shorthand: Dict[str, str] = None,
        keys_for_latex: Dict[str, str] = None,
        is_cv: bool = False):
    """Aggregate test/CV results from every result JSON under `data_dir`,
    write them to single_test_results.csv, and (for CV runs) print LaTeX
    table rows of mean±std cells.

    :param data_dir: directory containing 'experiment_results' or
        'cv_results' subfolders of *.json files.
    :param file_name_shorthand: maps a file stem to (shorthand, category);
        files without an entry are skipped with a warning.
    :param keys_for_latex: metric names to render as LaTeX columns (CV only).
    :param is_cv: True for cross-validation result files (different schema).
    :returns: {"category|shorthand" -> stats dict}
    """
    if keys_for_latex is None:
        keys_for_latex = {}
    if file_name_shorthand is None:
        file_name_shorthand = {}
    out = {}
    # NOTE(review): `evaluator` is constructed but never used in this
    # function — likely leftover; confirm before removing.
    evaluator = CodeGenerationEvaluator(
        AutoTokenizer.from_pretrained('facebook/bart-base'),
        torch.device('cpu'),
        smooth_bleu=True
    )
    # Get all of the data files in the directory.
    for fn in data_dir.joinpath(
            'experiment_results' if not is_cv else 'cv_results').glob('*.json'):
        shorthand, cat = file_name_shorthand.get(fn.stem, (None, None))
        if not shorthand:
            issue_logger.warning(f"Could not find '{fn}' in shorthand")
            continue
        logger.info(f"Reading {cat}:'{shorthand}'")
        data = json.loads(fn.read_text('utf-8'))
        if is_cv:
            # CV files hold one entry per fold; aggregate mean/std across
            # folds for runtime and code-validity rates.
            all_runtimes = []
            all_valid = []
            all_oracle_valid = []
            for test_run in data['data']:
                logs = combineLogHistory(test_run['train']['log_history'])
                last_log = logs[len(logs)]
                all_runtimes.append(last_log['train_runtime'])
                if 'preds' in test_run:
                    # "valid" counts the top prediction only; "oracle"
                    # counts a hit if ANY beam parses as Python.
                    oracle_has_valid_code = 0
                    has_valid_code = 0
                    for prediction_info in tqdm(test_run['preds'], file=sys.stdout,
                                                desc=f"{cat}:{shorthand}"):
                        preds = prediction_info[-1]
                        has_valid_code += 1 if isValidCode(preds[0]) else 0
                        oracle_has_valid_code += 1 if any(
                            isValidCode(pred) for pred in preds) else 0
                    all_oracle_valid.append(oracle_has_valid_code / len(test_run['preds']) * 100)
                    all_valid.append(has_valid_code / len(test_run['preds']) * 100)
                else:
                    all_oracle_valid.append(0)
                    all_valid.append(0)
            experiment_stats = {
                'Runtime': np.mean(all_runtimes),
                'Runtime_std': np.std(all_runtimes),
                'Oracle Valid': np.mean(all_oracle_valid),
                'Oracle Valid_std': np.std(all_oracle_valid),
                'Valid': np.mean(all_valid),
                'Valid_std': np.std(all_valid),
            }
            for metric, values in data['bleu_stats'].items():
                experiment_stats[metric] = values['mean']
                experiment_stats[f"{metric}_std"] = values['std']
            # Ensure every LaTeX column exists even if a metric is absent.
            for k in keys_for_latex.keys():
                if k not in experiment_stats:
                    experiment_stats[k] = 0
                    experiment_stats[f"{k}_std"] = 0
        else:
            experiment_stats = getSingleTestResults(data, shorthand, issue_logger)
        out[f"{cat}|{shorthand}"] = experiment_stats
    # Columns sorted with '-' pushed late via the 'z' substitution trick.
    # NOTE(review): raises IndexError if no result file matched.
    df = pd.DataFrame.from_dict(
        out,
        orient='index'
    ).reindex(
        sorted(
            list(out[list(out.keys())[0]].keys()),
            key=lambda x: x.replace('-', 'z')),
        axis=1
    )
    df.to_csv(data_dir.joinpath('single_test_results.csv'), sep=';')
    # Make the latex if it is cv
    if is_cv:
        for i, r in df.iterrows():
            line = []
            for v in keys_for_latex.keys():
                # \numwithstd{mean}{std} LaTeX macro per metric cell.
                commands = '\\numwithstd{'
                line.append(
                    commands
                    + f"{r[v]:0.2f}" + '}{'
                    + f"{r[v + '_std']:0.2f}" + "}"
                )
            print(f"{i}:")
            row_name = str(i).split('|')[-1]
            # Baseline row is flush-left; other rows are indented.
            add_indent = "\t\\tableind " if row_name != 'Baseline' else ''
            print(add_indent + f"{row_name} & {' & '.join(line)}" + '\\\\')
    return out
def getPredictionsFromExperiments(
        data_dir: Path,
        logger: logging.Logger,
        issue_logger: logging.Logger,
        file_name_shorthand: Dict[str, str] = None) -> Dict:
    """Collect per-example predictions from every experiment-result JSON
    under `data_dir`/experiment_results.

    :param file_name_shorthand: maps a file stem to (shorthand, category);
        files with no entry are skipped with a warning.
    :returns: {example_id -> {(category, shorthand) -> {'label', 'pred'}}}
    """
    shorthand_lookup = {} if file_name_shorthand is None else file_name_shorthand
    predictions = defaultdict(dict)
    file_count = 0
    for result_file in data_dir.joinpath('experiment_results').glob('*.json'):
        shorthand, cat = shorthand_lookup.get(result_file.stem, (None, None))
        if not shorthand:
            issue_logger.warning(f"Could not find '{result_file.stem}' in shorthand, skipping")
            continue
        logger.info(f"Reading {cat}:'{shorthand}'")
        payload = json.loads(result_file.read_text('utf-8'))
        if 'preds' not in payload:
            issue_logger.error(f"There are no predictions in the file '{result_file.stem}'")
            continue
        file_count += 1
        # Each prediction is [example_id, label, pred]; keep the label so
        # alignment can be sanity-checked later.
        for idx, entry in enumerate(payload['preds']):
            if len(entry) != 3:
                issue_logger.warning(f"{shorthand} has an incorrect prediction at index {idx}")
                continue
            example_id, label, pred = entry
            predictions[example_id][(cat, shorthand)] = {'label': label, 'pred': pred}
    logger.info(f"Found {len(predictions)} questions")
    return predictions
def cleanLine(line):
    """Render one marker-annotated line into plain-ish text.

    Walks the ``<tag> `` markers (module-level `markers` regex) left to
    right. The text between two markers is formatted according to the
    marker that OPENED it, so `prev_marker`/`prev_end` always lag one
    match behind the current one.
    """
    is_first_marker = True
    prev_marker = None
    prev_end = None
    out = ""

    def handleMarker(span_end):
        # Format line[prev_end:span_end] based on the marker that
        # preceded the span. With prev_end None (no marker seen yet),
        # the slice starts from the beginning of the line.
        span = line[prev_end:span_end]
        if prev_marker == "<code_block>":
            return '\n\tstart_block\n\t' + span
        elif prev_marker == '<code>':
            return '`' + span.strip() + '`'
        elif prev_marker == '<console_in>':
            return '>>>' + span
        elif prev_marker == '<console_out>':
            return '...' + span
        else:
            return span

    for m in markers.finditer(line):
        if prev_marker is not None:
            # Inline-style markers don't get their tag echoed; only the
            # very first non-special marker of the line is kept visible.
            is_special = prev_marker.strip() in ['<code_block>', '<code>', '<console_in>',
                                                 '<console_out>']
            out += f"\n\t{prev_marker if is_first_marker and not is_special else ''}" \
                   f"{handleMarker(m.start())}"
            is_first_marker = False
        prev_marker = m.group(0).strip()
        prev_end = m.end()
    # Flush the tail after the last marker (or the whole line if none).
    out += handleMarker(len(line))
    return out
def getGenerated(
        data_dir: Path,
        out_file: Path,
        dataset,
        logger: logging.Logger,
        issue_logger: logging.Logger,
        file_name_shorthand: Dict[str, str] = None):
    """Align every experiment's predictions with the raw dataset and write
    a human-readable report to `out_file`, plus a machine-readable
    simplified_preds.json next to it.

    :param dataset: indexable dataset; dataset[idx] must expose
        'question_id', 'intent', 'body', 'tags', 'score', 'slot_map'.
    """
    all_predictions = getPredictionsFromExperiments(
        data_dir,
        logger,
        issue_logger,
        file_name_shorthand
    )
    simplified_preds = defaultdict(dict)
    # Align the generated results with the data from the unprocessed dataset.
    write_file = out_file.open('w', encoding='utf-8')
    for question, predictions in tqdm(all_predictions.items(), total=len(all_predictions),
                                      file=sys.stdout, desc='Aligning'):
        # Example id is not truly the question id. Rather it is of the format
        # `"Question ID.idx"`. This is because there can be multiple
        # examples for a single question, so storing based on the example id
        # causes collisions.
        qid, idx = question.split('.')
        question_data = dataset[int(idx)]
        # If this fails, we are in BIG trouble.
        # NOTE(review): `question['question_id']` below indexes a *string*
        # with a string key (TypeError if ever reached) — probably meant
        # question_data['question_id']; confirm.
        if qid != str(question_data['question_id']):
            issue_logger.critical(f"'{question}' is not aligned. Dataset "
                                  f"returned '{question['question_id']}'")
            continue
        simplified_preds[question] = {
            'intent': question_data['intent'],
            'body': markers.sub('', question_data['body']),
            'preds': {}
        }
        # Because we write to a file...we need this mess
        write_file.write('=' * 80 + '\n')
        write_file.write(f"\nQUESTION: idx={idx:<6} id={qid:}\n")
        write_file.write("-" * 37 + 'INPUTS' + '-' * 37 + '\n\n')
        keys_use = ['tags', 'score', 'slot_map', 'intent', 'body']
        for k in keys_use:
            if k == 'tags':
                write_file.write(f"{k}: {', '.join(question_data[k])}\n")
            elif k == "body":
                # Body gets its own block: break after code-block markers,
                # collapse runs of blank lines, wrap long lines to 70 cols.
                write_file.write(f"{k}(Left in tags for better readability):\n")
                body = code_block_marker.sub(r'\1\n', question_data[k])
                consecutive_empty_line = False
                for line in body.splitlines(True):
                    # use_line = cleanLine(line)
                    if line == '\n':
                        if not consecutive_empty_line:
                            write_file.write('\n')
                        consecutive_empty_line = True
                        continue
                    consecutive_empty_line = False
                    for wrapped_line in textwrap.wrap(line, 70):
                        write_file.write(wrapped_line + '\n')
            else:
                write_file.write(f"{k}: {question_data[k]}\n")
        # write_file.write('\n')
        write_file.write('\n' + "-" * 37 + 'OUTPUT' + '-' * 37 + '\n')
        printed_expected = False
        for name, p_dict in predictions.items():
            # name is a (category, shorthand) tuple from the collector.
            use_name = name[0] + ':' + name[1]
            if not printed_expected:
                # The gold label is identical across experiments, so it is
                # printed once before the per-experiment predictions.
                write_file.write(f"\n{'Expected':>24}= {repr(p_dict['label'])}\n")
                simplified_preds[question]['snippet'] = p_dict['label']
                printed_expected = True
            pred_write = p_dict['pred'][0] if isinstance(p_dict['pred'], list) else p_dict['pred']
            write_file.write(f"{use_name:>24}= {pred_write}\n")
            simplified_preds[question]['preds'][use_name] = pred_write
        write_file.write('\n')
    write_file.close()
    with out_file.parent.joinpath('simplified_preds.json').open('w', encoding='utf-8') as f:
        json.dump(simplified_preds, f, indent=True)
def isValidCode(snippet):
    """Return True if `snippet` parses as valid Python source.

    Besides SyntaxError, ast.parse raises ValueError for source that
    contains null bytes; treat that as invalid code too instead of
    letting it crash the caller.
    """
    try:
        ast.parse(snippet)
    except (SyntaxError, ValueError):
        return False
    return True
def getPredsStats(preds_file: Path,
                  out_dir: Path,
                  logger: logging.Logger):
    """Compute per-ablation code-validity stats from simplified_preds.json.

    NOTE(review): this function looks unfinished — `out_dir`, `evaluator`,
    `output_stats` and `labels` are built but never used, and it returns
    None after logging "Done"; confirm intended behavior.
    """
    predictions = json.loads(preds_file.read_text('utf-8'))
    # ablation -> stat name -> list of 0/1 per question.
    ablation_stats = defaultdict(lambda: defaultdict(list))
    question_stats = defaultdict(list)
    labels = []
    evaluator = CodeGenerationEvaluator(AutoTokenizer.from_pretrained('facebook/bart-base'),
                                        torch.device('cpu'), logger, smooth_bleu=True)
    output_stats = {}
    for question_id, question_info in tqdm(predictions.items(), desc="Calculating Stats",
                                           file=sys.stdout):
        logger.info(f"Handling question {question_id}")
        labels.append(question_info['snippet'])
        valid_ablations = []
        # `predicition` (sic) keeps the original local name.
        for ablation, predicition in question_info['preds'].items():
            is_valid_code = isValidCode(predicition)
            if is_valid_code:
                valid_ablations.append(ablation)
            # Record 1 when the prediction does NOT parse as Python.
            ablation_stats[ablation]['invalid_code'].append(0 if is_valid_code else 1)
        question_stats['valid_count'].append(valid_ablations)
    logger.info(f"Done")
    return
def cleanDataset(base_data_dir: Path,
                 special_tags_path: Path,
                 out_path: Path,
                 logger: logging.Logger) -> Dict:
    """Strip special tokens and normalize whitespace in every *.jsonl
    dataset file under `base_data_dir`, writing cleaned copies to
    `out_path` (recreated from scratch).

    :returns: up to five sample cleaned question dicts per input file,
        keyed by file stem.
    """
    logger.info(f"Cleaning SO Data")
    # Create regular expressions for later cleaning.
    double_newline = re.compile(r'\n\n+', re.MULTILINE)
    double_space = re.compile(r'(\s)[^\S\n]+', re.MULTILINE)
    # Open and read the list of special tokens from the file. Every line in the
    # file is a unique token. In the text, they show up as `<token>`, but do not
    # have this format in the `.txt` file. Therefore, we wrap them in `<>`.
    special_tags = [
        f"<{l.strip()}>" for l in special_tags_path.read_text('utf-8').splitlines(False)
        if l.strip()
    ]
    logger.info(f"Found {len(special_tags)} special tokens.")
    logger.info(f"Cleaning data and saving to '{out_path}'")
    if out_path.exists():
        # Remove the existing file
        shutil.rmtree(out_path)
    # Make the directory and any missing parents
    out_path.mkdir(parents=True)

    def cleanBody(raw_body):
        # Drop every special token, collapse repeated spaces (but not
        # newlines), squash blank-line runs, and strip leading whitespace.
        if not raw_body:
            return None
        for t in special_tags:
            raw_body = raw_body.replace(t, '')
        return double_newline.sub('\n', double_space.sub(r'\1', raw_body)).lstrip()

    sample_data = defaultdict(list)
    for file in base_data_dir.glob('*.jsonl'):
        # Read the jsonl file containing the dataset
        data = [json.loads(line) for line in file.read_text('utf-8').splitlines(False)]
        logger.info(f"Cleaning {len(data)} questions from '{file.stem}'")
        with out_path.joinpath(file.name).open('w', encoding='utf-8') as out_file:
            for question in tqdm(data, file=sys.stdout, desc="Cleaning"):
                question_dict = {
                    'question_id': question['question_id'],
                    'snippet': question['snippet'],
                    'is_api': question['is_api'],
                    'answer_id': question['answer_id'],
                    'intent': question['normal_intent'],
                    'body': cleanBody(question.get('body', None)),
                    'title': question.get('title', None),
                    'tags': question.get('tags', [])
                }
                # Keep the first five cleaned rows per file as samples.
                if len(sample_data[file.stem]) < 5:
                    sample_data[file.stem].append(question_dict)
                out_file.write(json.dumps(question_dict) + '\n')
    return sample_data
| [
"gabeorlanski@gmail.com"
] | gabeorlanski@gmail.com |
bb1af4976675e819be801f4081fef4cca304caec | 8910cc6c995081a3ccbf66c1d15227b1012f42ca | /lib/taskkill.py | 46497c5460884f9f4de0dd7f631919003f330163 | [
"MIT"
] | permissive | lubnc4261/Incite-Terminal | cc16455ff210c7154ea3c4c55d8f05fe7e13112d | e00ed048b477a0e493c20f498177877e5d9f39cd | refs/heads/main | 2023-01-11T18:11:49.842681 | 2022-09-05T18:22:02 | 2022-09-05T18:22:02 | 302,435,284 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | import psutil
from imports import CustomFont as Beauty
# Expected second CLI argument for taskkill; presumably a command/mode
# code from the terminal's argument parser — TODO confirm.
defaultValue = "5"
def checkExistence(option: int) -> bool:
    """Return True when a process with PID `option` currently exists.

    Bug fix: the old code compared a psutil.Process instance to True
    (`psutil.Process(option) == True`), which is never equal, so the
    function always returned False for live PIDs and raised
    NoSuchProcess for dead ones. psutil.pid_exists answers the intended
    question directly and never raises for a missing PID.
    """
    return psutil.pid_exists(option)
def taskkill(option, option2):
    """Kill the process with PID `option` when `option2` matches the
    expected mode code (`defaultValue`); otherwise print a usage error.

    NOTE(review): psutil.Process raises TypeError (not ValueError) for a
    non-int PID, so the ValueError branch below may be unreachable —
    confirm against how callers pass `option`.
    """
    if option is not None and option2 == defaultValue:
        try:
            p = psutil.Process(option)
            p.kill()
        except ValueError:
            # Colored warning via the CustomFont wrapper.
            print(Beauty.Fore.YELLOW + "Process ID has to be a number"+ Beauty.Fore.RESET)
        except psutil.NoSuchProcess:
            print("PID does not exist")
    #elif option == False and option2 == defaultValue:
        #print("PID does not exist")
    else:
        print("Wrong command usage, please check the man page")
| [
"noreply@github.com"
] | noreply@github.com |
2baae5c5e21ed0309a11ec33f7ea460f6bb29b97 | 4660beb6aad3b8de21409f43c1bd622d07c83120 | /mp3organizer/clients/base.py | 681f94f54060dc720831f1cb2878208bf467e5f6 | [] | no_license | ofir123/MP3-Organizer | c2235ee24beb86e8aed755afa043731e8c416301 | b59cfd035dc84e5d811d8ccfb4213a72e4cc2f59 | refs/heads/master | 2020-12-25T17:34:40.855708 | 2016-08-06T10:24:03 | 2016-08-06T10:24:03 | 12,661,894 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,910 | py | import os
from mp3organizer.file_utils import normalize_name
class ConnectionException(Exception):
    """
    Raised when connection wasn't initialized before first use
    (i.e. a client method requiring the service API was called before
    connect()).
    """
    pass
class Client(object):
    """
    Base metadata client: supplies simple functions for finding an album.

    Concrete services subclass this and implement `connect` and
    `find_album`; the base class holds shared state (artwork folder,
    verbosity, connection flag) plus helpers for prompting the user and
    saving album artwork.
    """

    # NOTE(review): MAX_RESULTS is not referenced in this class; it is
    # presumably consumed by subclasses.
    MAX_RESULTS = 1
    ARTWORK_EXTENSION = '.jpg'

    def __init__(self, artwork_folder=None, verbose=True):
        """
        Initializes the client.
        :param artwork_folder: The folder to save pictures in.
        :param verbose: Whether or not to print output.
        """
        self.api = None
        self.artwork_folder = artwork_folder
        self.verbose = verbose
        self._connected = False

    @staticmethod
    def get_name():
        """Return the service's display name (overridden by subclasses)."""
        return 'Unknown'

    def is_connected(self):
        """
        Checks if the client is connected.
        :return: True if connected, False otherwise.
        """
        return self._connected

    def connect(self):
        """
        Initializes the connection to the service.
        """
        raise NotImplementedError('Client didn\'t implement this method.')

    def find_album(self, album, artist=None, prompt=True, web=True):
        """
        Searches service for the artist and returns the album data.
        :param album: The album's name.
        :param artist: The artist's name.
        :param prompt: Whether or not to prompt the user for approval.
        :param web: Whether or not to open a browser with the album's information.
        :returns: album or None.
        """
        raise NotImplementedError('Client didn\'t implement this method.')

    def __repr__(self):
        """
        Prints the name of the service.
        """
        return 'Unknown service'

    @staticmethod
    def _prompt_user(album, artist):
        """
        Prompts the user and asks for result verification.
        :param album: The result album.
        :param artist: The result artist.
        :return: The user's answer (True or False).
        """
        question = 'Found album "{}" by "{}". Is this correct (y/n)?'.format(album, artist)
        user_answer = input(question)
        while user_answer not in ['y', 'n']:
            print('Please enter either "y" or "n".')
            user_answer = input(question)
        return user_answer == 'y'

    def _save_image(self, image_data, album):
        """
        Saves the album's artwork.
        :param image_data: The image to write.
        :param album: The album's name.
        :return: The artwork's path.
        """
        normalized_album = normalize_name(album)
        album = ' '.join(x.capitalize() for x in normalized_album.split(' '))
        image_path = os.path.join(self.artwork_folder, album + Client.ARTWORK_EXTENSION)
        # Use a context manager so the file handle is released even if the
        # write fails (the original leaked the handle on error).
        with open(image_path, 'wb') as image_file:
            image_file.write(image_data)
        return image_path
| [
"ofirbrukner@gmail.com"
] | ofirbrukner@gmail.com |
37dabf9121ca4486cf9781e3a5eddfe463958798 | de4ec63dfbbb214479ac571e99dae92c87543001 | /Board.py | 089f589a985d5385a0abad6a66d39dfe8f46208a | [] | no_license | xkdgo/chess | a512464d2486d4b62ff7f69895090b1b2218778a | 5f0db4fe830215bdd71116343a9fac857fe05973 | refs/heads/master | 2020-03-14T08:13:47.599821 | 2018-05-31T14:10:41 | 2018-05-31T14:10:41 | 131,513,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,539 | py | # -*- coding: utf-8 -*-
import figures as fg
from figures import Figure
from exceptions import InvalidPosition, PositionOccupied, InvalidMove
from helpers import str_to_move
class Board(object):
    """A rectangular chess board that owns a collection of figures."""

    def __init__(self, height=8, width=None):
        # The board is square by default: width falls back to height.
        self.__height = height
        if width is None:
            self.__width = self.__height
        else:
            self.__width = width
        self.__figures = []

    def __contains__(self, val):
        # Two membership modes:
        #   figure in board  -> identity check against figures on the board
        #   (x, y) in board  -> bounds check of a coordinate pair
        if isinstance(val, Figure):
            # return any(( x is val for x in self.__figures ))
            for fig in self.__figures:
                if fig is val:
                    return True
            else:
                return False
        else:
            k, j = val
            if k < 0 or k >= self.width:
                return False
            if j < 0 or j >= self.height:
                return False
            return True

    def __getitem__(self, pos):
        # Return the figure standing on `pos`, or None for an empty square.
        k, j = pos
        if isinstance(k, slice) or isinstance(j, slice):
            raise NotImplementedError('Slice indexing not implemented')
        # TODO if position right
        for f in self.__figures:
            if f.pos == (k, j):
                return f
        else:
            return None

    def add(self, fig):
        # Place a figure on the board; no-op if it is already there.
        if fig in self:
            # fig in self call method __contains__
            return
        if not (fig.pos in self):
            # fig.pos in self call method __contains__
            raise InvalidPosition(fig.pos)
        k, j = fig.pos
        if self[k, j] is not None:
            # self[k, j] call method __getitem__
            raise PositionOccupied()
        self.__figures.append(fig)
        fig.board = self
        # Uses the setter from Figure, which stores
        # a weak reference back to this board.

    def take_from_pos(self, pos):
        # Remove the figure standing on `pos` from the board and return it
        # (or None if the square is empty).
        # The list is scanned by index so the entry can be deleted in place.
        for k in range(0, len(self.__figures)):
            if self.__figures[k].pos == pos:
                fig = self.__figures[k]
                del self.__figures[k]
                return fig
        return None

    def initialize(self):
        # Standard starting position: pawns on the second rank of each side,
        # then the back-rank pieces.
        for i in range(0, self.width):
            f = fg.Pawn('w', (i, 1))
            self.add(f)
            # self.add call method add
            f = fg.Pawn('b', (i, self.height - 2))
            self.add(f)
        last = self.height - 1
        self.add(fg.Rook('w', (0, 0)))
        self.add(fg.Rook('w', (7, 0)))
        self.add(fg.Rook('b', (0, last)))
        self.add(fg.Rook('b', (7, last)))
        self.add(fg.Knight('w', (1, 0)))
        self.add(fg.Knight('w', (6, 0)))
        self.add(fg.Knight('b', (1, last)))
        self.add(fg.Knight('b', (6, last)))
        self.add(fg.Bishop('w', (2, 0)))
        self.add(fg.Bishop('w', (5, 0)))
        self.add(fg.Bishop('b', (2, last)))
        self.add(fg.Bishop('b', (5, last)))
        self.add(fg.Queen('w', (3, 0)))
        self.add(fg.King('w', (4, 0)))
        self.add(fg.Queen('b', (3, last)))
        self.add(fg.King('b', (4, last)))

    @property
    def height(self):
        return self.__height

    @property
    def width(self):
        return self.__width

    def __str__(self):
        return f'<board {self.height}x{self.width}>'
    __repr__ = __str__

    def show(self):
        # Render the board to stdout using Unicode box-drawing characters;
        # each square is 6 characters wide.
        alpha = 'abcdefghijklmnopqrstuvwxyz'[:self.width]
        print('  ' + (' ' * 5).join(alpha))
        # raise NotImplementedError('Board.show')
        print('  \u250c' + '\u2500' * 6 * self.width + '\u2510')
        for n in range(self.height, 0, -1):
            line = ('\u2588' * 6 + '\u2591' * 6) * self.width
            if n % 2 > 0:
                line = line[6:]
            line = line[:self.width * 6]
            line1 = '  \u2502' + line + '\u2502'
            # print figure on the board
            for k in range(0, self.width):
                f = self[k, n - 1]
                # self[k, n - 1] call method __getitem__
                if f is None:
                    # if __getitem__ returned None check next position k
                    continue
                b = k * 6 + 2
                c = b + len(f.symbol)
                line = line[:b] + f.symbol + line[c:]
            line2 = f'{n:2d}\u2502' + line + f'\u2502{n:2d}'
            print(line1)
            print(line2)
            print(line1)
        print('  \u2514' + '\u2500' * 6 * self.width + '\u2518')
        print('  ' + (' ' * 5).join(alpha))

    def move(self, fig_sym, start=None, finish=None, takes=False):
        # Unpack a user-entered move string and move the matching figure.
        # Uses the str_to_move helper from helpers.
        if isinstance(fig_sym, str):
            fig_sym, start, finish, takes = str_to_move(fig_sym)
        fig = self[start]
        # call func __getitem__(self, start)
        if fig is None:
            raise InvalidMove('Field is empty')
        if fig.symbol[0] != fig_sym:
            raise InvalidMove('Invalid figure')
        # NOTE(review): more validation could be added here (path, turn, ...)
        try:
            fig.pos = finish
        except PositionOccupied:
            if not takes:
                print('You need input x before take')
            self.take_from_pos(finish)
            fig.pos = finish

    def prepare_for_pickle(self):
        # Drop the weak back-references from the figures so that the
        # board can be serialized with pickle.
        for fig in self.__figures:
            del fig.board

    def restore_for_pickle(self):
        # Restore the weak back-references after unpickling.
        for fig in self.__figures:
            fig.board = self

    # The following methods make instances usable as context managers,
    # i.e. a board can be used with the `with` statement so that the
    # pickle preparation/restoration is done automatically.
    def __enter__(self):
        self.prepare_for_pickle()
        return self

    def __exit__(self, exctype, excvalue, traceback):
        self.restore_for_pickle()
if __name__ == '__main__':
    # Quick manual demo: build a standard 8x8 board and render it.
    demo_board = Board(8)
    demo_board.initialize()
    demo_board.show()
| [
"xkdgo@rambler.ru"
] | xkdgo@rambler.ru |
5c825153929f1313315fe6ca61544eb90f422a8c | 24c9419d869c85d8f54fbf70960445d5b465841b | /Rotate Array.py | 3eb1a33b541960365c11f5657c3b8134632c673c | [] | no_license | triparnabh/Leetcode_Problems | 162e46f847bfa19557240320ea4e4b2b67bda267 | a5de4c4f3f5616e656e83a1653bf450a292ffd56 | refs/heads/master | 2020-04-20T12:51:21.710505 | 2019-03-23T16:23:35 | 2019-03-23T16:23:35 | 168,853,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py |
def RotateArray(lists, num):
    """Return a copy of *lists* rotated right by *num* positions.

    Generalized over the original: *num* may be 0, larger than the list
    length (it wraps around), and an empty list is returned unchanged.

    :param lists: the sequence to rotate (not modified).
    :param num: number of positions to rotate to the right.
    :return: a new list with the rotation applied.
    """
    if not lists:
        return []
    # Normalize the shift into [0, len) so oversized shifts wrap.
    num %= len(lists)
    pivot = len(lists) - num
    # Slicing replaces the two explicit index-copy loops of the original.
    return lists[pivot:] + lists[:pivot]
# Demo: rotate a six-element list right by three positions.
rotate_num = 3

list_1 = [1, 2, 3, 4, 5, 6]

print(RotateArray(list_1, rotate_num))
# Equivalent one-liner using slicing:
# list_1 = (list_1[-rotate_num:] + list_1[:-rotate_num])
#
# print(list_1) | [
"triparnabh@gmail.com"
] | triparnabh@gmail.com |
2bf68bb4fcd778c1997e5f663b9f54634c559219 | e9de15ca55e02587f7d1267189f8cde01e1d0f84 | /website/views.py | a75a61248f9acc314b07bff0e9630f393b0b13c9 | [] | no_license | jestanoff/meeting-planner | 187fd1680eabc3e72cd76da6158cee3ba9ede448 | 0c9271ee59019ef35ca6860f8f68d0a889b37f49 | refs/heads/master | 2022-04-26T14:03:59.001498 | 2020-04-13T14:34:57 | 2020-04-13T14:34:57 | 255,338,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | from django.shortcuts import render
from django.http import HttpResponse
from datetime import datetime
from meetings.models import Meeting
def welcome(request):
    """Render the landing page listing every meeting in the database."""
    context = {"meetings": Meeting.objects.all()}
    return render(request, "website/welcome.html", context)
def date(request):
    """Return the current server time as a plain-text response."""
    # Fixed typo in the user-facing message: "server at" -> "served at".
    return HttpResponse("This page was served at " + str(datetime.now()))
def about(request):
    """Static about page with a fixed greeting."""
    message = "Hello dudes it is me the enthusiastic Python programmer! Ka-bum-m-m-m"
    return HttpResponse(message)
| [
"stefan.dzhestanov@waitrose.co.uk"
] | stefan.dzhestanov@waitrose.co.uk |
db989cd943ae57d2a0bc568f07c50e18c8ad3589 | e33895eae4a8181575cd74ca4903357d9104bddc | /bistgame/urls.py | d5d6c3e0f96aa67592d2be7b56ebeba90959388d | [] | no_license | 404FoundGroup/bistagame | 701261d480f7438b55951e98b0a54c1603a6201d | 213eb1ca3f152256714b0d4d0dde91baa3420af6 | refs/heads/master | 2020-03-17T02:53:28.788141 | 2018-05-28T11:36:59 | 2018-05-28T11:36:59 | 133,210,782 | 3 | 0 | null | 2018-05-25T22:15:12 | 2018-05-13T06:11:36 | Python | UTF-8 | Python | false | false | 750 | py | """bistgame URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Django admin site; no application routes are registered yet.
    path('admin/', admin.site.urls),
]
| [
"aria.moradi007@gmail.com"
] | aria.moradi007@gmail.com |
fa503b55fe5581b5699049ff5f746e8c3f0bd255 | cde8abbb214170db5ed05679ab1e110540b147bf | /core/migrations/0010_auto_drop_proxy_models.py | 2144fcb1bd242d2fb0afa907b6bf9006496210a5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | nirgal/ngw | bbe6ef3a4f54a752ea7f32ba8a26d37f88d4c2b5 | 6f94ae702e8d772c49a7daea82fc007317538634 | refs/heads/master | 2022-05-18T13:18:30.668119 | 2022-04-07T13:19:14 | 2022-04-07T13:19:14 | 15,556,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Widens the oper_type choice sets on ClusterInstallLog and
    # NodeUpdateLog to include the "so_initial" (SO machine
    # initialization) operation. Generated by Django; do not edit
    # the operations by hand.

    dependencies = [
        ('cluster', '0010_auto_20180224_2058'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clusterinstalllog',
            name='oper_type',
            field=models.CharField(choices=[('initialize', '集群初始化'), ('reinstall', '集群重新初始化'), ('initial_check', '前置检查'), ('removing', '删除集群'), ('so_initial', 'SO 机器初始化')], default='initialize', max_length=16),
        ),
        migrations.AlterField(
            model_name='nodeupdatelog',
            name='oper_type',
            field=models.CharField(choices=[('initialize', '节点初始化'), ('reinstall', '节点重新初始化'), ('removing', '节点移除'), ('initial_check', '前置检查'), ('so_initial', 'SO 机器初始化')], default='initialize', max_length=16),
        ),
    ]
| [
"jmv_deb@nirgal.com"
] | jmv_deb@nirgal.com |
b69ff391c0b83806e0c6d3ff7abac10bb56b291b | dbed0e44a11157224d933c46be5692c8e77f66e5 | /apps/oomjsonsvr.py | dd9a4f2db4a76f2c8fcf0baff63369e697b34f2b | [
"MIT"
] | permissive | pichuang/oom | ef50312b442225c33a6169d2c3d5fa8bbc170d1b | 725e4859f900ee0f8e83d62663a5efd70dce6c15 | refs/heads/master | 2020-05-21T07:36:23.740807 | 2017-02-28T00:24:34 | 2017-02-28T00:24:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,737 | py | # /////////////////////////////////////////////////////////////////////
#
# oomjsonsvr.py : implements functions to translate OOM Southbound API
# data structures to JSON, and back
#
# implements the switch side of an "over the network" OOM
# southbound API (a SHIM). This routine calls the native SHIM on the
# network, turns the response into JSON, and ships it over the network
# (in response to calls from the server side)
#
# Copyright 2016 Finisar Inc.
#
# Author: Don Bollinger don@thebollingers.org
#
# ////////////////////////////////////////////////////////////////////
import os
from ctypes import *
import json
import base64
from flask import Flask, request
from oom.oomjsonshim import jpdict_to_cport, cport_to_json
from oom.oomtypes import c_port_t
from oom import *
#
# Read from EEPROM, pass it over the network as JSON
#
def oom_get_json_memory_sff(cport, address, page, offset, length):
    """Read `length` bytes of module EEPROM via the native SHIM and wrap
    them, base64 encoded, in a small JSON reply.

    NOTE(review): relies on Python 2 string semantics — under Python 3
    base64.b64encode returns bytes and '%s' would embed "b'...'".
    """
    port = matchport(cport, portlist.list)
    data = oom_get_memory_sff(port, address, page, offset, length)
    jsdata = base64.b64encode(data)
    js_out = '{"data": "%s",\n "length": "%s"}' % (jsdata, str(length))
    return(js_out)
#
# Raw write
#
def oom_set_json_memory_sff(cport, address, page, offset, length, data):
    """Raw EEPROM write via the native SHIM; reply is JSON carrying the
    number of bytes actually written."""
    port = matchport(cport, portlist.list)
    retlen = oom_set_memory_sff(port, address, page, offset, length, data)
    js_out = '{"length": "%s"}' % str(retlen)
    return js_out
def matchport(cport, portlist):
    """Find the Port in `portlist` whose C handle AND name match the
    transmitted c_port structure; return None if nothing matches.

    ctypes maps a NULL/0 handle to None, so both sides are normalized
    to 0 before comparing.
    """
    temp = cport.handle  # kludge, python thinks 0 is 'None'
    if temp is None:
        temp = 0
    for port in portlist:
        temp2 = port.c_port.handle
        if temp2 is None:
            temp2 = 0
        if temp == temp2:
            # Handle matches; confirm with the NUL-trimmed port name.
            if port.port_name == bytearray(cport.name).rstrip('\0'):
                return port
    return None
#
# Simulate oomsouth.oom_get_portlist(0,0)
# In fact, call oom_get_portlist(), cache the portlist, return it's length
#
def oom_get_json_portlist_zeros():
    # Mimic the native oom_get_portlist(0, 0) call: lazily populate the
    # module-level cache, then report only the number of ports as JSON.
    if not portlist.list:
        portlist.list = oom_get_portlist()
    return '{"numports": "%d"}' % len(portlist.list)
#
# Fetch portlist if needed, else use cached value
#
def oom_get_json_portlist():
    # Serve the cached portlist, fetching it from the SHIM on first use.
    if not portlist.list:
        portlist.list = oom_get_portlist()
    return port_list_to_json(portlist.list)
def port_list_to_json(portlist):
    # Serialize each port's C struct and join the entries into one
    # JSON array; join() replaces the first-entry flag of the original.
    entries = [cport_to_json(port.c_port) for port in portlist]
    return '{"portlist":[\n\t' + ',\n\t'.join(entries) + '\n\t]}'
def json_to_numports(js):
    # Parse the JSON reply and extract the "numports" count as an int.
    payload = json.loads(js)
    return int(payload["numports"])
#
# Start up the Flask server
#
app = Flask(__name__)
# set up the one URL that I'm listening to for requests
# receive all requests, unpack the parameters and send them to the
# right routine
@app.route('/OOM', methods=['GET'])
def getOOMdata():
    """Single RPC-style endpoint: dispatch on the 'cmd' field of the
    JSON request body and forward to the matching native OOM call.
    (This file is Python 2 — note the print statements.)"""
    command = request.json
    print 'command:'
    print command
    # 'ogp0' means oom_get_portlist(0, 0)
    if command['cmd'] == 'ogp0':
        js = oom_get_json_portlist_zeros()
        return js
    # 'ogp' means oom_get_portlist()
    if command['cmd'] == 'ogp':
        js = oom_get_json_portlist()
        return js
    # 'ogms' means 'oom_get_memory_sff()'
    if command['cmd'] == 'ogms':
        cport = jpdict_to_cport(json.loads(command['port']))
        address = int(command['address'])
        page = int(command['page'])
        offset = int(command['offset'])
        length = int(command['length'])
        retval = oom_get_json_memory_sff(cport, address, page, offset, length)
        return retval
    # 'osms' means 'oom_set_memory_sff()'
    if command['cmd'] == 'osms':
        cport = jpdict_to_cport(json.loads(command['port']))
        address = int(command['address'])
        page = int(command['page'])
        offset = int(command['offset'])
        length = int(command['length'])
        # the data is going to take more work...
        # fetch it from the dict, b64 decode it...
        sentdata = base64.b64decode(command['data'])
        # create a string buffer suitable for oomsouth.oom_set...
        data = create_string_buffer(length)
        # and copy the data into it
        ptr = 0
        for c in sentdata:
            data[ptr] = c
            ptr += 1
        retval = oom_set_json_memory_sff(cport, address, page, offset,
                                         length, data)
        return retval
class portlist:
    # Mutable holder for the cached result of oom_get_portlist().
    list = []

# Replace the class with a singleton instance; the module-level name
# `portlist` now refers to the cache object itself.
portlist = portlist()
if __name__ == "__main__":
    # to debug locally use:
    # app.run(debug=True)
    # to be visible across the network, use:
    # (0.0.0.0 binds the Flask server to every network interface)
    app.run(host='0.0.0.0')
| [
"don@thebollingers.org"
] | don@thebollingers.org |
9d4c90dfdf0af067060fbab3e578ff906b4e17fe | cdc4e9c81b6887bff2b70e3f3893b93d6a824e41 | /Python/Week3/Day2/practice/main/apps/first_app/migrations/0001_initial.py | 69c76226ead11c943298422365d71aeef19eca47 | [] | no_license | eawasthi/CodingDojo | e5c363184add26d2b7fb8b442ca15c657599feab | bfbba677cf957b7b45e99465079d5c00bc6c3f6f | refs/heads/master | 2021-01-23T05:25:17.152128 | 2017-06-26T18:36:32 | 2017-06-26T18:36:32 | 86,299,143 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-19 07:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=45)),
('author', models.CharField(max_length=45)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"ekta.awasthi@hotmail.com"
] | ekta.awasthi@hotmail.com |
96e1b7d4ba508da9f2c0883b2ba7a362efde32d5 | b9878c92b857f73ff0452fc51c822cfc9fa4dc1c | /watson_machine_learning_client/libs/repo/util/base_singleton.py | 4a9943aa7d08aaa332d2f94d4ddaafc4ea0a0726 | [] | no_license | DavidCastilloAlvarado/WMLC_mod | 35f5d84990c59b623bfdd27369fe7461c500e0a5 | f2673b9c77bd93c0e017831ee4994f6d9789d9a1 | refs/heads/master | 2022-12-08T02:54:31.000267 | 2020-09-02T15:49:21 | 2020-09-02T15:49:21 | 292,322,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | class _Singleton(type):
""" A metaclass that creates a Singleton base class when called. """
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class BaseSingleton(_Singleton('SingletonMeta', (object,), {})):
pass | [
"dcastilloa@uni.pe"
] | dcastilloa@uni.pe |
355a8a6a7493e09e033a44bc139d2aa4314b07e5 | f27c49458bde84048e6008da8c52ca0f1ae711ce | /code/05-interactive-code/m-n-m/guessinggame.py | f919ba7aeb09373c7ec432efe4f22638bff7f3d2 | [
"MIT"
] | permissive | talkpython/python-for-absolute-beginners-course | 54b0f48b5edbf7755de6ca688a8e737ba16dc2fc | 1930dab0a91526863dc92c3e05fe3c7ec63480e1 | refs/heads/master | 2022-11-24T03:02:32.759177 | 2022-11-08T14:30:08 | 2022-11-08T14:30:08 | 225,979,578 | 2,287 | 1,059 | MIT | 2022-11-07T19:45:15 | 2019-12-05T00:02:31 | Python | UTF-8 | Python | false | false | 656 | py | import random
print("------------------------------")
print(" M&M guessing game!")
print("------------------------------")
print("Guess the number of M&Ms and you get lunch on the house!")
print()
mm_count = random.randint(1, 100)
attempt_limit = 5
attempts = 0
while attempts < attempt_limit:
guess_text = input("How many M&Ms are in the jar? ")
guess = int(guess_text)
attempts += 1
if mm_count == guess:
print(f"You got a free lunch! It was {guess}.")
break
elif guess < mm_count:
print("Sorry, that's too LOW!")
else:
print("That's too HIGH!")
print(f"Bye, you're done in {attempts}!")
| [
"mikeckennedy@gmail.com"
] | mikeckennedy@gmail.com |
7bdb3032d0b87e6e58936035f17049cb25437466 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/4fde7f3e48576985304dbb54c7ab85f5d1c4d4e9observer.py | 4fde7f3e48576985304dbb54c7ab85f5d1c4d4e9 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 988 | py | from flask import Flask
from flask.ext.restful import reqparse, abort, Api, Resource
from utils import get_controllers
app = Flask(__name__)
api = Api(app)
def get_controller_by_id(controller_id):
try:
return controllers[controller_id]
except IndexError:
abort(404, message="Controller {} doesn't exist".format(controller_id))
class ControllerListResource(Resource):
def get(self):
return [controller.state_as_dict() for controller in controllers]
class ControllerResource(Resource):
def get(self, controller_id):
controller = get_controller_by_id(controller_id)
return controller.state_as_dict()
api.add_resource(ControllerListResource, '/controllers')
api.add_resource(ControllerResource, '/controllers/<int:controller_id>')
if __name__ == '__main__':
controllers = get_controllers(read_only=True)
app.run(debug=True, use_reloader=False)
for controller in controllers:
controller.terminate()
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
78b2a54064a34e9f68b79ccd14968387c034d9f6 | 618e05c82a6b6034f480494be98c9d4f2ebeb038 | /src/platforms.py | aa0fac39f4ab1151523512952d9da5711b1417bb | [
"CC0-1.0"
] | permissive | camelNotationsdjkh/Pento-s-Pledge | 8b851f9c210750c724ee2d70f651502c137fe679 | ac0ec683e41a0117ad744ff3ba8ef403f18eec4f | refs/heads/master | 2023-01-27T17:29:25.156130 | 2023-01-21T23:20:54 | 2023-01-21T23:20:54 | 229,200,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,197 | py | """
Module for managing platforms.
"""
import pygame
import constants
from spritesheet_functions import SpriteSheet
# These constants define our platform types:
# Name of file
# X location of sprite
# Y location of sprite
# Width of sprite
# Height of sprite
""" Put these constants here instead of
constants.py because it's easier to
manage that way """
# Level 1 assets
DUNG_PLATFORM_MIDDLE = (0, 0, 300, 80)
DUNG_PLATFORM_PILLAR = (0, 140, 50, 60)
LADDER_SPRITE = (422, 238, 40, 172)
DUNG_SLIVER = (195, 147, 107, 55)
DUNG_DIRT = (60, 84, 125, 53)
# Level two assets
MG_ROCK_PLAT_LEFT = (608, 0, 159, 80)
MG_ROCK_PLAT_MIDDLE = (608, 0, 136, 80)
MG_GREEN_PLAT = (414, 0, 173, 36)
MG_FLOWER_01 = (837, 333, 38, 24)
MG_FLOWER_02 = (837, 386, 38, 24)
MG_FENCE = (844, 146, 141, 34)
MG_PILLAR_MIDDLE = (766, 54, 61, 66)
MG_PILLAR_BOTTOM = (766, 0, 61, 66)
MG_MOVING_GREEN = (472, 59, 114, 60)
MG_TREE = (130, 0, 268, 471)
MG_BUSH_LEFT = (995, 189, 205, 128)
MG_BUSH_RIGHT = (998, 342, 199, 124)
# King
MG_KING = (1006, 8, 41, 54)
class Platform(pygame.sprite.Sprite):
    """ Platform the user can jump on """

    def __init__(self, sprite_sheet_data, is_image=True, surface_box=None, color=constants.BLACK, mg_sheet=False, flip=False):
        """ Platform constructor. Assumes constructed with user passing in
            an array of 5 numbers like what's defined at the top of this
            code. """
        # NOTE(review): the tuples defined above actually hold 4 numbers
        # (x, y, width, height), not 5 — the docstring looks stale.
        pygame.sprite.Sprite.__init__(self)

        # Load the sprite sheet for the platforms (level 1 has a different one)
        sprite_sheet = SpriteSheet("images/special_sprites.png") if not mg_sheet else SpriteSheet("images/sheet.png", True)

        # Grab the image for this platform; otherwise build a plain
        # solid-colour rectangle of `surface_box` size.
        if is_image: self.image = sprite_sheet.get_image(
            sprite_sheet_data[0],
            sprite_sheet_data[1],
            sprite_sheet_data[2],
            sprite_sheet_data[3])
        else:
            self.image = pygame.Surface(surface_box)
            self.image.fill(color)

        if flip:
            # Mirror vertically (flip_x=False, flip_y=True).
            self.image = pygame.transform.flip(self.image, False, True)

        self.rect = self.image.get_rect()
class MovingPlatform(Platform):
    """ This is a fancier platform that can actually move. """
    # Per-frame velocity components.
    change_x = 0
    change_y = 0

    # Patrol boundaries: top/bottom in screen coords, left/right in
    # world (unshifted) coords — see update().
    boundary_top = 0
    boundary_bottom = 0
    boundary_left = 0
    boundary_right = 0

    # When True, a downward push places the player ON TOP of the
    # platform instead of shoving him below it.
    put_on_top = False

    level = None   # back-reference to the level, needed for world_shift
    player = None  # the player sprite to collide against

    def update(self):
        """ Move the platform.
            If the player is in the way, it will shove the player
            out of the way. This does NOT handle what happens if a
            platform shoves a player into another object. Make sure
            moving platforms have clearance to push the player around
            or add code to handle what happens if they don't. """

        # Move left/right
        self.rect.x += self.change_x

        # See if we hit the player
        hit = pygame.sprite.collide_rect(self, self.player)
        if hit:
            # We did hit the player. Shove the player around and
            # assume he/she won't hit anything else.

            # If we are moving right, set our right side
            # to the left side of the item we hit
            if self.change_x < 0:
                self.player.rect.right = self.rect.left
            else:
                # Otherwise if we are moving left, do the opposite.
                self.player.rect.left = self.rect.right

        # Move up/down
        self.rect.y += self.change_y

        # Check and see if we hit the player
        hit = pygame.sprite.collide_rect(self, self.player)
        if hit:
            # We did hit the player. Shove the player around and
            # assume he/she won't hit anything else.

            # Reset our position based on the top/bottom of the object.
            if self.change_y < 0:
                self.player.rect.bottom = self.rect.top
            else:
                if self.put_on_top: self.player.rect.bottom = self.rect.top
                else: self.player.rect.top = self.rect.bottom

        # Check the boundaries and see if we need to reverse
        # direction.
        if self.rect.bottom > self.boundary_bottom or self.rect.top < self.boundary_top:
            self.change_y *= -1

        # Horizontal bounds are compared in world coordinates, so the
        # current camera shift is removed first.
        cur_pos = self.rect.x - self.level.world_shift
        if cur_pos < self.boundary_left or cur_pos > self.boundary_right:
            self.change_x *= -1
class Ladder(pygame.sprite.Sprite):
    """A climbable ladder sprite clipped from the shared sprite sheet."""

    def __init__(self, sprite_sheet_data, player=None):
        super().__init__()
        # The ladder graphic lives in the shared "sheet.png" atlas.
        sheet = SpriteSheet("images/sheet.png", True)
        self.player = player
        clip_x, clip_y, clip_w, clip_h = (sprite_sheet_data[0],
                                          sprite_sheet_data[1],
                                          sprite_sheet_data[2],
                                          sprite_sheet_data[3])
        self.image = sheet.get_image(clip_x, clip_y, clip_w, clip_h)
        self.rect = self.image.get_rect()
class BackImage(pygame.sprite.Sprite):
    """Purely decorative background sprite placed at a fixed position.
    (Originally written entirely by the author.)"""

    def __init__(self, sprite_sheet_data, x, y, scale=False, scale_size=None):
        super().__init__()
        # Clip the decoration out of the shared sprite sheet.
        sheet = SpriteSheet("images/sheet.png", True)
        self.image = sheet.get_image(sprite_sheet_data[0],
                                     sprite_sheet_data[1],
                                     sprite_sheet_data[2],
                                     sprite_sheet_data[3])
        # Optionally resize the clipped image before placing it.
        if scale:
            self.image = pygame.transform.scale(self.image, scale_size)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
| [
"noreply@github.com"
] | noreply@github.com |
78568ca4885a42d8f3f9605cd773cdac043a3fda | 27317b3adb1ccd99afa86cb931d2d14e23b9b175 | /bcs-app/backend/apps/cluster/migrations/0011_auto_20180514_1805.py | 7246c2cb4817e9db1fb1c09afade48a95c1a0502 | [
"BSD-3-Clause",
"LicenseRef-scancode-unicode",
"ICU",
"LicenseRef-scancode-unknown-license-reference",
"Artistic-2.0",
"Zlib",
"LicenseRef-scancode-openssl",
"NAIST-2003",
"ISC",
"NTP",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"MIT"
] | permissive | freyzheng/bk-bcs-saas | cf5a6c4ab1c20959bda1362bc31de7884451acd7 | 96373cda9d87038aceb0b4858ce89e7873c8e149 | refs/heads/master | 2021-07-05T04:11:08.555930 | 2020-09-22T12:26:37 | 2020-09-22T12:26:37 | 201,279,048 | 0 | 1 | NOASSERTION | 2020-09-16T03:07:16 | 2019-08-08T14:48:27 | Python | UTF-8 | Python | false | false | 1,794 | py | # -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
# Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Generated by Django 1.11.5 on 2018-05-14 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cluster', '0010_auto_20180224_2058'),
]
operations = [
migrations.AlterField(
model_name='clusterinstalllog',
name='oper_type',
field=models.CharField(choices=[('initialize', '集群初始化'), ('reinstall', '集群重新初始化'), ('initial_check', '前置检查'), ('removing', '删除集群'), ('so_initial', 'SO 机器初始化')], default='initialize', max_length=16),
),
migrations.AlterField(
model_name='nodeupdatelog',
name='oper_type',
field=models.CharField(choices=[('initialize', '节点初始化'), ('reinstall', '节点重新初始化'), ('removing', '节点移除'), ('initial_check', '前置检查'), ('so_initial', 'SO 机器初始化')], default='initialize', max_length=16),
),
]
| [
"gejun.coolfriend@gmail.com"
] | gejun.coolfriend@gmail.com |
90a9f66b973d50155b27452cee64bbbfb1800a9b | 4dd1d8fa59e20061e2c12e540fc52b1b305e575b | /source/sims-2/boltz-bgk/s5/plot-sol.py | ff8f54a4239c7054fea5d9ce39a198fdbe5120d1 | [
"MIT"
] | permissive | ammarhakim/ammar-simjournal | f63521906a97d55ab290a5960d94758139944c89 | 5019f4723e20db80a20db6f2bd454c2fd3241412 | refs/heads/master | 2023-06-08T08:18:11.722779 | 2023-06-02T15:06:43 | 2023-06-02T15:06:43 | 204,050,516 | 3 | 3 | null | 2022-02-01T16:53:13 | 2019-08-23T18:28:44 | Lua | UTF-8 | Python | false | false | 3,194 | py | import gkedata
import gkedgbasis
from pylab import *
import pylab
import tables
import math
import numpy
import pylab
import numpy
from matplotlib import rcParams
import matplotlib.pyplot as plt
# customization for figure
rcParams['lines.linewidth'] = 2
rcParams['font.size'] = 18
rcParams['xtick.major.size'] = 8 # default is 4
rcParams['xtick.major.width'] = 3 # default is 0.5
rcParams['ytick.major.size'] = 8 # default is 4
rcParams['ytick.major.width'] = 3 # default is 0.5
rcParams['figure.facecolor'] = 'white'
#rcParams['figure.subplot.bottom'] = 0.125
#rcParams['figure.subplot.right'] = 0.85 # keep labels/ticks of colobar in figure
rcParams['image.interpolation'] = 'none'
rcParams['image.origin'] = 'lower'
rcParams['contour.negative_linestyle'] = 'solid'
rcParams['savefig.bbox'] = 'tight'
# Math/LaTex fonts:
# http://matplotlib.org/users/mathtext.html
# http://matplotlib.org/users/usetex.html
# Example: xlabel(r'$t \cdot l / V_{A,bc}$')
rcParams['mathtext.default'] = 'regular' # match the font used for regular text
def IE(n, nu, E):
    # Internal energy: remove the bulk kinetic contribution nu^2/n
    # (momentum squared over density) from the total particle energy E.
    bulk_kinetic = nu**2 / n
    return 0.5 * (E - bulk_kinetic)
# density plot
d = gkedata.GkeData("../s5/s5-bgk-boltz_numDensity_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, n2 = dg1.project(0)
d = gkedata.GkeData("../s6/s6-bgk-boltz_numDensity_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, n3 = dg1.project(0)
nEul = loadtxt("../m2/m2-euler-shock-exact-density.txt")
figure(1)
plot(Xc, n2, '-r', label='Kn=1/100')
plot(Xc, n3, '-b', label='Kn=1/1000')
plot(nEul[:,0], nEul[:,1], 'k--')
xlabel('X')
ylabel('Density')
legend(loc='best')
savefig('jets-density-cmp.png', dpi=200)
# momentum plot
d = gkedata.GkeData("../s5/s5-bgk-boltz_momentum_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, nu2 = dg1.project(0)
d = gkedata.GkeData("../s6/s6-bgk-boltz_momentum_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, nu3 = dg1.project(0)
uEul = loadtxt("../m2/m2-euler-shock-exact-velocity.txt")
figure(2)
plot(Xc, nu2/n2, '-r', label='Kn=1/100')
plot(Xc, nu3/n3, '-b', label='Kn=1/1000')
plot(uEul[:,0], uEul[:,1], 'k--')
xlabel('X')
ylabel('Velocity')
legend(loc='best')
savefig('jets-velocity-cmp.png', dpi=200)
# internal energy plot
d = gkedata.GkeData("../s5/s5-bgk-boltz_ptclEnergy_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, E2 = dg1.project(0)
d = gkedata.GkeData("../s6/s6-bgk-boltz_ptclEnergy_5.h5")
dg1 = gkedgbasis.GkeDgLobatto1DPolyOrder2Basis(d)
Xc, E3 = dg1.project(0)
pEul = loadtxt("../m2/m2-euler-shock-exact-pressure.txt")
figure(3)
plot(Xc, IE(n2, nu2, E2), '-r', label='Kn=1/100')
plot(Xc, IE(n3, nu3, E3), '-b', label='Kn=1/1000')
plot(pEul[:,0], pEul[:,1]/(3-1), 'k--')
xlabel('X')
ylabel('Particle Energy')
legend(loc='best')
savefig('jets-ptclInternalEnergy-cmp.png', dpi=200)
figure(4)
plot(Xc, 0.5*E2, '-r', label='Kn=1/100')
plot(Xc, 0.5*E3, '-b', label='Kn=1/1000')
plot(pEul[:,0], 0.5*nEul[:,1]*uEul[:,1]**2+pEul[:,1]/(3-1), 'k--')
xlabel('X')
ylabel('Particle Energy')
legend(loc='best')
savefig('jets-ptclEnergy-cmp.png', dpi=200)
show()
| [
"11265732+ammarhakim@users.noreply.github.com"
] | 11265732+ammarhakim@users.noreply.github.com |
c865eb2e04496208f170fb858fc3ffe0e9a8f306 | 4ae14ddd7dff01480fa62ae996d7e8ad93e5a333 | /linear_regression_mydict/stepwise.py | b4e078c1e855baf8ca40312369ed3caa8fd1ccdb | [] | no_license | sadirabdulhadi/airbnb_content_analysis | f548da9de76066b74f4e657ec4437b738ae6be81 | 4ac45c8d3badd100bb828d16c9bbd16136cbbc72 | refs/heads/master | 2020-05-17T15:10:10.825198 | 2019-04-28T12:29:52 | 2019-04-28T12:29:52 | 183,782,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,574 | py | #here I'm using PySAL for the first time :) It is the Python spatial analysis library.
#references
##MORAN'S TEST
# the overall pipeline: http://darribas.org/gds_scipy16/ipynb_md/04_esda.html
# interpretation: http://pro.arcgis.com/en/pro-app/tool-reference/spatial-statistics/h-how-spatial-autocorrelation-moran-s-i-spatial-st.htm
# check with matlab.
#
# moran's test -> no gaps give 0.
# check statsmodel -> zero constant.
# R -> stepwise linear regression // without enforcing the VIF.
#
# A city with more areas can also be looked at.
# Table of content
#
import pysal as ps
import statsmodels.api as sm
from scipy import stats
import pandas as pd
import numpy
import matplotlib.pyplot as plt
import geopandas as gpd
import statsmodels.formula.api as smf
import csv
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Experiment configuration: which city / listing type / input tables to use.
pref= "median-more40_"
city = "london"
property_type = "private"
# Input tables: ward-level socio-economic census data and the per-theme
# review-score tables produced by the dictionary analysis.
table_census = "/Users/Sadir/Documents/Computer_science/Year4/term2/individual_project/results/"+city+"/socio-economic.csv"
table_reviews = "/Users/Sadir/Documents/Computer_science/Year4/term2/individual_project/results/"+city+"/dictionary-results/mydict/"+property_type+"/"+property_type
# Ward boundary shapefile (used by moran() for spatial autocorrelation).
shapefile = "/Users/Sadir/Documents/Computer_science/Year4/term2/individual_project/ward_files/"+city+"/wards.shp"
# Output prefix for the per-theme stepwise-regression result CSVs.
download_dir = "/Users/Sadir/Documents/Computer_science/Year4/term2/individual_project/results/"+city+"/linear_regression/stepwise-mydict/"+property_type+"/"+property_type+"-sw-"
themes = ["prof.csv", "loc.csv", "prop.csv", "soc.csv", "soc_rel.csv"]
#themes = ["table-avg_pers_pers+impers.csv", "table-avg_affect_tot.csv", "table-avg_pers_tot.csv"]
# Years actually analysed vs. all year columns present in the review tables.
years=["2014", "2015", "2016", "2017", "2018", "total"]
years_bis=["2010","2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "total"] #DON'T USE, SIDE EFFECTS
keys_df = []  # rebound by clean_dataframe() to the predictor column names
def remove_empty_rows(dataframe, column):
    """Return the rows of *dataframe* whose *column* is not the -1 sentinel."""
    keep_mask = dataframe[column] != -1
    return dataframe[keep_mask]
def clean_dataframe(census, reviews, year):
    """Join census and review tables and isolate one year's review metric.

    Merges on 'Ward' (inner join), drops every year column except *year*,
    renames that column to 'index' (the regression response), removes rows
    whose response is the -1 missing-value sentinel and sorts by ward.

    Side effect: rebinds the module-level ``keys_df`` to the list of
    predictor column names (every column except 'index').

    :param census: DataFrame with a 'Ward' column plus predictor columns
    :param reviews: DataFrame with a 'Ward' column plus one column per year
    :param year: year column (string) to keep as the response
    :return: the cleaned, 'Ward'-indexed DataFrame
    """
    # merge census and reviews, index is "Ward"
    sf = census.merge(reviews, on='Ward', how='inner')
    sf.set_index("Ward", inplace=True)
    # Drop all year columns except the one under study. The drop list is
    # computed locally: the original remove()/append() dance mutated the
    # shared module-level ``years_bis`` list, which was neither
    # exception-safe nor reentrant.
    sf.drop([y for y in years_bis if y != year], axis=1, inplace=True)
    # bristol: remove dep index
    #sf.drop(["nonenglish_household", "educated", "number_bedrooms", "students", "dep_children", "stem", "foreign_born", "bohemian"], axis=1, inplace = True) #VIF Bristol
    # manchester
    #sf.drop(["foreign_born", "educated", "median_age", "nonenglish_household", "students"], axis=1, inplace = True)
    # london
    #sf.drop(["household_size", "nonenglish_household", "educated"], axis=1, inplace = True)
    sf.rename(columns={year: 'index'}, inplace=True)
    global keys_df
    keys_df = list(sf.columns)
    keys_df.remove("index")
    print('KEYS')
    print(keys_df)
    sf = remove_empty_rows(sf, "index")
    sf.sort_index(inplace=True)
    return sf
def apply_log(sf):
    """Log-transform heavily skewed columns of *sf* in place.

    A column is log-transformed when its skewtest z-statistic falls outside
    the two-sided 5% band (+/-1.96). On when to apply a log transform see
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3591587/

    :param sf: DataFrame that must contain an 'index' (response) column
    :return: the same DataFrame, mutated in place
    """
    # Guard the response column against log(0) before any transform.
    # Hoisted out of the loop: the original repeated this (idempotent)
    # replacement once per column.
    sf["index"] = sf["index"].replace(0.000000, 0.000001)
    for column in list(sf.columns):
        values = sf[column].tolist()
        skewness = stats.skewtest(values).statistic
        if (skewness > 1.96) or (skewness < -1.96):
            sf[column] = sf[column].apply(numpy.log)
            print("applied log to ", column)
    return sf
def transform_dataframe(sf):
    """Standardise every column of *sf* in place to a z-score
    (zero mean, unit population variance, i.e. std with ddof=0)."""
    for name in list(sf.columns):
        column = sf[name]
        sf[name] = (column - column.mean()) / column.std(ddof=0)
    return sf
def forward_selected(data, response):
    """Linear model designed by forward selection.
    Parameters:
    -----------
    data : pandas DataFrame with all possible predictors and response
    response: string, name of response column in data
    Returns:
    --------
    model: an "optimal" fitted statsmodels linear model
           with an intercept
           selected by forward selection
           evaluated by adjusted R-squared
    """
    # Candidate predictors: every column except the response.
    remaining = set(data.columns)
    remaining.remove(response)
    selected = []
    current_score, best_new_score = 0.0, 0.0
    # Greedy forward selection: repeatedly add the single predictor that
    # most improves adjusted R^2. The loop condition holds only while the
    # previous round accepted its best candidate (scores equal).
    while remaining and current_score == best_new_score:
        scores_with_candidates = []
        for candidate in remaining:
            # Patsy formula: "response ~ sel1 + ... + candidate + 1".
            formula = "{} ~ {} + 1".format(response,
                                           ' + '.join(selected + [candidate]))
            score = smf.ols(formula, data).fit().rsquared_adj
            scores_with_candidates.append((score, candidate))
        scores_with_candidates.sort()
        # After the ascending sort, pop() yields the best-scoring candidate.
        best_new_score, best_candidate = scores_with_candidates.pop()
        if current_score < best_new_score:
            remaining.remove(best_candidate)
            selected.append(best_candidate)
            current_score = best_new_score
        # NOTE(review): if a candidate ties current_score exactly the branch
        # above is skipped but the while-condition stays true — a potential
        # infinite loop; in practice float scores rarely tie. Confirm.
    # Refit the final model on the selected predictors only.
    formula = "{} ~ {} + 1".format(response,
                                   ' + '.join(selected))
    model = smf.ols(formula, data).fit()
    return model
def vif_cal(input_data, dependent_col):
    """Print the variance inflation factor (VIF) of every predictor.

    For each predictor X_i an OLS regression of X_i on the remaining
    predictors is fitted and VIF = 1 / (1 - R^2) is printed, followed by
    the name of the predictor with the largest VIF.

    :param input_data: DataFrame holding the predictors and the response
    :param dependent_col: response column name (excluded from the VIFs)
    """
    x_vars = input_data.drop([dependent_col], axis=1)
    xvar_names = x_vars.columns
    # Renamed from ``max`` — the original shadowed the builtin.
    max_vif = 0
    max_name = ""
    for i in range(0, xvar_names.shape[0]):
        y = x_vars[xvar_names[i]]
        x = x_vars[xvar_names.drop(xvar_names[i])]
        # NOTE: patsy resolves the bare names 'y' and 'x' in this formula
        # from the enclosing scope, not from columns of x_vars.
        rsq = smf.ols(formula="y~x", data=x_vars).fit().rsquared
        vif = round(1/(1-rsq), 2)
        if vif > max_vif:
            max_name = xvar_names[i]
            max_vif = vif
        print (xvar_names[i], " VIF = " , vif)
    print("MAX = ", max_name)
#the residuals we receive should be clean
def moran(shape, residuals):
    """Moran's I spatial autocorrelation of the regression residuals.

    :param shape: path to the ward shapefile to read geometries from
    :param residuals: DataFrame with index named 'Ward' and a 'resid' column
    :return: [Moran's I statistic, simulated (permutation) p-value]
    """
    tx = gpd.read_file(shape)
    #only indices that exist in the residuals would be counted
    #print("residuals")
    #print(residuals)
    # Left join: wards without a residual get resid=0 below.
    tx = tx.merge(residuals, right_on='Ward', left_on="ward_id", how='left')
    tx = tx.set_index("ward_id")
    tx["resid"].fillna(value=0, inplace=True)
    '''
    hr10 = ps.Quantiles(tx["resid"], k=10)
    f, ax = plt.subplots(1, figsize=(9, 9))
    tx.assign(cl=hr10.yb).plot(column='cl', categorical=True, k=10, cmap='OrRd', linewidth=0.1, ax=ax, edgecolor='black', legend=True)
    ax.set_axis_off()
    plt.title("HR90 Deciles")
    '''
    # NOTE(review): stray plt.show() left over from the commented-out
    # plotting block above — with a GUI backend this blocks execution;
    # confirm it can be removed.
    plt.show()
    indices_to_keep = tx.index.values
    # NOTE(review): reads the module-level ``shapefile`` here instead of the
    # ``shape`` parameter used above — works only because callers pass
    # ``shapefile``; confirm and unify.
    df = ps.pdio.read_files(shapefile)
    df = df.set_index("ward_id")
    df = df.loc[indices_to_keep]
    # Rook contiguity weights (shared edges only), row-standardised ('r').
    W = ps.weights.Rook.from_dataframe(df)
    W.transform = 'r'
    score = ps.Moran(tx['resid'], W)
    return([score.I, score.p_sim])
#here we have the option between queen and rook. rook only considers common edges, queen does consider vertices
#!! We need to apply it to the residuals
#def get_moran(tx):
#W = ps.rook_from_shapefile(shapefile)
def generate_yearly_dict(theme):
    """Run the full per-year pipeline for one review theme.

    For every year in the module-level ``years`` list: load the census and
    theme review tables, clean/log/standardise them, fit a forward-selected
    OLS model, and compute Moran's I of its residuals.

    :param theme: theme CSV file name suffix (appended to table_reviews+pref)
    :return: [rsquared, adjusted, moran, pvalues, coeff] — each a dict
             keyed by year; pvalues/coeff map year -> {predictor: value}
    """
    rsquared = {}
    adjusted = {}
    pvalues = {}
    coeff = {}
    mor = {}
    #set-up:
    for year in years:
        pvalues[year] = {}
        coeff[year] = {}
    for year in years:
        #prep
        print(theme, year)
        census = pd.read_csv(table_census)
        reviews = pd.read_csv(table_reviews + pref + theme)
        sf = clean_dataframe(census, reviews, year)
        apply_log(sf)
        sf = transform_dataframe(sf)
        model = forward_selected(sf, "index")
        print(theme, year)
        print(model.summary())
        # Residuals re-packaged as a 'Ward'-indexed frame for moran().
        residuals_dict = dict(model.resid)
        residuals_df = pd.DataFrame.from_dict(data = residuals_dict, orient = "index", columns = ["resid"])
        residuals_df.index.name = "Ward"
        rsquared[year] = model.rsquared
        adjusted[year] = model.rsquared_adj
        mor[year] = moran(shapefile, residuals_df)
        # Only the predictors selected by forward_selected appear here.
        for key, value in dict(model.pvalues).items():
            pvalues[year][key] = value
        for key, value in dict(model.params).items():
            coeff[year][key] = value
        #getting rsquared
        '''
        print("R-squared", model.rsquared)
        print("Adjusted", model.rsquared_adj)
        #getting pvalues
        print('p-values: ', model.pvalues)
        #getting coefficients
        print('Coefficient: ', model.params)
        #moran's test
        print(moran(shapefile, residuals_df))
        '''
    return[rsquared, adjusted, mor, pvalues, coeff]
#vif_cal(sf, "index")
def tocsv():
    """Run the stepwise regression for every theme and dump results to CSV.

    One CSV per theme is written under ``download_dir`` containing, per
    year: R^2, adjusted R^2, each predictor's p-value and coefficient, and
    Moran's I (statistic and pseudo p-value) of the residuals. Predictors
    not selected for a given year are recorded as -1.
    """
    for theme in themes:
        values = generate_yearly_dict(theme)
        rsquared = values[0]
        adjusted = values[1]
        mor = values[2]
        pvalues = values[3]
        coeff = values[4]
        row_rsquared = []
        row_adjusted = []
        row_mor = []
        row_morp = []
        row_pvalues = {}
        row_coef = {}
        for key in keys_df:
            row_pvalues[key] = []
            row_coef[key] = []
        for year in years:
            row_mor.append(mor[year][0])
            row_morp.append(mor[year][1])
            row_rsquared.append(rsquared[year])
            row_adjusted.append(adjusted[year])
            for key in keys_df:
                if key in coeff[year].keys():
                    row_coef[key].append(coeff[year][key])
                    row_pvalues[key].append(pvalues[year][key])
                else:
                    # Predictor not selected this year: -1 placeholder.
                    row_coef[key].append(-1)
                    row_pvalues[key].append(-1)
        #the actual writing part
        print("we are here")
        print(row_adjusted)
        # Context manager so the handle is flushed and closed even on error
        # (the original leaked the file object returned by open()); the
        # newline='' argument is the documented way to open csv files.
        with open(download_dir + theme, "w", newline="") as csv_file:
            csvw = csv.writer(csv_file)
            csvw.writerow([theme])
            csvw.writerow(["", ""]+years)
            csvw.writerow(["R squared", ""] + row_rsquared)
            csvw.writerow(["Adjusted R squared", ""] + row_adjusted)
            print("KEYS")
            print(keys_df)
            for key in keys_df:
                print(key)
                csvw.writerow([key, "pval"] + row_pvalues[key])
                csvw.writerow([key, "coeff"] + row_coef[key])
            csvw.writerow(["Moran's test", ""] + row_mor)
            csvw.writerow(["Moran's p", ""] + row_morp)
# Resolved: this region previously held unresolved merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>), which are a syntax error; the function
# from origin/master is kept.
def intersperse(lst, item):
    """Return a copy of *lst* with *item* inserted between every two elements.

    Example: intersperse([1, 2, 3], 0) -> [1, 0, 2, 0, 3]; an empty or
    single-element list is returned as a (shallow) copy.
    """
    result = [item] * (len(lst) * 2 - 1)
    # Overwrite the even slots with the original elements; the odd slots
    # keep the separator item.
    result[0::2] = lst
    return result
'''
census = pd.read_csv(table_census)
reviews = pd.read_csv(table_reviews)
sf = clean_dataframe(census, reviews, "2018")
apply_log(sf)
transform_dataframe(sf)
result = lin_reg(sf, "index")
#print(result.summary())
residuals_dict = dict(result.resid)
residuals_df = pd.DataFrame.from_dict(data = residuals_dict, orient = "index", columns = ["resid"])
residuals_df.index.name = "Ward"
#plot_quartiles(shapefile, residuals_df)
#vif_cal(sf, "index")
'''
# Entry point: run the full stepwise pipeline and write one CSV per theme.
tocsv()
| [
"sadeer11@gmail.com"
] | sadeer11@gmail.com |
a93ea63288eff967bb4d9195f3c82744bd638f54 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_fasting.py | 2a2b2ee81235039958aac103ee2d9541cc58f881 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
from xai.brain.wordbase.verbs._fast import _FAST
# class header
class _FASTING(_FAST, ):
    """Word-base entry for "fasting", an inflected form of the verb "fast"."""
    def __init__(self,):
        # Initialise the base word, then record this form's own attributes.
        _FAST.__init__(self)
        self.name = "FASTING"  # surface form of the word
        self.specie = 'verbs'  # word-base category (module path: wordbase/verbs)
        self.basic = "fast"  # base/lemma form
        self.jsondata = {}  # extra metadata; empty for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e80d10438b5937276fb7778906b03ba53f30d116 | c5b5d9b00fa7e4f5b2d048f3dc302a39398e190b | /imdb.py | b1aa48f07db7bb8cef29bdd4edd306fdb4ba2b05 | [] | no_license | erodner/imdbpy | 11e260f9f01cc3cdb4097fe1f6a2901b01f3bb12 | 39b828b5583521757aa4d9dfbac2d9168cce258f | refs/heads/master | 2021-01-10T12:38:59.552093 | 2015-11-18T19:48:21 | 2015-11-18T19:48:21 | 46,417,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,962 | py | """ The following script downloads IMDB ratings and
saves a part of these results as a JSON file.
The author of the script does not take any warranty for
the damage this script might cause.
Author: Erik Rodner (2015)
"""
copyright = """
Use this software and it's created data
at your own risk. Only a small part of the IMDB data should be
used and only for individual and personal use.
IMDB: http://www.imdb.com/help/show_leaf?usedatasoftware
Information courtesy of IMDb (http://www.imdb.com).
"""
# NOTE: this is a Python 2 script (urllib2, StringIO, json.dump(encoding=...)).
import json
import re
import urllib2
import gzip
from StringIO import StringIO
import argparse
# FTP mirrors carrying the plain-text IMDB "ratings.list.gz" dump.
mirrors = {'berlin': 'ftp://ftp.fu-berlin.de/pub/misc/movies/database/ratings.list.gz',
           'sweden': 'ftp://ftp.sunet.se/pub/tv+movies/imdb/ratings.list.gz',
           'finland': 'ftp://ftp.funet.fi/pub/mirrors/ftp.imdb.com/pub/ratings.list.gz'}
parser = argparse.ArgumentParser(description='Script for IMDB ratings parsing',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--minvotes', help='minimum number of votes needed', default=10000, type=int)
parser.add_argument('--mirror', help='specify mirror site to use', choices=mirrors.keys(), default='berlin')
args = parser.parse_args()
# minimum number of votes needed to be added in the database
min_votes = args.minvotes
# regular expression for a single line in the ratings file
# there are a few movies where this expression doesn't work, but
# these movies shouldn't be part of our small dataset anyway
# groups: distribution, votes, rating, quoted title, year, episode remainder
line_pattern = '^\s*([\.\d\*]+)\s+(\d+)\s+([\d\.]+)\s+"?(.+?)"?\s+\((\d\d\d\d).*?\)\s*(.*?)$'
# regular expression for a single episode
episode_pattern = '^\s*\{(.+?)\}\s*$'
# regular expression to extract episode season and number
episode_numbers_pattern = '\(#(\d+)\.(\d+)\)\s*$'
# URL to the ratings file we are downloading
# http://www.imdb.com/interfaces
ratings_link = mirrors[args.mirror]
#
#
# Downloading the ratings file and open gzip stream
print ("Opening connection...")
response = urllib2.urlopen(ratings_link)
print ("Downloading file...")
buf = StringIO( response.read() )
print ("Processing file...")
f = gzip.GzipFile(fileobj=buf)
# counting the lines processed
line_count = 0
# this is the total number of lines in the ratings.list file currently
# (used only for the progress display below)
total_count = 663327
# initializing the database
movies = []
episodes = {}
movies_set = set()
copying_policy_section = False
rating_error = 0.0
rating_error_num = 0
for line in f:
    # display a small progress bar
    line_count += 1
    if line_count % 10000 == 1 or line_count == total_count:
        print ("{:4.2f}% read from the database".format(line_count*100.0/total_count))
    # regular expression matching for movies or episodes
    m = re.match(line_pattern, line)
    if m:
        distribution, number_of_votes, rating, title, year, episode = m.groups(0)
        number_of_votes = int(number_of_votes)
        rating = float(rating)
        year = int(year)
        # The distribution of votes is given as a string
        # with the following characters for each rating
        # "." no votes cast "3" 30-39% of the votes "7" 70-79% of the votes
        # "0" 1-9% of the votes "4" 40-49% of the votes "8" 80-89% of the votes
        # "1" 10-19% of the votes "5" 50-59% of the votes "9" 90-99% of the votes
        # "2" 20-29% of the votes "6" 60-69% of the votes "*" 100% of the votes
        # Reconstruct an approximate vote histogram from the bucket
        # midpoints, and the mean rating implied by it.
        vote_sum = 0.0
        mean_rating = 0.0
        single_votes = []
        for index, l in enumerate(list(distribution)):
            if l == '.':
                p = 0
            elif l == '*':
                p = 1.0
            else:
                p = (int(l)*10 + 5)/100.0
            vote_sum += p
            mean_rating += p*(index+1)
            single_votes.append(p)
        mean_rating /= vote_sum
        single_votes = [ v/vote_sum for v in single_votes ]
        #weighted_rank = (number_of_votes/(number_of_votes+25000.0))*mean_rating + (25000.0/(number_of_votes+25000.0))*6.9
        # Track how far the histogram-implied mean deviates from the
        # published rating (reported after the loop).
        rating_error += abs(mean_rating - rating)
        rating_error_num += 1
        stats = {'votes': number_of_votes, 'rating': rating, 'year': year,
                'title': title, 'distribution': single_votes}
        m_episode = re.match(episode_pattern, episode)
        if m_episode and title in movies_set:
            # this is an episode, add it no matter the number of votes
            # (episodes are only recorded for series already accepted below)
            episode_title = m_episode.groups(0)[0]
            if not title in episodes:
                episodes[title] = []
            # get the season or episode if possible
            m_episode_numbers = re.search(episode_numbers_pattern, episode_title)
            if m_episode_numbers:
                stats['season'] = int(m_episode_numbers.groups(0)[0])
                stats['episode'] = int(m_episode_numbers.groups(0)[1])
            stats['episode_title'] = episode_title
            episodes[title].append ( stats )
        elif number_of_votes > min_votes:
            # add the movie
            movies.append(stats)
            movies_set.add(title)
    else:
        #print ("The following line can not be parsed: {}".format(line)
        # find the COPYING POLICY section
        if line.find('COPYING POLICY') >= 0:
            copying_policy_section = True
    # add the COPYING POLICY section
    if copying_policy_section:
        copyright += line
# print some statistics
rating_error /= rating_error_num
print ("Error for the rating estimation according to the distribution: {}".format(rating_error))
print ("Number of movies in the database: {}".format(len(movies)))
print ("Number of series in the database: {}".format(len(episodes)))
# output the resulting dataset subset in a json file together
# with the copyright
with open('imdb.json', 'w') as f:
    json.dump({'movies': movies, 'episodes': episodes, 'copyright': copyright},
              f, encoding='latin1', indent=4)
| [
"Erik.Rodner@uni-jena.de"
] | Erik.Rodner@uni-jena.de |
05d534c0239f9d4aa4cb7e6433649d75109667ae | 57dc403b179ef9e558134089ec5ae71feff07526 | /app_agenda/views.py | 33d1595b023b9e0a13eee9926a66d9ae7b914e6e | [] | no_license | joopedroe/App_Agenda | a041b58620291638587d65e98f73565ca5d092f7 | e787a5c3df27185e9433aff2203175ace0c2fe1d | refs/heads/master | 2020-05-03T07:17:54.499726 | 2019-03-30T00:53:05 | 2019-03-30T00:53:05 | 178,493,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from django.shortcuts import render
from django.views.generic import ListView
# Create your views here.
from .models import *
class HomePageView(ListView):
    """Home page: lists every ``Agenda`` record via app_agenda/home.html."""
    model=Agenda
    template_name='app_agenda/home.html'
"labins@labin.canoas"
] | labins@labin.canoas |
26d87e2f55b86eb525dd1b896bd17afd09cd8277 | 09c8e2ead4c8d76e76bfd5fd7b8abaed709b913b | /data_common/spark/hbase_util.py | 7c7631bd0d731a1fe643c2dec7f4d30286a148fe | [] | no_license | jiangsiwei2018/BigData | c004f8e1bb290e6591745aec0dcaeb8542765e3b | 568a627f32c2f4526d508096e1ded942bdf7b425 | refs/heads/master | 2023-08-19T14:48:24.244248 | 2021-10-19T18:56:18 | 2021-10-19T18:56:18 | 345,369,365 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,764 | py | # encoding=utf-8
import json
import xmltodict
class HbaseConf:
    """Format helpers for moving rows between Spark, happybase and HBase.

    The canonical ("standard") row format used throughout this module is
        (row_key, {"f1:c1": "v1", "f1:c2": "v2", "f2:c3": "v3"})
    e.g.
        ('0001', {'data:grade': '7-1', 'data:school': 'SZZX',
                  'info:name': 'Tom', 'info:sex': 'male'})
    """
    @staticmethod
    def get_xml_to_dict(xml_path):
        """Parse a Hadoop-style XML config file into plain dicts/lists.

        The json round-trip converts xmltodict's OrderedDicts into
        ordinary built-in containers.
        """
        with open(xml_path, encoding='utf-8') as fp:
            xml_obj = xmltodict.parse(fp.read())
        string = json.dumps(xml_obj)
        return json.loads(string)

    @staticmethod
    def get_hbase_conf(suffix=''):
        """Merge every <property> name/value pair from the local
        core/hdfs/yarn/hbase site XMLs (under ./conf) into one dict.

        :param suffix: optional file-name suffix, e.g. '' or '-prod'
        """
        xml_list = [f'core-site{suffix}.xml', f'hdfs-site{suffix}.xml',
                    f'yarn-site{suffix}.xml', f'hbase-site{suffix}.xml']
        conf_all = {}
        import os
        for xml_name in xml_list:
            # Distinct names: the original reused ``xml_path`` for both the
            # bare file name and the joined path.
            xml_path = os.path.dirname(__file__) + '/conf/' + xml_name
            conf = HbaseConf.get_xml_to_dict(xml_path)
            properties = conf.get('configuration', {}).get('property')
            # A file with a single <property> parses to a dict, not a list.
            properties = properties if isinstance(properties, list) else [properties]
            for prop in properties:  # renamed: ``property`` shadowed the builtin
                conf_all[prop['name']] = prop['value']
        return conf_all

    @staticmethod
    def spark_hbase_read_format(row):
        """Convert a record read from HBase by Spark into the canonical format.

        ``row`` is ``(row_key, lines)`` where ``lines`` is a newline-joined
        string of JSON cell descriptions, each carrying at least
        "columnFamily", "qualifier" and "value" keys, e.g.
          '{"qualifier": "grade", "columnFamily": "data", "value": "7-1", ...}'

        :return: (row_key, {"family:qualifier": value, ...})
        """
        row_key, lines = row[0], row[1]
        cells = [json.loads(line) for line in lines.split('\n')]
        row_data = {}
        for cell in cells:
            # The cell's "timestamp" field is ignored (metadata only).
            row_data[f"{cell['columnFamily']}:{cell['qualifier']}"] = cell['value']
        return row_key, row_data

    @staticmethod
    def spark_hbase_write_format(row):
        """Convert a canonical row into the cell tuples Spark writes to HBase.

        ('0001', {'data:grade': '7-1', 'info:name': 'Tom'})
        becomes
        [('0001', ['0001', 'data', 'grade', '7-1']),
         ('0001', ['0001', 'info', 'name', 'Tom'])]
        """
        row_key, row_data = row[0], row[1]
        cells = []
        for key, value in row_data.items():
            if 'timestamp' in key:
                continue  # timestamp pseudo-columns are metadata, not cells
            # maxsplit=1 keeps qualifiers that themselves contain ':'.
            family, column = key.split(':', 1)
            cells.append((row_key, [row_key, family, column, value]))
        return cells

    @staticmethod
    def dict2row(row_key, row_data):
        """Flatten a nested {'family': {'col': value}} dict into the
        canonical {'family:col': value} format.

        ('0001', {'info': {'name': 'Jack'}, 'data': {'grade': '7-2'}})
        -> ('0001', {'info:name': 'Jack', 'data:grade': '7-2'})
        """
        flat = {}
        for family, columns in row_data.items():
            for column, value in columns.items():
                flat[f'{family}:{column}'] = value
        return row_key, flat

    @staticmethod
    def row2dict(row):
        """Inverse of :meth:`dict2row`: nest a canonical row by family.

        ('0001', {'info:name': 'Jack'})
        -> ('0001', {'info': {'name': 'Jack'}})
        """
        row_key, row_data = row[0], row[1]
        nested = {}
        for key, value in row_data.items():
            # maxsplit=1 keeps qualifiers that themselves contain ':'.
            family, column = key.split(':', 1)
            nested.setdefault(family, {})[column] = value
        return row_key, nested

    @staticmethod
    def bytes2str(value, decode='utf-8'):
        """Decode *value* if it is bytes; return it unchanged otherwise."""
        if isinstance(value, bytes):
            return value.decode(decode)
        return value

    @staticmethod
    def row_bytes2str(row):
        """Decode a (row_key, {col: val}) pair from bytes to str."""
        row_key = HbaseConf.bytes2str(row[0])
        row_data = {HbaseConf.bytes2str(k): HbaseConf.bytes2str(v)
                    for k, v in row[1].items()}
        return row_key, row_data
class HappyHbaseUtil:
    """HBase helper wrapping the ``happybase`` client library.

    Rows are returned in the module's canonical format
    (row_key, {"family:column": value}) with bytes decoded to str.
    """
    def __init__(self):
        """Connect immediately using the hard-coded dev-cluster settings."""
        self.host = '192.168.131.11'
        self.port = 9090
        # NOTE(review): ``timeout`` is stored but never passed to happybase
        # in connection() — confirm whether it should be.
        self.timeout = 5000
        self.client = None
        self.connect = None
        self.transport = None
        self.connection()
    def connection(self):
        """Open a buffered, binary-protocol Thrift connection to HBase."""
        import happybase
        self.connect = happybase.Connection(host=self.host,
                                            port=self.port,
                                            autoconnect=True,
                                            transport='buffered',
                                            protocol='binary')
        self.client = self.connect.client
    def get_table_list(self):
        """Return all table names, decoded from bytes to str."""
        return [HbaseConf.bytes2str(table) for table in self.connect.tables()]
    def disable_table(self, table):
        """Disable a table: destructive operations (e.g. deletes) require
        the table to be disabled first."""
        self.connect.disable_table(table)
    def enable_table(self, table):
        """Enable a (previously disabled) table."""
        self.connect.enable_table(table)
    def create_table(self, table, family_info):
        """Create *table* with the given column-family options, e.g.:

        families = {
            'cf1': dict(max_versions=10),
            'cf2': dict(max_versions=1, block_cache_enabled=False),
            'cf3': dict(),  # use defaults
        }

        :param table: table name
        :param family_info: mapping of family name -> options dict
        :return: True (happybase raises on failure)
        """
        self.connect.create_table(table, family_info)
        return True
    def get_columns_desc(self, table):
        """Return the column-family settings of *table* via happybase, e.g.:

        {b'data': {'name': b'data:', 'max_versions': 1, ...},
         b'info': {'name': b'info:', 'max_versions': 1, ...}}
        """
        t = self.connect.table(table)
        info = t.families()
        return info
    def get_columns_desc2(self, table):
        """Return the raw Thrift ColumnDescriptor mapping of *table*, e.g.:

        {b'data:': ColumnDescriptor(name=b'data:', maxVersions=1, ...),
         b'info:': ColumnDescriptor(name=b'info:', maxVersions=1, ...)}
        """
        return self.client.getColumnDescriptors(table)
    def get_cells(self, table, row_key, family_column, timestamp=None, include_timestamp=False):
        """Return the stored versions of one cell ('family:column')."""
        t = self.connect.table(table)
        cells = t.cells(row_key, family_column, timestamp=timestamp,
                        include_timestamp=include_timestamp)
        return cells
    def get_row(self, table, row_key, columns=None, timestamp=None, include_timestamp=False):
        """Fetch one row and decode it to the canonical str format, e.g.:

        ('0001', {'data:grade': '7-1', 'data:school': 'SZZX',
                  'info:name': 'Tom', 'info:sex': 'male'})

        :param columns: optional list of 'family:column' names to fetch
        :param timestamp: optional upper bound on cell timestamps
        :param include_timestamp: also return each value's timestamp
        """
        t = self.connect.table(table)
        info = t.row(row_key, columns=columns, timestamp=timestamp,
                     include_timestamp=include_timestamp)
        return HbaseConf.row_bytes2str((row_key, info))
    def get_row2(self, table, row_key):
        """Fetch one row as raw Thrift TRowResult objects, e.g.:

        [TRowResult(row=b'0001', columns={b'data:grade': TCell(...), ...})]
        """
        return self.client.getRow(table, row_key)
    def put_row(self, table, row_key, data):
        """Insert (or overwrite) the cells of one row.

        :param table: table name
        :param row_key: row key
        :param data: {'family:key1': 'value1', 'family:key2': 'value2'}
        :return: True (happybase raises on failure)
        """
        t = self.connect.table(table)
        t.put(row_key, data)
        return True
    def get_batch(self, table, rows):
        """Yield the requested rows in canonical decoded format.

        :param table: table name
        :param rows: list of row keys, e.g. ['row-key1', 'row-key2']
        """
        t = self.connect.table(table)
        for row in t.rows(rows):
            yield HbaseConf.row_bytes2str(row)
    def put_batch(self, table, list_data):
        """Write several rows in one happybase Batch, e.g.:

        [('row-key1', {'family:key1': 'value1', 'family:key2': 'value2'}),
         ('row-key2', {'family:key1': 'value1', 'family:key2': 'value2'})]
        """
        # batch write: buffer all puts, then send once
        t = self.connect.table(table)
        bat = t.batch()
        for row_key, row_data in list_data:
            bat.put(row_key, row_data)
        bat.send()
    def scan(self, table, **kwargs):
        """Scan *table*, yielding rows in canonical decoded format.

        Keyword arguments are passed straight to happybase's Table.scan:

        scanner = table.scan(
            row_start=None,       # first row to scan (inclusive); None = table start
            row_stop=None,        # row to stop at (exclusive); None = table end
            row_prefix=None,      # only rows whose key has this prefix
            columns=None,         # list/tuple of columns to fetch; None = all
            filter=None,          # HBase filter string, e.g.
                                  # "SingleColumnValueFilter ('f', 'c1', =, 'binary:val1')"
            timestamp=None,       # return versions below this timestamp
            include_timestamp=False,  # also return each value's timestamp
            batch_size=1000,      # batch size for retrieving results
            scan_batching=None,   # server-side scan batching
            limit=None,           # maximum number of rows
            sorted_columns=False, # return columns sorted by name
            reverse=False,        # scan backwards
        )

        Examples:
            # scan a key range
            table.scan(row_start='www.test2.com', row_stop='www.test3.com')
            # scan by key prefix
            table.scan(row_prefix='www.test')
        """
        t = self.connect.table(table)
        for row in t.scan(**kwargs):
            yield HbaseConf.row_bytes2str(row)
    def close(self):
        """Close the underlying happybase connection."""
        return self.connect.close()
class HbaseUtil:
    """HBase helper using the raw Thrift-generated ``Hbase`` client
    directly (no happybase wrapper)."""
    def __init__(self):
        """Connect immediately using the hard-coded dev-cluster settings."""
        self.host = '192.168.131.11'
        self.port = 9090
        self.timeout = 5000
        self.client = None
        self.transport = None
        self.connection()
    def connection(self):
        """Open the raw Thrift transport and build an ``Hbase.Client``."""
        from hbase import Hbase
        from thrift.transport import TSocket, TTransport
        from thrift.protocol import TBinaryProtocol
        # Server address and port: the host runs the HMaster / thrift
        # server; the thrift server's default port is 9090.
        socket = TSocket.TSocket(self.host, self.port)
        # Optional socket timeout.
        socket.setTimeout(self.timeout)
        # Transport layer (TFramedTransport or TBufferedTransport).
        self.transport = TTransport.TBufferedTransport(socket)
        # Wire protocol: the default simple binary serialisation protocol.
        protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
        self.client = Hbase.Client(protocol)
        self.transport.open()
    def disable_table(self, name):
        """Disable a table: destructive operations require the table to be
        disabled first."""
        self.client.disableTable(name)
    def enable_table(self, name):
        """Enable a (previously disabled) table."""
        self.client.enableTable(name)
    def create_table(self, name, column_descriptors):
        """Create *name* with the given Thrift ColumnDescriptor list."""
        self.client.createTable(name, column_descriptors)
    def get_table_list(self):
        """Return the raw table-name list from the Thrift client."""
        return self.client.getTableNames()
    def get_columns_desc(self, name):
        """Return the raw column-family descriptors of *name*."""
        return self.client.getColumnDescriptors(name)
    def get_row(self, table, row_key):
        """Return the raw TRowResult list for *row_key*."""
        return self.client.getRow(table, row_key)
    def close(self):
        """Close the underlying Thrift transport."""
        return self.transport.close()
if __name__ == '__main__':
    # Ad-hoc smoke test against the dev cluster: dump the merged XML config,
    # list tables, fetch a row three different ways, then scan 'student'.
    conf = HbaseConf.get_hbase_conf()
    print(conf)
    hb = HappyHbaseUtil()
    table_names = hb.get_table_list()
    print(table_names)
    res = hb.get_row('student', '0001')
    res2 = hb.get_row2('student', '0001')
    res3 = hb.get_cells('student', '0001', 'info:name')
    print(res)
    print(res2)
    print(res3)
    for row in hb.scan('student'):
        # filter="SingleColumnValueFilter ('data', 'school', =, 'binary:SZZX')"):
        print(row)
    # batch = hb.get_batch('student', ['0003', '0004'])
    # for item in batch:
    #     print(item)
    # [('row-key1', {'family:key1': 'value1', 'family:key2': 'value2'}),
    # ('row-key2'), {'family:key1': 'value1', 'family:key2': 'value2'}]
    # data = [
    #     ('0004', {'info': {'name': 'Jack', 'sex': 'female'}, 'data': {'school': 'SXZX-1', 'grade': '7-2'}}),
    #     ('0005', {'info': {'name': 'Tony', 'sex': 'male'}, 'data': {'school': 'SXZX-2', 'grade': '7-2'}})
    # ]
    # data = ('0001', '{"qualifier" : "grade", "timestamp" : "1597600074297", "columnFamily" : "data", "row" : "0001", "type" : "Put", "value" : "7-1"}\n{"qualifier" : "school", "timestamp" : "1597600099790", "columnFamily" : "data", "row" : "0001", "type" : "Put", "value" : "SZZX"}\n{"qualifier" : "name", "timestamp" : "1597600001433", "columnFamily" : "info", "row" : "0001", "type" : "Put", "value" : "Tom"}\n{"qualifier" : "sex", "timestamp" : "1597600029796", "columnFamily" : "info", "row" : "0001", "type" : "Put", "value" : "male"}')
    # data_read = HbaseConf.convert_read(data)
    # print(data_read)
    # print(HbaseConf.row2dict(data_read))
    # date_write = HbaseConf.convert_write(data_read)
    # print(date_write)
    # d = [('0001', ['0001', 'data', 'grade', '7-1']),
    # ('0001', ['0001', 'data', 'school', 'SZZX']),
    # ('0001', ['0001', 'info', 'name', 'Tom']),
    # ('0001', ['0001', 'info', 'sex', 'male'])]
| [
"1910479313@qq.com"
] | 1910479313@qq.com |
c52bcba0f9d5a677631d2c20a62d28a6c84fd7d1 | 3afb516b7a759478a5b7181c94941934c48ef63e | /baselines/cifar/sngp.py | ab31eed8491da1bf18bec06e6979d462495fd4e7 | [
"Apache-2.0"
] | permissive | barseghyanartur/uncertainty-baselines | e85848dd04db998b114254186eb46917395722fc | 982323e1e82699ff42292f53cd3bbf3cd180912c | refs/heads/master | 2023-02-09T00:12:07.632028 | 2021-01-04T09:48:46 | 2021-01-04T09:49:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,412 | py | # coding=utf-8
# Copyright 2020 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wide ResNet 28-10 with SNGP on CIFAR-10.
Spectral-normalized neural GP (SNGP) [1] is a simple method to improve
a deterministic neural network's uncertainty by applying spectral
normalization to the hidden layers, and then replace the dense output layer
with a Gaussian process layer.
## Reproducibility Instruction for CIFAR-100:
When running this script on CIFAR-100, set base_learning_rate=0.08 and
gp_mean_field_factor=12.5 to reproduce the benchmark result.
## Combining with MC Dropout:
As a single-model method, SNGP can be combined with other classic
uncertainty techniques (e.g., Monte Carlo dropout, deep ensemble) to further
improve performance.
This script supports adding Monte Carlo dropout to
SNGP by setting `use_mc_dropout=True`, setting `num_dropout_samples=10`
(or any integer larger than 1). Additionally we recommend adjust
`gp_mean_field_factor` slightly, since averaging already calibrated
individual models (in this case single SNGPs) can sometimes lead to
under-confidence [3].
## References:
[1]: Jeremiah Liu et al. Simple and Principled Uncertainty Estimation with
Deterministic Deep Learning via Distance Awareness.
_arXiv preprint arXiv:2006.10108_, 2020.
https://arxiv.org/abs/2006.10108
[2]: Zhiyun Lu, Eugene Ie, Fei Sha. Uncertainty Estimation with Infinitesimal
Jackknife. _arXiv preprint arXiv:2006.07584_, 2020.
https://arxiv.org/abs/2006.07584
[3]: Rahul Rahaman, Alexandre H. Thiery. Uncertainty Quantification and Deep
Ensembles. _arXiv preprint arXiv:2007.08792_, 2020.
https://arxiv.org/abs/2007.08792
[4]: Hendrycks, Dan et al. AugMix: A Simple Data Processing Method to Improve
Robustness and Uncertainty. In _International Conference on Learning
Representations_, 2020.
https://arxiv.org/abs/1912.02781
[5]: Zhang, Hongyi et al. mixup: Beyond Empirical Risk Minimization. In
_International Conference on Learning Representations_, 2018.
https://arxiv.org/abs/1710.09412
"""
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import data_utils # local file import
import utils # local file import
import uncertainty_metrics as um
# General training flags.
flags.DEFINE_integer('seed', 42, 'Random seed.')
flags.DEFINE_integer('per_core_batch_size', 64, 'Batch size per TPU core/GPU.')
flags.DEFINE_float('base_learning_rate', 0.05,
                   'Base learning rate when total batch size is 128. It is '
                   'scaled by the ratio of the total batch size to 128.')
flags.DEFINE_integer('lr_warmup_epochs', 1,
                     'Number of epochs for a linear warmup to the initial '
                     'learning rate. Use 0 to do no warmup.')
flags.DEFINE_float('lr_decay_ratio', 0.2, 'Amount to decay learning rate.')
flags.DEFINE_list('lr_decay_epochs', ['60', '120', '160'],
                  'Epochs to decay learning rate by.')
flags.DEFINE_float('l2', 3e-4, 'L2 regularization coefficient.')
flags.DEFINE_float('train_proportion', 1.,
                   'Only a fraction (between 0 and 1) of the train set is used '
                   'for training. The remainder can be used for validation.')
flags.DEFINE_enum('dataset', 'cifar10',
                  enum_values=['cifar10', 'cifar100'],
                  help='Dataset.')
flags.DEFINE_string('cifar100_c_path', None,
                    'Path to the TFRecords files for CIFAR-100-C. Only valid '
                    '(and required) if dataset is cifar100 and corruptions.')
flags.DEFINE_integer('corruptions_interval', -1,
                     'Number of epochs between evaluating on the corrupted '
                     'test data. Use -1 to never evaluate.')
flags.DEFINE_integer(
    'checkpoint_interval', -1,
    'Number of epochs between saving checkpoints. Use -1 to '
    'never save checkpoints.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE.')
flags.DEFINE_string('output_dir', '/tmp/cifar', 'Output directory.')
flags.DEFINE_integer('train_epochs', 250, 'Number of training epochs.')
# Data Augmentation flags.
flags.DEFINE_bool('augmix', False,
                  'Whether to perform AugMix [4] on the input data.')
flags.DEFINE_integer('aug_count', 1,
                     'Number of augmentation operations in AugMix to perform '
                     'on the input image. In the single model context, it'
                     'should be 1. In the ensembles context, it should be'
                     'ensemble_size if we perform random_augment only; It'
                     'should be (ensemble_size - 1) if we perform augmix.')
flags.DEFINE_float('augmix_prob_coeff', 0.5, 'Augmix probability coefficient.')
flags.DEFINE_integer('augmix_depth', -1,
                     'Augmix depth, -1 meaning sampled depth. This corresponds'
                     'to line 7 in the Algorithm box in [4].')
flags.DEFINE_integer('augmix_width', 3,
                     'Augmix width. This corresponds to the k in line 5 in the'
                     'Algorithm box in [4].')
flags.DEFINE_float('mixup_alpha', 0., 'Mixup hyperparameter, 0. to disable.')
# Dropout flags
flags.DEFINE_bool('use_mc_dropout', False,
                  'Whether to use Monte Carlo dropout for the hidden layers.')
flags.DEFINE_bool('use_filterwise_dropout', True,
                  'Whether to use filterwise dropout for the hidden layers.')
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate.')
flags.DEFINE_integer('num_dropout_samples', 1,
                     'Number of dropout samples to use for prediction.')
flags.DEFINE_integer('num_dropout_samples_training', 1,
                     'Number of dropout samples for training.')
# SNGP flags.
flags.DEFINE_bool('use_spec_norm', True,
                  'Whether to apply spectral normalization.')
flags.DEFINE_integer(
    'spec_norm_iteration', 1,
    'Number of power iterations to perform for estimating '
    'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
                   'Upper bound to spectral norm of weight matrices.')
# Gaussian process flags.
flags.DEFINE_bool('use_gp_layer', True,
                  'Whether to use Gaussian process as the output layer.')
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
    'gp_scale', 2.,
    'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
    'gp_input_dim', 128,
    'The dimension to reduce the neural network input for the GP layer '
    '(via random Gaussian projection which preserves distance by the '
    ' Johnson-Lindenstrauss lemma). If -1, no dimension reduction.')
flags.DEFINE_integer(
    'gp_hidden_dim', 1024,
    'The hidden dimension of the GP layer, which corresponds to the number of '
    'random features used for the approximation.')
flags.DEFINE_bool(
    'gp_input_normalization', True,
    'Whether to normalize the input using LayerNorm for GP layer.'
    'This is similar to automatic relevance determination (ARD) in the classic '
    'GP learning.')
flags.DEFINE_string(
    'gp_random_feature_type', 'orf',
    'The type of random feature to use. One of "rff" (random fourier feature), '
    '"orf" (orthogonal random feature).')
flags.DEFINE_float('gp_cov_ridge_penalty', 1.,
                   'Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
    'gp_cov_discount_factor', -1.,
    'The discount factor to compute the moving average of precision matrix'
    'across epochs. If -1 then compute the exact precision matrix within the '
    'latest epoch.')
flags.DEFINE_float(
    'gp_mean_field_factor', 25.,
    'The tunable multiplicative factor used in the mean-field approximation '
    'for the posterior mean of softmax Gaussian process. If -1 then use '
    'posterior mode instead of posterior mean. See [2] for detail.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_bool('use_bfloat16', False, 'Whether to use mixed precision.')
flags.DEFINE_integer('num_cores', 8, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
                    'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
def main(argv):
  """Trains and evaluates Wide ResNet 28-10 with SNGP on CIFAR-10/100.

  Builds the distribution strategy (TPU or mirrored GPUs), the input
  pipelines (train / optional validation / clean test / optional corrupted
  test), the SNGP model and SGD optimizer, then runs the train/eval loop,
  writing metrics to TensorBoard summaries and periodic checkpoints.

  Args:
    argv: Unused command-line arguments (flags are read from FLAGS).
  """
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  # Pick the distribution strategy: mirrored GPUs, or a TPU (resolved by name
  # or 'local' when --tpu is unset).
  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  ds_info = tfds.builder(FLAGS.dataset).info
  # The train batch is divided by the number of training-time dropout samples
  # because train_step tiles each image that many times, keeping the per-step
  # compute constant.
  batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores
                // FLAGS.num_dropout_samples_training)
  test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  num_classes = ds_info.features['label'].num_classes

  aug_params = {
      'augmix': FLAGS.augmix,
      'aug_count': FLAGS.aug_count,
      'augmix_depth': FLAGS.augmix_depth,
      'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
      'augmix_width': FLAGS.augmix_width,
      'ensemble_size': 1,
      'mixup_alpha': FLAGS.mixup_alpha,
  }
  validation_proportion = 1. - FLAGS.train_proportion
  use_validation_set = validation_proportion > 0.
  train_dataset = data_utils.load_dataset(
      split=tfds.Split.TRAIN,
      name=FLAGS.dataset,
      batch_size=batch_size,
      use_bfloat16=FLAGS.use_bfloat16,
      aug_params=aug_params,
      validation_set=use_validation_set,
      validation_proportion=validation_proportion)
  train_sample_size = ds_info.splits[
      'train'].num_examples * FLAGS.train_proportion
  val_sample_size = ds_info.splits['train'].num_examples - train_sample_size
  # Bug fix: compute steps_per_epoch from the *actual* train sample size.
  # Previously it was first clobbered by the validation step count and then
  # overwritten with the full-split count, which is wrong when
  # train_proportion < 1.
  steps_per_epoch = int(train_sample_size / batch_size)
  steps_per_val = int(val_sample_size / batch_size)
  # NOTE(review): the test split is batched with test_batch_size but stepped
  # with batch_size, as in the original code; the two agree whenever
  # num_dropout_samples_training == 1 — confirm if that flag is ever > 1.
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  if use_validation_set:
    validation_dataset = data_utils.load_dataset(
        split=tfds.Split.VALIDATION,
        name=FLAGS.dataset,
        batch_size=batch_size,
        use_bfloat16=FLAGS.use_bfloat16,
        aug_params=aug_params,
        validation_set=use_validation_set,
        validation_proportion=validation_proportion)
    validation_dataset = strategy.experimental_distribute_dataset(
        validation_dataset)
  clean_test_dataset = utils.load_dataset(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=test_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }
  # Optionally build the corrupted test sets (CIFAR-10-C / CIFAR-100-C), one
  # dataset per (corruption type, intensity) pair.
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar10':
      load_c_dataset = utils.load_cifar10_c
    else:
      load_c_dataset = functools.partial(utils.load_cifar100_c,
                                         path=FLAGS.cifar100_c_path)
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for corruption in corruption_types:
      for intensity in range(1, max_intensity + 1):
        dataset = load_c_dataset(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=test_batch_size,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            strategy.experimental_distribute_dataset(dataset))

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building ResNet model')
    if FLAGS.use_spec_norm:
      logging.info('Use Spectral Normalization with norm bound %.2f',
                   FLAGS.spec_norm_bound)
    if FLAGS.use_gp_layer:
      logging.info('Use GP layer with hidden units %d', FLAGS.gp_hidden_dim)
    model = ub.models.wide_resnet_sngp(
        input_shape=ds_info.features['image'].shape,
        batch_size=batch_size,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=FLAGS.l2,
        use_mc_dropout=FLAGS.use_mc_dropout,
        use_filterwise_dropout=FLAGS.use_filterwise_dropout,
        dropout_rate=FLAGS.dropout_rate,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_input_dim=FLAGS.gp_input_dim,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_random_feature_type=FLAGS.gp_random_feature_type,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale learning rate and the decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = utils.LearningRateSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                        momentum=0.9,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/stddev': tf.keras.metrics.Mean(),
    }
    if use_validation_set:
      metrics.update({
          'val/negative_log_likelihood': tf.keras.metrics.Mean(),
          'val/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
          'val/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
          'val/stddev': tf.keras.metrics.Mean(),
      })
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
          corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator, step):
    """Training StepFn."""
    def step_fn(inputs, step):
      """Per-Replica StepFn."""
      images, labels = inputs
      if tf.equal(step, 0) and FLAGS.gp_cov_discount_factor < 0:
        # Reset the covariance estimator at the beginning of a new epoch
        # (exact-precision mode: the matrix is accumulated per epoch).
        model.layers[-1].reset_covariance_matrix()
      if FLAGS.augmix and FLAGS.aug_count >= 1:
        # Index 0 at augmix preprocessing is the unperturbed image.
        images = images[:, 1, ...]
        # This is for the case of combining AugMix and Mixup.
        if FLAGS.mixup_alpha > 0:
          labels = tf.split(labels, FLAGS.aug_count + 1, axis=0)[1]
      # Tile the batch for multiple training-time dropout samples; mixup
      # labels are one-hot (2-D) so they tile along two axes.
      images = tf.tile(images, [FLAGS.num_dropout_samples_training, 1, 1, 1])
      if FLAGS.mixup_alpha > 0:
        labels = tf.tile(labels, [FLAGS.num_dropout_samples_training, 1])
      else:
        labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if isinstance(logits, tuple):
          # If model returns a tuple of (logits, covmat), extract logits
          logits, _ = logits
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        if FLAGS.mixup_alpha > 0:
          # Mixup labels are soft, so use the dense (categorical) loss.
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.categorical_crossentropy(labels,
                                                       logits,
                                                       from_logits=True))
        else:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.sparse_categorical_crossentropy(labels,
                                                              logits,
                                                              from_logits=True))
        l2_loss = sum(model.losses)
        loss = negative_log_likelihood + l2_loss
        # Scale the loss given the TPUStrategy will reduce sum all gradients.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)
      if FLAGS.mixup_alpha > 0:
        # Recover hard labels from the mixed one-hot labels for the metrics.
        labels = tf.argmax(labels, axis=-1)
      metrics['train/ece'].update_state(labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator), step))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      logits_list = []
      stddev_list = []
      for _ in range(FLAGS.num_dropout_samples):
        logits = model(images, training=False)
        if isinstance(logits, tuple):
          # If model returns a tuple of (logits, covmat), extract both
          logits, covmat = logits
        else:
          covmat = tf.eye(FLAGS.per_core_batch_size)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        # Adjust logits by the predictive variance (mean-field approximation
        # of the softmax Gaussian process posterior, see [2]).
        logits = ed.layers.utils.mean_field_logits(
            logits, covmat, mean_field_factor=FLAGS.gp_mean_field_factor)
        stddev = tf.sqrt(tf.linalg.diag_part(covmat))
        stddev_list.append(stddev)
        logits_list.append(logits)

      # Logits dimension is (num_samples, batch_size, num_classes).
      logits_list = tf.stack(logits_list, axis=0)
      stddev_list = tf.stack(stddev_list, axis=0)
      stddev = tf.reduce_mean(stddev_list, axis=0)
      probs_list = tf.nn.softmax(logits_list)
      probs = tf.reduce_mean(probs_list, axis=0)
      labels_broadcasted = tf.broadcast_to(
          labels, [FLAGS.num_dropout_samples, labels.shape[0]])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_broadcasted, logits_list, from_logits=True)
      # NLL of the sample average: -logsumexp over samples + log(num_samples).
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
          tf.math.log(float(FLAGS.num_dropout_samples)))

      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
        metrics['test/stddev'].update_state(stddev)
      elif dataset_name == 'val':
        metrics['val/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['val/accuracy'].update_state(labels, probs)
        metrics['val/ece'].update_state(labels, probs)
        metrics['val/stddev'].update_state(stddev)
      else:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/stddev_{}'.format(dataset_name)].update_state(
            stddev)

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})
  step_variable = tf.Variable(0, dtype=tf.int32)
  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      step_variable.assign(step)
      # Pass `step` as a tf.Variable to train_step to prevent the tf.function
      # train_step() re-compiling itself at each function call.
      train_step(train_iterator, step_variable)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps,
                     epoch + 1,
                     FLAGS.train_epochs,
                     steps_per_sec,
                     eta_seconds / 60,
                     time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if use_validation_set:
      datasets_to_evaluate['val'] = validation_dataset
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      # Bug fix: use a local step count. The original reassigned
      # steps_per_eval itself, so after evaluating 'val' every later dataset
      # (in this and subsequent epochs) used the validation step count.
      num_eval_steps = (
          steps_per_val if dataset_name == 'val' else steps_per_eval)
      for step in range(num_eval_steps):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_start_time = time.time()
        test_step(test_iterator, dataset_name)
        ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
        metrics['test/ms_per_example'].update_state(ms_per_example)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types,
                                                        max_intensity)
    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    if use_validation_set:
      logging.info('Val NLL: %.4f, Accuracy: %.2f%%',
                   metrics['val/negative_log_likelihood'].result(),
                   metrics['val/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)
  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)
# absl's app.run parses command-line flags into FLAGS before calling main().
if __name__ == '__main__':
  app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.