text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 30 15:48:18 2018
@author: kazuki.onodera

Feature set f110: per-SK_ID_CURR aggregates of SK_ID_PREV from
previous_application, aligned onto train/test and written as feature files.
"""
import numpy as np
import pandas as pd
import os
import utils
utils.start(__file__)
#==============================================================================
PREF = 'f110_'
KEY = 'SK_ID_CURR'
os.system(f'rm ../feature/t*_{PREF}*')
# =============================================================================
# load
# =============================================================================
train = utils.load_train(['SK_ID_CURR']).set_index('SK_ID_CURR')
test = utils.load_test(['SK_ID_CURR']).set_index('SK_ID_CURR')
prev = utils.read_pickles('../data/previous_application', ['SK_ID_CURR', 'SK_ID_PREV'])
# =============================================================================
# prev
# =============================================================================
gr = prev.groupby('SK_ID_CURR')
# compute the aggregate table ONCE and align it onto train and test by index,
# instead of repeating every statistic for both frames (the original computed
# each groupby statistic twice)
agg = gr.SK_ID_PREV.agg(['min', 'mean', 'max', 'median', 'std'])
agg.columns = [f'SK_ID_PREV_{c}' for c in agg.columns]
agg['SK_ID_PREV_std-d-mean'] = agg['SK_ID_PREV_std'] / agg['SK_ID_PREV_mean']
agg['SK_ID_PREV_max-m-min'] = agg['SK_ID_PREV_max'] - agg['SK_ID_PREV_min']
for df in (train, test):
    # column-by-column assignment keeps the original column order and relies
    # on pandas index alignment on SK_ID_CURR, exactly as before
    for c in agg.columns:
        df[c] = agg[c]
train.reset_index(drop=True, inplace=True)
test.reset_index(drop=True, inplace=True)
# =============================================================================
# output
# =============================================================================
utils.to_feature(train.add_prefix(PREF), '../feature/train')
utils.to_feature(test.add_prefix(PREF), '../feature/test')
#==============================================================================
utils.end(__file__)
|
from assets.models import Server,Label,Project,IDC,ServerLog
from django import forms
from django.forms import widgets
from django.forms import fields
from django.utils.translation import gettext_lazy as _
def server_log(*args, **kwargs):
    """Persist a ServerLog row built from ``kwargs['rows']``.

    Best-effort: any failure is printed and swallowed so that audit logging
    never breaks the caller.
    """
    row_data = kwargs.get('rows')
    try:
        ServerLog.objects.create(**row_data)
    except Exception as e:
        print(e)
class ServerForm(forms.ModelForm):
    """Creation form for Server.

    Overrides ssh_port / ssh_user as plain CharFields so the rendered form
    carries the conventional defaults (port 22, user root).
    """
    # SSH port, defaulting to the standard port 22
    ssh_port = forms.CharField(
        label="SSH端口",
        initial="22"
    )
    # SSH login user, defaulting to root
    ssh_user = forms.CharField(
        label="SSH用户名",
        initial="root"
    )
    class Meta:
        model = Server
        fields = ('name','ip','project','label','is_active','system','idc_name','ssh_user','ssh_port','ansible_group')
class ServerEditForm(forms.ModelForm):
    """Edit form for Server with bootstrap-styled widgets and a jQuery
    inputmask on both IP fields.

    NOTE(review): Meta.help_texts contains a 'port' key while the form field
    is named 'ssh_port' -- confirm which key the templates look up.
    """
    # def __init__(self,*args,**kwargs):
    #     self.create_by = kwargs.pop('create_by')
    #     super(ServerEditForm,self).__init__(*args,**kwargs)
    name = fields.CharField(
        label="资产名称",
        error_messages={'required':'资产名称不能为空'},
        widget=widgets.Input(attrs={'class':"form-control"})
    )
    ip = fields.GenericIPAddressField(
        label="IP地址",
        error_messages={'required':'IP地址不能为空'},
        widget=widgets.Input(attrs={
            'class': 'form-control',
            'data-inputmask': "'alias': 'ip'",
            'data-mask': ''
        })
    )
    ssh_port = fields.IntegerField(
        label="SSH 端口",
        widget=widgets.Input(attrs={'class':'form-control'})
    )
    # optional public IP, same inputmask as the private address
    ip2 = fields.GenericIPAddressField(
        label="公网IP地址",
        required=False,
        widget=widgets.Input(attrs={
            'class': 'form-control',
            'data-inputmask': "'alias': 'ip'",
            'data-mask': ''
        })
    )
    class Meta:
        model = Server
        # exclude = ('create_by')
        fields = ['name','ip','project','label','is_active','idc_name','ssh_user','ssh_port','ip2','comment']
        widgets = {
            'project': forms.SelectMultiple(attrs={
                'class':'form-control select2','data-placeholder':'业务组'
            }),
            'label': forms.SelectMultiple(attrs={
                'class': 'form-control select2', 'data-placeholder': '标签'
            }),
            'idc_name':forms.Select(attrs={
                'class':'form-control select2'
            }),
            'ssh_user':forms.SelectMultiple(attrs={
                'class': 'form-control select2', 'data-placeholder': '用户'
            }),
            'comment':forms.Textarea(attrs={
                'class': 'form-control'
            }),
        }
        help_texts = {
            'name': '* required',
            'ip': '* required',
            'port': '* required',
        }
class IdcForm(forms.ModelForm):
    """Create/edit form for IDC records; all widgets are bootstrap-styled."""
    class Meta:
        model = IDC
        fields = ('name','type','address','contack_name','contack_phone','contack_qq','contack_mail','bandwidth')
        widgets = {
            'name':widgets.Input(attrs={
                'class':'form-control'
            }),
            # IDC type rendered as a select2 dropdown
            'type':forms.Select(attrs={
                'class':'form-control select2'
            }),
            'address':widgets.Input(attrs={
                'class':'form-control'
            }),
            'contack_phone':widgets.Input(attrs={
                'class':'form-control'
            }),
            'contack_name':widgets.Input(attrs={
                'class':'form-control'
            }),
            'contack_qq':widgets.Input(attrs={
                'class':'form-control'
            }),
            # email input gets browser-side address validation
            'contack_mail':widgets.EmailInput(attrs={
                'class':'form-control'
            }),
            'bandwidth':widgets.Input(attrs={
                'class':'form-control'
            }),
        }
class ProjectForm(forms.ModelForm):
    """Create/edit form for Project (name + free-text remark)."""
    class Meta:
        model = Project
        fields = ('name','remark')
        widgets = {
            'name': widgets.Input(attrs={
                'class': 'form-control'
            }),
            'remark': widgets.Textarea(attrs={
                'class': 'form-control'
            })
        }
class LabelForm(forms.ModelForm):
    """Create/edit form for Label (name + free-text remark)."""
    class Meta:
        model = Label
        fields = ('name','remark')
        widgets = {
            'name': widgets.Input(attrs={
                'class': 'form-control'
            }),
            'remark': widgets.Textarea(attrs={
                'class': 'form-control'
            })
        }
        labels = {
            'name': '名称',
            'remark':'备注'
        }
|
import densetorch.data
import densetorch.engine
import densetorch.misc
import densetorch.nn
# Expose the package version when the generated version module is present;
# source checkouts without a build step simply omit __version__.
try:
    from .version import __version__  # noqa: F401
except ImportError:
    pass
|
"""To check if string is palindrome or not A string is said to be palindrome if the reverse
of the string is the same as string. For example, “malayalam” is a
palindrome, but “music” is not a palindrome."""
def isPalindrome(check):
    """Return a message saying whether *check* reads the same reversed.

    e.g. "malayalam" is a palindrome, "music" is not.
    """
    # a string is a palindrome when it equals its own reversal
    if check == "".join(reversed(check)):
        return "String is palindrome"
    return "not a palindrome"
|
def main():
    """Entry point: placeholder that just greets until the solver is built."""
    greeting = "hello world"
    print(greeting)
    # TODO: Setup the tests
    # TODO: Get inputs in JSON format
    # TODO: Setup the objects: Algorithm, Team member -> developer, management, Task
    # TODO: Find random solution
    # TODO: Add logical rules before solution finding
if __name__ == '__main__':
    main()
|
from django.shortcuts import render
from django import forms
import pandas as pd
from datetime import datetime
def home(request):
    """Render the Home page with trading dates and closing prices as JSON
    arrays (consumed by the chart on data/Home.html)."""
    df = pd.read_csv("/home/siddharth/Stock-Market-Analysis-master/Stock-Market-App/data.csv")
    # DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0; use
    # positional .iloc instead. Column 0 holds the date string, column 4
    # the closing price -- TODO confirm against data.csv's header.
    date = df.iloc[:, 0].apply(lambda x: datetime.strptime(x, "%Y-%m-%d").date())
    date = date.to_json(orient='records')
    close = df.iloc[:, 4].to_json(orient='records')
    return render(request, 'data/Home.html', {'date': date, 'price': close})
|
import random
def calcRandom(_amplitude, _perc_random):
    """Return a random offset of *_amplitude*: the difference between the
    amplitude and a uniform draw within +/- *_perc_random* percent of it."""
    spread = (_perc_random * _amplitude) / 100
    draw = random.uniform(_amplitude - spread, _amplitude + spread)
    return _amplitude - draw
#!/usr/bin/env -S python3 -W ignore
"""
Retrieves the bitcoin price in euros
"""
import requests
# Best-effort fetch: any failure (network, unexpected JSON) prints a fallback.
try:
    # a timeout prevents the script from hanging forever on a stalled
    # connection (requests has no default timeout)
    r = requests.get('https://api.kraken.com/0/public/Ticker?pair=BTCEUR',
                     timeout=10)
    ticker = r.json()
    # 'c' holds [price, lot volume] of the last trade for the BTC/EUR pair
    print('%.2f€' % float(ticker['result']['XXBTZEUR']['c'][0]))
except Exception:
    print('No data.')
|
#!/usr/bin/python
# Motivational reminder: print the two fixed messages in order.
for mensaje in (
    "Jonathan, recuerda esfozarte y ser mejor persona cada dia ",
    "Tus conocimientos, como Ingeniero de Sistemas, Informatica, Software y Computacion demostraras de lo que heres",
):
    print(mensaje)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tuskarclient.tests import utils as tutils
from tuskarclient.v2 import client
class ClientTest(tutils.TestCase):
    """Smoke tests for the tuskar v2 client: the expected manager
    attributes must be wired up on construction."""
    def setUp(self):
        super(ClientTest, self).setUp()
        # Client only stores the HTTP client, so a bare MagicMock suffices
        mock_http_client = mock.MagicMock()
        self.client = client.Client(mock_http_client)
    def test_managers_present(self):
        # HasManager presumably asserts client.<attr> is the named manager
        # class -- see tuskarclient.tests.utils
        self.assertThat(self.client, tutils.HasManager('PlanManager',
                                                       'plans'))
        self.assertThat(self.client, tutils.HasManager('RoleManager',
                                                       'roles'))
|
from sqlalchemy import exc
from flask import Blueprint, jsonify, request
from jsonpatch import JsonPatch, JsonPatchException, JsonPatchConflict
from jsondiff import diff
import pprint
from datetime import datetime
import re
from project import db
from project.api.models import User, School, Site
from project.tests.utils import add_user
from project.api.excel import extract_users
from project.api.school_utils import process_preferences, get_week_number
from project.api.utils import authenticate, authenticate_admin
from project.api.constants import TEACHER, DATE_FORMAT,\
TIME_FORMAT, EDITABLE_PREFERENCES
# blueprint registered by the app factory; pp is a debugging pretty-printer
school_blueprint = Blueprint('school', __name__)
pp = pprint.PrettyPrinter(indent=4)
@school_blueprint.route('/schools/check', methods=['GET'])
@authenticate
def get_week_number_route(resp):
    """Return the timetable week number for the 'date' query parameter,
    computed against the authenticated user's school preferences."""
    user = User.query.get(resp)
    school = School.query.get(user.school_id)
    response_object = {
        'status': 'fail'
    }
    date = request.args.get('date')
    if not date:
        # BUG FIX: the original fell through with `date` undefined and
        # surfaced a NameError; report the missing parameter explicitly.
        # (the dead try/except ValueError around str() is also gone --
        # str() of a query value never raises ValueError)
        response_object['message'] = 'No date query provided.'
        return jsonify(response_object), 400
    try:
        week_number = get_week_number(str(date), school.preferences)
    except BaseException as e:
        response_object['message'] = str(e)
        return jsonify(response_object), 400
    response_object = {
        'status': 'success',
        'data': week_number
    }
    return jsonify(response_object), 200
@school_blueprint.route('/schools', methods=["GET"])
@authenticate
def get_school(resp):
    """Return the authenticated user's school, including its sites if any."""
    user = User.query.get(resp)
    school = School.query.get(user.school_id)
    # guard clause: no school assigned (or it was deleted)
    if not school:
        return jsonify({
            'status': 'fail',
            'message': 'School doesn\'t exist or not assigned to user.'
        }), 404
    data = school.asdict()
    sites = Site.query.filter_by(school_id=school.id).all()
    if sites:
        data['sites'] = [site.asdict() for site in sites]
    return jsonify({
        'status': 'success',
        'message': 'Found school.',
        'data': data
    }), 200
@school_blueprint.route('/schools', methods=["POST"])
@authenticate_admin
def add_school(resp):
    """Create a new school from the JSON payload and assign the requesting
    admin user to it."""
    user = User.query.get(resp)
    response_object = {
        'status': 'fail',
        'message': 'Invalid payload.'
    }
    post_data = request.get_json()
    # BUG FIX: a payload without 'name' raised an unhandled KeyError (500);
    # reject it as an invalid payload instead
    if not post_data or 'name' not in post_data:
        return jsonify(response_object), 400
    name = post_data['name']
    new_school = School(
        name=name
    )
    try:
        db.session.add(new_school)
        db.session.flush()  # populate new_school.id before the commit
        user.school_id = new_school.id
        db.session.commit()
        response_object = {
            'status': 'success',
            'message': '{} has been created.'.format(name),
            'data': new_school.asdict()
        }
        return jsonify(response_object), 201
    except (exc.IntegrityError, ValueError):
        # single handler replaces the original duplicate except clauses
        # (the second `except exc.IntegrityError` branch was unreachable)
        db.session.rollback()
        return jsonify(response_object), 400
@school_blueprint.route('/schools/preferences', methods=['PATCH'])
@authenticate_admin
def update_school_preferences(resp):
    """Apply a JSON-patch (RFC 6902 list of ops) to the admin's school
    preferences.

    Each edit is validated first: only EDITABLE_PREFERENCES paths are
    allowed, and term dates / period times / integer fields must parse.
    The patch is applied to a copy and only committed if
    process_preferences accepts the result.
    """
    response_object = {
        'status': 'fail',
        'message': 'School does not exist.'
    }
    user = User.query.get(resp)
    school = School.query.get(user.school_id)
    if not school:
        return jsonify(response_object), 400
    response_object = {
        'status': 'fail',
        'message': 'Malformed patch.'
    }
    # get patch object from client
    patch_raw = request.get_json()
    if not patch_raw or not isinstance(patch_raw, list):
        return jsonify(response_object), 400
    # for any times or dates in the patch object, check correct formatting
    for edit in patch_raw:
        try:
            if str(edit['path']) not in EDITABLE_PREFERENCES:
                return jsonify(response_object), 400
        except KeyError:
            return jsonify(response_object), 400
        if edit['path'] == '/term_dates':
            # each half-term is a [start, end] pair of DATE_FORMAT strings
            for halfterm in edit['value']:
                try:
                    datetime.strptime(halfterm[0], DATE_FORMAT)
                    datetime.strptime(halfterm[1], DATE_FORMAT)
                except ValueError:
                    return jsonify(response_object), 400
        elif edit['path'] == '/period_start_times':
            # mapping of period name -> TIME_FORMAT string
            for period in edit['value']:
                try:
                    datetime.strptime(edit['value'][period], TIME_FORMAT)
                except ValueError:
                    return jsonify(response_object), 400
        elif edit['path'] == '/period_length_in_minutes':
            try:
                int(edit['value'])
            except ValueError as e:
                response_object['message'] = str(e)
                return jsonify(response_object), 400
        elif edit['path'] == '/weeks_timetable':
            # only 1- or 2-week timetables are supported; explicit check
            # replaces the original `assert` (stripped under python -O)
            try:
                if int(edit['value']) not in [1, 2]:
                    return jsonify(response_object), 400
            except ValueError:
                return jsonify(response_object), 400
        elif edit['path'] == '/days_notice':
            try:
                int(edit['value'])
            except ValueError:
                return jsonify(response_object), 400
    # convert raw JSON from client into JSONPatch format
    patch = JsonPatch(patch_raw)
    # get preferences JSON object from school
    preferences = school.preferences
    # Apply the patch to the dictionary instance of the model
    try:
        preferences_update = patch.apply(preferences)
    except (JsonPatchConflict, JsonPatchException):
        return jsonify(response_object), 400
    change = diff(preferences, preferences_update)
    if not change:
        response_object = {
            'status': 'success',
            'message': '{} preferences unchanged.'.format(school.name)
        }
        return jsonify(response_object), 200
    # check new preferences object for consistency, and process
    try:
        response_object = process_preferences(preferences_update)
    except BaseException as e:
        # BUG FIX: the original fell through here and committed the invalid
        # preferences anyway, and placed the exception object (not a string)
        # in the JSON response; reject the patch instead
        response_object = {
            'status': 'fail',
            'message': str(e)
        }
        return jsonify(response_object), 400
    school.preferences = preferences_update
    db.session.commit()
    response_object = {
        'status': 'success',
        'message': 'Preferences for {} have been updated.'.format(school.name),
        'data': {'school': school.asdict()}
    }
    return jsonify(response_object), 200
@school_blueprint.route('/schools/<school_id>/staff', methods=['POST'])
@authenticate_admin
def prepare_staff_accounts(resp, school_id):
    """Bulk-create staff accounts for a school from an uploaded spreadsheet.

    Expects {'filename': ...} in the JSON body; rows are extracted with
    extract_users, validated, then inserted with a default password.
    NOTE(review): all failure paths return 401 -- 400/404 would be the
    conventional codes; confirm what the frontend expects.
    """
    school = School.query.get(school_id)
    if not school:
        response_object = {
            'status': 'fail',
            'message': 'That school does not exist.'
        }
        return jsonify(response_object), 401
    filename = request.get_json()
    if not filename:
        response_object = {
            'status': 'fail',
            'message': 'Invalid payload.'
        }
        return jsonify(response_object), 401
    wb_staff, staff = extract_users(filename['filename'])
    # will return string error code if doesn't work - check, then return
    if isinstance(staff, str):
        response_object = {
            'status': 'fail',
            'message': staff
        }
        return jsonify(response_object), 401
    """ PERFORM CHECKS ON EXTRACTED DATA """
    response_object = {
        'status': 'fail',
        'message': 'User import failed'
    }
    # cheap sanity regex: something@something.something
    EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
    for staff_member in staff:
        if not EMAIL_REGEX.match(staff_member['email']):
            response_object['message'] = 'Emails are incorrect.'
            return jsonify(response_object), 401
        if staff_member['name'] is None:
            response_object['message'] = (
                'Names are missing from the uploaded file.')
            return jsonify(response_object), 401
        # teachers must carry a staff code
        if (staff_member['staff_code'] is None
                and staff_member['role_code'] is TEACHER):
            response_object['message'] = (
                'Staff codes are missing from the uploaded file.')
            return jsonify(response_object), 401
    # get list of emails already signed up to the school, to ensure imported
    # emails are unique without raising db Exception
    emails = [user.email for user in User.query.filter_by(
        school_id=school_id).all()]
    skipped_emails = []
    for s in staff:
        # skip any emails already in database
        if s['email'] in emails:
            skipped_emails.append(s['email'])
            continue
        new_user = add_user(
            name=s['name'], email=s['email'],
            password='password', role_code=s['role_code'],
            staff_code=s['staff_code'], school_id=school.id)
        try:
            db.session.add(new_user)
            db.session.commit()
        except Exception as e:
            return jsonify({'status': 'fail', 'message': str(e)}), 401
    # if emails needed to be skipped (admin should be one) send list in
    # warning message of response dict
    if len(skipped_emails) > 0:
        response_object['warning'] = {'skipped_users': skipped_emails}
    response_object['status'] = 'success'
    response_object['data'] = \
        {'staff': [user.asdict() for user in User.query.filter_by(
            school_id=school.id).all()]}
    response_object['message'] = 'Please ensure these users are correct.'
    return jsonify(response_object), 200
|
"""
Django settings for sit project.
Generated by 'django-admin startproject' using Django 1.11.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from __future__ import absolute_import
import os
import redis
from kombu import Queue
from celery.schedules import crontab
# --- Celery / Redis ---------------------------------------------------------
# Redis doubles as the Celery broker and as a shared connection pool.
BROKER_URL = 'redis://127.0.0.1:6379'
REDIS_POOL = redis.ConnectionPool.from_url(BROKER_URL)
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_QUEUES = (
    Queue('celery'),
)
CELERYBEAT_SCHEDULE = {
    # daily reminder for assigned issues, 05:51 in CELERY_TIMEZONE
    'daily_assigned_issue_reminder': {
        'task': 'apis.task.send_daily_assigned_issue',
        'schedule': crontab(hour='5', minute=51),
        'options': {'queue': 'celery'}
    }
}
CELERY_TIMEZONE = 'Asia/Calcutta'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in source; move it to an environment
# variable before deploying.
SECRET_KEY = '9bj&ikplc^pd98jluf53g&nt&xr)2jp*!tbwig!ef84x_t-#qh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header -- restrict in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'apis'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # whitenoise serves the collected static files directly from Django
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sit.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'sit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# PostgreSQL; connection details are supplied via SIT_DB* env vars.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': os.environ.get('SIT_DBNAME'),
        'USER': os.environ.get('SIT_DBOWNR'),
        'PASSWORD': os.environ.get('SIT_DBPSWD'),
        'HOST': os.environ.get('SIT_DBHOST'),
        'PORT': '5432',
    },
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',  # noqa
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',  # noqa
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',  # noqa
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',  # noqa
    },
]
# Outgoing mail via implicit-SSL SMTP backend; credentials from env vars.
# NOTE(review): port 465 is the implicit-SSL port and the backend is an SSL
# backend, yet EMAIL_USE_TLS=True / EMAIL_USE_SSL=False -- confirm these
# flags are what the backend actually expects.
EMAIL_BACKEND = 'django_smtp_ssl.SSLEmailBackend'
EMAIL_HOST = os.environ.get('SIT_HOST')
EMAIL_PORT = os.environ.get('SIT_PORT', 465)
EMAIL_HOST_USER = os.environ.get('SIT_USER_EMAIL')
EMAIL_HOST_PASSWORD = os.environ.get('SIT_PASSWORD')
EMAIL_USE_TLS = True
EMAIL_USE_SSL = False
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
import main
import setup
import constants as c
import pygame as pg
class AiMode(main._Mode):
    """Game mode driven by the AI player; all hooks are currently stubs.

    NOTE(review): the _Mode contract lives in main -- startup/update/get_event
    are presumably called on state entry / per frame / per pygame event;
    confirm against main._Mode before filling these in.
    """
    def __init__(self):
        super().__init__()
    def startup(self):
        # called when this mode becomes active; nothing to initialize yet
        pass
    def update(self, surface):
        # per-frame update/draw hook; intentionally a no-op stub
        pass
    def get_event(self, event):
        # pygame event hook; intentionally a no-op stub
        pass
|
"""
So this is a script for showing todays prime number. Started 21. September 2019. For some reason. Please know that this is a inside joke so dont expect understanding anything.
"""
from datetime import date
# start date of the running joke, and today's date
f_date = date(2019, 9, 21)
l_date = date.today()
f = []
# f_date is in the past, so the subtraction is negative; negate it to get
# the number of days elapsed since the start date
n_days = f_date - l_date
m_days = -n_days.days
def primes(goal):
    """Find the *goal*-th prime with an incremental sieve.

    Sets the module global ``todayprime`` (kept for backward compatibility
    with existing callers) and, as an improvement, also returns it so the
    value no longer has to be fished out of the globals.

    Args:
        goal: 1-based index of the prime to find (e.g. goal=5 -> 11).

    Returns:
        int: the goal-th prime (1 when goal <= 0, as before).
    """
    global todayprime
    count = 0
    multiples = {}  # composite -> list of primes dividing it
    q = 2  # first integer to test for primality
    while count < goal:
        if q not in multiples:
            # not marked composite, must be prime; mark q*q, the first
            # multiple of q not already marked
            multiples[q * q] = [q]
            count += 1
        else:
            for p in multiples[q]:
                multiples.setdefault(p + q, []).append(p)
            # no longer need multiples[q], free memory
            del multiples[q]
        q += 1
    # the loop stops right after counting the goal-th prime, so q-1 is it
    todayprime = q - 1
    return todayprime
def prime():
    """Compute today's prime: the m_days-th prime, where m_days is the
    number of days since 2019-09-21.

    The result is stored in the module global ``todayprime`` by primes().
    (The original declared ``global todayprime`` here without ever
    assigning it -- a no-op declaration, removed.)
    """
    primes(m_days)
|
"""
题目描述
用两个栈来实现一个队列,完成队列的Push和Pop操作。 队列中的元素为int类型。
"""
"""
解题思路
第一个栈stack_1临时保存插入的数据,当调用弹出函数时候,若stack_2不为空则直接弹出,
若为空,则把stack_1中的数据全部弹出放到stack_2中。
注:stack_2都是存放的旧的数据,弹出时一定符合队列的规律。
"""
class Solution:
    """Queue implemented with two stacks.

    stack_1 is the inbox (newest on top); stack_2 is the outbox (oldest on
    top). Elements migrate from inbox to outbox only when the outbox runs
    dry, so each element is moved at most once: amortized O(1) per op.
    """
    def __init__(self):
        self.stack_1 = []  # inbox
        self.stack_2 = []  # outbox
    def push(self, node):
        """Enqueue a value."""
        self.stack_1.append(node)
    def pop(self):
        """Dequeue and return the oldest value."""
        if not self.stack_2:
            # refill the outbox by reversing the inbox
            while self.stack_1:
                self.stack_2.append(self.stack_1.pop())
        return self.stack_2.pop()
if __name__ == '__main__':
    # quick demo: push two values and inspect both stacks around a pop
    queue = Solution()
    queue.push(1)
    queue.push(2)
    print(queue.stack_1)
    queue.pop()
    print(queue.stack_2)
    print(queue.stack_1)
|
number = str(input())
# split the six-digit number into two 3-digit halves
first_half, second_half = divmod(int(number), 1000)
# digits of the first half
first, _rest1 = divmod(first_half, 100)
second, third = divmod(_rest1, 10)
# digits of the second half
fourth, _rest2 = divmod(second_half, 100)
fifth, sixth = divmod(_rest2, 10)
# a ticket is "lucky" when both halves have the same digit sum
if first + second + third == fourth + fifth + sixth:
    print("Счастливый")
else:
    print("Обычный")
|
n = int(input())
# Print F(n-1) and F(n) with F(0)=0, F(1)=1.
# Iterative pair-update replaces the original list build, which also
# crashed with IndexError for n=0 (a[1] on a 1-element list); for n=0 this
# now prints the same "0 1" as n=1.
a, b = 0, 1
for _ in range(n - 1):
    a, b = b, a + b
print(a, b, end=' ')
# coding:utf-8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import json
import warnings
import contextlib
from typing import Any, Dict, List, Optional, Tuple, Union
import paddle
from paddle.dataset.common import md5file
from ..utils.log import logger
from ..utils.downloader import get_path_from_url, DownloaderCheck
DOC_FORMAT = r"""
Examples:
.. code-block:: python
"""
DOWNLOAD_CHECK = False
def download_file(save_dir, filename, url, md5=None, task=None):
    """
    Download a file from *url* into *save_dir*.

    If the file already exists, its md5 is checked: a matching checksum
    reuses the existing copy, otherwise the file is downloaded again.

    Args:
        save_dir(string): Directory the file is saved into.
        filename(string): Name the file is saved under.
        url(string): URL to download the file from.
        md5(string, optional): Expected md5 checksum of the file.
        task(string, optional): Task name forwarded to DownloaderCheck.

    Returns:
        str: Full path of the (possibly just downloaded) file.
    """
    logger.disable()
    global DOWNLOAD_CHECK
    if not DOWNLOAD_CHECK:
        # run the downloader availability check only once per process
        DOWNLOAD_CHECK = True
        checker = DownloaderCheck(task)
        checker.start()
        checker.join()
    fullname = os.path.join(save_dir, filename)
    if os.path.exists(fullname):
        if md5 and (not md5file(fullname) == md5):
            # stale or corrupted local copy: fetch again
            get_path_from_url(url, save_dir, md5)
    else:
        get_path_from_url(url, save_dir, md5)
    logger.enable()
    return fullname
def add_docstrings(*docstr):
    """Decorator factory that appends the shared example header plus the
    given doc strings to the decorated callable's __doc__.

    Args:
        *docstr: doc string fragments to append after DOC_FORMAT.
    """
    def docstring_decorator(fn):
        # ``"".join(DOC_FORMAT)`` joined the characters of a plain string
        # with an empty separator -- a no-op; use the string directly
        fn.__doc__ = fn.__doc__ + DOC_FORMAT + "".join(docstr)
        return fn
    return docstring_decorator
@contextlib.contextmanager
def static_mode_guard():
    """Run the with-body in static-graph mode, then restore dygraph mode."""
    paddle.enable_static()
    try:
        yield
    finally:
        # restore dynamic-graph mode even if the body raises (the original
        # left static mode enabled on an exception)
        paddle.disable_static()
@contextlib.contextmanager
def dygraph_mode_guard():
    """Ensure dynamic-graph mode for the with-body."""
    # NOTE(review): unlike static_mode_guard this does not switch modes back
    # after the body runs -- confirm that leaving dygraph enabled is intended.
    paddle.disable_static()
    yield
class TermTreeNode(object):
    """Definition of a term node. All members are protected, to keep the
    rigor of the data structure.

    Args:
        sid (str): term id of node.
        term (str): term, common name of this term.
        base (str): `cb` indicates concept base, `eb` indicates entity base.
        term_type (Optional[str], optional): type of this term, constructs
            the hierarchy of `term` nodes. Defaults to None.
        hyper (Optional[str], optional): parent type of a `type` node.
            Defaults to None.
        node_type (str, optional): type statement of node, `type` or `term`.
            Defaults to "term".
        alias (Optional[List[str]], optional): alias of this term.
            Defaults to None.
        alias_ext (Optional[List[str]], optional): extended alias of this
            term, CANNOT be used in matching. Defaults to None.
        sub_type (Optional[List[str]], optional): grouped by some term.
            Defaults to None.
        sub_term (Optional[List[str]], optional): some lower term.
            Defaults to None.
        data (Optional[Dict[str, Any]], optional): stores the full
            information of a term. Defaults to None.
    """
    def __init__(self,
                 sid: str,
                 term: str,
                 base: str,
                 node_type: str="term",
                 term_type: Optional[str]=None,
                 hyper: Optional[str]=None,
                 level: Optional[int]=None,
                 alias: Optional[List[str]]=None,
                 alias_ext: Optional[List[str]]=None,
                 sub_type: Optional[List[str]]=None,
                 sub_term: Optional[List[str]]=None,
                 data: Optional[Dict[str, Any]]=None):
        self._sid = sid
        self._term = term
        self._base = base
        self._term_type = term_type
        self._hyper = hyper
        # normalize the optional list arguments to fresh empty lists
        self._sub_term = sub_term if sub_term is not None else []
        self._sub_type = sub_type if sub_type is not None else []
        self._alias = alias if alias is not None else []
        self._alias_ext = alias_ext if alias_ext is not None else []
        self._data = data
        self._level = level
        self._node_type = node_type
        # sids of child nodes, filled in by TermTree's son-building pass
        self._sons = set()
    def __str__(self):
        # prefer the verbatim source data when the node was built from it
        if self._data is not None:
            return json.dumps(self._data, ensure_ascii=False)
        else:
            res = {
                "termid": self._sid,
                "term": self._term,
                "src": self._base,
                "alias": self._alias,
                "alias_ext": self._alias_ext,
                "termtype": self._term_type,
                "subterms": self._sub_term,
                "subtype": self._sub_type,
                "links": []
            }
            return json.dumps(res, ensure_ascii=False)
    @property
    def sid(self):
        return self._sid
    @property
    def term(self):
        return self._term
    @property
    def base(self):
        return self._base
    @property
    def alias(self):
        return self._alias
    @property
    def alias_ext(self):
        return self._alias_ext
    @property
    def termtype(self):
        return self._term_type
    @property
    def subtype(self):
        return self._sub_type
    @property
    def subterm(self):
        return self._sub_term
    @property
    def hyper(self):
        return self._hyper
    @property
    def level(self):
        return self._level
    @property
    def sons(self):
        return self._sons
    @property
    def node_type(self):
        return self._node_type
    def add_son(self, son_name):
        # record a child node's sid
        self._sons.add(son_name)
    @classmethod
    def from_dict(cls, data: Dict[str, Any]):
        """Build a node from dictionary data.

        Args:
            data (Dict[str, Any]): dictionary containing all k-v data.

        Returns:
            TermTreeNode: the constructed node.
        """
        return cls(sid=data["termid"],
                   term=data["term"],
                   base=data["src"],
                   term_type=data["termtype"],
                   sub_type=data["subtype"],
                   sub_term=data["subterms"],
                   alias=data["alias"],
                   alias_ext=data["alias_ext"],
                   data=data)
    @classmethod
    def from_json(cls, json_str: str):
        """Build a node from a JSON string.

        Args:
            json_str (str): JSON string formatted like TermTree data.

        Returns:
            TermTreeNode: the constructed node.
        """
        dict_data = json.loads(json_str)
        return cls.from_dict(dict_data)
class TermTree(object):
"""TermTree class.
"""
    def __init__(self):
        # sid -> node mapping; a synthetic "root" node anchors the type tree
        self._nodes: Dict[str, TermTreeNode] = {}
        self._root = TermTreeNode(
            sid="root", term="root", base="cb", node_type="root", level=0)
        self._nodes["root"] = self.root
        # term/alias string -> list of sids of nodes bearing that name
        self._index = {}
    def __build_sons(self):
        # link every node to its parent(s); run after a full load
        for node in self._nodes:
            self.__build_son(self._nodes[node])
    def __getitem__(self, item):
        return self._nodes[item]
    def __contains__(self, item):
        return item in self._nodes
    def __iter__(self):
        # iterating a TermTree yields node sids
        return self._nodes.__iter__()
    @property
    def root(self):
        return self._root
    def __load_type(self, file_path: str):
        """Load the 3-level type schema from a tab-separated file."""
        with open(file_path, "rt", newline="") as csvfile:
            file_handler = csv.DictReader(csvfile, delimiter="\t")
            for row in file_handler:
                # insert each level under its parent, skipping blank cells
                # and already-known types
                if row["type-1"] not in self:
                    self.add_type(type_name=row["type-1"], hyper_type="root")
                if row["type-2"] != "" and row["type-2"] not in self:
                    self.add_type(
                        type_name=row["type-2"], hyper_type=row["type-1"])
                if row["type-3"] != "" and row["type-3"] not in self:
                    self.add_type(
                        type_name=row["type-3"], hyper_type=row["type-2"])
    def __judge_term_node(self, node: TermTreeNode) -> None:
        # Validate a term node prior to insertion: its type must already
        # exist; a duplicate sid is allowed but warned about.
        # (return annotation fixed: the method only returns None or raises,
        # it never produced a bool)
        if node.termtype not in self:
            raise ValueError(
                f"Term type of new node {node.termtype} does not exists.")
        if node.sid in self:
            warnings.warn(f"{node.sid} exists, will be replaced by new node.")
    def add_term(self,
                 term: Optional[str]=None,
                 base: Optional[str]=None,
                 term_type: Optional[str]=None,
                 sub_type: Optional[List[str]]=None,
                 sub_term: Optional[List[str]]=None,
                 alias: Optional[List[str]]=None,
                 alias_ext: Optional[List[str]]=None,
                 data: Optional[Dict[str, Any]]=None):
        """Add a term into TermTree.

        Args:
            term (str): common name of the term.
            base (str): `cb` for concept, `eb` for entity.
            term_type (str): term type of this term.
            sub_type (Optional[List[str]], optional): sub types of this
                term, must exist in TermTree. Defaults to None.
            sub_term (Optional[List[str]], optional): sub terms of this
                term. Defaults to None.
            alias (Optional[List[str]], optional): alias of this term.
                Defaults to None.
            alias_ext (Optional[List[str]], optional): extended alias.
                Defaults to None.
            data (Optional[Dict[str, Any]], optional): full node data; when
                given, the other arguments are ignored. Defaults to None.
        """
        if data is not None:
            new_node = TermTreeNode.from_dict(data)
        else:
            new_node = TermTreeNode(
                sid=f"{term_type}_{base}_{term}",
                term=term,
                base=base,
                term_type=term_type,
                sub_term=sub_term,
                sub_type=sub_type,
                alias=alias,
                alias_ext=alias_ext,
                node_type="term")
        self.__judge_term_node(new_node)
        self._nodes[new_node.sid] = new_node
        self.__build_index(new_node)
    def add_type(self, type_name, hyper_type):
        """Add a type node under *hyper_type*; the schema is at most 3
        levels deep below root."""
        if type_name in self._nodes:
            raise ValueError(f"Term Type {type_name} exists.")
        if hyper_type not in self._nodes:
            raise ValueError(
                f"Hyper type {hyper_type} does not exist, please add it first.")
        if self._nodes[hyper_type].level == 3:
            raise ValueError(
                "Term type schema must be 3-LEVEL, 3rd level type node should not be a parent of type node."
            )
        self._nodes[type_name] = TermTreeNode(
            sid=type_name,
            term=type_name,
            base=None,
            hyper=hyper_type,
            node_type="type",
            level=self._nodes[hyper_type].level + 1)
        self.__build_index(self._nodes[type_name])
    def __load_file(self, file_path: str):
        """Load term nodes from a JSON-lines file (one term per line)."""
        with open(file_path, encoding="utf-8") as fp:
            for line in fp:
                data = json.loads(line)
                self.add_term(data=data)
    def __build_son(self, node: TermTreeNode):
        """Register *node* as a son of its parent(s).

        Args:
            node (TermTreeNode): son node.
        """
        # a term node hangs off its term type; a type node off its hyper type
        type_node = None
        if node.termtype is not None:
            type_node = self._nodes[node.termtype]
        elif node.hyper is not None:
            type_node = self._nodes[node.hyper]
        if type_node is not None:
            type_node.add_son(node.sid)
        # a node grouped under several sub-types is a son of each of them
        for sub_type in node.subtype:
            sub_type_node = self._nodes[sub_type]
            sub_type_node.add_son(node.sid)
    def build_son(self, node: str):
        # public wrapper: look the node up by sid first
        self.__build_son(self[node])
    def __build_index(self, node: TermTreeNode):
        # Map the node's term and every alias to its sid for find_term().
        if node.term not in self._index:
            self._index[node.term] = []
        self._index[node.term].append(node.sid)
        for alia in node.alias:
            if alia not in self._index:
                self._index[alia] = []
            self._index[alia].append(node.sid)
    def __judge_hyper(self, source_id, target_id) -> bool:
        """BFS upward from *source_id*; True iff *target_id* is reachable
        (i.e. is an ancestor type of the source node)."""
        queue = [source_id]
        visited_node = {source_id}
        while len(queue) > 0:
            cur_id = queue.pop(0)
            if cur_id == target_id:
                return True
            cur_node = self._nodes[cur_id]
            # collect all upward edges: hyper type, term type and sub-types
            edge = []
            if cur_node.hyper is not None:
                edge.append(cur_node.hyper)
            if cur_node.termtype is not None:
                edge.append(cur_node.termtype)
            edge.extend(cur_node.subtype)
            for next_id in edge:
                if next_id not in visited_node:
                    queue.append(next_id)
                    visited_node.add(next_id)
        return False
def find_term(self, term: str, term_type: Optional[str]=None) -> Tuple[
bool, Union[List[str], None]]:
"""Find a term in Term Tree. If term not exists, return None.
If `term_type` is not None, will find term with this type.
Args:
term (str): term to look up.
term_type (Optional[str], optional): find term in this term_type. Defaults to None.
Returns:
Union[None, List[str]]: [description]
"""
if term not in self._index:
return False, None
else:
if term_type is None:
return True, self._index[term]
else:
out = []
for term_id in self._index[term]:
if self.__judge_hyper(term_id, term_type) is True:
out.append(term_id)
if len(out) > 0:
return True, out
else:
return False, None
    def build_from_dir(self, term_schema_path, term_data_path):
        """Populate this TermTree from on-disk schema and data files.

        Args:
            term_schema_path: path to the type-schema file.
            term_data_path: path to the JSON-lines term data file.
        """
        self.__load_type(term_schema_path)
        self.__load_file(term_data_path)
        self.__build_sons()
    @classmethod
    def from_dir(cls, term_schema_path, term_data_path) -> "TermTree":
        """Alternate constructor: build a TermTree from schema + data files.

        Args:
            term_schema_path: path to the type-schema file.
            term_data_path: path to the JSON-lines term data file.

        Returns:
            TermTree: a fully built tree.
        """
        term_tree = cls()
        term_tree.build_from_dir(term_schema_path, term_data_path)
        return term_tree
    def __dfs(self,
              cur_id: str,
              depth: int,
              path: Dict[str, str],
              writer: csv.DictWriter):
        """Depth-first walk over type nodes, emitting one CSV row per path.

        *path* is mutated in place: the slot for the current depth is set on
        the way down and cleared again on the way back up (backtracking).
        """
        cur_node = self._nodes[cur_id]
        if cur_node.node_type == "term":  # only type nodes are exported
            return
        if depth > 0:
            path[f"type-{depth}"] = cur_id
            if path["type-1"] != "":  # skip the artificial root-only row
                writer.writerow(path)
        for son in cur_node.sons:
            self.__dfs(son, depth + 1, path, writer)
        if depth > 0:
            path[f"type-{depth}"] = ""  # backtrack
def save(self, save_dir):
"""Save term tree to directory `save_dir`
Args:
save_dir ([type]): Directory.
"""
if os.path.exists(save_dir) is False:
os.makedirs(save_dir, exist_ok=True)
out_path = {}
for i in range(1, 3):
out_path[f"type-{i}"] = ""
with open(
f"{save_dir}/termtree_type.csv", "wt", encoding="utf-8",
newline="") as fp:
fieldnames = ["type-1", "type-2", "type-3"]
csv_writer = csv.DictWriter(
fp, delimiter="\t", fieldnames=fieldnames)
csv_writer.writeheader()
self.__dfs("root", 0, out_path, csv_writer)
with open(
f"{save_dir}/termtree_data", "w", encoding="utf-8",
newline="") as fp:
for nid in self:
node = self[nid]
if node.node_type == "term":
print(node, file=fp)
|
import socketio
import eventlet
# Socket.IO server that accepts connections from any origin (demo setup —
# tighten cors_allowed_origins before production use).
sio = socketio.Server(cors_allowed_origins='*')
app = socketio.WSGIApp(sio)
@sio.event
def connect(sid, environ):
    # On every new client connection: log the sid, then broadcast a
    # hard-coded demo account payload to all connected clients.
    print('connect ', sid)
    sio.emit('NEW_CONNECTION',{"id":"01234567","money":56450,"passport":"Pasaporte platino full"})
if __name__ == '__main__':
    # Serve the WSGI app on localhost:4567 with eventlet's green-thread
    # server (blocking call). Fix: removed a stray "|" artifact that had
    # been fused onto the end of this line and broke the syntax.
    eventlet.wsgi.server(eventlet.listen(('localhost', 4567)), app)
from celery import Celery
from music_aekt.downloaders.zing import ZingDownloader
from music_aekt.downloaders.nhaccuatui import NCTDownloader
from music_aekt.player import moc
# Celery app using a local Redis instance as both result backend and broker.
app = Celery('tasks',
             backend='redis://localhost',
             broker='redis://localhost')
# Directory where downloaded MP3 files are written.
SAVE_LOCATION = '/tmp'
# Browser-like User-Agent so the music sites serve their normal pages.
HEADERS = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'}
# Regex that extracts the XML playlist URL from a Zing page.
ZING_PATTERN = 'var xml_link = "(.*?)";'
@app.task
def download(url):
    """Download the MP3 at *url* and append it to the moc player queue.

    Supports mp3.zing.vn and nhaccuatui.com URLs.

    Raises:
        ValueError: if the URL belongs to neither supported site. (The
            original fell through both branches and crashed with a
            NameError on the unbound ``d``.)
    """
    if "mp3.zing.vn" in url:
        d = ZingDownloader(url=url,
                           path=SAVE_LOCATION,
                           headers=HEADERS,
                           pattern=ZING_PATTERN)
    elif "nhaccuatui.com" in url:
        d = NCTDownloader(url=url,
                          path=SAVE_LOCATION,
                          headers=HEADERS)
    else:
        raise ValueError('unsupported url: {}'.format(url))
    f = d.download_mp3_file()
    moc.append(f)
|
def add(n1, n2):
    """Return the sum of n1 and n2."""
    total = n1 + n2
    return total
def subtract(n1, n2):
    """Return n1 minus n2."""
    difference = n1 - n2
    return difference
def multiply(n1, n2):
    """Return the product of n1 and n2."""
    product = n1 * n2
    return product
def divide(n1, n2):
    """Return n1 / n2 (true division; raises ZeroDivisionError for n2 == 0)."""
    quotient = n1 / n2
    return quotient
# Dispatch table mapping each operator symbol to its implementing function.
operations = {"+":add,
              "-":subtract,
              "*":multiply,
              "/":divide
              }
def calculator():
    """Interactive chained calculator REPL.

    Reads numbers and an operator from stdin, prints the result, and either
    chains the answer into the next operation or starts over.

    Fixes vs. the original:
    - restarting is a ``while True`` loop instead of a recursive
      ``calculator()`` call, so the stack no longer grows per restart;
    - an unknown operator symbol re-prompts instead of crashing with
      KeyError.
    """
    while True:
        num1 = float(input("enter the first number: "))
        # Show the available operator symbols.
        for symbol in operations:
            print(symbol)
        continue_or_not = True
        while continue_or_not:
            operation_symbol = input("enter the operation which you wish to perform: ")
            if operation_symbol not in operations:
                print("unknown operation, please choose one of the listed symbols")
                continue
            num2 = float(input("enter the next number: "))
            calculation_function = operations[operation_symbol]
            answer = calculation_function(num1, num2)
            print(f"{num1}{operation_symbol}{num2}={answer}")
            if input(f"type y to work with {answer} or press n to start a new calculation ") == 'y':
                num1 = answer  # chain the result into the next operation
            else:
                continue_or_not = False
# Launch the interactive calculator immediately on run. (Fix: removed a
# stray "|" artifact fused onto the end of this line.)
calculator()
import random
def answers(ch):
    """Print the Magic-8-Ball reply for choice *ch*.

    Choices 1-13 map to fixed replies; anything else prints the fallback
    "Ask again later".

    Bug fix: the original chain ended with ``elif 13:`` (always truthy), so
    every value outside 1-12 printed "Not sure" and the "Ask again later"
    branch was unreachable. The dict lookup below restores the intended
    mapping (ch == 13 -> "Not sure", anything else -> fallback).
    """
    replies = {
        1: "It is certain",
        2: "Good outlook",
        3: "Most likely",
        4: "Reply hazy",
        5: "Cannot predict now",
        6: "ConceNtrate and ask again",
        7: "My reply is no",
        8: "Very doubtful",
        9: "Better not tell you now",
        10: "Dont count on it",
        11: "As I see it, Yes",
        12: "Without a doubt",
        13: "Not sure",
    }
    print(replies.get(ch, "Ask again later"))
# Greet the player by name before starting the 8-ball loop.
print("Hello There, I am the Magic 8 Ball, What is your name?")
name = input()
print('Hello ' + name)
def MagicBall8():
    """Prompt for a question, print a random reply, then offer a replay.

    Notes:
    - Replay() is defined later in the file; that's fine because the name
      is resolved at call time, not at definition time.
    - random.randint(1, 14) can yield 13 and 14, which hit the "Not sure"
      and fallback branches of answers().
    - MagicBall8/Replay are mutually recursive, so the stack grows a
      little on every round played.
    """
    print('Ask me a question.')
    input()  # the question text itself is ignored
    p=random.randint(1, 14)
    answers(p)
    print('I hope that helped!')
    Replay()
def Replay():
    """Ask whether the user has another question; repeat until Y or N.

    'Y' plays another round (keeping the original mutual recursion with
    MagicBall8); 'N' says goodbye. Fix: invalid input now re-prompts in a
    loop — the original recursed into Replay() on every typo, growing the
    stack without bound.
    """
    while True:
        print ('Do you have another question? [Y/N] ')
        reply = input()
        if reply.upper() == 'Y':
            MagicBall8()
            return
        elif reply.upper() == 'N':
            print("Great playing with you")
            return
        print('Sorry, didnt get the question. Can you please repeat.')
MagicBall8()  # start the first round immediately on run
|
# --->
# Created by liumeiyu on 2020/3/16.
# '_'
import cv2
import numpy as np
import matplotlib.pyplot as plt
from graphy import Graph
'''图像二值化'''
class Binary(Graph):
    """Image binarization demos built on Graph.

    Assumes Graph supplies ``gray_img`` (grayscale ndarray), ``img_h``,
    ``img_w`` and ``copy_img()`` — TODO confirm against graphy.Graph.
    """

    def __init__(self, img_path):
        super().__init__(img_path)

    def img_biny_np(self, thrd):
        """Binarize the image: gray value > thrd -> 255, otherwise 0.

        Vectorized with a boolean mask; the original iterated over every
        pixel in Python, which is O(h*w) interpreter-level work.
        """
        img_b = self.copy_img()
        mask = self.gray_img > thrd
        img_b[mask] = 255
        img_b[~mask] = 0
        return img_b

    def biny(self, thrd):
        """Display the gray image plus 8 OpenCV thresholding variants."""
        r, img_b1 = cv2.threshold(self.gray_img, thrd, 255, cv2.THRESH_BINARY)  # r is the threshold actually used
        r, img_b2 = cv2.threshold(self.gray_img, thrd, 255, cv2.THRESH_BINARY_INV)
        r, img_b3 = cv2.threshold(self.gray_img, thrd, 255, cv2.THRESH_TRUNC)  # values above thrd clamp to thrd, others unchanged
        r, img_b4 = cv2.threshold(self.gray_img, thrd, 255, cv2.THRESH_TOZERO)
        r, img_b5 = cv2.threshold(self.gray_img, thrd, 255, cv2.THRESH_TOZERO_INV)
        img_b6 = cv2.adaptiveThreshold(self.gray_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 501, 0)
        img_b7 = cv2.adaptiveThreshold(self.gray_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 501, 0)
        r, img_b8 = cv2.threshold(self.gray_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # Otsu picks the bimodal valley threshold automatically
        print(r)
        img_ = [self.gray_img, img_b1, img_b2, img_b3, img_b4, img_b5, img_b6, img_b7, img_b8]
        for i in range(len(img_)):
            plt.subplot(331 + i)  # 3x3 grid for the 9 images
            plt.imshow(img_[i], 'gray')
        plt.show()

    # Iteratively select an optimal global threshold.
    def get_thrd(self, ts):
        """Return a threshold computed by iterating "midpoint of the two
        class means" until successive estimates differ by at most *ts*.

        Fixes vs. the original:
        - cv2.calcHist expects a *list* of images; the bare array was
          passed as the first argument.
        - the histogram range is now [0, 256) so gray value 255 falls in
          the last bin (the original [0, 255] range excluded it).
        """
        total_gray = np.sum(self.gray_img)
        hist_g = cv2.calcHist([self.gray_img], [0], None, [256], [0, 256])
        hist_g = np.reshape(hist_g, (1, -1))[0]
        T1 = total_gray // self.gray_img.size  # start from the global mean
        T2 = 0
        while abs(T2 - T1) > ts:
            s1, n1, s2, n2 = 0, 0, 0, 0
            for i, j in enumerate(hist_g):
                if i < T1:
                    s1 += j * i  # weighted gray sum below threshold
                    n1 += j
                else:
                    s2 += j * i  # weighted gray sum at/above threshold
                    n2 += j
            # New estimate = midpoint of the two class means.
            # NOTE(review): if either class is empty (n1 or n2 == 0) this
            # produces nan/inf under numpy float division — confirm inputs.
            T2 = (s1 / n1 + s2 / n2) / 2
            T1, T2 = T2, T1
        return T1
|
# CodeSkulptor runs Python programs in your browser.
# Click the upper left button to run this simple demo.
# CodeSkulptor runs in Chrome 18+, Firefox 11+, and Safari 6+.
# Some features may work in other browsers, but do not expect
# full functionality. It does NOT run in Internet Explorer.
import simplegui
import math
#Global state
width =300
height = 200
clockCtr = [width/2, height/2 - 15]  # center of the dial
clockRadius = 40
splitRadius = clockRadius * 1  # split hand is exactly one radius long
splitPosition = [clockCtr[0], clockCtr[1] - clockRadius]  # hand tip; starts at 12 o'clock
score = 0  # successful stops (split digit == 0)
attempts = 0  # total stops
scoreboard = "Welcome!" #shows scores/attempts
message = 'start'
interval = 100 #timer interval in milliseconds
clock = '' #clock string
time = 0  # elapsed time in tenths of a second (one tick per 100 ms interval)
prevTime = -1;  # tick value at the last scored stop (guards double-scoring)
points = 10 #theta angle points per revolution of the split hand
#converts aggregate time into units of time
def convertToSplit():
    """Return the tenths-of-a-second digit of the elapsed time, as a string."""
    return str(time % 10)
def convertToSeconds():
    """Return the seconds portion (00-59) of the elapsed time, zero-padded."""
    seconds = int(time / 10) % 60
    return str(seconds).zfill(2)
def convertToMinutes():
    """Return the whole minutes elapsed, as a string (no zero padding)."""
    minutes = int((time / 10)/ 60)
    return str(minutes)
#scoreboard stuff
def calculateScore():
    """Score a stop: +1 attempt, and +1 score when the split digit is 0.

    prevTime guards against double-scoring the same tick (e.g. pressing
    Stop twice while the timer is already stopped).
    """
    global score, attempts, prevTime
    split = int(convertToSplit())
    if not prevTime == time:
        if split == 0:
            score += 1
        attempts += 1
        prevTime = time;
    updateScoreboard()
def updateScoreboard():
    """Refresh the scoreboard string as "score/attempts: percent%".

    Fix: multiply before dividing. CodeSkulptor runs Python 2, where the
    original ``score / attempts`` used integer division and floored to 0
    for any score < attempts, so the percentage showed 0% until the score
    equalled the attempts. ``score * 100 / attempts`` gives the truncated
    percentage on both Python 2 and (via int()) Python 3.
    """
    global scoreboard
    percentage = 0
    if not attempts == 0:
        percentage = int(score * 100 / attempts)
    scoreboard = str(score) + '/' + str(attempts) + ": " + str(percentage) + '%'
def moveSplitHand():
    """Position the split-hand tip on the dial for the current tick.

    The dial has ``points`` positions per revolution; the ``- points/4``
    offset rotates position zero up to 12 o'clock (canvas y grows downward).
    Note: ``slice`` shadows the builtin of the same name (local only).
    """
    slice = 2 * math.pi / points;
    theta = slice * ((time % points) - (points / 4));
    splitPosition[0] = clockCtr[0] + splitRadius * math.cos(theta)
    splitPosition[1] = clockCtr[1] + splitRadius * math.sin(theta)
# Event Handlers
# Handler for tick
def tick():
    """Timer callback (every 100 ms): advance the elapsed time, move the
    split hand and rebuild the clock display string.

    Fix: removed the dead ``minutes, seconds, splitSeconds = 0,0,0``
    initializer — all three were immediately reassigned and
    ``splitSeconds`` was never used at all.
    """
    global time, clock
    time += 1  # one tick == 0.1 s
    split = convertToSplit()
    seconds = convertToSeconds()
    minutes = convertToMinutes()
    moveSplitHand()
    clock = minutes + ':' + seconds + '.' + split
# Handler for start
def start():
    """Start (or resume) the stopwatch timer."""
    timer.start()
# Handler for stop
def stop():
    """Stop the timer and score this attempt.

    calculateScore() is what updates the scoreboard; the original declared
    ``global scoreboard`` here without ever assigning it, so that
    declaration has been removed.
    """
    timer.stop()
    calculateScore()
# Handler for reset
def reset():
    """Zero the score, attempts and elapsed time.

    NOTE(review): the timer keeps running and the ``clock`` string is not
    rebuilt until the next tick — confirm that is intended.
    """
    global score, attempts, time
    score = 0
    attempts = 0
    time = 0
    updateScoreboard()
# Handler to draw on canvas
def draw(canvas):
    """Render one frame: 12-o'clock mark, split hand, digital clock,
    scoreboard text and the dial outline."""
    canvas.draw_line(clockCtr, (clockCtr[0], clockCtr[1] - clockRadius), 6, "Red") #mark
    canvas.draw_line(clockCtr, splitPosition, 2, "Yellow") #split hand
    canvas.draw_text(clock, [width/4+15,height-20], 36, "Yellow")
    canvas.draw_text(scoreboard, [5,25], 20, "Red")
    canvas.draw_circle(clockCtr, clockRadius, 5, "Yellow")
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Home", width, height)
timer = simplegui.create_timer(interval, tick)
frame.add_button("Start", start)
frame.add_button("Stop", stop)
frame.add_label("")  # blank label used as a spacer between buttons
frame.add_button("Reset", reset)
frame.set_draw_handler(draw)
# Start the frame animation
frame.start()
# NOTE(review): this starts the stopwatch immediately on load, before the
# user presses Start — confirm that is intended.
timer.start()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiMerchantDepartmentCreateModel(object):
    """Request model for the Koubei merchant department-create API.

    Follows the alipay SDK's generated-model shape: private fields behind
    property accessors, plus dict (de)serialization helpers.
    """

    # Attribute names shared by to_alipay_dict / from_alipay_dict.
    _FIELD_NAMES = ('auth_code', 'dept_name', 'label_code', 'parent_dept_id')

    def __init__(self):
        self._auth_code = None
        self._dept_name = None
        self._label_code = None
        self._parent_dept_id = None

    @property
    def auth_code(self):
        return self._auth_code

    @auth_code.setter
    def auth_code(self, value):
        self._auth_code = value

    @property
    def dept_name(self):
        return self._dept_name

    @dept_name.setter
    def dept_name(self, value):
        self._dept_name = value

    @property
    def label_code(self):
        return self._label_code

    @label_code.setter
    def label_code(self, value):
        self._label_code = value

    @property
    def parent_dept_id(self):
        return self._parent_dept_id

    @parent_dept_id.setter
    def parent_dept_id(self, value):
        self._parent_dept_id = value

    def to_alipay_dict(self):
        """Serialize the truthy fields to a plain dict, recursing into any
        value that provides its own to_alipay_dict()."""
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                continue  # falsy values are omitted, matching the generated SDK code
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from dict *d*; returns None for a falsy input."""
        if not d:
            return None
        o = KoubeiMerchantDepartmentCreateModel()
        for field in KoubeiMerchantDepartmentCreateModel._FIELD_NAMES:
            if field in d:
                setattr(o, field, d[field])
        return o
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import numpy
from .tf_idf import TfIDf
from .build_vsm import BuildVsm
from sklearn.cluster import KMeans
from .save_to_redis import r, remove_to_redis
from .save_to_redis import save_to_redis, remove_to_redis
from .config import load_data_set, classify_file1, abs_filename, get_content, get_every_content
from .get_hot_topic import HotClassify
basedir_name = os.path.dirname(os.path.abspath(__file__))  # package directory
print(basedir_name)  # debug: show where data folders are resolved from
def handle_topic_redis(file):
    """
    Delete this category's stale redis keys (heat, keywords, max, cluster
    results) so they can be rebuilt from scratch.

    :param file: category name, used as the redis key prefix
    :return: None
    """
    word = [u':hot', u':keyword', u':max', u':聚类结果']
    for name in word:
        file_name = file + name
        if r.lrange(file_name, 0, -1):  # only delete keys that exist
            remove_to_redis(file_name)
def k_means_every_type_topic():
    """Cluster each category's documents with KMeans and persist results.

    For every file in the classification-results directory: clear stale
    redis keys, compute tf-idf, build the vector space model, pick a
    cluster count from the corpus size, run KMeans and write the labeled
    rows back via classify_file1. Finally refresh per-cluster keywords.
    """
    basedir_name = os.path.dirname(os.path.abspath(__file__))
    file_list = os.listdir(basedir_name + '/分类结果')
    basedir_name = u'分类结果'  # from here on, the relative data directory
    print(file_list)
    for file_name in file_list:
        category = file_name[:-4]  # strip the 4-char file extension
        print('聚类类别,{}'.format(category))
        handle_topic_redis(category)
        print(file_name)
        rows, comments, follows, times = get_content(basedir_name, file_name)
        tf = TfIDf(rows, comments, follows, times)
        tf_idf_dict = tf.tf_idf()
        print(len(rows))
        print(sorted(tf_idf_dict.items(), key=lambda d: d[1], reverse=True)[:20])
        vsm = BuildVsm(rows, tf_idf_dict)
        scores = vsm.build_vsm(category)
        # (Removed the original's unused ``vsm_abs_path`` variable.)
        # Cluster count scales with corpus size; 31-49 rows fall to the else.
        if len(rows) >= 120:
            n = 5
        elif len(rows) >= 50:
            n = 4
        elif len(rows) <= 30:
            n = 2
        else:
            n = 3
        k_cluster = KMeans(init='k-means++', n_clusters=n)
        data_set = numpy.mat(load_data_set(category))
        print(len(data_set))
        labels = list(k_cluster.fit_predict(data_set))
        classify_file1(labels, category, rows, follows, comments, times, scores, category)
    get_every_type_top_keyword()
from redis import Redis
r2 = Redis(host='localhost', port=6379, db=2)  # per-cluster keyword/value lists
# NOTE(review): r3 points at the same host/db as r2 and is never used in
# this module — confirm it is needed elsewhere before removing.
r3 = Redis(host='localhost', port=6379, db=2)
def get_every_type_top_keyword():
    """Recompute cluster heat and the top-10 tf-idf keywords for every
    cluster of every category, storing results in redis (db 2).

    NOTE(review): the local ``keywords`` dict is filled but never returned
    or read — it looks like leftover debugging state.
    """
    word_tag = ['买卖交易', '求助', '校园生活', '学校新闻', '网络', '情感', '毕业话题']
    keywords = {}
    print('生成')
    hot = HotClassify()
    for word in word_tag:
        print(word)
        hot.cal_cluster_hot(word)  # refresh per-cluster heat first
    for word in word_tag:
        for i in range(1, 10):  # clusters are numbered 1..9 at most
            # Drop any stale keyword/value lists before re-filling them.
            if r2.lrange(word + ":cluster:" + str(i) + ":keywords", 0, -1):
                r2.delete(word + ":cluster:" + str(i) + ":keywords")
            if r2.lrange(word + ":cluster:" + str(i) + ":values", 0, -1):
                r2.delete(word + ":cluster:" + str(i) + ":values")
            if r2.lrange(word + ":cluster:" + str(i) + ':max', 0, -1):  # cluster exists
                rows, comments, follows, times = get_every_content(word, i)
                tf = TfIDf(rows, comments, follows, times)
                tf_idf_dict = tf.tf_idf()
                tf_idf_dict = sorted(tf_idf_dict.items(), key=lambda d: d[1], reverse=True)
                keywords[word + ":cluster:" + str(i)] = tf_idf_dict[:10]
                for t in tf_idf_dict[:10]:
                    print(t)
                    r2.lpush(word + ":cluster:" + str(i) + ":keywords", t[0])
                    r2.lpush(word + ":cluster:" + str(i) + ":values", t[1])
if __name__ == '__main__':
    # Run the full clustering pipeline when executed directly.
    k_means_every_type_topic()
|
import sys
# The five English vowels; everything else alphabetic counts as a consonant.
VOWELS = set(['a', 'e', 'i', 'o', 'u'])


def count_vowels_consonants(string):
    """Count consonants and per-vowel occurrences in *string*.

    Non-alphabetic characters are ignored; counting is case-insensitive.

    Returns:
        (total_consonants, vowels_count) where vowels_count maps each
        vowel to its count.

    Raises:
        TypeError: if *string* is not a string.

    Fix: the original type check used ``basestring``, which only exists on
    Python 2; the try/except below keeps Python 2 behavior (str + unicode)
    while also working on Python 3.
    """
    try:
        string_types = basestring  # Python 2: accept str and unicode
    except NameError:
        string_types = str  # Python 3: basestring is gone
    if not isinstance(string, string_types):
        raise TypeError('Input must be a string')
    vowels_count = dict(
        a=0,
        e=0,
        i=0,
        o=0,
        u=0
    )
    total_consonants = 0
    for char in string:
        if not char.isalpha():
            continue
        char_low = char.lower()
        if char_low in VOWELS:
            vowels_count[char_low] += 1
        else:
            total_consonants += 1
    return total_consonants, vowels_count
if __name__ == '__main__':
    # Read one line from stdin and report its consonant/vowel counts.
    # Fixes: removed the unused ``stream`` local; parenthesized the print
    # calls so the script runs under both Python 2 and Python 3 (the
    # original print statements were Python-2-only syntax).
    string_in = sys.stdin.readline()
    total_consonants, vowels_count = count_vowels_consonants(string_in)
    print('Total number of consonants: %s' % total_consonants)
    print('Vowels count:')
    print(vowels_count)
|
from dataloader import DataLoader
import torch
from collections import namedtuple
import pickle
import utils
print('> Loading DataLoader')
class opt:
    # Minimal options namespace; DataLoader only needs a .debug flag here.
    debug = 0
dataloader = DataLoader(None, opt, 'i80')
print('> Loading splits')
# NOTE(review): absolute cluster-specific path — this only resolves on the
# original machine; parameterize before reuse.
splits = torch.load('/home/atcold/vLecunGroup/nvidia-collab/traffic-data-atcold/data_i80_v0/splits.pth')
for split in splits:
    data_dict = dict()
    print(f'> Building {split}')
    for idx in splits[split]:
        car_path = dataloader.ids[idx]
        # Map dataset index -> (timeslot, car_id) parsed from the file path.
        timeslot, car_id = utils.parse_car_path(car_path)
        data_dict[idx] = timeslot, car_id
    print(f'> Pickling {split}')
    with open(f'{split}.pkl', 'wb') as f:
        pickle.dump(data_dict, f)
|
# -*- coding: UTF-8 -*-
from nameko.rpc import rpc
import pymysql
from docx import Document
from docx.enum.table import WD_ROW_HEIGHT_RULE
from docx.shared import RGBColor, Pt
from docx.enum.text import WD_UNDERLINE, WD_LINE_SPACING
from docx.oxml.ns import qn
class Compute(object):
    """Nameko RPC service exposing teacher/institution lookups on eds_base."""

    name = "test"

    # Connection parameters shared by every query. A fresh connection is
    # opened per call and always closed — the original opened a connection
    # in every RPC method and never closed it (connection leak).
    _DB_CONF = dict(host='47.106.83.33', db='eds_base', user='root',
                    password='111111', port=3306, charset='utf8')

    def _query(self, sql, args=None, one=False):
        """Execute *sql* with *args*; return fetchone() if *one*, else fetchall().

        The connection is closed in a finally block, even on errors.
        """
        db = pymysql.connect(**self._DB_CONF)
        try:
            cursor = db.cursor()
            cursor.execute(sql, args)
            return cursor.fetchone() if one else cursor.fetchall()
        finally:
            db.close()

    @rpc
    def test(self):
        """Health-check endpoint."""
        return "hello"

    @rpc
    def compute(self):
        """Return IDs of teachers whose honor text mentions 院士 (academician)."""
        # No args are passed, so pymysql leaves the literal % wildcards alone.
        return self._query("select TEACHER_ID from es_honor where HONOR like '%院士%' ")

    @rpc
    def compute1(self, id):
        """Return the name row for teacher *id* (fetchone result, or None)."""
        return self._query("select NAME from es_teacher where ID = %s ", id, one=True)

    @rpc
    def compute2(self):
        """Return the name rows of every academician found by compute()."""
        return [self.compute1(i) for i in self.compute()]

    @rpc
    def get_institutionId(self, schoolName, institutionName):
        """Return institution ID rows matching school + institution name."""
        return self._query(
            "select ID from es_institution where SCHOOL_NAME = %s and NAME = %s ",
            (schoolName, institutionName))

    @rpc
    def get_schoolId(self, schoolName):
        """Return the school ID row for *schoolName* (fetchone result, or None)."""
        return self._query(
            "select SCHOOL_ID from es_institution where SCHOOL_NAME = %s",
            schoolName, one=True)

    @rpc
    def get_teacher_name_and_insId(self, school_id):
        """Return (NAME, INSTITUTION_ID, HOMEPAGE) rows of academicians in *school_id*."""
        return self._query(
            "select NAME,INSTITUTION_ID,HOMEPAGE from es_teacher where SCHOOL_ID = %s and ACADEMICIAN > 1",
            school_id)

    @rpc
    def get_institution_name(self, institution_ID):
        """Return the institution name row for *institution_ID* (fetchone result)."""
        return self._query(
            "select NAME from es_institution where ID = %s ",
            institution_ID, one=True)

    @rpc
    def get_academicianName(self, institution_id):
        """Return (NAME, HOMEPAGE) rows of academicians in *institution_id*."""
        return self._query(
            "select NAME, HOMEPAGE from es_teacher where INSTITUTION_ID = %s and ACADEMICIAN > 0 ",
            institution_id)

    @rpc
    def get_institutionNamebyschoolName(self, schoolname):
        """Return all institution name rows under school *schoolname*."""
        return self._query(
            "select NAME from es_institution where SCHOOL_NAME = %s ",
            schoolname)
class document(object):
name = "document"
    # Look up an institution ID from school name + institution name.
    @rpc
    def get_institutionId(self, schoolName, institutionName):
        """Return the institution ID row (fetchone) for the given school and
        institution names, or None if no match.

        Note: the Compute service defines a same-named method that uses
        fetchall instead of fetchone.
        """
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "select ID from es_institution where SCHOOL_NAME = %s and NAME = %s "
        cursor.execute(sql, (schoolName, institutionName))
        institution_id = cursor.fetchone()
        return institution_id
    # From an institution id, get every teacher's ID, name, and academician /
    # outstanding-youth / Changjiang-scholar flags.
    @rpc
    def get_teacher_info(self, institutionId):
        """Return (ID, NAME, ACADEMICIAN, OUTYOUTH, CHANGJIANG) rows for all
        teachers in institution *institutionId*."""
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "select ID,NAME,ACADEMICIAN,OUTYOUTH,CHANGJIANG from es_teacher where INSTITUTION_ID = %s"
        cursor.execute(sql, (institutionId))
        teacherInfo = cursor.fetchall()
        return teacherInfo
    # From school + institution name, get the key-laboratory names.
    @rpc
    def get_lab(self, school_name, institution_name):
        """Return lab names for the school/institution, truncated at the
        first full-width parenthesis.

        NOTE(review): ``i[0].index("(")`` raises ValueError when a lab
        name contains no full-width "(" — confirm the data always has one.
        """
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "SELECT name FROM main_lab where org= %s and institution = %s "
        cursor.execute(sql, (school_name, institution_name))
        lab = cursor.fetchall()
        lab_name = []
        for i in lab:
            b = i[0].index("(")
            lab_name.append(i[0][0:b])
        return lab_name
    # Get a team leader's research fields.
    @rpc
    def get_fields(self, institution_id, teacher_name):
        """Return the FIELDS row (fetchone) for the named teacher in the
        given institution, or None if no match."""
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "SELECT FIELDS FROM es_teacher where INSTITUTION_ID = %s and NAME = %s "
        cursor.execute(sql, (institution_id, teacher_name))
        fields = cursor.fetchone()
        return fields
    # From an institution id, get key-discipline codes and their ratings.
    @rpc
    def get_maindis(self, institution_id):
        """Return (DISCIPLINE_CODE, EVALUATION) rows rated A+ or A for
        institution *institution_id*."""
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = '''SELECT DISCIPLINE_CODE,EVALUATION FROM es_relation_in_dis where INSTITUTION_ID = %s and (EVALUATION = 'A+' or EVALUATION = 'A')'''
        cursor.execute(sql, (institution_id))
        maindis = cursor.fetchall()
        return maindis
    # From a teacher id, get co-author names and joint paper counts.
    @rpc
    def get_relation(self, teacher_id):
        """Return (teacher2_name, paper_num) rows for every co-author of
        teacher *teacher_id*."""
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "select teacher2_name,paper_num from teacher_teacher where teacher1_id = %s"
        cursor.execute(sql, (teacher_id))
        teacher_list = cursor.fetchall()
        return teacher_list
    # From a discipline code, get the discipline name.
    @rpc
    def get_discipline(self, discipline_code):
        """Return the discipline name string for *discipline_code*.

        NOTE(review): fetchone() returns None for an unknown code, so the
        ``discipline_name[0]`` below would raise TypeError — confirm
        callers only pass valid codes.
        """
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "SELECT NAME FROM es_discipline where CODE = %s"
        cursor.execute(sql, (discipline_code))
        discipline_name = cursor.fetchone()
        discipline_name = discipline_name[0]
        return discipline_name
    # From a teacher name + institution id, get the teacher ID.
    @rpc
    def get_teacher_id(self, name, institution):
        """Return the teacher ID row (fetchone) for *name* within
        institution *institution*, or None if no match."""
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "SELECT ID FROM es_teacher where NAME = %s and INSTITUTION_ID = %s"
        cursor.execute(sql, (name, institution))
        teacher_id = cursor.fetchone()
        return teacher_id
# 根据学校名获取带头人姓名,项目名,项目年份
@rpc
def get_project(self, org):
db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
charset='utf8')
cursor = db.cursor()
sql = "SELECT PERSON,PROJECT_NAME,FUNDS,YEAR FROM eval_project where ORG = %s and FUNDS is not NULL"
cursor.execute(sql, (org))
project = cursor.fetchall()
project_list = []
for i in project:
project_list.append(i)
return project_list
# 根据作者id,年份获取论文所有作者,论文名
@rpc
def get_paper_info_1(self, author_id, year):
db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
charset='utf8')
cursor = db.cursor()
sql = "SELECT author,name from eds_paper_clean where author_id = %s and year = %s"
cursor.execute(sql, (author_id, year))
paper = cursor.fetchall()
paperlist = []
for i in paper:
paperlist.append(i[0])
return paperlist
# 根据作者id,获取论文所有作者,论文名
@rpc
def get_paper_info_2(self, author_id):
db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
charset='utf8')
cursor = db.cursor()
sql = "SELECT author,name from eds_paper_clean where author_id = %s"
cursor.execute(sql, (author_id))
paper = cursor.fetchall()
# paperlist = []
# for i in paper:
# paperlist.append(i[0])
return paper
    # From a teacher ID and year, get honors.
    @rpc
    def get_honor_1(self, teacher_id, year):
        """Return HONOR rows for teacher *teacher_id* in *year*."""
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "select HONOR from es_honor where TEACHER_ID = %s and year = %s"
        cursor.execute(sql, (teacher_id, year))
        honor = cursor.fetchall()
        return honor
    # From a teacher ID, get all honors (no year filter — the original
    # comment was a copy-paste of get_honor_1's).
    @rpc
    def get_honor_2(self, teacher_id):
        """Return all HONOR rows for teacher *teacher_id*, across all years."""
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "select HONOR from es_honor where TEACHER_ID = %s"
        cursor.execute(sql, (teacher_id))
        honor = cursor.fetchall()
        return honor
    @rpc
    # From a teacher name, get title flags: academician / Changjiang scholar /
    # outstanding youth.
    def get_title(self, teacherName, institution_id):
        """Return (NAME, ACADEMICIAN, OUTYOUTH, CHANGJIANG) for the named
        teacher in *institution_id* (fetchone; None if absent)."""
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root', password='111111', port=3306,
                             charset='utf8')
        cursor = db.cursor()
        sql = "select NAME,ACADEMICIAN,OUTYOUTH,CHANGJIANG from es_teacher where NAME = %s and INSTITUTION_ID=%s"
        cursor.execute(sql, (teacherName, institution_id))
        title = cursor.fetchone()
        return title
@rpc
# 根据老师名获取老师专利
def get_invention(self, teacherName):
db = pymysql.connect(host='47.104.236.183', db='zhuanli', user='root', password='111111', port=3306,
charset='utf8')
cursor = db.cursor()
sql = "select title,date1 from zhuanli where author_list like '%%%s%%'" %(teacherName)
cursor.execute(sql)
invention = cursor.fetchall()
return invention
# 创建文档
@rpc
def createdocument(self,institution_info, team):
document = Document()
document.styles['Normal'].font.name = u'微软雅黑'
document.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
# 创建段落
p = document.add_paragraph("")
# 设置段落左右居中
#p.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
# 设置段落的段前间距
p.paragraph_format.space_before = Pt(5)
# 设置段落得断后间距
p.paragraph_format.space_after = Pt(5)
# 设置行间距
p.paragraph_format.line_spacing = Pt(8)
# 设置段落间距的格式为最小值
p.paragraph_format.line_spacing_rule = WD_LINE_SPACING.AT_LEAST
run = p.add_run(
institution_info["school_name"] + "科研简报" + institution_info["institution_name"] + institution_info["date"])
# 设置字体
run.font.name = u'宋体'
run._element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')
# 颜色设置,这里是用RGB颜色
run.font.color.rgb = RGBColor(0, 0, 0)
# 设置字体大小
run.font.size = Pt(21)
# 字体是否加粗
run.bold = True
# 无下划线
run.underline = WD_UNDERLINE.NONE
# 创建表格
table = document.add_table(rows=1, cols=1)
# 表格风格
table.style = "Table Grid"
for row in table.rows:
# 设置每行表格高度
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
# 在表格中写入文字
run = table.cell(0, 0).paragraphs[0].add_run("一、院系概况")
# 设置表格中字体
# 字体大小
run.font.size = Pt(15)
# 字体大小
run.font.color.rgb = RGBColor(91, 155, 213)
# 是否加粗
run.bold = True
table = document.add_table(rows=1, cols=2)
# 表格风格
table.style = "Table Grid"
for row in table.rows:
# 设置每行表格高度
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run("国家重点学科")
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
run.font.size = Pt(14)
run.bold = True
run.font.color.rgb = RGBColor(237, 125, 49)
run = table.cell(0, 1).paragraphs[0].add_run("评价")
table.cell(0, 1).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 1).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 1).paragraphs[0].paragraph_format.line_spacing = Pt(30)
run.font.size = Pt(14)
run.bold = True
run.font.color.rgb = RGBColor(237, 125, 49)
# 创建表格
table = document.add_table(rows=len(institution_info["maindis"]), cols=2)
# 表格风格
table.style = "Table Grid"
for row in table.rows:
# 设置每行表格高度
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
key = []
value = []
for i in institution_info["maindis"].keys():
key.append(i)
for i in institution_info["maindis"].values():
value.append(i)
count = len(institution_info["maindis"])
for i in range(0, count):
run = table.cell(i, 0).paragraphs[0].add_run(key[i])
run.font.name = u'微软雅黑'
run._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
run.font.size = Pt(12)
run.font.color.rgb = RGBColor(0, 0, 0)
table.cell(i, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(i, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(i, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
run = table.cell(i, 1).paragraphs[0].add_run(value[i])
run.font.name = u'微软雅黑'
run._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
run.font.size = Pt(12)
run.font.color.rgb = RGBColor(0, 0, 0)
table.cell(i, 1).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(i, 1).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(i, 1).paragraphs[0].paragraph_format.line_spacing = Pt(30)
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run("科研平台")
run.font.size = Pt(14)
run.font.color.rgb = RGBColor(237, 125, 49)
run.bold = True
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run(j + " " for j in institution_info['mainlab'])
run.font.size = Pt(12)
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# 团队介绍
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run("二、科研团队")
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
run.font.size = Pt(15)
run.font.color.rgb = RGBColor(91, 155, 213)
run.bold = True
# Render one report block per research team: a "项目成员" (project members)
# banner, a leader row, then optional rows for academicians, Changjiang
# scholars and distinguished young scholars.
for i in team:
# "项目成员" (project members) orange sub-heading in its own 1x1 table.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run("项目成员")
run.font.size = Pt(14)
run.font.color.rgb = RGBColor(237, 125, 49)
run.bold = True
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Team leader ("领军人物") row: label in column 0, columns 1-4 merged for the value.
table = document.add_table(rows=1, cols=5)
table.style = "Table Grid"
table.cell(0, 0)
table.cell(0, 1).merge(table.cell(0, 2)).merge(table.cell(0, 3)).merge(table.cell(0, 4))
run = table.cell(0, 0).paragraphs[0].add_run("领军人物")
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
run.font.size = Pt(12)
run.bold = True
# The leader's name goes into the merged value cell.
run = table.cell(0, 2).paragraphs[0].add_run(i['head_name'])
run.font.size = Pt(12)
table.cell(0, 2).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 2).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 2).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Academician ("院士") row.
# NOTE(review): this guard tests i["changjiang_list"] but the row renders
# i['academician_list'] — looks like a copy/paste bug; it should probably
# test len(i["academician_list"]) > 0.
if len(i["changjiang_list"]) > 0:
table = document.add_table(rows=1, cols=5)
table.style = "Table Grid"
table.cell(0, 0)
table.cell(0, 1).merge(table.cell(0, 2)).merge(table.cell(0, 3)).merge(table.cell(0, 4))
run = table.cell(0, 0).paragraphs[0].add_run("院士")
run.font.size = Pt(12)
run.bold = True
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# NOTE(review): add_run() is passed a generator expression; python-docx
# expects a string — likely " ".join(...) was intended.  The same
# pattern recurs for the other member lists below.
run = table.cell(0, 1).paragraphs[0].add_run(j + " " for j in i['academician_list'])
run.font.size = Pt(12)
table.cell(0, 1).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 1).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 1).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Changjiang scholar ("长江学者") row, only when the team has any.
if len(i["changjiang_list"]) > 0:
table = document.add_table(rows=1, cols=5)
table.style = "Table Grid"
table.cell(0, 0)
table.cell(0, 1).merge(table.cell(0, 2)).merge(table.cell(0, 3)).merge(table.cell(0, 4))
run = table.cell(0, 0).paragraphs[0].add_run("长江学者")
run.font.size = Pt(12)
run.bold = True
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
run = table.cell(0, 1).paragraphs[0].add_run(j + " " for j in i["changjiang_list"])
run.font.size = Pt(12)
table.cell(0, 1).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 1).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 1).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Distinguished young scholar ("杰出青年") row, only when the team has any.
if len(i['outyouth_list']) > 0:
table = document.add_table(rows=1, cols=5)
table.style = "Table Grid"
table.cell(0, 0)
table.cell(0, 1).merge(table.cell(0, 2)).merge(table.cell(0, 3)).merge(table.cell(0, 4))
run = table.cell(0, 0).paragraphs[0].add_run("杰出青年")
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
run.font.size = Pt(12)
run.bold = True
run = table.cell(0, 1).paragraphs[0].add_run(j + " " for j in i["outyouth_list"])
run.font.size = Pt(12)
table.cell(0, 1).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 1).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 1).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Other members ("其他成员") row: label + merged value cell listing everyone else.
table = document.add_table(rows=1, cols=5)
table.style = "Table Grid"
table.cell(0, 0)
table.cell(0, 1).merge(table.cell(0, 2)).merge(table.cell(0, 3)).merge(table.cell(0, 4))
run = table.cell(0, 0).paragraphs[0].add_run("其他成员")
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
run.font.size = Pt(12)
run.bold = True
# NOTE(review): add_run() is passed a generator; python-docx expects a string.
run = table.cell(0, 1).paragraphs[0].add_run(j + " " for j in i["other_list"])
run.font.size = Pt(12)
table.cell(0, 1).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 1).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 1).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Research direction ("团队研究方向") orange sub-heading.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run("团队研究方向")
run.font.size = Pt(14)
run.font.color.rgb = RGBColor(237, 125, 49)
run.bold = True
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Free-text description of the team's research direction.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run(i["team_direction"])
run.font.size = Pt(12)
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Team achievements ("团队成果") orange sub-heading.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run("团队成果")
run.font.size = Pt(14)
run.font.color.rgb = RGBColor(237, 125, 49)
run.bold = True
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Paper achievements ("论文成果") label row.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run("论文成果")
run.font.size = Pt(12)
run.bold = True
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Paper titles, each wrapped in 《 》 book-title marks.
# NOTE(review): add_run() is passed a generator; python-docx expects a string.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
run = table.cell(0, 0).paragraphs[0].add_run("《" + j + "》" for j in i["paper"])
run.font.size = Pt(12)
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Patent achievements ("专利成果") label row.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run("专利成果")
run.font.size = Pt(12)
run.bold = True
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Patent names, wrapped like the paper titles.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
run = table.cell(0, 0).paragraphs[0].add_run("《" + j + "》" for j in i["invention"])
run.font.size = Pt(12)
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Award achievements ("获奖成果") label row.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
run = table.cell(0, 0).paragraphs[0].add_run("获奖成果")
run.font.size = Pt(12)
run.bold = True
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Award list for the team.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
run = table.cell(0, 0).paragraphs[0].add_run(j for j in i["award"])
run.font.size = Pt(12)
table.cell(0, 0).paragraphs[0].paragraph_format.space_before = Pt(2)
table.cell(0, 0).paragraphs[0].paragraph_format.space_after = Pt(6)
table.cell(0, 0).paragraphs[0].paragraph_format.line_spacing = Pt(30)
# Trailing spacer table after the last team block.
table = document.add_table(rows=1, cols=1)
table.style = "Table Grid"
for row in table.rows:
row.height = Pt(30)
row.height_rule = WD_ROW_HEIGHT_RULE.AT_LEAST
# Save the finished report under ./static/docx/ with a name built from
# school, institution and date.
document.save("./static/docx/"+institution_info['school_name']+"科研简报"+institution_info['institution_name']+institution_info['date']+".docx")
class team(object):
    """RPC service with read-only lookups against the eds_base MySQL schema.

    Each call opens a short-lived connection and always closes it before
    returning (the previous version leaked one connection per call).
    NOTE(review): database credentials are hard-coded; they should come
    from configuration or the environment.
    """
    name = "team"

    @staticmethod
    def _query(sql, args, host='47.106.83.33', password='111111', fetchall=False):
        """Run one parameterized SELECT and return fetchone()/fetchall().

        The connection is closed even when the query raises.
        """
        db = pymysql.connect(host=host, db='eds_base', user='root',
                             password=password, port=3306, charset='utf8')
        try:
            cursor = db.cursor()
            cursor.execute(sql, args)
            return cursor.fetchall() if fetchall else cursor.fetchone()
        finally:
            db.close()

    @rpc
    def get_institutionId(self, schoolName, institutionName):
        """Return the (ID, SCHOOL_ID) row for a school/institution name pair."""
        sql = "select ID,SCHOOL_ID from es_institution where SCHOOL_NAME = %s and NAME = %s "
        return self._query(sql, (schoolName, institutionName))

    @rpc
    def get_teacher(self, school_id):
        """Return the names of every teacher registered under school_id."""
        sql = "select NAME from es_teacher where SCHOOL_ID = %s"
        return self._query(sql, (school_id,), fetchall=True)

    @rpc
    def get_teacher_id(self, teacher_name, school_id, institution_id):
        """Return the ID of the teacher matching name, school and institution."""
        sql = "select ID from es_teacher where NAME = %s and SCHOOL_ID = %s and INSTITUTION_ID = %s"
        return self._query(sql, (teacher_name, school_id, institution_id))

    @rpc
    def get_member(self, author_id):
        """Return the author field of every paper by author_id.

        Papers live on a different database host than the teacher tables.
        """
        sql = "select author from es_paper where author_id = %s"
        return self._query(sql, (author_id,), host='47.104.236.183',
                           password='SLX..eds123', fetchall=True)

    @rpc
    def get_title(self, teacherName, institution_id):
        """Return (NAME, ACADEMICIAN, OUTYOUTH, CHANGJIANG) for a teacher."""
        sql = "select NAME,ACADEMICIAN,OUTYOUTH,CHANGJIANG from es_teacher where NAME = %s and INSTITUTION_ID=%s"
        return self._query(sql, (teacherName, institution_id))
class title_search(object):
    """RPC service for teacher title lookups.

    Connections are closed after every call (the previous version leaked
    one connection per call).
    """
    name = "title_search"

    @staticmethod
    def _query(sql, args):
        """Run one parameterized SELECT and return fetchone(); always close."""
        db = pymysql.connect(host='47.106.83.33', db='eds_base', user='root',
                             password='111111', port=3306, charset='utf8')
        try:
            cursor = db.cursor()
            cursor.execute(sql, args)
            return cursor.fetchone()
        finally:
            db.close()

    @rpc
    def get_institutionId(self, schoolName, institutionName):
        """Return the institution ID for a school/institution name pair."""
        sql = "select ID from es_institution where SCHOOL_NAME = %s and NAME = %s "
        return self._query(sql, (schoolName, institutionName))

    @rpc
    def get_title(self, teacherName, institution_id):
        """Return (NAME, ACADEMICIAN, OUTYOUTH, CHANGJIANG) for a teacher."""
        sql = "select NAME,ACADEMICIAN,OUTYOUTH,CHANGJIANG from es_teacher where NAME = %s and INSTITUTION_ID=%s"
        return self._query(sql, (teacherName, institution_id))
class paper_search(object):
    """RPC service for looking up a teacher's papers.

    Connections are closed after every call (the previous version leaked
    one connection per call).
    """
    name = "paper_search"

    @staticmethod
    def _query(sql, args, host='47.106.83.33', password='111111', fetchall=False):
        """Run one parameterized SELECT; always close the connection."""
        db = pymysql.connect(host=host, db='eds_base', user='root',
                             password=password, port=3306, charset='utf8')
        try:
            cursor = db.cursor()
            cursor.execute(sql, args)
            return cursor.fetchall() if fetchall else cursor.fetchone()
        finally:
            db.close()

    @rpc
    def get_institutionId(self, schoolName, institutionName):
        """Return the institution ID for a school/institution name pair."""
        sql = "select ID from es_institution where SCHOOL_NAME = %s and NAME = %s "
        return self._query(sql, (schoolName, institutionName))

    @rpc
    def get_teacherid(self, teacher_name, institution_id):
        """Return the teacher ID matching a name within an institution."""
        sql = "select ID from es_teacher where NAME = %s and INSTITUTION_ID = %s"
        return self._query(sql, (teacher_name, institution_id))

    @rpc
    def get_paper(self, author_id):
        """Return (name, year) of every paper by author_id.

        Papers live on a different database host than the teacher tables.
        """
        sql = "SELECT name,year from es_paper where author_id = %s"
        return self._query(sql, (author_id,), host='47.104.236.183',
                           password='SLX..eds123', fetchall=True)
class teacher_update(object):
    """RPC service managing the teacher-information review workflow.

    Submitted updates are queued in teacher_update_info; approved ones are
    applied to es_teacher, and processed queue rows are deleted.
    Connections are closed after every call (the previous version leaked
    one connection per call).
    """
    name = "teacher_update"

    @staticmethod
    def _connect():
        """Open a connection to the eds_base database."""
        return pymysql.connect(host='47.106.83.33', db='eds_base', user='root',
                               password='111111', port=3306, charset='utf8')

    def _fetch(self, sql, args=None, fetchall=False):
        """Run one SELECT and return fetchone()/fetchall(); always close."""
        db = self._connect()
        try:
            cursor = db.cursor()
            cursor.execute(sql, args)
            return cursor.fetchall() if fetchall else cursor.fetchone()
        finally:
            db.close()

    def _write(self, sql, args):
        """Run one INSERT/UPDATE/DELETE and commit; always close."""
        db = self._connect()
        try:
            cursor = db.cursor()
            cursor.execute(sql, args)
            db.commit()
        finally:
            db.close()

    @rpc
    def insert_teacher_info(self, teacher_info):
        """Queue a submitted teacher record in teacher_update_info for review."""
        sql = "insert into teacher_update_info(name,title,sex,school,institution,email,tel,birthyear,fields) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        self._write(sql, (teacher_info['name'], teacher_info['title'], teacher_info['sex'],
                          teacher_info['school'], teacher_info['institution'],
                          teacher_info['email'], teacher_info['tel'],
                          teacher_info['birthyear'], teacher_info['fields']))

    @rpc
    def get_info(self):
        """Return every queued teacher update awaiting review."""
        return self._fetch("select * from teacher_update_info", fetchall=True)

    @rpc
    def get_institutionId(self, schoolName, institutionName):
        """Return (SCHOOL_ID, ID) for a school/institution name pair."""
        sql = "select SCHOOL_ID,ID from es_institution where SCHOOL_NAME = %s and NAME = %s "
        return self._fetch(sql, (schoolName, institutionName))

    @rpc
    def get_teacher_id(self, name, school_id, institution_id):
        """Return the teacher ID matching name, school and institution."""
        sql = "SELECT ID FROM es_teacher where NAME = %s and SCHOOL_ID = %s and INSTITUTION_ID = %s"
        return self._fetch(sql, (name, school_id, institution_id))

    @rpc
    def update_teacher_info(self, title, sex, email, tel, birthyear, fields, teacher_id):
        """Apply an approved update to an existing es_teacher row."""
        sql = "update es_teacher set TITLE = %s, SEX = %s, EMAIL = %s, TEL = %s, BIRTHYEAR = %s, FIELDS = %s where ID = %s"
        self._write(sql, (title, sex, email, tel, birthyear, fields, teacher_id))

    @rpc
    def insert_teacher_newinfo(self, name, title, sex, school_id, institution_id, email, tel, birthyear, fields):
        """Insert a brand-new teacher row into es_teacher."""
        sql = "insert into es_teacher(NAME,TITLE,SEX,SCHOOL_ID,INSTITUTION_ID,EMAIL,TEL,BIRTHYEAR,FIELDS) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        self._write(sql, (name, title, sex, school_id, institution_id, email, tel, birthyear, fields))

    @rpc
    def delete_teacher_info(self, name, title, sex, school, institution, email, tel, birthyear, fields):
        """Remove a processed (approved or rejected) row from the review queue."""
        sql = "delete from teacher_update_info where name=%s and title=%s and sex=%s and school=%s and institution=%s and email=%s and tel=%s and birthyear=%s and fields=%s"
        self._write(sql, (name, title, sex, school, institution, email, tel, birthyear, fields))
|
__author__ = 'Amit'
from bs4 import BeautifulSoup
# noinspection PyUnresolvedReferences
from WS_Specs import ws_specs
def title_string_scrap(html_response_data):
    """Return the stripped string content of the page's <title> element.

    A) Takes html response data (bytes) --> Returns string value in the
       title section.
    Raises AttributeError when the document has no <title> element.
    """
    # An explicit parser avoids bs4's "no parser was explicitly specified"
    # warning and makes parsing identical across environments.
    soup = BeautifulSoup(html_response_data, "html.parser")
    return soup.title.string.strip()
# noinspection PyBroadException
def clean_up_name(name_str, name_remove_str):
    """Return name_str truncated just before the first occurrence of
    name_remove_str.

    A) Takes a string value and the marker string to cut at (both provided
       by the calling function).
    B) Returns the original string when name_remove_str is not found.
    """
    try:
        return name_str[:name_str.index(name_remove_str)]
    except ValueError:
        # Only a failed .index() lookup is expected here; the old bare
        # except also hid real errors such as TypeError on non-str input.
        return name_str
def data_scrap(html_text, data_marker_str, data_start_str, data_end_str):
    """Locate and return a data substring inside html_text.

    A) Takes the html page as text and three string parameters (all four
       provided by the calling function) to locate data:
       1) data_marker_str -- identifies the block containing the data
       2) data_start_str  -- delimiter just left of the data
       3) data_end_str    -- delimiter just right of the data
    B) Returns the data as a string, or 'None - Value Error' when any
       delimiter cannot be found.
    """
    try:
        data_marker_pos = html_text.index(data_marker_str)
        data_start_pos = html_text.index(data_start_str, data_marker_pos)
        data_end_pos = html_text.index(data_end_str, data_marker_pos)
        # NOTE(review): skips exactly one character after the start
        # delimiter, which assumes data_start_str is a single character —
        # confirm against the ws_specs values used by callers.
        data_text = html_text[data_start_pos + 1:data_end_pos]
    except ValueError:
        # Only failed .index() lookups are expected; the old bare except
        # also masked unrelated errors (e.g. non-string arguments).
        data_text = 'None - Value Error'
    return data_text
def get_title_plus_data(html_response_data):
    """Return [cleaned title, scraped data string] for a page's response data.

    The title is pulled from the <title> element and cleaned with the
    configured removal marker; the data value is located in the page text
    using the marker/start/end strings from ws_specs.
    """
    # Title: scrape, then strip the configured trailing marker.
    raw_title = title_string_scrap(html_response_data)
    cleaned_title = clean_up_name(raw_title, ws_specs['name_remove_str'])
    # Data: work on the plain text of the page.
    page_text = BeautifulSoup(html_response_data).get_text()
    data_value = data_scrap(
        page_text,
        ws_specs['data_marker_str'],
        ws_specs['data_start_str'],
        ws_specs['data_end_str'],
    )
    return [cleaned_title, data_value]
|
# Generated by Django 2.1.2 on 2019-03-01 06:15
from django.db import migrations
class Migration(migrations.Migration):
"""Auto-generated migration: removes the Direccion_Envio model and adds a
uniqueness constraint to img_producto.  Generated by Django; by convention
the operation list is left exactly as generated.
"""
dependencies = [
('inventario', '0034_auto_20190228_2056'),
]
operations = [
# Drop the shipping-address model entirely.
migrations.DeleteModel(
name='Direccion_Envio',
),
# Each product image must have a unique display order per product.
migrations.AlterUniqueTogether(
name='img_producto',
unique_together={('id_producto', 'orden')},
),
]
|
'''
If a runner runs 10 miles in 30 minutes and 30 seconds,
what is his/her average speed in kilometers per hour? (Tip: 1 mile = 1.6 km)
'''
# Elapsed time: 30 minutes plus 30 seconds, expressed in seconds.
time_seconds = 30 * 60 + 30
distance_miles = 10
# Average pace in miles covered per second.
pace_miles_per_second = distance_miles / time_seconds
# 3600 seconds per hour -> miles per hour.
mph = pace_miles_per_second * 3600
# 1 mile = 1.6 km -> kilometers per hour.
speed_kilo_hour = mph * 1.6
print(f'The speed in kilometers per hour is {speed_kilo_hour}')
|
import socket
import random
import conmysql
class UserClient:
    """Console client that talks to the mail server over one TCP socket.

    The user supplies a user name (resolved to a base port via the
    database) and then chooses the send service (base port) or the
    receive service (base port + 1).
    """

    def __init__(self):
        # Socket used for every exchange with the server.
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Local address; kept on the instance but not used elsewhere here.
        self.ip = socket.gethostbyname(socket.gethostname())
        self.port = -1

    def connect(self, hostip):
        """Prompt for a user name and a service, then connect to the server."""
        port = -1
        # Keep asking until the user name resolves to a known port.
        while port == -1:
            user_name = input("请输入用户名:\n")
            port = conmysql.port_find(user_name)
            if port == -1:
                print("用户名不存在,请重新输入")
        print("请选择发信还是收信服务(1.发信 2.收信)")
        choose = input()
        while choose != '1' and choose != '2':
            print("输入错误,请重新选择")
            choose = input()
        # '1' -> send service on the base port, '2' -> receive service one above.
        if choose == '2':
            port = port + 1
        self.client_socket.connect((hostip, port))
        print("连接服务器成功")

    def start(self):
        """Relay console input to the server until the user types QUIT."""
        while True:
            outgoing = input()
            self.client_socket.send(outgoing.encode("utf-8"))
            if outgoing == "QUIT":
                break
            reply = self.client_socket.recv(1024).decode("utf-8")
            # "$$" is the server's sentinel reply and is not displayed.
            if reply != "$$":
                print("From Server:", reply)
        # Close the connection to the server.
        self.client_socket.close()
def main():
    """Entry point: connect a client to the local server and run its loop."""
    client = UserClient()
    client.connect("127.0.0.1")
    client.start()


if __name__ == '__main__':
    main()
#!/usr/bin/python
import math
class PaginationHelper:
    """Zero-based paging arithmetic over an arbitrary collection.

    The constructor takes in an array of items and an integer indicating
    how many items fit within a single page.
    """

    def __init__(self, collection, items_per_page):
        self.collection = collection
        self.items_per_page = items_per_page

    def item_count(self):
        """Return the number of items within the entire collection."""
        return len(self.collection)

    def page_count(self):
        """Return the number of pages needed to hold every item."""
        return int(math.ceil(self.item_count() / int(self.items_per_page)))

    def page_item_count(self, page_index):
        """Return the number of items on page `page_index` (zero based),
        or -1 when `page_index` is out of range.
        """
        page_index = int(page_index)
        # Range check first, so negative or too-large pages can never fall
        # through to the last-page arithmetic (the old code tested the
        # last-page condition before validating the index).
        if not 0 <= page_index < self.page_count():
            return -1
        if page_index < self.page_count() - 1:
            return int(self.items_per_page)
        # Last page: the remainder, or a full page when the collection
        # divides evenly (the old `% items_per_page` wrongly returned 0).
        remainder = self.item_count() % int(self.items_per_page)
        return remainder if remainder else int(self.items_per_page)

    def page_index(self, item_index):
        """Return the zero-based page holding `item_index`, or -1 when
        `item_index` is out of range.
        """
        item_index = int(item_index)
        if not 0 <= item_index < self.item_count():
            return -1
        # Floor division maps items 0..per-1 to page 0, per..2*per-1 to
        # page 1, etc.  The old `int((item_index + 1) / per)` was off by
        # one at every page boundary (e.g. item 1 with 2 per page -> 1).
        return item_index // int(self.items_per_page)
# Demo: paginate a 5-item collection, 2 items per page, and print a summary.
p1 = PaginationHelper([1,2,3,4,5], 2)
print(f"""
My input array is {p1.collection} with {p1.items_per_page} items per page.
There are {p1.item_count()} items in the array on {p1.page_count()} pages.
On page 3 there are {p1.page_item_count(2)} item(s).
The 3rd value is on page {p1.page_index(2) + 1} and the 5th value is on page {p1.page_index(4) + 1}.
""")
#import pd
from pymisp import (MISPEvent, MISPSighting, MISPTag, MISPOrganisation, MISPObject)
from pymisp import MISPEvent, MISPObject, PyMISP, ExpandedPyMISP, MISPSharingGroup
import argparse
import csv
#import pandas as pd
import requests
import io
import os
import time
import datetime
import json
#import warnings
# Date string used to pick today's NVD index.
today=str(datetime.date.today())
from elasticsearch import Elasticsearch
# NOTE(review): placeholder host name — replace with the real Elasticsearch URL.
es = Elasticsearch(['http://elastisearch_host:9200'])
#es = Elasticsearch(['http://elastisearch_host:9200'], verify_certs=False)
# Fetch up to 500 NVD records for the "AWS" product from today's index.
r = es.search(index="nvd-"+today+"", body={"size": 500, "query": {"match": {"Product": "AWS"}}})
#res = es.search(index="report-results-"+today+"", body={"query": {"match_all": {}}})
#json_data=json.loads(r)
#print(r)
#print(json.loads(result))
# MISP endpoint placeholders (the loop below sets its own url/key as well).
misp_url="https://misp_site"
misp_key="misp_authkey"
misp_verifycert = False
# Create one MISP event per NVD hit, attaching the CVSS fields as attributes.
for i in r['hits']['hits']:
# print(i["_source"])
# Pull the vulnerability fields out of the Elasticsearch document.
assigner = i['_source']['Assigner']
attack_complexity = i["_source"]["Attack_Complexity"]
attack_vector = i["_source"]["Attack_Vector"]
cve = i["_source"]["CVE"]
cvssv3_base_score = i["_source"]["CVSSV3_Base_Score"]
cvssv3_vector_string = i["_source"]["CVSSV3_Vector_String"]
confidentiality_impact = i["_source"]["Confidentiality_Impact"]
description = i["_source"]["Description"]
integrity_impact = i["_source"]["Integrity_Impact"]
last_modified = i["_source"]["Last_Modified"]
privilege_required = i["_source"]["Privilege_Required"]
product = i["_source"]["Product"]
published = i["_source"]["Published"]
scope = i["_source"]["Scope"]
severity = i["_source"]["Severity"]
user_interaction = i["_source"]["User_Interaction"]
# NOTE(review): a new MISP client is constructed on every iteration; it
# could be created once before the loop.  url/key are placeholders.
url = "https://misp_site"
key = "misp_authkey"
misp_verifycert = False
misp = ExpandedPyMISP(url, key, misp_verifycert)
event = MISPEvent()
event.info = "Vulnerability Report - Platform: "+product+": "+severity+""
# event.publish = True
# self.sharing_group_id = "2"
# self.sharing_group_name = "CKN"
#event.sharing_group_id = "2"
# event.sharing_group_id = "1"
# sharing_group_uuid = "73c83703-3e60-4c5c-91d8-776ce30fae86"
# event.sharing_group.name = "CKN"
# Default distribution; widened below for CRITICAL/HIGH findings.
event.distribution = "0"
# event.sharing_group_id = "1"
# event.sharing_group_name = "CKN"
# event.sharing_group_uuid = "CKN
event.analysis = "1"
# CRITICAL/HIGH findings get the top threat level, are published at once
# and distributed more widely; everything else stays unpublished.
if(severity == "CRITICAL" or severity == "HIGH"):
event.threat_level_id = "1"
event.published = True
event.distribution = "2"
# event.sharing_group_id = "1"
else:
event.threat_level_id = "2"
event.published = False
event.add_tag('tlp:white')
event.add_tag('CVE')
# for a in cve.split(","):
# event.add_attribute('vulnerability', str(a))
# event.add_attribute('other', str(assigner), comment="Assigner", disable_correlation=True, to_ids=False)
# Attach each extracted field as a non-correlating attribute; only the CVE
# identifier itself is allowed to correlate across events.
event.add_attribute('other', str(attack_complexity), comment="Attack Complexity", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(attack_vector), comment="Attack Vector", disable_correlation=True, to_ids=False)
event.add_attribute('vulnerability', str(cve), comment="CVE", disable_correlation=False, to_ids=False)
event.add_attribute('other', str(cvssv3_base_score), comment="CVSSV3 Base Score", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(cvssv3_vector_string), comment="CVSSV3 Vector String", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(confidentiality_impact), comment="Confidentiality Impact", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(description), comment="Description", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(integrity_impact), comment="Integrity Impact", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(last_modified), comment="Last Modified", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(privilege_required), comment="Privilege Required", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(product), comment="Platform", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(published), comment="Published", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(scope), comment="Scope", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(severity), comment="Severity", disable_correlation=True, to_ids=False)
event.add_attribute('other', str(user_interaction), comment="User Interaction", disable_correlation=True, to_ids=False)
# if(cvssv3_base >= 8):
# Push the assembled event to the MISP instance.
event = misp.add_event(event)
# print(event)
|
# Generated by Django 2.0.1 on 2018-01-22 06:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
"""Auto-generated initial migration for the Resourcesmanagement app:
creates the CDN/domain/application tables and their constraints.
Generated by Django; by convention the operation list is left exactly as
generated.
"""
initial = True
dependencies = [
]
operations = [
# Application domain records (linked below to app / region / type / cover).
migrations.CreateModel(
name='ApplicationDomainTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(max_length=50, verbose_name='域名')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发表时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
],
options={
'verbose_name': '应用域名表',
'verbose_name_plural': '应用域名表',
'ordering': ['domain'],
},
),
# Applications.
migrations.CreateModel(
name='ApplicationTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app', models.CharField(max_length=30, verbose_name='应用')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发表时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
],
options={
'verbose_name': '应用表',
'verbose_name_plural': '应用表',
'ordering': ['app'],
},
),
# CDN providers.
migrations.CreateModel(
name='CDNTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('CDN', models.CharField(max_length=30, verbose_name='CDN名称')),
('CDN_Remarks', models.TextField(blank=True, null=True, verbose_name='备注')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发表时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
],
options={
'verbose_name': 'CDN表',
'verbose_name_plural': 'CDN表',
'ordering': ['CDN'],
},
),
# Scheduling/control domains, each tied to a CDN.
migrations.CreateModel(
name='ControlDomainTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ControlDomain', models.CharField(max_length=30, verbose_name='调度控制域名')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发表时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
('CDN', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Resourcesmanagement.CDNTable')),
],
options={
'verbose_name': '调度控制域名表',
'verbose_name_plural': '调度控制域名表',
'ordering': ['ControlDomain'],
},
),
# Scheduling/control IPv4 addresses, each tied to a CDN.
migrations.CreateModel(
name='ControlIpTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ControlIp', models.GenericIPAddressField(protocol='ipv4', verbose_name='调度ip')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发表时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
('CDN', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Resourcesmanagement.CDNTable')),
],
options={
'verbose_name': '调度控制ip表',
'verbose_name_plural': '调度控制ip表',
'ordering': ['ControlIp'],
},
),
# Coverage descriptions.
migrations.CreateModel(
name='CoverTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cover', models.CharField(max_length=50, verbose_name='覆盖情况')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发表时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
],
options={
'verbose_name': '覆盖情况',
'verbose_name_plural': '覆盖情况',
},
),
# CDN regions, each tied to a CDN.
migrations.CreateModel(
name='RegionTable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('region', models.CharField(max_length=30, verbose_name='域')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发表时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
('CDN', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Resourcesmanagement.CDNTable')),
],
options={
'verbose_name': 'CDN域表',
'verbose_name_plural': 'CDN域表',
'ordering': ['region'],
},
),
# Domain/resource type lookup.
migrations.CreateModel(
name='restype',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('restype', models.CharField(max_length=50, verbose_name='类型')),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='发表时间')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='更新时间')),
],
options={
'verbose_name': '域名类型表',
'verbose_name_plural': '域名类型表',
},
),
# Uniqueness constraints.
migrations.AlterUniqueTogether(
name='cdntable',
unique_together={('CDN',)},
),
migrations.AlterUniqueTogether(
name='applicationtable',
unique_together={('app',)},
),
# Foreign keys from ApplicationDomainTable to its related tables.
migrations.AddField(
model_name='applicationdomaintable',
name='app',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Resourcesmanagement.ApplicationTable', verbose_name='应用'),
),
migrations.AddField(
model_name='applicationdomaintable',
name='region',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Resourcesmanagement.RegionTable', verbose_name='域'),
),
migrations.AddField(
model_name='applicationdomaintable',
name='res_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Resourcesmanagement.restype', verbose_name='类型'),
),
migrations.AddField(
model_name='applicationdomaintable',
name='resoucescover',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Resourcesmanagement.CoverTable', verbose_name='覆盖情况'),
),
# Remaining uniqueness constraints (added after the FK fields exist).
migrations.AlterUniqueTogether(
name='regiontable',
unique_together={('region',)},
),
migrations.AlterUniqueTogether(
name='controliptable',
unique_together={('ControlIp',)},
),
migrations.AlterUniqueTogether(
name='controldomaintable',
unique_together={('ControlDomain',)},
),
migrations.AlterUniqueTogether(
name='applicationdomaintable',
unique_together={('domain', 'region', 'app')},
),
]
|
#!/usr/bin/env python
#from localOptionsAll import *
from localOptionsAll import *
## datasets = [
# 'MET',
# 'MET_data_Reco',
## 'AMSB_mGrav50K_0p5ns_Reco',
## 'AMSB_mGrav50K_1ns_Reco',
## 'AMSB_mGrav50K_5ns_Reco',
## 'Wjets',
## 'ZJetsToNuNu',
## 'TTbar',
## 'QCD',
## 'DY',
## 'Diboson',
## ]
# Configuration for BNTree production — presumably consumed by the analysis
# framework imported above (confirm against localOptionsAll).
BNTreeUseScript = True
BNTreeScript = 'BNTreeMonojetRun.C'
BNTreeChannel = 'MonoJet'
# Per-event weight expression; excludes the lumi weight, which is included
# automatically.
BNTreeWt = 'events_puScaleFactor * events_muonScaleFactor * events_electronScaleFactor'
# No prebuilt input histograms are supplied.
input_histograms = []
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 17:26:37 2019
@author: matteo
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as sts
import math
import os
import glob
import warnings
import potion.envs
import gym
# Gym environment built at import time (module-level side effect).
env = gym.make("LQ-v0")
# NOTE(review): small_eps is not referenced anywhere in the visible code.
small_eps = 1e-6
def bootstrap_ci(x, conf=0.95, resamples=10000):
    """Bootstrap confidence interval for the column means of a 2-D array.

    x is resampled with replacement along its first axis `resamples`
    times; the (1 - conf) tail mass is split evenly between the two
    sides.  NaNs in the resulting bounds are replaced with zeros.
    Returns (low, high) arrays, one entry per column of x.
    """
    n_rows = x.shape[0]
    resampled_means = []
    for _ in range(resamples):
        rows = np.random.choice(n_rows, size=n_rows, replace=True)
        resampled_means.append(np.mean(x[rows, :], axis=0))
    low_q = (1 - conf) / 2 * 100
    high_q = (1 - (1 - conf) / 2) * 100
    low = np.nan_to_num(np.percentile(resampled_means, low_q, axis=0))
    high = np.nan_to_num(np.percentile(resampled_means, high_q, axis=0))
    return low, high
def plot_all(dfs, key='Perf', name='', xkey=None):
    """Plot column `key` of every dataframe on the current axes.

    When xkey is None the x axis is simply the row number; otherwise the
    dataframe's xkey column is used.  Returns the list of line artists.
    """
    lines = []
    for frame in dfs:
        ys = frame[key]
        xs = frame[xkey] if xkey is not None else range(len(ys))
        handle, = plt.plot(xs, ys, label=name)
        lines.append(handle)
    plt.xlabel('Iterations')
    plt.ylabel(key)
    return lines
def moments(dfs):
    """Row-wise mean and standard deviation across a list of dataframes.

    Frames are aligned by row index; std NaNs (e.g. a single run) are
    replaced with 0.  Returns (mean_df, std_df).
    """
    grouped = pd.concat(dfs, sort=True).groupby(level=0)
    mean_df = grouped.mean()
    std_df = grouped.std().fillna(0)
    return mean_df, std_df
def plot_ci(dfs, key='Perf', conf=0.95, name='', xkey=None, bootstrap=False, resamples=10000, mult=1., stds=1.):
    """Plot the across-run mean of column *key* with a confidence band.

    Parameters
    ----------
    dfs : list of per-run pandas DataFrames, aligned on the row index.
    key : column to aggregate and plot.
    conf : confidence level; ``None`` plots mean +/- ``stds``*std instead.
    name : legend label for the mean line.
    xkey : column for the x axis; row index if None, index*100 if missing.
    bootstrap : use bootstrap_ci instead of a Student-t interval.
    resamples : bootstrap resample count.
    mult : multiplicative rescaling of the plotted values.
    stds : band half-width (in stds) used only when ``conf is None``.

    Returns the matplotlib handle of the mean line.

    Fix: ``conf == None`` comparisons replaced with ``conf is None`` (PEP 8).
    """
    n_runs = len(dfs)
    mean_df, std_df = moments(dfs)
    if "TotSamples" in mean_df:
        # Averaging runs of different lengths can make the averaged sample
        # counter locally decrease; force it to be monotone non-decreasing.
        for i in range(1, len(mean_df["TotSamples"])):
            mean_df.at[i, "TotSamples"] = max(mean_df["TotSamples"][i-1], mean_df["TotSamples"][i])
    mean = mean_df[key] * mult
    std = std_df[key] * mult
    if xkey is None:
        xx = range(len(mean))
    elif xkey in mean_df:
        xx = mean_df[xkey]
    else:
        # xkey requested but not logged: assume one logged row per 100 units.
        xx = np.array(range(len(mean))) * 100
    line, = plt.plot(xx, mean, label=name)
    if conf is None:
        interval = (mean - std * stds, mean + std * stds)
    elif bootstrap:
        data = np.array([df[key] * mult for df in dfs])
        interval = bootstrap_ci(data, conf, resamples)
    else:
        with warnings.catch_warnings():
            # std can be exactly 0 for degenerate columns; silence the warning.
            warnings.filterwarnings("ignore", message="invalid value encountered in multiply")
            interval = sts.t.interval(conf, n_runs - 1, loc=mean, scale=std / math.sqrt(n_runs))
    plt.fill_between(xx, interval[0], interval[1], alpha=0.3)
    print('%s: %f +- %f' % (name, np.mean(mean), np.mean(std)))
    return line
def save_csv(env, name, key, conf=0.95, path='.', rows=200, batchsize=500, xkey=None, bootstrap=False, resamples=10000, mult=1., step=1, stds=1.):
    """Aggregate the logged runs of ``<env>_<name>`` and dump a plot-ready CSV.

    Loads every matching run via load_all, averages column *key* across runs,
    computes a confidence band (t-interval, bootstrap, or mean +/- stds*std
    when conf is None) and writes a headerless CSV with columns
    (x, mean, low, high), thinned by *step*.

    NOTE(review): ``batchsize`` is accepted but never used here — confirm
    callers before removing.  ``conf==None`` should be ``conf is None``
    (left untouched in this documentation pass).
    """
    dfs = load_all(env + '_' + name, rows)
    n_runs = len(dfs)
    mean_df, std_df = moments(dfs)
    if "TotSamples" in mean_df:
        # Force the averaged sample counter to be monotone non-decreasing.
        for i in range(1,len(mean_df["TotSamples"])):
            mean_df.at[i, "TotSamples"] = max(mean_df["TotSamples"][i-1], mean_df["TotSamples"][i])
    mean = mean_df[key].values * mult
    # +1e-24 keeps the t-interval scale strictly positive for constant columns.
    std = std_df[key].values * mult + 1e-24
    if conf==None:
        interval = (mean - std*stds, mean + std*stds)
    elif bootstrap:
        data = np.array([df[key] * mult for df in dfs])
        interval = bootstrap_ci(data, conf, resamples)
    else:
        interval = sts.t.interval(conf, n_runs-1,loc=mean,scale=std/math.sqrt(n_runs))
    low, high = interval
    if rows is not None:
        mean = mean[:rows]
        low = low[:rows]
        high = high[:rows]
    if xkey is None:
        xx = range(len(mean))
    elif xkey in mean_df:
        xx = mean_df[xkey]
    else:
        # xkey requested but never logged: assume one row per 100 units.
        xx = np.array(range(len(mean))) * 100
    # Replace non-finite band edges by the mean so downstream plots don't break.
    for i in range(len(mean)):
        if not np.isfinite(low[i]):
            low[i] = mean[i]
        if not np.isfinite(high[i]):
            high[i] = mean[i]
    plotdf = pd.DataFrame({("it" if xkey is None else xkey): xx, "mean" : mean, "low" : low, "high": high})
    plotdf = plotdf.iloc[0:-1:step]
    print(len(plotdf))
    plotdf.to_csv(path + '/' + env.lower() + '_' + name.lower() + '_' + key.lower() + '.csv', index=False, header=False)
def load_all(name, rows=200):
    """Read every CSV in the cwd whose filename starts with ``<name>_``.

    At most *rows* rows are read per file.  For runs logged before the
    'Oracle' column existed (single-parameter runs: 'param0' present,
    'param1' absent) the oracle performance is reconstructed with the
    module-level LQ ``env``.
    """
    dfs = [pd.read_csv(file, index_col=False, nrows=rows) for file in glob.glob("*.csv") if file.startswith(name + '_')]
    for df in dfs:
        if 'Oracle' not in df and 'param0' in df and 'param1' not in df:
            df['Oracle'] = np.zeros(len(df['param0']))
            for i in range(len(df['param0'])):
                # Exact expected return of the logged parameter; the 1. is
                # presumably a fixed policy std — TODO confirm against env.
                df.at[i,'Oracle'] = env.computeJ(df['param0'][i], 1.)
    return dfs
def compare(env, names, keys=['Perf'], conf=0.95, logdir=None, separate=False, ymin=None, ymax=None, rows=200, xkey=None, xmax=None, bootstrap=False, resamples=10000, mult=None, roll=1., stds=1.):
    """Compare several experiments on common axes, one figure per key.

    For each key a figure is created; every experiment in *names* is loaded
    (optionally after os.chdir(*logdir*)), smoothed with a rolling mean of
    window ``roll[i]`` and drawn either as individual runs (separate=True)
    or as a mean curve with confidence band (plot_ci).

    Returns the list of created figures.

    Fixes: ``type(roll) is int or type(roll) is float`` replaced with
    isinstance; stray concatenation artifact after ``return figures`` removed.
    NOTE: ``keys`` has a mutable default but is never mutated here.
    """
    figures = []
    for key in keys:
        figures.append(plt.figure())
        if ymin is not None and ymax is not None:
            plt.ylim(ymin, ymax)
        if xmax is not None:
            plt.xlim(0, xmax)
        if logdir is not None:
            # Side effect: changes the process cwd so load_all's glob works.
            os.chdir(logdir)
        handles = []
        if isinstance(roll, (int, float)):
            # Broadcast a scalar smoothing window to every experiment.
            roll = [int(roll)] * len(names)
        if mult is None:
            mult = [1.] * len(names)
        for i, name in enumerate(names):
            dfs = load_all(env + '_' + name, rows=rows)
            dfs = [dfs[j].rolling(roll[i]).mean() for j in range(len(dfs))]
            if separate:
                handles += plot_all(dfs, key, name, xkey=xkey)
            else:
                handles.append(plot_ci(dfs, key, conf, name, xkey=xkey, bootstrap=bootstrap, resamples=resamples, mult=mult[i], stds=stds))
        plt.legend(handles=handles)
        plt.show()
    return figures
# coding=utf-8
from unittest import TestCase
from squirrel_play import squirrel_play
class SquirrelPlayTest(TestCase):
    """Unit tests for squirrel_play(temperature, is_summer).

    Fix: removed a stray trailing ``|`` artifact fused onto the last
    assertion, which made the file a syntax error.
    """

    def test_squirrelplay_no_sun(self):
        # 70F without summer is within the playing range.
        self.assertTrue(squirrel_play(70, False))

    def test_squirrelplay_sun(self):
        # 95F is allowed only because summer extends the upper bound.
        self.assertTrue(squirrel_play(95, True))

    def test_squirrelplay_no_sun_up(self):
        # 95F outside summer exceeds the upper bound.
        self.assertFalse(squirrel_play(95, False))

    def test_squirrelplay_no_sun_low(self):
        # 55F is below the lower bound regardless of season.
        self.assertFalse(squirrel_play(55, False))
import maya.cmds as cmds
def createLoc(type):
    # Build a locator hierarchy (and proxy-geometry hierarchy) mirroring the
    # joint chain of an imported template rig.  `type` selects the template:
    # "first" -> biped, "third" -> quadruped.  Python 2 / Maya (maya.cmds).
    # NOTE(review): `type` shadows the builtin; renaming would change the
    # public interface, so it is only flagged here.
    print type
    if cmds.objExists("loc_Grp_1"):
        # A previous run already created the locator group; do nothing.
        print "it exists already"
    else:
        if type == "first":
            filePath = "R:/Jx4/tools/dcc/maya/scripts/autoRigger/importFiles/biped/"
        elif type == "third":
            filePath = "R:/Jx4/tools/dcc/maya/scripts/autoRigger/importFiles/quadruped/"
        fileType = "ma"
        fileObj = "obj"
        masterObj = ''
        files = cmds.getFileList(folder=filePath, filespec='*.%s' % fileType)
        filesObj = cmds.getFileList(folder=filePath, filespec='*.%s' % fileObj)
        print files
        if len(files) == 0:
            cmds.warning("no files found")
        else:
            # Import every template .ma scene (expected to contain root_joint).
            for f in files:
                masterObj = cmds.file(filePath + f, i=True)
            allJoints = []
            jointList = []
            locList = []
            objList = []
            allJoints = cmds.listRelatives('root_joint',ad=True)
            allJoints.append('root_joint')
            print allJoints
            # Select root_joint plus every descendant joint and its parent.
            cmds.select('root_joint')
            cmds.select( cmds.listRelatives( type='joint', fullPath=True, allDescendents=True ), add=True )
            cmds.select( cmds.listRelatives( parent=True, fullPath=True ), add=True )
            sel = cmds.ls ( selection = True, type = 'joint' )
            if not sel :
                cmds.warning( "Please select a joint / No joints in selection " )
                return
            # Group carrying a locScale attribute that drives locator scale.
            locGrp = cmds.group(n="loc_Grp_#", em=True)
            cmds.addAttr ( locGrp, attributeType='double' , longName='locScale' , defaultValue=1.0 , keyable=1 )
            masterLoc = cmds.spaceLocator(n="loc_0")[0]
            cmds.parent( masterLoc, locGrp )
            print " u are here"
            for attr in ["scaleZ", "scaleY", "scaleX"]:
                cmds.connectAttr ( locGrp + ".locScale" , "%s.%s" % ( masterLoc, attr ) )
            is_root_loop = True
            loc_to_rename = masterLoc
            # One locator per joint, snapped to the joint's world transform.
            for jnt in sel:
                #print jnt
                jointList.append(jnt)
                coordsT = cmds.xform ( jnt, query=True, worldSpace=True, t=True )
                coordsR = cmds.xform ( jnt, query=True, worldSpace=True, ro=True)
                cmds.select( masterLoc, replace=True )
                if not is_root_loop:
                    # Subsequent joints get a duplicate of the master locator.
                    loc_to_rename = cmds.duplicate( returnRootsOnly=True , inputConnections=True )[0]
                # No more errors!
                renamed_loc = cmds.rename(str(loc_to_rename), ("loc_" + str(jnt)))
                locList.append(renamed_loc)
                #renamed_locs = renamed_loc.split("loc_",1)[1]
                #_parent = cmds.listRelatives(jnt,p=True)
                #proper_parent = "loc_" + _parent[-1]
                if is_root_loop:
                    masterLoc = renamed_loc
                cmds.xform(t=coordsT )
                cmds.xform(ro=coordsR)
                #cmds.parent(renamed_loc,proper_parent)
                is_root_loop = False
            # Re-parent locators to mirror the joint hierarchy.
            counter = 0
            for joints in jointList:
                getParent = cmds.listRelatives(joints,p=True)
                if not getParent:
                    print "no parent"
                else:
                    properParent = "loc_" + getParent[-1]
                    print properParent
                    properChild = locList[counter]
                    print properChild
                    print counter
                    cmds.parent(properChild,properParent,a=True)
                counter += 1
            count = 0
            # Import the proxy geometry (.obj) template(s).
            for o in filesObj:
                newFile = cmds.file(filePath + o, i=True)
            grabFile = 'joint_visualizer_joint_visualizer'
            objGroup = cmds.group(n='obj_group_#',em=True)
            deleteList = []
            # One proxy object per locator, constrained both ways so the
            # proxy snaps to the locator and then drives it.
            for i in locList:
                dupFile = cmds.duplicate(grabFile)
                cmds.parent(dupFile,objGroup)
                renameFile = cmds.rename(dupFile,'obj_' + str(i))
                count +=1
                print renameFile
                objList.append(renameFile)
                print i
                cmds.parentConstraint(i,renameFile,mo=False)
                const = cmds.listRelatives(renameFile,c=True, type = 'constraint')
                cmds.delete(const)
                cmds.parentConstraint(renameFile,i)
            counterObj=0
            # Re-parent proxies to mirror the joint hierarchy as well.
            for joints in jointList:
                getObjParent = cmds.listRelatives(joints,p=True)
                if not getObjParent:
                    print "no parent"
                else:
                    properObjParent = "obj_loc_" + getObjParent[-1]
                    print properObjParent
                    properObjChild = objList[counterObj]
                    print properObjChild
                    print counterObj
                    cmds.parent(properObjChild,properObjParent,a=True)
                counterObj += 1
            # Cull "end" proxies and everything on the right ("R_") side.
            objGroupList = cmds.listRelatives("obj_group_1",ad=True)
            for obj in objGroupList:
                print obj
                if "end" in obj:
                    deleteList.append(obj)
                    print "goodbye"
                elif "R_" in obj:
                    deleteList.append(obj)
                else:
                    print "you get to live to see another day"
            for locator in locList:
                print locator
                if "R_" in locator:
                    deleteList.append(locator)
            for item in deleteList:
                #print item
                cmds.delete(item)
            # Remove the visualizer template and the imported template joints.
            cmds.delete('joint_visualizer_joint_visualizer')
            for j in jointList:
                cmds.delete(j)
|
# Generated by Django 3.1.2 on 2020-10-29 14:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the blog app.

    Creates Author (1:1 with the auth user), Category, Post, PostContent
    (per-language content rows), PostCategory (explicit M2M join table) and
    threaded Comment.  Auto-generated by Django 3.1.2 — do not hand-edit
    beyond comments.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title_text', models.CharField(max_length=100)),
                ('flags', models.IntegerField(default=0, help_text='AUTHOR_FLAGS_HELP')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category_text', models.CharField(max_length=20)),
                ('order_number', models.IntegerField()),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.author')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('template_text', models.CharField(max_length=20)),
                ('status', models.IntegerField(choices=[(1, 'Draft'), (2, 'Private'), (3, 'Public'), (-1, 'Deleted')], default=1)),
                ('flags', models.IntegerField(default=0, help_text='POST_FLAGS_HELP')),
                ('view_count', models.IntegerField(default=0)),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated_date', models.DateTimeField(blank=True, default=django.utils.timezone.now, help_text='POST_TITLE_HELP')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.author')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PostContent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language_code', models.CharField(choices=[('en', 'En'), ('ja', 'Ja')], default='ja', max_length=2)),
                ('title_text', models.CharField(blank=True, help_text='POST_TITLE_HELP', max_length=100)),
                ('summary_text', models.CharField(blank=True, help_text='POST_TITLE_HELP', max_length=1000, null=True)),
                ('search_text', models.CharField(blank=True, help_text='POST_TITLE_HELP', max_length=5000)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.post')),
            ],
        ),
        migrations.CreateModel(
            name='PostCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.category')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.post')),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name_text', models.CharField(blank=True, max_length=100)),
                ('comment_text', models.TextField(max_length=1000)),
                ('status', models.IntegerField(choices=[(1, 'Unapproved'), (2, 'Approved'), (-1, 'Deleted')], default=1)),
                ('client_text', models.CharField(max_length=20)),
                ('created_date', models.DateTimeField()),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.comment')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.post')),
            ],
        ),
    ]
|
#TRABAJO CONJUNTO REALIZADO POR JOSE LUIS, SANDRA Y JAUME#
version = "1.0"
#Nombre del proyecto - BrisCards
#--------------------------------Este archivo contiene el juego
#Imports
import random
import time
import os
import sys
import msvcrt
#-----------------------------------------------------------------------------------Funciones del sistema operativo
#Borrar pantalla
def borrar_pantalla():
    """Clear the terminal: Windows ``cls``, POSIX ``clear``."""
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
# ___ ___ ______ _
# | \/ | | ___ \ | |
# | . . | ___ _ __ _ _ | |_/ /___| |_ _ __ _ _
# | |\/| |/ _ \ '_ \| | | | | // _ \ __| '__| | | |
# | | | | __/ | | | |_| | | |\ \ __/ |_| | | |_| |
# \_| |_/\___|_| |_|\__,_| \_| \_\___|\__|_| \__, |
# __/ |
# |___/
#---------------------------------------------------Funcion para resetear los valores iniciales de las partidas
def reset_match():
    """Reset all global match state and deal a fresh hand.

    Rebuilds and shuffles the deck, deals three cards each to the player and
    the machine (alternating, like the module-level initial deal), turns over
    the trump card and picks a random starting turn.

    Bug fix: ``cartaDominante`` is now declared global — it used to be a
    function local, so replayed matches kept the previous trump card even
    though the rebuilt deck no longer contained it.
    """
    global nombreMaquina
    global puntosJugador
    global puntosMaquina
    global puntosComprobar1
    global puntosComprobar2
    global acabarPartida
    global Finalizar
    global gana
    global turno
    global cartasjugador
    global cartasmaquina
    global cardlist
    global cartaDominante
    cartasjugador = []
    cartasmaquina = []
    cardlist = []
    nombreMaquina = 'Maquina'
    puntosMaquina = 0
    puntosJugador = 0
    puntosComprobar1 = 0
    puntosComprobar2 = 0
    generar_cartas()
    random.shuffle(cardlist)
    # Deal three cards each, alternating player / machine (cards are unique,
    # so popping the head is equivalent to the original append+remove pairs).
    for _ in range(3):
        cartasjugador.append(cardlist.pop(0))
        cartasmaquina.append(cardlist.pop(0))
    # Turn over the trump ("dominant") card.
    cartaDominante = cardlist.pop(0)
    # Random first turn.
    if random.randint(0, 1) == 0:
        turno = 'Máquina'
    else:
        turno = 'Jugador'
    acabarPartida = False
    Finalizar = False
    gana = 'Nadie'
# ----------------------------------------------------------Function that renders the "Retry" menu
selection = 'Retry'  # currently highlighted menu entry (shared across menus)
def moverse_RetryMenu():
    """Poll the keyboard and drive the end-of-match (retry) menu.

    W/S toggle between 'Retry' and 'Volver'; X confirms: 'Retry' resets the
    match and starts a new game, 'Volver' resets and returns to the main
    menu.  Windows-only (msvcrt).
    """
    global selection
    global runningRM
    global currentMenu
    selectBool = False # This flag keeps the menu from redrawing (flickering) endlessly
    if msvcrt.kbhit():
        key = msvcrt.getch().decode("utf-8").lower()
        if key == "w":
            if selection == 'Retry':
                selection = 'Volver'
            elif selection == 'Volver':
                selection = 'Retry'
            selectBool = True
        elif key == "s":
            # With only two entries, S toggles exactly like W.
            if selection == 'Retry':
                selection = 'Volver'
            elif selection == 'Volver':
                selection = 'Retry'
            selectBool = True
        elif key == "x":
            if selection == 'Retry':
                reset_match()
                jugar()
                runningRM = False
            elif selection == 'Volver':
                runningRM = False
                reset_match()
                currentMenu = 'Principal'
                selection = 'Jugar'
                # NOTE(review): showmenu() is not defined in this chunk —
                # possibly should be mostrar_menuX1(); confirm.
                showmenu()
    if selectBool == True: # Only redraw when the selection actually changed
        RetryMenu()
def RetryMenu():
    """Draw the end-of-match menu box and hand control to its input poller."""
    global selection
    borrar_pantalla()
    print(' __________________________________________________________________')
    print('|                                                                  |')
    print('|            --  F I N   D E   L A   P A R T I D A  --             |')
    print('|__________________________________________________________________|')
    print('|                                                                  |')
    if selection == 'Retry':
        print('|    [x]> Jugar otra vez                                           |')
    else:
        print('|         Jugar otra vez                                           |')
    print('|                                                                  |')
    if selection == 'Volver':
        print('|    [x]> Volver al menu                                           |')
    else:
        print('|         Volver al menu                                           |')
    print('|                                                                  |')
    print('|__________________________________________________________________|')
    print('')
    print('  Controles')
    print(' __________________')
    print('|                  |')
    print('| Arriba: [W]      |')
    print('| Abajo: [S]       |')
    print('| Seleccionar: [X] |')
    print('| Atras: [Z]       |')
    print('|__________________|\n')
    moverse_RetryMenu()
# NOTE(review): module-level — this draws the retry menu once at import time.
# Presumably intended only to initialise runningRM and prime the screen;
# confirm whether the RetryMenu() call here is deliberate.
runningRM = True
RetryMenu()
def ShowRetryMenu():
    """Enter the end-of-match menu and pump its input loop until it exits.

    moverse_RetryMenu() clears the global ``runningRM`` flag when the user
    confirms a choice, which terminates the loop.
    """
    global selection
    global currentMenu
    currentMenu = 'RetryMenu'
    selection = 'Retry'
    RetryMenu()
    while runningRM == True:
        moverse_RetryMenu()
# _____ _ _ _ _ _
# / __ \ | (_) | | | | (_)
# | / \/ ___ __| |_ __ _ ___ __| | ___| | _ _ _ ___ __ _ ___
# | | / _ \ / _` | |/ _` |/ _ \ / _` |/ _ \ | | | | | |/ _ \/ _` |/ _ \
# | \__/\ (_) | (_| | | (_| | (_) | | (_| | __/ | | | |_| | __/ (_| | (_) |
# \____/\___/ \__,_|_|\__, |\___/ \__,_|\___|_| | |\__,_|\___|\__, |\___/
# __/ | _/ | __/ |
# |___/ |__/ |___/
# Variable declarations: shared game state mutated via `global` by the game functions.
cartasjugador = []          # player's hand
cartasmaquina = []          # machine's hand
cardlist = []               # draw pile (the deck)
nombreMaquina = 'Maquina'   # display name of the AI opponent
puntosMaquina = 0           # machine's accumulated points
puntosJugador = 0           # player's accumulated points
puntosComprobar1 = 0        # scratch score for the trick leader's card
puntosComprobar2 = 0        # scratch score for the follower's card
acabarPartida = False       # deck exhausted: final-tricks phase reached
Finalizar = False           # match fully finished
gana = 'Nadie'              # winner of the most recent trick
winner = 'Nadie'            # winner of the whole match
#----------------------------------------Juego
#Generación de todas las cartas
def generar_cartas():
    """Append the full 40-card Spanish deck to the global ``cardlist``.

    Suits in the original order (Bastos, Copas, Oros, Espadas); ranks 1-12
    minus 8 and 9; ranks are zero-padded to two digits so every card string
    slices uniformly as '<NN>-<Suit>' (e.g. '01-Oros', '12-Espadas').

    Fix: collapses four copy-pasted per-suit loops into one loop over the
    suit names — the generated list and its order are identical.
    """
    for palo in ('Bastos', 'Copas', 'Oros', 'Espadas'):
        for numero in range(1, 13):
            if numero == 8 or numero == 9:
                continue  # the Spanish deck has no 8s or 9s
            cardlist.append(str(numero).zfill(2) + '-' + palo)
# Draw the cards from the pack (module-level initial deal).
generar_cartas()
# Shuffle the deck.
random.shuffle(cardlist)
# Deal three cards to the player and three to the machine, alternating.
cartasjugador.append(cardlist[0])
cardlist.remove(cartasjugador[0])
cartasmaquina.append(cardlist[0])
cardlist.remove(cartasmaquina[0])
cartasjugador.append(cardlist[0])
cardlist.remove(cartasjugador[1])
cartasmaquina.append(cardlist[0])
cardlist.remove(cartasmaquina[1])
cartasjugador.append(cardlist[0])
cardlist.remove(cartasjugador[2])
cartasmaquina.append(cardlist[0])
cardlist.remove(cartasmaquina[2])
# Turn over the trump ("dominant") card of the game.
cartaDominante = cardlist[0]
cardlist.remove(cartaDominante)
# Randomly choose who leads the first trick.
elegirPrimerTurno = random.randint(0,1)
if elegirPrimerTurno == 0:
    turno = 'Máquina'
else:
    turno = 'Jugador'
def compruba_puntos(cartasComprobar1, cartasComprobar2):
    """Zero out the sentinel scores of non-scoring ranks before summing.

    Ranks 2, 4, 5, 6 and 7 carry no points; comprobar_ganador temporarily
    assigns them fractional values (0.1-0.5) purely to rank cards within a
    suit, and this helper resets those sentinels in the global scratch
    scores (puntosComprobar1/2) before the trick points are accumulated.

    Fix: two ten-branch copy-pasted elif chains collapsed into membership
    tests — assignments performed are identical.
    """
    global puntosComprobar1
    global puntosComprobar2
    # The rank is the two leading zero-padded digits of the card string.
    if int(cartasComprobar1[0:2]) in (2, 4, 5, 6, 7):
        puntosComprobar1 = 0
    if int(cartasComprobar2[0:2]) in (2, 4, 5, 6, 7):
        puntosComprobar2 = 0
def comprobar_ganador(turnoQuien, jugadorsacas, cartasmaquinas):
    """Score one trick, announce the winner, and give them the next lead.

    NOTE on the calling convention (grounded in turno_jugador/turno_maquina):
    the LEADER's card is always passed first, so when turnoQuien=='Máquina',
    ``jugadorsacas`` actually holds the machine's card and ``cartasmaquinas``
    the player's — the parameter names are misleading but the logic relies
    on this order.
    """
    global puntosJugador
    global puntosMaquina
    global turno
    global cartasmaquina
    global puntosComprobar1
    global puntosComprobar2
    global gana
    # Check points
    cartasComprobar1 = jugadorsacas
    cartasComprobar2 = cartasmaquinas
    # Score the leader's card: 10/11/12/1/3 -> 2/3/4/11/10 points; the
    # pointless ranks get fractional sentinels used only to order cards.
    if int(cartasComprobar1[0:2]) == 10:
        puntosComprobar1 = 2
    elif int(cartasComprobar1[0:2]) == 11:
        puntosComprobar1 = 3
    elif int(cartasComprobar1[0:2]) == 12:
        puntosComprobar1 = 4
    elif int(cartasComprobar1[0:2]) == 1:
        puntosComprobar1 = 11
    elif int(cartasComprobar1[0:2]) == 3:
        puntosComprobar1 = 10
    elif int(cartasComprobar1[0:2]) == 2:
        puntosComprobar1 = 0.1
    elif int(cartasComprobar1[0:2]) == 4:
        puntosComprobar1 = 0.2
    elif int(cartasComprobar1[0:2]) == 5:
        puntosComprobar1 = 0.3
    elif int(cartasComprobar1[0:2]) == 6:
        puntosComprobar1 = 0.4
    elif int(cartasComprobar1[0:2]) == 7:
        puntosComprobar1 = 0.5
    # Score the follower's card the same way.
    if int(cartasComprobar2[0:2]) == 10:
        puntosComprobar2 = 2
    elif int(cartasComprobar2[0:2]) == 11:
        puntosComprobar2 = 3
    elif int(cartasComprobar2[0:2]) == 12:
        puntosComprobar2 = 4
    elif int(cartasComprobar2[0:2]) == 1:
        puntosComprobar2 = 11
    elif int(cartasComprobar2[0:2]) == 3:
        puntosComprobar2 = 10
    # NOTE(review): this `if` breaks the elif chain above, but is harmless:
    # a card rank cannot match both chains at once.
    if int(cartasComprobar2[0:2]) == 2:
        puntosComprobar2 = 0.1
    elif int(cartasComprobar2[0:2]) == 4:
        puntosComprobar2 = 0.2
    elif int(cartasComprobar2[0:2]) == 5:
        puntosComprobar2 = 0.3
    elif int(cartasComprobar2[0:2]) == 6:
        puntosComprobar2 = 0.4
    elif int(cartasComprobar2[0:2]) == 7:
        puntosComprobar2 = 0.5
    # RESULT
    # If the follower's suit differs from the leader's suit
    if cartasComprobar1[3:] != cartasComprobar2[3:]:
        # Follower did not play trump: the leader wins the trick.
        if cartasComprobar2[3:] != cartaDominante[3:]:
            if turnoQuien == 'Máquina':
                gana = 'Máquina'
                compruba_puntos(cartasComprobar1, cartasComprobar2)
                puntosMaquina += int(puntosComprobar1) + int(puntosComprobar2)
            else:
                gana = 'Jugador'
                compruba_puntos(cartasComprobar1, cartasComprobar2)
                puntosJugador += int(puntosComprobar1) + int(puntosComprobar2)
        # Follower played trump
        elif cartasComprobar2[3:] == cartaDominante[3:]:
            if cartasComprobar1[3:] != cartaDominante[3:]:
                # Trump beats a non-trump lead: the follower wins.
                if turnoQuien == 'Máquina':
                    gana = 'Jugador'
                    compruba_puntos(cartasComprobar1, cartasComprobar2)
                    puntosJugador += int(puntosComprobar1) + int(puntosComprobar2)
                else:
                    gana = 'Máquina'
                    compruba_puntos(cartasComprobar1, cartasComprobar2)
                    puntosMaquina += int(puntosComprobar1) + int(puntosComprobar2)
    # Same suit: the higher-ranked card (by the sentinel/point values) wins.
    else:
        # Leader's card outranks the follower's
        if float(puntosComprobar1) > float(puntosComprobar2):
            if turnoQuien == 'Máquina':
                gana = 'Máquina'
                compruba_puntos(cartasComprobar1, cartasComprobar2)
                puntosMaquina += int(puntosComprobar1) + int(puntosComprobar2)
            else:
                gana = 'Jugador'
                compruba_puntos(cartasComprobar1, cartasComprobar2)
                puntosJugador += int(puntosComprobar1) + int(puntosComprobar2)
        # Follower's card outranks the leader's
        else:
            if turnoQuien == 'Máquina':
                gana = 'Jugador'
                compruba_puntos(cartasComprobar1, cartasComprobar2)
                puntosJugador += int(puntosComprobar1) + int(puntosComprobar2)
            else:
                gana = 'Máquina'
                compruba_puntos(cartasComprobar1, cartasComprobar2)
                puntosMaquina += int(puntosComprobar1) + int(puntosComprobar2)
    # Show the round result and wait for the player.
    borrar_pantalla()
    print('Carta dominante: ' + cartaDominante + '          Puntos maquina:' + str(puntosMaquina) + '   Puntos jugador:' + str(puntosJugador))
    print()
    print('          EL GANADOR DE ESTA RONDA ES : ' + gana)
    print('Tus cartas')
    print('[1] ' + cartasjugador[0])
    if len(cartasjugador) > 1:
        print('[2] ' + cartasjugador[1])
    else:
        print('[2] - ')
    if len(cartasjugador) > 2:
        print('[3] ' + cartasjugador[2])
    else:
        print('[3] - ')
    print('__________________________________')
    choice = input('Pulsa [ENTER] para continuar')
    # Trick winner leads the next trick.
    turno = gana
#---------------------------------------------------------------Se define la pantalla en caso de que el jugador continue el turno
def turno_jugador():
    """Play one trick with the player leading.

    Prompts until a valid card index is chosen, shows both cards, scores the
    trick via comprobar_ganador('Jugador', ...), then refills both hands from
    the deck (trick winner draws first; the trump card is the very last draw,
    which also flips ``acabarPartida``).
    """
    choice = 'x'
    global puntosJugador
    global puntosMaquina
    global cartaDominante
    global acabarPartida
    global Finalizar
    if acabarPartida == True:
        Finalizar = True
    finishTurn = False
    while finishTurn == False:
        borrar_pantalla()
        print('Carta dominante: ' + cartaDominante + '          Puntos maquina:' + str(puntosMaquina) + '   Puntos jugador:' + str(puntosJugador))
        print()
        print()
        print('Tus cartas')
        print('[1] ' + cartasjugador[0])
        if len(cartasjugador) > 1:
            print('[2] ' + cartasjugador[1])
        else:
            print('[2] - ')
        if len(cartasjugador) > 2:
            print('[3] ' + cartasjugador[2])
        else:
            print('[3] - ')
        print('__________________________________')
        choice = input('Escoje una carta, 1, 2, o 3 : ')
        # Only slots that actually hold a card are accepted.
        if choice == '1':
            jugadorsaca = cartasjugador[0]
            finishTurn = True
        elif choice == '2' and len(cartasjugador) > 1:
            jugadorsaca = cartasjugador[1]
            finishTurn = True
        elif choice == '3' and len(cartasjugador) > 2:
            jugadorsaca = cartasjugador[2]
            finishTurn = True
    borrar_pantalla()
    print('Carta dominante: ' + cartaDominante + '          Puntos maquina:' + str(puntosMaquina) + '   Puntos jugador:' + str(puntosJugador))
    print()
    # The machine always answers with the first card of its hand.
    print('          Jugador saca: ' + jugadorsaca + '   Maquina saca: ' + cartasmaquina[0] )
    print('Tus cartas')
    print('[1] ' + cartasjugador[0])
    if len(cartasjugador) > 1:
        print('[2] ' + cartasjugador[1])
    else:
        print('[2] - ')
    if len(cartasjugador) > 2:
        print('[3] ' + cartasjugador[2])
    else:
        print('[3] - ')
    print('__________________________________')
    choice = input('Pulsa [ENTER] para continuar')
    comprobar_ganador('Jugador', jugadorsaca, cartasmaquina[0])
    # Remove the played cards from the player's and the machine's hands
    cartasjugador.remove(jugadorsaca)
    cartasmaquina.remove(cartasmaquina[0])
    # Refill both hands from the main deck (trick winner draws first)
    if not cardlist:
        pass
    else:
        if gana == 'Jugador':
            cartasjugador.append(cardlist[0])
            cardlist.remove(cardlist[0])
            if cardlist:
                cartasmaquina.append(cardlist[0])
                cardlist.remove(cardlist[0])
            else:
                # Deck empty: the loser picks up the trump card itself.
                cartasmaquina.append(cartaDominante)
                acabarPartida = True
        else:
            cartasmaquina.append(cardlist[0])
            cardlist.remove(cardlist[0])
            if cardlist:
                cartasjugador.append(cardlist[0])
                cardlist.remove(cardlist[0])
            else:
                cartasjugador.append(cartaDominante)
                acabarPartida = True
#-----------------------------------Se define la pantalla en caso de que la maquina continue el turno
def turno_maquina():
    """Play one trick with the machine leading.

    The machine leads with the first card of its hand; the player answers
    with a chosen card.  Scoring goes through
    comprobar_ganador('Máquina', machine_card, player_card) — note the
    leader's card is passed first.  Hands are then refilled as in
    turno_jugador.
    """
    choice = 'x'
    global puntosJugador
    global puntosMaquina
    global cartaDominante
    global acabarPartida
    global Finalizar
    finishTurn = False
    if not cartasjugador:
        Finalizar = True
    while finishTurn == False:
        borrar_pantalla()
        print('Carta dominante: ' + cartaDominante + '          Puntos maquina:' + str(puntosMaquina) + '   Puntos jugador:' + str(puntosJugador))
        print()
        print('          Maquina saca: ' + cartasmaquina[0] )
        print('Tus cartas')
        print('[1] ' + cartasjugador[0])
        if len(cartasjugador) > 1:
            print('[2] ' + cartasjugador[1])
        else:
            print('[2] - ')
        if len(cartasjugador) > 2:
            print('[3] ' + cartasjugador[2])
        else:
            print('[3] - ')
        print('__________________________________')
        choice = input('Escoje una carta, 1, 2, o 3 : ')
        # Only slots that actually hold a card are accepted.
        if choice == '1':
            jugadorsaca = cartasjugador[0]
            finishTurn = True
        elif choice == '2' and len(cartasjugador) > 1:
            jugadorsaca = cartasjugador[1]
            finishTurn = True
        elif choice == '3' and len(cartasjugador) > 2:
            jugadorsaca = cartasjugador[2]
            finishTurn = True
    borrar_pantalla()
    print('Carta dominante: ' + cartaDominante + '          Puntos maquina:' + str(puntosMaquina) + '   Puntos jugador:' + str(puntosJugador))
    print()
    print('          Maquina saca: ' + cartasmaquina[0] + '   Jugador saca: ' + jugadorsaca + '  ' )
    print('Tus cartas')
    print('[1] ' + cartasjugador[0])
    if len(cartasjugador) > 1:
        print('[2] ' + cartasjugador[1])
    else:
        print('[2] - ')
    if len(cartasjugador) > 2:
        print('[3] ' + cartasjugador[2])
    else:
        print('[3] - ')
    print('__________________________________')
    choice = input('Pulsa [ENTER] para continuar')
    comprobar_ganador('Máquina', cartasmaquina[0], jugadorsaca)
    # Remove the played cards from the player's and the machine's hands
    cartasjugador.remove(jugadorsaca)
    cartasmaquina.remove(cartasmaquina[0])
    # Refill both hands from the main deck (trick winner draws first)
    if not cardlist:
        pass
    else:
        if gana == 'Jugador':
            cartasjugador.append(cardlist[0])
            cardlist.remove(cardlist[0])
            if cardlist:
                cartasmaquina.append(cardlist[0])
                cardlist.remove(cardlist[0])
            else:
                # Deck empty: the loser picks up the trump card itself.
                cartasmaquina.append(cartaDominante)
                acabarPartida = True
        else:
            cartasmaquina.append(cardlist[0])
            cardlist.remove(cardlist[0])
            if cardlist:
                cartasjugador.append(cardlist[0])
                cardlist.remove(cardlist[0])
            else:
                cartasjugador.append(cartaDominante)
                acabarPartida = True
#----------------------------------------------------------------Actualiza el archivo de los resultados
def actualizar_resultados():
    """Persist the win counters to ``Config/results.bcd``.

    The file holds two lines: the player's win count then the machine's.
    It is created with zeroed counters on first use, read, the counter of
    the current ``winner`` ('JUGADOR' or anything else -> machine) is
    incremented, and the file is rewritten.

    Fix: all three file handles are now managed with ``with`` blocks so they
    are closed even if a read/parse raises (the originals leaked on error).
    """
    global winner
    if not os.path.exists('Config/results.bcd'): # If the file does not exist, create a fresh one
        with open('Config/results.bcd', 'w') as file:
            file.write("0\n")
            file.write('0')
    with open("Config/results.bcd", "r") as file: # Read the stored results
        currentUserMark = int(file.readline())
        currentMachMark = int(file.readline())
    if winner == 'JUGADOR':
        currentUserMark += 1
    else:
        currentMachMark += 1
    with open("Config/results.bcd", "w") as file: # Write the updated results back
        file.write(str(currentUserMark) + "\n")
        file.write(str(currentMachMark))
#---------------------------------------------------------------------------Bucle del juego
def jugar():
    """Main game loop: alternate tricks until the player's hand is empty.

    When the hand runs out the match winner is decided on points, the result
    file is updated, a final screen is shown and control passes to the retry
    menu.  NOTE(review): the local ``exit`` shadows the builtin — harmless
    here, flagged only.
    """
    global winner
    running = True
    while running:
        if not cartasjugador:
            # Hand empty -> match over; ties go to the machine.
            if puntosJugador > puntosMaquina:
                winner = 'JUGADOR'
            else:
                winner = 'MÁQUINA'
            exit = False
            actualizar_resultados()
            while exit == False:
                borrar_pantalla()
                print('Carta dominante: ' + cartaDominante + '          Puntos maquina:' + str(puntosMaquina) + '   Puntos jugador:' + str(puntosJugador))
                print()
                print('          EL GANADOR DE LA PARTIDA ES: ' + winner)
                print()
                print()
                print()
                print()
                print('__________________________________')
                choice = input('Pulsa [1] para continuar')
                if choice == '1':
                    running = False
                    exit = True
                    ShowRetryMenu()
        if running:
            if turno == 'Jugador':
                turno_jugador()
            elif turno == 'Máquina':
                turno_maquina()
# ___ ___ ______ _ _ _
# | \/ | | ___ \ (_) (_) | |
# | . . | ___ _ __ _ _ | |_/ / __ _ _ __ ___ _ _ __ __ _| |
# | |\/| |/ _ \ '_ \| | | | | __/ '__| | '_ \ / __| | '_ \ / _` | |
# | | | | __/ | | | |_| | | | | | | | | | | (__| | |_) | (_| | |
# \_| |_/\___|_| |_|\__,_| \_| |_| |_|_| |_|\___|_| .__/ \__,_|_|
# | |
# |_|
#--------------------------------Funciones del menu principal
#------------------------------------------------------Comentarios de los integrantes
# 25/11/2020 - Jose: Le ha dado un toque personal al menú
# añadiendo un poco de ASCII
#
# [!] import msvcrt -> La app solo correrá
# en windows.
#-------------------------------------------------------------------------------------Variables del menu
selection = 'Jugar'  # currently highlighted main-menu entry
exitMenu = 'False'   # NOTE(review): initialised as the STRING 'False' but later assigned the bool True in moverse_menuX1 — never read in this chunk; confirm before fixing
#-------------------------------------------------------------------------------------Funciones de movimiento
#------------------------------------------------------Funcion para moverse dentro del menu principal<
def moverse_menuX1():
    """Poll the keyboard and drive the main menu.

    W/S cycle through Jugar -> Registro -> Instrucciones -> Salir (wrapping);
    X activates the highlighted entry.  Windows-only (msvcrt).
    """
    global selection
    global currentMenu
    global exitMenu
    selectBool = False # This flag keeps the menu from redrawing (flickering) endlessly
    if msvcrt.kbhit():
        key = msvcrt.getch().decode("utf-8").lower()
        if key == "w":
            if selection == 'Jugar':
                selection = 'Salir'
            elif selection == 'Registro':
                selection = 'Jugar'
            elif selection == 'Instrucciones':
                selection = 'Registro'
            elif selection == 'Salir':
                selection = 'Instrucciones'
            selectBool = True
        elif key == "s":
            if selection == 'Jugar':
                selection = 'Registro'
            elif selection == 'Registro':
                selection = 'Instrucciones'
            elif selection == 'Instrucciones':
                selection = 'Salir'
            elif selection == 'Salir':
                selection = 'Jugar'
            selectBool = True
        elif key == "x":
            if selection == 'Instrucciones':
                selection = 'Rules'
                currentMenu = 'Instrucciones'
                mostrar_menuX2()
            elif selection == 'Jugar':
                exitMenu = True
                jugar()
            elif selection == 'Salir':
                borrar_pantalla()
                sys.exit()
            elif selection == 'Registro':
                currentMenu = 'Registro'
                mostrar_menuX3()
    if selectBool == True: # Only redraw the menu when the selection actually changed
        mostrar_menuX1()
#-------------------------------------------------------Funcion para moverse dentro del menu de instrucciones<
def moverse_menuX2():
    """Poll the keyboard and drive the instructions submenu.

    W/S cycle through Rules -> Puntuacion -> Volver (wrapping); X opens the
    highlighted screen or returns to the main menu; Z always goes back.
    Windows-only (msvcrt).
    """
    global selection
    global currentMenu
    selectBool = False # This flag keeps the menu from redrawing (flickering) endlessly
    if msvcrt.kbhit():
        key = msvcrt.getch().decode("utf-8").lower()
        if key == "w":
            if selection == 'Rules':
                selection = 'Volver'
            elif selection == 'Puntuacion':
                selection = 'Rules'
            elif selection == 'Volver':
                selection = 'Puntuacion'
            selectBool = True
        elif key == "s":
            if selection == 'Rules':
                selection = 'Puntuacion'
            elif selection == 'Puntuacion':
                selection = 'Volver'
            elif selection == 'Volver':
                selection = 'Rules'
            selectBool = True
        elif key == "x":
            if selection == 'Rules':
                currentMenu = 'Rules'
                mostrar_menuX2_1()
            elif selection == 'Puntuacion':
                currentMenu = 'Puntuacion'
                mostrar_menuX2_2()
            elif selection == 'Volver':
                selection = 'Instrucciones'
                currentMenu = 'Principal'
                mostrar_menuX1()
        elif key == "z":
            # Z is a shortcut back to the main menu from anywhere here.
            selection = 'Instrucciones'
            currentMenu = 'Principal'
            mostrar_menuX1()
    if selectBool == True: # Only redraw the menu when the selection actually changed
        mostrar_menuX2()
#----------------------------------------------------Funcion para salir del menu de reglas del juego<
def moverse_menuX2_1():
    """Wait for [X]/[Z] on the rules screen, then return to the instructions menu."""
    global selection
    global currentMenu
    if not msvcrt.kbhit():
        return
    pressed = msvcrt.getch().decode("utf-8").lower()
    if pressed in ("x", "z"):
        selection = 'Rules'
        currentMenu = 'Instrucciones'
        mostrar_menuX2()
#---------------------------------------------------Funcion para salir del menu de Puntuacion de cartas<
def moverse_menuX2_2():
    """Wait for X or Z on the card-scoring screen and return to the
    instructions menu when either is pressed."""
    global selection
    global currentMenu
    if msvcrt.kbhit():
        pressed = msvcrt.getch().decode("utf-8").lower()
        if pressed in ("x", "z"):
            selection = 'Puntuacion'
            currentMenu = 'Instrucciones'
            mostrar_menuX2()
#-------------------------------------------------------------Funcion para salir del menu de Resultados<
def moverse_menuX3():
    """Wait for X or Z on the results screen and return to the main menu
    when either is pressed."""
    global selection
    global currentMenu
    if msvcrt.kbhit():
        pressed = msvcrt.getch().decode("utf-8").lower()
        if pressed in ("x", "z"):
            selection = 'Registro'
            currentMenu = 'Principal'
            mostrar_menuX1()
#------------------------------------------------------------------Funciones de Pantallas
#------------------------------------------------------Mostrar el menu principal<
def mostrar_menuX1():
    """Draw the main menu, marking the entry held in the global
    `selection`, then hand control to moverse_menuX1() for input."""
    global selection
    borrar_pantalla()
    print(' __________________________________________________________________')
    print('|                                 |                                |')
    print('|             BRISCARD            |          version 1.0           |')
    print('|_________________________________|________________________________|')
    print('|                                                                  |')
    # Each entry is printed highlighted ([x]>) only when currently selected.
    if selection == 'Jugar':
        print('|   [x]> Jugar                                                     |')
    else:
        print('|        Jugar                                                     |')
    print('|                                                                  |')
    if selection == 'Registro':
        print('|   [x]> Registros                                                 |')
    else:
        print('|        Registros                                                 |')
    print('|                                                                  |')
    if selection == 'Instrucciones':
        print('|   [x]> Instrucciones                                             |')
    else:
        print('|        Instrucciones                                             |')
    print('|                                                                  |')
    if selection == 'Salir':
        print('|   [x]> Salir                                                     |')
    else:
        print('|        Salir                                                     |')
    print('|                                                                  |')
    print('|__________________________________________________________________|')
    print('')
    print('      Controles')
    print(' __________________')
    print('|                  |')
    print('|   Arriba:    [W] |')
    print('|   Abajo:     [S] |')
    print('| Seleccionar: [X] |')
    print('|   Atras:     [Z] |')
    print('|__________________|\n')
    moverse_menuX1()
#--------------------------------------------------------------------------------------------------------------------------------Mostrar el menu de instrucciones<
def mostrar_menuX2():
    """Draw the instructions submenu (Rules / Puntuacion / Volver),
    highlighting `selection`, then delegate input to moverse_menuX2()."""
    global selection
    borrar_pantalla()
    print(' __________________________________________________________________')
    print('|                                 |                                |')
    print('|             BRISCARD            |         Instrucciones          |')
    print('|_________________________________|________________________________|')
    print('|                                                                  |')
    if selection == 'Rules':
        print('|   [x]> Reglas de los turnos                                      |')
    else:
        print('|        Reglas de los turnos                                      |')
    print('|                                                                  |')
    if selection == 'Puntuacion':
        print('|   [x]> Puntuacion de las cartas                                  |')
    else:
        print('|        Puntuacion de las cartas                                  |')
    print('|                                                                  |')
    if selection == 'Volver':
        print('|   [x]> Volver                                                    |')
    else:
        print('|        Volver                                                    |')
    print('|                                                                  |')
    print('|__________________________________________________________________|')
    print('')
    print('      Controles')
    print(' __________________')
    print('|                  |')
    print('|   Arriba:    [W] |')
    print('|   Abajo:     [S] |')
    print('| Seleccionar: [X] |')
    print('|   Atras:     [Z] |')
    print('|__________________|\n')
    moverse_menuX2()
#-----------------------------------------------------------------------------------------------------------------------------Mostrar el menu de reglas del juego<
def mostrar_menuX2_1():
    """Draw the static turn-rules screen, then wait for input via
    moverse_menuX2_1()."""
    global selection
    borrar_pantalla()
    print(' __________________________________________________________________')
    print('|                                 |                                |')
    print('|             BRISCARD            |     Reglas de los turnos       |')
    print('|_________________________________|________________________________|')
    print('|                                                                  |')
    print('|  Al iniciar la partida el turno se elige aleatoriamente, pero    |')
    print('|  cuando la primera ronda termina, empieza sacando carta el       |')
    print('|  jugador que ha ganado anteriormente.                            |')
    print('|                                                                  |')
    print('|  Ese jugador seguira sacando carta primero hasta que pierda.     |')
    print('|                                                                  |')
    print('|__________________________________________________________________|')
    print('')
    print('      Controles')
    print(' __________________')
    print('|                  |')
    print('|   Arriba:    [W] |')
    print('|   Abajo:     [S] |')
    print('| Seleccionar: [X] |')
    print('|   Atras:     [Z] |')
    print('|__________________|\n')
    moverse_menuX2_1()
#-------------------------------------------------------Mostrar el menu de puntuacion de las cartas<
def mostrar_menuX2_2():
    """Draw the static card-scoring screen, then wait for input via
    moverse_menuX2_2()."""
    global selection
    borrar_pantalla()
    print(' __________________________________________________________________')
    print('|                                 |                                |')
    print('|             BRISCARD            |    Puntuacion de las cartas    |')
    print('|_________________________________|________________________________|')
    print('|                                                                  |')
    print('|        01 => 11 pts                   06 => -- pts               |')
    print('|        02 => -- pts                   07 => -- pts               |')
    print('|        03 => 10 pts                   10 =>  2 pts               |')
    print('|        04 => -- pts                   11 =>  3 pts               |')
    print('|        05 => -- pts                   12 =>  4 pts               |')
    print('|                                                                  |')
    print('|  El jugador que saca primero decide el palo que debe sacar       |')
    print('|  el adversario, si este saca un palo diferente y no es del mismo |')
    print('|  que la carta dominante, pierde automaticamente.                 |')
    print('|                                                                  |')
    print('|  En cambio, si la carta que saca el adversario es del palo       |')
    print('|  dominante y la del otro jugador no lo era, gana siempre aquel   |')
    print('|  que ha lanzado la carta del palo dominante.                     |')
    print('|                                                                  |')
    print('|  En el caso de que ambos hayan lanzado cartas del mismo palo     |')
    print('|  se tendrá en cuenta la puntuacion de las cartas para determinar |')
    print('|  el ganador de la ronda.                                         |')
    print('|__________________________________________________________________|')
    print('')
    print('      Controles')
    print(' __________________')
    print('|                  |')
    print('|   Arriba:    [W] |')
    print('|   Abajo:     [S] |')
    print('| Seleccionar: [X] |')
    print('|   Atras:     [Z] |')
    print('|__________________|\n')
    moverse_menuX2_2()
#-------------------------------------------------------Mostrar el menu de los resultados<
def mostrar_menuX3():
    """Draw the results ("Registro") screen with the stored win counts.

    Creates Config/results.bcd with zeroed scores on first run; the file
    holds two lines: the player's wins, then the machine's wins.
    Fixes vs. the original: the results file was read twice back-to-back
    and never closed (now a single `with` block), and the alignment
    padding was built with counting loops (now string arithmetic).
    """
    global selection
    if not os.path.exists('Config/results.bcd'):  # first run: create zeroed scores
        with open('Config/results.bcd', 'w') as file:
            file.write("0\n")
            file.write('0')
    with open("Config/results.bcd", "r") as file:  # read the stored results
        currentUserMark = int(file.readline())
        currentMachMark = int(file.readline())
    # Pad so the box stays aligned regardless of the score's digit count.
    userSpaces = ' ' * max(0, 17 - len(str(currentUserMark)))
    machSpaces = ' ' * max(0, 14 - len(str(currentMachMark)))
    borrar_pantalla()
    print(' __________________________________________________________________')
    print('|                                 |                                |')
    print('|             BRISCARD            |            Registro            |')
    print('|_________________________________|________________________________|')
    print('|                                                                  |')
    print('|                                                                  |')
    print('|' + machSpaces + 'La máquina ha ganado en total ' + str(currentMachMark) + ' partidas             |')
    print('|                                                                  |')
    print('|' + userSpaces + 'Tu has ganado en total ' + str(currentUserMark) +' partidas                    |')
    print('|                                                                  |')
    print('|__________________________________________________________________|')
    print('')
    print('      Controles')
    print(' __________________')
    print('|                  |')
    print('|   Arriba:    [W] |')
    print('|   Abajo:     [S] |')
    print('| Seleccionar: [X] |')
    print('|   Atras:     [Z] |')
    print('|__________________|\n')
    moverse_menuX3()
#--------------------------------------------------------Muestra la pantalla pricipal
def pantalla_principal():
    """Draw the welcome splash screen and wait for ENTER.

    NOTE(review): moverse_menuX1() is called once before input() — it polls
    a single keypress while the splash is shown; confirm this is intended
    rather than a leftover.
    """
    borrar_pantalla()
    print(' __________________________________________________________________')
    print('|                                                                  |')
    print('|                      BIENVENIDO A BRISCARD                       |')
    print('|__________________________________________________________________|')
    print('|                     ______      _                  _             |')
    print('|                     | ___ \    (_)                | |            |')
    print('|                     | |_/ /_ __ _ ___  ___ __ _ _ __ __| |       |')
    print('|                     | ___ \ __| / __|/ __/ _ | __/ _ |           |')
    print('|                     | |_/ / |  | \__ \ (_| (_| | | | (_| |       |')
    print('|                     \____/|_|  |_|___/\___\__,_|_|  \__,_|       |')
    print('|                                                                  |')
    print('|                  Pulsa [ENTER] para continuar                    |')
    print('|                                                                  |')
    print('|__________________________________________________________________|')
    print('')
    print('      Controles')
    print(' __________________')
    print('|                  |')
    print('|   Arriba:    [W] |')
    print('|   Abajo:     [S] |')
    print('| Seleccionar: [X] |')
    print('|   Atras:     [Z] |')
    print('|__________________|\n')
    moverse_menuX1()
    choice = input('')
#----------------------------------------------------------Loop funcional del menu
def showmenu():
    """Menu dispatch loop.

    Draws the main menu once, then repeatedly polls the input handler
    matching the global `currentMenu` until a handler leaves the program
    (Salir -> sys.exit) or starts a game (jugar).
    Fix vs. the original: the menu was drawn twice at startup (once even
    before currentMenu was initialised); a single draw suffices.
    """
    global currentMenu
    currentMenu = 'Principal'
    exitMenu = False
    mostrar_menuX1()
    while not exitMenu:
        if currentMenu == 'Principal':
            moverse_menuX1()
        elif currentMenu == 'Instrucciones':
            moverse_menuX2()
        elif currentMenu == 'Rules':
            moverse_menuX2_1()
        elif currentMenu == 'Puntuacion':
            moverse_menuX2_2()
        elif currentMenu == 'Registro':
            moverse_menuX3()
        elif currentMenu == 'RetryMenu':
            # NOTE(review): moverse_RetryMenu is not defined in this chunk —
            # confirm it exists elsewhere in the file.
            moverse_RetryMenu()
        time.sleep(0.1)  # polling interval: keeps CPU usage low
    # NOTE(review): exitMenu is local and never set True here; the loop is
    # only left via sys.exit()/jugar() inside the handlers.
# Program entry point: show the splash screen, then run the menu loop.
# (A stray " |" extraction artifact after showmenu() — a syntax error — was removed.)
pantalla_principal()
currentMenu = 'Principal'
showmenu()
# -*- coding: utf-8 -*-
"""
Created on Nov 12 15:22:30 2015
@author: frickjm
"""
import helperFuncs
import matplotlib.pyplot as plt
import skimage.io as io
from skimage.transform import resize
import numpy as np
from os import listdir
from os.path import isdir
from os import mkdir
from os.path import isfile
from random import shuffle
import cPickle
import sys
from sklearn.metrics import mean_squared_error
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad
# Raised limit is presumably needed to cPickle the nested Keras model — TODO confirm.
sys.setrecursionlimit(10000)
np.random.seed(0)  # deterministic shuffles/initialisation across runs
"""*************************************************************************"""
"""*************************************************************************"""
"""get the solubility for training"""
def getTargets():
out = {}
with open("../data/sols.pickle",'rb') as f:
d = cPickle.load(f)
for k,v in d.iteritems():
out[k] = [float(v)]
return out
"""Dump the weights of the model for visualization"""
def dumpWeights(model):
layercount = 0
for layer in model.layers:
try:
weights = model.layers[layercount].get_weights()[0]
size = len(weights)
if size < 100:
with open(folder+"layer"+str(layercount)+".pickle",'wb') as f:
cp = cPickle.Pickler(f)
cp.dump(weights)
else:
pass
except IndexError:
pass
layercount +=1
"""Find out the RMSE of just guessing the mean solubility for comparison purposes"""
def testAverages(direct,targets):
means = np.mean(targets.values(),axis=0)
s = len(means)
ld = listdir(direct)
shuffle(ld)
num = 20000
preds = np.zeros((num,s),dtype=np.float)
y = np.zeros((num,s),dtype=np.float)
count = 0
for x in ld[:num]:
CID = x[:x.find(".png")]
y[count] = targets[CID]
preds[count] = means
count+=1
print "RMSE of guessing: ", np.sqrt(mean_squared_error(y, preds))
"""*************************************************************************"""
"""*************************************************************************"""
"""Require an argument specifying whether this is an update or a new model, parse input"""
update, size, lay1size, run = helperFuncs.handleArgs(sys.argv)
"""Define parameters of the run"""
imdim = size - 20 #strip 10 pixels buffer from each size
direct = "../data/images"+str(size)+"/" #directory containing the images
ld = listdir(direct) #contents of that directory
numEx = len(ld) #number of images in the directory
shuffle(ld) #shuffle the image list for randomness
outType = "solubility" #what the CNN is predicting
DUMP_WEIGHTS = True #will we dump the weights of conv layers for visualization
trainTestSplit = 0.90 #percentage of data to use as training data
batch_size = 32 #how many training examples per batch
chunkSize = 50000 #how much data to ever load at once
testChunkSize = 5000 #how many examples to evaluate per iteration
"""Define the folder where the model will be stored based on the input arguments"""
folder = helperFuncs.defineFolder(outType,size,lay1size,run)
"""Load the train/test split information if update, else split and write out which images are in which dataset"""
trainFs, testFs = helperFuncs.getTrainTestSplit(update,folder,numEx,trainTestSplit)
trainL = len(trainFs)
testL = len(testFs)
print "number of examples: ", numEx
print "training examples : ", trainL
print "test examples : ", testL
#batch_size = 32 #how many training examples per batch
#chunkSize = 5000 #how much data to ever load at once
#testChunkSize = 600 #how many examples to evaluate per iteration
numTrainEx = min(trainL,chunkSize)
targets = helperFuncs.getSolubilityTargets() #get the solubility value for each CID
outsize = len(targets[targets.keys()[0]]) #this it the size of the target (# of targets)
"""Initialize empty matrices to hold our images and our target vectors"""
trainImages = np.zeros((numTrainEx,1,imdim,imdim),dtype=np.float)
trainTargets = np.zeros((numTrainEx,outsize),dtype=np.float)
testImages = np.zeros((testChunkSize,1,imdim,imdim),dtype=np.float)
testTargets = np.zeros((testChunkSize,outsize),dtype=np.float)
"""If we are training a new model, define it"""
if sys.argv[1].lower().strip() == "new":
model = Sequential()
model.add(Convolution2D(32, 1, lay1size, lay1size, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(32, 32, lay1size, lay1size, border_mode='full'))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Convolution2D(32, 32, 5, 5))
model.add(Activation('relu'))
model.add(Convolution2D(64, 32, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Convolution2D(64, 64, 5, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Convolution2D(64, 64, 4, 4))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4096, 512, init='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(512, outsize, init='normal'))
model.compile(loss='mean_squared_error', optimizer='adadelta')
model.set_weights(helperFuncs.getWeights("../OCRfeatures/300_5_1/wholeModel.pickle"))
"""If we are continuing to train an old model, load it"""
if update:
with open(folder+"wholeModel.pickle",'rb') as f:
model = cPickle.load(f)
""" TRAINING """
numIterations = trainL/chunkSize + 1
superEpochs = 100
RMSE = 1000000
oldRMSE = 1000000
for sup in range(0,superEpochs):
shuffle(trainFs)
print "*"*80
print "TRUE EPOCH ", sup
print "*"*80
for i in range(0,numIterations):
print "iteration ",i,": ", i*chunkSize," through ", (i+1)*chunkSize
count = 0
for x in trainFs[i*chunkSize:(i+1)*chunkSize]:
if x.find(".png") > -1:
CID = x[:x.find(".png")]
image = io.imread(direct+x,as_grey=True)[10:-10,10:-10]
#image = np.where(image > 0.1,1.0,0.0)
trainImages[count,0,:,:] = image
trainTargets[count] = targets[CID]
count +=1
model.fit(trainImages, trainTargets, batch_size=batch_size, nb_epoch=1)
if oldRMSE == RMSE:
if DUMP_WEIGHTS:
dumpWeights(model)
with open(folder+"bestModel.pickle", 'wb') as f:
cp = cPickle.Pickler(f)
cp.dump(model)
else:
with open(folder+"wholeModel.pickle", 'wb') as f:
cp = cPickle.Pickler(f)
cp.dump(model)
shuffle(testFs)
count = 0
for x in testFs[:testChunkSize]:
if x.find(".png") > -1:
CID = x[:x.find(".png")]
image = io.imread(direct+x,as_grey=True)[10:-10,10:-10]
#image = np.where(image > 0.1,1.0,0.0)
testImages[count,0,:,:] = image
testTargets[count] = targets[CID]
count +=1
preds = model.predict(testImages)
RMSE = np.sqrt(mean_squared_error(testTargets, preds))
print "RMSE of epoch: ", RMSE
oldRMSE = min(oldRMSE,RMSE)
|
################################################################################
# Copyright 2019-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the LICENSE file for details.
# SPDX-License-Identifier: MIT
#
# Fusion models for Atomic and molecular STructures (FAST)
# File utility functions
################################################################################
import os
def get_files(a_dir):
    """Return the names of the regular files directly inside a_dir."""
    def is_file(entry):
        return os.path.isfile(os.path.join(a_dir, entry))
    return [entry for entry in os.listdir(a_dir) if is_file(entry)]
def get_files_prefix(a_dir, a_prefix):
    """Return names of regular files in a_dir that start with a_prefix."""
    result = []
    for entry in os.listdir(a_dir):
        if entry.startswith(a_prefix) and os.path.isfile(os.path.join(a_dir, entry)):
            result.append(entry)
    return result
def get_files_ext(a_dir, a_ext):
    """Return names of regular files in a_dir that end with a_ext."""
    matches = []
    for entry in os.listdir(a_dir):
        full_path = os.path.join(a_dir, entry)
        if os.path.isfile(full_path) and entry.endswith(a_ext):
            matches.append(entry)
    return matches
def get_files_prefix_ext(a_dir, a_prefix, a_ext):
    """Return regular-file names in a_dir that start with a_prefix and end with a_ext."""
    selected = []
    for entry in os.listdir(a_dir):
        if not entry.startswith(a_prefix):
            continue
        if not entry.endswith(a_ext):
            continue
        if os.path.isfile(os.path.join(a_dir, entry)):
            selected.append(entry)
    return selected
def get_subdirectories(a_dir):
    """Return the names of the immediate subdirectories of a_dir."""
    subdirs = []
    for entry in os.listdir(a_dir):
        if os.path.isdir(os.path.join(a_dir, entry)):
            subdirs.append(entry)
    return subdirs
def get_subdirectories_prefix(a_dir, a_prefix):
    """Return names of immediate subdirectories of a_dir that start with a_prefix."""
    return [entry for entry in os.listdir(a_dir)
            if entry.startswith(a_prefix) and os.path.isdir(os.path.join(a_dir, entry))]
def valid_file(a_path):
    """Return True iff a_path is an existing regular file with non-zero size."""
    if not os.path.isfile(a_path):
        return False
    return os.path.getsize(a_path) > 0
|
#!/usr/bin/python3
import sys;
import argparse;
import random;
##This script is for predecting toss between "Lengaburu" and "Enchai" teams
#function definition: ***toss_result()***
def toss_result(weather_arg=None, match_type_arg=None):
    """Print the toss prediction for both teams.

    Parameters (both optional, for backward compatibility): when omitted,
    the module-level ``weather`` / ``match_type`` globals parsed from the
    command line are used, so existing no-argument call sites keep working.

    Fixes vs. the original: the bitwise ``&`` between comparisons is
    replaced by logical ``and``, and the conditions are parameterized
    instead of reading only globals.
    """
    w = weather if weather_arg is None else weather_arg
    m = match_type if match_type_arg is None else match_type_arg
    for team in ['lengaburu', 'enchai']:
        if team == "lengaburu":
            if w == "clear" and m == "day":
                print("#1-Lengaburu wins toss and Bats")
            elif w == "cloudy" and m == "night":
                print("#1-Lengaburu wins toss and Bowls")
            else:
                print("#1-Lengaburu wins toss and Bats")
        else:
            if w == "clear" and m == "day":
                print("#2-Enchai wins toss and Bowls")
            elif w == "cloudy" and m == "night":
                print("#2-Enchai wins toss and Bats")
            else:
                print("#2-Enchai wins toss and Bats")
# Main entry point: parse the command line and print the toss prediction.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="This script is for predicting toss based on conditions")
    parser.add_argument("weather", help='Enter weather(clear/cloudy) as first argument in command line (lower case)')
    parser.add_argument("match_type", help='Enter match_type(day/night) as second argument in command line (lower case)')
    args = parser.parse_args()
    # Use the parsed arguments (the original read raw sys.argv, defeating
    # argparse's validation and --help handling).
    weather = args.weather
    match_type = args.match_type
    # The original flipped a coin and called toss_result() identically in
    # both branches, so the flip had no effect; a single call is equivalent.
    toss_result()
|
import datetime
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import check_password
from django.shortcuts import get_object_or_404
from ninja import Schema
from ninja.errors import ValidationError
from ninja.orm import create_schema
from pydantic import root_validator
User = get_user_model()
# Read (don't pop) the configured username field: the original's .pop()
# mutated NINJA_AUTH_CONFIG and raised KeyError when the key was absent.
username = settings.NINJA_AUTH_CONFIG.get('USERNAME_FIELD', 'username')
def get_username_field():
    """Return the configured username field name, defaulting to 'username'.

    Fix vs. the original: it used dict.pop(), which removed the key from
    NINJA_AUTH_CONFIG so every subsequent call silently fell back to
    'username' even when a custom field was configured. dict.get() is
    non-destructive and idempotent.
    """
    return settings.NINJA_AUTH_CONFIG.get('USERNAME_FIELD', 'username')
class TokenPayload(Schema):
    """Payload carried inside an auth token: the user's primary key."""
    user_id: int
class Message(Schema):
    """Generic response body holding a human-readable message."""
    message: str
class LoginOutSchema(Schema):
    """Successful-login response: token type, token string and expiry time."""
    type: str
    token: str
    valid_until: datetime.datetime
class LoginSchema(Schema):
    """Login request body: username/password credentials."""
    username: str
    password: str
class RefreshToken(Schema):
    """Request body carrying the token to be refreshed."""
    token: str
class ForgotMyPassword(Schema):
    """Password-reset request: the account's email address."""
    email: str
class PasswordChange(Schema):
    """Password-change request: current and new password.

    Fix: removed a stray " |" extraction artifact that trailed the last
    field and made the class a syntax error.
    """
    old_password: str
    new_password: str
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from .models import Book
# Create your views here.
class BookListView(ListView):
    """List all Book objects using books/book_list.html."""
    model = Book
    context_object_name = 'book_list' # template variable name for the queryset; the default would be object_list
    template_name = 'books/book_list.html'
class BookDetailView(DetailView):
    """Show a single Book using books/book_detail.html.

    Fix: removed a stray " |" extraction artifact that trailed
    template_name and made the class a syntax error.
    """
    model = Book
    context_object_name = 'book'  # template variable name for the object
    template_name = 'books/book_detail.html'
#!/usr/bin/env python
from termcolors import *
import sys
# Demo: print every (background, foreground/light-foreground) combination.
# NOTE(review): tf, tfl and tb are assumed to come from the `termcolors`
# star import above (foreground, light-foreground and background helper
# names) — confirm against termcolors' exports. Python 2 (xrange).
tc = []
for n in xrange(len(tf)): tc.append((tf[n], tfl[n]))
for b in tb:
    for c, lc in tc:
        # helpers are looked up by name and composed: background(color(text))
        sys.stdout.write("%s %s " % \
            (globals()[b](globals()[c]('XXXXX')), globals()[b](globals()[lc]('OOOOO'))))
    print('\n')
|
#!/usr/bin/env python2.7
from __future__ import print_function
# This is a placeholder for a Google-internal import.
from grpc.beta import implementations
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
import base64
import os
tf.app.flags.DEFINE_string('server', 'localhost:9000',
'PredictionService host:port')
tf.app.flags.DEFINE_string('image', '', 'path to image in JPEG format')
FLAGS = tf.app.flags.FLAGS
class Processor:
    """Thin client around a TensorFlow Serving PredictionService stub."""
    # gRPC stub used to issue Predict RPCs
    __stub = None
    # NOTE(review): __vocab and __wrap are never assigned or read in this
    # class — possibly vestigial; confirm before removing.
    __vocab = None
    __wrap = None
    def __init__(self, server, dataDir):
        """Open an insecure gRPC channel to `server` ("host:port").

        `dataDir` is accepted but unused here.
        """
        host, port = server.split(':')
        channel = implementations.insecure_channel(host, int(port))
        self.__stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    def Process(self, modelName, inputList):
        """Send the first (base64-encoded) element of inputList to model
        `modelName` (signature 'predict_images') and return the stringified
        PredictResponse."""
        image = base64.decodestring(inputList[0])
        # Send request
        request = predict_pb2.PredictRequest()
        request.model_spec.name = modelName
        request.model_spec.signature_name = 'predict_images'
        request.inputs['images'].CopyFrom(
            tf.contrib.util.make_tensor_proto(image, shape=[1]))
        result = self.__stub.Predict(request, 10.0)  # 10 secs timeout
        print(result)
        myresult = str(result)
        return myresult
if __name__ == '__main__':
    # Read the image from --image, base64-encode it and send it to the
    # 'inception' model on --server.
    p = Processor(FLAGS.server, '')
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(FLAGS.image, 'rb') as image_file:
        image_read = image_file.read()
    image_64_encode = base64.encodestring(image_read)
    ListOfInputs = [image_64_encode]
    result = p.Process('inception', ListOfInputs)
    print(result)
|
from .Instruccion import Instruccion
from .Mensaje import *
from enum import Enum
class TIPO_ARITMETICA(Enum):
    """Arithmetic operation kinds used by Aritmetica.ejecutar.

    Fix: the original declared values with trailing commas (``SUMA = 1,``),
    which made every member's value a 1-tuple except ABSOLUTO. The code
    only relies on member identity and ``.name``, so plain int values are
    equivalent and consistent.
    """
    SUMA = 1
    RESTA = 2
    MULTIPLICACION = 3
    DIVISION = 4
    RESIDUO = 5
    ABSOLUTO = 6
class Aritmetica(Instruccion) :
    """AST node for arithmetic expressions (+, -, *, /, %, abs).

    `izquierda`/`derecha` are child expression nodes; `derecha` is None
    for the unary absolute-value form. `linea`/`columna` locate the node
    for semantic-error reporting via Mensaje.
    """
    def __init__(self, izquierda, derecha, tipo, linea, columna) :
        self.izquierda = izquierda
        self.derecha = derecha
        self.tipo = tipo
        self.linea = linea
        self.columna = columna
    def ejecutar(self,ts,mensajes) :
        """Evaluate this node; append a Mensaje and return None on type errors.

        Note: + concatenates/coerces strings; int/int division uses //.
        NOTE(review): division/modulo by zero is not guarded here and would
        raise — confirm it is validated upstream.
        """
        izq = None if not self.izquierda else self.izquierda.ejecutar(ts,mensajes)
        der = None if not self.derecha else self.derecha.ejecutar(ts,mensajes)
        if izq is None and der is None:
            return None
        # Unary case: only ABSOLUTO is valid with a single operand.
        if der is None:
            if self.tipo == TIPO_ARITMETICA.ABSOLUTO:
                if isinstance(izq,int) or isinstance(izq,float):
                    return abs(izq)
                mensajes.append(Mensaje(TIPO_MENSAJE.SEMANTICO,'No se puede obtener el valor absoluto de una cadena.',self.linea,self.columna))
                return None
            return None
        if self.tipo == TIPO_ARITMETICA.SUMA:
            if (isinstance(izq,int) or isinstance(izq,float)) and (isinstance(der,int) or isinstance(der,float)):
                return izq+der
            if isinstance(izq,str) and isinstance(der,str):
                return izq+der
            if isinstance(izq,str) and (isinstance(der,int) or isinstance(der,float)):
                return izq+str(der)
            if isinstance(der,str) and (isinstance(izq,int) or isinstance(izq,float)):
                return der+str(izq)
        if self.tipo == TIPO_ARITMETICA.RESTA:
            if (isinstance(izq,int) or isinstance(izq,float)) and (isinstance(der,int) or isinstance(der,float)):
                return izq-der
            mensajes.append(Mensaje(TIPO_MENSAJE.SEMANTICO,'No se puede restar una cadena.',self.linea,self.columna))
            return None
        if self.tipo == TIPO_ARITMETICA.MULTIPLICACION:
            if (isinstance(izq,int) or isinstance(izq,float)) and (isinstance(der,int) or isinstance(der,float)):
                return izq*der
            mensajes.append(Mensaje(TIPO_MENSAJE.SEMANTICO,'No se puede multiplicar una cadena.',self.linea,self.columna))
            return None
        if self.tipo == TIPO_ARITMETICA.DIVISION:
            # int/int keeps integer semantics (//); any float operand -> true division
            if isinstance(izq,int) and isinstance(der,int):
                return izq//der
            if (isinstance(izq,int) or isinstance(izq,float)) and (isinstance(der,int) or isinstance(der,float)):
                return izq/der
            mensajes.append(Mensaje(TIPO_MENSAJE.SEMANTICO,'No se puede dividir una cadena.',self.linea,self.columna))
            return None
        if self.tipo == TIPO_ARITMETICA.RESIDUO:
            if (isinstance(izq,int) or isinstance(izq,float)) and (isinstance(der,int) or isinstance(der,float)):
                return izq%der
            mensajes.append(Mensaje(TIPO_MENSAJE.SEMANTICO,'No se puede obtener el residuo de una cadena.',self.linea,self.columna))
            return None
    def getAST_Ascendente(self) :
        """Return Graphviz (dot) node/edge text for this subtree, using the
        ascending-grammar AST shape; nodes are keyed by str(self)."""
        arbol = ""
        if self.derecha is None:
            arbol += '\"abs_'+str(self)+'\"' + '[label=\"abs\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"abs_'+str(self)+'\"\n'
            arbol += '\"PIZQ_'+str(self)+'\"' + '[label=\"(\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"PIZQ_'+str(self)+'\"\n'
            arbol += '\"'+str(self.izquierda)+'\"' + '[label=\"expresion\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"'+str(self.izquierda)+'\"\n'
            arbol += self.izquierda.getAST_Ascendente()
            arbol += '\"PDER_'+str(self)+'\"' + '[label=\")\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"PDER_'+str(self)+'\"\n'
        else:
            arbol += '\"'+str(self.izquierda)+'\"' + '[label=\"expresion\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"'+str(self.izquierda)+'\"\n'
            arbol += self.izquierda.getAST_Ascendente()
            arbol += '\"sig_'+str(self)+'\"' + '[label=\"'+self.tipo.name+'\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"sig_'+str(self)+'\"\n'
            arbol += '\"'+str(self.derecha)+'\"' + '[label=\"expresion\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"'+str(self.derecha)+'\"\n'
            arbol += self.derecha.getAST_Ascendente()
        return arbol
    def getAST_Descendente(self) :
        """Return Graphviz (dot) node/edge text for this subtree, using the
        descending-grammar AST shape (with an extra expresion' node)."""
        arbol = ""
        if self.derecha is None:
            arbol += '\"abs_'+str(self)+'\"' + '[label=\"abs\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"abs_'+str(self)+'\"\n'
            arbol += '\"PIZQ_'+str(self)+'\"' + '[label=\"(\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"PIZQ_'+str(self)+'\"\n'
            arbol += '\"'+str(self.izquierda)+'\"' + '[label=\"expresion\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"'+str(self.izquierda)+'\"\n'
            arbol += self.izquierda.getAST_Descendente()
            arbol += '\"PDER_'+str(self)+'\"' + '[label=\")\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"PDER_'+str(self)+'\"\n'
        else:
            arbol += '\"'+str(self.izquierda)+'\"' + '[label=\"expresion\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"'+str(self.izquierda)+'\"\n'
            arbol += self.izquierda.getAST_Descendente()
            arbol += '\"exp_der_'+str(self.izquierda)+'\"' + '[label=\"expresion\'\"] ;\n'
            arbol +='\"'+str(self)+'\" -> \"exp_der_'+str(self.izquierda)+'\"\n'
            arbol += '\"sig_'+str(self)+'\"' + '[label=\"'+self.tipo.name+'\"] ;\n'
            arbol += '\"exp_der_'+str(self.izquierda)+'\" -> \"sig_'+str(self)+'\"\n'
            arbol += '\"'+str(self.derecha)+'\"' + '[label=\"expresion\"] ;\n'
            arbol += '\"exp_der_'+str(self.izquierda)+'\" -> \"'+str(self.derecha)+'\"\n'
            arbol += self.derecha.getAST_Descendente()
        return arbol
|
# Generated by Django 2.2.1 on 2019-08-09 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (see header) creating the `Pay` table.
    # Don't edit by hand — create a follow-up migration for schema changes.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Pay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pg', models.CharField(max_length=45)),
                ('pay_method', models.CharField(max_length=45)),
                ('name', models.CharField(blank=True, max_length=45, null=True)),
                ('amount', models.IntegerField(blank=True, null=True)),
                ('buyer_email', models.CharField(blank=True, max_length=45, null=True)),
                ('buyer_name', models.CharField(blank=True, max_length=45, null=True)),
                ('buyer_tel', models.CharField(blank=True, max_length=45, null=True)),
                ('buyer_addr', models.CharField(blank=True, max_length=45, null=True)),
                ('buyer_postcode', models.CharField(blank=True, max_length=45, null=True)),
                ('m_redirect_url', models.CharField(blank=True, max_length=45, null=True)),
            ],
            options={
                'managed': True,
            },
        ),
    ]
|
from .parser import xlsxParser
from math import floor
from random import shuffle
from .settings import *
class Classes:
    """Builds class sections from the spreadsheet database: splits each
    subject's students across the blocks that offer it and assigns a
    teacher to every section.

    NOTE(review): SUBJECTLIST and chunkIt come from the `settings` star
    import — confirm their definitions there.
    """
    # Shared spreadsheet reader (class attribute: all instances reuse it).
    parser = xlsxParser()
    def __init__(self, blocks): #constructor and initializing the data from the database
        # blocks: per-block list of subject names offered in that block
        self.blocks = blocks
        # [student_name, [4 chosen subjects]] per row of the choices sheet
        self.subjectChoices = [[name[0].lower(), [name[i].lower() for i in [3, 4, 5, 6]]]
                               for name in self.parser.getDatabase('D7', 'J114', 'Sheet10')]
        # [teacher_name, subject] pairs
        self.teachersList = [[item.lower() for item in teacher] for teacher in self.parser.getDatabase(
            'C5', 'D29', 'Teachers')]
        # [subject, number_of_students] per subject
        self.NoOfStudentsPerSubject = [
            [sub[0], len(sub[1])] for sub in self.getStudentsInSubjects()]
    def addNoOfStudents(self):
        """Split each subject's roster evenly across the sections of that
        subject in self.blocks (each cell becomes [subject, students])."""
        for subject in SUBJECTLIST:
            NoOfSubject = 0 # counter for total sections for this subject
            for block in self.blocks:
                NoOfSubject += block.count(subject)
            for line in self.NoOfStudentsPerSubject: # find students taking each subject and store in studentsInSubject
                if line[0] == subject:
                    studentsInSubject = line[1]
            # chunkIt presumably splits the count into NoOfSubject parts — confirm in settings
            parts = chunkIt(studentsInSubject, NoOfSubject)
            counter = 0
            for i, block in enumerate(self.blocks): # add students
                for j, subjects in enumerate(block):
                    if subjects == subject:
                        self.blocks[i][j] = [subjects, parts[counter]]
                        counter += 1
    def teachAssignment(self):
        """Randomly assign one matching teacher to every section; each
        teacher is used at most once per block."""
        self.classes = self.blocks
        # go through every block in classes
        for i, block in enumerate(self.classes):
            tempTeachersList = self.teachersList[:]
            shuffle(tempTeachersList)
            for j, sub in enumerate(block): # go through every subject in the block
                index = [teacher[1] # find the index of the subject in the tempTeachersList
                         for teacher in tempTeachersList].index(sub[0])
                # insert the teacher from tempTeachersList to classes
                self.classes[i][j].insert(0, tempTeachersList[index][0])
                # remove that teacher from the tempTeachersList
                tempTeachersList.pop(index)
    def getStudentsInSubjects(self): # create a Students In each Subject list from Subject choices of students
        """Return [[subject, [student names]], ...] derived from the
        per-student subject choices."""
        studentsInSubjects = []
        for subject in SUBJECTLIST:
            studentsInSubject = []
            for student in self.subjectChoices:
                if subject in student[1]:
                    studentsInSubject.append(student[0])
            studentsInSubjects.append([subject, studentsInSubject])
        return studentsInSubjects
    def addStudents(self):
        """Attach the concrete student names to each section (consuming
        them from the per-subject roster) and return self.classes."""
        StudentsInSubjects = self.getStudentsInSubjects()
        for i, block in enumerate(self.classes): #adding students in each class
            for j, clas in enumerate(block):
                index = [item[0] for item in StudentsInSubjects].index(clas[1])
                students = StudentsInSubjects[index][1][:clas[2]]
                StudentsInSubjects[index][1] = StudentsInSubjects[index][1][clas[2]:]
                self.classes[i][j].append(students)
        return self.classes
    def getClasses(self): # encapsulation of all the methods from above
        """Run the whole pipeline and return the finished classes list."""
        self.addNoOfStudents()
        self.teachAssignment()
        return self.addStudents()
    def displayClasses(self): #print the classes list in a visually nice way
        """Pretty-print the generated classes, block by block."""
        for blockNo, block in enumerate(self.getClasses()):
            print(f'block {blockNo+1}: ', '\n')
            for clas in block:
                print(clas, '\n')
|
# Cody Holthus - Rip City Robotics
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_table
class TabContents:
    """Builds the dash-bootstrap tab bar and the card shown for each tab."""
    def __init__(self):
        # Tab 1: DUT configuration card with an editable parameter table.
        self.dut_config_tab = dbc.Card(
            dbc.CardBody(
                [
                    html.P("Configure the DUT", className="card-text"),
                    dbc.Button("Save",
                               id="dut_config_button",
                               color="primary"),
                    dash_table.DataTable(
                        id='adding-rows-table',
                        # NOTE(review): dash_table.DataTable normally expects
                        # `columns` to be a list of one dict per column, not a
                        # single dict with list-valued 'name'/'id' — confirm
                        # this renders as intended.
                        columns=[{
                            'name': ['Voltage Source',
                                     'Vmax (Volts)',
                                     'Vmin (Volts',
                                     'Von Delay (ms)'],
                            'id': ['vs','vmax','vmin','von_delay'],
                            'deletable': False,
                            'renamable': True
                        }],
                        editable=True,
                        row_deletable=True
                    ),
                    html.Button('Add Row', id='editing-rows-button', n_clicks=0),
                ]
            ),
            className="mt-3",
        )
        # Tab 2: placeholder content.
        self.test_config_tab = dbc.Card(
            dbc.CardBody(
                [
                    html.P("This is tab 2!", className="card-text"),
                    dbc.Button("Don't click here", color="danger"),
                ]
            ),
            className="mt-3",
        )
        # The tab bar itself; tab_id values are matched in render_tab().
        self.tabs = dbc.Tabs(
            [
                dbc.Tab(label="1 - Configure DUT",
                        tab_id="dut_config_tab"),
                dbc.Tab(label="2 - Configure Test",
                        tab_id="test_config_tab"),
                dbc.Tab(label="3 - Simulate",
                        tab_id="simulate_tab"),
                dbc.Tab(label="4 - Run Test",
                        tab_id="run_test_tab"),
            ],
            id="tabs"
        )
    def get_all_tabs(self):
        """Return the dbc.Tabs component for the layout."""
        return self.tabs
    def render_tab(self,active_tab):
        """Return the card for the active tab id.

        NOTE(review): only 'dut_config_tab' is handled — other tabs fall
        through and return None; confirm whether that is intentional.
        """
        if active_tab == 'dut_config_tab':
            return self.dut_config_tab
|
from __future__ import with_statement
import os
import sys
import datetime
import flask
import simplejson
def todfo(ci):
    """Convert a mongoengine document to a plain dict via its JSON form."""
    return simplejson.loads(ci.to_json())
def todfl(cil):
    """Convert a list of mongoengine documents to plain dicts via JSON."""
    return [simplejson.loads(doc.to_json()) for doc in cil]
# Make the parent directory importable (for mongogut below).
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '../')))
#print "ATH", sys.path
from random import choice
from flask.ext.mongoengine import MongoEngine
from flask.ext.mongoengine.wtf import model_form
from flask_debugtoolbar import DebugToolbarExtension
# Flask application wired to MongoDB ('adsgut') with the debug toolbar enabled.
app = flask.Flask(__name__)
app.config.from_object(__name__)
app.config['MONGODB_SETTINGS'] = {'DB': 'adsgut'}
app.config['TESTING'] = True
# NOTE(review): hard-coded secret key and debug=True are development-only
# settings — must not ship to production.
app.config['SECRET_KEY'] = 'flask+mongoengine=<3'
app.debug = True
app.config['DEBUG_TB_PANELS'] = (
    'flask.ext.debugtoolbar.panels.versions.VersionDebugPanel',
    'flask.ext.debugtoolbar.panels.timer.TimerDebugPanel',
    'flask.ext.debugtoolbar.panels.headers.HeaderDebugPanel',
    'flask.ext.debugtoolbar.panels.request_vars.RequestVarsDebugPanel',
    'flask.ext.debugtoolbar.panels.template.TemplateDebugPanel',
    'flask.ext.debugtoolbar.panels.logger.LoggingPanel',
    'flask.ext.mongoengine.panels.MongoDebugPanel'
)
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
db = MongoEngine()
db.init_app(app)
DebugToolbarExtension(app)
from mongogut import itemsandtags
adsgut=app  # alias used elsewhere in the project
from flask import request, session, g, redirect, url_for, \
abort, render_template, flash, escape, make_response, Blueprint
import datetime
from werkzeug import Response
from mongoengine import Document
from bson.objectid import ObjectId
class MongoEngineJsonEncoder(simplejson.JSONEncoder):
    """JSON encoder that knows MongoEngine/BSON types.

    Documents serialize via to_mongo(), ObjectIds as strings, datetimes as
    ISO strings; any other iterable falls back to a list.
    """
    def default(self, obj):
        if isinstance(obj, Document):
            # raw mongo SON representation of the document
            return obj.to_mongo()
        elif isinstance(obj, ObjectId):
            # Python 2 `unicode` — stringify the 24-hex-char id
            return unicode(obj)
        elif isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()
        # EAFP: anything iterable (querysets, generators) becomes a list;
        # non-iterables fall through to the base class (which raises TypeError).
        try:
            iterable = iter(obj)
        except TypeError:
            pass
        else:
            return list(iterable)
        return simplejson.JSONEncoder.default(self, obj)
def jsonify(*args, **kwargs):
    """Like flask.jsonify, but also serializes Documents/ObjectIds/dates."""
    payload = dict(*args, **kwargs)
    body = simplejson.dumps(payload, cls=MongoEngineJsonEncoder)
    return Response(body, mimetype='application/json')
#BUG; these currently dont have doAborts
#do we need a dictpop? CHECK
#FOR GET
def _dictg(k,d, listmode=False):
val=d.get(k, [None])
if d.has_key(k):
d.pop(k)
if listmode:
return val
else:
return val[0]
#FOR POST
def _dictp(k,d, default=None):
val=d.get(k, default)
if d.has_key(k):
d.pop(k)
return val
def _userpostget(g, postdict):
    """Resolve the acting user for a POST: the 'useras' nick when given,
    otherwise the logged-in user."""
    nick = _dictp('useras', postdict)
    if not nick:
        return g.currentuser
    return g.db.getUserForNick(g.currentuser, nick)
def _userget(g, qdict):
    """Resolve the acting user for a GET request.

    Returns (useras, usernick) where usernick is the acting user's nick when
    the 'userthere' flag is set and False otherwise.
    """
    nick = _dictg('useras', qdict)
    userthere = _dictg('userthere', qdict)
    if nick:
        useras = g.db.getUserForNick(g.currentuser, nick)
    else:
        useras = g.currentuser
    usernick = useras.nick if userthere else False
    return useras, usernick
def _sortget(qdict):
    """Parse the 'sort' param ('field:ascending') into a dict, or None."""
    sortstring = _dictg('sort', qdict)
    if not sortstring:
        return None
    field, ascending = sortstring.split(':')
    return {'field': field, 'ascending': ascending}
#criteria is a multiple ampersand list, with colon separators.
#eg criteria=basic__fqin:eq:something&criteria=
#we create from it a criteria list of dicts
def _criteriaget(qdict):
    """Parse repeated 'criteria' params ('field:op:value') into a list of
    dicts, or False when none were supplied."""
    critlist = _dictg('criteria', qdict, True)
    if not critlist[0]:
        return False
    parsed = []
    for entry in critlist:
        # value may itself contain ':', so split at most twice
        field, op, value = entry.split(':', 2)
        parsed.append({'field': field, 'op': op, 'value': value})
    return parsed
def _queryget(qdict):
    """Parse repeated 'query' params ('field:value') into {field: [values]}.

    Returns {} when no query params were supplied.
    """
    #later we will clamp down on the arbitrary keys. BUG
    querylist = _dictg('query', qdict, True)
    if not querylist[0]:
        return {}
    q = {}
    for entry in querylist:
        # value may contain ':', so split only on the first one
        field, value = entry.split(':', 1)
        # setdefault replaces the py2-only has_key() check
        q.setdefault(field, []).append(value)
    return q
def _pagtupleget(qdict):
    """Parse the 'pagtuple' param ('offset:pagesize') into a list of ints.

    Empty components mean "unspecified" and map to -1; returns None when the
    param is absent. (An unused intermediate split was removed.)
    """
    pagtuplestring = _dictg('pagtuple', qdict)
    if not pagtuplestring:
        return None
    return [int(e) if e else -1 for e in pagtuplestring.split(':')]
#currently force a new items each time.
def _itemsget(qdict):
    # Pop the repeated 'items' GET params; returns [] when none were given.
    itemlist=_dictg('items', qdict, True)
    print "itemlist", itemlist
    if not itemlist[0]:
        return []
    #Possible security hole bug: item names are passed through unvalidated
    return itemlist
def _itemspostget(qdict):
    """Pop 'items' from a POST body; [] when absent or empty."""
    itemlist = _dictp('items', qdict)
    #Possible security hole bug: values are passed through unvalidated
    return itemlist if itemlist else []
def _postablesget(qdict):
    """Pop 'postables' from a POST body; [] when absent or empty."""
    #Possible security hole bug: values are passed through unvalidated
    return _dictp('postables', qdict) or []
#used in POST, not in GET
def _itemstagsget(qdict):
    """Pop 'itemsandtags' from a POST body; [] when absent or empty."""
    #Possible security hole bug: values are passed through unvalidated
    return _dictp('itemsandtags', qdict) or []
#used in POST, not in get
def _tagspecsget(qdict):
    """Pop 'tagspecs' from a POST body; [] when absent or empty."""
    #Possible security hole bug: values are passed through unvalidated
    return _dictp('tagspecs', qdict) or []
#x
@adsgut.before_request
def before_request():
    # Attach per-request DB handles and the acting user onto flask.g.
    username=session.get('username', None)
    print "USER", username
    p=itemsandtags.Postdb(db)
    w=p.whosdb
    g.db=w
    g.dbp=p
    if not username:
        username='adsgut'
    #superuser if no login BUG: use only for testing
    #currently set to sysuser. Otherwise have user login and set.
    g.currentuser=g.db.getUserForNick(None, username)
@adsgut.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in by stashing the posted username in the session."""
    if request.method == 'POST':
        session['username'] = request.form['username']
        session['logged_in'] = True
        flash('You were logged in')
        return redirect(url_for('index'))
    # GET: show the form (no error to report yet)
    return render_template('login.html', error=None, useras=g.currentuser)
@adsgut.route('/logout')
def logout():
    """Clear the login state from the session and redirect home."""
    for key in ('username', 'logged_in'):
        session.pop(key, None)
    flash('You were logged out')
    return redirect(url_for('index'))
#x
@adsgut.route('/')
def index():
    """Render the landing page for the current (possibly default) user."""
    return render_template('index.html', useras=g.currentuser)
#x
@adsgut.route('/all')
def all():
    """Render the index listing everything visible to the current user."""
    me = g.currentuser
    context = {
        'groups': g.db.allGroups(me),
        'apps': g.db.allApps(me),
        'libraries': g.db.allLibraries(me),
        'users': g.db.allUsers(me),
    }
    return flask.render_template('index.html', **context)
#######################################################################################################################
#######################################################################################################################
#Information about users, groups, and apps
#TODO: should we support a modicum of user information for others
#like group and app owners?
#x
@adsgut.route('/user/<nick>')
def userInfo(nick):
    """JSON info for the user with nickname *nick*."""
    return jsonify(user=g.db.getUserInfo(g.currentuser, nick))
#x
@adsgut.route('/user/<nick>/profile/html')
def userProfileHtml(nick):
    """HTML profile page for *nick*."""
    theuser = g.db.getUserInfo(g.currentuser, nick)
    return render_template('userprofile.html', theuser=theuser, useras=g.currentuser)
@adsgut.route('/user/<nick>/postablesuserisin')
def postablesUserIsIn(nick):
    """Groups, libraries and apps *nick* belongs to, split by type."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    allpostables = g.db.postablesForUser(g.currentuser, useras)
    bytype = {'group': [], 'library': [], 'app': []}
    for p in allpostables:
        if p['ptype'] in bytype:
            bytype[p['ptype']].append(p['fqpn'])
    groups = bytype['group']
    # every user is implicitly in the public group and their own default group
    groups.remove("adsgut/group:public")
    groups.remove(useras.nick + "/group:default")
    return jsonify(groups=groups, libraries=bytype['library'], apps=bytype['app'])
#x
@adsgut.route('/user/<nick>/groupsuserisin')
def groupsUserIsIn(nick):
    """Group fqpns *nick* belongs to, minus the implicit ones."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    memberships = g.db.postablesForUser(g.currentuser, useras, "group")
    groups = [m['fqpn'] for m in memberships]
    # the public group and the user's own default group are implicit; hide them
    groups.remove("adsgut/group:public")
    groups.remove(useras.nick + "/group:default")
    return jsonify(groups=groups)
#x
@adsgut.route('/user/<nick>/groupsuserowns')
def groupsUserOwns(nick):
    """Groups owned by *nick*."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    owned = g.db.ownerOfPostables(g.currentuser, useras, "group")
    return jsonify(groups=[p['fqpn'] for p in owned])
#x
@adsgut.route('/user/<nick>/groupsuserisinvitedto')
def groupsUserIsInvitedTo(nick):
    """Groups *nick* has a pending invitation to."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    invites = g.db.postableInvitesForUser(g.currentuser, useras, "group")
    return jsonify(groups=[p['fqpn'] for p in invites])
#x
@adsgut.route('/user/<nick>/appsuserisin')
def appsUserIsIn(nick):
    """Apps *nick* is a member of."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    memberships = g.db.postablesForUser(g.currentuser, useras, "app")
    return jsonify(apps=[p['fqpn'] for p in memberships])
#BUG: not right — currently identical to plain membership
@adsgut.route('/user/<nick>/appsusercanwriteto')
def appsUserCanWriteTo(nick):
    """Apps *nick* can write to (currently just membership; see BUG above)."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    memberships = g.db.postablesForUser(g.currentuser, useras, "app")
    return jsonify(apps=[p['fqpn'] for p in memberships])
#x
@adsgut.route('/user/<nick>/appsuserowns')
def appsUserOwns(nick):
    """Apps owned by *nick*."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    owned = g.db.ownerOfPostables(g.currentuser, useras, "app")
    return jsonify(apps=[p['fqpn'] for p in owned])
#use this for the email invitation?
#x
@adsgut.route('/user/<nick>/appsuserisinvitedto')
def appsUserIsInvitedTo(nick):
    """Apps *nick* has a pending invitation to."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    invites = g.db.postableInvitesForUser(g.currentuser, useras, "app")
    return jsonify(apps=[p['fqpn'] for p in invites])
@adsgut.route('/user/<nick>/librariesuserisin')
def librariesUserIsIn(nick):
    """Libraries *nick* is a member of."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    memberships = g.db.postablesForUser(g.currentuser, useras, "library")
    return jsonify(libraries=[p['fqpn'] for p in memberships])
#BUG: not right — currently identical to plain membership
@adsgut.route('/user/<nick>/librariesusercanwriteto')
def librariesUserCanWriteTo(nick):
    """Libraries *nick* can write to (currently just membership; see BUG)."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    memberships = g.db.postablesForUser(g.currentuser, useras, "library")
    return jsonify(libraries=[p['fqpn'] for p in memberships])
#x
@adsgut.route('/user/<nick>/librariesuserowns')
def librariesUserOwns(nick):
    """Libraries owned by *nick*."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    owned = g.db.ownerOfPostables(g.currentuser, useras, "library")
    return jsonify(libraries=[p['fqpn'] for p in owned])
#use this for the email invitation?
#x
@adsgut.route('/user/<nick>/librariesuserisinvitedto')
def librariesUserIsInvitedTo(nick):
    """Libraries *nick* has a pending invitation to."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    invites = g.db.postableInvitesForUser(g.currentuser, useras, "library")
    return jsonify(libraries=[p['fqpn'] for p in invites])
#BUG currentuser useras here?
@adsgut.route('/user/<nick>/items')
def userItems(nick):
    """Items in *nick*'s personal default group."""
    useras = g.db.getUserInfo(g.currentuser, nick)
    q = {'group': [useras.nick + "/group:default"]}
    num, vals = g.dbp.getItemsForQuery(g.currentuser, useras, q)
    return jsonify(count=num, items=vals)
#######################################################################################################################
#creating groups and apps; accepting invites.
#DELETION methods not there BUG
#BUG: check currentuser useras stuff here
def createPostable(g, request, ptypestr):
    """Create a postable of type *ptypestr* from the request's JSON body.

    Body: name (required), description (optional), useras (optional nick).
    """
    jsonpost = dict(request.json)
    useras = _userpostget(g, jsonpost)
    name = _dictp('name', jsonpost)
    if not name:
        doabort("BAD_REQ", "No Name Specified")
    description = _dictp('description', jsonpost, '')
    spec = {
        'creator': useras.basic.fqin,
        'name': name,
        'description': description,
    }
    return g.db.addPostable(g.currentuser, useras, ptypestr, spec)
@adsgut.route('/group', methods=['POST'])#groupname/description
def createGroup():
    """POST: create a group from JSON {name, description?}."""
    if request.method != 'POST':
        doabort("BAD_REQ", "GET not supported")
    return jsonify(createPostable(g, request, "group"))
@adsgut.route('/app', methods=['POST'])#name/description
def createApp():
    """POST: create an app from JSON {name, description?}."""
    if request.method != 'POST':
        doabort("BAD_REQ", "GET not supported")
    return jsonify(createPostable(g, request, "app"))
@adsgut.route('/library', methods=['POST'])#name/description
def createLibrary():
    """POST: create a library from JSON {name, description?}."""
    if request.method != 'POST':
        doabort("BAD_REQ", "GET not supported")
    return jsonify(createPostable(g, request, "library"))
@adsgut.route('/postable/<powner>/<ptype>:<pname>/changes', methods=['POST'])#user/op
def doPostableChanges(powner, ptype, pname):
    """Membership/ownership changes on a postable.

    JSON body: memberable (the target user: invitee's nick for 'invite',
    your own for 'accept'), op (invite|accept|decline|changeowner|togglerw),
    changerw? (defaults to False).
    """
    #add permit to match user with groupowner
    fqpn=powner+"/"+ptype+":"+pname
    if request.method == 'POST':
        jsonpost=dict(request.json)
        memberable=_dictp('memberable', jsonpost)
        changerw=_dictp('changerw', jsonpost)
        # 'is None' (was ==None): only a missing key defaults to False
        if changerw is None:
            changerw=False
        if not memberable:
            doabort("BAD_REQ", "No User Specified")
        op=_dictp('op', jsonpost)
        if not op:
            doabort("BAD_REQ", "No Op Specified")
        if op=="invite":
            utba, p=g.db.inviteUserToPostable(g.currentuser, g.currentuser, fqpn, memberable, changerw)
            return jsonify({'status':'OK', 'info': {'invited':utba.nick, 'to':fqpn}})
        elif op=='accept':
            me, p=g.db.acceptInviteToPostable(g.currentuser, fqpn, memberable)
            return jsonify({'status':'OK', 'info': {'invited':me.nick, 'to': fqpn, 'accepted':True}})
        elif op=='decline':
            #BUG add something to invitations to mark declines.
            return jsonify({'status': 'OK', 'info': {'invited':memberable, 'to': fqpn, 'accepted':False}})
        elif op=='changeowner':
            #you must be the current owner
            newo, p=g.db.changeOwnershipOfPostable(g.currentuser, g.currentuser, fqpn, memberable)
            return jsonify({'status': 'OK', 'info': {'changedto':memberable, 'for': fqpn}})
        elif op=='togglerw':
            mem, p = g.db.toggleRWForMembership(g.currentuser, g.currentuser, fqpn, memberable)
            return jsonify({'status': 'OK', 'info': {'user':memberable, 'for': fqpn}})
        else:
            # op was present but unrecognized — the old message said
            # "No Op Specified", which was misleading
            doabort("BAD_REQ", "Unknown Op Specified")
    else:
        doabort("BAD_REQ", "GET not supported")
#DEPRECATED: REMOVE — superseded by doPostableChanges
@adsgut.route('/group/<groupowner>/group:<groupname>/doinvitation', methods=['POST'])#user/op
def doInviteToGroup(groupowner, groupname):
    """Deprecated group-invitation endpoint (use /postable/.../changes).

    JSON body: userthere (invitee's nick for 'invite', your own nick for
    'accept'/'decline'), op (invite|accept|decline).
    """
    #add permit to match user with groupowner
    fqgn=groupowner+"/group:"+groupname
    if request.method == 'POST':
        jsonpost=dict(request.json)
        nick=_dictp('userthere', jsonpost)
        if not nick:
            doabort("BAD_REQ", "No User Specified")
        op=_dictp('op', jsonpost)
        if not op:
            doabort("BAD_REQ", "No Op Specified")
        if op=="invite":
            utba, p=g.db.inviteUserToPostableUsingNick(g.currentuser, fqgn, nick)
            return jsonify({'status':'OK', 'info': {'invited':utba.nick, 'to':fqgn}})
        elif op=='accept':
            me, p=g.db.acceptInviteToPostable(g.currentuser, fqgn, nick)
            return jsonify({'status':'OK', 'info': {'invited':me.nick, 'to': fqgn, 'accepted':True}})
        elif op=='decline':
            #BUG add something to invitations to mark declines.
            # (a duplicate, unreachable 'decline' branch was removed here)
            return jsonify({'status': 'OK', 'info': {'invited':nick, 'to': fqgn, 'accepted':False}})
        else:
            doabort("BAD_REQ", "No Op Specified")
    else:
        doabort("BAD_REQ", "GET not supported")
#BUG: user leakage as we do user info for all users in group. another users groups should not be obtainable
#BUG: should this handle a general memberable? must use a SHOWNFIELDS
#BUG: do we want a useras here? Also BUG:no existing version for tag, or POST to changer generable ownerable info yet
def addMemberToPostable(g, request, fqpn):
    """Add the JSON-specified member to postable *fqpn*; returns (user, postable)."""
    jsonpost = dict(request.json)
    #BUG: need fqun right now; work with nicks later
    fqmn = _dictp('member', jsonpost)
    # any falsy changerw value collapses to False
    changerw = _dictp('changerw', jsonpost) or False
    return g.db.addMemberableToPostable(g.currentuser, g.currentuser, fqpn, fqmn, changerw)
def getMembersOfPostable(g, request, fqpn):
    """Return {'users': members} for postable *fqpn*."""
    members = g.db.membersOfPostableFromFqin(g.currentuser, g.currentuser, fqpn)
    return {'users': members}
def getInvitedsForPostable(g, request, fqpn):
    """Return {'users': pending invitees} for postable *fqpn*."""
    inviteds = g.db.invitedsForPostableFromFqin(g.currentuser, g.currentuser, fqpn)
    return {'users': inviteds}
@adsgut.route('/group/<groupowner>/group:<groupname>/inviteds')
def groupInviteds(groupowner, groupname):
    """Pending invitees of a group."""
    fqgn = groupowner + "/group:" + groupname
    return jsonify(getInvitedsForPostable(g, request, fqgn))
@adsgut.route('/library/<libraryowner>/library:<libraryname>/inviteds')
def libraryInviteds(libraryowner, libraryname):
    """Pending invitees of a library."""
    fqln = libraryowner + "/library:" + libraryname
    return jsonify(getInvitedsForPostable(g, request, fqln))
@adsgut.route('/group/<groupowner>/group:<groupname>/members', methods=['GET', 'POST'])#user
def addMembertoGroup_or_groupMembers(groupowner, groupname):
    """POST: add a member to the group; GET: list its members."""
    #add permit to match user with groupowner
    fqgn = groupowner + "/group:" + groupname
    if request.method != 'POST':
        return jsonify(getMembersOfPostable(g, request, fqgn))
    member, group = addMemberToPostable(g, request, fqgn)
    info = {'member': member.basic.fqin, 'type': 'group', 'postable': group.basic.fqin}
    return jsonify({'status': 'OK', 'info': info})
@adsgut.route('/app/<appowner>/app:<appname>/members', methods=['GET', 'POST'])#user
def addMemberToApp_or_appMembers(appowner, appname):
    """POST: add a member to the app; GET: list its members."""
    fqan = appowner + "/app:" + appname
    if request.method != 'POST':
        return jsonify(getMembersOfPostable(g, request, fqan))
    member, app = addMemberToPostable(g, request, fqan)
    info = {'member': member.basic.fqin, 'type': 'app', 'postable': app.basic.fqin}
    return jsonify({'status': 'OK', 'info': info})
#deprecate library-specific route in favor of the generic postable one
@adsgut.route('/library/<libraryowner>/library:<libraryname>/members', methods=['GET', 'POST'])#user
def addMemberToLibrary_or_libraryMembers(libraryowner, libraryname):
    """POST: add a member to the library; GET: list its members."""
    fqln = libraryowner + "/library:" + libraryname
    if request.method != 'POST':
        return jsonify(getMembersOfPostable(g, request, fqln))
    member, library = addMemberToPostable(g, request, fqln)
    info = {'member': member.basic.fqin, 'type': 'library', 'postable': library.basic.fqin}
    return jsonify({'status': 'OK', 'info': info})
@adsgut.route('/postable/<powner>/<ptype>:<pname>/members', methods=['GET', 'POST'])#user
def addMemberToPostable_or_postableMembers(powner, ptype, pname):
    """Generic member add (POST) / member list (GET) for any postable."""
    fqpn = powner + "/" + ptype + ":" + pname
    if request.method != 'POST':
        return jsonify(getMembersOfPostable(g, request, fqpn))
    member, p = addMemberToPostable(g, request, fqpn)
    info = {'member': member.basic.fqin, 'type': ptype, 'postable': p.basic.fqin}
    return jsonify({'status': 'OK', 'info': info})
#######################################################################################################################
#######################################################################################################################
def postable(ownernick, name, ptypestr):
    """Fetch a postable's info plus whether the current user owns it."""
    fqpn = ownernick + "/" + ptypestr + ":" + name
    p = g.db.getPostableInfo(g.currentuser, g.currentuser, fqpn)
    isowner = bool(g.db.isOwnerOfPostable(g.currentuser, g.currentuser, p))
    return p, isowner
#POST/GET in a lightbox?
@adsgut.route('/group/html')
def creategrouphtml():
    # Placeholder: group-creation form (lightbox) not implemented yet.
    pass
#get group info
#x
@adsgut.route('/group/<groupowner>/group:<groupname>')
def groupInfo(groupowner, groupname):
    """JSON info for a group."""
    thegroup, _owner = postable(groupowner, groupname, "group")
    return jsonify(group=thegroup)
#x
@adsgut.route('/postable/<groupowner>/group:<groupname>/profile/html')
def groupProfileHtml(groupowner, groupname):
    """HTML profile page for a group."""
    thegroup, owner = postable(groupowner, groupname, "group")
    return render_template('groupprofile.html', thegroup=thegroup, owner=owner, useras=g.currentuser)
@adsgut.route('/group/<groupowner>/group:<groupname>/filter/html')
def groupFilterHtml(groupowner, groupname):
    """HTML filter view for a group; forwards the raw query string."""
    thegroup, owner = postable(groupowner, groupname, "group")
    return render_template('groupfilter.html', thegroup=thegroup,
                           querystring=request.query_string, owner=owner, useras=g.currentuser)
# @adsgut.route('/group/<groupowner>/group:<groupname>/items')
# def groupItems(groupowner, groupname):
# group=postable(groupowner, groupname, "group")
# num, vals=g.dbp.getItemsForQuery(g.currentuser, g.currentuser,
# {'postables':[group.basic.fqin]} )
# groupdict={'count':num, 'items':[simplejson.loads(v.to_json()) for v in vals]}
# return jsonify(groupdict)
#######################################################################################################################
#######################################################################################################################
#POST/GET in a lightbox?
@adsgut.route('/app/html')
def createapphtml():
    # Placeholder: app-creation form (lightbox) not implemented yet.
    pass
#x
@adsgut.route('/app/<appowner>/app:<appname>')
def appInfo(appowner, appname):
    """JSON info for an app."""
    theapp, _owner = postable(appowner, appname, "app")
    return jsonify(app=theapp)
#x
@adsgut.route('/postable/<appowner>/app:<appname>/profile/html')
def appProfileHtml(appowner, appname):
    """HTML profile page for an app."""
    theapp, owner = postable(appowner, appname, "app")
    return render_template('appprofile.html', theapp=theapp, owner=owner, useras=g.currentuser)
# @adsgut.route('/app/<appowner>/app:<appname>/items')
# def appItems(appowner, appname):
# app=postable(appowner, appname, "app")
# num, vals=g.dbp.getItemsForQuery(g.currentuser, g.currentuser,
# {'postables':[app.basic.fqin]} )
# appdict={'count':num, 'items':[simplejson.loads(v.to_json()) for v in vals]}
# return jsonify(appdict)
#######################################################################################################################
#######################################################################################################################
#POST/GET in a lightbox?
@adsgut.route('/library/html')
def createlibraryhtml():
    # Placeholder: library-creation form (lightbox) not implemented yet.
    pass
#get library info
#x
@adsgut.route('/library/<libraryowner>/library:<libraryname>')
def libraryInfo(libraryowner, libraryname):
    """JSON info for a library."""
    thelibrary, _owner = postable(libraryowner, libraryname, "library")
    return jsonify(library=thelibrary)
#x
@adsgut.route('/postable/<libraryowner>/library:<libraryname>/profile/html')
def libraryProfileHtml(libraryowner, libraryname):
    """HTML profile page for a library."""
    thelibrary, owner = postable(libraryowner, libraryname, "library")
    return render_template('libraryprofile.html', thelibrary=thelibrary, owner=owner, useras=g.currentuser)
@adsgut.route('/library/<libraryowner>/library:<libraryname>/filter/html')
def libraryFilterHtml(libraryowner, libraryname):
    """HTML filter view for a library; forwards the raw query string."""
    thelibrary, owner = postable(libraryowner, libraryname, "library")
    return render_template('libraryfilter.html', thelibrary=thelibrary,
                           querystring=request.query_string, owner=owner, useras=g.currentuser)
@adsgut.route('/postable/<po>/<pt>:<pn>/filter/html')
def postableFilterHtml(po, pt, pn):
    """Generic filter view for any postable.

    pflavor: 'pub' for the public group, 'udg' for a user's default group,
    'pos' otherwise. Default groups are queried by 'stags', everything else
    by 'tagname'.
    """
    querystring = request.query_string
    p, owner = postable(po, pn, pt)
    pflavor = 'pos'
    if pn == 'public' and po == 'adsgut' and pt == 'group':
        pflavor = 'pub'
    if pn == 'default' and pt == 'group':
        tqtype = 'stags'
        pflavor = 'udg'
    else:
        tqtype = 'tagname'
    # (removed a stray unconditional tqtype='tagname' that clobbered the
    # 'stags' value set for default groups above)
    #BUG using currentuser right now. need to support a notion of useras
    return render_template('postablefilter.html', p=p, pflavor=pflavor, querystring=querystring, tqtype=tqtype, useras=g.currentuser, owner=owner)
# @adsgut.route('/library/<libraryowner>/library:<libraryname>/items')
# def libraryItems(libraryowner, libraryname):
# library=postable(libraryowner, libraryname, "library")
# num, vals=g.dbp.getItemsForQuery(g.currentuser, g.currentuser,
# {'postables':[library.basic.fqin]} )
# libdict={'count':num, 'items':[simplejson.loads(v.to_json()) for v in vals]}
# return jsonify(libdict)
#######################################################################################################################
#######################################################################################################################
def _getContext(q):
#BUG:user contexts will be hidden. So this will change
if not q.has_key('cuser'):
return None
context={}
if q['cuser']=="True":
context['user']=True
else:
context['user']=False
if not q.has_key('ctype'):
return None
context['type']=q['ctype']
if not q.has_key('cvalue'):
return None
context['value']=q['cvalue']
return context
#these might be superseded by tagging-based results to populate the left side
#The user's simple tags, not singletonmode (ie no notes), not libraries
@adsgut.route('/user/<nick>/tagsuserowns')
def tagsUserOwns(nick):
    """Simple tags *nick* owns outright (excludes notes)."""
    query = dict(request.args)
    useras, usernick = _userget(g, query)
    tagtype = _dictg('tagtype', query)
    stags = g.dbp.getTagsAsOwnerOnly(g.currentuser, useras, tagtype)
    return jsonify({'simpletags': stags})
#these are the simple tags user owns as well as can write to by dint of giving it to a group.
@adsgut.route('/user/<nick>/tagsusercanwriteto')
def tagsUserCanWriteTo(nick):
    """All simple tags *nick* can write to (owned + group-shared)."""
    query = dict(request.args)
    useras, usernick = _userget(g, query)
    tagtype = _dictg('tagtype', query)
    stags = g.dbp.getAllTagsForUser(g.currentuser, useras, tagtype)
    return jsonify({'simpletags': stags})
@adsgut.route('/user/<nick>/tagsasmember')
def tagsUserAsMember(nick):
    """Simple tags *nick* can use only through a membership."""
    query = dict(request.args)
    useras, usernick = _userget(g, query)
    tagtype = _dictg('tagtype', query)
    stags = g.dbp.getTagsAsMemberOnly(g.currentuser, useras, tagtype)
    return jsonify({'simpletags': stags})
########################
#################now going to tags and posts#################################
#above 3 stags will be superseded, rolled in
#BUG: no multis are done for now.
#POST posts items into postable, get gets items for postable consistent with user.
#ALL ITEMS in POST MUST BE OF SAME TYPE
@adsgut.route('/postable/<pns>/<ptype>:<pname>/items', methods=['GET', 'POST'])
def itemsForPostable(pns, ptype, pname):
    """POST: save items and post them into the postable; GET: query its items.

    POST body: items (names), itemtype, useras?.
    GET query: useras?, userthere?, sort?, pagtuple?, criteria?, query?.
    """
    if request.method=='POST':
        jsonpost=dict(request.json)
        useras = _userpostget(g, jsonpost)
        #henceforth this will be names
        items = _itemspostget(jsonpost)
        itemtype=_dictp('itemtype', jsonpost)
        # was 'upname' (a NameError at runtime); the route variable is pname
        fqpn=pns+"/"+ptype+":"+pname
        pds=[]
        for name in items:
            #saveItem is idempotent, so re-saving an existing item is safe
            itemspec={'name':name, 'itemtype':itemtype}
            i=g.dbp.saveItem(g.currentuser, useras, itemspec)
            i,pd=g.dbp.postItemIntoPostable(g.currentuser, useras, fqpn, i)
            pds.append(pd)
        itempostings={'status':'OK', 'postings':pds, 'postable':fqpn}
        return jsonify(itempostings)
    else:
        query=dict(request.args)
        useras, usernick=_userget(g, query)
        #BUG find a way of having the usernick in this context be from elsewhere
        #the elsewhere would come from postings and taggings, and confine to this group
        #perhaps all the query funcs would need some re-org
        sort = _sortget(query)
        pagtuple = _pagtupleget(query)
        criteria= _criteriaget(query)
        postable= pns+"/"+ptype+":"+pname
        q=_queryget(query)
        # confine results to this postable; setdefault replaces the
        # py2-only has_key() check
        q.setdefault('postables', []).append(postable)
        #By this time query is popped down
        count, items=g.dbp.getItemsForQuery(g.currentuser, useras,
                                            q, usernick, criteria, sort, pagtuple)
        return jsonify({'items':items, 'count':count, 'postable':postable})
@adsgut.route('/library/<libraryowner>/library:<libraryname>/items')
def libraryItems(libraryowner, libraryname):
    """Delegate to the generic postable items handler."""
    return itemsForPostable(libraryowner, "library", libraryname)
@adsgut.route('/app/<appowner>/app:<appname>/items')
def appItems(appowner, appname):
    """Delegate to the generic postable items handler."""
    return itemsForPostable(appowner, "app", appname)
@adsgut.route('/group/<groupowner>/group:<groupname>/items')
def groupItems(groupowner, groupname):
    """Delegate to the generic postable items handler."""
    return itemsForPostable(groupowner, "group", groupname)
#For the RHS, given a set of items. Should this even be exposed as such?
#we need it for post, but does the GET make any sense?
#CHECK: and is it secure?
#this is post tagging into postable for POST
@adsgut.route('/postable/<pns>/<ptype>:<pname>/taggings', methods=['GET', 'POST'])
def taggingsForPostable(pns, ptype, pname):
    """POST: post item taggings into the postable; GET: query its taggings.

    POST body: itemsandtags = [{'fqin':..., 'fqtn':...}, ...], useras?.
    """
    if request.method=='POST':
        jsonpost=dict(request.json)
        useras = _userpostget(g, jsonpost)
        itemsandtags = _itemstagsget(jsonpost)
        # was 'upname' (a NameError at runtime); the route variable is pname
        fqpn=pns+"/"+ptype+":"+pname
        tds=[]
        for d in itemsandtags:
            fqin=d['fqin']
            # key was misspelled 'fgtn'; entries carry fqin/fqtn pairs
            # NOTE(review): confirm no client actually sends 'fgtn'
            fqtn=d['fqtn']
            td=g.dbp.getTaggingDoc(g.currentuser, useras, fqin, fqtn)
            i,t,td=g.dbp.postTaggingIntoPostable(g.currentuser, useras, fqpn, td)
            tds.append(td)
        itemtaggings={'status':'OK', 'taggings':tds, 'postable':fqpn}
        return jsonify(itemtaggings)
    else:
        query=dict(request.args)
        useras, usernick=_userget(g, query)
        sort = _sortget(query)
        criteria= _criteriaget(query)
        postable= pns+"/"+ptype+":"+pname
        q=_queryget(query)
        # confine to this postable; setdefault replaces py2-only has_key()
        q.setdefault('postables', []).append(postable)
        #By this time query is popped down
        count, taggings=g.dbp.getTaggingsForQuery(g.currentuser, useras,
                                                  q, usernick, criteria, sort)
        return jsonify({'taggings':taggings, 'count':count, 'postable':postable})
#GET all tags consistent with user for a particular postable and further query
#Why is this useful? And why tags from taggingdocs?
@adsgut.route('/postable/<pns>/<ptype>:<pname>/tags', methods=['GET'])
def tagsForPostable(pns, ptype, pname):
    """GET the tags visible to the user within one postable."""
    query=dict(request.args)
    useras, usernick=_userget(g, query)
    #sort = _sortget(query)
    criteria= _criteriaget(query)
    postable= pns+"/"+ptype+":"+pname
    q=_queryget(query)
    # confine to this postable; setdefault replaces py2-only has_key()
    q.setdefault('postables', []).append(postable)
    #By this time query is popped down
    count, tags=g.dbp.getTagsForQuery(g.currentuser, useras,
                                      q, usernick, criteria)
    return jsonify({'tags':tags, 'count':count})
#post saveItems(s); get can fetch items for an arbitrary popped-down query
@adsgut.route('/items', methods=['POST', 'GET'])
def items():
    """POST: save a batch of items; GET: query items.

    POST body: items (names), itemtype (required), useras?.
    """
    if request.method=='POST':
        jsonpost = dict(request.json)
        useras = _userpostget(g, jsonpost)
        creator = useras.basic.fqin
        names = _itemspostget(jsonpost)
        itemtype = _dictp('itemtype', jsonpost)
        if not itemtype:
            doabort("BAD_REQ", "No itemtype specified for item")
        for name in names:
            spec = {'creator': creator, 'name': name, 'itemtype': itemtype}
            g.dbp.saveItem(g.currentuser, useras, spec)
        return jsonify({'status': 'OK', 'info': names})
    else:
        query = dict(request.args)
        useras, usernick = _userget(g, query)
        sort = _sortget(query)
        pagtuple = _pagtupleget(query)
        criteria = _criteriaget(query)
        #query has been popped down to the bare field filters by now
        count, found = g.dbp.getItemsForQuery(g.currentuser, useras,
                                              query, usernick, criteria, sort, pagtuple)
        return jsonify({'items': found, 'count': count})
#Get tags for a query. We can use post to just create a new tag. [NOT TO DO TAGGING]
#This is as opposed to tagging an item and would be used in biblio apps and such.
#CHECK: currently get coming from taggingdocs. Not sure about this
#BUG: we should make sure it only allows name based tags
#Will let you create multiple tags
#GET again comes from taggingdocs. Why?
@adsgut.route('/tags', methods=['POST', 'GET'])
def tags():
    """POST: create tags from 'tagspecs'; GET: query tags.

    Each tagspec requires 'name' and 'tagtype'.
    """
    if request.method=='POST':
        jsonpost=dict(request.json)
        useras = _userpostget(g, jsonpost)
        tagspecs=_tagspecsget(jsonpost)
        newtags=[]
        for ti in tagspecs:
            # 'in' replaces py2-only has_key(); the old duplicate name
            # check after validation was redundant and has been folded in
            if 'name' not in ti:
                doabort('BAD_REQ', "No name specified for tag")
            if 'tagtype' not in ti:
                doabort('BAD_REQ', "No tagtypes specified for tag")
            tagspec={'creator': useras.basic.fqin,
                     'name': ti['name'],
                     'tagtype': ti['tagtype']}
            t=g.dbp.makeTag(g.currentuser, useras, tagspec)
            newtags.append(t)
        # the old response referenced an undefined 'i' (NameError); this
        # endpoint creates tags without any item, so report just the tags
        return jsonify({'status':'OK', 'info':{'tags':[t for t in newtags]}})
    else:
        query=dict(request.args)
        useras, usernick=_userget(g, query)
        criteria= _criteriaget(query)
        #By this time query is popped down
        count, tags=g.dbp.getTagsForQuery(g.currentuser, useras,
                                          query, usernick, criteria)
        return jsonify({'tags':tags, 'count':count})
#GET tags for an item or POST: tagItem
#Currently GET coming from taggingdocs: BUG: not sure of this
def _setupTagspec(ti, useras):
#atleast one of name or content must be there (tag or note)
if not (ti.has_key('name') or ti.has_key('content')):
doabort('BAD_REQ', "No name or content specified for tag")
if not ti['tagtype']:
doabort('BAD_REQ', "No tagtypes specified for tag")
tagspec={}
tagspec['creator']=useras.basic.fqin
if ti.has_key('name'):
tagspec['name'] = ti['name']
if ti.has_key('content'):
tagspec['content'] = ti['content']
tagspec['tagtype'] = ti['tagtype']
return tagspec
@adsgut.route('/tags/<ns>/<itemname>', methods=['GET', 'POST'])
def tagsForItem(ns, itemname):
    """POST: upsert the item <ns>/<itemname> and apply the posted tagspecs
    to it. GET: list tags consistent with this item for the acting user."""
    #taginfos=[{tagname/tagtype/description}]
    #q=fieldlist=[('tagname',''), ('tagtype',''), ('context', None), ('fqin', None)]
    ifqin=ns+"/"+itemname
    if request.method == 'POST':
        jsonpost=dict(request.json)
        useras = _userpostget(g, jsonpost)
        itemtype=_dictp('itemtype', jsonpost)
        itemspec={'name':itemname, 'itemtype':itemtype}
        #KEY:IF i have a item it must exist, so this one is NOT used for items not yet there
        #i=g.dbp._getItem(g.currentuser, ifqin)
        # saveItem upserts: creates the item if absent, returns it either way
        i=g.dbp.saveItem(g.currentuser, useras, itemspec)
        tagspecs=_tagspecsget(jsonpost)
        newtaggings=[]
        for ti in tagspecs:
            tagspec=_setupTagspec(ti, useras)
            print "TAGSPEC IS", tagspec
            # tagItem returns the (possibly refreshed) item, tag, and tagging
            i,t,td=g.dbp.tagItem(g.currentuser, useras, i, tagspec)
            newtaggings.append(td)
        #returning the taggings requires a commit at this point
        taggings={'status':'OK', 'info':{'item': i.basic.fqin, 'tagging':[td for td in newtaggings]}}
        return jsonify(taggings)
    else:
        print "REQUEST.args", request.args, dict(request.args)
        query=dict(request.args)
        useras, usernick=_userget(g, query)
        #need to pop the other things like pagetuples etc. Helper funcs needed
        sort = _sortget(query)
        #By this time query is popped down
        #I am not convinced this is how to do this query
        # criteria= _criteriaget(query)
        # criteria.append(['field':'thething__thingtopostfqin', 'op':'eq', 'value':ifqin])
        # count, tags=g.dbp.getTagsForQuery(g.currentuser, useras,
        #            query, usernick, criteria, sort)
        count, tags= g.dbp.getTagsConsistentWithUserAndItems(g.currentuser, useras, [ifqin], sort)
        return jsonify({'tags':tags, 'count':count})
####These are the fromSpec family of functions for GET
#multi item multi tag tagging on POST and get taggings
@adsgut.route('/items/taggings', methods=['POST', 'GET'])
def itemsTaggings():
    """POST: apply every posted tagspec to every posted item (multi x multi).
    GET: fetch taggings consistent with the acting user and an item list."""
    ##name/itemtype/uri/
    #q={useras?, sort?, items}
    if request.method=='POST':
        jsonpost=dict(request.json)
        useras = _userpostget(g, jsonpost)
        items = _itemspostget(jsonpost)
        tagspecs=_tagspecsget(jsonpost)
        itemtype=_dictp('itemtype', jsonpost)
        newtaggings=[]
        # upsert each item, then tag it with every tagspec in the request
        for name in items:
            itemspec={'name':name, 'itemtype':itemtype}
            i=g.dbp.saveItem(g.currentuser, useras, itemspec)
            for ti in tagspecs:
                tagspec=_setupTagspec(ti, useras)
                i,t,td=g.dbp.tagItem(g.currentuser, useras, i, tagspec)
                newtaggings.append(td)
        itemtaggings={'status':'OK', 'taggings':newtaggings}
        return jsonify(itemtaggings)
    else:
        query=dict(request.args)
        useras, usernick=_userget(g, query)
        #need to pop the other things like pagetuples etc. Helper funcs needed
        sort = _sortget(query)
        items = _itemsget(query)
        #By this time query is popped down
        taggingsdict=g.dbp.getTaggingsConsistentWithUserAndItems(g.currentuser, useras,
            items, sort)
        return jsonify(taggingsdict)
#multi item multi postable posting on POST and get posts
@adsgut.route('/items/postings', methods=['POST', 'GET'])
def itemsPostings():
    """POST: post every listed item into every listed postable (multi x multi).
    GET: fetch postings consistent with the acting user and an item list."""
    ##name/itemtype/uri/
    #q={useras?, sort?, items}
    if request.method=='POST':
        jsonpost=dict(request.json)
        useras = _userpostget(g, jsonpost)
        items = _itemspostget(jsonpost)
        fqpns = _postablesget(jsonpost)
        itemtype=_dictp('itemtype', jsonpost)
        pds=[]
        # upsert each item, then post it into every requested postable
        for name in items:
            itemspec={'name':name, 'itemtype':itemtype}
            i=g.dbp.saveItem(g.currentuser, useras, itemspec)
            for fqpn in fqpns:
                i,pd=g.dbp.postItemIntoPostable(g.currentuser, useras, fqpn, i)
                pds.append(pd)
        itempostings={'status':'OK', 'postings':pds}
        return jsonify(itempostings)
    else:
        query=dict(request.args)
        useras, usernick=_userget(g, query)
        print 'QUERY', query
        #need to pop the other things like pagetuples etc. Helper funcs needed
        sort = _sortget(query)
        items = _itemsget(query)
        #By this time query is popped down
        postingsdict=g.dbp.getPostingsConsistentWithUserAndItems(g.currentuser, useras,
            items, sort)
        return jsonify(postingsdict)
@adsgut.route('/items/taggingsandpostings', methods=['POST', 'GET'])
def itemsTaggingsAndPostings():
    """GET: return both taggings and postings consistent with the acting
    user and an item list. POST is not supported."""
    ##name/itemtype/uri/
    #q={useras?, sort?, items}
    if request.method=='POST':
        #BUGFIX: this branch previously assigned a placeholder string and
        #fell through without returning a response, which made Flask raise
        #an internal error; abort with an explicit client error instead.
        doabort('BAD_REQ', "POST is not implemented for taggingsandpostings")
    else:
        query=dict(request.args)
        useras, usernick=_userget(g, query)
        #print 'AAAQUERY', query, request.args
        #need to pop the other things like pagetuples etc. Helper funcs needed
        sort = _sortget(query)
        items = _itemsget(query)
        #By this time query is popped down
        postingsdict=g.dbp.getPostingsConsistentWithUserAndItems(g.currentuser, useras,
            items, sort)
        taggingsdict=g.dbp.getTaggingsConsistentWithUserAndItems(g.currentuser, useras,
            items, sort)
        #print "MEEP",taggingsdict, postingsdict
        return jsonify(postings=postingsdict, taggings=taggingsdict)
@adsgut.route('/itemtypes', methods=['POST', 'GET'])
def itemtypes():
    """Create an item type (POST) or list item types matching a query (GET)."""
    ##useras?/name/itemtype
    #q={useras?, userthere?, sort?, pagetuple?, criteria?, stags|tagnames ?, postables?}
    if request.method=='POST':
        jsonpost=dict(request.json)
        useras = _userpostget(g, jsonpost)
        itspec={}
        itspec['creator']=useras.basic.fqin
        itspec['name'] = _dictp('name', jsonpost)
        if not itspec['name']:
            doabort("BAD_REQ", "No name specified for itemtype")
        itspec['postable'] = _dictp('postable', jsonpost)
        if not itspec['postable']:
            doabort("BAD_REQ", "No postable specified for itemtype")
        newitemtype=g.dbp.addItemType(g.currentuser, useras, itspec)
        return jsonify({'status':'OK', 'info':newitemtype})
    else:
        query=dict(request.args)
        useras, usernick=_userget(g, query)
        criteria= _criteriaget(query)
        # isitemtype distinguishes item types from tag types in the shared query
        isitemtype=True
        count, thetypes=g.dbp.getTypesForQuery(g.currentuser, useras, criteria, usernick, isitemtype)
        return jsonify({'types':thetypes, 'count':count})
#BUG: how to handle bools
@adsgut.route('/tagtypes', methods=['POST', 'GET'])
def tagtypes():
    """Create a tag type (POST) or list tag types matching a query (GET)."""
    ##useras?/name/itemtype
    #q={useras?, userthere?, sort?, pagetuple?, criteria?, stags|tagnames ?, postables?}
    if request.method=='POST':
        jsonpost=dict(request.json)
        useras = _userpostget(g, jsonpost)
        itspec={}
        itspec['creator']=useras.basic.fqin
        itspec['name'] = _dictp('name', jsonpost)
        itspec['tagmode'] = _dictp('tagmode', jsonpost)
        itspec['singletonmode'] = _dictp('singletonmode',jsonpost)
        #NOTE(review): bool() on a JSON *string* treats any non-empty string
        #(including "false") as True; this only behaves correctly if clients
        #send real JSON booleans -- confirm against the API clients (this is
        #the "how to handle bools" BUG noted above).
        if not itspec['tagmode']:
            del itspec['tagmode']
        else:
            itspec['tagmode']=bool(itspec['tagmode'])
        if not itspec['singletonmode']:
            del itspec['singletonmode']
        else:
            itspec['singletonmode']=bool(itspec['singletonmode'])
        if not itspec['name']:
            #BUGFIX: error messages previously said "itemtype"; this route
            #creates tagtypes.
            doabort("BAD_REQ", "No name specified for tagtype")
        itspec['postable'] = _dictp('postable', jsonpost)
        if not itspec['postable']:
            doabort("BAD_REQ", "No postable specified for tagtype")
        newtagtype=g.dbp.addTagType(g.currentuser, useras, itspec)
        return jsonify({'status':'OK', 'info':newtagtype})
    else:
        query=dict(request.args)
        useras, usernick=_userget(g, query)
        criteria= _criteriaget(query)
        isitemtype=False
        count, thetypes=g.dbp.getTypesForQuery(g.currentuser, useras, criteria, usernick, isitemtype)
        return jsonify({'types':thetypes, 'count':count})
@adsgut.route('/itemsinfo')
def itemsinfo():
    """Return minimal item records (name + fqin) for a ':'-separated fqin list
    supplied in the 'items' query parameter."""
    fqins = dict(request.args).get('items', [''])[0].split(':')
    records = [{'basic': {'name': fqin.split('/')[-1], 'fqin': fqin}} for fqin in fqins]
    return jsonify({'items': records, 'count': len(records)})
@adsgut.route('/postform/<itemtypens>/<itemtypename>/html', methods=['POST', 'GET'])
def postForm(itemtypens, itemtypename):
print "NS,NAME", itemtypens, itemtypename
itemtype=itemtypens+"/"+itemtypename
if request.method=='POST':
return "Not Yet Done"
else:
print "ITEMTYPE", itemtype
query=dict(request.args)
querystring=request.query_string
itemstring=query.get('items',[''])[0]
items=itemstring.split(':')
theitems=[]
if itemtype=="ads/pub":
theitems=[{ 'basic':{'name':i.split('/')[-1],'fqin':i}} for i in items]
if itemtype=="ads/search":
theitems=[{ 'basic':{'name':itemstring,'fqin':'ads/'+itemstring}}]
print "THEITEMS", theitems
#How do we BUG get itemtype. we should redofqin to ads/pub:name as the itemtype
#always determines the namespace of the item. This would mean name had to be
#globally unique rather than locally for user usage, unless we have a dual name
#currently get from url
singlemode=False
if len(theitems) ==1:
singlemode=True
#this ought to be got from itemtype, currently BUG hack
nameable=False
if itemtype=="ads/search":
nameable=True
if nameable and singlemode:
nameable=True
return render_template('postform2.html', items=theitems,
querystring=querystring,
singlemode=singlemode,
nameable=nameable,
itemtype=itemtypename,
useras=g.currentuser)
if __name__ == "__main__":
    # Development entry point: serve on all interfaces, port 4000.
    app.run(host="0.0.0.0", port=4000)
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 16 10:47:43 2017
@author: Jon Wee
"""
import numpy as np
#import pandas as pd
import matplotlib.pyplot as plt
#from scipy import stats
## ln consumption growth
samples = 1000
# epsilon ~ N(0, 1): i.i.d. standard-normal consumption-growth shocks.
epsilon = np.random.standard_normal(samples)
# nu: rare-disaster shock -- log(0.65) with probability 1.7%, else 0.
nu = np.random.choice([0, np.log(0.65)], p=[0.983, 0.017], size=samples)
# Log consumption growth: 2% mean, 2% volatility, plus rare disasters.
lng = 0.02 + 0.02*epsilon + nu
########################################################################################
# Hansen-Jagannathan Bound: SD(M)/E(M) of the CRRA stochastic discount
# factor M = 0.99 * g^(-gamma), swept over gamma in [1, 4].
gamma_size = 1000
gamma1 = np.round(np.linspace(1, 4, gamma_size), 3)
Ke_ratio = []
for i in range(gamma_size):
    M = 0.99*(np.exp(lng))**(-gamma1[i])
    SDM = np.std(M)
    MeanM = np.mean(M)
    Ke_ratio = np.append(Ke_ratio, SDM/MeanM)
# Ke_ratio increases with gamma here, so searchsorted yields the smallest
# gamma whose SD(M)/E(M) reaches the 0.4 bound.
idx = np.searchsorted(Ke_ratio, 0.4)
print("Hansen-Jagannathan bound:", Ke_ratio[idx], gamma1[idx])
#########################################################################################
# Price-dividend ratio E[0.99 * g^(1-gamma)] swept over gamma in [1, 7].
gamma2 = np.linspace(1, 7, gamma_size)
Price_Div = []
for i in range(gamma_size):
    xx = np.mean(0.99*(np.exp(lng))**(1-gamma2[i]))
    Price_Div = np.append(Price_Div, xx)
##########################################################################################
# Equity premium: market return (dividend yield times expected growth)
# minus the risk-free rate implied by the SDF.
Equity_Prem = []
for i in range(gamma_size):
    M2 = (0.99*(np.exp(lng))**(-gamma2[i]))
    mkt_ret = (1/Price_Div[i])*(np.mean(np.exp(lng)))
    Rf = 1/(np.mean(M2))
    Equity_Prem = np.append(Equity_Prem, mkt_ret-Rf)
############### GRAPH ##########################################
plt.figure(figsize=(12, 8))
plt.plot(lng)
plt.xlabel('Events', fontsize=20)
plt.ylabel('Consumption Growth', fontsize=20)
plt.title('Consumption Growth with rare disasters', fontsize=20)
# BUGFIX: plt.xlim takes (left, right); the extra third positional argument
# previously passed is rejected by current matplotlib.
plt.xlim(-1, 1000)
############### PART 1 ##########################################
plt.figure(figsize=(12, 8))
plt.plot(gamma1, Ke_ratio, 'b')
plt.plot([0, 5], [0.4, 0.4], 'r', linestyle='--')
plt.plot(gamma1[idx], Ke_ratio[idx], 'ro', markersize=10)
plt.text(gamma1[idx]-0.7, Ke_ratio[idx]+0.005, s='Min Gamma: %s' % np.round(gamma1[idx], 3), fontsize=15)
plt.grid(False)
plt.xlabel('Gamma', fontsize=20)
plt.ylabel('SD(M)/E(M)', fontsize=20)
plt.title('Hansen–Jagannathan Bound', fontsize=20)
plt.show()
############## PART 2 #############################################################
plt.figure(figsize=(12, 8))
plt.plot(gamma2, Price_Div, 'b')
# BUGFIX: was `plt.grid(False), 'b'` -- the stray `, 'b'` built a throwaway
# tuple and did nothing.
plt.grid(False)
plt.xlabel('Gamma', fontsize=20)
plt.ylabel('Price-Dividend Ratio', fontsize=20)
plt.title('Price-Dividend Ratio', fontsize=20)
plt.show()
############## PART 3 #############################################################
plt.figure(figsize=(12, 8))
plt.plot(gamma2, Equity_Prem, 'b')
plt.grid(False)
plt.xlabel('Gamma', fontsize=20)
plt.ylabel('Equity Premium', fontsize=20)
plt.title('Equity Premium', fontsize=20)
plt.show()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import datetime
from airflow.api_connexion.schemas.task_schema import TaskCollection, task_collection_schema, task_schema
from airflow.operators.empty import EmptyOperator
class TestTaskSchema:
    """Serialization test for ``task_schema`` over a single operator."""

    def test_serialize(self):
        """An EmptyOperator dumps to the full expected field dict
        (mostly Airflow defaults plus the explicit dates below)."""
        op = EmptyOperator(
            task_id="task_id",
            start_date=datetime(2020, 6, 16),
            end_date=datetime(2020, 6, 26),
        )
        result = task_schema.dump(op)
        expected = {
            "class_ref": {
                "module_path": "airflow.operators.empty",
                "class_name": "EmptyOperator",
            },
            "depends_on_past": False,
            "downstream_task_ids": [],
            "end_date": "2020-06-26T00:00:00+00:00",
            "execution_timeout": None,
            "extra_links": [],
            "owner": "airflow",
            "operator_name": "EmptyOperator",
            "params": {},
            "pool": "default_pool",
            "pool_slots": 1.0,
            "priority_weight": 1.0,
            "queue": "default",
            "retries": 0.0,
            "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
            "retry_exponential_backoff": False,
            "start_date": "2020-06-16T00:00:00+00:00",
            "task_id": "task_id",
            "template_fields": [],
            "trigger_rule": "all_success",
            "ui_color": "#e8f7e4",
            "ui_fgcolor": "#000",
            "wait_for_downstream": False,
            "weight_rule": "downstream",
            "is_mapped": False,
        }
        assert expected == result
class TestTaskCollectionSchema:
    """Serialization test for ``task_collection_schema`` over a task list."""

    def test_serialize(self):
        """A one-task collection dumps to {'tasks': [...], 'total_entries': 1};
        the explicit ``params`` value serializes as a Param object dict."""
        tasks = [EmptyOperator(task_id="task_id1", params={"foo": "bar"})]
        collection = TaskCollection(tasks, 1)
        result = task_collection_schema.dump(collection)
        expected = {
            "tasks": [
                {
                    "class_ref": {
                        "class_name": "EmptyOperator",
                        "module_path": "airflow.operators.empty",
                    },
                    "depends_on_past": False,
                    "downstream_task_ids": [],
                    "end_date": None,
                    "execution_timeout": None,
                    "extra_links": [],
                    "operator_name": "EmptyOperator",
                    "owner": "airflow",
                    "params": {
                        "foo": {
                            "__class": "airflow.models.param.Param",
                            "value": "bar",
                            "description": None,
                            "schema": {},
                        }
                    },
                    "pool": "default_pool",
                    "pool_slots": 1.0,
                    "priority_weight": 1.0,
                    "queue": "default",
                    "retries": 0.0,
                    "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
                    "retry_exponential_backoff": False,
                    "start_date": None,
                    "task_id": "task_id1",
                    "template_fields": [],
                    "trigger_rule": "all_success",
                    "ui_color": "#e8f7e4",
                    "ui_fgcolor": "#000",
                    "wait_for_downstream": False,
                    "weight_rule": "downstream",
                    "is_mapped": False,
                }
            ],
            "total_entries": 1,
        }
        assert expected == result
|
#Out of Boundary Paths
from collections import defaultdict
class Solution:
    def findPaths(self, m, n, N, i, j):
        """Count paths that move a ball from (i, j) out of an m x n grid in
        at most N single-cell moves, modulo 1e9 + 7.

        DP over move counts: dp maps each in-grid cell to the number of ways
        to be there after k moves; any move that leaves the grid is added to
        the running answer.
        """
        MOD = pow(10, 9) + 7
        dp = defaultdict(int)
        dp[(i, j)] = 1
        count = 0
        directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
        for _ in range(N):
            new_dp = defaultdict(int)
            for (r, c), paths in dp.items():
                for dr, dc in directions:
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < m and 0 <= nc < n:
                        # still inside: carry these paths forward one move
                        new_dp[(nr, nc)] = (new_dp[(nr, nc)] + paths) % MOD
                    else:
                        # stepped off the grid: these paths are complete
                        count = (count + paths) % MOD
            # BUGFIX: the original wrote updates back into `dp` while iterating
            # it and then replaced dp with the never-filled new_dp (discarding
            # all state); it also counted whenever a move landed ON a boundary
            # cell rather than when the ball actually left the grid.
            dp = new_dp
        return count
from random import randint
import pytest
from yadm import Document
from yadm import fields
# from yadm.aio.aggregation import AioAggregator
class Doc(Document):
    """Test document stored in the 'docs' collection with one integer field."""
    __collection__ = 'docs'
    i = fields.IntegerField()
@pytest.fixture(scope='function')
def docs(loop, db):
    """Insert 10-20 Doc documents with random ints via a single bulk write
    and return them; resolves synchronously through the event loop."""
    async def gen_docs():
        async with db.bulk_write(Doc) as writer:
            docs = []
            for n in range(randint(10, 20)):
                doc = Doc(i=randint(-666, 666))
                await writer.insert_one(doc)
                docs.append(doc)
            return docs
    return loop.run_until_complete(gen_docs())
def test_async_for(loop, db, docs):
    """Aggregation: match positive ints, project onto 'n', and check async
    iteration yields exactly the positive fixture docs."""
    async def test():
        agg = db.aggregate(Doc).match(i={'$gt': 0}).project(n='$i')
        count = 0
        async for item in agg:
            assert item['n'] > 0
            count += 1
        # every positive-valued fixture doc must appear exactly once
        assert count == len([d.i for d in docs if d.i > 0])
    loop.run_until_complete(test())
|
import sys
sys.stdin = open("input.txt")
from collections import deque
T = int(input())
def func(start):
    """BFS from `start`; return the number of edges on the shortest path to
    the goal node, or None if it is unreachable.

    Uses module globals: V (node count), G (goal node), dic (adjacency dict).
    """
    queue = deque([(start, 0)])
    visited = [0] * (V + 1)  # index 0 unused so 1-based node ids map directly
    while queue:
        now, distance = queue.popleft()
        if now == G:
            # first dequeue of G is the shortest distance (unweighted BFS)
            return distance
        if visited[now] == 0:
            visited[now] = 1
            # BUGFIX: dic[now] raised KeyError for a node with no recorded
            # edges (e.g. an isolated start node); default to an empty list.
            # Also renamed the loop variable: `next` shadowed the builtin.
            for nxt in dic.get(now, []):
                queue.append((nxt, distance + 1))
    return None  # goal unreachable; the caller converts None to 0
for tc in range(1, T+1):
    # V : number of nodes
    # E : number of edges
    V, E = map(int, input().split())
    dic = {}
    # Build an undirected adjacency list: record each edge in both directions.
    for _ in range(E) :
        start, end = map(int, input().split())
        tmp = dic.get(start, list())
        tmp.append(end)
        dic[start] = tmp
        tmp =dic.get(end, list())
        tmp.append(start)
        dic[end] = tmp
    # S : start node
    # G : goal node
    S, G = map(int, input().split())
    result = func(S)
    # func returns None when G is unreachable; report 0 in that case
    if result == None :
        result =0
    print("#{} {}".format(tc, result))
|
from django.urls import path
from .views import BlogPageView, AboutPageView, BlogDetailView, BlogCreateView,BlogUpdateView, BlogDeleteView
# Route table for the blog app: list, about page, and per-post CRUD views
# keyed by primary key.
urlpatterns=[
    path('', BlogPageView.as_view(), name='main-home'),
    path('about/', AboutPageView.as_view(), name='home-about'),
    path('post/<int:pk>/', BlogDetailView.as_view(), name='post-detail'),
    path('post/new/', BlogCreateView.as_view(), name='post_new'),
    path('post/<int:pk>/edit/', BlogUpdateView.as_view(), name='post_edit'),
    path('post/<int:pk>/delete/', BlogDeleteView.as_view(), name='post_delete'),
]
from torch import nn
from torch.nn import functional as func
from torchsupport.modules.basic import MLP
from betafold.generate.angles import \
TorsionConditionalPrior, TorsionConditionalEncoder, TorsionConditionalDecoder
class ReshapeMLP(nn.Module):
  """Flattens inputs to (batch, -1) and applies an MLP."""
  def __init__(self, in_size, out_size, **kwargs):
    super(ReshapeMLP, self).__init__()
    # extra kwargs (hidden_size, depth, ...) are forwarded to the MLP
    self.mlp = MLP(in_size, out_size, **kwargs)

  def forward(self, inputs):
    flat = inputs.view(inputs.size(0), -1)
    return self.mlp(flat)
class ConvStack(nn.Module):
  """1D conv pyramid: channel count doubles per layer (starting at
  2**hidden_size), each layer halves the length via max-pool, and a final
  global max-pool flattens to (batch, out_size)."""
  def __init__(self, in_size, out_size, hidden_size=4, depth=3):
    super(ConvStack, self).__init__()
    # channel plan: in_size -> 2**h -> 2**(h+1) -> ... -> 2**(h+depth-1) -> out_size
    channels = [in_size]
    channels += [2 ** (hidden_size + level) for level in range(depth)]
    channels.append(out_size)
    self.blocks = nn.ModuleList([
      nn.Conv1d(c_in, c_out, 3, padding=1)
      for c_in, c_out in zip(channels[:-1], channels[1:])
    ])

  def forward(self, inputs):
    activations = inputs
    for conv in self.blocks:
      activations = func.max_pool1d(func.elu_(conv(activations)), 2)
    pooled = func.adaptive_max_pool1d(activations, 1)
    return pooled.reshape(pooled.size(0), -1)
def create_cvae_conv(feature_size, out_size, latent_size,
                     sequence_size, depth, hidden_size):
  """Build (encoder, decoder, prior) for a conditional VAE over torsion
  angles, using ConvStack feature extractors for the condition and angle
  inputs and a ReshapeMLP to expand latents for the decoder."""
  cond_net = ConvStack(feature_size, out_size, depth=depth)
  prior_net = ConvStack(feature_size, out_size, depth=depth)
  enc_angle_net = ConvStack(4, out_size, depth=depth)
  dec_angle_net = ReshapeMLP(latent_size, out_size,
                             hidden_size=hidden_size, depth=depth)
  # keyword arguments shared by all three heads
  common = dict(sequence_size=sequence_size, latent_size=latent_size, depth=2)
  prior = TorsionConditionalPrior(prior_net, hidden_size=out_size, **common)
  encoder = TorsionConditionalEncoder(enc_angle_net, cond_net,
                                      hidden_size=out_size, **common)
  decoder = TorsionConditionalDecoder(dec_angle_net, cond_net,
                                      return_torsion=False, **common)
  return encoder, decoder, prior
def create_cvae_mlp(feature_size, out_size, latent_size,
                    sequence_size, depth, hidden_size):
  """Build (encoder, decoder, prior) for a conditional VAE over torsion
  angles using ReshapeMLP feature extractors throughout; the condition
  network is shared by the prior, encoder and decoder."""
  cond_net = ReshapeMLP(sequence_size * feature_size, out_size,
                        hidden_size=hidden_size, depth=depth)
  enc_angle_net = ReshapeMLP(sequence_size * 4, out_size,
                             hidden_size=hidden_size, depth=depth)
  dec_angle_net = ReshapeMLP(latent_size, out_size,
                             hidden_size=hidden_size, depth=depth)
  # keyword arguments shared by all three heads
  common = dict(sequence_size=sequence_size, latent_size=latent_size, depth=2)
  prior = TorsionConditionalPrior(cond_net, hidden_size=out_size, **common)
  encoder = TorsionConditionalEncoder(enc_angle_net, cond_net,
                                      hidden_size=out_size, **common)
  decoder = TorsionConditionalDecoder(dec_angle_net, cond_net,
                                      return_torsion=False, **common)
  return encoder, decoder, prior
|
# Read n integers from the user, sort them in descending order, and report
# the second highest element. (The original header claimed ascending order,
# but the code sorted descending to pick the second highest.)
arr = []
n = int(input("length of array:"))
for _ in range(n):
    arr.append(int(input()))
# Use the standard library sort instead of the hand-rolled O(n^2) swap loop.
arr.sort(reverse=True)
# BUGFIX: guard against n < 2, which previously raised IndexError.
if n >= 2:
    print("second highest element", arr[1])
else:
    print("need at least two elements to find the second highest")
|
def ration(user_breakfast_calories, user_breakfast_proteins, user_breakfast_fats, user_breakfast_carbohydrates, user_dinner_calories, user_dinner_proteins, user_dinner_fats, user_dinner_carbohydrates, user_supper_calories, user_supper_proteins, user_supper_fats, user_supper_carbohydrates, file):
    '''
    (int...) -> list

    Build a day's menu for the user from the dish file, greedily picking at
    each step the dish whose carbohydrate content best matches the remaining
    target for that meal.

    `file` is an iterable of lines with ' : '-separated fields; lines with a
    single field are section headers (e.g. 'САЛАТИ' = "salads",
    'ДРУГI СТРАВИ' = "main courses"). Dish rows are assumed to be
    [name, weight, proteins, fats, carbohydrates, calories] -- TODO confirm
    against the data file.

    Returns user_menu: four lists of chosen dish rows,
    [breakfast, dinner, supper, extras].
    '''
    # Totals for the whole day, decremented as dishes are chosen.
    user_calories = user_breakfast_calories + user_dinner_calories + user_supper_calories
    user_proteins = user_breakfast_proteins + user_dinner_proteins + user_supper_proteins
    user_fats = user_breakfast_fats + user_dinner_fats + user_supper_fats
    user_carbohydrates = user_breakfast_carbohydrates + user_dinner_carbohydrates + user_supper_carbohydrates
    user_menu = [[], [], [], []]
    menu = []
    # Parse the dish file; strip the trailing newline from each row.
    for line in file:
        menu.append(line[:-1].split(' : '))
    # --- Breakfast: one dish from the 'САЛАТИ' ("salads") section onward,
    # chosen by smallest carbohydrate difference.
    # NOTE(review): `difference` can be negative, so the "closest" match is
    # effectively the dish that overshoots the target the most -- confirm
    # this is intended.
    difference = 0
    min_difference = 500
    index = 0
    second = 0
    for i in range(0, len(menu)):
        if menu[i][0] == 'САЛАТИ':
            second = i
            break
    for i in range(second, len(menu)):
        if len(menu[i]) != 1:
            difference = user_breakfast_carbohydrates - float(menu[i][4]) * float(menu[i][1])/100
            if difference < min_difference:
                min_difference = difference
                index = i
    user_calories -= float(menu[index][5])
    user_proteins -= float(menu[index][2])
    user_fats -= float(menu[index][3])
    user_carbohydrates -= float(menu[index][4])
    user_menu[0].append(menu.pop(index))
    # *** Dinner: keep adding dishes until its carb target is met.
    # NOTE(review): min_difference is not reset between loop passes, so later
    # passes can only pick dishes that beat the previous best -- looks like a
    # bug, confirm intent.
    difference = 0
    min_difference = 500
    index = 0
    stop = 0
    while user_dinner_carbohydrates > 0:
        if len(menu) < 5:
            stop = 1
        for i in range(0, len(menu)):
            if len(menu[i]) != 1:
                difference = user_dinner_carbohydrates - float(menu[i][4]) * float(menu[i][1])/100
                if difference < min_difference:
                    min_difference = difference
                    index = i
        try:
            if len(menu[index]) != 1:
                user_dinner_carbohydrates -= float(menu[index][4])
                user_calories -= float(menu[index][5])
                user_proteins -= float(menu[index][2])
                user_fats -= float(menu[index][3])
                user_carbohydrates -= float(menu[index][4])
                user_menu[1].append(menu.pop(index))
        except:
            # NOTE(review): bare except silently stops selection on any
            # error (e.g. bad row data) -- consider narrowing.
            stop = 1
        if stop == 1:
            break
    # Optionally add one main course ('ДРУГI СТРАВИ' = "main courses")
    # if dinner still lacks more than 10g of carbohydrates.
    difference = 0
    min_difference = 500
    index = 0
    second = 0
    for i in range(0, len(menu)):
        if menu[i][0] == 'ДРУГI СТРАВИ':
            second = i
            break
    if user_dinner_carbohydrates > 10:
        for i in range(second, len(menu)):
            if len(menu[i]) != 1:
                difference = user_dinner_carbohydrates - float(menu[i][4]) * float(menu[i][1])/100
                if difference < min_difference:
                    min_difference = difference
                    index = i
        user_calories -= float(menu[index][5])
        user_proteins -= float(menu[index][2])
        user_fats -= float(menu[index][3])
        user_carbohydrates -= float(menu[index][4])
        user_menu[1].append(menu.pop(index))
    # *** Supper: one dish chosen by the same carb-matching rule.
    difference = 0
    min_difference = 500
    index = 0
    for i in range(0, len(menu)):
        if len(menu[i]) != 1:
            difference = user_supper_carbohydrates - float(menu[i][4]) * float(menu[i][1])/100
            if difference < min_difference:
                min_difference = difference
                index = i
    try:
        user_calories -= float(menu[index][5])
        user_proteins -= float(menu[index][2])
        user_fats -= float(menu[index][3])
        user_carbohydrates -= float(menu[index][4])
        user_menu[2].append(menu.pop(index))
    except:
        pass
    # *** Extras: fill the remaining daily carbohydrate budget; note the
    # per-100g scaling applied here, unlike the earlier meal updates.
    difference = 0
    min_difference = 500
    index = 0
    while user_carbohydrates > 0:
        for i in range(0, len(menu)):
            if len(menu[i]) != 1:
                difference = user_carbohydrates - float(menu[i][4]) * float(menu[i][1])/100
                if difference < min_difference:
                    min_difference = difference
                    index = i
        try:
            if len(menu[index]) != 1:
                user_calories -= float(menu[index][5]) * float(menu[index][1])/100
                user_proteins -= float(menu[index][2]) * float(menu[index][1])/100
                user_fats -= float(menu[index][3]) * float(menu[index][1])/100
                user_carbohydrates -= float(menu[index][4]) * float(menu[index][1])/100
                user_menu[3].append(menu.pop(index))
        except:
            break
    return user_menu
|
import sys
import preprocessing
from models import ConvModel, FCModel, A3CModel, GRUModel
import numpy as np
class HyperParams:
    """Assembles training hyperparameters for the RL runner.

    Defaults are declared per value type (string/int/float/bool), overridden
    first by command-line ``key=value`` arguments and then by the optional
    ``arg_hyps`` dict, after which the model class and preprocessing function
    are resolved from their string names. The merged result is ``self.hyps``.
    """
    def __init__(self, arg_hyps=None):
        # arg_hyps: optional dict of overrides applied AFTER the command
        # line, so programmatic settings take precedence.
        hyp_dict = dict()
        hyp_dict['string_hyps'] = {
            "exp_name":"default",
            "model_type":"gru", # Options include 'dense', 'conv', 'a3c'
            "env_type":"Pong-v0",
            "optim_type":'rmsprop' # Options: rmsprop, adam
        }
        hyp_dict['int_hyps'] = {
            "n_epochs": 3, # PPO update epoch count
            "batch_size": 256, # PPO update batch size
            "max_tsteps": int(2e6),
            "n_tsteps": 128, # Maximum number of tsteps per rollout per perturbed copy
            "n_envs": 11, # Number of parallel python processes
            "n_frame_stack":3, # Number of frames to stack in MDP state
            "n_rollouts": 22,
            'h_size':288,
            "n_past_rews":25,
            "grid_size": 15,
            "unit_size": 4,
            "n_foods": 2,
        }
        hyp_dict['float_hyps'] = {
            "lr":0.0001,
            "lr_low": float(1e-12),
            "lambda_":.95,
            "gamma":.99,
            "val_coef":1,
            "entr_coef":.01,
            "entr_coef_low":.001,
            "max_norm":.5,
            "epsilon": .2, # PPO update clipping constant
            "epsilon_low":.05,
        }
        hyp_dict['bool_hyps'] = {
            "resume":False,
            "render": False,
            "clip_vals": False,
            "decay_eps": False,
            "decay_lr": False,
            "decay_entr": False,
            "use_nstep_rets": False,
            "norm_advs": True,
            "norm_batch_advs": False,
            "use_bnorm": False,
        }
        self.hyps = self.read_command_line(hyp_dict)
        if arg_hyps is not None:
            for arg_key in arg_hyps.keys():
                self.hyps[arg_key] = arg_hyps[arg_key]

        # Hyperparameter Manipulations
        # grid_size becomes a square [height, width] pair
        self.hyps['grid_size'] = [self.hyps['grid_size'],self.hyps['grid_size']]
        # a batch cannot exceed the total number of collected timesteps
        if self.hyps['batch_size'] > self.hyps['n_rollouts']*self.hyps['n_tsteps']:
            self.hyps['batch_size'] = self.hyps['n_rollouts']*self.hyps['n_tsteps']

        # Model Type: resolve the class from its name (falls back to ConvModel)
        model_type = self.hyps['model_type'].lower()
        if "conv" == model_type:
            self.hyps['model'] = ConvModel
        elif "a3c" == model_type:
            self.hyps['model'] = A3CModel
        elif "fc" == model_type or "dense" == model_type:
            self.hyps['model'] = FCModel
        elif "gru" == model_type:
            self.hyps['model'] = GRUModel
        else:
            self.hyps['model'] = ConvModel

        # Preprocessor Type: matched by substring of the env name
        env_type = self.hyps['env_type'].lower()
        if "pong" in env_type:
            self.hyps['preprocess'] = preprocessing.pong_prep
        elif "breakout" in env_type:
            self.hyps['preprocess'] = preprocessing.breakout_prep
        elif "snake" in env_type:
            self.hyps['preprocess'] = preprocessing.snake_prep
        else:
            self.hyps['preprocess'] = preprocessing.atari_prep

    def read_command_line(self, hyps_dict):
        """
        Reads arguments from the command line. If the parameter name is not declared in __init__
        then the command line argument is ignored.

        Pass command line arguments with the form parameter_name=parameter_value

        hyps_dict - dictionary of hyperparameter dictionaries with keys:
                "bool_hyps" - dictionary with hyperparameters of boolean type
                "int_hyps" - dictionary with hyperparameters of int type
                "float_hyps" - dictionary with hyperparameters of float type
                "string_hyps" - dictionary with hyperparameters of string type

        Returns the four per-type dicts merged into one flat dict.
        """
        bool_hyps = hyps_dict['bool_hyps']
        int_hyps = hyps_dict['int_hyps']
        float_hyps = hyps_dict['float_hyps']
        string_hyps = hyps_dict['string_hyps']
        if len(sys.argv) > 1:
            for arg in sys.argv:
                arg = str(arg)
                sub_args = arg.split("=")
                # each value is coerced to the type of its declared default
                if sub_args[0] in bool_hyps:
                    bool_hyps[sub_args[0]] = sub_args[1] == "True"
                elif sub_args[0] in float_hyps:
                    float_hyps[sub_args[0]] = float(sub_args[1])
                elif sub_args[0] in string_hyps:
                    string_hyps[sub_args[0]] = sub_args[1]
                elif sub_args[0] in int_hyps:
                    int_hyps[sub_args[0]] = int(sub_args[1])
        return {**bool_hyps, **float_hyps, **int_hyps, **string_hyps}
# Methods
def hyper_search(hyps, hyp_ranges, keys, idx, trainer, search_log):
    """
    Recursive grid search over hyperparameter ranges.

    hyps - dict of hyperparameters created by a HyperParameters object
        type: dict
        keys: name of hyperparameter
        values: value of hyperparameter
    hyp_ranges - dict of ranges for hyperparameters to take over the search
        type: dict
        keys: name of hyperparameters to be searched over
        values: list of values to search over for that hyperparameter
    keys - keys of the hyperparameters to be searched over. Used to
            allow order of hyperparameter search
    idx - the index of the current key to be searched over
    trainer - trainer object that handles training of model
    search_log - open writable file; one result line is appended per trained
            configuration
    """
    if idx >= len(keys):
        # Base case: all keys fixed -- train this configuration.
        if 'search_id' not in hyps:
            # first run: initialize the numeric suffix bookkeeping on exp_name
            hyps['search_id'] = 0
            hyps['exp_name'] = hyps['exp_name']+"0"
            hyps['hyp_search_count'] = np.prod([len(hyp_ranges[key]) for key in keys])
        # replace the old numeric suffix with the incremented search id
        id_ = len(str(hyps['search_id']))
        hyps['search_id'] += 1
        hyps['exp_name'] = hyps['exp_name'][:-id_]+str(hyps['search_id'])
        best_avg_rew = trainer.train(hyps)
        params = [str(key)+":"+str(hyps[key]) for key in keys]
        search_log.write(", ".join(params)+" – BestRew:"+str(best_avg_rew)+"\n")
        search_log.flush()
    else:
        # Recursive case: iterate this key's range, recursing on the rest.
        key = keys[idx]
        for param in hyp_ranges[key]:
            hyps[key] = param
            hyper_search(hyps, hyp_ranges, keys, idx+1, trainer, search_log)
    return
def make_hyper_range(low, high, range_len, method="log"):
    """Return `range_len` float hyperparameter values spanning [low, high].

    method -- "random": uniform random samples; "uniform": evenly spaced
    (inclusive of both endpoints); anything else: log-spaced base 10
    (inclusive of both endpoints).
    """
    if method.lower() == "random":
        # BUGFIX: np.random.random takes no bounds (it raised a TypeError
        # here); np.random.uniform is the sampler that was intended.
        param_vals = np.random.uniform(low, high, size=range_len)
    elif method.lower() == "uniform":
        step = (high-low)/(range_len-1)
        # BUGFIX: the old epsilon expression added and subtracted the same
        # quantity (a no-op), so np.arange's open upper bound dropped `high`
        # and only range_len-1 values came back. Nudge the stop just past
        # `high` in the direction of the step instead.
        range_high = high + (1e-5 if step > 0 else -1e-5)
        param_vals = np.arange(low, range_high, step=step)
    else:
        range_low = np.log(low)/np.log(10)
        range_high = np.log(high)/np.log(10)
        step = (range_high-range_low)/(range_len-1)
        arange = np.arange(range_low, range_high, step=step)
        # np.arange's open upper bound can drop the final point; append it.
        if len(arange) < range_len:
            arange = np.append(arange, [range_high])
        param_vals = 10**arange
    param_vals = [float(param_val) for param_val in param_vals]
    return param_vals
|
from typing import List
from collections import defaultdict
class Solution:
    def longestSubsequence(self, arr: List[int], difference: int) -> int:
        """Length of the longest subsequence of arr forming an arithmetic
        progression with the given common difference."""
        # best_ending_at[v]: longest valid subsequence ending on value v
        best_ending_at = defaultdict(int)
        for value in arr:
            candidate = best_ending_at[value - difference] + 1
            if candidate > best_ending_at[value]:
                best_ending_at[value] = candidate
        return max(best_ending_at.values(), default=0)
# Demo: expected output is 4 (the subsequence 7, 5, 3, 1).
l = [1,5,7,8,5,3,4,2,1]
ob = Solution()
print(ob.longestSubsequence(l,-2))
from tkinter import ttk
from tkinter import font
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import *
import requests
import io
from PIL import Image, ImageTk
from urllib.request import urlopen
import os
apiKey = os.environ.get('API_KEY')
def getLocation():
    """Resolve the caller's city name from their IP via the RapidAPI
    ip-geo-location service (uses the module-level apiKey)."""
    url = "https://ip-geo-location.p.rapidapi.com/ip/check"
    querystring = {"format":"json"}
    headers = {
        'x-rapidapi-key': apiKey,
        'x-rapidapi-host': "ip-geo-location.p.rapidapi.com"
    }
    response = requests.request("GET", url, headers=headers, params=querystring)
    response = response.json()
    # NOTE(review): assumes the response always contains city.name; no
    # handling for a failed lookup or missing API key -- confirm upstream.
    return response['city']['name']
def getWeather():
    """Fetch the current weather for the caller's city (via getLocation)
    from the RapidAPI weatherapi-com service; returns the parsed JSON dict."""
    location = getLocation()
    url = "https://weatherapi-com.p.rapidapi.com/current.json"
    querystring = {"q":location}
    headers = {
        'x-rapidapi-key': apiKey,
        'x-rapidapi-host': "weatherapi-com.p.rapidapi.com"
    }
    response = requests.request("GET", url, headers=headers, params=querystring)
    response = response.json()
    # NOTE(review): no error handling for a failed request or an error body
    # from the API -- callers index into the dict directly; confirm.
    return response
def App():
    """Build the weather UI inside the module-global `root` window and run it.

    Fetches the current weather once, renders the city name, condition icon
    and numeric readings, and wires up unit radio buttons plus a refresh
    button that rebuilds the whole UI.
    """
    # getting weather
    actualWeather = getWeather()
    # getting weather image -- the API returns a protocol-relative icon URL
    url = actualWeather['current']['condition']['icon']
    page = urlopen('http:' + url)
    weatherByteImg = io.BytesIO(page.read())
    weatherPilImg = Image.open(weatherByteImg)
    weatherTkImg = ImageTk.PhotoImage(weatherPilImg)
    # app components
    cityName = Label(root, text=actualWeather['location']['name'], justify='right')
    weatherImage = Label(root, image=weatherTkImg)
    weatherInfo = Frame(root)
    weatherOptions = Frame(root)
    refreshingOptions = Frame(root)
    # app grid: one column, five stacked rows
    cityName.grid(row=0, column=0, sticky="nsew")
    weatherImage.grid(row=1, column=0, sticky="nsew")
    weatherInfo.grid(row=2, column=0, columnspan=3, sticky="nsew")
    weatherOptions.grid(row=3, column=0, sticky="nsew")
    refreshingOptions.grid(row=4, column=0, sticky="nsew")
    # weather info grid
    weatherInfo.columnconfigure((0,1,2), weight=1)
    weatherInfo.rowconfigure((0,1), weight=1)
    # weather options grid
    weatherOptions.columnconfigure((0,1,2,3,4,5), weight=1)
    weatherOptions.rowconfigure(0, weight=1)
    weatherOptions.rowconfigure(1, weight=3)
    Label(weatherOptions, text="Choose own units", anchor="e", justify=LEFT).grid(row=0, column=0, columnspan=10)
    # weather options vars -- unit selections shared by the radio buttons below
    tempUnit = StringVar()
    tempUnit.set("Celsius")
    windSpeedUnit = StringVar()
    windSpeedUnit.set("Km/h")
    visibilityUnit = StringVar()
    visibilityUnit.set("Kilometers")
    # weather info data
    def updateInfos():
        """Re-render the numeric labels according to the selected units.

        NOTE(review): every call grids a fresh set of Labels on top of the
        previous ones instead of updating them in place -- confirm intended.
        """
        settings = [tempUnit, windSpeedUnit]
        tempInfo = StringVar()
        feelTempInfo = StringVar()
        windSpeedInfo = StringVar()
        visibilityInfo = (str(actualWeather['current']['vis_km']))
        windDirInfo = (str(actualWeather['current']['wind_dir']))
        pressureInfo = (str(actualWeather['current']['pressure_mb']))
        # each StringVar is filled by whichever unit selection matches
        for x in settings:
            if x.get() == "Celsius":
                tempInfo.set(str(actualWeather['current']['temp_c']) + " C°")
                feelTempInfo.set(str(actualWeather['current']['feelslike_c']) + " C°")
            if x.get() == "Fahrenheit":
                tempInfo.set(str(actualWeather['current']['temp_f']) + " F°")
                feelTempInfo.set(str(actualWeather['current']['feelslike_f']) + " F°")
            if x.get() == "Km/h":
                windSpeedInfo.set(str(actualWeather['current']['wind_kph']) + " Km/h")
            if x.get() == "Miles/h":
                windSpeedInfo.set(str(actualWeather['current']['wind_mph']) + " Miles/h")
        temp = Label(weatherInfo, text="Temperature: " + tempInfo.get())
        feelTemp = Label(weatherInfo, text="Feel like temperature: " + feelTempInfo.get())
        windSpeed = Label(weatherInfo, text="Wind speed: " + windSpeedInfo.get())
        visibility = Label(weatherInfo, text="Visibility: " + visibilityInfo)
        windDir = Label(weatherInfo, text="Wind direction: " + windDirInfo)
        pressure = Label(weatherInfo, text="Pressure: " + pressureInfo)
        weatherInfo.update_idletasks()
        temp.grid(row=0,column=0)
        feelTemp.grid(row=1,column=0)
        windSpeed.grid(row=0, column=1)
        windDir.grid(row=1, column=1)
        pressure.grid(row=0, column=2)
        visibility.grid(row=1, column=2)
    # weather info buttons
    Radiobutton(weatherOptions, text="C°", variable=tempUnit, value="Celsius", command=updateInfos).grid(row=1, column=0)
    Radiobutton(weatherOptions, text="F°", variable=tempUnit, value="Fahrenheit", command=updateInfos).grid(row=1, column=1)
    Radiobutton(weatherOptions, text="Km/h", variable=windSpeedUnit, value="Km/h", command=updateInfos).grid(row=1, column=2)
    Radiobutton(weatherOptions, text="Miles/h", variable=windSpeedUnit, value="Miles/h", command=updateInfos).grid(row=1, column=3)
    # visibility-unit buttons are disabled (kept for a future feature)
    """
    Radiobutton(weatherOptions, text="Kilometers", variable=visibilityUnit, value="Kilometers", command=updateInfos).grid(row=1, column=4)
    Radiobutton(weatherOptions, text="Miles", variable=visibilityUnit, value="Miles", command=updateInfos).grid(row=1, column=5)
    """
    updateInfos()
    # refresh option
    def refresh():
        """Confirm to the user, then rebuild the entire UI with fresh data.

        NOTE(review): calling App() recursively stacks new widgets and a new
        mainloop over the old ones -- confirm this is acceptable.
        """
        messagebox.showinfo("Refreshed", "Now you have the newest weather info!")
        App()
    refreshingOptions.columnconfigure((0,1), weight=1)
    lastUpdateInfo = actualWeather['current']['last_updated']
    lastUpdate = Label(refreshingOptions, text="Last updated: " + str(lastUpdateInfo))
    refreshButton = Button(refreshingOptions, text="Refresh", command=refresh)
    lastUpdate.grid(column=0, sticky="s")
    refreshButton.grid(column=1)
    # cosmetic changes as font
    cityName.configure(font=('',50), anchor="center")
    weatherImage.configure(anchor="center")
    # mainloop
    root.mainloop()
if __name__ == "__main__":
    # window settings -- `root` is a module-level global that App() relies on
    root = Tk(className='WeatherApp')
    rootX = "600"  # window width (string because geometry() takes "WxH")
    rootY = "400"  # window height
    root.geometry(rootX + "x" + rootY)
    # single stretchable column; row 1 (the weather icon) gets extra weight
    root.columnconfigure((0), weight=1)
    root.rowconfigure((0,2,3,4), weight=1)
    root.rowconfigure(1, weight=4)
    App()
|
from api.models.data_engine_job import DataEngineJob
import threading
class __Singleton(type):
    """Metaclass that makes its classes single-instance.

    The first call constructs and caches the instance; every later call
    returns that same cached object.
    """
    instance = None

    def __call__(cls, *args, **kwargs):
        existing = cls.instance
        if not existing:
            existing = super().__call__(*args, **kwargs)
            cls.instance = existing
        return existing
class JobQueue(metaclass=__Singleton):
    """Singleton, lock-protected queue of pending data-engine jobs.

    Offers refresh, set, pop, peek and remove operations for building and
    retrieving the next job.  Being a singleton, it must be obtained through
    the class itself (the metaclass returns the one shared instance).
    """

    # Database filter matching every job state that still awaits processing.
    QUEUED_JOBS_FILTER = DataEngineJob.state.in_([
        'new', 'retryable_failure', 'delayed_start'
    ])

    def __init__(self, data_engine_job=DataEngineJob):
        self.lock = threading.Lock()
        self.jobs = []
        self.data_engine_job = data_engine_job
        self.refresh()

    def set(self, job):
        """Add a job, or re-insert an existing one so its position is updated.

        Parameters:
            job (DataEngineJob): The job to add/update.
        """
        with self.lock:
            if job in self.jobs:
                self.jobs.remove(job)
            self.jobs.append(job)
            self.jobs.sort()

    def pop(self):
        """Remove and return the next job, or None when the queue is empty.

        Returns:
            job (DataEngineJob): The next job, if any.
        """
        with self.lock:
            if self.jobs:
                return self.jobs.pop(0)

    def peek(self):
        """Return the next job without removing it, or None when empty.

        Returns:
            job (DataEngineJob): The next job, if any.
        """
        if self.jobs:
            return self.jobs[0]

    def remove(self, job):
        """Discard the given job; jobs that are not queued are ignored.

        Parameters:
            job (DataEngineJob): The job to remove.
        """
        with self.lock:
            try:
                self.jobs.remove(job)
            except ValueError:
                pass

    def refresh(self):
        """Rebuild the queue from every queued job stored in the database."""
        with self.lock:
            self.jobs.clear()
            for job in self.data_engine_job.query.filter(
                    self.QUEUED_JOBS_FILTER).all():
                self.jobs.append(job)
            self.jobs.sort()
|
# Default YOLOv3 configuration for the object detector: model definition,
# pre-trained weights, COCO class-name list, and square input resolution.
obj_model_def = "ObjectDetector/config/yolov3.cfg"
obj_weights_path = "ObjectDetector/weights/yolov3.weights"
obj_class_path = "ObjectDetector/data/coco.names"
obj_img_size = 416  # network input size in pixels per side
class ObjDetectorInfo:
    """Bundle of configuration values for the YOLOv3 object detector."""

    def __init__(self):
        # Defaults come from the module-level constants above.
        self.model_def = obj_model_def
        self.weights_path = obj_weights_path
        self.class_path = obj_class_path
        self.img_size = obj_img_size

    def getInfo(self):
        """Return this configuration object itself."""
        return self

    def setInputSize(self, img_size):
        """Override the network input size.

        Bug fix: the original signature was `setInputSize(img_size)` -- without
        `self`, calling it as an instance method bound the instance to
        `img_size` and the body raised NameError on `self`.
        """
        self.img_size = img_size
|
from datetime import datetime
class Bokning:
    """A booking of a sports hall for a customer between two timestamps.

    Times are stored as "YYYY-MM-DD HH:MM" strings and parsed on demand.
    """

    def __init__(self, starttid, sluttid, kund, sporthall):
        self._starttid = starttid
        self._sluttid = sluttid
        self._kund = kund
        self._sporthall = sporthall

    def getHall(self):
        """Return the booked sports hall."""
        return self._sporthall

    def getDates(self):
        """Parse start/end strings into a [start, end] list of datetimes."""
        fmt = "%Y-%m-%d %H:%M"
        return [datetime.strptime(stamp, fmt)
                for stamp in (self._starttid, self._sluttid)]

    def __str__(self):
        return f"{self._starttid} -- {self._sluttid} -> {self._sporthall}"

    def writeTaken(self):
        """Describe the booking via the hall's type, marked as taken."""
        return f"{self._starttid} -- {self._sluttid} -> {self._sporthall.getTyp()} taken "
|
import socket
import os
# Directory served for downloads / directory where client uploads are stored.
ROOT_PATH = '/home/dev1/share/'
UPLOAD_PATH = '/home/dev1/uploads/'
def create_listen_sock(port=8080):
    """Create a TCP socket listening on all interfaces at `port` (backlog 5)."""
    sock = socket.socket()
    sock.bind(('', port))
    sock.listen(5)
    return sock
def parse_client_data(data: bytes):
    """Extract the command flag: the stripped bytes before the first b':'."""
    flag, _, _ = data.partition(b':')
    return flag.strip()
def download_handler(client_sock, data: bytes):
    """Stream the requested file from ROOT_PATH to the client in 1 KiB chunks.

    The request has the form b'G: <filename>'.  When the file does not exist
    the connection is closed and nothing is sent.
    """
    total = 0
    filename = data.split(b':')[1].strip().decode('utf-8')
    file_path = os.path.join(ROOT_PATH, filename)
    print("服务器下载文件信息:", file_path)
    if not os.path.isfile(file_path):
        client_sock.close()
        # Bug fix: the original fell through here and crashed in open() below
        # with FileNotFoundError after having closed the socket.
        return
    with open(file_path, 'rb') as fp:
        buf = fp.read(1024)
        while buf:
            ret = client_sock.send(buf)
            total += ret
            buf = fp.read(1024)
    print("总共发送", total)
def list_handler(client_sock):
    """Send a pickled directory listing of ROOT_PATH to the client."""
    import pickle
    payload = pickle.dumps(os.listdir(ROOT_PATH))
    sent = client_sock.send(payload)
    print("发送了", sent)
def put_handler(t_sock, data: bytes):
    """Receive an upload (b'P: <filename>') and store it under UPLOAD_PATH."""
    filename = data.split(b':')[1].strip().decode('utf-8')
    file_path = os.path.join(UPLOAD_PATH, filename)
    with open(file_path, 'wb') as fp:
        while True:
            chunk = t_sock.recv(1024)
            if not chunk:  # peer closed the connection -> upload complete
                break
            fp.write(chunk)
    print("上传成功")
def main_process(listen_fd: socket.socket):
    """Accept one client, dispatch its first request, then close the socket.

    The request's flag byte selects the action: G=download, L=list, P=upload.
    """
    conn, _ = listen_fd.accept()
    print("新链接到来", conn)
    request = conn.recv(1024)
    command = parse_client_data(request)
    if command == b'G':
        download_handler(conn, request)
    elif command == b'L':
        list_handler(conn)
    elif command == b'P':
        put_handler(conn, request)
    conn.close()
if __name__ == '__main__':
    # Serve forever: each iteration handles exactly one client connection.
    sock = create_listen_sock(9000)
    while True:
        main_process(sock)
|
from setuptools import setup, find_packages
import sys, os
# Release version handed to setuptools below.
version = '0.0.1'

# Standard setuptools metadata; the `geeknote` console script maps to
# geeknote.main().
setup(name='geeknote',
      version=version,
      description="GeekNote python evernote client",
      long_description="""\
a python evernote client
""",
      classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
      keywords='evernote',
      author='Vitaliy Rodenko',
      author_email='contact@geeknote.me',
      url='http://geeknote.me',
      license='MIT',
      packages=find_packages(),
      include_package_data=True,
      entry_points={
          'console_scripts': [ 'geeknote = geeknote:main' ]
      },
      )
|
from django.conf.urls import url
from django.urls import path
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from . import views
from rest_framework_jwt.views import obtain_jwt_token
# drf-yasg schema view serving the OpenAPI document for this API.
schema_view = get_schema_view(
    openapi.Info(
        title='API',
        default_version='v1'
    ),
)

urlpatterns = [
    # NOTE(review): the .../get and .../delete routes below all point at the
    # same *_update view as .../update; verify those views dispatch on the
    # request method, otherwise these look like copy-paste slips.
    path('product/list', views.product_list),
    path('product/options', views.product_options),
    path('product/create', views.product_create),
    path('product/<int:product_id>/get', views.product_update),
    path('product/<int:product_id>/update', views.product_update),
    path('product/<int:product_id>/delete', views.product_update),
    path('stocklevel/<int:product_id>/product', views.stocklevel_product),
    path('stocklevel/create', views.stocklevel_create),
    path('stocklevel/<int:stocklevel_id>/get', views.stocklevel_update),
    path('stocklevel/<int:stocklevel_id>/update', views.stocklevel_update),
    path('stocklevel/<int:stocklevel_id>/delete', views.stocklevel_update),
    path('producer/list', views.producer_list),
    path('producer/options', views.producer_options),
    path('producer/create', views.producer_create),
    path('producer/<int:producer_id>/get', views.producer_update),
    path('producer/<int:producer_id>/update', views.producer_update),
    path('producer/<int:producer_id>/delete', views.producer_update),
    path('warehouse/list', views.warehouse_list),
    path('warehouse/options', views.warehouse_options),
    path('warehouse/create', views.warehouse_create),
    path('warehouse/<int:warehouse_id>/get', views.warehouse_update),
    path('warehouse/<int:warehouse_id>/update', views.warehouse_update),
    path('warehouse/<int:warehouse_id>/delete', views.warehouse_update),
    path('storage/options', views.storage_options),
    # API documentation and JWT token endpoints.
    url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
    url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    url(r'^api-token-auth/', obtain_jwt_token),
]
|
def exist(x):
    """Tally the digits of x into the global counter list `now`.

    Returns True while at least one digit 0-9 has never been seen
    (i.e. some counter is still zero), False once all digits appeared.
    """
    global now
    for digit in str(x):
        now[int(digit)] += 1
    return 0 in now
# Google Code Jam "Counting Sheep": for each case, keep taking multiples of
# n until every digit 0-9 has been seen; print the last multiple needed.
num = int(input())
count = 0
for cases in range(1, num + 1):
    n = int(input())
    count += 1
    if n == 0:
        # multiples of 0 never reveal new digits -> insomnia
        print('Case #'+str(count)+': INSOMNIA\n')
        continue
    # per-digit counters, shared with exist() via the module global `now`
    now = [0,0,0,0,0,0,0,0,0,0]
    for i in list(str(n)):
        now[int(i)] += 1
    multi = 2
    # advance through multiples until exist() reports all ten digits seen
    while exist(multi*n):
        multi += 1
    # NOTE(review): print() already appends a newline, so the explicit '\n'
    # emits a blank line between cases -- confirm the judge accepts that.
    print('Case #'+str(count)+': '+str(multi*n)+'\n')
import requests
import json
import pandas as pd
import xlwings as xw
from time import sleep
from datetime import datetime, time , timedelta
import os
import numpy as np
# Pandas console display tweaks for debugging dumps.
pd.set_option('display.width',1500)
pd.set_option('display.max_columns',75)
pd.set_option('display.max_rows',1500)

# NSE option-chain endpoints: the landing page is fetched first only to
# obtain the session cookies required by the JSON API.
url_oc = "https://www.nseindia.com/option-chain"
url = f"https://www.nseindia.com/api/option-chain-indices?symbol=NIFTY"
# Browser-like headers; NSE rejects requests without them.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, '
                         'like Gecko) '
                         'Chrome/80.0.3987.149 Safari/537.36',
           'accept-language': 'en,gu;q=0.9,hi;q=0.8', 'accept-encoding': 'gzip, deflate, br'}
# Option expiry to filter on; a falsy value switches to the 'filtered' data.
expiry = "26-Aug-2021"

# Excel workbook used as the live dashboard (xlwings: must be open in Excel).
excel_file ="Option_Chain_Analysis.xlsx"
wb = xw.Book(excel_file)
sheet_oi_single = wb.sheets("OIData")
sht_live=wb.sheets("Data")

df_list =[]  # accumulated per-snapshot records, persisted to oi_filename
mp_list =[]  # max-pain summary records, persisted to mp_filename
# Daily JSON dump files, stamped with today's ddmmyy date.
oi_filename = os.path.join("Files","oi_data_records_{0}.json".format(datetime.now().strftime("%d%m%y")))
mp_filename = os.path.join("Files","mp_data_records_{0}.json".format(datetime.now().strftime("%d%m%y")))
def fetch_oi(df,mp_df):
    """Fetch one NIFTY option-chain snapshot from NSE and record it.

    Appends the snapshot to `df` and a max-pain summary row to `mp_df`,
    mirrors both into the Excel workbook and the daily JSON dump files, and
    returns the updated (df, mp_df).

    NOTE(review): max_retries is 1, so at most a single attempt is made per
    call -- confirm whether more retries were intended.
    """
    tries = 1
    max_retries = 1
    while tries <= max_retries:
        try:
            # Hit the landing page first to collect session cookies; the JSON
            # endpoint rejects cookie-less requests.
            session = requests.Session()
            request = session.get(url_oc, headers=headers, timeout=5)
            cookies = dict(request.cookies)
            response = session.get(url, headers=headers, timeout=5, cookies=cookies)
            data_json = response.json()
            r = data_json
            print("hello")
            if expiry:
                # keep only option rows matching the configured expiry date
                ce_values = [data['CE'] for data in r['records']['data']if 'CE' in data and str(data['expiryDate']).lower() == str(expiry).lower()]
                pe_values = [data['PE'] for data in r['records']['data']if 'PE' in data and str(data['expiryDate']).lower() == str(expiry).lower()]
            else:
                ce_values = [data['CE'] for data in r['filtered']['data']if 'CE' in data]
                pe_values = [data['PE'] for data in r['filtered']['data']if 'PE' in data]
            ce_data = pd.DataFrame(ce_values)
            pe_data = pd.DataFrame(pe_values)
            ce_data = ce_data.sort_values(['strikePrice'])
            pe_data = pe_data.sort_values(['strikePrice'])
            # push call/put tables into the OIData sheet (calls at A2, puts at I2)
            sheet_oi_single.range("A2").options(index=False,headers=False).value = ce_data.drop(columns=['underlying','identifier','bidQty','bidprice','askQty','askPrice','expiryDate','totalBuyQuantity','totalBuyQuantity','totalTradedVolume','underlyingValue'], axis=1)[['openInterest','changeinOpenInterest','pchangeinOpenInterest','impliedVolatility','lastPrice','change','pChange','strikePrice']]
            sheet_oi_single.range("I2").options(index=False,headers=False).value = pe_data.drop(columns=['strikePrice','underlying','identifier','bidQty','bidprice','askQty','askPrice','expiryDate','totalBuyQuantity','totalBuyQuantity','totalTradedVolume','underlyingValue'], axis=1,)[['openInterest','changeinOpenInterest','pchangeinOpenInterest','impliedVolatility','lastPrice','change','pChange']]
            ce_data['type'] = 'CE'
            pe_data['type'] = 'PE'
            df1 = pd.concat([ce_data,pe_data])
            if len(df_list) > 0 :
                # reuse the previous timestamp so the duplicate check below
                # compares only the data columns, not the clock
                df1['Time'] = df_list[-1][0]['Time']
            if len(df_list) > 0 and df1.to_dict('records')== df_list[-1]:
                print("Duplicate data. Not recording")
                sleep(10)
                tries +=1
                continue
            df1['Time'] = datetime.now().strftime("%H:%M")
            # put/call ratio by traded volume
            pcr = pe_data['totalTradedVolume'].sum()/ce_data['totalTradedVolume'].sum()
            # one max-pain summary row keyed by the current time; MaxPain is
            # read back from the Dashboard sheet's own formula
            mp_dict = {datetime.now().strftime("%H: %M"): {'underlying' :df1['underlyingValue'].iloc[-1],
                       'MaxPain': wb.sheets("Dashboard").range("H8").value,
                       'pcr' : pcr,
                       'call_decay': ce_data['change'].mean(),
                       'put_decay': pe_data['change'].mean() }}
            df3 = pd.DataFrame(mp_dict).transpose()
            mp_df = pd.concat([mp_df,df3])
            with open(mp_filename, "w") as files:
                files.write(json.dumps(mp_df.to_dict(), indent =4,sort_keys= True))
            wb.sheets['MPData'].range("A2").options(header = False).value = mp_df
            # enforce a stable column order before appending the new snapshot
            if not df.empty:
                df = df[['strikePrice', 'expiryDate', 'underlying', 'identifier', 'openInterest', 'changeinOpenInterest',
                         'pchangeinOpenInterest','totalTradedVolume', 'impliedVolatility', 'lastPrice', 'change',
                         'pChange',
                         'totalBuyQuantity', 'totalSellQuantity', 'bidQty','bidprice','askQty', 'askPrice',
                         'underlyingValue', 'type',
                         'Time']]
            df1 = df1[['strikePrice', 'expiryDate', 'underlying', 'identifier', 'openInterest', 'changeinOpenInterest',
                       'pchangeinOpenInterest','totalTradedVolume', 'impliedVolatility', 'lastPrice', 'change',
                       'pChange',
                       'totalBuyQuantity', 'totalSellQuantity', 'bidQty','bidprice','askQty', 'askPrice',
                       'underlyingValue', 'type',
                       'Time']]
            df = pd.concat([df,df1])
            df_list.append(df1.to_dict('records'))
            with open(oi_filename, "w") as files:
                files.write(json.dumps(df_list, indent =4,sort_keys= True))
            return df, mp_df
        except Exception as error:
            print("Error {0}".format(error))
            tries +=1
            sleep(10)
            continue
    if tries>= max_retries:
        print("Max Retries exceeded. No new Data at time {0}".format(datetime.now()))
        return df, mp_df
def main():
    """Poll the option chain once per `timeframe` minutes during market hours.

    Restores today's earlier snapshots from the JSON dump files first, then
    loops calling fetch_oi() and mirroring the data into the live sheet.
    """
    global df_list
    # restore today's raw snapshots, if a dump file exists
    try :
        df_list = json.loads(open(oi_filename).read())
    except Exception as error:
        print("Error reading data. Error: {0}".format(error))
        df_list =[]
    if df_list:
        df = pd.DataFrame()
        for item in df_list:
            df = pd.concat([df, pd.DataFrame(item)])
    else:
        df = pd.DataFrame()
    # restore today's max-pain summary, if present
    try :
        mp_list = json.loads(open(mp_filename).read())
        mp_df = pd.DataFrame().from_dict(mp_list)
    except Exception as error:
        print("Error reading data. Error: {0}".format(error))
        mp_list =[]
        mp_df = pd.DataFrame()
    timeframe = 1  # minutes between scans
    # poll while within the (generous) trading window 09:15-20:30
    while time(9,15) <= datetime.now().time() <= time(20,30) :
        timenow = datetime.now()
        # fire when the current minute aligns with the timeframe grid
        check = True if timenow.minute/timeframe in list(np.arange(0.0, 60.0)) else False
        if check :
            nextscan = timenow + timedelta(minutes = timeframe)
            df, mp_df = fetch_oi(df,mp_df)
            if not df.empty:
                # backfill zero IVs and build a strike+type row identifier
                df['impliedVolatility'] = df['impliedVolatility'].replace(to_replace = 0, method = 'bfill').values
                df['identifier'] = df['strikePrice'].astype(str) + df['type']
                sht_live.range("A1").value= df
                wb.api.RefreshAll()
                # sleep until the next scheduled scan
                waitsecs = int ((nextscan- datetime.now()).seconds)
                print("Wait for {0} seconds".format(waitsecs))
                sleep(waitsecs) if waitsecs > 0 else sleep(0)
            else:
                print("No data received")
                sleep(30)
# Bug fix: the original guard compared against " __main__" (note the leading
# space inside the string), so it never fired; a stray unconditional main()
# call compensated for it and also ran on *import*.  The guard now matches
# the real module name and the duplicate call is gone.
if __name__ == "__main__":
    main()
import factory
from django_eth_events import models
class DaemonFactory(factory.DjangoModelFactory):
    """factory_boy factory producing django_eth_events Daemon instances."""

    class Meta:
        model = models.Daemon

    # Default to block 0 (start of the chain).
    block_number = 0
|
"""
Authors: Kevin Eckert
General Control File for Sensor Test
This is simply a script that allows one to get data
from the BMP388 sensor and see it printed on screen,
as well as access more detailed, stored information
which is place in a CSV formatted txt file.
"""
from time import sleep
from machine import I2C, Pin
from write import write_data
import BMP388 as BMP
import VL53L0X as L0X # that's a zero
import adxl345 as ADX # that's a 1, not an L
import accelerometer as ACCEL
from distance import height_lidar
def acquire_data(seconds=20):
    """Sample the BMP388 altimeter and the lidar once per second.

    Runs for `seconds` iterations, appending CSV lines to the two log files
    and echoing each reading to the console.  Returns 1 on completion.
    """
    # Constants and things
    bmp_file = "bmpdata.txt"      # CSV log for BMP388 readings
    lidar_file = "lidardata.txt"  # CSV log for lidar readings
    # I2C Object (ESP32 pins 22/21)
    i2c = I2C(0, scl=Pin(22), sda=Pin(21))
    # Configure BMP
    BMP.set_oversampling(i2c)
    cal = BMP.read_coefficients(i2c)
    # Configure VL50LOX: accelerometer + lidar fused by height_lidar
    adxl = ADX.ADXL345(i2c, 83)
    a = ACCEL.accelerometer(adxl, 'adxl345_calibration_2point')
    lidar = L0X.VL53L0X(i2c, 41)
    this = height_lidar(a, lidar)
    # main loop:
    for i in range(seconds):
        # BMP
        BMPdata = BMP.get_altitude(i2c, cal)
        write_data(bmp_file, str(BMPdata[0]) + "," + str(BMPdata[1]) + "," + str(BMPdata[2]) + "," + str(BMPdata[3]))
        print("BMP " + str(i) + '>\t' + str(BMPdata[0]))  # optional
        # LIDAR
        # NOTE(review): `this.read` without parentheses stringifies the bound
        # method object unless `read` is a property -- confirm whether
        # `this.read()` was intended here.
        LIDARdata = str(this.read)
        write_data(lidar_file, LIDARdata)
        print("LDR " + str(i) + '>\t' + LIDARdata)
        sleep(1)
    # Successful Completion Here
    return 1
|
# -*- coding: utf-8 -*-
"""
Workflows to grab input file structures.
"""
import logging as log
import os
import nipype.pipeline.engine as pe
from hansel.operations import joint_value_map, valuesmap_to_dict
from nipype.interfaces.io import DataSink
from nipype.interfaces.utility import IdentityInterface
from neuro_pypes import configuration
from neuro_pypes.crumb import DataCrumb
from neuro_pypes.utils import extend_trait_list, joinstrings
def build_crumb_workflow(wfname_attacher, data_crumb, in_out_kwargs, output_dir,
                         cache_dir='', wf_name="main_workflow"):
    """ Returns a workflow for the give `data_crumb` with the attached workflows
    given by `attach_functions`.

    Parameters
    ----------
    wfname_attacher: dict[Str] -> function
        Dictionary with name of the workflow and its corresponding
        attach function that will be in charge of attaching workflows
        to the main input/output workflow.

    data_crumb: hansel.Crumb
        The crumb until the subject files.
        Example: Crumb('/home/hansel/cobre/raw/{subject_id}/session_1/{modality}/{image_file})
        At least one crumb arguments of `data_crumb` must be open,
        this argument will be replaced by the corresponding image name.

    in_out_kwargs: dict with keyword arguments
        This arguments are for the in_out_crumb_wf.
        Mainly 'files_crumb_args' which will declare the values each file
        type the crumb arguments in `data_crumb` must be replaced with.
        Example:
              {'anat': [('modality', 'anat_1'),
                        ('image', 'mprage.nii.gz')],
               'rest': [('modality', 'rest_1'),
                        ('image', 'rest.nii.gz')],
              }

    cache_dir: str
        The working directory of the workflow.

    output_dir: str
        The output folder path.

    wf_name: str
        Name of the main workflow.

    Returns
    -------
    wf: Nipype Workflow
    """
    # Fail fast on an unusable crumb before building anything.
    if not data_crumb.exists():
        raise IOError("Expected an existing folder for `data_crumb`, got {}.".format(data_crumb))

    if not data_crumb.isabs():
        raise IOError("Expected an absolute Crumb path for `data_crumb`, got {}.".format(data_crumb))

    if not wfname_attacher or wfname_attacher is None:
        raise ValueError(
            "Expected `wfname_attacher` to have at least one function, "
            "got {}.".format(wfname_attacher)
        )

    # if not in_out_kwargs or in_out_kwargs is None:
    #     raise ValueError("Expected `in_out_kwargs` to have at least the name for sets of parameters for "
    #                      " `data_crumb`, got {}.".format(in_out_kwargs))

    # check some args: default the working dir to a sibling "wd" of output_dir
    if not cache_dir:
        cache_dir = os.path.join(os.path.dirname(output_dir), "wd")

    # print the configuration parameters
    log.info('Using the following configuration parameters:')
    log.info(configuration)

    # generate the workflow
    main_wf = crumb_wf(
        work_dir=cache_dir,
        data_crumb=data_crumb,
        output_dir=output_dir,
        file_templates=in_out_kwargs,
        wf_name=wf_name
    )

    # NOTE(review): this loop variable shadows the `wf_name` parameter;
    # harmless since the parameter is not read afterwards, but worth renaming.
    for wf_name, attach_wf in wfname_attacher.items():
        main_wf = attach_wf(main_wf=main_wf, wf_name=wf_name)

    # move the crash files folder elsewhere
    main_wf.config["execution"]["crashdump_dir"] = os.path.join(main_wf.base_dir, main_wf.name, "log")

    log.info('Workflow created.')
    return main_wf
def crumb_wf(work_dir, data_crumb, output_dir, file_templates,
             wf_name="main_workflow"):
    """ Creates a workflow with the `subject_session_file` input nodes and an empty `datasink`.
    The 'datasink' must be connected afterwards in order to work.

    Parameters
    ----------
    work_dir: str
        Path to the workflow temporary folder

    data_crumb: hansel.Crumb
        The crumb until the subject files.
        Example: Crumb('/home/hansel/data/{subject_id}/{session_id}/{modality}/{image_file})

    output_dir: str
        Path to where the datasink will leave the results.

    file_templates: Dict[str -> list of 2-tuple]
        Maps of crumb argument values to specify each file in the `data_crumb`.
        Example: {'anat': [('modality', 'anat'), ('image_file', 'anat_hc.nii.gz')],
                  'pet': [('modality', 'pet'), ('image_file', 'pet_fdg.nii.gz')],
                 }

    wf_name: str
        Name of the main workflow

    Returns
    -------
    wf: Workflow
    """
    # create the root workflow
    wf = pe.Workflow(name=wf_name, base_dir=work_dir)

    # datasink
    datasink = pe.Node(
        DataSink(parameterization=False, base_directory=output_dir, ),
        name="datasink"
    )

    # input workflow
    # (work_dir, data_crumb, crumb_arg_values, files_crumb_args, wf_name="input_files"):
    select_files = pe.Node(
        DataCrumb(crumb=data_crumb, templates=file_templates, raise_on_empty=False),
        name='selectfiles'
    )

    # basic file name substitutions for the datasink: strip the open crumb
    # argument names from output paths and collapse double underscores
    undef_args = select_files.interface._infields
    substitutions = [(name, "") for name in undef_args]
    substitutions.append(("__", "_"))

    datasink.inputs.substitutions = extend_trait_list(datasink.inputs.substitutions,
                                                      substitutions)

    # Infosource - the information source that iterates over crumb values map from the filesystem
    infosource = pe.Node(interface=IdentityInterface(fields=undef_args), name="infosrc")
    infosource.iterables = list(valuesmap_to_dict(joint_value_map(data_crumb, undef_args)).items())
    infosource.synchronize = True

    # connect the input_wf to the datasink
    joinpath = pe.Node(joinstrings(len(undef_args)), name='joinpath')

    # Connect the infosrc node to the datasink
    input_joins = [(name, 'arg{}'.format(arg_no + 1))
                   for arg_no, name in enumerate(undef_args)]

    wf.connect([
        (infosource, select_files, [(field, field) for field in undef_args]),
        (select_files, joinpath, input_joins),
        (joinpath, datasink, [("out", "container")]),
    ],
    )

    return wf
|
def countConstruct(target, word_bank):
    """Count the ways `target` can be built by concatenating `word_bank` words.

    Tabulation DP: ways[pos] holds the number of ways to construct the prefix
    target[:pos]; each reachable position pushes its count forward to every
    position reachable by appending a matching word.
    """
    ways = [1] + [0] * len(target)  # seed: one way to build the empty prefix
    for pos in range(len(target) + 1):
        if ways[pos]:
            for candidate in word_bank:
                if target.startswith(candidate, pos):
                    ways[pos + len(candidate)] += ways[pos]
    return ways[-1]
# Demo runs of countConstruct on the classic example inputs.
demo_cases = [
    ("purple", ["purp", "p", "ur", "le", "purpl"]),
    ("abcdef", ["ab", "abc", "cd", "def", "abcd"]),
    ("skateboard", ["bo", "rd", "ate", "t", "ska", "sk", "boar"]),
    ("enterapotentpot", ["a", "p", "ent", "enter", "ot", "o", "t"]),
    ("eeeeeeeeeeeeeeeeeeeeeeeeeef", [
        "e",
        "ee",
        "eee",
        "eeee",
        "eeeee",
        "eeeeee"]),
]
for demo_target, demo_bank in demo_cases:
    print(countConstruct(demo_target, demo_bank))
|
# Naive skyline sweep over a fixed building set.  Each building is a tuple
# (left, height, right); the script prints the (x, height) points where the
# maximum height of the "active" buildings changes.
buildings = [(1,11,5), (3,6,7), (3,13,9), (12,7,16), (16,3,25), (19,18,22)]
edges = []
edges.extend([building[0],building[2]] for building in buildings)
edges = sorted(sum(edges,[])) #sorting and flattening the list of building edges
current = 0  # skyline height at the previously processed edge
points = []
for i in edges:
    active = []
    # buildings whose half-open span [left, right) covers position i
    active.extend(building for building in buildings if (building[0] <= i and building[2] > i))
    #current observed point is within borders of these buildings (active buildings)
    if not active:
        #if there is no active buildings, highest point is 0
        # NOTE(review): this appends (i, 0) even when `current` is already 0,
        # so duplicate zero points can appear in the output -- confirm OK.
        current = 0
        points.append((i,0))
        continue
    max_h = max(building[1] for building in active)
    if max_h != current:
        #record a point only when the maximum active height changes
        current = max_h
        points.append((i,max_h))
print(points)
# Runtime configuration constants for the sensor/telemetry consumer.
MESSAGE_TIMESPAN = 2000   # interval between messages (presumably ms -- TODO confirm)
SIMULATED_DATA = False    # True -> emit simulated readings instead of sensor data
I2C_ADDRESS = 0x76        # I2C address of the attached sensor
GPIO_PIN_ADDRESS = 24     # GPIO pin number used by the consumer of this config
BLINK_TIMESPAN = 1000     # blink period (presumably ms -- TODO confirm)
|
# Generated by Django 2.2.1 on 2019-08-07 05:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the BlogCategory table.

    Auto-generated by Django 2.2.1; edit with care.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='BlogCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category_name', models.CharField(max_length=50, unique=True)),
                ('slug', models.SlugField(unique=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['category_name'],
            },
        ),
    ]
|
import time
from base.base_action import BaseAction
from base.driver import get_driver
from page.page import Page
class TestShortCut:
    """Appium UI tests for the personal-center "shortcuts" page."""

    def setup(self):
        # fresh driver + page-object bundle for every test
        self.driver = get_driver()
        self.page = Page(self.driver)

    def teardown(self):
        time.sleep(3)
        self.driver.quit()

    # 9.6 Wang Runan
    # Tap personal center -> shortcut page -> tap back -> personal center page
    def test_back_button(self):
        # step1: on the home page, tap the personal-center button
        self.page.homepage.click_center_button()
        # step2: on the personal-center page, tap "shortcuts"
        self.page.centerpage.click_shortcut_tab()
        # step3: assert we navigated to the "shortcuts" page
        assert self.page.shortcutpage.get_text(self.page.shortcutpage.stort_cut_title)=="快捷方式"
        # step4: tap back
        self.page.shortcutpage.click_back_button()
        # step5: assert we are back on the personal-center page
        assert self.page.centerpage.get_text(self.page.centerpage.tittle)=="个人中心"

    # Earlier draft of the add-timetable test, kept commented out for reference:
    # # shortcut page - tap add - popup -> tap add - tap confirm - tap add
    # # no popup -> tap add
    # def test_school_timetable_add(self):
    #     # step1: on the home page, tap the personal-center button
    #     time.sleep(3)
    #     self.page.homepage.click_center_button()
    #     print("1")
    #
    #     # call scroll_page_one_time() to swipe the screen
    #     self.page.shortcutpage.scroll_page_one_time()
    #     print("2")
    #
    #
    #
    #     # step2: on the personal-center page, tap "shortcuts"
    #     self.page.centerpage.click_shortcut_tab()
    #
    #     time.sleep(2)
    #     print("3")
    #     # step4: on the shortcut page, tap timetable -> add
    #
    #     self.page.shortcutpage.click_school_timetable_add()
    #     print("4")
    #     time.sleep(2)
    #     # check whether the add-to-home-screen popup appeared
    #     if self.page.shortcutpage.if_exist_pop_up_window:
    #         # step6: popup present -> tap add
    #         self.page.shortcutpage.click_school_home_screen_add()
    #         print("5")
    #         time.sleep(1)
    #         # tap confirm
    #         #self.page.shortcutpage.click_add_to_the_desktop_confirm()
    #         print("6")
    #
    #         # on the shortcut page, tap timetable -> add again
    #         self.page.shortcutpage.click_school_timetable_add()
    #         print("7")
    #
    #         # assert the shortcut was created
    #         assert self.page.shortcutpage.is_toast_exist("已创建")
    #         return
    #
    #     # # no popup: assert creation directly
    #     # # tap back
    #     # self.page.shortcutpage.click_back_button()
    #     # # step2: tap "shortcuts" on the personal-center page
    #     # self.page.centerpage.click_shortcut_tab()
    #     # self.page.shortcutpage.click_school_timetable_add()
    #
    #     assert self.page.shortcutpage.is_toast_exist("已创建")

    # Shortcuts page - add the class-timetable shortcut
    def test_school_timetable_add(self):
        # step1: on the home page, tap the personal-center button
        time.sleep(3)
        self.page.homepage.click_center_button()
        # step2: on the personal-center page, tap "shortcuts"
        self.page.centerpage.click_shortcut_tab()
        # # step3: assert we navigated to the "shortcuts" page
        # assert self.page.stortcutpage.get_text(self.page.stortcutpage.stort_cut_title)=='快捷方式'
        time.sleep(2)
        # step4: on the shortcut page, tap timetable -> add
        self.page.shortcutpage.click_school_timetable_add()
        # step5: check whether the add-to-home-screen popup appeared
        # NOTE(review): find_elements_by_* was removed in Selenium 4 --
        # confirm the pinned client version still provides it.
        if self.driver.find_elements_by_xpath("//*[contains(@resource-id,'widget_name')]"):
            # step6: popup present -> tap add
            self.page.shortcutpage.click_school_home_screen_add()
            time.sleep(1)
            # step7: check for the "already tried to add to desktop" popup
            if self.driver.find_elements_by_id("com.xiaomi.xiaoailite:id/btn_confirm"):
                # if self.page.shortcutpage.is_button_exist(self.page.shortcutpage.is_add_desk_allary_exist):
                # if self.page.shortcutpage.is_add_desk_allary_exist():
                # step8: the confirmation popup is present -> tap confirm
                self.page.shortcutpage.click_add_to_the_desktop_confirm()
            # step8: on the shortcut page, tap timetable -> add again
            self.page.shortcutpage.click_school_timetable_add()
            # step9: assert the shortcut was created
            assert self.page.shortcutpage.is_toast_exist("该应用桌面快捷方式已创建")
        # step10: no popup -> assert creation directly
        else:
            assert self.page.shortcutpage.is_toast_exist("该应用桌面快捷方式已创建")

    def test_xiaoai_translate_add(self):
        # step1: on the home page, tap the personal-center button
        time.sleep(3)
        self.page.homepage.click_center_button()
        # call scroll_page_one_time() to swipe the screen
        self.page.shortcutpage.scroll_page_one_time()
        # step2: on the personal-center page, tap "shortcuts"
        self.page.centerpage.click_shortcut_tab()
        time.sleep(2)
        # step4: on the shortcut page, tap translate -> add
        self.page.shortcutpage.click_xiaoai_translate_add()
        time.sleep(2)
        print("4")
        # check whether the add-to-home-screen popup appeared
        if self.driver.find_elements_by_xpath("//*[contains(@resource-id,'widget_name')]"):
            # step6: popup present -> tap add
            self.page.shortcutpage.click_xiaoai_translate_button()
            time.sleep(1)
            # check for the "already tried to add to desktop" popup
            if self.driver.find_elements_by_id("com.xiaomi.xiaoailite:id/btn_confirm"):
                # the confirmation popup is present -> tap confirm
                self.page.shortcutpage.click_add_to_the_desktop_confirm()
            # on the shortcut page, tap translate -> add again
            self.page.shortcutpage.click_xiaoai_translate_add()
            print("7")
            # assert the shortcut was created
            assert self.page.shortcutpage.is_toast_exist("已创建")
        #return
        # # no popup: assert creation directly
        # # tap back
        # self.page.shortcutpage.click_back_button()
        # # step2: tap "shortcuts" on the personal-center page
        # self.page.centerpage.click_shortcut_tab()
        # self.page.shortcutpage.click_school_timetable_add()
        assert self.page.shortcutpage.is_toast_exist("已创建")
|
#for extracting images
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
#opening extracted images
from PIL import Image
import requests
from io import BytesIO
#convertine image to text
import pytesseract
#storing data into json file
import json
#goes to where tesseract download is saved
# Point pytesseract at the local Tesseract-OCR installation.
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Scrape every .jpg size-chart image from the page, OCR each one, and store
# all extracted texts in data.json.
html = urlopen('https://www.railsclothing.com/pages/size-charts')
bs = BeautifulSoup(html, 'html.parser')
images = bs.find_all('img', {'src': re.compile('.jpg')})
texts = []
for image in images:
    url = 'http:' + image['src']  # gets url of image (src is protocol-relative)
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))  # opens image
    texts.append(pytesseract.image_to_string(img))  # converts image into text
# Bug fix: the original opened data.json in 'w' mode inside the loop, so each
# image overwrote the previous result and only the last text survived; all
# extracted texts are now collected and written once as a JSON list.
with open('data.json', 'w') as f:
    json.dump(texts, f)
|
class Environment:
    """Abstract interface for an environment; every method below must be
    overridden by a concrete subclass (each default raises NotImplementedError)."""

    def setup(self):
        raise NotImplementedError

    def start(self):
        raise NotImplementedError

    def reset(self):
        raise NotImplementedError

    def clear(self):
        raise NotImplementedError

    def pull(self):
        raise NotImplementedError
        # return data, path, starting_direction

    def put(self, key, data):
        raise NotImplementedError

    def check(self):
        raise NotImplementedError
        # return a Status

    def load_path(self, i):
        raise NotImplementedError
|
import math
l = int(input('enter lower input'))
# Scan upward from l, printing every "factorion": a number that equals the
# sum of the factorials of its own digits.  The condition x >= l never
# becomes false, so the search runs indefinitely.
x = l
while x >= l:
    digit_factorial_sum = sum(math.factorial(int(ch)) for ch in str(x))
    if digit_factorial_sum == x:
        print(x)
    x = x + 1
|
"""JSON helper functions"""
import os
import calendar
from datetime import date
import json
import traceback
from functools import wraps
from django.views.decorators.csrf import csrf_exempt
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
from rest_framework_simplejwt.authentication import JWTAuthentication
def get_all_sundays(to_date):
    """Collect every Sunday of to_date's year up to and including to_date.

    Returns a pair (list of datetime.date, list of 'DD/Mon' strings).
    """
    sundays = []
    sundays_str = []
    cal = calendar.TextCalendar(calendar.SUNDAY)
    # Walk each month of the year up to to_date's month.
    for month in range(1, to_date.month + 1):
        for day_num in cal.itermonthdays(to_date.year, month):
            if day_num == 0:
                # Padding day belonging to a neighbouring month.
                continue
            current = date(to_date.year, month, day_num)
            # weekday() == 6 is Sunday; also clip to the cutoff date.
            if current.weekday() == 6 and current <= to_date:
                sundays.append(current)
                sundays_str.append(current.strftime('%d/%b'))
    return sundays, sundays_str
class ValidationError(Exception):
    """Raised when request payload validation fails."""
def JsonResponse(data, dump=True, status=200):
    """Wrap *data* in an application/json HttpResponse.

    A dict payload without an 'errors' key gets a success flag injected;
    non-mapping payloads (e.g. lists) pass through untouched.  When
    dump is False the caller supplies an already-serialized body.
    """
    try:
        data['errors']
    except KeyError:
        # No errors reported: mark the payload as successful.
        data['success'] = True
    except TypeError:
        # data is not a mapping; leave it as-is.
        pass
    body = json.dumps(data, cls=DjangoJSONEncoder) if dump else data
    return HttpResponse(body, content_type='application/json', status=status)
def JsonError(error_string, status=200):
    """Build a JSON response flagged as failed, carrying *error_string*."""
    payload = {
        'success': False,
        'errors': error_string,
    }
    return JsonResponse(payload, status=status)
def JsonResponseBadRequest(error_string):
    """400 Bad Request as a JSON error body."""
    return JsonError(error_string, 400)


def JsonResponseUnauthorized(error_string):
    """401 Unauthorized as a JSON error body."""
    return JsonError(error_string, 401)


def JsonResponseForbidden(error_string):
    """403 Forbidden as a JSON error body."""
    return JsonError(error_string, 403)


def JsonResponseNotFound(error_string):
    """404 Not Found as a JSON error body."""
    return JsonError(error_string, 404)


def JsonResponseNotAllowed(error_string):
    """405 Method Not Allowed as a JSON error body."""
    return JsonError(error_string, 405)


def JsonResponseNotAcceptable(error_string):
    """406 Not Acceptable as a JSON error body."""
    return JsonError(error_string, 406)
import base64
def url_encode(string):
    """Base64-encode an ASCII string (used to obfuscate doc ids in URLs).

    Note: despite the name this is plain base64, not URL-safe base64,
    so '+' and '/' may appear in the output.
    """
    string_bytes = string.encode("ascii")
    # BUG FIX: the original encoded the undefined name
    # `sample_string_bytes`, raising NameError on every call.
    base64_bytes = base64.b64encode(string_bytes)
    return base64_bytes.decode("ascii")
def url_decode(base64_string):
    """Decode a base64-encoded ASCII string back to plain text."""
    raw = base64.b64decode(base64_string.encode("ascii"))
    return raw.decode("ascii")
def decode_docid(view_func):
    """View decorator that transparently converts a base64 'doc_id' kwarg
    into its integer form before calling the wrapped view."""
    @csrf_exempt
    @wraps(view_func)
    def _wrapped_view(request, *args, **kwargs):
        encoded = kwargs.get('doc_id')
        if encoded:
            kwargs['doc_id'] = int(url_decode(encoded))
        return view_func(request, *args, **kwargs)
    return _wrapped_view
def token_required(view_func):
    """Decorator which ensures the user has provided a correct token.

    On success, attaches the authenticated user/token to the request and
    delegates to the view; otherwise returns a 401 (auth failure) or
    406 (no credentials) JSON error.
    """
    @csrf_exempt
    @wraps(view_func)
    def _wrapped_view(request, *args, **kwargs):
        try:
            # authenticate() returns (user, token) on success, or None
            # when no credentials were supplied.
            auth_result = JWTAuthentication().authenticate(request)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            return JsonResponseUnauthorized(
                "TOKEN REQUIRED")
        if not auth_result:
            # BUG FIX: the original unpacked `user, token = ...` before
            # checking the result, so a None return raised TypeError and
            # was misreported as "TOKEN REQUIRED" by the bare except.
            return JsonResponseNotAcceptable("Token not present")
        user, token = auth_result
        request.user = user
        request.token = token
        # BUG FIX: the original had an unreachable trailing
        # `return JsonResponseForbidden("Invalid token supplied")`
        # after this return; it has been removed.
        return view_func(request, *args, **kwargs)
    return _wrapped_view
|
import sys
from pathlib import Path
#sys.path.append(str(Path('./..').resolve()))
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchnet as tnt
import pandas as pd
import yaml
from utils import logger
from test import Tester
from environment import DemoEnvironment, initialize, print_label_name
from gradcam import GradCam, save_class_activation_images
from data import CxrDataset, MGH_DATA_BASE
from preprocess.dcm2png import DcmToPng
class Demo(Tester):
    """Inference-only driver built on Tester: loads trained multitask CXR
    checkpoint(s), produces per-sample probabilities/decisions, and can
    emit Grad-CAM heatmaps.

    Output columns (per `param_labels` in `cxr_predict` below):
      0-1   view position (ap / pa)
      2-3   gender (female / male)
      4-6   vendor (varian / agfa / ge)
      7-13  findings + 'abnormal', thresholded at 0.7
      14    PatientAge (regression, rounded for pred)
    """
    def __init__(self, env, pt_runtime="test", fn_net=None, fl_gradcam=False, cls_gradcam=None, th_gradcam=0.7, fl_ensemble=False):
        # th_gradcam: probability threshold used by Grad-CAM rendering.
        # fn_net: checkpoint base name; None selects the default model file.
        super().__init__(env, pt_runtime=pt_runtime, fn_net=fn_net, fl_gradcam=fl_gradcam, cls_gradcam=cls_gradcam, fl_ensemble=fl_ensemble)
        self.th_gradcam = th_gradcam
        self.fl_gradcam_save = False
        self.fl_ensemble = fl_ensemble
    def demo_cxr_evaluation(self, epoch=1, fl_save=False, hmp_dims=None):
        """Run single-model inference; returns {'prob': [...], 'pred': [...]}."""
        # Default to the canonical checkpoint name when none was given.
        if self.fn_net == None:
            pt_model = self.pt_runtime.joinpath('models/multitask_model_00.pth.tar')
        else:
            pt_model = self.pt_runtime.joinpath('models/'+str(self.fn_net)+'_00.pth.tar')
        self.env.load_model(pt_model)
        try:
            # Best-effort restore of previously pickled Tester state.
            self.load()
        except:
            logger.debug('there is no pkl to load.')
        prob, pred = self.demo_cxr_test(epoch, self.env.test_loader, fl_save=fl_save)
        if self.fl_gradcam:
            self.gradcam_data(self.env.gradcam_loader, hmp_dims=hmp_dims)
        result ={
            'prob': prob.tolist(),
            'pred': pred.tolist(),
        }
        return result
    def demo_cxr_ensemble_evaluation(self, epoch=1, fl_save=False, hmp_dims=None, n_ens=1):
        """Run n_ens checkpoints, average their probabilities, and optionally
        accumulate Grad-CAM maps across ensemble members."""
        probs = []
        if self.fl_gradcam:
            # One 16x16 activation grid per Grad-CAM class per sample.
            # NOTE(review): the accumulator starts at ones, not zeros, so every
            # cell carries a +1 offset before member CAMs are added — confirm
            # whether this is intentional.
            cams = np.ones((len(self.env.gradcam_loader), len(self.cls_gradcam), 16, 16))
        for k in range(n_ens):
            # Ensemble checkpoints are suffixed _00, _01, ...
            pt_model = self.pt_runtime.joinpath('models/'+str(self.fn_net)+f'_{k:02d}.pth.tar')
            self.env.load_model(pt_model)
            #logger.info(f'network to test: {self.env.model}')
            try:
                self.load()
            except:
                logger.debug('there is no pkl to load.')
            prob, _ = self.demo_cxr_test(epoch, self.env.test_loader, fl_save=fl_save)
            probs.append(prob)
            if self.fl_gradcam:
                _, _, cam = self.gradcam_data(self.env.gradcam_loader, hmp_dims=hmp_dims)
                cams += cam
        # evaluate ensemble's performance (average of member probabilities)
        prob, pred = self.demo_cxr_ensemble_test(probs, n_ens)
        if self.fl_gradcam:
            #[doyun] need to debugging
            _, _, cams = self.gradcam_data(self.env.gradcam_loader, ens_flg=True, cams_ens=cams, prob_ens=prob)
        result ={
            'prob': prob.tolist(),
            'pred': pred.tolist(),
        }
        return result
    def demo_cxr_test(self, epoch, test_loader, fl_save=False, fl_iter_save=False, fl_iter_target=0):
        """Forward every batch and convert raw outputs into decisions.

        Returns (prob, pred), both of shape (num_batches, out_dim).
        NOTE(review): rows are indexed by batch index and every decision
        calls .item() on a column, so this assumes a batch size of 1 —
        confirm against the loader configuration.
        NOTE(review): fl_save / fl_iter_save / fl_iter_target are unused here.
        """
        test_set = test_loader.dataset  # NOTE(review): unused local
        out_dim = self.env.out_dim
        labels = self.env.labels  # NOTE(review): unused local
        CxrDataset.eval()
        self.env.model.eval()
        with torch.no_grad():
            tqdm_desc = f'testing '
            t = tqdm(enumerate(test_loader), total=len(test_loader), desc=tqdm_desc,
                     dynamic_ncols=True)
            pred = torch.zeros(len(test_loader), out_dim).int()
            prob = torch.zeros(len(test_loader), out_dim).float()
            for bt_idx, tp_data in t:
                output, _ = self.test_batch(tp_data)
                # Sigmoid on every column except the last; column 14 (age
                # regression) stays a raw value.
                output[:, 0:-1] = torch.sigmoid(output[:, 0:-1])
                # view position decision: one-hot argmax over (ap, pa)
                pred[bt_idx, 0] = 1 if output[:, 0].item() >= output[:, 1].item() else 0
                pred[bt_idx, 1] = 0 if output[:, 0].item() >= output[:, 1].item() else 1
                # gender decision: one-hot argmax over (female, male)
                pred[bt_idx, 2] = 1 if output[:, 2].item() >= output[:, 3].item() else 0
                pred[bt_idx, 3] = 0 if output[:, 2].item() >= output[:, 3].item() else 1
                # vendor decision: 3-way argmax over (varian, agfa, ge)
                # (the original comment said "gender" — mislabeled)
                pred[bt_idx, 4] = 1 if (output[:, 4].item() >= output[:, 5].item()) & (output[:, 4].item() >= output[:, 6].item()) else 0
                pred[bt_idx, 5] = 1 if (output[:, 5].item() >= output[:, 4].item()) & (output[:, 5].item() >= output[:, 6].item()) else 0
                pred[bt_idx, 6] = 1 if (output[:, 6].item() >= output[:, 5].item()) & (output[:, 6].item() >= output[:, 4].item()) else 0
                # finding decisions: independent 0.7 probability threshold
                pred[bt_idx, 7] = 1 if output[:, 7].item() >= 0.7 else 0
                pred[bt_idx, 8] = 1 if output[:, 8].item() >= 0.7 else 0
                pred[bt_idx, 9] = 1 if output[:, 9].item() >= 0.7 else 0
                pred[bt_idx, 10] = 1 if output[:, 10].item() >= 0.7 else 0
                pred[bt_idx, 11] = 1 if output[:, 11].item() >= 0.7 else 0
                pred[bt_idx, 12] = 1 if output[:, 12].item() >= 0.7 else 0
                pred[bt_idx, 13] = 1 if output[:, 13].item() >= 0.7 else 0
                # age: round the regression output to the nearest integer
                pred[bt_idx, 14] = int(np.round(output[:, 14].item()))
                prob[bt_idx, :] = output
        return prob, pred
    def demo_cxr_ensemble_test(self, probs, n_ens):
        """Average n_ens member probability tensors and re-apply the same
        decision rules as demo_cxr_test on the averaged output.

        Returns (prob, pred), both shaped like a single member's output.
        """
        output = torch.zeros(probs[0].shape)
        for i in range(n_ens):
            output += probs[i]
        output /= n_ens
        pred = torch.zeros(probs[0].shape).int()
        prob = torch.zeros(probs[0].shape).float()
        # view position decision: one-hot argmax over (ap, pa)
        for i in range(pred.shape[0]):
            pred[i, 0] = 1 if output[i, 0].item() >= output[i, 1].item() else 0
            pred[i, 1] = 0 if output[i, 0].item() >= output[i, 1].item() else 1
            # gender decision: one-hot argmax over (female, male)
            pred[i, 2] = 1 if output[i, 2].item() >= output[i, 3].item() else 0
            pred[i, 3] = 0 if output[i, 2].item() >= output[i, 3].item() else 1
            # vendor decision: 3-way argmax over (varian, agfa, ge)
            # (the original comment said "gender" — mislabeled)
            pred[i, 4] = 1 if (output[i, 4].item() >= output[i, 5].item()) & (output[i, 4].item() >= output[i, 6].item()) else 0
            pred[i, 5] = 1 if (output[i, 5].item() >= output[i, 4].item()) & (output[i, 5].item() >= output[i, 6].item()) else 0
            pred[i, 6] = 1 if (output[i, 6].item() >= output[i, 5].item()) & (output[i, 6].item() >= output[i, 4].item()) else 0
            # finding decisions: independent 0.7 probability threshold
            pred[i, 7] = 1 if output[i, 7].item() >= 0.7 else 0
            pred[i, 8] = 1 if output[i, 8].item() >= 0.7 else 0
            pred[i, 9] = 1 if output[i, 9].item() >= 0.7 else 0
            pred[i, 10] = 1 if output[i, 10].item() >= 0.7 else 0
            pred[i, 11] = 1 if output[i, 11].item() >= 0.7 else 0
            pred[i, 12] = 1 if output[i, 12].item() >= 0.7 else 0
            pred[i, 13] = 1 if output[i, 13].item() >= 0.7 else 0
            # age: round the averaged regression output
            pred[i, 14] = int(np.round(output[i, 14].item()))
        prob = output
        return prob, pred
def cxr_predict(hmp_dims=None, dcm_file=None, cuda='0', fl_gradcam=True, Nens=3, th_gradcam=0.7, input_type='dicom'):
    """End-to-end CXR prediction pipeline: DICOM->PNG preprocessing,
    (optionally ensembled) model inference, and CSV export of the
    per-file probabilities and predictions.

    Args:
        hmp_dims: heatmap output dimensions passed to Grad-CAM (tuple or None).
        dcm_file: in-memory DICOM dataset handed to DcmToPng (None reads from disk).
        cuda: CUDA device selector string.
        fl_gradcam: enable Grad-CAM heatmap generation.
        Nens: number of ensemble members; 1 disables ensembling.
        th_gradcam: probability threshold for Grad-CAM rendering.
        input_type: 'dicom' triggers DICOM-to-PNG conversion; other values
            assume images are already present.

    Returns:
        The result dict from Demo: {'prob': [...], 'pred': [...]}.
    """
    print("\n-------------------------------------------------------------------")
    print("|                                                                 |")
    print("|                                                                 |")
    print("|     v1.0 MGH Age, View, Gender, Vendor, Abnormal Detection      |")
    print("|    (Copyright (c) 2021-2022, MGH LMIC. All rights reserved.)    |")
    print("|                                                                 |")
    print("|                                                                 |")
    print("-------------------------------------------------------------------\n")
    # Fixed run configuration; the label order below drives the output
    # column semantics used by Demo's decision logic.
    param_cuda = cuda
    param_labels = ['ap', 'pa', 'female', 'male', 'varian', 'agfa', 'ge',
                    'Foreign body>.>.', 'Hilar/mediastinum>Cardiomegaly>.',
                    'Lung density>Increased lung density>Atelectasis',
                    'Lung density>Increased lung density>Pulmonary edema',
                    'Lung density>Increased lung density>pneumonia',
                    'Pleura>Pleural effusion>.', 'abnormal',
                    'PatientAge']
    param_path = None
    param_runDir = ''
    param_type = 0
    param_preModel = 'multitask_model'  # checkpoint base name
    param_gradcam = fl_gradcam
    param_arch = None
    param_task = 2
    # Grad-CAM classes: the six finding labels (indices 7..12), i.e.
    # everything except 'abnormal' and 'PatientAge'.
    param_clsGcam = param_labels[-8:-2]
    param_Nens = Nens
    fl_ensemble = False if param_Nens == 1 else True
    runtime_path, device = initialize(param_runDir, param_cuda)
    # image preprocessing: convert input DICOMs under input_dir/DICOM to
    # PNGs under input_dir/IMAGEFILE
    d = DcmToPng(param_labels, dcm_path=runtime_path.joinpath('input_dir/DICOM').resolve(), png_path=runtime_path.joinpath('input_dir/IMAGEFILE').resolve(), ds=dcm_file, localOp=True, input_type=input_type)
    #d = DcmToPng(param_labels, dcm_path=runtime_path.joinpath('dicoms').resolve(), png_path=runtime_path.joinpath('pngs').resolve(), ds=dcm_file)
    if input_type == 'dicom':
        d.dcm2png()
    # start network inference
    env = DemoEnvironment(device, runtime_path, mtype=param_type, name_labels=param_labels, name_paths=param_path, name_model=param_arch, task_type=param_task)
    t = Demo(env, pt_runtime=runtime_path, fn_net=param_preModel, fl_gradcam=param_gradcam, cls_gradcam=param_clsGcam, th_gradcam=th_gradcam, fl_ensemble=fl_ensemble)
    if (fl_ensemble):
        result = t.demo_cxr_ensemble_evaluation(hmp_dims=hmp_dims, n_ens=param_Nens)
    else:
        result = t.demo_cxr_evaluation(hmp_dims=hmp_dims)
    # Attach source file paths (images.csv is produced by preprocessing)
    # and write the per-file probability/prediction tables.
    df_prob = pd.DataFrame(result['prob'], columns=print_label_name)
    df_pred = pd.DataFrame(result['pred'], columns=print_label_name)
    df_file = pd.read_csv(runtime_path.joinpath('input_dir/IMAGEFILE/images.csv'))
    df_prob['file'] = df_file['PATH']
    df_pred['file'] = df_file['PATH']
    runtime_path.joinpath('output_dir/Classification').mkdir(parents=True, exist_ok=True)
    df_prob.to_csv(runtime_path.joinpath('output_dir/Classification/probability.txt'), header=True, index=True, sep=',', mode='w')
    df_pred.to_csv(runtime_path.joinpath('output_dir/Classification/prediction.txt'), header=True, index=True, sep=',', mode='w')
    #print(result)
    return result
if __name__ == "__main__":
    # Load the run configuration.
    # BUG FIX: yaml.load(f) with no Loader is unsafe (can construct
    # arbitrary objects) and raises TypeError on PyYAML >= 6;
    # safe_load parses plain data only.
    with open('config_dir/config.yaml', 'r') as f:
        config = yaml.safe_load(f)
    result = cxr_predict(input_type=config['input'], cuda=config['cuda'], fl_gradcam=config['fl_gradcam'], hmp_dims=(config['hmp_dims'], config['hmp_dims']), Nens=config['Nens'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.